00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "ruby/ruby.h"
00012 #include "ruby/st.h"
00013 #include "ruby/encoding.h"
00014
00015 #include "gc.h"
00016 #include "vm_core.h"
00017 #include "iseq.h"
00018 #include "eval_intern.h"
00019
00020 #include "vm_insnhelper.h"
00021 #include "vm_insnhelper.c"
00022 #include "vm_exec.h"
00023 #include "vm_exec.c"
00024
00025 #include "vm_method.c"
00026 #include "vm_eval.c"
00027
00028 #include <assert.h>
00029
#define BUFSIZE 0x100
#define PROCDEBUG 0

/* Core classes/modules published by the VM. */
VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;
VALUE rb_mRubyVMFrozenCore;

/* Bumped by rb_vm_change_state() via INC_VM_STATE_VERSION(). */
VALUE ruby_vm_global_state_version = 1;
/* Incremented by rb_vm_inc_const_missing_count() on each const_missing. */
VALUE ruby_vm_const_missing_count = 0;

/* Per basic-operation flag, set to 1 once the corresponding optimized core
 * method is redefined -- see rb_vm_check_redefinition_opt_method(). */
char ruby_vm_redefined_flag[BOP_LAST_];

rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;

static void thread_free(void *ptr);

VALUE rb_insns_name_array(void);

/* instruction-analysis hooks (defined elsewhere) */
void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);
00053
/*
 * Bump the VM-wide state version counter.
 * NOTE(review): INC_VM_STATE_VERSION() is defined elsewhere; presumably it
 * increments ruby_vm_global_state_version so version-tagged caches are
 * invalidated -- confirm in vm_core.h.
 */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00059
00060 void
00061 rb_vm_inc_const_missing_count(void)
00062 {
00063 ruby_vm_const_missing_count +=1;
00064 }
00065
00066
00067
/*
 * Push a FINISH sentinel frame onto th's frame stack and aim its pc at the
 * canonical finish instruction sequence.  vm_exec() treats this frame as
 * the marker for returning control back to C.
 */
static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
00077
/*
 * Prepare th's stack to execute a toplevel iseq: push a FINISH sentinel,
 * then a TOP frame running iseqval with th->top_self as self.
 * Raises TypeError when iseqval is not a toplevel InstructionSequence.
 */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* sentinel first, then the TOP frame itself */
    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00097
/*
 * Prepare th's stack to execute an eval iseq inside the block held in
 * th->base_block (caller must set it).  An optional cref overrides the
 * cref slot (dfp[-1]) of the new frame.
 */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    /* sentinel first, then the EVAL frame sharing the block's scope */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;     /* cref lives in the dfp[-1] slot */
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00117
/*
 * Prepare th's stack to run the main script iseq inside TOPLEVEL_BINDING's
 * environment, then re-capture the binding's env so locals defined by the
 * script become visible through TOPLEVEL_BINDING.
 */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;       /* eval setup reads th->base_block */
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save the binding's env if the script defines any locals;
     * NOTE(review): `bind` was already dereferenced above, so the
     * `bind &&` part of this test can never be the deciding factor. */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00140
00141 rb_control_frame_t *
00142 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00143 {
00144 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00145 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00146 return cfp;
00147 }
00148 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00149 }
00150 return 0;
00151 }
00152
/*
 * Like rb_vm_get_ruby_level_next_cfp(), but only continues past a
 * non-Ruby frame while it carries VM_FRAME_FLAG_PASSED (a frame that
 * merely forwarded a block).  Returns the caller's Ruby-level frame,
 * or 0 if the walk stops first.
 */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        /* stop at the first frame that did not just pass a block along */
        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00174
00175
00176
00177
00178
00179
00180
00181
00182
00183
00184
/* True when env (a dfp/lfp slot pointer) does NOT point into th's value
 * stack region, i.e. the environment already lives on the heap. */
#define ENV_IN_HEAP_P(th, env) \
  (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
/* The env VALUE stored one slot above the dfp slot (set by vm_make_env_each). */
#define ENV_VAL(env) ((env)[1])
00188
/*
 * GC mark function for heap env objects: marks the copied locals, the
 * parent env chain, and the embedded block's self/proc/iseq.
 */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* mark the copied local-variable slots */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may hold either a NODE (ifunc) or an iseq struct */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00218
00219 static void
00220 env_free(void * const ptr)
00221 {
00222 RUBY_FREE_ENTER("env");
00223 if (ptr) {
00224 const rb_env_t * const env = ptr;
00225 RUBY_FREE_UNLESS_NULL(env->env);
00226 ruby_xfree(ptr);
00227 }
00228 RUBY_FREE_LEAVE("env");
00229 }
00230
00231 static size_t
00232 env_memsize(const void *ptr)
00233 {
00234 if (ptr) {
00235 const rb_env_t * const env = ptr;
00236 size_t size = sizeof(rb_env_t);
00237 if (env->env) {
00238 size += env->env_size * sizeof(VALUE);
00239 }
00240 return size;
00241 }
00242 return 0;
00243 }
00244
/* T_DATA type descriptor wiring the GC hooks for "VM/env" objects. */
static const rb_data_type_t env_data_type = {
    "VM/env",
    env_mark, env_free, env_memsize,
};
00249
00250 static VALUE
00251 env_alloc(void)
00252 {
00253 VALUE obj;
00254 rb_env_t *env;
00255 obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
00256 env->env = 0;
00257 env->prev_envval = 0;
00258 env->block.iseq = 0;
00259 return obj;
00260 }
00261
00262 static VALUE check_env_value(VALUE envval);
00263
/*
 * Debug helper: dump an env's dfp-relative slots and recursively check the
 * parent env stored at dfp[4].  Always returns 1 (check_env_value relies
 * on that).  Only used when PROCDEBUG/VMDEBUG paths are enabled.
 */
static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp: %10p\n", (void *)env->block.lfp);
    printf("dfp: %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        /* recurse into the parent env */
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}
00284
00285 static VALUE
00286 check_env_value(VALUE envval)
00287 {
00288 rb_env_t *env;
00289 GetEnvPtr(envval, env);
00290
00291 if (check_env(env)) {
00292 return envval;
00293 }
00294 rb_bug("invalid env");
00295 return Qnil;
00296 }
00297
/*
 * Move one frame's environment from the value stack to the heap.
 *
 * envptr points at the dfp slot of the frame being saved; endptr marks the
 * outermost slot that must be converted (the frame's lfp).  If the parent
 * environment (reachable through the GC-guarded pointer in *envptr) still
 * lives on the stack, it is converted first by recursion, so heap envs end
 * up chained via prev_envval.
 *
 * Returns the env VALUE now stored in *envptr.
 */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* already converted: the env VALUE sits one slot above the dfp slot */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* locate the control frame that owns the parent env slot */
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            /* convert the parent first, then re-link this frame to it */
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate the heap env and copy this frame's locals into it */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* locals + dfp slot, plus two extra slots filled below */
    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear the stale stack slots (disabled) */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    /* record the env VALUE on the stack slot and in the heap copy */
    *envptr = envval;
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;        /* the ENV_VAL slot of the heap copy */
    nenvptr[2] = penvval;       /* parent env VALUE */

    /* re-point the frame at the heap-resident locals */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* snapshot the frame's block info into the env */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* non-Ruby frames carry no usable iseq */
        env->block.iseq = 0;
    }
    return envval;
}
00382
00383 static int
00384 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00385 {
00386 int i;
00387 if (!iseq) return 0;
00388 for (i = 0; i < iseq->local_table_size; i++) {
00389 ID lid = iseq->local_table[i];
00390 if (rb_is_local_id(lid)) {
00391 rb_ary_push(ary, ID2SYM(lid));
00392 }
00393 }
00394 return 1;
00395 }
00396
00397 static int
00398 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00399 {
00400
00401 while (collect_local_variables_in_iseq(env->block.iseq, ary),
00402 env->prev_envval) {
00403 GetEnvPtr(env->prev_envval, env);
00404 }
00405 return 0;
00406 }
00407
00408 static int
00409 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
00410 {
00411 if (ENV_IN_HEAP_P(th, dfp)) {
00412 rb_env_t *env;
00413 GetEnvPtr(ENV_VAL(dfp), env);
00414 collect_local_variables_in_env(env, ary);
00415 return 1;
00416 }
00417 else {
00418 return 0;
00419 }
00420 }
00421
/*
 * Convert cfp's environment (and any stack-resident environments it
 * depends on) to heap env objects and return the resulting env VALUE.
 */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE envval;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* a FINISH sentinel owns no environment; use the frame beneath it */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00440
00441 void
00442 rb_vm_stack_to_heap(rb_thread_t * const th)
00443 {
00444 rb_control_frame_t *cfp = th->cfp;
00445 while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
00446 rb_vm_make_env_object(th, cfp);
00447 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00448 }
00449 }
00450
00451
00452
00453 static VALUE
00454 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00455 {
00456 VALUE proc = block->proc;
00457
00458 if (block->proc) {
00459 return block->proc;
00460 }
00461
00462 proc = rb_vm_make_proc(th, block, rb_cProc);
00463 block->proc = proc;
00464
00465 return proc;
00466 }
00467
/*
 * Create a Proc object of class klass from block.  The block's frame must
 * not already have a Proc (rb_bug otherwise).  The frame's environment is
 * moved to the heap so the Proc outlives the stack frame; any block that
 * was passed into that frame is itself converted to a Proc first so it is
 * kept alive via proc->blockprocval.
 */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
        /* the frame received a block: wrap it in a Proc and re-point the
         * lfp[0] slot at the Proc's heap-resident block */
        rb_proc_t *p;

        blockprocval = vm_make_proc_from_block(
            th, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));

        GetProcPtr(blockprocval, p);
        *cfp->lfp = GC_GUARDED_PTR(&p->block);
    }

    envval = rb_vm_make_env_object(th, cfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* after env conversion, the block's pointers must not point back
         * into the thread's value stack */
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
00516
00517
00518
/*
 * Invoke a block from C with the given self and argc/argv.
 *   blockptr - block to pass on to the invoked block (may be 0)
 *   cref     - if non-zero, overrides the new frame's cref (dfp[-1])
 * Returns Qnil when the block has no body; dispatches to the C-function
 * path when the block's "iseq" is really a NODE (ifunc).
 */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        /* lambda-style procs get strict argument semantics */
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* copy the C-side arguments onto the VM value stack */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        /* hand over the pending method entry / block to the new frame */
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        /* ifunc block: call the C function directly */
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00564
00565 static inline const rb_block_t *
00566 check_block(rb_thread_t *th)
00567 {
00568 const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);
00569
00570 if (blockptr == 0) {
00571 rb_vm_localjump_error("no block given", Qnil, 0);
00572 }
00573
00574 return blockptr;
00575 }
00576
00577 static inline VALUE
00578 vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
00579 {
00580 const rb_block_t *blockptr = check_block(th);
00581 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
00582 }
00583
00584 static inline VALUE
00585 vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
00586 {
00587 const rb_block_t *blockptr = check_block(th);
00588 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
00589 }
00590
/*
 * Call proc with self, argc/argv and an optional block.  A non-method
 * proc runs under its own captured $SAFE level, restored afterwards.
 * Non-local exits (break/return/raise) propagate via JUMP_TAG after the
 * safe level has been restored.
 */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;  /* volatile: survives longjmp */

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);    /* re-raise the non-local exit */
    }
    return val;
}
00617
00618
00619
00620 static rb_control_frame_t *
00621 vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
00622 {
00623 while (cfp->pc == 0) {
00624 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00625 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00626 return 0;
00627 }
00628 }
00629 return cfp;
00630 }
00631
00632 static VALUE
00633 vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
00634 {
00635 cfp = vm_normal_frame(th, cfp);
00636 return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
00637 }
00638
00639 static void
00640 vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
00641 {
00642 cfp = vm_normal_frame(th, cfp);
00643 lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
00644 }
00645
00646 static VALUE
00647 vm_svar_get(VALUE key)
00648 {
00649 rb_thread_t *th = GET_THREAD();
00650 return vm_cfp_svar_get(th, th->cfp, key);
00651 }
00652
00653 static void
00654 vm_svar_set(VALUE key, VALUE val)
00655 {
00656 rb_thread_t *th = GET_THREAD();
00657 vm_cfp_svar_set(th, th->cfp, key, val);
00658 }
00659
/* Read the regexp backref special variable ($~); svar key 1. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00665
/* Set the regexp backref special variable ($~); svar key 1. */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00671
/* Read the last-read-line special variable ($_); svar key 0. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00677
/* Set the last-read-line special variable ($_); svar key 0. */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00683
00684
00685
/*
 * Map cfp's current pc to a source line number via the iseq's insn_info
 * table.  The table entry at index i covers instructions up to the
 * position of entry i+1, hence line_no is taken from entry i-1 when the
 * first entry at-or-past pos is found.  Returns 0 when no line is known
 * (non-Ruby frame, empty table, or pc before the first recorded position).
 */
int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        /* pc at the very first entry: no preceding entry, report 0 */
        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        /* pc past the last entry: the last entry's line applies */
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
00708
/*
 * Walk th's control frames from the outermost (bottom of stack memory is
 * the highest address) toward th->cfp, skipping `lev` levels, and invoke
 * iter for every Ruby-level or C-function frame.  init, if non-zero, is
 * called once before iteration (after the level check succeeds).
 * Returns FALSE when lev exceeds the available depth, TRUE otherwise.
 */
static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    cfp -= 2;   /* skip the two outermost dummy frames */
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;   /* requested level deeper than the stack */
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                /* Ruby-level frame: report file/line/iseq name */
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            /* C function frame: report the original or called method id */
            ID id;
            extern VALUE ruby_engine_name;

            if (NIL_P(file)) file = ruby_engine_name;
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if (id != ID_ALLOCATOR && (*iter)(arg, file, line_no, rb_id2str(id)))
                break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}
00752
00753 static void
00754 vm_backtrace_alloc(void *arg)
00755 {
00756 VALUE *aryp = arg;
00757 *aryp = rb_ary_new();
00758 }
00759
00760 static int
00761 vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
00762 {
00763 VALUE *aryp = arg;
00764 VALUE bt;
00765
00766 if (line_no) {
00767 bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
00768 RSTRING_PTR(file), line_no, RSTRING_PTR(name));
00769 }
00770 else {
00771 bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
00772 RSTRING_PTR(file), RSTRING_PTR(name));
00773 }
00774 rb_ary_push(*aryp, bt);
00775 return 0;
00776 }
00777
/*
 * Build a backtrace array for th starting `lev` frames up, newest entry
 * first.  For lev < 0 the array is pre-created here so an overly deep
 * level still yields an (empty) array; otherwise vm_backtrace_alloc
 * creates it lazily, and Qnil is returned when nothing was collected.
 */
static inline VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = 0;

    if (lev < 0) {
        ary = rb_ary_new();
    }
    vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
    if (!ary) return Qnil;
    return rb_ary_reverse(ary);
}
00790
00791 const char *
00792 rb_sourcefile(void)
00793 {
00794 rb_thread_t *th = GET_THREAD();
00795 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00796
00797 if (cfp) {
00798 return RSTRING_PTR(cfp->iseq->filename);
00799 }
00800 else {
00801 return 0;
00802 }
00803 }
00804
00805 int
00806 rb_sourceline(void)
00807 {
00808 rb_thread_t *th = GET_THREAD();
00809 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00810
00811 if (cfp) {
00812 return rb_vm_get_sourceline(cfp);
00813 }
00814 else {
00815 return 0;
00816 }
00817 }
00818
/*
 * cref (lexical class/module scope chain) of the nearest Ruby-level frame.
 * NOTE(review): cfp is dereferenced without a NULL check -- presumably a
 * Ruby-level frame always exists when this is called; confirm at call sites.
 */
NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
00826
#if 0
/* Disabled debug helper: dump each cref's class and visibility along the chain. */
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif
00838
/*
 * Class/module base (cbase) of the nearest Ruby-level frame.
 * NOTE(review): like rb_vm_cref(), cfp is used without a NULL check.
 */
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}
00847
00848
00849
/*
 * Build (but do not raise) a LocalJumpError carrying @exit_value = value
 * and @reason = a symbol derived from the TAG_* reason code.
 */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    /* map the jump tag to the @reason symbol */
    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00881
00882 void
00883 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00884 {
00885 VALUE exc = make_localjump_error(mesg, value, reason);
00886 rb_exc_raise(exc);
00887 }
00888
/*
 * Translate a non-local jump tag into a LocalJumpError exception object.
 * Qundef means "use the pending retval from the current tag".  Returns
 * Qnil for state 0 or unrecognized states (caller then re-JUMP_TAGs).
 */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
00920
/*
 * Raise the LocalJumpError corresponding to (state, val), or re-issue the
 * raw JUMP_TAG when val is Qnil (no exception should be synthesized).
 */
void
rb_vm_jump_tag_but_local_jump(int state, VALUE val)
{
    if (val != Qnil) {
        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
        rb_exc_raise(exc);
    }
    JUMP_TAG(state);
}
00930
NORETURN(static void vm_iter_break(rb_thread_t *th));

/*
 * Perform `break` out of the current block: throw TAG_BREAK targeted at
 * the dfp of the enclosing frame (read through the GC-guarded pointer).
 */
static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
00943
/* Public API: break out of the current iterator block. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
00949
00950
00951
/* Maps optimized core method entries (rb_method_entry_t*) to their BOP
 * index; populated by add_opt_method() in vm_init_redefined_flag(). */
static st_table *vm_opt_method_table = 0;
00953
00954 static void
00955 rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
00956 {
00957 VALUE bop;
00958 if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
00959 if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
00960 ruby_vm_redefined_flag[bop] = 1;
00961 }
00962 }
00963 }
00964
00965 static void
00966 add_opt_method(VALUE klass, ID mid, VALUE bop)
00967 {
00968 rb_method_entry_t *me;
00969 if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
00970 me->def->type == VM_METHOD_TYPE_CFUNC) {
00971 st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
00972 }
00973 else {
00974 rb_bug("undefined optimized method: %s", rb_id2name(mid));
00975 }
00976 }
00977
/*
 * Build the redefinition-tracking table: clear each BOP flag and record
 * the C-implemented core methods whose redefinition must disable the
 * corresponding specialized instruction.
 */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

/* OP selects (method id, basic operation) and resets its flag;
 * C registers class k's implementation of that method. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
01008
01009
01010
#if VMDEBUG
/* Human-readable name for cfp's frame type; rb_bug()s on unknown magic. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK: return "block";
      case VM_FRAME_MAGIC_CLASS: return "class";
      case VM_FRAME_MAGIC_TOP: return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC: return "cfunc";
      case VM_FRAME_MAGIC_PROC: return "proc";
      case VM_FRAME_MAGIC_IFUNC: return "ifunc";
      case VM_FRAME_MAGIC_EVAL: return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
#endif
01031
01032
01033
01034
01035
01036
01037
01038
01039
01040
01041
01042
01043
01044
01045
01046
01047
01048
01049
01050
01051
01052
01053
01054
01055
01056
01057
01058
01059
01060
01061
01062
01063
01064
01065
01066
01067
01068
01069
01070
01071
01072
01073
01074
01075
01076
01077
01078
01079
01080
01081
01082
01083
01084
01085
01086
01087
01088
01089
01090
01091
01092
01093
01094
01095
01096
01097
01098
01099
01100
01101
01102
01103
01104
01105
01106
01107
01108
01109
01110
01111
01112
01113
01114
01115
01116
01117
01118
01119
01120
01121
01122
01123
01124
01125
01126
01127
01128
01129
01130
01131
01132
01133
01134
/*
 * Main VM execution driver.  Runs vm_exec_core() under a tag; on a thrown
 * state (break/return/next/redo/retry/raise) it searches the current
 * iseq's catch table for a matching handler, pushing a handler frame or
 * restarting the core loop as appropriate, and pops frames until a FINISH
 * sentinel hands control back to the C caller.
 */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_exec_core(th, initial);
        if ((state = th->state) != 0) {
            /* a throw raised from inside the core loop */
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;
        VALUE type;

        err = th->errinfo;

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        /* unwind dummy/C frames until a Ruby frame with a valid pc;
         * fire C_RETURN events for the cfunc frames we pop */
        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                const rb_method_entry_t *me = th->cfp->me;
                EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
            }
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            if (cfp->dfp == escape_dfp) {
                /* this frame is the throw's target */
                if (state == TAG_RETURN) {
                    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
                        /* not at a FINISH boundary yet: re-target the throw
                         * at the parent frame and keep unwinding as BREAK */
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        /* returning to C; run any ENSURE handler first */
                        for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                            entry = &cfp->iseq->catch_table[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseqval = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (!catch_iseqval) {
                            /* no ensure: deliver the return value to C */
                            result = GET_THROWOBJ_VAL(err);
                            th->errinfo = Qnil;
                            th->cfp += 2;   /* pop this frame and the FINISH sentinel */
                            goto finish_vme;
                        }
                        /* fall through to push the ensure handler frame */
                    }
                }
                else {
                    /* TAG_BREAK landed here: push its value and resume */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            /* exceptions may be caught by RESCUE or ENSURE entries */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            /* retry runs ENSURE handlers on the way, or resumes in place
             * at a matching RETRY entry */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        VALUE *escape_dfp;
                        escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->dfp == escape_dfp) {
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
            /* break with no real catch point: resolve via catch table */
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        /* resume at the continuation point with the thrown
                         * value on the stack (redo carries no value) */
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = cfp->bp + entry->sp;

                        if (state != TAG_REDO) {
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->errinfo = Qnil;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            /* any other state only triggers ENSURE handlers */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* a handler was found: push a BLOCK frame running it, with the
             * thrown object as its single argument */
            rb_iseq_t *catch_iseq;

            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = cfp->bp + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

            state = 0;
            th->state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* no handler in this frame: fire the leave event, pop, and
             * retry in the caller unless we hit the FINISH sentinel */
            switch (VM_FRAME_TYPE(th->cfp)) {
              case VM_FRAME_MAGIC_METHOD:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
                break;
              case VM_FRAME_MAGIC_CLASS:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
                break;
            }

            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

            if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
                goto exception_handler;
            }
            else {
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);    /* propagate to the C caller's tag */
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01362
01363
01364
/* Evaluate a toplevel instruction sequence on the current thread.
 * iseqval must wrap an ISEQ_TYPE_TOP iseq (vm_set_top_stack raises otherwise).
 * Returns the result of executing the sequence. */
VALUE
rb_iseq_eval(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_top_stack(th, iseqval);

    val = vm_exec(th);
    tmp = iseqval;  /* volatile store keeps iseqval live across vm_exec (GC guard) */
    return val;
}
01378
/* Like rb_iseq_eval() but installs the iseq as the main program
 * (vm_set_main_stack instead of vm_set_top_stack). */
VALUE
rb_iseq_eval_main(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_main_stack(th, iseqval);

    val = vm_exec(th);
    tmp = iseqval;  /* volatile store keeps iseqval live across vm_exec (GC guard) */
    return val;
}
01392
/* Report the method id and defining class of the method currently executing
 * on th.  Either out-parameter may be NULL to skip it.
 * Returns 1 on success, 0 if no enclosing method could be determined. */
int
rb_thread_method_id_and_class(rb_thread_t *th,
                              ID *idp, VALUE *klassp)
{
    rb_control_frame_t *cfp = th->cfp;
    rb_iseq_t *iseq = cfp->iseq;
    /* No iseq but a method entry: we are inside a C-implemented method. */
    if (!iseq && cfp->me) {
        if (idp) *idp = cfp->me->def->original_id;
        if (klassp) *klassp = cfp->me->klass;
        return 1;
    }
    /* Walk up parent iseqs (blocks -> enclosing method) until a method
     * definition or the local (outermost) iseq is reached. */
    while (iseq) {
        if (RUBY_VM_IFUNC_P(iseq)) {
            /* C-level block (ifunc): report the pseudo-name "<ifunc>". */
            if (idp) CONST_ID(*idp, "<ifunc>");
            if (klassp) *klassp = 0;
            return 1;
        }
        if (iseq->defined_method_id) {
            if (idp) *idp = iseq->defined_method_id;
            if (klassp) *klassp = iseq->klass;
            return 1;
        }
        if (iseq->local_iseq == iseq) {
            /* reached the outermost scope without finding a method */
            break;
        }
        iseq = iseq->parent_iseq;
    }
    return 0;
}
01422
01423 int
01424 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
01425 {
01426 return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
01427 }
01428
/* Build a human-readable "file:line:in `name'" (Ruby frame) or
 * "`Class#method' (cfunc)" (C frame) string describing th's current frame.
 * Returns Qnil when neither form applies. */
VALUE
rb_thread_current_status(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->cfp;
    VALUE str = Qnil;

    if (cfp->iseq != 0) {
        if (cfp->pc != 0) {
            rb_iseq_t *iseq = cfp->iseq;
            int line_no = rb_vm_get_sourceline(cfp);
            char *file = RSTRING_PTR(iseq->filename);
            str = rb_sprintf("%s:%d:in `%s'",
                             file, line_no, RSTRING_PTR(iseq->name));
        }
    }
    else if (cfp->me->def->original_id) {
        /* NOTE(review): assumes cfp->me is non-NULL whenever iseq is 0 here —
         * confirm against callers before relying on this in new contexts. */
        str = rb_sprintf("`%s#%s' (cfunc)",
                         RSTRING_PTR(rb_class_name(cfp->me->klass)),
                         rb_id2name(cfp->me->def->original_id));
    }

    return str;
}
01452
/* Invoke func(arg) under a freshly pushed toplevel VM frame so that the call
 * appears to originate from `filename`.  recv becomes self of that frame and
 * blockptr (may be NULL) its block.  The frame is popped before returning
 * func's result. */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename, VALUE filepath)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    /* volatile: keep the iseq object alive across the callback (GC guard) */
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, filepath, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
                  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01470
01471
01472
01473 static int
01474 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01475 {
01476 VALUE thval = (VALUE)key;
01477 rb_gc_mark(thval);
01478 return ST_CONTINUE;
01479 }
01480
01481 static void
01482 mark_event_hooks(rb_event_hook_t *hook)
01483 {
01484 while (hook) {
01485 rb_gc_mark(hook->data);
01486 hook = hook->next;
01487 }
01488 }
01489
/* GC mark function for the VM object: marks every VALUE the VM keeps alive
 * (living threads, load path, trap handlers, event hook data, ...). */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        if (vm->living_threads) {
            /* keys of living_threads are the thread objects themselves */
            st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
        }
        RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
        RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
        RUBY_MARK_UNLESS_NULL(vm->load_path);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features);
        RUBY_MARK_UNLESS_NULL(vm->top_self);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }

        mark_event_hooks(vm->event_hooks);

        /* signal trap handlers registered via Signal.trap */
        for (i = 0; i < RUBY_NSIG; i++) {
            if (vm->trap_list[i].cmd)
                rb_gc_mark(vm->trap_list[i].cmd);
        }
    }

    RUBY_MARK_LEAVE("vm");
}
01524
01525 #define vm_free 0
01526
/* Tear down the whole VM: free the main thread, the living-thread table,
 * the global VM lock, the VM struct itself and finally (if per-VM object
 * spaces are enabled) the object space.  Order matters: the objspace pointer
 * is saved up front because vm is freed before the objspace is. */
int
ruby_vm_destruct(void *ptr)
{
    RUBY_FREE_ENTER("vm");
    if (ptr) {
        rb_vm_t *vm = ptr;
        rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        struct rb_objspace *objspace = vm->objspace;
#endif
        /* recycle the VM wrapper object so GC never calls back into it */
        rb_gc_force_recycle(vm->self);
        vm->main_thread = 0;
        if (th) {
            thread_free(th);
        }
        if (vm->living_threads) {
            st_free_table(vm->living_threads);
            vm->living_threads = 0;
        }
        /* unlock before destroy: destroying a held lock is undefined */
        rb_thread_lock_unlock(&vm->global_vm_lock);
        rb_thread_lock_destroy(&vm->global_vm_lock);
        ruby_xfree(vm);
        ruby_current_vm = 0;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        if (objspace) {
            rb_objspace_free(objspace);
        }
#endif
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01559
01560 static size_t
01561 vm_memsize(const void *ptr)
01562 {
01563 if (ptr) {
01564 const rb_vm_t *vmobj = ptr;
01565 return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
01566 }
01567 else {
01568 return 0;
01569 }
01570 }
01571
/* Typed-data vtable for the VM wrapper object.  dfree is vm_free, which is
 * #defined to 0 above: the struct is released by ruby_vm_destruct, not GC. */
static const rb_data_type_t vm_data_type = {
    "VM",
    rb_vm_mark, vm_free, vm_memsize,
};
01576
/* Zero-initialize a freshly allocated rb_vm_t.  src_encoding_index starts
 * at -1 meaning "not yet determined". */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
}
01583
01584
01585
01586 #define USE_THREAD_DATA_RECYCLE 1
01587
01588 #if USE_THREAD_DATA_RECYCLE
01589 #define RECYCLE_MAX 64
01590 static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
01591 static int thread_recycle_stack_count = 0;
01592
01593 static VALUE *
01594 thread_recycle_stack(size_t size)
01595 {
01596 if (thread_recycle_stack_count) {
01597 return thread_recycle_stack_slot[--thread_recycle_stack_count];
01598 }
01599 else {
01600 return ALLOC_N(VALUE, size);
01601 }
01602 }
01603
01604 #else
01605 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
01606 #endif
01607
/* Return a thread's VM stack: cache it for reuse if the recycle pool has
 * room, otherwise free it.  NOTE: all cached stacks are assumed to have the
 * same size (RUBY_VM_THREAD_STACK_SIZE, see th_init2). */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
01619
01620 #ifdef USE_THREAD_RECYCLE
/* Allocate and zero a bare rb_thread_t (only compiled when thread-struct
 * recycling is enabled; despite the name, no recycling happens here yet). */
static rb_thread_t *
thread_recycle_struct(void)
{
    void *p = ALLOC_N(rb_thread_t, 1);
    memset(p, 0, sizeof(rb_thread_t));
    return p;
}
01628 #endif
01629
01630 void rb_gc_mark_machine_stack(rb_thread_t *th);
01631
/* GC mark function for a thread object: marks the VM stack contents, every
 * control frame's references, assorted per-thread VALUEs, local storage,
 * and (for non-current threads) the machine stack and saved registers. */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            /* control frames grow downward from the top of the stack area */
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            /* mark every live slot below the stack pointer */
            while (p < sp) {
                rb_gc_mark(*p++);
            }
            /* plus any extra slots temporarily in use (e.g. during setup) */
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            /* walk all control frames up to the stack top */
            while (cfp != limit_cfp) {
                rb_iseq_t *iseq = cfp->iseq;
                rb_gc_mark(cfp->proc);
                rb_gc_mark(cfp->self);
                if (iseq) {
                    /* an ifunc "iseq" is itself a markable object */
                    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
                }
                /* method entries are flagged, not marked recursively here */
                if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects referenced directly from the thread struct */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
        RUBY_MARK_UNLESS_NULL(th->local_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        /* the current thread's machine stack is scanned by the GC itself;
         * only other threads need explicit conservative scanning here */
        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01694
01695 static void
01696 thread_free(void *ptr)
01697 {
01698 rb_thread_t *th;
01699 RUBY_FREE_ENTER("thread");
01700
01701 if (ptr) {
01702 th = ptr;
01703
01704 if (!th->root_fiber) {
01705 RUBY_FREE_UNLESS_NULL(th->stack);
01706 }
01707
01708 if (th->locking_mutex != Qfalse) {
01709 rb_bug("thread_free: locking_mutex must be NULL (%p:%ld)", (void *)th, th->locking_mutex);
01710 }
01711 if (th->keeping_mutexes != NULL) {
01712 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, th->keeping_mutexes);
01713 }
01714
01715 if (th->local_storage) {
01716 st_free_table(th->local_storage);
01717 }
01718
01719 #if USE_VALUE_CACHE
01720 {
01721 VALUE *ptr = th->value_cache_ptr;
01722 while (*ptr) {
01723 VALUE v = *ptr;
01724 RBASIC(v)->flags = 0;
01725 RBASIC(v)->klass = 0;
01726 ptr++;
01727 }
01728 }
01729 #endif
01730
01731 if (th->vm && th->vm->main_thread == th) {
01732 RUBY_GC_INFO("main thread\n");
01733 }
01734 else {
01735 #ifdef USE_SIGALTSTACK
01736 if (th->altstack) {
01737 free(th->altstack);
01738 }
01739 #endif
01740 ruby_xfree(ptr);
01741 }
01742 }
01743 RUBY_FREE_LEAVE("thread");
01744 }
01745
01746 static size_t
01747 thread_memsize(const void *ptr)
01748 {
01749 if (ptr) {
01750 const rb_thread_t *th = ptr;
01751 size_t size = sizeof(rb_thread_t);
01752
01753 if (!th->root_fiber) {
01754 size += th->stack_size * sizeof(VALUE);
01755 }
01756 if (th->local_storage) {
01757 st_memsize(th->local_storage);
01758 }
01759 return size;
01760 }
01761 else {
01762 return 0;
01763 }
01764 }
01765
/* Typed-data vtable for Thread objects. */
static const rb_data_type_t thread_data_type = {
    "VM/thread",
    rb_thread_mark,
    thread_free,
    thread_memsize,
};
01772
/* Allocate a Thread wrapper object.  With USE_THREAD_RECYCLE the struct is
 * obtained from thread_recycle_struct and wrapped; otherwise TypedData_Make
 * allocates and zeroes it in one step. */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01786
/* Core thread initialization: allocate the VM stack, point cfp at its top
 * (frames grow downward), push a dummy TOP frame, and set initial status. */
static void
th_init2(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate (or recycle) the thread's VM stack */
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
                  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;

#if USE_VALUE_CACHE
    th->value_cache_ptr = &th->value_cache[0];
#endif
}
01809
/* Public-facing thread init; currently just delegates to th_init2. */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th_init2(th, self);
}
01815
/* Initialize a wrapped Thread object: set up its VM stack and link it to
 * the current VM with a fresh toplevel self.  Returns self. */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
01830
/* Allocate and fully initialize a Thread object of the given class. */
VALUE
rb_thread_alloc(VALUE klass)
{
    VALUE self = thread_alloc(klass);
    ruby_thread_init(self);
    return self;
}
01838
/* Back end of `def`: bind iseqval as method `id` on the class named by cref
 * (or on obj's singleton class when is_singleton is set).  Also mirrors the
 * method onto the singleton class for module_function visibility, and bumps
 * the global state version to flush method caches. */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        /* immediates have no singleton class to attach to */
        if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
            rb_raise(rb_eTypeError,
                     "can't define singleton method \"%s\" for %s",
                     rb_id2name(id), rb_obj_classname(obj));
        }

        if (OBJ_FROZEN(obj)) {
            rb_error_frozen("object");
        }

        klass = rb_singleton_class(obj);
        noex = NOEX_PUBLIC;
    }

    /* record the lexical cref and owner on the method's iseq */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: also define a public singleton-class copy */
    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
01878
/* Temporarily rewind the current control frame (cfp++ steps toward the
 * caller), evaluate expr in that caller's context, then restore it. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
01883
/* RubyVM::FrozenCore#core#define_method -- defines an instance method. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
01892
/* RubyVM::FrozenCore#core#define_singleton_method -- defines a singleton
 * method on cbase. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
01901
/* RubyVM::FrozenCore#core#set_method_alias -- implements `alias`. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01910
/* RubyVM::FrozenCore#core#set_variable_alias -- `alias $new $old`. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01919
/* RubyVM::FrozenCore#core#undef_method -- implements `undef`; bumps the
 * state version so cached method lookups are invalidated. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
        rb_undef(cbase, SYM2ID(sym));
        INC_VM_STATE_VERSION();
    });
    return Qnil;
}
01929
/* RubyVM::FrozenCore#core#set_postexe -- implements END { }: wraps the
 * given block iseq in a Proc and registers it to run at interpreter exit. */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
        VALUE proc;
        extern void rb_call_end_proc(VALUE data);

        GetISeqPtr(iseqval, blockiseq);

        /* fabricate a block in the caller's frame pointing at the END iseq */
        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = rb_vm_make_proc(th, blockptr, rb_cProc);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
01952
01953 extern VALUE *rb_gc_stack_start;
01954 extern size_t rb_gc_stack_maxsize;
01955 #ifdef __ia64
01956 extern VALUE *rb_gc_register_stack_start;
01957 #endif
01958
01959
01960
01961
/* RubyVM.SDR (debug builds only): dump the VM bug report to stderr. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
01968
01969
/* RubyVM.NSDR (debug builds only): return the native C backtrace as an
 * Array of Strings; empty when backtrace(3) is unavailable. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    /* static: avoid a large frame; fine since this is a debug-only helper */
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    /* plain free(): syms was malloc'd by backtrace_symbols, not ruby_xmalloc */
    free(syms);
#endif
    return ary;
}
01993
01994 void
01995 Init_VM(void)
01996 {
01997 VALUE opts;
01998 VALUE klass;
01999 VALUE fcore;
02000
02001
02002 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02003 rb_undef_alloc_func(rb_cRubyVM);
02004 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02005
02006
02007 fcore = rb_class_new(rb_cBasicObject);
02008 RBASIC(fcore)->flags = T_ICLASS;
02009 klass = rb_singleton_class(fcore);
02010 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02011 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02012 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02013 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02014 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02015 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02016 rb_obj_freeze(fcore);
02017 rb_gc_register_mark_object(fcore);
02018 rb_mRubyVMFrozenCore = fcore;
02019
02020
02021 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02022 rb_undef_alloc_func(rb_cEnv);
02023 rb_undef_method(CLASS_OF(rb_cEnv), "new");
02024
02025
02026 rb_cThread = rb_define_class("Thread", rb_cObject);
02027 rb_undef_alloc_func(rb_cThread);
02028
02029
02030 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02031 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02032 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02033 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02034
02035 #if OPT_DIRECT_THREADED_CODE
02036 rb_ary_push(opts, rb_str_new2("direct threaded code"));
02037 #elif OPT_TOKEN_THREADED_CODE
02038 rb_ary_push(opts, rb_str_new2("token threaded code"));
02039 #elif OPT_CALL_THREADED_CODE
02040 rb_ary_push(opts, rb_str_new2("call threaded code"));
02041 #endif
02042
02043 #if OPT_BASIC_OPERATIONS
02044 rb_ary_push(opts, rb_str_new2("optimize basic operation"));
02045 #endif
02046
02047 #if OPT_STACK_CACHING
02048 rb_ary_push(opts, rb_str_new2("stack caching"));
02049 #endif
02050 #if OPT_OPERANDS_UNIFICATION
02051 rb_ary_push(opts, rb_str_new2("operands unification]"));
02052 #endif
02053 #if OPT_INSTRUCTIONS_UNIFICATION
02054 rb_ary_push(opts, rb_str_new2("instructions unification"));
02055 #endif
02056 #if OPT_INLINE_METHOD_CACHE
02057 rb_ary_push(opts, rb_str_new2("inline method cache"));
02058 #endif
02059 #if OPT_BLOCKINLINING
02060 rb_ary_push(opts, rb_str_new2("block inlining"));
02061 #endif
02062
02063
02064 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02065
02066
02067 #if VMDEBUG
02068 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02069 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02070 #else
02071 (void)sdr;
02072 (void)nsdr;
02073 #endif
02074
02075
02076 {
02077 rb_vm_t *vm = ruby_current_vm;
02078 rb_thread_t *th = GET_THREAD();
02079 VALUE filename = rb_str_new2("<main>");
02080 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02081 volatile VALUE th_self;
02082 rb_iseq_t *iseq;
02083
02084
02085 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02086
02087
02088 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02089 vm->main_thread = th;
02090 vm->running_thread = th;
02091 th->vm = vm;
02092 th->top_wrapper = 0;
02093 th->top_self = rb_vm_top_self();
02094 rb_thread_set_current(th);
02095
02096 vm->living_threads = st_init_numtable();
02097 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02098
02099 rb_gc_register_mark_object(iseqval);
02100 GetISeqPtr(iseqval, iseq);
02101 th->cfp->iseq = iseq;
02102 th->cfp->pc = iseq->iseq_encoded;
02103 th->cfp->self = th->top_self;
02104
02105 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02106 }
02107 vm_init_redefined_flag();
02108 }
02109
/* Rename the main thread's bottom-most frame's iseq to `filename` so the
 * program name shows up in backtraces.
 * NOTE(review): assumes the frame just below the stack top is the boot
 * frame installed in Init_VM and that its iseq is non-NULL -- confirm if
 * this is ever called before Init_VM completes. */
void
rb_vm_set_progname(VALUE filename)
{
    rb_thread_t *th = GET_VM()->main_thread;
    rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    --cfp;  /* step down to the first (bottom) control frame */
    cfp->iseq->filename = filename;
}
02118
02119 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02120 struct rb_objspace *rb_objspace_alloc(void);
02121 #endif
02122 void ruby_thread_init_stack(rb_thread_t *th);
02123
02124 extern void Init_native_thread(void);
02125
/* Earliest VM bootstrap: allocate and zero the VM and main-thread structs
 * with plain malloc (the Ruby allocator is not usable yet), install them as
 * current, and initialize native threading and the machine stack.  Aborts
 * the process on allocation failure since nothing can be raised yet. */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);

    /* must be set before th_init2 so GET_THREAD() works during init */
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init2(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
02151
02152
02153
/* #to_s of the toplevel `main` object: always the string "main". */
static VALUE
main_to_s(VALUE obj)
{
    return rb_str_new2("main");
}
02159
/* Accessor for the VM's toplevel self object (`main`). */
VALUE
rb_vm_top_self(void)
{
    return GET_VM()->top_self;
}
02165
/* Create the toplevel `main` object and the VM's mark-object array (used to
 * pin arbitrary objects against GC). */
void
Init_top_self(void)
{
    rb_vm_t *vm = GET_VM();

    vm->top_self = rb_obj_alloc(rb_cObject);
    rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);

    /* initialize mark object array */
    vm->mark_object_ary = rb_ary_tmp_new(1);
}
02177
/* Address of the given VM's $VERBOSE storage. */
VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}
02183
/* Address of the given VM's $DEBUG storage. */
VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}
02189
/* $VERBOSE storage of the current VM (used by the ruby_verbose macro). */
VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}
02195
/* $DEBUG storage of the current VM (used by the ruby_debug macro). */
VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}
02201