00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021
00022 #include "vm_insnhelper.h"
00023 #include "vm_insnhelper.c"
00024 #include "vm_exec.h"
00025 #include "vm_exec.c"
00026
00027 #include "vm_method.c"
00028 #include "vm_eval.c"
00029
00030 #include <assert.h>
00031
00032 #define BUFSIZE 0x100
00033 #define PROCDEBUG 0
00034
/* Core VM classes/objects (assigned during VM initialization elsewhere). */
VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;
VALUE rb_mRubyVMFrozenCore;

/* Number of times const_missing has been invoked; bumped by
 * rb_vm_inc_const_missing_count() below. */
VALUE ruby_vm_const_missing_count = 0;

/* One flag per basic operation (BOP_*): set to 1 by
 * rb_vm_check_redefinition_opt_method() once the corresponding
 * optimized C method has been redefined. */
char ruby_vm_redefined_flag[BOP_LAST_];

/* The currently-running thread and VM (single-VM build). */
rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;

static void thread_free(void *ptr);

/* Instruction-analysis hooks; definitions not visible in this chunk. */
void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);

RUBY_FUNC_EXPORTED VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
RUBY_FUNC_EXPORTED int rb_vm_get_sourceline(const rb_control_frame_t *cfp);
00061
/*
 * Signal a global VM state change by bumping the state version,
 * invalidating caches stamped with the previous version.
 */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00067
00068 static void vm_clear_global_method_cache(void);
00069
static void
vm_clear_all_inline_method_cache(void)
{
    /* Intentionally empty: inline caches are currently invalidated
     * globally via the state version rather than being walked here.
     * TODO: iterate all iseqs and clear their inline caches. */
}
00078
00079 static void
00080 vm_clear_all_cache()
00081 {
00082 vm_clear_global_method_cache();
00083 vm_clear_all_inline_method_cache();
00084 ruby_vm_global_state_version = 1;
00085 }
00086
00087 void
00088 rb_vm_inc_const_missing_count(void)
00089 {
00090 ruby_vm_const_missing_count +=1;
00091 }
00092
00093
00094
/*
 * Push a FINISH frame whose pc points at the special finish instruction
 * sequence; when control unwinds back to this frame, vm_exec() returns
 * to its C caller.  Always returns Qtrue.
 */
static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
00104
/*
 * Prepare the thread's stack to run a toplevel iseq: push a FINISH
 * frame, then a TOP frame executing `iseqval` with th->top_self as self.
 * Raises TypeError if the iseq is not a toplevel one.
 */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00124
/*
 * Prepare the stack to run `iseqval` as an eval in the context of
 * th->base_block (self/lfp/dfp are taken from that block).  If `cref`
 * is given it is stored in the frame's dfp[-1] slot as the lexical
 * cref for constant/method visibility lookup.
 */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    /* for return */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00144
/*
 * Prepare the stack for the main script: run it as an eval in the
 * environment of TOPLEVEL_BINDING, then (if the script has locals)
 * write the resulting heap env back into the binding so toplevel
 * local variables survive across requires/evals.
 */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save binding */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00167
00168 rb_control_frame_t *
00169 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00170 {
00171 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00172 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00173 return cfp;
00174 }
00175 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00176 }
00177 return 0;
00178 }
00179
/*
 * Like rb_vm_get_ruby_level_next_cfp(), but once past the starting
 * frame it only continues through frames marked VM_FRAME_FLAG_PASSED
 * (frames that merely passed a block along); otherwise it stops and
 * returns 0.
 */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        /* only skip over "passed" (block-forwarding) frames */
        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00201
00202
00203
/*
 * Register `func` to run when the VM is destroyed.  The function
 * pointer is stored (cast to VALUE) in the vm->at_exit array and
 * invoked later by ruby_vm_run_at_exit_hooks().
 */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00209
/*
 * Pop and invoke every hook registered via ruby_vm_at_exit(), in LIFO
 * order, then free the hook array itself.
 */
static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
        typedef void rb_vm_at_exit_func(rb_vm_t*);
        /* entries are function pointers stored as VALUEs */
        rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
        (*func)(vm);
    }
    rb_ary_free(hook);
}
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
/* True when `env` (a dfp) does NOT point into the thread's VM stack,
 * i.e. the environment has already been moved to the heap. */
#define ENV_IN_HEAP_P(th, env) \
  (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
/* Slot one past the dfp caches the wrapping Env VALUE
 * (written by vm_make_env_each()). */
#define ENV_VAL(env) ((env)[1])
00236
/*
 * GC mark function for Env objects: marks the copied local slots, the
 * parent env, and the captured block's self/proc/iseq.
 */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* env->env is a malloc'ed VALUE array, not a Ruby object */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may hold a NODE (ifunc block) instead of an iseq */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00266
/*
 * GC free function for Env objects: releases the copied local-variable
 * array, then the struct itself.
 */
static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
        rb_env_t *const env = ptr;
        RUBY_FREE_UNLESS_NULL(env->env);
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}
00278
00279 static size_t
00280 env_memsize(const void *ptr)
00281 {
00282 if (ptr) {
00283 const rb_env_t * const env = ptr;
00284 size_t size = sizeof(rb_env_t);
00285 if (env->env) {
00286 size += env->env_size * sizeof(VALUE);
00287 }
00288 return size;
00289 }
00290 return 0;
00291 }
00292
/* TypedData hooks wiring Env objects into the GC (mark/free/memsize). */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};
00297
00298 static VALUE
00299 env_alloc(void)
00300 {
00301 VALUE obj;
00302 rb_env_t *env;
00303 obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
00304 env->env = 0;
00305 env->prev_envval = 0;
00306 env->block.iseq = 0;
00307 return obj;
00308 }
00309
static VALUE check_env_value(VALUE envval);

/*
 * Debug helper (used when PROCDEBUG is on): dump an env's dfp slots
 * and recursively check its parent env.  Always returns 1.
 * NOTE(review): the slot labels assume a fixed dfp layout
 * (envval at dfp[3], parent env at dfp[4]) -- confirm against
 * vm_make_env_each before relying on them.
 */
static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp:    %10p\n", (void *)env->block.lfp);
    printf("dfp:    %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}
00332
00333 static VALUE
00334 check_env_value(VALUE envval)
00335 {
00336 rb_env_t *env;
00337 GetEnvPtr(envval, env);
00338
00339 if (check_env(env)) {
00340 return envval;
00341 }
00342 rb_bug("invalid env");
00343 return Qnil;
00344 }
00345
/*
 * Move one frame's environment (its locals between dfp and endptr) from
 * the machine stack onto the heap, recursively heapifying parent
 * environments first.  Returns the Env VALUE; the frame's dfp/lfp are
 * redirected into the heap copy so later access goes through the Env.
 */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* already heapified; the Env is cached one slot past the dfp */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        /* heapify the parent (outer) environment first */
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* find the control frame that owns the parent dfp */
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate the Env and copy the locals into it */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* locals + dfp slot, plus 2 trailing slots for envval/penvval */
    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    /* cache the Env in the old stack slot, then point the frame at the
     * heap copy; trailing slots hold the Env and its parent */
    *envptr = envval;
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;
    nenvptr[2] = penvval;

    /* the frame now uses the heap env */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* remember frame state for block invocation */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* a non-Ruby frame has no iseq worth keeping */
        env->block.iseq = 0;
    }
    return envval;
}
00430
00431 static int
00432 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00433 {
00434 int i;
00435 if (!iseq) return 0;
00436 for (i = 0; i < iseq->local_table_size; i++) {
00437 ID lid = iseq->local_table[i];
00438 if (rb_is_local_id(lid)) {
00439 rb_ary_push(ary, ID2SYM(lid));
00440 }
00441 }
00442 return 1;
00443 }
00444
00445 static int
00446 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00447 {
00448
00449 while (collect_local_variables_in_iseq(env->block.iseq, ary),
00450 env->prev_envval) {
00451 GetEnvPtr(env->prev_envval, env);
00452 }
00453 return 0;
00454 }
00455
00456 static int
00457 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
00458 {
00459 if (ENV_IN_HEAP_P(th, dfp)) {
00460 rb_env_t *env;
00461 GetEnvPtr(ENV_VAL(dfp), env);
00462 collect_local_variables_in_env(env, ary);
00463 return 1;
00464 }
00465 else {
00466 return 0;
00467 }
00468 }
00469
static VALUE vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block);
static VALUE vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp, VALUE *blockprocptr);

/*
 * Public wrapper: heapify `cfp`'s environment, discarding the
 * block-proc out-parameter used internally.
 */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE blockprocval;
    return vm_make_env_object(th, cfp, &blockprocval);
}
00479
/*
 * Heapify the environment of `cfp` (skipping a FINISH frame if that is
 * where we stand).  If the frame was passed a block that has not yet
 * been wrapped in a Proc, wrap it first so it survives on the heap;
 * the new Proc is returned through *blockprocptr.
 */
static VALUE
vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *blockprocptr)
{
    VALUE envval;
    VALUE *lfp;
    rb_block_t *blockptr;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* a FINISH frame has no environment of its own */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    lfp = cfp->lfp;
    blockptr = GC_GUARDED_PTR_REF(lfp[0]);

    /* bit 0x02 marks a block already backed by a Proc -- TODO confirm */
    if (blockptr && !(lfp[0] & 0x02)) {
        VALUE blockprocval = vm_make_proc_from_block(th, blockptr);
        rb_proc_t *p;
        GetProcPtr(blockprocval, p);
        lfp[0] = GC_GUARDED_PTR(&p->block);
        *blockprocptr = blockprocval;
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);
    /* throw objects may still reference the old stack dfp */
    rb_vm_rewrite_dfp_in_errinfo(th);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00512
/*
 * After environments move to the heap, fix up any in-flight throw
 * object stored in a rescue/ensure frame's errinfo slot (dfp[-2]):
 * if its catch point still references a stack dfp that has since been
 * replaced by an Env, redirect it into the heap copy.
 */
void
rb_vm_rewrite_dfp_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* rewrite dfp in errinfo to point to heap */
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
            (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
             cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
            VALUE errinfo = cfp->dfp[-2]; /* #$! */
            if (RB_TYPE_P(errinfo, T_NODE)) {
                VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(errinfo);
                if (! ENV_IN_HEAP_P(th, escape_dfp)) {
                    VALUE dfpval = *escape_dfp;
                    if (CLASS_OF(dfpval) == rb_cEnv) {
                        rb_env_t *dfpenv;
                        GetEnvPtr(dfpval, dfpenv);
                        SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(dfpenv->env + dfpenv->local_size));
                    }
                }
            }
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00538
00539 void
00540 rb_vm_stack_to_heap(rb_thread_t *th)
00541 {
00542 rb_control_frame_t *cfp = th->cfp;
00543 while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
00544 rb_vm_make_env_object(th, cfp);
00545 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00546 }
00547 }
00548
00549
00550
00551 static VALUE
00552 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00553 {
00554 if (!block->proc) {
00555 block->proc = rb_vm_make_proc(th, block, rb_cProc);
00556 }
00557 return block->proc;
00558 }
00559
/*
 * Create a Proc of class `klass` capturing `block`.  The owning
 * frame's environment is heapified first so the Proc's lfp/dfp remain
 * valid after the frame pops.  It is a bug to call this on a block
 * that already has a Proc (use vm_make_proc_from_block for caching).
 */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    envval = vm_make_env_object(th, cfp, &blockprocval);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* after heapification, lfp/dfp must no longer point into the
         * machine stack */
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
00598
00599
00600
/*
 * Invoke `block` from C with `self`, `argc`/`argv`, an optional block
 * argument (`blockptr`) and an optional cref.  Ruby blocks get a
 * BLOCK or LAMBDA frame plus a FINISH frame and run through vm_exec();
 * ifunc (C-function) blocks are dispatched directly.  A block with no
 * body (special-const iseq) yields nil.
 */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        /* lambdas check arity strictly; the frame magic records which */
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* push the arguments onto the value stack */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00646
00647 static inline const rb_block_t *
00648 check_block(rb_thread_t *th)
00649 {
00650 const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);
00651
00652 if (blockptr == 0) {
00653 rb_vm_localjump_error("no block given", Qnil, 0);
00654 }
00655
00656 return blockptr;
00657 }
00658
/*
 * Yield to the current frame's block with an explicit cref (used by
 * eval-style callers that must override lexical scope).
 */
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}
00665
/*
 * Yield to the current frame's block (plain `yield` semantics).
 */
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}
00672
/*
 * Call `proc` with `self`, the given arguments and optional block.
 * The proc's recorded $SAFE level is applied for the duration of the
 * call (unless the proc came from a method), and restored afterwards
 * even when the call unwinds via a tag jump.
 */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);
    }
    return val;
}
00699
00700
00701
00702 static rb_control_frame_t *
00703 vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
00704 {
00705 while (cfp->pc == 0) {
00706 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00707 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00708 return 0;
00709 }
00710 }
00711 return cfp;
00712 }
00713
/*
 * Read special variable `key` from the nearest normal frame's svar
 * storage (lfp-based).
 */
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}
00720
/*
 * Write special variable `key` in the nearest normal frame's svar
 * storage (lfp-based).
 */
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}
00727
/* Read special variable `key` for the current thread's current frame. */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00734
/* Write special variable `key` for the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00741
/* $~ (last MatchData): stored at svar key 1. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00747
/* Set $~ (last MatchData): stored at svar key 1. */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00753
/* $_ (last read line): stored at svar key 0. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00759
/* Set $_ (last read line): stored at svar key 0. */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00765
00766
00767
/*
 * Map the frame's current pc to a source line number via the iseq's
 * insn_info_table.  The table is scanned for the first entry whose
 * position is past the pc; the previous entry's line wins.  Returns 0
 * when the frame has no normal iseq or no line info.
 */
int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        /* pc past the last recorded position: use the final entry
         * (i == insn_info_size here) */
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
00790
/*
 * Walk the control-frame stack from the oldest frame toward th->cfp,
 * skipping `lev` levels, and report each Ruby frame (file/line/name)
 * and each named C-function frame to `iter`.  `init` is called once
 * before iteration starts (after the level check).  Returns FALSE when
 * `lev` exceeds the stack depth, TRUE otherwise.
 */
static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    /* skip the two dummy frames at the very bottom of the stack */
    cfp -= 2;
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            ID id;
            extern VALUE ruby_engine_name;

            if (NIL_P(file)) file = ruby_engine_name;
            /* prefer the original name over an aliased one */
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if (id != ID_ALLOCATOR && (*iter)(arg, file, line_no, rb_id2str(id)))
                break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}
00834
00835 static void
00836 vm_backtrace_alloc(void *arg)
00837 {
00838 VALUE *aryp = arg;
00839 *aryp = rb_ary_new();
00840 }
00841
/*
 * Iterator callback: format one backtrace entry as "file:line:in `name'"
 * (or "file:in `name'" when no line is known) in an encoding compatible
 * with both strings, and push it onto the result array.  Returns 0 so
 * iteration continues.
 */
static int
vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
{
    VALUE *aryp = arg;
    VALUE bt;

    if (line_no) {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
                            RSTRING_PTR(file), line_no, RSTRING_PTR(name));
    }
    else {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
                            RSTRING_PTR(file), RSTRING_PTR(name));
    }
    rb_ary_push(*aryp, bt);
    return 0;
}
00859
/*
 * Build a backtrace array for `th`, skipping `lev` levels.  A negative
 * `lev` pre-creates the array so an empty backtrace yields [] rather
 * than nil; otherwise nil is returned when the level is out of range.
 * Frames are collected oldest-first, so the result is reversed before
 * returning.
 */
static inline VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = 0;

    if (lev < 0) {
        ary = rb_ary_new();
    }
    vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
    if (!ary) return Qnil;
    return rb_ary_reverse(ary);
}
00872
00873 const char *
00874 rb_sourcefile(void)
00875 {
00876 rb_thread_t *th = GET_THREAD();
00877 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00878
00879 if (cfp) {
00880 return RSTRING_PTR(cfp->iseq->filename);
00881 }
00882 else {
00883 return 0;
00884 }
00885 }
00886
00887 int
00888 rb_sourceline(void)
00889 {
00890 rb_thread_t *th = GET_THREAD();
00891 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00892
00893 if (cfp) {
00894 return rb_vm_get_sourceline(cfp);
00895 }
00896 else {
00897 return 0;
00898 }
00899 }
00900
/*
 * Return the cref (lexical scope chain) of the nearest Ruby-level
 * frame.  Raises RuntimeError when called with no Ruby frame on the
 * stack (e.g. at the root of a Fiber or Thread).
 */
NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
00912
#if 0
/* Debug helper (compiled out): dump each cref's class and visibility
 * down the chain. */
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif
00924
/*
 * Return the cbase (innermost class/module of the lexical scope) of
 * the nearest Ruby-level frame.  Raises RuntimeError when no Ruby
 * frame is on the stack.
 */
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}
00936
00937
00938
/*
 * Build a LocalJumpError exception carrying @exit_value = `value` and
 * @reason = a symbol derived from the tag `reason` (:break, :redo,
 * :retry, :next, :return, or :noreason).
 */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00970
00971 void
00972 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00973 {
00974 VALUE exc = make_localjump_error(mesg, value, reason);
00975 rb_exc_raise(exc);
00976 }
00977
/*
 * Convert a tag-jump `state` into a LocalJumpError exception object
 * describing it ("unexpected return", "unexpected break", ...), or
 * Qnil for state 0 / states that are not local jumps.  Qundef `val`
 * means "use the retval stashed in the current tag".
 */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
01009
/*
 * Re-raise `state` either as a LocalJumpError (when it maps to one and
 * `val` is not nil) or by re-jumping the tag.  NOTE(review): when
 * `val` is Qnil no exception is built and the raw JUMP_TAG is taken
 * even for local-jump states -- appears intentional, confirm against
 * callers.
 */
void
rb_vm_jump_tag_but_local_jump(int state, VALUE val)
{
    if (val != Qnil) {
        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
        if (!NIL_P(exc)) rb_exc_raise(exc);
    }
    JUMP_TAG(state);
}
01019
NORETURN(static void vm_iter_break(rb_thread_t *th));

/*
 * Perform `break` out of the current iterator block: build a throw
 * object targeting the enclosing frame's dfp and long-jump with
 * TAG_BREAK.  Does not return.
 */
static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
01032
/* Public API: break out of the current iterator block. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
01038
01039
01040
/* Maps method entries of optimized C methods (Fixnum#+, ...) to their
 * BOP_* id; populated by vm_init_redefined_flag(). */
static st_table *vm_opt_method_table = 0;

/*
 * Called on method (re)definition: if `me` is one of the tracked
 * optimized C methods, set its redefined flag so specialized
 * instructions fall back to a real method call.
 */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            ruby_vm_redefined_flag[bop] = 1;
        }
    }
}
01053
/*
 * Register klass#mid (which must be a C function) in
 * vm_opt_method_table under basic operation `bop`; it is a VM bug for
 * the method to be missing or non-CFUNC at init time.
 */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
01066
/*
 * Build the optimized-method table and clear every redefined flag.
 * OP(mid, bop) selects the method id and basic operation; C(Klass)
 * registers rb_cKlass's implementation of that method.
 */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
01097
01098
01099
#if VMDEBUG
/*
 * Debug-only: human-readable name for a control frame's type magic;
 * aborts via rb_bug() on an unknown frame type.
 */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_PROC:   return "proc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
#endif
01120
01121
01122
01123
01124
01125
01126
01127
01128
01129
01130
01131
01132
01133
01134
01135
01136
01137
01138
01139
01140
01141
01142
01143
01144
01145
01146
01147
01148
01149
01150
01151
01152
01153
01154
01155
01156
01157
01158
01159
01160
01161
01162
01163
01164
01165
01166
01167
01168
01169
01170
01171
01172
01173
01174
01175
01176
01177
01178
01179
01180
01181
01182
01183
01184
01185
01186
01187
01188
01189
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
01208
01209
01210
01211
01212
01213
01214
01215
01216
01217
01218
01219
01220
01221
01222
01223
01224 static VALUE
01225 vm_exec(rb_thread_t *th)
01226 {
01227 int state;
01228 VALUE result, err;
01229 VALUE initial = 0;
01230 VALUE *escape_dfp = NULL;
01231
01232 TH_PUSH_TAG(th);
01233 _tag.retval = Qnil;
01234 if ((state = EXEC_TAG()) == 0) {
01235 vm_loop_start:
01236 result = vm_exec_core(th, initial);
01237 if ((state = th->state) != 0) {
01238 err = result;
01239 th->state = 0;
01240 goto exception_handler;
01241 }
01242 }
01243 else {
01244 int i;
01245 struct iseq_catch_table_entry *entry;
01246 unsigned long epc, cont_pc, cont_sp;
01247 VALUE catch_iseqval;
01248 rb_control_frame_t *cfp;
01249 VALUE type;
01250
01251 err = th->errinfo;
01252
01253 exception_handler:
01254 cont_pc = cont_sp = catch_iseqval = 0;
01255
01256 while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
01257 if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
01258 const rb_method_entry_t *me = th->cfp->me;
01259 EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
01260 }
01261 th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
01262 }
01263
01264 cfp = th->cfp;
01265 epc = cfp->pc - cfp->iseq->iseq_encoded;
01266
01267 if (state == TAG_BREAK || state == TAG_RETURN) {
01268 escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
01269
01270 if (cfp->dfp == escape_dfp) {
01271 if (state == TAG_RETURN) {
01272 if ((cfp + 1)->pc != &finish_insn_seq[0]) {
01273 SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
01274 SET_THROWOBJ_STATE(err, state = TAG_BREAK);
01275 }
01276 else {
01277 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01278 entry = &cfp->iseq->catch_table[i];
01279 if (entry->start < epc && entry->end >= epc) {
01280 if (entry->type == CATCH_TYPE_ENSURE) {
01281 catch_iseqval = entry->iseq;
01282 cont_pc = entry->cont;
01283 cont_sp = entry->sp;
01284 break;
01285 }
01286 }
01287 }
01288 if (!catch_iseqval) {
01289 result = GET_THROWOBJ_VAL(err);
01290 th->errinfo = Qnil;
01291 th->cfp += 2;
01292 goto finish_vme;
01293 }
01294 }
01295
01296 }
01297 else {
01298
01299 #if OPT_STACK_CACHING
01300 initial = (GET_THROWOBJ_VAL(err));
01301 #else
01302 *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
01303 #endif
01304 th->errinfo = Qnil;
01305 goto vm_loop_start;
01306 }
01307 }
01308 }
01309
01310 if (state == TAG_RAISE) {
01311 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01312 entry = &cfp->iseq->catch_table[i];
01313 if (entry->start < epc && entry->end >= epc) {
01314
01315 if (entry->type == CATCH_TYPE_RESCUE ||
01316 entry->type == CATCH_TYPE_ENSURE) {
01317 catch_iseqval = entry->iseq;
01318 cont_pc = entry->cont;
01319 cont_sp = entry->sp;
01320 break;
01321 }
01322 }
01323 }
01324 }
01325 else if (state == TAG_RETRY) {
01326 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01327 entry = &cfp->iseq->catch_table[i];
01328 if (entry->start < epc && entry->end >= epc) {
01329
01330 if (entry->type == CATCH_TYPE_ENSURE) {
01331 catch_iseqval = entry->iseq;
01332 cont_pc = entry->cont;
01333 cont_sp = entry->sp;
01334 break;
01335 }
01336 else if (entry->type == CATCH_TYPE_RETRY) {
01337 VALUE *escape_dfp;
01338 escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
01339 if (cfp->dfp == escape_dfp) {
01340 cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
01341 th->errinfo = Qnil;
01342 goto vm_loop_start;
01343 }
01344 }
01345 }
01346 }
01347 }
01348 else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
01349 type = CATCH_TYPE_BREAK;
01350
01351 search_restart_point:
01352 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01353 entry = &cfp->iseq->catch_table[i];
01354
01355 if (entry->start < epc && entry->end >= epc) {
01356 if (entry->type == CATCH_TYPE_ENSURE) {
01357 catch_iseqval = entry->iseq;
01358 cont_pc = entry->cont;
01359 cont_sp = entry->sp;
01360 break;
01361 }
01362 else if (entry->type == type) {
01363 cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
01364 cfp->sp = cfp->bp + entry->sp;
01365
01366 if (state != TAG_REDO) {
01367 #if OPT_STACK_CACHING
01368 initial = (GET_THROWOBJ_VAL(err));
01369 #else
01370 *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
01371 #endif
01372 }
01373 th->state = 0;
01374 th->errinfo = Qnil;
01375 goto vm_loop_start;
01376 }
01377 }
01378 }
01379 }
01380 else if (state == TAG_REDO) {
01381 type = CATCH_TYPE_REDO;
01382 goto search_restart_point;
01383 }
01384 else if (state == TAG_NEXT) {
01385 type = CATCH_TYPE_NEXT;
01386 goto search_restart_point;
01387 }
01388 else {
01389 for (i = 0; i < cfp->iseq->catch_table_size; i++) {
01390 entry = &cfp->iseq->catch_table[i];
01391 if (entry->start < epc && entry->end >= epc) {
01392
01393 if (entry->type == CATCH_TYPE_ENSURE) {
01394 catch_iseqval = entry->iseq;
01395 cont_pc = entry->cont;
01396 cont_sp = entry->sp;
01397 break;
01398 }
01399 }
01400 }
01401 }
01402
01403 if (catch_iseqval != 0) {
01404
01405 rb_iseq_t *catch_iseq;
01406
01407
01408 GetISeqPtr(catch_iseqval, catch_iseq);
01409 cfp->sp = cfp->bp + cont_sp;
01410 cfp->pc = cfp->iseq->iseq_encoded + cont_pc;
01411
01412
01413 cfp->sp[0] = err;
01414 vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
01415 cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
01416 cfp->sp + 1 , cfp->lfp, catch_iseq->local_size - 1);
01417
01418 state = 0;
01419 th->state = 0;
01420 th->errinfo = Qnil;
01421 goto vm_loop_start;
01422 }
01423 else {
01424
01425
01426 switch (VM_FRAME_TYPE(th->cfp)) {
01427 case VM_FRAME_MAGIC_METHOD:
01428 EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
01429 break;
01430 case VM_FRAME_MAGIC_CLASS:
01431 EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
01432 break;
01433 }
01434
01435 th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
01436
01437 if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
01438 goto exception_handler;
01439 }
01440 else {
01441 vm_pop_frame(th);
01442 th->errinfo = err;
01443 TH_POP_TAG2();
01444 JUMP_TAG(state);
01445 }
01446 }
01447 }
01448 finish_vme:
01449 TH_POP_TAG();
01450 return result;
01451 }
01452
01453
01454
01455 VALUE
01456 rb_iseq_eval(VALUE iseqval)
01457 {
01458 rb_thread_t *th = GET_THREAD();
01459 VALUE val;
01460 volatile VALUE tmp;
01461
01462 vm_set_top_stack(th, iseqval);
01463
01464 val = vm_exec(th);
01465 tmp = iseqval;
01466 return val;
01467 }
01468
01469 VALUE
01470 rb_iseq_eval_main(VALUE iseqval)
01471 {
01472 rb_thread_t *th = GET_THREAD();
01473 VALUE val;
01474 volatile VALUE tmp;
01475
01476 vm_set_main_stack(th, iseqval);
01477
01478 val = vm_exec(th);
01479 tmp = iseqval;
01480 return val;
01481 }
01482
/*
 * Find the method id and defining class of the method that thread +th+
 * is currently executing.
 *
 * idp:    out parameter for the method id (may be NULL if unwanted).
 * klassp: out parameter for the defining class (may be NULL).
 *
 * Returns 1 when a method, C function or internal <ifunc> frame was
 * identified (out parameters filled in), 0 otherwise.
 */
int
rb_thread_method_id_and_class(rb_thread_t *th,
			      ID *idp, VALUE *klassp)
{
    rb_control_frame_t *cfp = th->cfp;
    rb_iseq_t *iseq = cfp->iseq;
    /* C-function frame: no iseq, but the method entry has id/class */
    if (!iseq && cfp->me) {
	if (idp) *idp = cfp->me->def->original_id;
	if (klassp) *klassp = cfp->me->klass;
	return 1;
    }
    /* Ruby frame: climb parent iseqs until we reach the defining method */
    while (iseq) {
	if (RUBY_VM_IFUNC_P(iseq)) {
	    /* C-implemented block (IFUNC): report a placeholder id */
	    if (idp) CONST_ID(*idp, "<ifunc>");
	    if (klassp) *klassp = 0;
	    return 1;
	}
	if (iseq->defined_method_id) {
	    if (idp) *idp = iseq->defined_method_id;
	    if (klassp) *klassp = iseq->klass;
	    return 1;
	}
	if (iseq->local_iseq == iseq) {
	    /* top of the local scope chain reached without a method */
	    break;
	}
	iseq = iseq->parent_iseq;
    }
    return 0;
}
01512
/*
 * Convenience wrapper: rb_thread_method_id_and_class() for the current
 * thread's active frame.
 */
int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
}
01518
01519 VALUE
01520 rb_thread_current_status(const rb_thread_t *th)
01521 {
01522 const rb_control_frame_t *cfp = th->cfp;
01523 VALUE str = Qnil;
01524
01525 if (cfp->iseq != 0) {
01526 if (cfp->pc != 0) {
01527 rb_iseq_t *iseq = cfp->iseq;
01528 int line_no = rb_vm_get_sourceline(cfp);
01529 char *file = RSTRING_PTR(iseq->filename);
01530 str = rb_sprintf("%s:%d:in `%s'",
01531 file, line_no, RSTRING_PTR(iseq->name));
01532 }
01533 }
01534 else if (cfp->me->def->original_id) {
01535 str = rb_sprintf("`%s#%s' (cfunc)",
01536 rb_class2name(cfp->me->klass),
01537 rb_id2name(cfp->me->def->original_id));
01538 }
01539
01540 return str;
01541 }
01542
/*
 * Call the C function +func+ with +arg+ under a dummy top-level Ruby
 * frame, so that Ruby-level machinery (backtraces, blocks) sees a frame
 * whose file name is +filename+ and whose receiver is +recv+.
 *
 * blockptr: optional block made visible to the called code (may be 0).
 *
 * Returns the value returned by +func+.
 * NOTE(review): the frame is popped only on normal return; assumes any
 * non-local exit is handled by the surrounding vm_exec machinery.
 */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
		 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    /* volatile: keep the dummy iseq alive across the call (GC guard) */
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
		  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01560
01561
01562
01563 static int
01564 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01565 {
01566 VALUE thval = (VALUE)key;
01567 rb_gc_mark(thval);
01568 return ST_CONTINUE;
01569 }
01570
01571 static void
01572 mark_event_hooks(rb_event_hook_t *hook)
01573 {
01574 while (hook) {
01575 rb_gc_mark(hook->data);
01576 hook = hook->next;
01577 }
01578 }
01579
/*
 * GC mark function for the VM object: marks every VALUE reachable from
 * the rb_vm_t so the collector keeps them alive.  Registered as the
 * dmark hook in vm_data_type.
 */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
	rb_vm_t *vm = ptr;
	/* each living thread object (keys of the table) */
	if (vm->living_threads) {
	    st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
	}
	RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
	RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
	RUBY_MARK_UNLESS_NULL(vm->load_path);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features);
	RUBY_MARK_UNLESS_NULL(vm->top_self);
	RUBY_MARK_UNLESS_NULL(vm->coverages);
	/* preallocated exception objects (NoMemoryError etc.) */
	rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

	if (vm->loading_table) {
	    rb_mark_tbl(vm->loading_table);
	}

	mark_event_hooks(vm->event_hooks);

	/* signal trap handler commands */
	for (i = 0; i < RUBY_NSIG; i++) {
	    if (vm->trap_list[i].cmd)
		rb_gc_mark(vm->trap_list[i].cmd);
	}
    }

    RUBY_MARK_LEAVE("vm");
}
01614
01615 #define vm_free 0
01616
/*
 * Tear down a VM: free the main thread, the living-thread table, the
 * object space (when per-VM object spaces are enabled), run at_exit
 * hooks, destroy the GVL and release the rb_vm_t itself.
 *
 * Returns 0 (always).
 */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
	rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	struct rb_objspace *objspace = vm->objspace;
#endif
	/* the wrapper object must not be finalized normally */
	rb_gc_force_recycle(vm->self);
	vm->main_thread = 0;
	if (th) {
	    rb_fiber_reset_root_local_storage(th->self);
	    thread_free(th);
	}
	if (vm->living_threads) {
	    st_free_table(vm->living_threads);
	    vm->living_threads = 0;
	}
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	if (objspace) {
	    rb_objspace_free(objspace);
	}
#endif
	/* run hooks before the GVL disappears */
	ruby_vm_run_at_exit_hooks(vm);
	rb_vm_gvl_destroy(vm);
	ruby_xfree(vm);
	ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01649
01650 static size_t
01651 vm_memsize(const void *ptr)
01652 {
01653 if (ptr) {
01654 const rb_vm_t *vmobj = ptr;
01655 return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
01656 }
01657 else {
01658 return 0;
01659 }
01660 }
01661
/* TypedData descriptor for the VM wrapper object: mark/free/size hooks. */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};
01666
/*
 * Second-stage VM initialization: zero the structure and set the few
 * fields whose zero value is not a valid default.
 */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    /* at_exit is an embedded, hidden (klass == 0) array with length 0:
     * set T_ARRAY + embed flag, then clear the embedded-length bits */
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK;
    vm->at_exit.basic.klass = 0;
}
01675
01676
01677
/* Tunable: pool freed VM stacks for reuse instead of returning them to
 * the allocator on every thread death. */
#define USE_THREAD_DATA_RECYCLE 1

#if USE_THREAD_DATA_RECYCLE
/* maximum number of stacks kept in the recycle pool */
#define RECYCLE_MAX 64
static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
static int thread_recycle_stack_count = 0;

/*
 * Return a VM stack of +size+ VALUEs: pop one from the recycle pool if
 * available, otherwise allocate a fresh one.
 * NOTE(review): pooled stacks are handed out regardless of +size+ --
 * assumes every caller requests the same stack size; confirm.
 */
static VALUE *
thread_recycle_stack(size_t size)
{
    if (thread_recycle_stack_count) {
	return thread_recycle_stack_slot[--thread_recycle_stack_count];
    }
    else {
	return ALLOC_N(VALUE, size);
    }
}

#else
#define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
#endif
01699
/*
 * Release a VM stack: stash it in the recycle pool when there is room,
 * otherwise free it outright.
 */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
	thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
	return;
    }
#endif
    ruby_xfree(stack);
}
01711
#ifdef USE_THREAD_RECYCLE
/* Allocate a zero-filled rb_thread_t structure. */
static rb_thread_t *
thread_recycle_struct(void)
{
    rb_thread_t *th = ALLOC_N(rb_thread_t, 1);
    MEMZERO(th, rb_thread_t, 1);
    return th;
}
#endif
01721
/*
 * GC mark function for a thread object: marks the VM stack contents,
 * every control frame's references, and all VALUE fields of rb_thread_t.
 * Registered as the dmark hook in ruby_threadptr_data_type.
 */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
	th = ptr;
	if (th->stack) {
	    VALUE *p = th->stack;
	    VALUE *sp = th->cfp->sp;
	    rb_control_frame_t *cfp = th->cfp;
	    /* control frames grow downward from the top of the stack */
	    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

	    /* live slots of the value stack */
	    while (p < sp) {
		rb_gc_mark(*p++);
	    }
	    /* extra slots reserved above sp during argument setup */
	    rb_gc_mark_locations(p, p + th->mark_stack_len);

	    /* walk every control frame down to the stack limit */
	    while (cfp != limit_cfp) {
		rb_iseq_t *iseq = cfp->iseq;
		rb_gc_mark(cfp->proc);
		rb_gc_mark(cfp->self);
		if (iseq) {
		    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
		}
		if (cfp->me) {
		    /* flag the method entry as in-use before marking it */
		    ((rb_method_entry_t *)cfp->me)->mark = 1;
		    rb_mark_method_entry(cfp->me);
		}
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }
	}

	/* per-thread VALUE fields */
	RUBY_MARK_UNLESS_NULL(th->first_proc);
	if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

	RUBY_MARK_UNLESS_NULL(th->thgroup);
	RUBY_MARK_UNLESS_NULL(th->value);
	RUBY_MARK_UNLESS_NULL(th->errinfo);
	RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
	RUBY_MARK_UNLESS_NULL(th->local_svar);
	RUBY_MARK_UNLESS_NULL(th->top_self);
	RUBY_MARK_UNLESS_NULL(th->top_wrapper);
	RUBY_MARK_UNLESS_NULL(th->fiber);
	RUBY_MARK_UNLESS_NULL(th->root_fiber);
	RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
	RUBY_MARK_UNLESS_NULL(th->last_status);

	RUBY_MARK_UNLESS_NULL(th->locking_mutex);

	rb_mark_tbl(th->local_storage);

	/* conservatively mark the machine stack and saved registers of
	 * every thread except the one currently running the GC */
	if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
	    rb_gc_mark_machine_stack(th);
	    rb_gc_mark_locations((VALUE *)&th->machine_regs,
				 (VALUE *)(&th->machine_regs) +
				 sizeof(th->machine_regs) / sizeof(VALUE));
	}

	mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01788
/*
 * TypedData dfree hook for thread objects: release the VM stack (unless
 * owned by a root fiber), the local-storage table and, for non-main
 * threads, the rb_thread_t itself.  Held mutexes at this point are a
 * VM bug and abort the process.
 */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
	th = ptr;

	/* a root fiber shares/owns the stack; do not free it here */
	if (!th->root_fiber) {
	    RUBY_FREE_UNLESS_NULL(th->stack);
	}

	if (th->locking_mutex != Qfalse) {
	    rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
	}
	if (th->keeping_mutexes != NULL) {
	    rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
	}

	if (th->local_storage) {
	    st_free_table(th->local_storage);
	}

	/* the main thread's struct is owned by the VM, not by GC */
	if (th->vm && th->vm->main_thread == th) {
	    RUBY_GC_INFO("main thread\n");
	}
	else {
#ifdef USE_SIGALTSTACK
	    if (th->altstack) {
		free(th->altstack);
	    }
#endif
	    ruby_xfree(ptr);
	}
	if (ruby_current_thread == th)
	    ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}
01829
01830 static size_t
01831 thread_memsize(const void *ptr)
01832 {
01833 if (ptr) {
01834 const rb_thread_t *th = ptr;
01835 size_t size = sizeof(rb_thread_t);
01836
01837 if (!th->root_fiber) {
01838 size += th->stack_size * sizeof(VALUE);
01839 }
01840 if (th->local_storage) {
01841 size += st_memsize(th->local_storage);
01842 }
01843 return size;
01844 }
01845 else {
01846 return 0;
01847 }
01848 }
01849
/* TypedData descriptor for thread objects; the short alias is used
 * throughout this file. */
#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
	rb_thread_mark,
	thread_free,
	thread_memsize,
    },
};
01859
01860 VALUE
01861 rb_obj_is_thread(VALUE obj)
01862 {
01863 if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
01864 return Qtrue;
01865 }
01866 else {
01867 return Qfalse;
01868 }
01869 }
01870
/*
 * Allocate a Thread wrapper object of class +klass+ with a fresh (or,
 * when USE_THREAD_RECYCLE is defined, recycled) rb_thread_t attached.
 */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01884
/*
 * Initialize an rb_thread_t: allocate its VM stack, set up the initial
 * dummy top frame, and reset status fields.
 *
 * self: the wrapping Thread object (0 during VM bootstrap).
 */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* NOTE(review): malloc result is not checked here -- a NULL altstack
     * would surface later in signal handling; confirm intended. */
    th->altstack = malloc(ALT_STACK_SIZE);
#endif
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow downward from the stack top */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
		  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
}
01908
/*
 * Initialize the rb_thread_t behind Thread object +self+, attaching it
 * to the current VM and giving it the default toplevel self/wrapper.
 *
 * Returns +self+.
 */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
01923
01924 VALUE
01925 rb_thread_alloc(VALUE klass)
01926 {
01927 VALUE self = thread_alloc(klass);
01928 ruby_thread_init(self);
01929 return self;
01930 }
01931
/*
 * Define a method from an iseq, implementing the `define_method`
 * instruction semantics.
 *
 * obj:          receiver for singleton definitions.
 * id:           method name.
 * iseqval:      iseq wrapper implementing the body.
 * is_singleton: nonzero to define a singleton method on +obj+.
 * cref:         lexical cref providing target class and visibility.
 */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
		 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    /* an iseq already bound to a class must be cloned before rebinding */
    if (miseq->klass) {
	iseqval = rb_iseq_clone(iseqval, 0);
	RB_GC_GUARD(iseqval);
	GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
	rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
	if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
	    rb_raise(rb_eTypeError,
		     "can't define singleton method \"%s\" for %s",
		     rb_id2name(id), rb_obj_classname(obj));
	}

	rb_check_frozen(obj);
	klass = rb_singleton_class(obj);
	noex = NOEX_PUBLIC;
    }

    /* dup cref so the stored copy is independent of the caller's */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: also define a public singleton on the module */
    if (!is_singleton && noex == NOEX_MODFUNC) {
	rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
01975
/* Temporarily rewind the current thread's control frame pointer by one
 * frame while evaluating +expr+, so the expression runs in the caller's
 * frame context, then restore it. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
01980
/* FrozenCore helper: define an instance method +sym+ on +cbase+ from
 * iseq +iseqval+, evaluated in the caller's frame. Returns nil. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
01989
/* FrozenCore helper: define a singleton method +sym+ on +cbase+ from
 * iseq +iseqval+, evaluated in the caller's frame. Returns nil. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
01998
/* FrozenCore helper: alias method +sym2+ as +sym1+ on +cbase+,
 * evaluated in the caller's frame. Returns nil. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02007
/* FrozenCore helper: alias global variable +sym2+ as +sym1+,
 * evaluated in the caller's frame. Returns nil. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02016
/* FrozenCore helper: undefine method +sym+ on +cbase+ and bump the VM
 * state version to invalidate method caches. Returns nil. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
	rb_undef(cbase, SYM2ID(sym));
	INC_VM_STATE_VERSION();
    });
    return Qnil;
}
02026
/*
 * FrozenCore helper implementing END { } blocks: turn +iseqval+ into a
 * Proc bound to the nearest Ruby-level frame and register it as an
 * end proc.  Returns nil.
 */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
	rb_iseq_t *blockiseq;
	rb_block_t *blockptr;
	rb_thread_t *th = GET_THREAD();
	rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
	VALUE proc;

	/* this method is only invoked from Ruby code, so a Ruby-level
	 * frame must exist above us */
	if (cfp == 0) {
	    rb_bug("m_core_set_postexe: unreachable");
	}

	GetISeqPtr(iseqval, blockiseq);

	/* craft a block in the caller's frame, then materialize a Proc */
	blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
	blockptr->iseq = blockiseq;
	blockptr->proc = 0;

	proc = rb_vm_make_proc(th, blockptr, rb_cProc);
	rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
02052
/* Machine-stack bookkeeping owned by gc.c. */
extern VALUE *rb_gc_stack_start;
extern size_t rb_gc_stack_maxsize;
#ifdef __ia64
/* ia64 additionally has a register backing-store stack */
extern VALUE *rb_gc_register_stack_start;
#endif
02058
02059
02060
02061
/* Debug helper (RubyVM::SDR when VMDEBUG): dump a VM bug report. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
02068
02069
/*
 * Debug helper (RubyVM::NSDR when VMDEBUG): return the native C
 * backtrace of the current thread as an Array of Strings.  Returns an
 * empty Array when backtrace(3) is unavailable.
 */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    /* static: debug-only, not reentrancy-safe by design */
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
	rb_memerror();
    }

    for (i=0; i<n; i++) {
	rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms);
#endif
    return ary;
}
02093
02094 void
02095 Init_VM(void)
02096 {
02097 VALUE opts;
02098 VALUE klass;
02099 VALUE fcore;
02100
02101
02102 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02103 rb_undef_alloc_func(rb_cRubyVM);
02104 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02105
02106
02107 fcore = rb_class_new(rb_cBasicObject);
02108 RBASIC(fcore)->flags = T_ICLASS;
02109 klass = rb_singleton_class(fcore);
02110 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02111 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02112 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02113 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02114 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02115 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02116 rb_define_method_id(klass, idProc, rb_block_proc, 0);
02117 rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
02118 rb_obj_freeze(fcore);
02119 rb_gc_register_mark_object(fcore);
02120 rb_mRubyVMFrozenCore = fcore;
02121
02122
02123 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02124 rb_undef_alloc_func(rb_cEnv);
02125 rb_undef_method(CLASS_OF(rb_cEnv), "new");
02126
02127
02128 rb_cThread = rb_define_class("Thread", rb_cObject);
02129 rb_undef_alloc_func(rb_cThread);
02130
02131
02132 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02133 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02134 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02135 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02136
02137 #if OPT_DIRECT_THREADED_CODE
02138 rb_ary_push(opts, rb_str_new2("direct threaded code"));
02139 #elif OPT_TOKEN_THREADED_CODE
02140 rb_ary_push(opts, rb_str_new2("token threaded code"));
02141 #elif OPT_CALL_THREADED_CODE
02142 rb_ary_push(opts, rb_str_new2("call threaded code"));
02143 #endif
02144
02145 #if OPT_STACK_CACHING
02146 rb_ary_push(opts, rb_str_new2("stack caching"));
02147 #endif
02148 #if OPT_OPERANDS_UNIFICATION
02149 rb_ary_push(opts, rb_str_new2("operands unification]"));
02150 #endif
02151 #if OPT_INSTRUCTIONS_UNIFICATION
02152 rb_ary_push(opts, rb_str_new2("instructions unification"));
02153 #endif
02154 #if OPT_INLINE_METHOD_CACHE
02155 rb_ary_push(opts, rb_str_new2("inline method cache"));
02156 #endif
02157 #if OPT_BLOCKINLINING
02158 rb_ary_push(opts, rb_str_new2("block inlining"));
02159 #endif
02160
02161
02162 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02163
02164
02165 #if VMDEBUG
02166 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02167 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02168 #else
02169 (void)sdr;
02170 (void)nsdr;
02171 #endif
02172
02173
02174 {
02175 rb_vm_t *vm = ruby_current_vm;
02176 rb_thread_t *th = GET_THREAD();
02177 VALUE filename = rb_str_new2("<main>");
02178 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02179 volatile VALUE th_self;
02180 rb_iseq_t *iseq;
02181
02182
02183 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02184
02185
02186 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02187 vm->main_thread = th;
02188 vm->running_thread = th;
02189 th->vm = vm;
02190 th->top_wrapper = 0;
02191 th->top_self = rb_vm_top_self();
02192 rb_thread_set_current(th);
02193
02194 vm->living_threads = st_init_numtable();
02195 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02196
02197 rb_gc_register_mark_object(iseqval);
02198 GetISeqPtr(iseqval, iseq);
02199 th->cfp->iseq = iseq;
02200 th->cfp->pc = iseq->iseq_encoded;
02201 th->cfp->self = th->top_self;
02202
02203
02204
02205
02206 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02207 }
02208 vm_init_redefined_flag();
02209 }
02210
/*
 * Set the filename recorded on the main thread's bottom-most (toplevel)
 * iseq, so backtraces report +filename+ for the main program.
 */
void
rb_vm_set_progname(VALUE filename)
{
    rb_thread_t *th = GET_VM()->main_thread;
    /* the first control frame sits just below the top of the VM stack */
    rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    --cfp;
    cfp->iseq->filename = filename;
}
02219
02220 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02221 struct rb_objspace *rb_objspace_alloc(void);
02222 #endif
02223
/*
 * VM bootstrap: phase 1 -- allocate the rb_vm_t and the main thread's
 * rb_thread_t with plain malloc (the GC is not running yet), initialize
 * them, and make them current.  Aborts the process on allocation failure.
 */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
	fprintf(stderr, "[FATAL] failed to allocate memory\n");
	exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);

    /* raw setter: bypasses checks that require a live VM */
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
02249
02250
02251
02252 static VALUE
02253 main_to_s(VALUE obj)
02254 {
02255 return rb_str_new2("main");
02256 }
02257
/* Return the toplevel self object ("main") of the current VM. */
VALUE
rb_vm_top_self(void)
{
    return GET_VM()->top_self;
}
02263
/*
 * Create the toplevel self object ("main") and the VM's array used to
 * pin objects against GC (mark_object_ary).
 */
void
Init_top_self(void)
{
    rb_vm_t *vm = GET_VM();

    vm->top_self = rb_obj_alloc(rb_cObject);
    rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);

    /* initialize mark object array */
    vm->mark_object_ary = rb_ary_tmp_new(1);
}
02275
/* Address of +vm+'s $VERBOSE storage (per-VM global). */
VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}
02281
/* Address of +vm+'s $DEBUG storage (per-VM global). */
VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}
02287
/* Address of the current VM's $VERBOSE storage. */
VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}
02293
/* Address of the current VM's $DEBUG storage. */
VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}
02299