/* -*- mode: c -*- */
/*
    threads.d -- Posix threads.
*/
/*
    Copyright (c) 2003, Juan Jose Garcia Ripoll.
    Copyright (c) 2010-2012, Jean-Claude Beaudoin.

    MKCL is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 3 of the License, or (at your option) any later version.

    See file '../../Copyright' for full details.
*/

#include <mkcl/mkcl.h>
#include <mkcl/mkcl-gc.h>
#include <mkcl/mkcl-math.h>
#include <mkcl/mkcl-inl.h>
#include <mkcl/internal.h>

#include <limits.h>

#include <string.h>
#include <errno.h>
#include <time.h>
#include <stdlib.h>
#include <signal.h>

#ifdef HAVE_GETTIMEOFDAY
# include <sys/time.h>
#endif
#ifdef HAVE_SCHED_YIELD
# include <sched.h>
#endif

#ifdef __linux
/* This _true_pthread_join gives you direct access to the real pthread_join, not the Boehm's GC wrapped one. */
static int _true_pthread_join(pthread_t thread, void ** retval);
#endif /* __linux */

static void setup_thread_bindings(MKCL, mkcl_object initial_bindings);


#ifdef MKCL_WINDOWS
char * mkcl_handle_debug_name(MKCL, char * prefix)
{
  /* Builds a process- and instance-unique debug name of the form
     "MKCL(<pid>): <prefix> <count>" in freshly allocated atomic storage.
     Returns the allocated string; ownership follows the GC heap rules. */
  static unsigned long count = 0;
  static const char * const format = "MKCL(%lu): %s %lu";
  DWORD pid = GetCurrentProcessId();
  /* BUG FIX: sizeof(format) was the size of a *pointer* (typically 8),
     not the length of the format string, and 12 bytes per %lu is too
     small for a 64-bit unsigned long (up to 20 digits).  Budget the
     real skeleton length plus 20 digits per number, and use snprintf
     so the write can never overrun the allocation. */
  size_t size = strlen(format) + 20 + strlen(prefix) + 20 + 1;
  char * name = mkcl_alloc_atomic(env, size);

  snprintf(name, size, format, pid, prefix, count++);
  return name;
}
#endif

#ifdef MKCL_WINDOWS
/* TLS slot holding each thread's mkcl_env pointer (Win32 TLS API). */
static DWORD cl_env_key;
#else
/* TLS key holding each thread's mkcl_env pointer (POSIX thread-specific data). */
static pthread_key_t cl_env_key;
#endif

/* Returns the calling thread's MKCL environment from thread-local storage,
   or NULL if this thread was never given one (foreign/unregistered thread). */
const mkcl_env mkcl_thread_env(void)
{
#ifdef MKCL_WINDOWS
  return TlsGetValue(cl_env_key);
#else
  return pthread_getspecific(cl_env_key);
#endif
}

/* Stores the given environment in the calling thread's TLS slot so that
   mkcl_thread_env() can retrieve it later.  Signals a lisp error if the
   underlying TLS primitive fails. */
static void
mkcl_set_thread_env(MKCL)
{
#if defined(MKCL_WINDOWS)
  if (!TlsSetValue(cl_env_key, env))
    mkcl_FEwin32_error(env, "mkcl_set_thread_env failed on TlsSetValue", 0);
#else
  if (pthread_setspecific(cl_env_key, env))
    mkcl_FElibc_error(env, "mkcl_set_thread_env failed on pthread_setspecific()", 0);
#endif
}


/*----------------------------------------------------------------------
 */


/* Returns the Lisp thread object that owns the given environment. */
inline mkcl_object
mkcl_current_thread(MKCL)
{
  return env->own_thread;
}

/* Lisp entry point: returns the current thread object (mt:current-thread). */
mkcl_object mk_mt_current_thread(MKCL)
{
  @(return mkcl_current_thread(env));
}

/*----------------------------------------------------------------------
 * THREAD OBJECT
 */

/* Signals a type-error unless o is a mt::thread object. */
inline static void
mkcl_assert_type_thread(MKCL, mkcl_object o)
{
  if (mkcl_type_of(o) != mkcl_t_thread)
    mkcl_FEwrong_type_argument(env, @'mt::thread', o);
}

static void
thread_final_cleanup(MKCL, mkcl_object thread)
{
  /* This routine performs some cleanup before a thread is completely
   * done. For instance, it has to remove the associated thread
   * object from the list, and it has to dealloc some memory.
   *
   * FIX: a block of stray pasted text (a run of decimal line numbers,
   * clearly a copy/scrape artifact) had been inserted between the `if'
   * condition and its body below, making the file uncompilable; it has
   * been removed and the original statement restored.
   */
  /* const mkcl_env env = thread->thread.env; */

  thread->thread.status = mkcl_thread_done;
  env->disable_interrupts = 2; /* This prevents any interrupts, even forced. */
#if MKCL_DEBUG_INTERRUPT_MASK
  env->interrupt_disabler_lineno = __LINE__;
  env->interrupt_disabler_file = __FILE__;
#endif
  mkcl_reset_stacks(env);
  env->fpe_control_bits = 0;

  mkcl_set_thread_env(NULL);

  /* Detached and imported threads are never joined, so nobody else will
     take them off the global thread list; do it here ourselves. */
  if (thread->thread.detached
      || thread->thread.result_value == @':imported'
      || thread->thread.result_value == @':imported-and-gc-registered')
    {
      mkcl_remove_thread_from_global_thread_list(env, thread);
    }

#if __unix
  if (thread->thread.running_lock) /* should not be here */
    {
      pthread_mutex_destroy(thread->thread.running_lock);
      thread->thread.running_lock = NULL;
    }
  sigemptyset(&thread->thread.saved_sigmask);
#endif

  /* cleanup the private state of the thread */
  thread->thread.interrupt = mk_cl_Cnil;
  thread->thread.sigmask_frs_marker = NULL;
  thread->thread.resume_handler_ran = FALSE;
  thread->thread.interrupt_count = 0;
  {
    int i, j;

    for (i = 0; i < MKCL_MAX_INTERRUPTS; i++)
      {
	thread->thread.interrupted_threads[i].thread_ident = 0;

	thread->thread.interrupted_threads[i].cs_org = NULL;
	thread->thread.interrupted_threads[i].cs_limit = NULL;
	thread->thread.interrupted_threads[i].cs_size = 0;
	thread->thread.interrupted_threads[i].cs_overflow_size = 0;
	thread->thread.interrupted_threads[i].cs_overflowing = FALSE;

	thread->thread.interrupted_threads[i].nvalues = 0;
	for (j = 0; j < MKCL_MULTIPLE_VALUES_LIMIT; j++)
	  thread->thread.interrupted_threads[i].values[j] = mk_cl_Cnil;
      }
  }
}

#ifdef MKCL_WINDOWS
# if 0
/* For use with direct call to CreateThread */
typedef DWORD thread_value_t;
#  define CALL_CONV WINAPI
# else
/* For calls to _beginthreadex */
typedef unsigned thread_value_t;
#  define CALL_CONV __stdcall
# endif
#elif defined(__linux)
/* Under pthreads the thread entry point returns void * with no special
   calling convention. */
typedef void * thread_value_t;
# define CALL_CONV
/* NOTE(review): no #else branch -- on a non-linux, non-Windows platform
   thread_value_t and CALL_CONV stay undefined; confirm this is intended. */
#endif

/* Prepends the given thread to the global list of live threads
   (mkcl_core.threads), holding the global list lock while doing so. */
static void push_on_global_thread_list(MKCL, mkcl_object thread)
{
  volatile bool list_locked = false;

  MKCL_UNWIND_PROTECT_BEGIN(env) {
    /* Take the lock with interrupts masked, and record that fact so the
       cleanup clause unlocks only what was actually locked. */
    MKCL_LIBC_NO_INTR(env, (MKCL_THREAD_LIST_LOCK(), list_locked = true));
    mkcl_core.threads = MKCL_CONS(env, thread, mkcl_core.threads);
  } MKCL_UNWIND_PROTECT_EXIT {
    if (list_locked) MKCL_THREAD_LIST_UNLOCK();
  } MKCL_UNWIND_PROTECT_END;
}

/* Removes the given thread from the global list of live threads
   (mkcl_core.threads), holding the global list lock while doing so. */
void mkcl_remove_thread_from_global_thread_list(MKCL, mkcl_object thread)
{
  volatile bool list_locked = false;

  MKCL_UNWIND_PROTECT_BEGIN(env) {
    /* Lock with interrupts masked; remember it so cleanup only unlocks
       when the lock was really acquired. */
    MKCL_LIBC_NO_INTR(env, (MKCL_THREAD_LIST_LOCK(), list_locked = true));
    mkcl_core.threads = mkcl_remove_eq(env, thread, mkcl_core.threads);
  } MKCL_UNWIND_PROTECT_EXIT {
    if (list_locked) MKCL_THREAD_LIST_UNLOCK();
  } MKCL_UNWIND_PROTECT_END;
}


/* Marks the thread active and publishes it on the global thread list. */
void mkcl_register_thread_as_active(MKCL, mkcl_object thread)
{
  thread->thread.status = mkcl_thread_active;
  push_on_global_thread_list(env, thread);
}

/* Performs the per-thread Lisp-side initialization: publishes env in TLS,
   records the OS thread id, sets up stack-overflow detection and FPE
   handling, installs the thread's initial special bindings, and binds the
   per-thread special variables.  stack_mark_address anchors the stack
   overflow area to the caller's stack frame. */
void mkcl_setup_thread_lisp_context(MKCL, char * const stack_mark_address)
{
  mkcl_object thread = env->own_thread;

  mkcl_set_thread_env(env);
  thread->thread.tid = mkcl_gettid();
  mkcl_init_call_stack_overflow_area(env, stack_mark_address);
  
  mk_si_clear_all_fpe(env);
  mk_si_enable_fpe(env, @':default');
  
  if (!mkcl_Null(thread->thread.initial_bindings))
    setup_thread_bindings(env, thread->thread.initial_bindings);
  
  /* The order of these bindings matters: mkcl_cleanup_thread_lisp_context()
     documents them being popped in reverse order. */
  mkcl_bds_bind(env, @'mt::*thread*', thread);
  mkcl_bds_bind(env, @'mkcl::*current-working-directory*', mk_cl_Cnil);
  mkcl_bds_bind(env, @'mkcl::*all-current-working-directories*', mk_cl_Cnil);
  mkcl_bds_bind(env, @'si::*dynamic-cons-stack*', mk_cl_Cnil);
  mk_si_trim_dynamic_cons_stack(env);
}

/* Counterpart of mkcl_setup_thread_lisp_context().  The explicit unwinding
   of the four per-thread bindings is currently disabled (#if 0) --
   presumably the bindings are discarded wholesale when the thread's
   binding stack is reset; confirm before re-enabling. */
void mkcl_cleanup_thread_lisp_context(MKCL)
{
#if 0
  mkcl_bds_unwind1(env); /* si::*dynamic-cons-stack* */
  mkcl_bds_unwind1(env); /* mkcl::*all-current-working-directories* */
  mkcl_bds_unwind1(env); /* mkcl::*current-working-directory* */
  mkcl_bds_unwind1(env); /* mt::*thread* */
#endif
}

/* Applies `function' to `args' through the Lisp wrapper si::top-apply when
   that wrapper is available, caching its function object on first use.
   Falls back to a plain cl:apply during early bootstrap, while
   si::top-apply is not yet defined. */
mkcl_object
mkcl_top_apply(MKCL, mkcl_object function, mkcl_object args)
{
  static mkcl_object top_apply_fun = mk_cl_Cnil;

  if (mkcl_Null(top_apply_fun))
    {
      top_apply_fun = MKCL_SYM_FUN(@'si::top-apply');
      if (mkcl_Null(top_apply_fun))
	return mk_cl_apply(env, 2, function, args); /* still bootstrapping */
    }
  return mkcl_funcall2(env, top_apply_fun, function, args);
}

/* OS-level entry point for threads created by MKCL.  arg is the Lisp
   thread object.  Sets up the Lisp context, runs the thread function,
   and performs final cleanup before the OS thread exits. */
static thread_value_t CALL_CONV thread_entry_point(void *arg)
{
  char stack_mark = 0; /* anchors stack-overflow detection to this frame */
  mkcl_object thread = (mkcl_object)arg;
  const mkcl_env env = thread->thread.env;
  thread_value_t status = (thread_value_t) MKCL_THREAD_NORMAL_EXIT;

  /* Sanity check: the environment must point back at this thread object. */
  if (env->own_thread != thread) return (thread_value_t) MKCL_THREAD_ABORTED;

#ifndef MKCL_WINDOWS
  /* The creator holds running_lock until it has finished setting the thread
     up; blocking on it here synchronizes startup.  The lock is single-use
     and destroyed immediately after. */
  pthread_mutex_lock(thread->thread.running_lock);
  /* Insert some private os-level thread initialization here */
  pthread_mutex_unlock(thread->thread.running_lock);
  pthread_mutex_destroy(thread->thread.running_lock);
  thread->thread.running_lock = NULL;
#endif

  /*    The CATCH_ALL point provides us with an elegant way to
   *    exit the thread: we just do an unwind to frs_org.
   */
  MKCL_CATCH_ALL_BEGIN(env) {
    mkcl_object value;

    MKCL_SETUP_CALL_STACK_ROOT_GUARD(env);

    /* 1) Setup the environment for the execution of the thread */
    mkcl_setup_thread_lisp_context(env, &stack_mark);

    mkcl_register_thread_as_active(env, thread);
    mkcl_enable_interrupts(env);

    /* 2) Execute the code. */
    value = mkcl_top_apply(env, thread->thread.function, thread->thread.args);

    mkcl_cleanup_thread_lisp_context(env);
    /* Only record the returned value if nothing (e.g. mt:thread-kill)
       already set a result. */
    if (thread->thread.result_value == MKCL_OBJNULL)
      thread->thread.result_value = value;
    status = (thread_value_t) MKCL_THREAD_NORMAL_EXIT;
#if 0
  } MKCL_CATCH_ALL_IF_CAUGHT {
    /* mkcl_bds_unwind1(env); */
    /* mkcl_bds_unwind1(env); */
    /* mkcl_bds_unwind1(env); */
    /* mkcl_bds_unwind1(env); */
#endif
  } MKCL_CATCH_ALL_END;
  /* NOTE(review): this overwrites the status set above in both the normal
     and the caught-unwind case; presumably mkcl_exit_status() reflects
     either outcome -- confirm. */
  status = (thread_value_t) mkcl_exit_status(env);
  thread->thread.status = mkcl_thread_done;

  /* 3) Exit the thread through this point. */
  thread_final_cleanup(env, thread);
  return status;
}

#ifdef __linux

/* Entry point for the dedicated signal-servicing threads (linux only).
   arg is a cons of (thread-object . signal-number).  The thread loops
   forever, waiting on the signal's semaphore and invoking the thread's
   Lisp handler function each time the semaphore is posted. */
static void * signal_servicing_thread_entry_point(void *arg)
{
  char stack_mark = 0; /* anchors stack-overflow detection to this frame */
  mkcl_object thread = MKCL_CAR((mkcl_object) arg);
  const mkcl_env env = thread->thread.env;
  int sig = mkcl_fixnum_to_word(MKCL_CDR((mkcl_object) arg));

  if (env->own_thread != thread) return (void *) MKCL_THREAD_ABORTED;

  /* Startup handshake with the creator thread; see thread_entry_point(). */
  pthread_mutex_lock(thread->thread.running_lock);
  /* Insert some private thread initialization here */
  pthread_mutex_unlock(thread->thread.running_lock);
  pthread_mutex_destroy(thread->thread.running_lock);
  thread->thread.running_lock = NULL;

  /* The CATCH_ALL point provides us with an elegant way
   * to exit the thread: we just do an unwind to frs_org.
   */
  MKCL_CATCH_ALL_BEGIN(env) {
    MKCL_SETUP_CALL_STACK_ROOT_GUARD(env);

    /* 1) Setup the environment for the execution of the thread */
#if 1
    mkcl_setup_thread_lisp_context(env, &stack_mark);
#else
    /* Dead branch: kept as the inlined equivalent of the call above. */
    mkcl_set_thread_env(env);
    thread->thread.tid = mkcl_gettid();
    mkcl_init_call_stack_overflow_area(env, &stack_mark);

    mk_si_clear_all_fpe(env);
    mk_si_enable_fpe(env, @':default');
    
    if (!mkcl_Null(thread->thread.initial_bindings))
      setup_thread_bindings(env, thread->thread.initial_bindings);
    
    mkcl_bds_bind(env, @'mt::*thread*', thread);
    mkcl_bds_bind(env, @'mkcl::*current-working-directory*', mk_cl_Cnil);
    mkcl_bds_bind(env, @'mkcl::*all-current-working-directories*', mk_cl_Cnil);
    mkcl_bds_bind(env, @'si::*dynamic-cons-stack*', mk_cl_Cnil);
    mk_si_trim_dynamic_cons_stack(env);
#endif

    mkcl_register_thread_as_active(env, thread);
    mkcl_enable_interrupts(env);

    /* 2) Execute the code: service the signal's semaphore forever.
       The loop only exits through a non-local unwind (thread shutdown). */
    for (;;)
      {
	int rc = 0;
	int received_signo;

	do {
	  MKCL_LIBC_Zzz(env, @':io', rc = sem_wait(mkcl_signals[sig].sem)); /* a sleeping point */
	} while ( rc && errno == EINTR );
	if ( rc ) mkcl_C_lose(env, "signal_servicing_thread_entry_point failed on sem_wait");
	mk_mt_test_for_thread_shutdown(env);

	/* Slot 0 is a stand-in for the process "terminal" signal. */
	received_signo = ((sig == 0) ? mkcl_terminal_signal_number : sig);
	mkcl_top_apply(env, thread->thread.function, MKCL_CONS(env, MKCL_MAKE_FIXNUM(received_signo), mk_cl_Cnil));
      }
    mkcl_cleanup_thread_lisp_context(env); /* unreachable: loop above never falls through */
#if 0
  } MKCL_CATCH_ALL_IF_CAUGHT {
    /* mkcl_bds_unwind1(env); */
    /* mkcl_bds_unwind1(env); */
    /* mkcl_bds_unwind1(env); */
    /* mkcl_bds_unwind1(env); */
    /* We could add some special action here to help stack unwinding
       but we don't need to do anything yet. */
#endif
  } MKCL_CATCH_ALL_END;
  thread->thread.status = mkcl_thread_done;

  thread_final_cleanup(env, thread);
  return NULL;
}

#endif /* __linux */

#ifdef __linux
/* Serializes the thread-interruption protocol: only one interrupt may be
   in flight at a time. */
static pthread_mutex_t mkcl_interrupt_thread_lock;
/* Baseline signal mask installed in interrupt service threads. */
static sigset_t mkcl_standard_sigmask;
#elif defined(MKCL_WINDOWS)
/* Serializes the thread-interruption protocol (Win32 mutex HANDLE). */
static HANDLE mkcl_interrupt_thread_lock;
#endif


#ifdef __linux

/* Semaphore the interrupt requester posts to let the freshly spawned
   interrupt thread proceed with running the interrupt function. */
static sem_t mkcl_run_interrupt_function_sem_obj;
static sem_t * mkcl_run_interrupt_function = &mkcl_run_interrupt_function_sem_obj;

/* Entry point of the helper thread that executes an interrupt function on
   behalf of an interrupted thread (linux).  arg is the interrupted thread
   object; this helper borrows its env.  After running the interrupt it
   restores the interrupted thread's saved execution context and resumes it
   via the thread-resume signal. */
static void *
interrupt_thread_entry_point(void *arg)
{
  char stack_mark = 0; /* anchors stack-overflow detection to this frame */
  int rc = 0;
  mkcl_object thread = (mkcl_object)arg;
  const mkcl_env env = thread->thread.env;

  if (env->own_thread != thread) return (void *) MKCL_THREAD_ABORTED;

  pthread_sigmask(SIG_SETMASK, &mkcl_standard_sigmask, NULL);

#if 1
  /* Wait (bounded to ~5ms) for the interrupt requester to give the go-ahead.
     A timeout is reported but execution proceeds anyway. */
  {
    struct timespec timeout;

    rc = clock_gettime(CLOCK_REALTIME, &timeout);
    if (rc)
      mkcl_FElibc_error(env, "interrupt_thread_entry_point() failed on clock_gettime", 0);

    timeout.tv_nsec += 5 * 1000000; /* 5ms */
    if (timeout.tv_nsec >= 1000000000) { /* normalize carry into tv_sec */
      timeout.tv_nsec -= 1000000000;
      timeout.tv_sec++;
    }

    do {
      rc = sem_timedwait(mkcl_run_interrupt_function, &timeout);
    } while ( rc && errno == EINTR );
    if ( rc )
      if ( errno == ETIMEDOUT )
	{ fputs("\n;; MKCL internal error: interrupt synchronization timedout!\n", stderr); }
      else mkcl_C_lose(env, "mk_mt_interrupt_thread failed on sem_timedwait");
  }
#else
  /* Dead branch: unbounded wait variant. */
  do {
    rc = sem_wait(mkcl_run_interrupt_function);
  } while ( rc && errno == EINTR );
  if ( rc ) mkcl_C_lose(env, "interrupt_thread_entry_point failed on sem_wait");
#endif

  /* The CATCH_ALL point provides us with an elegant way to
   * exit the thread: we just do an unwind to frs_org.
   */
  MKCL_CATCH_ALL_BEGIN(env) {
    /* 1) Setup the environment for the execution of the thread */
    mkcl_set_thread_env(env);
    mkcl_init_call_stack_overflow_area(env, &stack_mark);
    mk_si_clear_all_fpe(env);
    mkcl_reactivate_fpe_set(env);

    mkcl_enable_interrupts(env);
    /* 2) Execute the code. */
    mkcl_funcall0(env, thread->thread.interrupt);
    mkcl_disable_interrupts(env);
    env->nlj_fr = NULL; /* make sure to signal a normal exit */
#if 0
  } MKCL_CATCH_ALL_IF_CAUGHT {
    /* We could add some special action here to help stack unwinding
       but we don't need to do anything yet. */
#endif
  } MKCL_CATCH_ALL_END;

  if (pthread_mutex_lock(&mkcl_interrupt_thread_lock))
    mkcl_lose(env, "interrupt_thread_entry_point failed on pthread_mutex_lock");

  /* Pop the interrupted-thread control record and restore the interrupted
     thread's identity, call-stack bookkeeping and multiple values into env
     before resuming it. */
  {
    int rc = 0;
    int sig = mkcl_get_option(MKCL_OPT_THREAD_RESUME_SIGNAL);
    int count = thread->thread.interrupt_count - 1;
    struct interrupted_thread_ctrl * p = &(thread->thread.interrupted_threads[count]);
	  
    thread->thread.thread = p->thread_ident;

    env->cs_org = p->cs_org;
    env->cs_limit = p->cs_limit;
    env->cs_size = p->cs_size;
    env->cs_overflow_size = p->cs_overflow_size;
    env->cs_overflowing = p->cs_overflowing;

    if ( env->nlj_fr == NULL )
      { /* This is a normal (not unwinding) return. */
	env->disable_interrupts = p->disable_interrupts;
#if MKCL_DEBUG_INTERRUPT_MASK	
	env->interrupt_disabler_file = p->interrupt_disabler_file;
	env->interrupt_disabler_lineno = p->interrupt_disabler_lineno;
#endif
      }

    /* Restore interrupted thread values. */
    env->nvalues = p->nvalues;
    {
      int j;
      const int max = p->nvalues;
      mkcl_object * const values = p->values;
      mkcl_object * const env_values = env->values;
      
      for (j = 0; j < max; j++) env_values[j] = values[j];
    }

    thread->thread.interrupt_count = count; /* interrupted_threads stack poped */

    mkcl_interrupted_thread_env = env;
    env->own_thread->thread.resume_handler_ran = FALSE;

    /* resume the interrupted thread */
    if ((rc = pthread_kill(thread->thread.thread, sig)))
      switch (rc)
	{
	case ESRCH: /* the interrupted thread died on us unexpectedly! */
	  /* Should we flag this untimely death or simply wimper away? */
	  /* Let's be quiet for now. */
	  break;
	case EINVAL:
	  mkcl_lose(env, "interrupt_thread_entry_point: invalid signal used with pthread_kill");
	  break;
	default:
	  mkcl_lose(env, "interrupt_thread_entry_point failed on pthread_kill");
	  break;
	}
    else
      {
	/* Wait until the resumed thread acknowledges it is running again. */
	do {
	  rc = sem_wait(mkcl_interrupted_thread_resumed);
	} while ( rc && errno == EINTR );
	if ( rc ) mkcl_C_lose(env, "interrupt_thread_entry_point failed on sem_wait");
      }
    mkcl_interrupted_thread_env = NULL;
  }

  if (pthread_mutex_unlock(&mkcl_interrupt_thread_lock))
    mkcl_lose(env, "interrupt_thread_entry_point failed on pthread_mutex_unlock");

  pthread_detach(pthread_self()); /* no join for interrupt threads. */

  return NULL;
}

#elif defined(MKCL_WINDOWS)

static void resume_thread_unwinding(void)
{ /* This function is never called through normal means.
     Its call is the result a direct instruction pointer
     manipulation through the "CONTEXT" structure.
  */
  /* In this function we cannot assume that we have a valid
     stack frame to work with, so no automatic variable allowed!
  */
  const mkcl_env env = MKCL_ENV();

  /* Continue the non-local jump that was pending when the thread was
     interrupted; env->nlj_fr was set up by the interrupt machinery. */
  mkcl_unwind(env, env->nlj_fr);
  /* We should never return from the line just above! */
  mkcl_lose(env, "resume_thread_unwinding failed!");
}

/* Entry point of the helper thread that executes an interrupt function on
   behalf of an interrupted thread (Windows).  arg is the interrupted thread
   object; this helper borrows its env.  After running the interrupt it
   restores the interrupted thread's saved context (possibly redirecting its
   instruction pointer into resume_thread_unwinding) and resumes it with
   ResumeThread. */
static thread_value_t CALL_CONV interrupt_thread_entry_point(void * arg)
{
  char stack_mark = 0; /* anchors stack-overflow detection to this frame */
  mkcl_object thread = (mkcl_object)arg;
  const mkcl_env env = thread->thread.env;

  if (env->own_thread != thread) return MKCL_THREAD_ABORTED;

  /* The CATCH_ALL point is the destination provides us with an elegant way
   * to exit the thread: we just do an unwind up to frs_top.
   */
  MKCL_CATCH_ALL_BEGIN(env) {
    /* 1) Setup the environment for the execution of the thread */
    mkcl_set_thread_env(env);
    mkcl_init_call_stack_overflow_area(env, &stack_mark);
    
    mk_si_clear_all_fpe(env);
    mkcl_reactivate_fpe_set(env);
    
    mkcl_enable_interrupts(env);
    /* 2) Execute the code. */
    mkcl_funcall0(env, thread->thread.interrupt);
    mkcl_disable_interrupts(env);
    env->nlj_fr = NULL; /* make sure to signal a normal exit */
#if 0
  } MKCL_CATCH_ALL_IF_CAUGHT {
    /* We could add some special action here to help stack unwinding
       but we don't need to do anything yet. */
#endif
  } MKCL_CATCH_ALL_END;

  /* Serialize the resume protocol against other interrupts. */
  {
    DWORD wait_val;

    MKCL_LIBC_NO_INTR(env, wait_val = WaitForSingleObject(mkcl_interrupt_thread_lock, INFINITE));
    switch (wait_val)
      {
      case WAIT_OBJECT_0: break;
      case WAIT_TIMEOUT:
      case WAIT_ABANDONED:
      case WAIT_FAILED:
      default:
	mkcl_FEwin32_error(env, "interrupt-thread failed to acquire lock", 0);
      }
  }

  /* Pop the interrupted-thread control record and restore the interrupted
     thread's handle, call-stack bookkeeping and multiple values into env
     before resuming it. */
  {
    HANDLE os_thread;
    HANDLE old_os_thread;
    int count = thread->thread.interrupt_count - 1;
    struct interrupted_thread_ctrl * p = &(thread->thread.interrupted_threads[count]);
	  
    old_os_thread = thread->thread.thread;
    if (!CloseHandle(old_os_thread))
      mkcl_FEwin32_error(env, "Cannot call CloseHandle to dispose of old interrupt thread for thread ~A", 1, thread);
    os_thread = thread->thread.thread = p->thread_ident;
    p->thread_ident = NULL;

    env->cs_org = p->cs_org;
    env->cs_limit = p->cs_limit;
    env->cs_size = p->cs_size;
    env->cs_overflow_size = p->cs_overflow_size;
    env->cs_overflowing = p->cs_overflowing;

    if ( env->nlj_fr == NULL )
      { /* This is a normal (not unwinding) return. */
	env->disable_interrupts = p->disable_interrupts;
#if MKCL_DEBUG_INTERRUPT_MASK	
	env->interrupt_disabler_file = p->interrupt_disabler_file;
	env->interrupt_disabler_lineno = p->interrupt_disabler_lineno;
#endif
      }

    /* Restore interrupted thread values. */
    env->nvalues = p->nvalues;
    {
      int j;
      const int max = p->nvalues;
      mkcl_object * const values = p->values;
      mkcl_object * const env_values = env->values;
      
      for (j = 0; j < max; j++) env_values[j] = values[j];
    }
    
    thread->thread.interrupt_count = count; /* interrupted_threads stack poped */

    env->own_thread->thread.resume_handler_ran = FALSE; /* needed? No. */

    /* If the interrupt function initiated a non-local jump, redirect the
       suspended thread's instruction pointer so it resumes inside
       resume_thread_unwinding and completes the unwind. */
    if ( env->nlj_fr != NULL )
      {
	CONTEXT context;
	context.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
	if (!GetThreadContext(os_thread, &context))
	  mkcl_FEwin32_error(env, "Cannot get context for thread ~A", 1, thread);
#if defined(_X86_)
	context.Eip = (DWORD) resume_thread_unwinding;
#elif defined(__x86_64)
	context.Rip = (DWORD64) resume_thread_unwinding;
#else
#error Unknown processor architecture
#endif
	if (!SetThreadContext(os_thread, &context))
	  mkcl_FEwin32_error(env, "Cannot set context for thread ~A", 1, thread);
      }

    {
      DWORD count;

      if ((count = ResumeThread(os_thread)) == (DWORD)-1)
	mkcl_FEwin32_error(env, "Cannot resume interrupted thread ~A", 1, thread);
    }
  }

  if (!ReleaseMutex(mkcl_interrupt_thread_lock))
    mkcl_FEwin32_error(env, "interrupt thread failed to release interrupt lock", 0);

  return 0;
}

#endif /* defined(MKCL_WINDOWS) */



/* Establishes the thread's initial special-variable bindings from an alist
   of (symbol . value) pairs.  A scratch hash table is used so that a symbol
   appearing more than once in the alist is bound only once -- the first
   occurrence wins. */
static void setup_thread_bindings(MKCL, mkcl_object initial_bindings)
{
  mkcl_object l = initial_bindings;
  mkcl_object table = mk_cl__make_hash_table(env, @'eq',
					     MKCL_MAKE_FIXNUM(1024),
					     MKCL_MAKE_FIXNUM(1024),
					     MKCL_MAKE_FIXNUM(1));

  mkcl_loop_for_in(env, l)
    {
      mkcl_object cell = MKCL_CONS_CAR(l);

      mkcl_object sym = MKCL_CONS_CAR(cell);
      mkcl_object value = MKCL_CONS_CDR(cell);

      mkcl_check_symbol(env, sym);
      if (NULL == mkcl_search_hash(env, sym, table)) /* not seen yet? */
	{
	  mkcl_sethash(env, sym, table, value);
	  mkcl_bds_bind(env, sym, value);
	}
    }
  mkcl_end_loop_for_in;
}


/* Captures the current thread's entire special-binding stack
   (bds_org .. bds_top, inclusive) as an alist of (symbol . value)
   pairs, suitable for seeding a new thread's initial bindings. */
static mkcl_object
_mkcl_specials_snapshot(MKCL)
{
  struct mkcl_bds_bd * const top = env->bds_top;
  struct mkcl_bds_bd * entry = env->bds_org;
  mkcl_object snapshot = mk_cl_Cnil;

  while (entry <= top)
    {
      snapshot = mk_cl_acons(env, entry->symbol, MKCL_SYM_VAL(env, entry->symbol), snapshot);
      entry++;
    }
  return(snapshot);
}

/* Releases the resources held by an environment: clears the three bignum
   work registers, then frees the environment structure itself. */
void
_mkcl_dealloc_env(MKCL)
{
  int reg;

  for (reg = 0; reg < 3; reg++)
    _mkcl_big_clear(env->big_register[reg]);

  mkcl_dealloc(env, env);
}

/* Allocates and zero/default-initializes a fresh MKCL environment.
   Returns NULL on allocation failure (it must never signal: it runs too
   early in the world's bootstrap).  `env' may itself be NULL during boot,
   in which case the boot allocator is used for the specials vector. */
const mkcl_env _mkcl_alloc_env(MKCL)
{
  /* This function is called so early in the life of a MKCL world that we cannot
     allow it to throw a CL condition under any circonstance,
     or to report any kind of error in any case.
  */
  const mkcl_env new_env = _mkcl_alloc_raw_env(env);

  if (new_env == NULL) /* alloc failed no point in going any further. */
    return(NULL);
  /*
   * An uninitialized environment _always_ disables interrupts. They
   * are activated later on by the thread entry point or mkcl_init_unixint().
   */
  mkcl_disable_interrupts(new_env);
  new_env->sleeping_on = mk_cl_Cnil;

  /* Call-stack bookkeeping: filled in later by mkcl_init_call_stack_overflow_area(). */
  new_env->cs_org = NULL;
  new_env->cs_limit = NULL;
  new_env->cs_size = 0;
  new_env->cs_overflow_size = 0;
  new_env->cs_overflowing = FALSE;

  new_env->nvalues = 0;
  {
    int i;

    for (i = 0; i < MKCL_MULTIPLE_VALUES_LIMIT; i++)
      new_env->values[i] = mk_cl_Cnil;
  }

  new_env->function = mk_cl_Cnil;

  new_env->temp_stack_size = 0;
  new_env->temp_stack_size_limit = 0;
  new_env->temp_stack = NULL;
  new_env->temp_stack_top = NULL;
  new_env->temp_stack_upper_bound = NULL;
  new_env->temp_stack_overflow_size = 0;
  new_env->temp_stack_overflowing = FALSE;

  new_env->bds_size = 0;
  new_env->bds_size_limit = 0;
  new_env->bds_org = NULL;
  new_env->bds_top = NULL;
  new_env->bds_upper_bound = NULL;
  new_env->bds_overflow_size = 0;
  new_env->bds_overflowing = FALSE;

  /* The specials vector must use the boot allocator before the first
     environment exists. */
  new_env->specials_size = MKCL_STARTUP_SPECIALS_SIZE;
  new_env->specials = ((env == NULL) 
		       ? _mkcl_boot_alloc_unprotected(MKCL_STARTUP_SPECIALS_SIZE * sizeof(mkcl_object))
		       : mkcl_alloc(env, MKCL_STARTUP_SPECIALS_SIZE * sizeof(mkcl_object)));

  if (new_env->specials)
    {
      mkcl_index i;
      
      for (i = 0; i < MKCL_STARTUP_SPECIALS_SIZE; i++)
	new_env->specials[i] = MKCL_END_OF_BDS_CHAIN;
    }
  else
    return(NULL);

  new_env->ihs_top = NULL;

  new_env->frs_size = 0;
  new_env->frs_size_limit = 0;
  new_env->frs_org = NULL;
  new_env->frs_top = NULL;
  new_env->frs_upper_bound = NULL;
  new_env->nlj_fr = NULL;
  new_env->go_label_index = -1; /* Normally this should be an invalid index. */
  new_env->frs_overflow_size = 0;
  new_env->frs_overflowing = FALSE;

  new_env->string_pool = mk_cl_Cnil;

  new_env->c_env = NULL;

  new_env->fmt_aux_stream = mk_cl_Cnil;

  /* Pretty-printer scratch state. */
  new_env->print_pretty = FALSE;
  new_env->queue = NULL;
  new_env->indent_stack = NULL;
  new_env->qh = new_env->qt = new_env->qc = new_env->isp = new_env->iisp = 0;

  new_env->big_register[0] = mk_cl_Cnil;
  new_env->big_register[1] = mk_cl_Cnil;
  new_env->big_register[2] = mk_cl_Cnil;

  new_env->own_thread = mk_cl_Cnil;

  new_env->method_hash_clear_list = mk_cl_Cnil;
  new_env->method_hash = mk_cl_Cnil;
  new_env->method_spec_vector = mk_cl_Cnil;
  new_env->method_generation = 0;

  new_env->fficall = NULL;

  new_env->altstack = NULL;
  new_env->altstack_size = 0;

  new_env->fpe_control_bits = 0;

  new_env->interrupt_disabler_file = NULL;
  new_env->interrupt_disabler_lineno = 0;

  new_env->fp_drone = 0;
  /* new_env->alloc = NULL; _mkcl_alloc_raw_env() takes care of this one. */

  new_env->cs_org_request = NULL;
  new_env->cs_size_request = 0;

  return new_env;
}

/* Second-stage environment initialization: sets up the method-dispatch
   cache, the stacks (per `params'), and the three bignum work registers.
   `env' is the creating thread's environment; `new_env' the one being
   initialized. */
void
mkcl_init_env(MKCL, mkcl_env new_env, struct mkcl_thread_init_parameters * params)
{
  new_env->c_env = NULL;

  new_env->string_pool = mk_cl_Cnil;

  new_env->method_hash = mk_cl_Cnil;
  new_env->method_spec_vector = mk_cl_Cnil;
  new_env->method_generation = 0;
  _mkcl_set_method_hash_size(new_env, 4096);
  new_env->method_hash_clear_list = mk_cl_Cnil;

  mkcl_init_stacks(env, new_env, params);

  {
    int i;
    for (i = 0; i < 3; i++)
      {
	mkcl_object x = mkcl_alloc_raw_bignum(env);

	_mkcl_big_init2(x, MKCL_BIG_REGISTER_SIZE);
	new_env->big_register[i] = x;
      }
  }

  new_env->fpe_control_bits = 0;
}

/* Creates a new (not yet started) Lisp thread object with its own fresh
   environment.  `initial_bindings' is either T (inherit a snapshot of the
   creator's special bindings), or an alist of (symbol . value) pairs that
   is copied.  `params' carries stack sizing for mkcl_init_env(). */
mkcl_object
mkcl_make_thread(MKCL, mkcl_object name, mkcl_object initial_bindings, struct mkcl_thread_init_parameters * params)
{
  const mkcl_env new_env = _mkcl_alloc_env(env);
  mkcl_object thread = mkcl_alloc_raw_thread(env);

  thread->thread.status = mkcl_thread_initialized;
  thread->thread.name = name;
  thread->thread.function = mk_cl_Cnil;
  thread->thread.args = mk_cl_Cnil;
  thread->thread.result_value = MKCL_OBJNULL;
  thread->thread.detached = FALSE;
  thread->thread.thread = 0;
  thread->thread.base_thread = 0;
  thread->thread.tid = 0;
  thread->thread.interrupt = mk_cl_Cnil;
  thread->thread.plist = mk_cl_Cnil;
  thread->thread.shutdown_requested = false;
  thread->thread.env = new_env;

#if __unix
  thread->thread.running_lock = NULL;
  sigemptyset(&thread->thread.saved_sigmask);
#endif
  thread->thread.resume_handler_ran = FALSE;
  thread->thread.sigmask_frs_marker = NULL;
  thread->thread.interrupt_count = 0;
  /* Clear the whole saved-interrupt-context stack. */
  {
    int i, j;

    for (i = 0; i < MKCL_MAX_INTERRUPTS; i++)
      {
	thread->thread.interrupted_threads[i].thread_ident = 0;

	thread->thread.interrupted_threads[i].cs_org = NULL;
	thread->thread.interrupted_threads[i].cs_limit = NULL;
	thread->thread.interrupted_threads[i].cs_size = 0;
	thread->thread.interrupted_threads[i].cs_overflow_size = 0;
	thread->thread.interrupted_threads[i].cs_overflowing = FALSE;

	thread->thread.interrupted_threads[i].disable_interrupts = FALSE;
	thread->thread.interrupted_threads[i].interrupt_disabler_file = NULL;
	thread->thread.interrupted_threads[i].interrupt_disabler_lineno = 0;

	thread->thread.interrupted_threads[i].nvalues = 0;

	mkcl_object * values = thread->thread.interrupted_threads[i].values;
	for (j = 0; j < MKCL_MULTIPLE_VALUES_LIMIT; j++) values[j] = mk_cl_Cnil;
      }   
  }

  thread->thread.initial_bindings = NULL;

  if (initial_bindings == mk_cl_Ct)
    { /* New thread inherits its initial bindings from its creator thread. */
      thread->thread.initial_bindings = _mkcl_specials_snapshot(env);
    }
  else
    thread->thread.initial_bindings = mk_cl_copy_alist(env, initial_bindings);

  new_env->own_thread = thread;
  mkcl_init_env(env, new_env, params);

  /* Register a finalizer so the thread's resources are reclaimed by the GC. */
  mk_si_set_finalizer(env, thread, mk_cl_Ct);

  return thread;
}

#ifdef MKCL_WINDOWS
/* Lock and semaphores guarding mkcl_core.imported_thread_pool, the pool of
   pre-built thread objects handed to threads adopted ("imported") into MKCL. */
static CRITICAL_SECTION mkcl_imported_thread_pool_lock;
static HANDLE mkcl_imported_thread_pool_empty; /* Semaphore */
static HANDLE mkcl_imported_thread_pool_full; /* Semaphore */
#else
/* POSIX equivalents of the pool lock and its empty/full semaphores. */
static pthread_mutex_t mkcl_imported_thread_pool_lock;
static sem_t mkcl_imported_thread_pool_empty_sem_obj;
static sem_t * mkcl_imported_thread_pool_empty = &mkcl_imported_thread_pool_empty_sem_obj;
static sem_t mkcl_imported_thread_pool_full_sem_obj;
static sem_t * mkcl_imported_thread_pool_full = &mkcl_imported_thread_pool_full_sem_obj;
#endif


static void fill_imported_thread_pool(MKCL)
{
  volatile bool locked = false;
  mkcl_object head;
  int i;

  MKCL_UNWIND_PROTECT_BEGIN(env) {
    mkcl_interrupt_status old_intr;
    struct mkcl_thread_init_parameters init_params = { 0 };

    mkcl_get_interrupt_status(env, &old_intr);
    mkcl_disable_interrupts(env);
#ifdef MKCL_WINDOWS
    EnterCriticalSection(&mkcl_imported_thread_pool_lock);
#else
    if (pthread_mutex_lock(&mkcl_imported_thread_pool_lock))
      mkcl_lose(env, "fill_imported_thread_pool failed on pthread_mutex_lock");
#endif
    locked = true;
    mkcl_set_interrupt_status(env, &old_intr);

    head = mkcl_core.imported_thread_pool;
    for (i = 0; i < 5; i++)
      head = MKCL_CONS(env, mkcl_make_thread(env, mk_cl_Cnil, mk_cl_Cnil, &init_params), head);
    mkcl_core.imported_thread_pool = head;

  } MKCL_UNWIND_PROTECT_EXIT {
#ifdef MKCL_WINDOWS
    if (locked)