#   ifdef IA64
	me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may 	*/
    /* end up pushing more callee-save registers.			*/
#   ifndef GC_DARWIN_THREADS
#   ifdef STACK_GROWS_UP
	me -> stop_info.stack_ptr += SP_SLOP;
#   else
	me -> stop_info.stack_ptr -= SP_SLOP;
#   endif
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}

void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped.	*/
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
    
#if defined(GC_DGUX386_THREADS)
#define __d10_sleep sleep
#endif /* GC_DGUX386_THREADS */

/* A wrapper for the standard C sleep function	*/
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
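
/* For reference, WRAP_FUNC/REAL_FUNC are defined near the top of this	*/
/* file (outside this excerpt) roughly as below: with ld wrapping the	*/
/* real function is reached through the __real_ alias; otherwise the	*/
/* wrapper is simply exported under a GC_ name.  A sketch; check your	*/
/* copy:								*/
#if 0
#   ifdef GC_USE_LD_WRAP
#     define WRAP_FUNC(f) __wrap_##f
#     define REAL_FUNC(f) __real_##f
#   else
#     define WRAP_FUNC(f) GC_##f
#     define REAL_FUNC(f) f
#   endif
#endif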

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;   	/* 1 ==> in our thread table, but 	*/
				/* parent hasn't yet noticed.		*/
};

/* Called at thread exit.				*/
/* Never called for main thread.  That's OK, since it	*/
/* results in at most a tiny one-time leak.  And 	*/
/* linuxthreads doesn't reclaim the main thread's	*/
/* resources or id anyway.				*/
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
    	GC_delete_thread(pthread_self());
    } else {
	me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_COMPILER_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    /* The following may run the GC from a "nonexistent" thread.	*/
    GC_wait_for_gc_completion(FALSE);
    UNLOCK();
}

int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;
    
    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused both the test and
       real code to fail gratuitously.  Having looked at the system
       pthread library source code, I see how this return code may be
       generated.  In one code path, pthread_join() just returns the
       errno setting of the thread being joined.  This does not match
       the POSIX specification or the local man pages, so I have taken
       the liberty to catch this one spurious return value, properly
       conditionalized on GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}

int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;
    
    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}

GC_bool GC_in_thread_creation = FALSE;

GC_PTR GC_get_thread_stack_base()
{  
# ifdef HAVE_PTHREAD_GETATTR_NP
  pthread_t my_pthread;
  pthread_attr_t attr;
  ptr_t stack_addr;
  size_t stack_size;
  
  my_pthread = pthread_self();  
  if (pthread_getattr_np (my_pthread, &attr) != 0)
    {
#   ifdef DEBUG_THREADS
      GC_printf0("Cannot determine stack base for attached thread");
#   endif
      return 0;
    }

  pthread_attr_getstack (&attr, (void **) &stack_addr, &stack_size);
  pthread_attr_destroy (&attr);
  
#   ifdef DEBUG_THREADS
	GC_printf1("attached thread stack address: 0x%x\n", stack_addr);
#   endif

#   ifdef STACK_GROWS_DOWN
      return stack_addr + stack_size;
#   else
      return stack_addr;
#   endif

# else
#   ifdef DEBUG_THREADS
	GC_printf0("Can not determine stack base for attached thread");
#   endif
  return 0;
# endif
}

void GC_register_my_thread()
{
  GC_thread me;
  pthread_t my_pthread;

  my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf1("Attaching thread 0x%lx\n", my_pthread);
      GC_printf1("pid = %ld\n", (long) getpid());
#   endif
  
  /* Check to ensure this thread isn't attached already. */
  LOCK();
  me = GC_lookup_thread (my_pthread);
  UNLOCK();
  if (me != 0)
    {
#   ifdef DEBUG_THREADS
      GC_printf1("Attempt to re-attach known thread 0x%lx\n", my_pthread);
#   endif
      return;
    }

  LOCK();
  GC_in_thread_creation = TRUE;
  me = GC_new_thread(my_pthread);
  GC_in_thread_creation = FALSE;

  me -> flags |= DETACHED;  

#ifdef GC_DARWIN_THREADS
    me -> stop_info.mach_thread = mach_thread_self();
#else
    me -> stack_end = GC_get_thread_stack_base();    
    if (me -> stack_end == 0)
      GC_abort("Can not determine stack base for attached thread");
    
#   ifdef STACK_GROWS_DOWN
      me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#   else
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
#endif

#   ifdef IA64
      me -> backing_store_end = (ptr_t)
			(GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this 	*/
      /* from /proc, but the hook to do so isn't there yet.		*/
#   endif /* IA64 */

#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        GC_init_thread_local(me);
#   endif
  UNLOCK();
}

void GC_unregister_my_thread()
{
  pthread_t my_pthread;

  my_pthread = pthread_self();

#   ifdef DEBUG_THREADS
      GC_printf1("Detaching thread 0x%lx\n", my_pthread);
#   endif

  GC_thread_exit_proc (0);
}
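
/* Usage sketch (hypothetical client code, not part of this file):	*/
/* a thread created outside the wrapped pthread_create, e.g. by a	*/
/* foreign library, must attach itself before touching the collector	*/
/* and detach again on the way out.					*/
#if 0
void * foreign_thread_body(void * arg)
{
    GC_register_my_thread();		/* enter the GC's thread table	*/
    /* ... allocate and use collectable memory here ... */
    GC_unregister_my_thread();		/* leave the table before exit	*/
    return 0;
}
#endif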

void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
#ifdef GC_DARWIN_THREADS
    me -> stop_info.mach_thread = mach_thread_self();
#else
    me -> stop_info.stack_ptr = 0;
#endif
    me -> flags = si -> flags;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)	*/
    /* doesn't work because the stack base in /proc/self/stat is the 	*/
    /* one for the main thread.  There is a strong argument that that's	*/
    /* a kernel bug, but a pervasive one.				*/
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
		                & ~(GC_page_size - 1));
#	  ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#	  endif
	/* Needs to be plausible, since an asynchronous stack mark	*/
	/* should not crash.						*/
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.	 */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
			(GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this 	*/
      /* from /proc, but the hook to do so isn't there yet.		*/
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
	GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));	/* Last action on si.	*/
    					/* OK to deallocate.	*/
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
 	LOCK();
        GC_init_thread_local(me);
	UNLOCK();
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit		*/
    /* while a collection that thinks we're alive is trying to stop     */
    /* us.								*/
    return(result);
}

int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
		  const pthread_attr_t *attr,
                  void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si; 
	/* This is otherwise saved only in an area mmapped by the thread */
	/* library, which isn't visible to the collector.		 */
 
    /* We resist the temptation to muck with the stack size here,	*/
    /* even if the default is unreasonably small.  That's the client's	*/
    /* responsibility.							*/

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
						 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
	size_t stack_size;
	if (NULL == attr) {
	   pthread_attr_t my_attr;
	   pthread_attr_init(&my_attr);
	   pthread_attr_getstacksize(&my_attr, &stack_size);
	} else {
	   pthread_attr_getstacksize(attr, &stack_size);
	}
#       ifdef PARALLEL_MARK
	  GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
          /* FreeBSD-5.3/Alpha: default pthread stack is 64K, 	*/
	  /* HBLKSIZE=8192, sizeof(word)=8			*/
	  GC_ASSERT(stack_size >= 65536);
#       endif
	/* Our threads may need to do some work for the GC.	*/
	/* Ridiculously small stacks won't work for us, and	*/
	/* they probably wouldn't work for the client anyway.	*/
      }
#   endif
    if (NULL == attr) {
	detachstate = PTHREAD_CREATE_JOINABLE;
    } else { 
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
		   pthread_self());
#   endif

    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);

#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.		*/
    /* This also ensures that we hold onto si until the child is done	*/
    /* with it.  Thus it doesn't matter whether it is otherwise		*/
    /* visible to the collector.					*/
    if (0 == result) {
	while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
	}
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}
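
/* Hypothetical client view (a sketch, not part of this file): with	*/
/* the non-wrap macro scheme, gc.h's pthread redirection replaces	*/
/* call sites at compile time, so ordinary pthreads code picks up the	*/
/* wrapper above automatically.  The exact feature macro varies by	*/
/* version (GC_THREADS, GC_LINUX_THREADS, ...); check your gc.h.	*/
#if 0
#define GC_THREADS
#include "gc.h"		/* #defines pthread_create to GC_pthread_create */
#include <pthread.h>

static void * worker(void * arg) { return GC_MALLOC(64); }

int spawn_worker(pthread_t *t)
{
    return pthread_create(t, 0, worker, 0);	/* calls the wrapper */
}
#endif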

#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
  			          GC_word old, GC_word new_val)
  {
    GC_bool result;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }
  
  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }

#endif /* GENERIC_COMPARE_AND_SWAP */
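
/* Usage sketch (hypothetical): the emulated primitives above keep the	*/
/* interface of the real ones, so callers can build, e.g., an atomic	*/
/* increment from a compare-and-exchange retry loop.			*/
#if 0
static GC_word increment_counter(volatile GC_word * counter)
{
    GC_word old;
    do {
	old = *counter;
    } while (!GC_compare_and_exchange(counter, old, old + 1));
    return old;	/* previous value, as GC_atomic_add(counter, 1) returns */
}
#endif
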
/* Spend a few cycles in a way that can't introduce contention with	*/
/* other threads.							*/
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) { 
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
	/* Something that's unlikely to be optimized away. */
	GC_noop(++dummy);
#     endif
    }
}
    
#define SPIN_MAX 128	/* Maximum number of calls to GC_pause before	*/
			/* giving up.					*/

VOLATILE GC_bool GC_collecting = 0;
			/* A hint that we're in the collector and       */
                        /* holding the allocation lock for an           */
                        /* extended period.                             */

#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either	*/
/* because we don't have a GC_test_and_set implementation, or because 	*/
/* we don't want to risk sleeping, we can still try spinning on 	*/
/* pthread_mutex_trylock for a while.  This appears to be very		*/
/* beneficial in many cases.						*/
/* I suspect that under high contention this is nearly always better	*/
/* than the spin lock.  But it's a bit slower on a uniprocessor.	*/
/* Hence we still default to the spin lock.				*/
/* This is also used to acquire the mark lock for the parallel		*/
/* marker.								*/

/* Here we use a strict exponential backoff scheme.  I don't know 	*/
/* whether that's better or worse than the above.  We eventually 	*/
/* yield by calling pthread_mutex_lock(); it never makes sense to	*/
/* explicitly sleep.							*/

#define LOCK_STATS
#ifdef LOCK_STATS
  unsigned long GC_spin_count = 0;
  unsigned long GC_block_count = 0;
  unsigned long GC_unlocked_count = 0;
#endif

void GC_generic_lock(pthread_mutex_t * lock)
{
#ifndef NO_PTHREAD_TRYLOCK
    unsigned pause_length = 1;
    unsigned i;
    
    if (0 == pthread_mutex_trylock(lock)) {
#       ifdef LOCK_STATS
	    ++GC_unlocked_count;
#       endif
	return;
    }
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
	for (i = 0; i < pause_length; ++i) {
	    GC_pause();
	}
        switch(pthread_mutex_trylock(lock)) {
	    case 0:
#		ifdef LOCK_STATS
		    ++GC_spin_count;
#		endif
		return;
	    case EBUSY:
		break;
	    default:
		ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
#endif /* !NO_PTHREAD_TRYLOCK */
#   ifdef LOCK_STATS
	++GC_block_count;
#   endif
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */

#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation */
/* as STL alloc.h.  This isn't really the right way to do this,   */
/* but until the POSIX scheduling mess gets straightened out ...  */

volatile unsigned int GC_allocate_lock = 0;


void GC_lock()
{
#   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
	    /*
             * got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
	     */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
		/* Under Linux very short sleeps tend to wait until	*/
		/* the current time quantum expires.  On old Linux	*/
		/* kernels nanosleep(<= 2ms) just spins.		*/
		/* (Under 2.4, this happens only for real-time		*/
		/* processes.)  We want to minimize both behaviors	*/
		/* here.						*/
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
	} else {
	    struct timespec ts;
	
	    if (i > 24) i = 24;
			/* Don't wait for more than about 15msecs, even	*/
			/* under extreme contention.			*/
	    ts.tv_sec = 0;
	    ts.tv_nsec = 1 << i;
	    nanosleep(&ts, 0);
	}
    }
}

#else  /* !USE_SPIN_LOCK */
void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
	pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else  /* !NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPIN_LOCK */

#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions      */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner           */
  /* field even when it fails to acquire the mutex.  This causes        */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                    */
  /* According to the man page, we should use                           */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually   */
  /* defined.                                                           */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
	ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
	ABORT("pthread_mutex_unlock failed");
    }
}

/* The collector must wait for freelist builders for two reasons:	*/
/* 1) Mark bits may still be getting examined without lock.		*/
/* 2) Partial free lists referenced only by locals may not be scanned 	*/
/*    correctly, e.g. if they contain "pointer-free" objects, since the	*/
/*    free-list link may be ignored.					*/
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
	ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
	GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
	ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */

#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
	ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
	GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
	ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */