diff --git a/libjava/ChangeLog b/libjava/ChangeLog
index 38bcb31fc19eb5beb38e8850eba17668713e114d..677fd8e29f824bd4dc8ff6ee4e88d162d8b37f9f 100644
--- a/libjava/ChangeLog
+++ b/libjava/ChangeLog
@@ -1,6 +1,13 @@
+2002-03-20  Bryce McKinlay  <bryce@waitaki.otago.ac.nz>
+
+	* posix-threads.cc (_Jv_ThreadSelf_out_of_line): Use write_barrier,
+	not release_set.
+	* sysdep/powerpc/locks.h (write_barrier): New function.
+	* sysdep/i386/locks.h (write_barrier): New function.
+
 2002-03-19  Martin Kahlert  <martin.kahlert@infineon.com>
 
-	* include/jni.h Use correct C comments 
+	* include/jni.h: Use correct C comments.
 
 2002-03-18  Tom Tromey  <tromey@redhat.com>
 
diff --git a/libjava/posix-threads.cc b/libjava/posix-threads.cc
index 6442eaffb2ff9a4ee518fdbb9a36d628ee7fa01d..e92348bcc2cc078bdb768e9ba23c1c6c7ccda1b5 100644
--- a/libjava/posix-threads.cc
+++ b/libjava/posix-threads.cc
@@ -448,7 +448,8 @@ _Jv_ThreadSelf_out_of_line(volatile self_cache_entry *sce, size_t high_sp_bits)
 {
   pthread_t self = pthread_self();
   sce -> high_sp_bits = high_sp_bits;
-  release_set ((obj_addr_t *) &(sce -> self), self);
+  write_barrier();
+  sce -> self = self;
   return self;
 }
 
diff --git a/libjava/sysdep/i386/locks.h b/libjava/sysdep/i386/locks.h
index 0c029ac939aa9c457ee2caa7323e8c1fd2e5373e..a9501ae99b23729584bef89a2fdd62f99bbe1a3d 100644
--- a/libjava/sysdep/i386/locks.h
+++ b/libjava/sysdep/i386/locks.h
@@ -62,4 +62,12 @@ read_barrier()
 {
 }
 
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  // X86 does not reorder writes. We just need to ensure that gcc also doesn't.
+  __asm__ __volatile__(" " : : : "memory");
+}
 #endif
diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h
index 414b5dcb7f8699f4adce83d4fe3361f805e29185..4d4532b1f83e9f831f238f157a093bbc661c7441 100644
--- a/libjava/sysdep/powerpc/locks.h
+++ b/libjava/sysdep/powerpc/locks.h
@@ -75,4 +75,12 @@ read_barrier()
   __asm__ __volatile__ ("isync" : : : "memory");
 }
 
+// Ensure that prior stores to memory are completed with respect to other
+// processors.
+inline static void
+write_barrier()
+{
+  __asm__ __volatile__ ("sync" : : : "memory");
+}
+
 #endif