From 4e0dde968cb4eb15f3bd1d84e752b9be057edba5 Mon Sep 17 00:00:00 2001
From: bryce <bryce@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Sun, 10 Mar 2002 03:31:08 +0000
Subject: [PATCH] libjava: Move thread synchronization primitives to
 system-dependent sysdep headers

libjava:
	* configure.in: Define SLOW_PTHREAD_SELF if configure.host set
	slow_pthread_self.  Set up symlink for sysdeps directory.
	* configure.host: Document more shell variables.  Set sysdeps_dir
	for most platforms.  Set slow_pthread_self for i686.  Set
	enable_hash_synchronization_default and slow_pthread_self for
	PowerPC.
	* posix-threads.cc (_Jv_ThreadSelf_out_of_line): Use release_set so
	that memory barrier is emitted where required.
	* include/posix-threads.h (_Jv_ThreadSelf for SLOW_PTHREAD_SELF):
	Add read_barrier() to enforce ordering of reads.
	* sysdep/powerpc/locks.h: New file.  Implementation of
	synchronization primitives for PowerPC.
	* sysdep/i386/locks.h: New file.  Synchronization primitives for
	i386 moved from natObject.cc.
	* sysdep/alpha/locks.h: Likewise.
	* sysdep/ia64/locks.h: Likewise.
	* sysdep/generic/locks.h: Likewise.
	* java/lang/natObject.cc: Move thread synchronization primitives to
	system-dependent headers.

gcc/java:
	* decl.c (java_init_decl_processing): Make sure class_type_node
	alignment is not less than 64 bits if hash synchronization is enabled.

boehm-gc:
	* include/gc_priv.h: Define ALIGN_DOUBLE on 32 bit targets if GCJ
	support is enabled, for hash synchronization.
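
Illustrative note, not part of the ChangeLog entry above: the 64-bit
alignment requirement exists because hash synchronization keeps
lock-state flags in the low bits of object addresses, and those bits are
only guaranteed to be free when every object is aligned to at least 8
bytes.  A minimal sketch of the idea; the FLAGS mask and the two helpers
are hypothetical names chosen for illustration, not the natObject.cc
layout:

  #include <cstddef>

  typedef size_t obj_addr_t;

  // With 8-byte alignment the low 3 bits of an object address are
  // always zero, so they can be borrowed for lock-state flags.
  static const obj_addr_t FLAGS = 0x7;     // hypothetical flag mask

  static inline obj_addr_t
  address_part (obj_addr_t word)
  {
    return word & ~FLAGS;                  // recover the object address
  }

  static inline obj_addr_t
  flag_part (obj_addr_t word)
  {
    return word & FLAGS;                   // recover the stored flags
  }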


git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@50518 138bc75d-0d04-0410-961f-82ee72b054a4
---
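Reviewer note, not part of the patch: the new per-platform headers all
provide the same small interface -- compare_and_swap with acquire
semantics, release_set as a release store, compare_and_swap_release,
and, where SLOW_PTHREAD_SELF is involved, read_barrier.  A minimal
sketch of how a caller is expected to use it, assuming the header is
reachable as <sysdep/locks.h> through the symlink configure.in now sets
up; lock_word, spin_acquire and spin_release are made-up names, and the
"0 = free" encoding is for illustration only, not the natObject.cc
scheme:

  #include <cstddef>
  #include <sysdep/locks.h>

  static volatile obj_addr_t lock_word;   // 0 = free, nonzero = owner id

  static void
  spin_acquire (obj_addr_t self)
  {
    // Acquire semantics: loads issued after a successful swap cannot be
    // satisfied with data read before the lock was taken.
    while (! compare_and_swap (&lock_word, 0, self))
      ;  // a real caller would back off or block here
  }

  static void
  spin_release ()
  {
    // Release semantics: stores made inside the critical section become
    // visible before the lock word is cleared.
    release_set (&lock_word, 0);
  }
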
 libjava/sysdep/alpha/locks.h   | 53 +++++++++++++++++++++++
 libjava/sysdep/generic/locks.h | 11 +++++
 libjava/sysdep/i386/locks.h    | 65 ++++++++++++++++++++++++++++
 libjava/sysdep/ia64/locks.h    | 50 ++++++++++++++++++++++
 libjava/sysdep/powerpc/locks.h | 78 ++++++++++++++++++++++++++++++++++
 5 files changed, 257 insertions(+)
 create mode 100644 libjava/sysdep/alpha/locks.h
 create mode 100644 libjava/sysdep/generic/locks.h
 create mode 100644 libjava/sysdep/i386/locks.h
 create mode 100644 libjava/sysdep/ia64/locks.h
 create mode 100644 libjava/sysdep/powerpc/locks.h

diff --git a/libjava/sysdep/alpha/locks.h b/libjava/sysdep/alpha/locks.h
new file mode 100644
index 000000000000..1c20249cce39
--- /dev/null
+++ b/libjava/sysdep/alpha/locks.h
@@ -0,0 +1,53 @@
+// locks.h - Thread synchronization primitives. Alpha implementation.
+
+/* Copyright (C) 2002  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
+				/* address.				*/
+
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  unsigned long oldval;
+  char result;
+  __asm__ __volatile__(
+      "1:ldq_l %0, %1\n\t" \
+      "cmpeq %0, %5, %2\n\t" \
+      "beq %2, 2f\n\t" \
+      "mov %3, %0\n\t" \
+      "stq_c %0, %1\n\t" \
+      "bne %0, 2f\n\t" \
+      "br 1b\n\t" \
+      "2:mb"
+	      : "=&r"(oldval), "=m"(*addr), "=&r"(result)
+	      : "r" (new_val), "m"(*addr), "r"(old) : "memory");
+  return (bool) result;
+}
+
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __asm__ __volatile__("mb" : : : "memory");
+  *(addr) = new_val;
+}
+
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+                         obj_addr_t old,
+                         obj_addr_t new_val)
+{
+  return compare_and_swap(addr, old, new_val);
+}
+
+#endif
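
Reviewer note, not part of the patch: the Alpha compare_and_swap is a
load-locked/store-conditional retry loop -- ldq_l loads the word, cmpeq
checks it against `old', stq_c attempts the store and the sequence
retries if the reservation was lost -- followed by an mb, a full memory
barrier, which is at least as strong as the acquire semantics the
callers rely on.  For comparison only, on compilers that provide the
__atomic builtins (which postdate this patch) the same operation could
be written as below; compare_and_swap_builtin is a hypothetical name:

  #include <cstddef>

  typedef size_t obj_addr_t;

  static inline bool
  compare_and_swap_builtin (volatile obj_addr_t *addr,
                            obj_addr_t old, obj_addr_t new_val)
  {
    // __ATOMIC_ACQUIRE is what the callers need; the hand-written asm
    // above is stronger, since the trailing mb orders everything.
    return __atomic_compare_exchange_n (addr, &old, new_val,
                                        /* weak= */ false,
                                        __ATOMIC_ACQUIRE,
                                        __ATOMIC_ACQUIRE);
  }
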
diff --git a/libjava/sysdep/generic/locks.h b/libjava/sysdep/generic/locks.h
new file mode 100644
index 000000000000..fce6c71669eb
--- /dev/null
+++ b/libjava/sysdep/generic/locks.h
@@ -0,0 +1,11 @@
+// locks.h - Thread synchronization primitives. Generic implementation.
+
+/* Copyright (C) 2002  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#error Thread synchronization primitives not implemented for this platform.
diff --git a/libjava/sysdep/i386/locks.h b/libjava/sysdep/i386/locks.h
new file mode 100644
index 000000000000..0c029ac939aa
--- /dev/null
+++ b/libjava/sysdep/i386/locks.h
@@ -0,0 +1,65 @@
+// locks.h - Thread synchronization primitives. X86 implementation.
+
+/* Copyright (C) 2002  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
+				/* address.				*/
+
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  char result;
+  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
+	      : "+m"(*(addr)), "=q"(result)
+	      : "r" (new_val), "a"(old)
+	      : "memory");
+  return (bool) result;
+}
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+// On X86, the hardware shouldn't reorder reads and writes,
+// so we just have to convince gcc not to do it either.
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __asm__ __volatile__(" " : : : "memory");
+  *(addr) = new_val;
+}
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+// On many architectures, the operation makes both guarantees, so the
+// implementation can be the same.
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+                         obj_addr_t old,
+                         obj_addr_t new_val)
+{
+  return compare_and_swap(addr, old, new_val);
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+// On X86, the hardware ensures that reads are properly ordered.
+inline static void
+read_barrier()
+{
+}
+
+#endif
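
Reviewer note, not part of the patch: on x86 release_set and
read_barrier deliberately emit no fence instruction -- the comments
above assume the hardware keeps these accesses ordered, so only GCC has
to be restrained (and, for read_barrier, nothing at all is needed).  A
quick way to smoke-test a port is a single-threaded exercise of all four
entry points; a minimal sketch, assuming the header is reachable as
<sysdep/locks.h>:

  #include <cassert>
  #include <cstddef>
  #include <sysdep/locks.h>

  int
  main ()
  {
    volatile obj_addr_t word = 0;

    assert (compare_and_swap (&word, 0, 42));    // swap succeeds
    assert (word == 42);
    assert (! compare_and_swap (&word, 0, 7));   // wrong `old', no change
    assert (word == 42);
    assert (compare_and_swap_release (&word, 42, 0));
    release_set (&word, 99);
    read_barrier ();
    assert (word == 99);
    return 0;
  }

This only checks the compare-and-swap logic; the ordering guarantees
still need inspection of the generated code or multi-threaded testing.
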
diff --git a/libjava/sysdep/ia64/locks.h b/libjava/sysdep/ia64/locks.h
new file mode 100644
index 000000000000..6edee83ea99a
--- /dev/null
+++ b/libjava/sysdep/ia64/locks.h
@@ -0,0 +1,50 @@
+// locks.h - Thread synchronization primitives. IA64 implementation.
+
+/* Copyright (C) 2002  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
+				/* address.				*/
+
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  unsigned long oldval;
+  __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.acq %0=%1,%2,ar.ccv"
+	      : "=r"(oldval), "=m"(*addr)
+	      : "r"(new_val), "1"(*addr), "r"(old) : "memory");
+  return (oldval == old);
+}
+
+// The fact that *addr is volatile should cause the compiler to
+// automatically generate an st8.rel.
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __asm__ __volatile__(" " : : : "memory");
+  *(addr) = new_val;
+}
+
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+                         obj_addr_t old,
+                         obj_addr_t new_val)
+{
+  unsigned long oldval;
+  __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
+	      : "=r"(oldval), "=m"(*addr)
+	      : "r"(new_val), "1"(*addr), "r"(old) : "memory");
+  return (oldval == old);
+}
+
+#endif
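
Reviewer note, not part of the patch: IA64 and PowerPC are the ports
where the two CAS flavours actually differ; here it is cmpxchg8.acq
versus cmpxchg8.rel, and release_set relies on the volatile store being
emitted as st8.rel, so only a compiler barrier precedes it.  A
hypothetical sketch of where each flavour belongs -- lock_word, try_lock
and try_unlock are invented names and the protocol is for illustration
only:

  #include <cstddef>
  #include <sysdep/locks.h>

  static volatile obj_addr_t lock_word;

  static bool
  try_lock (obj_addr_t self)
  {
    // Taking the lock wants acquire ordering: cmpxchg8.acq.
    return compare_and_swap (&lock_word, 0, self);
  }

  static bool
  try_unlock (obj_addr_t self)
  {
    // Handing the lock back while re-checking the word wants release
    // ordering: cmpxchg8.rel.
    return compare_and_swap_release (&lock_word, self, 0);
  }
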
diff --git a/libjava/sysdep/powerpc/locks.h b/libjava/sysdep/powerpc/locks.h
new file mode 100644
index 000000000000..414b5dcb7f86
--- /dev/null
+++ b/libjava/sysdep/powerpc/locks.h
@@ -0,0 +1,78 @@
+// locks.h - Thread synchronization primitives. PowerPC implementation.
+
+/* Copyright (C) 2002  Free Software Foundation
+
+   This file is part of libgcj.
+
+This software is copyrighted work licensed under the terms of the
+Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
+details.  */
+
+#ifndef __SYSDEP_LOCKS_H__
+#define __SYSDEP_LOCKS_H__
+
+typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
+				/* address.				*/
+
+inline static bool
+compare_and_swap(volatile obj_addr_t *addr,
+                 obj_addr_t old,
+                 obj_addr_t new_val)
+{
+  int ret;
+
+  __asm__ __volatile__ (
+	   "0:    lwarx %0,0,%1 ;"
+	   "      xor. %0,%3,%0;"
+	   "      bne 1f;"
+	   "      stwcx. %2,0,%1;"
+	   "      bne- 0b;"
+	   "1:    "
+	: "=&r"(ret)
+	: "r"(addr), "r"(new_val), "r"(old)
+	: "cr0", "memory");
+  /* This version of __compare_and_swap is to be used when acquiring
+     a lock, so we don't need to worry about whether other memory
+     operations have completed, but we do need to be sure that any loads
+     after this point really occur after we have acquired the lock.  */
+  __asm__ __volatile__ ("isync" : : : "memory");
+  return ret == 0;
+}
+
+inline static void
+release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
+{
+  __asm__ __volatile__ ("sync" : : : "memory");
+  *(addr) = new_val;
+}
+
+inline static bool
+compare_and_swap_release(volatile obj_addr_t *addr,
+                         obj_addr_t old,
+                         obj_addr_t new_val)
+{
+  int ret;
+
+  __asm__ __volatile__ ("sync" : : : "memory");
+  __asm__ __volatile__ (
+	   "0:    lwarx %0,0,%1 ;"
+	   "      xor. %0,%3,%0;"
+	   "      bne 1f;"
+	   "      stwcx. %2,0,%1;"
+	   "      bne- 0b;"
+	   "1:    "
+	: "=&r"(ret)
+	: "r"(addr), "r"(new_val), "r"(old)
+	: "cr0", "memory");
+  return ret == 0;
+}
+
+// Ensure that subsequent instructions do not execute on stale
+// data that was loaded from memory before the barrier.
+inline static void
+read_barrier()
+{
+  __asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#endif
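
Reviewer note, not part of the patch: read_barrier is what the new
SLOW_PTHREAD_SELF code relies on -- the ChangeLog pairs release_set in
_Jv_ThreadSelf_out_of_line with read_barrier on the reader side.  The
shape of that pairing, sketched with made-up names (published_data,
ready, writer, reader) rather than the real posix-threads code, and
assuming the header is reachable as <sysdep/locks.h>:

  #include <cstddef>
  #include <sysdep/locks.h>

  static void *published_data;            // hypothetical payload
  static volatile obj_addr_t ready;       // hypothetical flag

  static void
  writer (void *data)
  {
    published_data = data;
    release_set (&ready, 1);   // "sync", then the flag store
  }

  static void *
  reader ()
  {
    while (ready == 0)
      ;                        // branch depends on the flag load ...
    read_barrier ();           // ... then "isync": acquire ordering
    return published_data;     // now safe to read the payload
  }

On PowerPC the conditional branch on the flag load followed by isync is
the standard acquire idiom, which is why read_barrier here is isync
rather than nothing.
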
-- 
GitLab