From a3fb7997a5beae9ce90f682c7788faedc7b8bbac Mon Sep 17 00:00:00 2001
From: wilson <wilson@138bc75d-0d04-0410-961f-82ee72b054a4>
Date: Thu, 18 Aug 2005 20:01:54 +0000
Subject: [PATCH] Fix FreeBSD failure with recursive malloc call.

	* mf-hooks1.c (malloc, calloc, realloc, free,
	__mf_wrap_alloca_indirect): Call BEGIN_MALLOC_PROTECT before
	calling the real routines, and END_MALLOC_PROTECT afterwards.
	* mf-impl.h (enum __mf_state_enum): Expand comment.  Add in_malloc.
	(BEGIN_PROTECT): Handle in_malloc state.
	(BEGIN_MALLOC_PROTECT, END_MALLOC_PROTECT): New.
	* testsuite/libmudflap.c/hook2-allocstuff.c: New.

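The failure mode, roughly: on i386-freebsd4.10 the system allocator can call
other wrapped functions (such as mmap) internally, so a wrapped allocation
re-enters the mudflap hooks while the real allocator is still running.  The
new in_malloc state makes those nested wrappers pass straight through to the
real routines.  A simplified sketch of the guarded-call pattern the hooks now
follow (crumple-zone handling omitted; illustrative only, not verbatim
library source):

    WRAPPER(void *, malloc, size_t c)
    {
      DECLARE(void *, malloc, size_t c);
      void *result;
      BEGIN_PROTECT (malloc, c);       /* falls back to CALL_REAL when the
                                          state is reentrant or in_malloc */
      BEGIN_MALLOC_PROTECT ();         /* state := in_malloc */
      result = CALL_REAL (malloc, c);  /* any wrapped function the real
                                          malloc calls internally now sees
                                          in_malloc and goes straight to its
                                          real counterpart */
      END_MALLOC_PROTECT ();           /* state := active */
      if (LIKELY (result))
        __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      return result;
    }
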
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@103256 138bc75d-0d04-0410-961f-82ee72b054a4
---
 libmudflap/ChangeLog                          | 10 ++++++++
 libmudflap/mf-hooks1.c                        | 18 ++++++++++++++
 libmudflap/mf-impl.h                          | 24 +++++++++++++++++--
 .../testsuite/libmudflap.c/hook2-allocstuff.c |  9 +++++++
 4 files changed, 59 insertions(+), 2 deletions(-)
 create mode 100644 libmudflap/testsuite/libmudflap.c/hook2-allocstuff.c

diff --git a/libmudflap/ChangeLog b/libmudflap/ChangeLog
index 8be90f024877..717be4f7be62 100644
--- a/libmudflap/ChangeLog
+++ b/libmudflap/ChangeLog
@@ -1,3 +1,13 @@
+2005-08-17  Jim Wilson  <wilson@specifix.com>
+
+	* mf-hooks1.c (malloc, calloc, realloc, free,
+	__mf_wrap_alloca_indirect): Call BEGIN_MALLOC_PROTECT before calling
+	the real routines, and END_MALLOC_PROTECT afterwards.
+	* mf-impl.h (enum __mf_state_enum): Expand comment.  Add in_malloc.
+	(BEGIN_PROTECT): Handle in_malloc state.
+	(BEGIN_MALLOC_PROTECT, END_MALLOC_PROTECT): New.
+	* testsuite/libmudflap.c/hook2-allocstuff.c: New.
+
 2005-08-17  Kelley Cook  <kcook@gcc.gnu.org>
 
 	* All files: Update FSF address.
diff --git a/libmudflap/mf-hooks1.c b/libmudflap/mf-hooks1.c
index 0700d9a9335f..a99d7726e9da 100644
--- a/libmudflap/mf-hooks1.c
+++ b/libmudflap/mf-hooks1.c
@@ -108,7 +108,9 @@ WRAPPER(void *, malloc, size_t c)
   size_with_crumple_zones =
     CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
 			__mf_opts.crumple_zone));
+  BEGIN_MALLOC_PROTECT ();
   result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
+  END_MALLOC_PROTECT ();
 
   if (LIKELY(result))
     {
@@ -145,7 +147,9 @@ WRAPPER(void *, calloc, size_t c, size_t n)
     CLAMPADD((c * n), /* XXX: CLAMPMUL */
 	     CLAMPADD(__mf_opts.crumple_zone,
 		      __mf_opts.crumple_zone));
+  BEGIN_MALLOC_PROTECT ();
   result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
+  END_MALLOC_PROTECT ();
 
   if (LIKELY(result))
     memset (result, 0, size_with_crumple_zones);
@@ -187,7 +191,9 @@ WRAPPER(void *, realloc, void *buf, size_t c)
   size_with_crumple_zones =
     CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
 			 __mf_opts.crumple_zone));
+  BEGIN_MALLOC_PROTECT ();
   result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
+  END_MALLOC_PROTECT ();
 
   /* Ensure heap wiping doesn't occur during this peculiar
      unregister/reregister pair.  */
@@ -272,7 +278,9 @@ WRAPPER(void, free, void *buf)
 			     (void *) freeme,
 			     __mf_opts.crumple_zone);
 	    }
+	  BEGIN_MALLOC_PROTECT ();
 	  CALL_REAL (free, freeme);
+	  END_MALLOC_PROTECT ();
 	}
     }
   else
@@ -287,7 +295,9 @@ WRAPPER(void, free, void *buf)
 			 (void *) buf,
 			 __mf_opts.crumple_zone);
 	}
+      BEGIN_MALLOC_PROTECT ();
       CALL_REAL (free, base);
+      END_MALLOC_PROTECT ();
     }
 }
 
@@ -420,8 +430,10 @@ __mf_wrap_alloca_indirect (size_t c)
     {
       struct alloca_tracking *next = alloca_history->next;
       __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
+      BEGIN_MALLOC_PROTECT ();
       CALL_REAL (free, alloca_history->ptr);
       CALL_REAL (free, alloca_history);
+      END_MALLOC_PROTECT ();
       alloca_history = next;
     }
 
@@ -429,14 +441,20 @@ __mf_wrap_alloca_indirect (size_t c)
   result = NULL;
   if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
     {
+      BEGIN_MALLOC_PROTECT ();
       track = (struct alloca_tracking *) CALL_REAL (malloc,
 						    sizeof (struct alloca_tracking));
+      END_MALLOC_PROTECT ();
       if (LIKELY (track != NULL))
 	{
+	  BEGIN_MALLOC_PROTECT ();
 	  result = CALL_REAL (malloc, c);
+	  END_MALLOC_PROTECT ();
 	  if (UNLIKELY (result == NULL))
 	    {
+	      BEGIN_MALLOC_PROTECT ();
 	      CALL_REAL (free, track);
+	      END_MALLOC_PROTECT ();
 	      /* Too bad.  XXX: What about errno?  */
 	    }
 	  else
diff --git a/libmudflap/mf-impl.h b/libmudflap/mf-impl.h
index 6b44edaba5c3..e61f4b7a85ad 100644
--- a/libmudflap/mf-impl.h
+++ b/libmudflap/mf-impl.h
@@ -94,9 +94,17 @@ extern int __mf_heuristic_check (uintptr_t, uintptr_t);
 /* Type definitions. */
 /* ------------------------------------------------------------------------ */
 
-/* The mf_state type codes describe recursion and initialization order. */
+/* The mf_state type codes describe recursion and initialization order.
 
-enum __mf_state_enum { active, reentrant };
+   reentrant means we are inside a mf-runtime support routine, such as
+   __mf_register, and thus there should be no calls to any wrapped functions,
+   such as the wrapped malloc.  This indicates a bug if it occurs.
+   in_malloc means we are inside a real malloc call inside a wrapped malloc
+   call, and thus there should be no calls to any wrapped functions like the
+   wrapped mmap.  This happens on some systems due to how the system libraries
+   are constructed.  */
+
+enum __mf_state_enum { active, reentrant, in_malloc };
 
 /* The __mf_options structure records optional or tunable aspects of the
  mudflap library's behavior. There is a single global instance of this
@@ -379,11 +387,23 @@ ret __mfwrap_ ## fname (__VA_ARGS__)
     __mf_reentrancy ++; \
     return CALL_REAL(fname, __VA_ARGS__);   \
   }                                         \
+  else if (UNLIKELY (__mf_get_state () == in_malloc))   \
+  {                                         \
+    return CALL_REAL(fname, __VA_ARGS__);   \
+  }                                         \
   else                                      \
   {                                         \
     TRACE ("%s\n", __PRETTY_FUNCTION__); \
   }
 
+/* There is an assumption here that these will only be called in routines
+   that call BEGIN_PROTECT at the start, and hence the state must always
+   be active when BEGIN_MALLOC_PROTECT is called.  */
+#define BEGIN_MALLOC_PROTECT() \
+  __mf_set_state (in_malloc)
+
+#define END_MALLOC_PROTECT() \
+  __mf_set_state (active)
 
 /* Unlocked variants of main entry points from mf-runtime.h.  */
 extern void __mfu_check (void *ptr, size_t sz, int type, const char *location);
diff --git a/libmudflap/testsuite/libmudflap.c/hook2-allocstuff.c b/libmudflap/testsuite/libmudflap.c/hook2-allocstuff.c
new file mode 100644
index 000000000000..d8fbec4676da
--- /dev/null
+++ b/libmudflap/testsuite/libmudflap.c/hook2-allocstuff.c
@@ -0,0 +1,9 @@
+/* Generates recursive malloc call on i386-freebsd4.10 with -fmudflap.  */
+#include <stdlib.h>
+
+int
+main (void)
+{
+  char *p = malloc (1<<24);
+  return 0;
+}
-- 
GitLab