diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index baabf6a5f1a68b341fe1523f82aa9685dc2c3849..eadb2fe1b48f9977e463aa76759a421a3b20bb52 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,8 @@
+2006-02-27  Daniel Berlin  <dberlin@dberlin.org>
+
+	* lambda-code.c (can_convert_to_perfect_nest): Allow any type of
+	scalar operation to be put back into the loop.
+
 2006-02-27  Jakub Jelinek  <jakub@redhat.com>
 
 	PR other/26208
diff --git a/gcc/lambda-code.c b/gcc/lambda-code.c
index 9d61c774231e3fdb261e26260b2c3f1ea2e899b4..a47f5801baef8eef7974066767f8a46cd5a43206 100644
--- a/gcc/lambda-code.c
+++ b/gcc/lambda-code.c
@@ -2257,18 +2257,15 @@ can_convert_to_perfect_nest (struct loop *loop,
 		if (stmt_uses_op (stmt, iv))
 		  goto fail;
 	      
-	      /* If this is a simple operation like a cast that is
-		 invariant in the inner loop, or after the inner loop,
-		 then see if we can place it back where it came from.
-		 This means that we will propagate casts and other
-		 cheap invariant operations *back* into or after
-		 the inner loop if we can interchange the loop, on the
-		 theory that we are going to gain a lot more by
-		 interchanging the loop than we are by leaving some
-		 invariant code there for some other pass to clean
-		 up.  */
+	      /* If this is a scalar operation that can be put back
+		 into the inner loop, or after the inner loop, through
+		 copying, then do so.  This works on the theory that
+		 any amount of scalar code we have to duplicate
+		 into or after the loops is less expensive than the
+		 win we get from rearranging the memory walk
+		 the loop is doing so that it has better
+		 cache behavior.  */
 	      if (TREE_CODE (stmt) == MODIFY_EXPR
-		  && is_gimple_cast (TREE_OPERAND (stmt, 1))
 		  && (can_put_in_inner_loop (loop->inner, stmt)
 		      || can_put_after_inner_loop (loop, stmt)))
 		continue;
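
For illustration, a hypothetical C sketch of the kind of nest this change now handles (the function names and array layout below are invented for the example, not taken from the patch).  Previously only casts qualified, via is_gimple_cast; with this change any scalar MODIFY_EXPR accepted by can_put_in_inner_loop or can_put_after_inner_loop can be copied back, so a nest like the following can be made perfect and then interchanged:

/* Before: an imperfect nest.  The scalar statement "t = i * n"
   sits between the two loop headers, so the nest cannot be
   interchanged as-is.  */
void
before (int *a, int n)
{
  int i, j;
  for (i = 0; i < n; i++)
    {
      int t = i * n;		/* scalar op, not a cast */
      for (j = 0; j < n; j++)
	a[j * n + i] = t + j;	/* stride-n walk: poor locality */
    }
}

/* After: duplicating the scalar op into the inner loop makes the
   nest perfect, so the loops can be interchanged and the access
   becomes a stride-1, cache-friendly walk.  */
void
after (int *a, int n)
{
  int i, j;
  for (j = 0; j < n; j++)
    for (i = 0; i < n; i++)
      {
	int t = i * n;		/* duplicated copy of the scalar op */
	a[j * n + i] = t + j;
      }
}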