00db10
commit 27822ce67fbf7f2b204992a410e7da2e8c1e2607
00db10
Author: Adhemerval Zanella <azanella@linux.vnet.ibm.com>
00db10
Date:   Wed Mar 26 15:37:35 2014 -0500
00db10
00db10
    Define _STRING_ARCH_unaligned unconditionally
00db10
    
00db10
    This patch defines _STRING_ARCH_unaligned to 0 on default bits/string.h
00db10
    header to avoid undefined compiler warnings on platforms that do not
00db10
 define it.  It also makes adjustments in code where tests checked if macro
00db10
    existed or not.
00db10
00db10
Conflicts:
00db10
	resolv/res_send.c
00db10
00db10
Conflict due to stub resolver rebase in glibc-rh677316-resolv.patch.
00db10
00db10
diff --git a/bits/string.h b/bits/string.h
00db10
index f8630d2c52a9298a..b88a6bc601803f68 100644
00db10
--- a/bits/string.h
00db10
+++ b/bits/string.h
00db10
@@ -8,5 +8,7 @@
00db10
 #ifndef _BITS_STRING_H
00db10
 #define _BITS_STRING_H	1
00db10
 
00db10
+/* Define if architecture can access unaligned multi-byte variables.  */
00db10
+#define _STRING_ARCH_unaligned   0
00db10
 
00db10
 #endif /* bits/string.h */
00db10
diff --git a/crypt/sha256.c b/crypt/sha256.c
00db10
index aea94651391f19ae..1cbd2bc8381d6778 100644
00db10
--- a/crypt/sha256.c
00db10
+++ b/crypt/sha256.c
00db10
@@ -124,7 +124,7 @@ __sha256_finish_ctx (ctx, resbuf)
00db10
   memcpy (&ctx->buffer[bytes], fillbuf, pad);
00db10
 
00db10
   /* Put the 64-bit file length in *bits* at the end of the buffer.  */
00db10
-#ifdef _STRING_ARCH_unaligned
00db10
+#if _STRING_ARCH_unaligned
00db10
   ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
00db10
 #else
00db10
   ctx->buffer32[(bytes + pad + 4) / 4] = SWAP (ctx->total[TOTAL64_low] << 3);
00db10
diff --git a/iconv/gconv_simple.c b/iconv/gconv_simple.c
00db10
index 48932ee695083595..03fa5f2e2c771ebc 100644
00db10
--- a/iconv/gconv_simple.c
00db10
+++ b/iconv/gconv_simple.c
00db10
@@ -112,7 +112,7 @@ internal_ucs4_loop (struct __gconv_step *step,
00db10
   return result;
00db10
 }
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 static inline int
00db10
 __attribute ((always_inline))
00db10
 internal_ucs4_loop_unaligned (struct __gconv_step *step,
00db10
@@ -289,7 +289,7 @@ ucs4_internal_loop (struct __gconv_step *step,
00db10
   return result;
00db10
 }
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 static inline int
00db10
 __attribute ((always_inline))
00db10
 ucs4_internal_loop_unaligned (struct __gconv_step *step,
00db10
@@ -478,7 +478,7 @@ internal_ucs4le_loop (struct __gconv_step *step,
00db10
   return result;
00db10
 }
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 static inline int
00db10
 __attribute ((always_inline))
00db10
 internal_ucs4le_loop_unaligned (struct __gconv_step *step,
00db10
@@ -660,7 +660,7 @@ ucs4le_internal_loop (struct __gconv_step *step,
00db10
   return result;
00db10
 }
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 static inline int
00db10
 __attribute ((always_inline))
00db10
 ucs4le_internal_loop_unaligned (struct __gconv_step *step,
00db10
diff --git a/iconv/loop.c b/iconv/loop.c
00db10
index e11e86b5ecd4abd9..7b2499a3d0657265 100644
00db10
--- a/iconv/loop.c
00db10
+++ b/iconv/loop.c
00db10
@@ -63,7 +63,7 @@
00db10
    representations with a fixed width of 2 or 4 bytes.  But if we cannot
00db10
    access unaligned memory we still have to read byte-wise.  */
00db10
 #undef FCTNAME2
00db10
-#if defined _STRING_ARCH_unaligned || !defined DEFINE_UNALIGNED
00db10
+#if _STRING_ARCH_unaligned || !defined DEFINE_UNALIGNED
00db10
 /* We can handle unaligned memory access.  */
00db10
 # define get16(addr) *((const uint16_t *) (addr))
00db10
 # define get32(addr) *((const uint32_t *) (addr))
00db10
@@ -342,7 +342,7 @@ FCTNAME (LOOPFCT) (struct __gconv_step *step,
00db10
 
00db10
 /* Include the file a second time to define the function to handle
00db10
    unaligned access.  */
00db10
-#if !defined DEFINE_UNALIGNED && !defined _STRING_ARCH_unaligned \
00db10
+#if !defined DEFINE_UNALIGNED && !_STRING_ARCH_unaligned \
00db10
     && MIN_NEEDED_INPUT != 1 && MAX_NEEDED_INPUT % MIN_NEEDED_INPUT == 0 \
00db10
     && MIN_NEEDED_OUTPUT != 1 && MAX_NEEDED_OUTPUT % MIN_NEEDED_OUTPUT == 0
00db10
 # undef get16
00db10
diff --git a/iconv/skeleton.c b/iconv/skeleton.c
00db10
index 934b1fdde8d277df..176436a4c81f071b 100644
00db10
--- a/iconv/skeleton.c
00db10
+++ b/iconv/skeleton.c
00db10
@@ -203,7 +203,7 @@
00db10
 /* Define macros which can access unaligned buffers.  These macros are
00db10
    supposed to be used only in code outside the inner loops.  For the inner
00db10
    loops we have other definitions which allow optimized access.  */
00db10
-#ifdef _STRING_ARCH_unaligned
00db10
+#if _STRING_ARCH_unaligned
00db10
 /* We can handle unaligned memory access.  */
00db10
 # define get16u(addr) *((const uint16_t *) (addr))
00db10
 # define get32u(addr) *((const uint32_t *) (addr))
00db10
@@ -522,7 +522,7 @@ FUNCTION_NAME (struct __gconv_step *step, struct __gconv_step_data *data,
00db10
 	 INTERNAL, for which the subexpression evaluates to 1, but INTERNAL
00db10
 	 buffers are always aligned correctly.  */
00db10
 #define POSSIBLY_UNALIGNED \
00db10
-  (!defined _STRING_ARCH_unaligned					      \
00db10
+  (!_STRING_ARCH_unaligned					              \
00db10
    && (((FROM_LOOP_MIN_NEEDED_FROM != 1					      \
00db10
 	 && FROM_LOOP_MAX_NEEDED_FROM % FROM_LOOP_MIN_NEEDED_FROM == 0)	      \
00db10
 	&& (FROM_LOOP_MIN_NEEDED_TO != 1				      \
00db10
diff --git a/nscd/nscd_gethst_r.c b/nscd/nscd_gethst_r.c
00db10
index 41488ed6c033ffcd..5fe9f2f62fa28fd4 100644
00db10
--- a/nscd/nscd_gethst_r.c
00db10
+++ b/nscd/nscd_gethst_r.c
00db10
@@ -190,7 +190,7 @@ nscd_gethst_r (const char *key, size_t keylen, request_type type,
00db10
 	      goto out;
00db10
 	    }
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 	  /* The aliases_len array in the mapped database might very
00db10
 	     well be unaligned.  We will access it word-wise so on
00db10
 	     platforms which do not tolerate unaligned accesses we
00db10
diff --git a/nscd/nscd_getserv_r.c b/nscd/nscd_getserv_r.c
00db10
index acf7e22f82582dbb..5880b1bc023d1c02 100644
00db10
--- a/nscd/nscd_getserv_r.c
00db10
+++ b/nscd/nscd_getserv_r.c
00db10
@@ -140,7 +140,7 @@ nscd_getserv_r (const char *crit, size_t critlen, const char *proto,
00db10
 				> recend, 0))
00db10
 	    goto out;
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 	  /* The aliases_len array in the mapped database might very
00db10
 	     well be unaligned.  We will access it word-wise so on
00db10
 	     platforms which do not tolerate unaligned accesses we
00db10
diff --git a/nscd/nscd_helper.c b/nscd/nscd_helper.c
00db10
index 96fb93db768703cc..a46047b1fa0d502e 100644
00db10
--- a/nscd/nscd_helper.c
00db10
+++ b/nscd/nscd_helper.c
00db10
@@ -489,7 +489,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
00db10
       struct hashentry *here = (struct hashentry *) (mapped->data + work);
00db10
       ref_t here_key, here_packet;
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
       /* Although during garbage collection when moving struct hashentry
00db10
 	 records around we first copy from old to new location and then
00db10
 	 adjust pointer from previous hashentry to it, there is no barrier
00db10
@@ -511,7 +511,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
00db10
 	  struct datahead *dh
00db10
 	    = (struct datahead *) (mapped->data + here_packet);
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 	  if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
00db10
 	    return NULL;
00db10
 #endif
00db10
@@ -535,7 +535,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
00db10
 	  struct hashentry *trailelem;
00db10
 	  trailelem = (struct hashentry *) (mapped->data + trail);
00db10
 
00db10
-#ifndef _STRING_ARCH_unaligned
00db10
+#if !_STRING_ARCH_unaligned
00db10
 	  /* We have to redo the checks.  Maybe the data changed.  */
00db10
 	  if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
00db10
 	    return NULL;