-Index: gcc-4.2.3/configure.in
-===================================================================
---- gcc-4.2.3.orig/configure.in 2007-09-15 02:42:24.000000000 +0200
-+++ gcc-4.2.3/configure.in 2008-05-21 13:45:54.101287819 +0200
-@@ -503,6 +503,9 @@
+--- a/configure.in
++++ b/configure.in
+@@ -503,6 +503,9 @@ case "${target}" in
arm-*-riscix*)
noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
;;
avr-*-*)
noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
;;
-Index: gcc-4.2.3/gcc/builtins.c
-===================================================================
---- gcc-4.2.3.orig/gcc/builtins.c 2008-01-23 11:38:21.000000000 +0100
-+++ gcc-4.2.3/gcc/builtins.c 2008-05-21 13:45:54.109288559 +0200
-@@ -9223,7 +9223,7 @@
+--- a/gcc/builtins.c
++++ b/gcc/builtins.c
+@@ -9223,7 +9223,7 @@ validate_arglist (tree arglist, ...)
do
{
switch (code)
{
case 0:
-Index: gcc-4.2.3/gcc/calls.c
-===================================================================
---- gcc-4.2.3.orig/gcc/calls.c 2007-09-01 17:28:30.000000000 +0200
-+++ gcc-4.2.3/gcc/calls.c 2008-05-21 13:45:54.117288181 +0200
-@@ -3447,7 +3447,7 @@
+--- a/gcc/calls.c
++++ b/gcc/calls.c
+@@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
for (; count < nargs; count++)
{
rtx val = va_arg (p, rtx);
/* We cannot convert the arg value to the mode the library wants here;
must do it earlier where we know the signedness of the arg. */
-Index: gcc-4.2.3/gcc/config/avr32/avr32.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ gcc-4.2.3/gcc/config/avr32/avr32.c 2008-05-21 13:45:54.145288116 +0200
-@@ -0,0 +1,7060 @@
+--- a/gcc/c-incpath.c
++++ b/gcc/c-incpath.c
+@@ -347,6 +347,18 @@ add_path (char *path, int chain, int cxx
+ char* c;
+ for (c = path; *c; c++)
+ if (*c == '\\') *c = '/';
++ /* Remove unnecessary trailing slashes. On some versions of MS
++ Windows, trailing _forward_ slashes cause no problems for stat().
++ On newer versions, stat() does not recognise a directory that ends
++ in a '\\' or '/', unless it is a drive root dir, such as "c:/",
++ where it is obligatory. */
++ int pathlen = strlen (path);
++ char* end = path + pathlen - 1;
++ /* Preserve the lead '/' or lead "c:/". */
++ char* start = path + (pathlen > 2 && path[1] == ':' ? 3 : 1);
++
++ for (; end > start && IS_DIR_SEPARATOR (*end); end--)
++ *end = 0;
+ #endif
+
+ p = XNEW (cpp_dir);
+--- /dev/null
++++ b/gcc/config/avr32/avr32.c
+@@ -0,0 +1,7915 @@
+/*
+ Target hooks and helper functions for AVR32.
+ Copyright 2003-2006 Atmel Corporation.
+static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
+static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
+static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
-+ int flags, bool * no_add_attrs);
++ int flags, bool * no_add_attrs);
+static void avr32_reorg (void);
+bool avr32_return_in_msb (tree type);
+bool avr32_vector_mode_supported (enum machine_mode mode);
+
+static void
+avr32_add_gc_roots (void)
-+ {
-+ gcc_obstack_init (&minipool_obstack);
-+ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
-+ }
++{
++ gcc_obstack_init (&minipool_obstack);
++ minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
++}
+
+
+/* List of all known AVR32 parts */
+static const struct part_type_s avr32_part_types[] = {
-+ /* name, part_type, architecture type, macro */
-+ {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
-+ {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
-+ {"ap7010", PART_TYPE_AVR32_AP7010, ARCH_TYPE_AVR32_AP, "__AVR32_AP7010__"},
-+ {"ap7020", PART_TYPE_AVR32_AP7020, ARCH_TYPE_AVR32_AP, "__AVR32_AP7020__"},
-+ {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A0256__"},
-+ {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A0512__"},
-+ {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A1128__"},
-+ {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A1256__"},
-+ {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3A1512__"},
-+ {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B064__"},
-+ {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B0128__"},
-+ {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B0256__"},
-+ {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B164__"},
-+ {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B1128__"},
-+ {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UC,
-+ "__AVR32_UC3B1256__"},
-+ {NULL, 0, 0, NULL}
++ /* name, part_type, architecture type, macro */
++ {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
++ {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
++ {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
++ {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
++ {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
++ {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A0128__"},
++ {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A0256__"},
++ {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A0512__"},
++ {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3A0512ES__"},
++ {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A1128__"},
++ {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A1256__"},
++ {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A1512__"},
++ {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3A1512ES__"},
++ {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL,
++ "__AVR32_UC3A3256S__"},
++ {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A364__"},
++ {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A364S__"},
++ {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3128__"},
++ {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3128S__"},
++ {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3256__"},
++ {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2,
++ "__AVR32_UC3A3256S__"},
++ {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B064__"},
++ {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B0128__"},
++ {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B0256__"},
++ {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B0256ES__"},
++ {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B164__"},
++ {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B1128__"},
++ {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B1256__"},
++ {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1,
++ "__AVR32_UC3B1256ES__"},
++ {NULL, 0, 0, NULL}
+};
+
+/* List of all known AVR32 architectures */
+static const struct arch_type_s avr32_arch_types[] = {
-+ /* name, architecture type, microarchitecture type, feature flags, macro */
-+ {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
-+ (FLAG_AVR32_HAS_DSP
-+ | FLAG_AVR32_HAS_SIMD
-+ | FLAG_AVR32_HAS_UNALIGNED_WORD
-+ | FLAG_AVR32_HAS_CACHES
-+ | FLAG_AVR32_HAS_BRANCH_PRED
-+ | FLAG_AVR32_HAS_RETURN_STACK),
-+ "__AVR32_AP__"},
-+ {"uc", ARCH_TYPE_AVR32_UC, UARCH_TYPE_AVR32A,
-+ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
-+ "__AVR32_UC__"},
-+ {NULL, 0, 0, 0, NULL}
++ /* name, architecture type, microarchitecture type, feature flags, macro */
++ {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
++ (FLAG_AVR32_HAS_DSP
++ | FLAG_AVR32_HAS_SIMD
++ | FLAG_AVR32_HAS_UNALIGNED_WORD
++ | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
++ | FLAG_AVR32_HAS_CACHES),
++ "__AVR32_AP__"},
++ {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
++ "__AVR32_UC__=1"},
++ {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS),
++ "__AVR32_UC__=2"},
++ {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
++ (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
++ | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
++ "__AVR32_UC__=3"},
++ {NULL, 0, 0, 0, NULL}
+};
+
+/* Default arch name */
-+const char *avr32_arch_name = "ap";
++const char *avr32_arch_name = "none";
+const char *avr32_part_name = "none";
+
+const struct part_type_s *avr32_part;
+const struct arch_type_s *avr32_arch;
+
++
+/* Set default target_flags. */
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
-+ (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION)
++ (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
+
-+void
++void
+avr32_optimization_options (int level,
+ int size){
+ if (AVR32_ALWAYS_PIC)
+/* Override command line options */
+void
+avr32_override_options (void)
-+ {
-+ const struct part_type_s *part;
-+ const struct arch_type_s *arch;
-+
-+ /* Check if part type is set. */
-+ for (part = avr32_part_types; part->name; part++)
-+ if (strcmp (part->name, avr32_part_name) == 0)
-+ break;
++{
++ const struct part_type_s *part;
++ const struct arch_type_s *arch;
+
-+ avr32_part = part;
++  /* Add backward compatibility. */
++ if (strcmp ("uc", avr32_arch_name)== 0)
++ {
++ fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
++ "Please use '-march=ucr1' instead. "
++ "Converting to arch 'ucr1'\n",
++ avr32_arch_name);
++ avr32_arch_name="ucr1";
++ }
+
-+ if (!part->name)
-+ {
-+ fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
-+ avr32_part_name);
-+ for (part = avr32_part_types; part->name; part++)
-+ fprintf (stderr, "\t%s\n", part->name);
-+ avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
-+ }
++ /* Check if arch type is set. */
++ for (arch = avr32_arch_types; arch->name; arch++)
++ {
++ if (strcmp (arch->name, avr32_arch_name) == 0)
++ break;
++ }
++ avr32_arch = arch;
+
-+ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
++ if (!arch->name && strcmp("none", avr32_arch_name) != 0)
++ {
++ fprintf (stderr, "Unknown arch `%s' specified\n"
++ "Known arch names:\n"
++ "\tuc (deprecated)\n",
++ avr32_arch_name);
++ for (arch = avr32_arch_types; arch->name; arch++)
++ fprintf (stderr, "\t%s\n", arch->name);
++ avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
++ }
+
-+ /* If part was set to "none" then check if arch was set. */
-+ if (strcmp (avr32_part->name, "none") == 0)
-+ {
-+ /* Check if arch type is set. */
-+ for (arch = avr32_arch_types; arch->name; arch++)
-+ if (strcmp (arch->name, avr32_arch_name) == 0)
-+ break;
++ /* Check if part type is set. */
++ for (part = avr32_part_types; part->name; part++)
++ if (strcmp (part->name, avr32_part_name) == 0)
++ break;
+
-+ avr32_arch = arch;
++ avr32_part = part;
++ if (!part->name)
++ {
++ fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
++ avr32_part_name);
++ for (part = avr32_part_types; part->name; part++)
++ {
++ if (strcmp("none", part->name) != 0)
++ fprintf (stderr, "\t%s\n", part->name);
++ }
++      /* Set default to NONE. */
++ avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
++ }
+
-+ if (!arch->name)
-+ {
-+ fprintf (stderr, "Unknown arch `%s' specified\nKnown arch names:\n",
-+ avr32_arch_name);
-+ for (arch = avr32_arch_types; arch->name; arch++)
-+ fprintf (stderr, "\t%s\n", arch->name);
-+ avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
-+ }
-+ }
++ /* NB! option -march= overrides option -mpart
++ * if both are used at the same time */
++ if (!arch->name)
++ avr32_arch = &avr32_arch_types[avr32_part->arch_type];
+
-+ /* If optimization level is two or greater, then align start of loops to a
++ /* If optimization level is two or greater, then align start of loops to a
+ word boundary since this will allow folding the first insn of the loop.
+ Do this only for targets supporting branch prediction. */
-+ if (optimize >= 2 && TARGET_BRANCH_PRED)
-+ align_loops = 2;
-+
++ if (optimize >= 2 && TARGET_BRANCH_PRED)
++ align_loops = 2;
+
-+ /* Enable section anchors if optimization is enabled. */
-+ if (optimize > 0 || optimize_size)
-+ flag_section_anchors = 1;
+
-+ /* Enable fast-float library if unsafe math optimizations
++ /* Enable fast-float library if unsafe math optimizations
+ are used. */
-+ if (flag_unsafe_math_optimizations)
-+ target_flags |= MASK_FAST_FLOAT;
++ if (flag_unsafe_math_optimizations)
++ target_flags |= MASK_FAST_FLOAT;
+
-+ /* Check if we should set avr32_imm_in_const_pool
-+ based on if caches are present or not. */
-+ if ( avr32_imm_in_const_pool == -1 )
-+ {
-+ if ( TARGET_CACHES )
-+ avr32_imm_in_const_pool = 1;
-+ else
-+ avr32_imm_in_const_pool = 0;
-+ }
-+
-+ avr32_add_gc_roots ();
-+ }
++ /* Check if we should set avr32_imm_in_const_pool
++ based on if caches are present or not. */
++ if ( avr32_imm_in_const_pool == -1 )
++ {
++ if ( TARGET_CACHES )
++ avr32_imm_in_const_pool = 1;
++ else
++ avr32_imm_in_const_pool = 0;
++ }
++
++ if (TARGET_NO_PIC)
++ flag_pic = 0;
++
++ avr32_add_gc_roots ();
++}
+
+
+/*
+compatibility reasons. Except in cases where required by standard
+or by a debugger, there is no reason why the stack layout used by GCC
+need agree with that used by other compilers for a machine.
-+ */
++*/
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB avr32_return_in_msb
+
++#undef TARGET_ENCODE_SECTION_INFO
++#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
++
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
+
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
+
++#undef TARGET_SECONDARY_RELOAD
++#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
++
++enum reg_class
++avr32_secondary_reload (bool in_p, rtx x, enum reg_class class ATTRIBUTE_UNUSED,
++ enum machine_mode mode, secondary_reload_info *sri)
++{
++
++ if ( avr32_rmw_memory_operand (x, mode) )
++ {
++ if (!in_p)
++ sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
++ else
++ sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
++ }
++ return NO_REGS;
++
++}
+
+/*
+ * Switches to the appropriate section for output of constant pool
+ Need to handle integer vectors */
+static bool
+avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
-+ {
-+ if (avr32_vector_mode_supported (GET_MODE (x)))
-+ {
-+ int i, units;
-+
-+ if (GET_CODE (x) != CONST_VECTOR)
-+ abort ();
-+
-+ units = CONST_VECTOR_NUNITS (x);
-+
-+ switch (GET_MODE (x))
-+ {
-+ case V2HImode:
-+ size = 2;
-+ break;
-+ case V4QImode:
-+ size = 1;
-+ break;
-+ default:
-+ abort ();
-+ }
++{
++ if (avr32_vector_mode_supported (GET_MODE (x)))
++ {
++ int i, units;
++
++ if (GET_CODE (x) != CONST_VECTOR)
++ abort ();
++
++ units = CONST_VECTOR_NUNITS (x);
++
++ switch (GET_MODE (x))
++ {
++ case V2HImode:
++ size = 2;
++ break;
++ case V4QImode:
++ size = 1;
++ break;
++ default:
++ abort ();
++ }
+
-+ for (i = 0; i < units; i++)
-+ {
-+ rtx elt;
++ for (i = 0; i < units; i++)
++ {
++ rtx elt;
+
-+ elt = CONST_VECTOR_ELT (x, i);
-+ assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
-+ }
++ elt = CONST_VECTOR_ELT (x, i);
++ assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
++ }
+
-+ return true;
-+ }
++ return true;
++ }
+
-+ return default_assemble_integer (x, size, aligned_p);
-+ }
++ return default_assemble_integer (x, size, aligned_p);
++}
+
+/*
+ * This target hook describes the relative costs of RTL expressions.
+/* Worker routine for avr32_rtx_costs. */
+static inline int
+avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
-+ enum rtx_code outer ATTRIBUTE_UNUSED)
-+ {
-+ enum machine_mode mode = GET_MODE (x);
++ enum rtx_code outer ATTRIBUTE_UNUSED)
++{
++ enum machine_mode mode = GET_MODE (x);
+
-+ switch (GET_CODE (x))
++ switch (GET_CODE (x))
+ {
+ case MEM:
+ /* Using pre decrement / post increment memory operations on the
+ avr32_uc architecture means that two writebacks must be performed
+ and hence two cycles are needed. */
+ if (!optimize_size
-+ && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
-+ && avr32_arch->arch_type == ARCH_TYPE_AVR32_UC
-+ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
-+ || GET_CODE (XEXP (x, 0)) == POST_INC))
-+ return COSTS_N_INSNS (5);
++ && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
++ && TARGET_ARCH_UC
++ && (GET_CODE (XEXP (x, 0)) == PRE_DEC
++ || GET_CODE (XEXP (x, 0)) == POST_INC))
++ return COSTS_N_INSNS (5);
+
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
-+ return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
++ return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
+
+ return COSTS_N_INSNS (4);
+ case SYMBOL_REF:
+ case ROTATE:
+ case ROTATERT:
+ if (mode == TImode)
-+ return COSTS_N_INSNS (100);
++ return COSTS_N_INSNS (100);
+
+ if (mode == DImode)
-+ return COSTS_N_INSNS (10);
++ return COSTS_N_INSNS (10);
+ return COSTS_N_INSNS (4);
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ case NOT:
+ if (mode == TImode)
-+ return COSTS_N_INSNS (10);
++ return COSTS_N_INSNS (10);
+
+ if (mode == DImode)
-+ return COSTS_N_INSNS (4);
++ return COSTS_N_INSNS (4);
+ return COSTS_N_INSNS (1);
+ case PLUS:
+ case MINUS:
+ case COMPARE:
+ case ABS:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
-+ return COSTS_N_INSNS (100);
++ return COSTS_N_INSNS (100);
+
+ if (mode == TImode)
-+ return COSTS_N_INSNS (50);
++ return COSTS_N_INSNS (50);
+
+ if (mode == DImode)
-+ return COSTS_N_INSNS (2);
++ return COSTS_N_INSNS (2);
+ return COSTS_N_INSNS (1);
+
+ case MULT:
-+ {
-+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
-+ return COSTS_N_INSNS (300);
++ {
++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
++ return COSTS_N_INSNS (300);
+
-+ if (mode == TImode)
-+ return COSTS_N_INSNS (16);
++ if (mode == TImode)
++ return COSTS_N_INSNS (16);
+
-+ if (mode == DImode)
-+ return COSTS_N_INSNS (4);
++ if (mode == DImode)
++ return COSTS_N_INSNS (4);
+
-+ if (mode == HImode)
-+ return COSTS_N_INSNS (2);
++ if (mode == HImode)
++ return COSTS_N_INSNS (2);
+
-+ return COSTS_N_INSNS (3);
-+ }
++ return COSTS_N_INSNS (3);
++ }
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
-+ return COSTS_N_INSNS (4);
++ return COSTS_N_INSNS (4);
+ return COSTS_N_INSNS (1);
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ instrcutions only take one register operand which means that gcc
+ often must insert some move instrcutions */
+ if (mode == QImode || mode == HImode)
-+ return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
++ return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
+ return COSTS_N_INSNS (4);
+ case UNSPEC:
+ /* divmod operations */
+ if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
-+ || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
-+ {
-+ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
-+ }
++ || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
++ {
++ return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
++ }
+ /* Fallthrough */
+ default:
+ return COSTS_N_INSNS (1);
+ }
-+ }
++}
+
+static bool
+avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
-+ {
-+ *total = avr32_rtx_costs_1 (x, code, outer_code);
-+ return true;
-+ }
++{
++ *total = avr32_rtx_costs_1 (x, code, outer_code);
++ return true;
++}
+
+
+bool
+avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
-+ {
-+ /* Do not want symbols in the constant pool when compiling pic or if using
++{
++ /* Do not want symbols in the constant pool when compiling pic or if using
+ address pseudo instructions. */
-+ return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
-+ && avr32_find_symbol (x) != NULL_RTX);
-+ }
++ return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
++ && avr32_find_symbol (x) != NULL_RTX);
++}
+
+
+/* Table of machine attributes. */
+const struct attribute_spec avr32_attribute_table[] = {
-+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
-+ /* Interrupt Service Routines have special prologue and epilogue
++ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
++ /* Interrupt Service Routines have special prologue and epilogue
+ requirements. */
-+ {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
-+ {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
-+ {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
-+ {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
-+ {NULL, 0, 0, false, false, false, NULL}
++ {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
++ {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
++ {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
++ {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
++ {"rmw_addressable", 0, 0, true, false, false, NULL},
++ {NULL, 0, 0, false, false, false, NULL}
+};
+
+
+isr_attribute_arg;
+
+static const isr_attribute_arg isr_attribute_args[] = {
-+ {"FULL", AVR32_FT_ISR_FULL},
-+ {"full", AVR32_FT_ISR_FULL},
-+ {"HALF", AVR32_FT_ISR_HALF},
-+ {"half", AVR32_FT_ISR_HALF},
-+ {"NONE", AVR32_FT_ISR_NONE},
-+ {"none", AVR32_FT_ISR_NONE},
-+ {"UNDEF", AVR32_FT_ISR_NONE},
-+ {"undef", AVR32_FT_ISR_NONE},
-+ {"SWI", AVR32_FT_ISR_NONE},
-+ {"swi", AVR32_FT_ISR_NONE},
-+ {NULL, AVR32_FT_ISR_NONE}
++ {"FULL", AVR32_FT_ISR_FULL},
++ {"full", AVR32_FT_ISR_FULL},
++ {"HALF", AVR32_FT_ISR_HALF},
++ {"half", AVR32_FT_ISR_HALF},
++ {"NONE", AVR32_FT_ISR_NONE},
++ {"none", AVR32_FT_ISR_NONE},
++ {"UNDEF", AVR32_FT_ISR_NONE},
++ {"undef", AVR32_FT_ISR_NONE},
++ {"SWI", AVR32_FT_ISR_NONE},
++ {"swi", AVR32_FT_ISR_NONE},
++ {NULL, AVR32_FT_ISR_NONE}
+};
+
+/* Returns the (interrupt) function type of the current
+
+static unsigned long
+avr32_isr_value (tree argument)
-+ {
-+ const isr_attribute_arg *ptr;
-+ const char *arg;
++{
++ const isr_attribute_arg *ptr;
++ const char *arg;
+
-+ /* No argument - default to ISR_NONE. */
-+ if (argument == NULL_TREE)
-+ return AVR32_FT_ISR_NONE;
++ /* No argument - default to ISR_NONE. */
++ if (argument == NULL_TREE)
++ return AVR32_FT_ISR_NONE;
+
-+ /* Get the value of the argument. */
-+ if (TREE_VALUE (argument) == NULL_TREE
-+ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
-+ return AVR32_FT_UNKNOWN;
++ /* Get the value of the argument. */
++ if (TREE_VALUE (argument) == NULL_TREE
++ || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
++ return AVR32_FT_UNKNOWN;
+
-+ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
++ arg = TREE_STRING_POINTER (TREE_VALUE (argument));
+
-+ /* Check it against the list of known arguments. */
-+ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
-+ if (streq (arg, ptr->arg))
-+ return ptr->return_value;
++ /* Check it against the list of known arguments. */
++ for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
++ if (streq (arg, ptr->arg))
++ return ptr->return_value;
+
-+ /* An unrecognized interrupt type. */
-+ return AVR32_FT_UNKNOWN;
-+ }
++ /* An unrecognized interrupt type. */
++ return AVR32_FT_UNKNOWN;
++}
+
+
+
+The compiler will print these strings at the start of a new line,
+followed immediately by the object's initial value. In most cases,
+the string should contain a tab, a pseudo-op, and then another tab.
-+ */
++*/
+#undef TARGET_ASM_BYTE_OP
+#define TARGET_ASM_BYTE_OP "\t.byte\t"
+#undef TARGET_ASM_ALIGNED_HI_OP
+
+
+ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
-+ || vcall_offset
-+ || flag_pic)
++ || vcall_offset)
+ {
+ fputs ("\tpushm\tlr\n", file);
+ }
+ {
+ if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
+ {
-+ fprintf (file, "\tsub\t%s, -0x%x\n", reg_names[this_regno],
-+ mi_delta);
++ fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
+ }
+ else
+ {
+ /* Immediate is larger than k21 we must make us a temp register by
+ pushing a register to the stack. */
-+ fprintf (file, "\tmov\tlr, lo(%x)\n", mi_delta);
-+ fprintf (file, "\torh\tlr, hi(%x)\n", mi_delta);
++ fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
++ fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
+ fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
+ }
+ }
+ }
+
+
-+ if ( (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
-+ || vcall_offset)
-+ && !flag_pic )
++ if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
++ || vcall_offset)
+ {
+ fputs ("\tpopm\tlr\n", file);
+ }
+
-+ if (flag_pic)
-+ {
-+ /* Load the got into lr and then load the pointer
-+ to the function from the got and put it on the stack.
-+ We can then call the function and restore lr by issuing
-+ a doubleword load from the stack. We do not use a popm/ldm
-+ since it will be treated as a return and might need a flushing
-+ of the return-stack if available. */
-+ rtx label = gen_label_rtx ();
-+ /* Load the got. */
-+ fputs ("\tlddpc\tlr, 0f\n", file);
-+ (*targetm.asm_out.internal_label) (file, "L",
-+ CODE_LABEL_NUMBER (label));
-+ fputs ("\trsub\tlr, pc\n", file);
-+ /* Load the function pointer. */
-+ fputs ("\tld.w\tlr, lr[", file);
-+ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
-+ fputs ("@got]\n", file);
-+ /* Push the function pointer on the stack.*/
-+ fputs ("\tpushm\tlr\n", file);
-+ /* Restore the old lr value and load the function pointer into
-+ pc. */
-+ fputs ("\tld.d\tlr,sp++\n", file);
-+ fprintf (file, "\t.align 2\n");
-+ fprintf (file, "0:\t.long\t.L%d - _GLOBAL_OFFSET_TABLE_\n", CODE_LABEL_NUMBER (label));
-+ }
-+ else
-+ {
-+ fprintf (file, "\tlddpc\tpc, 0f\n");
-+ fprintf (file, "\t.align 2\n");
-+ fputs ("0:\t.long\t", file);
-+ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
-+ fputc ('\n', file);
-+ }
++ /* Jump to the function. We assume that we can use an rjmp since the
++ function to jump to is local and probably not too far away from
++ the thunk. If this assumption proves to be wrong we could implement
++ this jump by calculating the offset between the jump source and destination
++ and put this in the constant pool and then perform an add to pc.
++ This would also be legitimate PIC code. But for now we hope that an rjmp
++ will be sufficient...
++ */
++ fputs ("\trjmp\t", file);
++ assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
++ fputc ('\n', file);
+ }
+
++
+/* Implements target hook vector_mode_supported. */
+bool
+avr32_vector_mode_supported (enum machine_mode mode)
-+ {
-+ if ((mode == V2HImode) || (mode == V4QImode))
-+ return true;
++{
++ if ((mode == V2HImode) || (mode == V4QImode))
++ return true;
+
-+ return false;
-+ }
++ return false;
++}
+
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
+
+tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
-+void_ftype_ptr_int;
++ void_ftype_ptr_int;
+tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
+tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
-+short_ftype_short_short;
++ short_ftype_short_short;
+tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
+tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
+tree longlong_ftype_int_int, void_ftype_int_int_longlong;
+
+#define def_builtin(NAME, TYPE, CODE) \
+ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
-+ BUILT_IN_MD, NULL, NULL_TREE)
++ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
+ do \
-+ { \
-+ if ((MASK)) \
-+ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
-+ BUILT_IN_MD, NULL, NULL_TREE); \
-+ } \
++ { \
++ if ((MASK)) \
++ lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
++ BUILT_IN_MD, NULL, NULL_TREE); \
++ } \
+ while (0)
+
+struct builtin_description
+ { 1, CODE_FOR_##code, "__builtin_" #code , \
+ AVR32_BUILTIN_##builtin, 0, 0, ftype }
+
-+ DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
-+ DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
-+ DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
-+ DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
-+ DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
-+ DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
-+ DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
-+ DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
-+ DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
-+ DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
-+ DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
++ DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
++ DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
++ DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
++ DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
++ DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
++ DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
++ DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
++ DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
++ DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
++ DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
++ DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
+};
+
+
+void
+avr32_init_builtins (void)
-+ {
-+ unsigned int i;
-+ const struct builtin_description *d;
-+ tree endlink = void_list_node;
-+ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
-+ tree longlong_endlink =
-+ tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
-+ tree short_endlink =
-+ tree_cons (NULL_TREE, short_integer_type_node, endlink);
-+ tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
-+
-+ /* int func (int) */
-+ int_ftype_int = build_function_type (integer_type_node, int_endlink);
-+
-+ /* short func (short) */
-+ short_ftype_short
++{
++ unsigned int i;
++ const struct builtin_description *d;
++ tree endlink = void_list_node;
++ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
++ tree longlong_endlink =
++ tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
++ tree short_endlink =
++ tree_cons (NULL_TREE, short_integer_type_node, endlink);
++ tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
++
++ /* int func (int) */
++ int_ftype_int = build_function_type (integer_type_node, int_endlink);
++
++ /* short func (short) */
++ short_ftype_short
+ = build_function_type (short_integer_type_node, short_endlink);
+
-+ /* short func (short, short) */
-+ short_ftype_short_short
++ /* short func (short, short) */
++ short_ftype_short_short
+ = build_function_type (short_integer_type_node,
-+ tree_cons (NULL_TREE, short_integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
+
-+ /* long long func (long long, short, short) */
-+ longlong_ftype_longlong_short_short
++ /* long long func (long long, short, short) */
++ longlong_ftype_longlong_short_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, long_long_integer_type_node,
-+ tree_cons (NULL_TREE,
-+ short_integer_type_node,
-+ short_endlink)));
++ tree_cons (NULL_TREE, long_long_integer_type_node,
++ tree_cons (NULL_TREE,
++ short_integer_type_node,
++ short_endlink)));
+
-+ /* long long func (short, short) */
-+ longlong_ftype_short_short
++ /* long long func (short, short) */
++ longlong_ftype_short_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, short_integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
+
-+ /* int func (int, int) */
-+ int_ftype_int_int
++ /* int func (int, int) */
++ int_ftype_int_int
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
+
-+ /* long long func (int, int) */
-+ longlong_ftype_int_int
++ /* long long func (int, int) */
++ longlong_ftype_int_int
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
+
-+ /* long long int func (long long, int, short) */
-+ longlong_ftype_longlong_int_short
++ /* long long int func (long long, int, short) */
++ longlong_ftype_longlong_int_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, long_long_integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ short_endlink)));
++ tree_cons (NULL_TREE, long_long_integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink)));
+
-+ /* long long int func (int, short) */
-+ longlong_ftype_int_short
++ /* long long int func (int, short) */
++ longlong_ftype_int_short
+ = build_function_type (long_long_integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink));
+
-+ /* int func (int, short, short) */
-+ int_ftype_int_short_short
++ /* int func (int, short, short) */
++ int_ftype_int_short_short
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE,
-+ short_integer_type_node,
-+ short_endlink)));
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE,
++ short_integer_type_node,
++ short_endlink)));
+
-+ /* int func (short, short) */
-+ int_ftype_short_short
++ /* int func (short, short) */
++ int_ftype_short_short
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, short_integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, short_integer_type_node,
++ short_endlink));
+
-+ /* int func (int, short) */
-+ int_ftype_int_short
++ /* int func (int, short) */
++ int_ftype_int_short
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ short_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ short_endlink));
+
-+ /* void func (int, int) */
-+ void_ftype_int_int
++ /* void func (int, int) */
++ void_ftype_int_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink));
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink));
+
-+ /* void func (int, int, int) */
-+ void_ftype_int_int_int
++ /* void func (int, int, int) */
++ void_ftype_int_int_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink)));
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink)));
+
-+ /* void func (int, int, long long) */
-+ void_ftype_int_int_longlong
++ /* void func (int, int, long long) */
++ void_ftype_int_int_longlong
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ longlong_endlink)));
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ longlong_endlink)));
+
-+ /* void func (int, int, int, int, int) */
-+ void_ftype_int_int_int_int_int
++ /* void func (int, int, int, int, int) */
++ void_ftype_int_int_int_int_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE,
-+ integer_type_node,
-+ tree_cons
-+ (NULL_TREE,
-+ integer_type_node,
-+ int_endlink)))));
-+
-+ /* void func (void *, int) */
-+ void_ftype_ptr_int
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE,
++ integer_type_node,
++ tree_cons
++ (NULL_TREE,
++ integer_type_node,
++ int_endlink)))));
++
++ /* void func (void *, int) */
++ void_ftype_ptr_int
+ = build_function_type (void_type_node,
-+ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
+
-+ /* void func (int) */
-+ void_ftype_int = build_function_type (void_type_node, int_endlink);
++ /* void func (int) */
++ void_ftype_int = build_function_type (void_type_node, int_endlink);
+
-+ /* void func (void) */
-+ void_ftype_void = build_function_type (void_type_node, void_endlink);
++ /* void func (void) */
++ void_ftype_void = build_function_type (void_type_node, void_endlink);
+
-+ /* int func (void) */
-+ int_ftype_void = build_function_type (integer_type_node, void_endlink);
++ /* int func (void) */
++ int_ftype_void = build_function_type (integer_type_node, void_endlink);
+
-+ /* int func (void *, int) */
-+ int_ftype_ptr_int
++ /* int func (void *, int) */
++ int_ftype_ptr_int
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
++ tree_cons (NULL_TREE, ptr_type_node, int_endlink));
+
-+ /* int func (int, int, int) */
-+ int_ftype_int_int_int
++ /* int func (int, int, int) */
++ int_ftype_int_int_int
+ = build_function_type (integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ tree_cons (NULL_TREE, integer_type_node,
-+ int_endlink)));
-+
-+ /* Initialize avr32 builtins. */
-+ def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
-+ def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
-+ def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
-+ def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
-+ def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
-+ def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
-+ def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
-+ def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
-+ def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
-+ def_builtin ("__builtin_breakpoint", void_ftype_void,
-+ AVR32_BUILTIN_BREAKPOINT);
-+ def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
-+ def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
-+ def_builtin ("__builtin_bswap_16", short_ftype_short,
-+ AVR32_BUILTIN_BSWAP16);
-+ def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
-+ def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
-+ AVR32_BUILTIN_COP);
-+ def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
-+ def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
-+ AVR32_BUILTIN_MVRC_W);
-+ def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
-+ AVR32_BUILTIN_MVCR_D);
-+ def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
-+ AVR32_BUILTIN_MVRC_D);
-+ def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
-+ def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
-+ def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
-+ AVR32_BUILTIN_SATRNDS);
-+ def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
-+ AVR32_BUILTIN_SATRNDU);
-+ def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
-+ def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
-+ def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
-+ AVR32_BUILTIN_MACSATHH_W);
-+ def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
-+ AVR32_BUILTIN_MACWH_D);
-+ def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
-+ AVR32_BUILTIN_MACHH_D);
-+
-+ /* Add all builtins that are more or less simple operations on two
++ tree_cons (NULL_TREE, integer_type_node,
++ tree_cons (NULL_TREE, integer_type_node,
++ int_endlink)));
++
++ /* Initialize avr32 builtins. */
++ def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
++ def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
++ def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
++ def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
++ def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
++ def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
++ def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
++ def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
++ def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
++ def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
++ def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
++ def_builtin ("__builtin_breakpoint", void_ftype_void,
++ AVR32_BUILTIN_BREAKPOINT);
++ def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
++ def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
++ def_builtin ("__builtin_bswap_16", short_ftype_short,
++ AVR32_BUILTIN_BSWAP16);
++ def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
++ def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
++ AVR32_BUILTIN_COP);
++ def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
++ def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
++ AVR32_BUILTIN_MVRC_W);
++ def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
++ AVR32_BUILTIN_MVCR_D);
++ def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
++ AVR32_BUILTIN_MVRC_D);
++ def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
++ def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
++ def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
++ AVR32_BUILTIN_SATRNDS);
++ def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
++ AVR32_BUILTIN_SATRNDU);
++ def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
++ def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
++ def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
++ AVR32_BUILTIN_MACSATHH_W);
++ def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
++ AVR32_BUILTIN_MACWH_D);
++ def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
++ AVR32_BUILTIN_MACHH_D);
++ def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
++ def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
++ def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
++
++ /* Add all builtins that are more or less simple operations on two
+ operands. */
-+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
-+ {
-+ /* Use one of the operands; the target can have a different mode for
++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ {
++ /* Use one of the operands; the target can have a different mode for
+ mask-generating compares. */
+
-+ if (d->name == 0)
-+ continue;
++ if (d->name == 0)
++ continue;
+
-+ def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
-+ }
-+ }
++ def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
++ }
++}
+
+
+/* Subroutine of avr32_expand_builtin to take care of binop insns. */
+
+static rtx
+avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
-+ {
-+ rtx pat;
-+ tree arg0 = TREE_VALUE (arglist);
-+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
-+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
-+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
-+
-+ if (!target
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
-+
-+ /* In case the insn wants input operands in modes different from the
++{
++ rtx pat;
++ tree arg0 = TREE_VALUE (arglist);
++ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ enum machine_mode tmode = insn_data[icode].operand[0].mode;
++ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
++ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
++
++ if (!target
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ /* In case the insn wants input operands in modes different from the
+ result, abort. */
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ /* If op0 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op0))
-+ op0 = convert_to_mode (mode0, op0, 1);
-+ else
-+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
-+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
-+ {
-+ /* If op1 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op1))
-+ op1 = convert_to_mode (mode1, op1, 1);
-+ else
-+ op1 = copy_to_mode_reg (mode1, op1);
-+ }
-+ pat = GEN_FCN (icode) (target, op0, op1);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
-+ return target;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ /* If op0 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op0))
++ op0 = convert_to_mode (mode0, op0, 1);
++ else
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
++ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op1))
++ op1 = convert_to_mode (mode1, op1, 1);
++ else
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+
+rtx
+avr32_expand_builtin (tree exp,
-+ rtx target,
-+ rtx subtarget ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ int ignore ATTRIBUTE_UNUSED)
-+ {
-+ const struct builtin_description *d;
-+ unsigned int i;
-+ enum insn_code icode;
-+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
-+ tree arglist = TREE_OPERAND (exp, 1);
-+ tree arg0, arg1, arg2;
-+ rtx op0, op1, op2, pat;
-+ enum machine_mode tmode, mode0, mode1;
-+ enum machine_mode arg0_mode;
-+ int fcode = DECL_FUNCTION_CODE (fndecl);
-+
-+ switch (fcode)
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ const struct builtin_description *d;
++ unsigned int i;
++ enum insn_code icode = 0;
++ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
++ tree arglist = TREE_OPERAND (exp, 1);
++ tree arg0, arg1, arg2;
++ rtx op0, op1, op2, pat;
++ enum machine_mode tmode, mode0, mode1;
++ enum machine_mode arg0_mode;
++ int fcode = DECL_FUNCTION_CODE (fndecl);
++
++ switch (fcode)
+ {
+ default:
+ break;
+ case AVR32_BUILTIN_SATU:
+ case AVR32_BUILTIN_SATRNDS:
+ case AVR32_BUILTIN_SATRNDU:
-+ {
-+ const char *fname;
-+ switch (fcode)
-+ {
-+ default:
-+ case AVR32_BUILTIN_SATS:
-+ icode = CODE_FOR_sats;
-+ fname = "sats";
-+ break;
-+ case AVR32_BUILTIN_SATU:
-+ icode = CODE_FOR_satu;
-+ fname = "satu";
-+ break;
-+ case AVR32_BUILTIN_SATRNDS:
-+ icode = CODE_FOR_satrnds;
-+ fname = "satrnds";
-+ break;
-+ case AVR32_BUILTIN_SATRNDU:
-+ icode = CODE_FOR_satrndu;
-+ fname = "satrndu";
-+ break;
-+ }
++ {
++ const char *fname;
++ switch (fcode)
++ {
++ default:
++ case AVR32_BUILTIN_SATS:
++ icode = CODE_FOR_sats;
++ fname = "sats";
++ break;
++ case AVR32_BUILTIN_SATU:
++ icode = CODE_FOR_satu;
++ fname = "satu";
++ break;
++ case AVR32_BUILTIN_SATRNDS:
++ icode = CODE_FOR_satrnds;
++ fname = "satrnds";
++ break;
++ case AVR32_BUILTIN_SATRNDU:
++ icode = CODE_FOR_satrndu;
++ fname = "satrndu";
++ break;
++ }
+
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+
-+ tmode = insn_data[icode].operand[0].mode;
++ tmode = insn_data[icode].operand[0].mode;
+
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
-+ {
-+ op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
-+ }
++ if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
++ {
++ op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
++ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
-+ {
-+ error ("Parameter 2 to __builtin_%s should be a constant number.",
-+ fname);
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error ("Parameter 2 to __builtin_%s should be a constant number.",
++ fname);
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
-+ {
-+ error ("Parameter 3 to __builtin_%s should be a constant number.",
-+ fname);
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
++ {
++ error ("Parameter 3 to __builtin_%s should be a constant number.",
++ fname);
++ return NULL_RTX;
++ }
+
-+ emit_move_insn (target, op0);
-+ pat = GEN_FCN (icode) (target, op1, op2);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ emit_move_insn (target, op0);
++ pat = GEN_FCN (icode) (target, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_MUSTR:
+ icode = CODE_FOR_mustr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mfsr must be a constant number");
-+ }
++ {
++ error ("Parameter 1 to __builtin_mfsr must be a constant number");
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_MTSR:
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mtsr must be a constant number");
-+ return gen_reg_rtx (mode0);
-+ }
++ {
++ error ("Parameter 1 to __builtin_mtsr must be a constant number");
++ return gen_reg_rtx (mode0);
++ }
+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
-+ op1 = copy_to_mode_reg (mode1, op1);
++ op1 = copy_to_mode_reg (mode1, op1);
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_MFDR:
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mfdr must be a constant number");
-+ }
++ {
++ error ("Parameter 1 to __builtin_mfdr must be a constant number");
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_MTDR:
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ {
-+ error ("Parameter 1 to __builtin_mtdr must be a constant number");
-+ return gen_reg_rtx (mode0);
-+ }
++ {
++ error ("Parameter 1 to __builtin_mtdr must be a constant number");
++ return gen_reg_rtx (mode0);
++ }
+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
-+ op1 = copy_to_mode_reg (mode1, op1);
++ op1 = copy_to_mode_reg (mode1, op1);
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_CACHE:
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
-+ {
-+ error ("Parameter 2 to __builtin_cache must be a constant number");
-+ return gen_reg_rtx (mode1);
-+ }
++ {
++ error ("Parameter 2 to __builtin_cache must be a constant number");
++ return gen_reg_rtx (mode1);
++ }
+
+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ op0 = copy_to_mode_reg (mode0, op0);
++ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_SYNC:
+ case AVR32_BUILTIN_MUSFR:
-+ {
-+ const char *fname;
-+ switch (fcode)
-+ {
-+ default:
-+ case AVR32_BUILTIN_SYNC:
-+ icode = CODE_FOR_sync;
-+ fname = "sync";
-+ break;
-+ case AVR32_BUILTIN_MUSFR:
-+ icode = CODE_FOR_musfr;
-+ fname = "musfr";
-+ break;
-+ }
-+
-+ arg0 = TREE_VALUE (arglist);
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ mode0 = insn_data[icode].operand[0].mode;
++ case AVR32_BUILTIN_SSRF:
++ case AVR32_BUILTIN_CSRF:
++ {
++ const char *fname;
++ switch (fcode)
++ {
++ default:
++ case AVR32_BUILTIN_SYNC:
++ icode = CODE_FOR_sync;
++ fname = "sync";
++ break;
++ case AVR32_BUILTIN_MUSFR:
++ icode = CODE_FOR_musfr;
++ fname = "musfr";
++ break;
++ case AVR32_BUILTIN_SSRF:
++ icode = CODE_FOR_ssrf;
++ fname = "ssrf";
++ break;
++ case AVR32_BUILTIN_CSRF:
++ icode = CODE_FOR_csrf;
++ fname = "csrf";
++ break;
++ }
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
-+ {
-+ if (icode == CODE_FOR_musfr)
-+ op0 = copy_to_mode_reg (mode0, op0);
-+ else
-+ {
-+ error ("Parameter to __builtin_%s is illegal.", fname);
-+ return gen_reg_rtx (mode0);
-+ }
-+ }
-+ pat = GEN_FCN (icode) (op0);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
-+ return NULL_RTX;
-+ }
++ arg0 = TREE_VALUE (arglist);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ mode0 = insn_data[icode].operand[0].mode;
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
++ {
++ if (icode == CODE_FOR_musfr)
++ op0 = copy_to_mode_reg (mode0, op0);
++ else
++ {
++ error ("Parameter to __builtin_%s is illegal.", fname);
++ return gen_reg_rtx (mode0);
++ }
++ }
++ pat = GEN_FCN (icode) (op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return NULL_RTX;
++ }
+ case AVR32_BUILTIN_TLBR:
+ icode = CODE_FOR_tlbr;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_TLBS:
+ icode = CODE_FOR_tlbs;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_TLBW:
+ icode = CODE_FOR_tlbw;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_BREAKPOINT:
+ icode = CODE_FOR_breakpoint;
+ pat = GEN_FCN (icode) (NULL_RTX);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return NULL_RTX;
+ case AVR32_BUILTIN_XCHG:
+ mode1 = insn_data[icode].operand[2].mode;
+
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
-+ {
-+ op1 = copy_to_mode_reg (mode1, op1);
-+ }
++ {
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
+
-+ op0 = gen_rtx_MEM (SImode, op0);
++ op0 = force_reg (GET_MODE (op0), op0);
++ op0 = gen_rtx_MEM (GET_MODE (op0), op0);
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ error
-+ ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
-+ }
++ {
++ error
++ ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_LDXI:
+ mode1 = insn_data[icode].operand[2].mode;
+
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
-+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
++ {
++ op0 = copy_to_mode_reg (mode0, op0);
++ }
+
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
-+ {
-+ op1 = copy_to_mode_reg (mode1, op1);
-+ }
++ {
++ op1 = copy_to_mode_reg (mode1, op1);
++ }
+
+ if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
-+ {
-+ error
-+ ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
-+ return gen_reg_rtx (mode0);
-+ }
++ {
++ error
++ ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
++ return gen_reg_rtx (mode0);
++ }
+
+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (!pat)
-+ return 0;
++ return 0;
+ emit_insn (pat);
+ return target;
+ case AVR32_BUILTIN_BSWAP16:
-+ {
-+ icode = CODE_FOR_bswap_16;
-+ arg0 = TREE_VALUE (arglist);
-+ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
-+ mode0 = insn_data[icode].operand[1].mode;
-+ if (arg0_mode != mode0)
-+ arg0 = build1 (NOP_EXPR,
-+ (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
++ {
++ icode = CODE_FOR_bswap_16;
++ arg0 = TREE_VALUE (arglist);
++ arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
++ mode0 = insn_data[icode].operand[1].mode;
++ if (arg0_mode != mode0)
++ arg0 = build1 (NOP_EXPR,
++ (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
+
-+ op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
-+ tmode = insn_data[icode].operand[0].mode;
++ op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
++ tmode = insn_data[icode].operand[0].mode;
+
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ if ( CONST_INT_P (op0) )
++ {
++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
++ ((INTVAL (op0)&0xff00) >> 8) );
++ /* Sign extend 16-bit value to host wide int */
++ val <<= (HOST_BITS_PER_WIDE_INT - 16);
++ val >>= (HOST_BITS_PER_WIDE_INT - 16);
++ op0 = GEN_INT(val);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ emit_move_insn(target, op0);
++ return target;
++ }
++ else
+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
++ }
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ {
-+ target = gen_reg_rtx (tmode);
-+ }
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ {
++ target = gen_reg_rtx (tmode);
++ }
+
+
-+ pat = GEN_FCN (icode) (target, op0);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_BSWAP32:
-+ {
-+ icode = CODE_FOR_bswap_32;
-+ arg0 = TREE_VALUE (arglist);
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ tmode = insn_data[icode].operand[0].mode;
-+ mode0 = insn_data[icode].operand[1].mode;
-+
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
-+ {
++ {
++ icode = CODE_FOR_bswap_32;
++ arg0 = TREE_VALUE (arglist);
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ if ( CONST_INT_P (op0) )
++ {
++ HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
++ ((INTVAL (op0)&0x0000ff00) << 8) |
++ ((INTVAL (op0)&0x00ff0000) >> 8) |
++ ((INTVAL (op0)&0xff000000) >> 24) );
++ /* Sign extend 32-bit value to host wide int */
++ val <<= (HOST_BITS_PER_WIDE_INT - 32);
++ val >>= (HOST_BITS_PER_WIDE_INT - 32);
++ op0 = GEN_INT(val);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++ emit_move_insn(target, op0);
++ return target;
++ }
++ else
+ op0 = copy_to_mode_reg (mode0, op0);
-+ }
++ }
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+
+
-+ pat = GEN_FCN (icode) (target, op0);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ pat = GEN_FCN (icode) (target, op0);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_MVCR_W:
+ case AVR32_BUILTIN_MVCR_D:
-+ {
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ {
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+
-+ if (fcode == AVR32_BUILTIN_MVCR_W)
-+ icode = CODE_FOR_mvcrsi;
-+ else
-+ icode = CODE_FOR_mvcrdi;
++ if (fcode == AVR32_BUILTIN_MVCR_W)
++ icode = CODE_FOR_mvcrsi;
++ else
++ icode = CODE_FOR_mvcrdi;
+
-+ tmode = insn_data[icode].operand[0].mode;
++ tmode = insn_data[icode].operand[0].mode;
+
-+ if (target == 0
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ if (target == 0
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
-+ {
-+ error
-+ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
-+ error ("Number should be between 0 and 7.");
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
++ {
++ error
++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
-+ {
-+ error
-+ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
++ {
++ error
++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ pat = GEN_FCN (icode) (target, op0, op1);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ pat = GEN_FCN (icode) (target, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ return target;
-+ }
++ return target;
++ }
+ case AVR32_BUILTIN_MACSATHH_W:
+ case AVR32_BUILTIN_MACWH_D:
+ case AVR32_BUILTIN_MACHH_D:
-+ {
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
-+
-+ icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
-+ (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
-+ CODE_FOR_machh_d);
++ {
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++
++ icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
++ (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
++ CODE_FOR_machh_d);
++
++ tmode = insn_data[icode].operand[0].mode;
++ mode0 = insn_data[icode].operand[1].mode;
++ mode1 = insn_data[icode].operand[2].mode;
++
++
++ if (!target
++ || GET_MODE (target) != tmode
++ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
++ target = gen_reg_rtx (tmode);
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
++ {
++ /* If op0 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op0))
++ op0 = convert_to_mode (tmode, op0, 1);
++ else
++ op0 = copy_to_mode_reg (tmode, op0);
++ }
+
-+ tmode = insn_data[icode].operand[0].mode;
-+ mode0 = insn_data[icode].operand[1].mode;
-+ mode1 = insn_data[icode].operand[2].mode;
++ if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
++ {
++ /* If op1 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op1))
++ op1 = convert_to_mode (mode0, op1, 1);
++ else
++ op1 = copy_to_mode_reg (mode0, op1);
++ }
+
++ if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
++ {
++ /* If op2 is already a reg we must cast it to the correct mode. */
++ if (REG_P (op2))
++ op2 = convert_to_mode (mode1, op2, 1);
++ else
++ op2 = copy_to_mode_reg (mode1, op2);
++ }
+
-+ if (!target
-+ || GET_MODE (target) != tmode
-+ || !(*insn_data[icode].operand[0].predicate) (target, tmode))
-+ target = gen_reg_rtx (tmode);
++ emit_move_insn (target, op0);
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
-+ {
-+ /* If op0 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op0))
-+ op0 = convert_to_mode (tmode, op0, 1);
-+ else
-+ op0 = copy_to_mode_reg (tmode, op0);
-+ }
++ pat = GEN_FCN (icode) (target, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return target;
++ }
++ case AVR32_BUILTIN_MVRC_W:
++ case AVR32_BUILTIN_MVRC_D:
++ {
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++
++ if (fcode == AVR32_BUILTIN_MVRC_W)
++ icode = CODE_FOR_mvrcsi;
++ else
++ icode = CODE_FOR_mvrcdi;
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
-+ {
-+ /* If op1 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op1))
-+ op1 = convert_to_mode (mode0, op1, 1);
-+ else
-+ op1 = copy_to_mode_reg (mode0, op1);
-+ }
++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
++ {
++ error ("Parameter 1 is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
-+ {
-+ /* If op1 is already a reg we must cast it to the correct mode. */
-+ if (REG_P (op2))
-+ op2 = convert_to_mode (mode1, op2, 1);
-+ else
-+ op2 = copy_to_mode_reg (mode1, op2);
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error ("Parameter 2 is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ emit_move_insn (target, op0);
++ if (GET_CODE (op2) == CONST_INT
++ || GET_CODE (op2) == CONST
++ || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
++ {
++ op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
++ }
+
-+ pat = GEN_FCN (icode) (target, op1, op2);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
-+ return target;
-+ }
-+ case AVR32_BUILTIN_MVRC_W:
-+ case AVR32_BUILTIN_MVRC_D:
-+ {
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
-+
-+ if (fcode == AVR32_BUILTIN_MVRC_W)
-+ icode = CODE_FOR_mvrcsi;
-+ else
-+ icode = CODE_FOR_mvrcdi;
++ if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
++ op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
-+ {
-+ error ("Parameter 1 is not a valid coprocessor number.");
-+ error ("Number should be between 0 and 7.");
-+ return NULL_RTX;
-+ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
-+ {
-+ error ("Parameter 2 is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ pat = GEN_FCN (icode) (op0, op1, op2);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
+
-+ if (GET_CODE (op2) == CONST_INT
-+ || GET_CODE (op2) == CONST
-+ || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
-+ {
-+ op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
-+ }
++ return NULL_RTX;
++ }
++ case AVR32_BUILTIN_COP:
++ {
++ rtx op3, op4;
++ tree arg3, arg4;
++ icode = CODE_FOR_cop;
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
++ arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
++ arg4 =
++ TREE_VALUE (TREE_CHAIN
++ (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
++ op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
++ op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
++
++ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
++ {
++ error
++ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
++ error ("Number should be between 0 and 7.");
++ return NULL_RTX;
++ }
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
-+ op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
++ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
++ {
++ error
++ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
++ if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
++ {
++ error
++ ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ pat = GEN_FCN (icode) (op0, op1, op2);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
++ {
++ error
++ ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
++ error ("Number should be between 0 and 15.");
++ return NULL_RTX;
++ }
+
-+ return NULL_RTX;
++ if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
++ {
++ error
++ ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
++ error ("Number should be between 0 and 127.");
++ return NULL_RTX;
++ }
++
++ pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++
++ return target;
++ }
++ case AVR32_BUILTIN_MEMS:
++ case AVR32_BUILTIN_MEMC:
++ case AVR32_BUILTIN_MEMT:
++ {
++ if (!TARGET_RMW)
++ error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
++
++ switch (fcode) {
++ case AVR32_BUILTIN_MEMS:
++ icode = CODE_FOR_iorsi3;
++ break;
++ case AVR32_BUILTIN_MEMC:
++ icode = CODE_FOR_andsi3;
++ break;
++ case AVR32_BUILTIN_MEMT:
++ icode = CODE_FOR_xorsi3;
++ break;
+ }
-+ case AVR32_BUILTIN_COP:
-+ {
-+ rtx op3, op4;
-+ tree arg3, arg4;
-+ icode = CODE_FOR_cop;
-+ arg0 = TREE_VALUE (arglist);
-+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
-+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
-+ arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
-+ arg4 =
-+ TREE_VALUE (TREE_CHAIN
-+ (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
-+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
-+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
-+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
-+ op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
-+ op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
-+
-+ if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
-+ {
-+ error
-+ ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
-+ error ("Number should be between 0 and 7.");
-+ return NULL_RTX;
-+ }
+
-+ if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
-+ {
-+ error
-+ ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ arg0 = TREE_VALUE (arglist);
++ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
++ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
++ if ( GET_CODE (op0) == SYMBOL_REF )
++ // This symbol must be RMW addressable
++ SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
++ op0 = gen_rtx_MEM(SImode, op0);
++ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
++ mode0 = insn_data[icode].operand[1].mode;
+
-+ if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
-+ {
-+ error
-+ ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
+
-+ if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
-+ {
-+ error
-+ ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
-+ error ("Number should be between 0 and 15.");
-+ return NULL_RTX;
-+ }
++ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
++ {
++ error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
++ }
+
-+ if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
-+ {
-+ error
-+ ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
-+ error ("Number should be between 0 and 127.");
-+ return NULL_RTX;
-+ }
++ if ( !CONST_INT_P (op1)
++ || INTVAL (op1) > 31
++ || INTVAL (op1) < 0 )
++ error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
+
-+ pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
-+ if (!pat)
-+ return 0;
-+ emit_insn (pat);
++ if ( fcode == AVR32_BUILTIN_MEMC )
++ op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
++ else
++ op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
++ pat = GEN_FCN (icode) (op0, op0, op1);
++ if (!pat)
++ return 0;
++ emit_insn (pat);
++ return op0;
++ }
+
-+ return target;
-+ }
+ }
+
-+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
-+ if (d->code == fcode)
-+ return avr32_expand_binop_builtin (d->icode, arglist, target);
++ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
++ if (d->code == fcode)
++ return avr32_expand_binop_builtin (d->icode, arglist, target);
+
+
-+ /* @@@ Should really do something sensible here. */
-+ return NULL_RTX;
-+ }
++ /* @@@ Should really do something sensible here. */
++ return NULL_RTX;
++}
+
+
+/* Handle an "interrupt" or "isr" attribute;
+
+static tree
+avr32_handle_isr_attribute (tree * node, tree name, tree args,
-+ int flags, bool * no_add_attrs)
-+ {
-+ if (DECL_P (*node))
-+ {
-+ if (TREE_CODE (*node) != FUNCTION_DECL)
-+ {
-+ warning ("`%s' attribute only applies to functions",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ }
-+ /* FIXME: the argument if any is checked for type attributes; should it
++ int flags, bool * no_add_attrs)
++{
++ if (DECL_P (*node))
++ {
++ if (TREE_CODE (*node) != FUNCTION_DECL)
++ {
++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++ /* FIXME: the argument if any is checked for type attributes; should it
+ be checked for decl ones? */
-+ }
-+ else
-+ {
-+ if (TREE_CODE (*node) == FUNCTION_TYPE
-+ || TREE_CODE (*node) == METHOD_TYPE)
-+ {
-+ if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
-+ {
-+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ }
-+ }
-+ else if (TREE_CODE (*node) == POINTER_TYPE
-+ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
-+ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
-+ && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
-+ {
-+ *node = build_variant_type_copy (*node);
-+ TREE_TYPE (*node) = build_type_attribute_variant
-+ (TREE_TYPE (*node),
-+ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
-+ *no_add_attrs = true;
-+ }
-+ else
-+ {
-+ /* Possibly pass this attribute on from the type to a decl. */
-+ if (flags & ((int) ATTR_FLAG_DECL_NEXT
-+ | (int) ATTR_FLAG_FUNCTION_NEXT
-+ | (int) ATTR_FLAG_ARRAY_NEXT))
-+ {
-+ *no_add_attrs = true;
-+ return tree_cons (name, args, NULL_TREE);
-+ }
-+ else
-+ {
-+ warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
-+ }
-+ }
-+ }
++ }
++ else
++ {
++ if (TREE_CODE (*node) == FUNCTION_TYPE
++ || TREE_CODE (*node) == METHOD_TYPE)
++ {
++ if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
++ {
++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
++ }
++ else if (TREE_CODE (*node) == POINTER_TYPE
++ && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
++ || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
++ && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
++ {
++ *node = build_variant_type_copy (*node);
++ TREE_TYPE (*node) = build_type_attribute_variant
++ (TREE_TYPE (*node),
++ tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
++ *no_add_attrs = true;
++ }
++ else
++ {
++ /* Possibly pass this attribute on from the type to a decl. */
++ if (flags & ((int) ATTR_FLAG_DECL_NEXT
++ | (int) ATTR_FLAG_FUNCTION_NEXT
++ | (int) ATTR_FLAG_ARRAY_NEXT))
++ {
++ *no_add_attrs = true;
++ return tree_cons (name, args, NULL_TREE);
++ }
++ else
++ {
++ warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
++ }
++ }
++ }
+
-+ return NULL_TREE;
-+ }
++ return NULL_TREE;
++}
+
+/* Handle an attribute requiring a FUNCTION_DECL;
+ arguments as in struct attribute_spec.handler. */
+static tree
+avr32_handle_fndecl_attribute (tree * node, tree name,
-+ tree args ATTRIBUTE_UNUSED,
-+ int flags ATTRIBUTE_UNUSED,
-+ bool * no_add_attrs)
-+ {
-+ if (TREE_CODE (*node) != FUNCTION_DECL)
-+ {
-+ warning ("%qs attribute only applies to functions",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ }
++ tree args ATTRIBUTE_UNUSED,
++ int flags ATTRIBUTE_UNUSED,
++ bool * no_add_attrs)
++{
++ if (TREE_CODE (*node) != FUNCTION_DECL)
++ {
++ warning (OPT_Wattributes,"%qs attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ }
+
-+ return NULL_TREE;
-+ }
++ return NULL_TREE;
++}
+
+
+/* Handle an acall attribute;
+
+static tree
+avr32_handle_acall_attribute (tree * node, tree name,
-+ tree args ATTRIBUTE_UNUSED,
-+ int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
-+ {
-+ if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
-+ {
-+ warning ("`%s' attribute not yet supported...",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ return NULL_TREE;
-+ }
++ tree args ATTRIBUTE_UNUSED,
++ int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
++{
++ if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
++ {
++ warning (OPT_Wattributes,"`%s' attribute not yet supported...",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++ }
+
-+ warning ("`%s' attribute only applies to functions",
-+ IDENTIFIER_POINTER (name));
-+ *no_add_attrs = true;
-+ return NULL_TREE;
-+ }
++ warning (OPT_Wattributes,"`%s' attribute only applies to functions",
++ IDENTIFIER_POINTER (name));
++ *no_add_attrs = true;
++ return NULL_TREE;
++}
+
+
+/* Return 0 if the attributes for two types are incompatible, 1 if they
+
+static int
+avr32_comp_type_attributes (tree type1, tree type2)
-+ {
-+ int acall1, acall2, isr1, isr2, naked1, naked2;
++{
++ int acall1, acall2, isr1, isr2, naked1, naked2;
+
-+ /* Check for mismatch of non-default calling convention. */
-+ if (TREE_CODE (type1) != FUNCTION_TYPE)
-+ return 1;
++ /* Check for mismatch of non-default calling convention. */
++ if (TREE_CODE (type1) != FUNCTION_TYPE)
++ return 1;
+
-+ /* Check for mismatched call attributes. */
-+ acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
-+ acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
-+ naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
-+ naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
-+ isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
-+ if (!isr1)
-+ isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
-+
-+ isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
-+ if (!isr2)
-+ isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
-+
-+ if ((acall1 && isr2)
-+ || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
-+ return 0;
++ /* Check for mismatched call attributes. */
++ acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
++ acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
++ naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
++ naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
++ isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
++ if (!isr1)
++ isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
++
++ isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
++ if (!isr2)
++ isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
++
++ if ((acall1 && isr2)
++ || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
++ return 0;
+
-+ return 1;
-+ }
++ return 1;
++}
+
+
+/* Computes the type of the current function. */
+
+static unsigned long
+avr32_compute_func_type (void)
-+ {
-+ unsigned long type = AVR32_FT_UNKNOWN;
-+ tree a;
-+ tree attr;
++{
++ unsigned long type = AVR32_FT_UNKNOWN;
++ tree a;
++ tree attr;
+
-+ if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
-+ abort ();
++ if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
++ abort ();
+
-+ /* Decide if the current function is volatile. Such functions never
++ /* Decide if the current function is volatile. Such functions never
+ return, and many memory cycles can be saved by not storing register
+ values that will never be needed again. This optimization was added to
+ speed up context switching in a kernel application. */
-+ if (optimize > 0
-+ && TREE_NOTHROW (current_function_decl)
-+ && TREE_THIS_VOLATILE (current_function_decl))
-+ type |= AVR32_FT_VOLATILE;
++ if (optimize > 0
++ && TREE_NOTHROW (current_function_decl)
++ && TREE_THIS_VOLATILE (current_function_decl))
++ type |= AVR32_FT_VOLATILE;
+
-+ if (cfun->static_chain_decl != NULL)
-+ type |= AVR32_FT_NESTED;
++ if (cfun->static_chain_decl != NULL)
++ type |= AVR32_FT_NESTED;
+
-+ attr = DECL_ATTRIBUTES (current_function_decl);
++ attr = DECL_ATTRIBUTES (current_function_decl);
+
-+ a = lookup_attribute ("isr", attr);
-+ if (a == NULL_TREE)
-+ a = lookup_attribute ("interrupt", attr);
++ a = lookup_attribute ("isr", attr);
++ if (a == NULL_TREE)
++ a = lookup_attribute ("interrupt", attr);
+
-+ if (a == NULL_TREE)
-+ type |= AVR32_FT_NORMAL;
-+ else
-+ type |= avr32_isr_value (TREE_VALUE (a));
++ if (a == NULL_TREE)
++ type |= AVR32_FT_NORMAL;
++ else
++ type |= avr32_isr_value (TREE_VALUE (a));
+
+
-+ a = lookup_attribute ("acall", attr);
-+ if (a != NULL_TREE)
-+ type |= AVR32_FT_ACALL;
++ a = lookup_attribute ("acall", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_ACALL;
+
-+ a = lookup_attribute ("naked", attr);
-+ if (a != NULL_TREE)
-+ type |= AVR32_FT_NAKED;
++ a = lookup_attribute ("naked", attr);
++ if (a != NULL_TREE)
++ type |= AVR32_FT_NAKED;
+
-+ return type;
-+ }
++ return type;
++}
+
+/* Returns the type of the current function. */
+
+static unsigned long
+avr32_current_func_type (void)
-+ {
-+ if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
-+ cfun->machine->func_type = avr32_compute_func_type ();
++{
++ if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
++ cfun->machine->func_type = avr32_compute_func_type ();
+
-+ return cfun->machine->func_type;
-+ }
++ return cfun->machine->func_type;
++}
+
+/*
+ This target hook should return true if we should not pass type solely
+ in registers. The file expr.h defines a definition that is usually appropriate,
+ refer to expr.h for additional documentation.
-+ */
++*/
+bool
+avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
-+ {
-+ if (type && AGGREGATE_TYPE_P (type)
-+ /* If the alignment is less than the size then pass in the struct on
++{
++ if (type && AGGREGATE_TYPE_P (type)
++ /* If the alignment is less than the size then pass in the struct on
+ the stack. */
-+ && ((unsigned int) TYPE_ALIGN_UNIT (type) <
-+ (unsigned int) int_size_in_bytes (type))
-+ /* If we support unaligned word accesses then structs of size 4 and 8
++ && ((unsigned int) TYPE_ALIGN_UNIT (type) <
++ (unsigned int) int_size_in_bytes (type))
++ /* If we support unaligned word accesses then structs of size 4 and 8
+ can have any alignment and still be passed in registers. */
-+ && !(TARGET_UNALIGNED_WORD
-+ && (int_size_in_bytes (type) == 4
-+ || int_size_in_bytes (type) == 8))
-+ /* Double word structs need only a word alignment. */
-+ && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
-+ return true;
++ && !(TARGET_UNALIGNED_WORD
++ && (int_size_in_bytes (type) == 4
++ || int_size_in_bytes (type) == 8))
++ /* Double word structs need only a word alignment. */
++ && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
++ return true;
+
-+ if (type && AGGREGATE_TYPE_P (type)
-+ /* Structs of size 3,5,6,7 are always passed in registers. */
-+ && (int_size_in_bytes (type) == 3
-+ || int_size_in_bytes (type) == 5
-+ || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
-+ return true;
++ if (type && AGGREGATE_TYPE_P (type)
++ /* Structs of size 3,5,6,7 are always passed in registers. */
++ && (int_size_in_bytes (type) == 3
++ || int_size_in_bytes (type) == 5
++ || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
++ return true;
+
+
-+ return (type && TREE_ADDRESSABLE (type));
-+ }
++ return (type && TREE_ADDRESSABLE (type));
++}
+
+
+bool
+avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
-+ {
-+ return true;
-+ }
++{
++ return true;
++}
+
+/*
+ This target hook should return true if an argument at the position indicated
+ If the hook returns true, a copy of that argument is made in memory and a
+ pointer to the argument is passed instead of the argument itself. The pointer
+ is passed in whatever way is appropriate for passing a pointer to that type.
-+ */
++*/
+bool
+avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ tree type, bool named ATTRIBUTE_UNUSED)
-+ {
-+ return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
-+ }
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type, bool named ATTRIBUTE_UNUSED)
++{
++ return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
++}
+
+static int
+avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ tree type ATTRIBUTE_UNUSED,
-+ bool named ATTRIBUTE_UNUSED)
-+ {
-+ return 0;
-+ }
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ tree type ATTRIBUTE_UNUSED,
++ bool named ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
+
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+/*
+ Table used to convert from register number in the assembler instructions and
+ the register numbers used in gcc.
-+ */
++*/
+const int avr32_function_arg_reglist[] = {
-+ INTERNAL_REGNUM (12),
-+ INTERNAL_REGNUM (11),
-+ INTERNAL_REGNUM (10),
-+ INTERNAL_REGNUM (9),
-+ INTERNAL_REGNUM (8)
++ INTERNAL_REGNUM (12),
++ INTERNAL_REGNUM (11),
++ INTERNAL_REGNUM (10),
++ INTERNAL_REGNUM (9),
++ INTERNAL_REGNUM (8)
+};
+
+rtx avr32_compare_op0 = NULL_RTX;
+/*
+ Returns nonzero if it is allowed to store a value of mode mode in hard
+ register number regno.
-+ */
++*/
+int
+avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
-+ {
-+ /* We allow only float modes in the fp-registers */
-+ if (regnr >= FIRST_FP_REGNUM
-+ && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
-+ {
-+ return 0;
-+ }
++{
++ /* We allow only float modes in the fp-registers */
++ if (regnr >= FIRST_FP_REGNUM
++ && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
++ {
++ return 0;
++ }
+
-+ switch (mode)
++ switch (mode)
+ {
+ case DImode: /* long long */
+ case DFmode: /* double */
+ case SCmode: /* __complex__ float */
+ case CSImode: /* __complex__ int */
+ if (regnr < 4)
-+ { /* long long int not supported in r12, sp, lr
++ { /* long long int not supported in r12, sp, lr
+ or pc. */
-+ return 0;
-+ }
++ return 0;
++ }
+ else
-+ {
-+ if (regnr % 2) /* long long int has to be refered in even
++ {
++ if (regnr % 2) /* long long int has to be refered in even
+ registers. */
-+ return 0;
-+ else
-+ return 1;
-+ }
++ return 0;
++ else
++ return 1;
++ }
+ case CDImode: /* __complex__ long long */
+ case DCmode: /* __complex__ double */
+ case TImode: /* 16 bytes */
+ if (regnr < 7)
-+ return 0;
++ return 0;
+ else if (regnr % 2)
-+ return 0;
++ return 0;
+ else
-+ return 1;
++ return 1;
+ default:
+ return 1;
+ }
-+ }
++}
+
+
+int
+avr32_rnd_operands (rtx add, rtx shift)
-+ {
-+ if (GET_CODE (shift) == CONST_INT &&
-+ GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
-+ {
-+ if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
-+ return TRUE;
-+ }
++{
++ if (GET_CODE (shift) == CONST_INT &&
++ GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
++ {
++ if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
++ return TRUE;
++ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+
+
+int
+avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
-+ {
-+ switch (c)
++{
++ switch (c)
+ {
+ case 'K':
+ case 'I':
+ {
-+ HOST_WIDE_INT min_value = 0, max_value = 0;
-+ char size_str[3];
-+ int const_size;
-+
-+ size_str[0] = str[2];
-+ size_str[1] = str[3];
-+ size_str[2] = '\0';
-+ const_size = atoi (size_str);
-+
-+ if (toupper (str[1]) == 'U')
-+ {
-+ min_value = 0;
-+ max_value = (1 << const_size) - 1;
-+ }
-+ else if (toupper (str[1]) == 'S')
-+ {
-+ min_value = -(1 << (const_size - 1));
-+ max_value = (1 << (const_size - 1)) - 1;
-+ }
++ HOST_WIDE_INT min_value = 0, max_value = 0;
++ char size_str[3];
++ int const_size;
++
++ size_str[0] = str[2];
++ size_str[1] = str[3];
++ size_str[2] = '\0';
++ const_size = atoi (size_str);
++
++ if (toupper (str[1]) == 'U')
++ {
++ min_value = 0;
++ max_value = (1 << const_size) - 1;
++ }
++ else if (toupper (str[1]) == 'S')
++ {
++ min_value = -(1 << (const_size - 1));
++ max_value = (1 << (const_size - 1)) - 1;
++ }
+
-+ if (c == 'I')
-+ {
-+ value = -value;
-+ }
++ if (c == 'I')
++ {
++ value = -value;
++ }
+
-+ if (value >= min_value && value <= max_value)
-+ {
-+ return 1;
-+ }
-+ break;
++ if (value >= min_value && value <= max_value)
++ {
++ return 1;
++ }
++ break;
+ }
+ case 'M':
-+ return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
++ return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
++ case 'J':
++ return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
++ case 'O':
++ return one_bit_set_operand (GEN_INT (value), VOIDmode);
++ case 'N':
++ return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
++ case 'L':
++ /* The lower 16-bits are set. */
++ return ((value & 0xffff) == 0xffff) ;
+ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+
+/*Compute mask of which floating-point registers needs saving upon
+ entry to this function*/
+static unsigned long
+avr32_compute_save_fp_reg_mask (void)
-+ {
-+ unsigned long func_type = avr32_current_func_type ();
-+ unsigned int save_reg_mask = 0;
-+ unsigned int reg;
-+ unsigned int max_reg = 7;
-+ int save_all_call_used_regs = FALSE;
-+
-+ /* This only applies for hardware floating-point implementation. */
-+ if (!TARGET_HARD_FLOAT)
-+ return 0;
++{
++ unsigned long func_type = avr32_current_func_type ();
++ unsigned int save_reg_mask = 0;
++ unsigned int reg;
++ unsigned int max_reg = 7;
++ int save_all_call_used_regs = FALSE;
++
++ /* This only applies for hardware floating-point implementation. */
++ if (!TARGET_HARD_FLOAT)
++ return 0;
+
-+ if (IS_INTERRUPT (func_type))
-+ {
++ if (IS_INTERRUPT (func_type))
++ {
+
-+ /* Interrupt functions must not corrupt any registers, even call
++ /* Interrupt functions must not corrupt any registers, even call
+ clobbered ones. If this is a leaf function we can just examine the
+ registers used by the RTL, but otherwise we have to assume that
+ whatever function is called might clobber anything, and so we have
+ to save all the call-clobbered registers as well. */
-+ max_reg = 13;
-+ save_all_call_used_regs = !current_function_is_leaf;
-+ }
++ max_reg = 13;
++ save_all_call_used_regs = !current_function_is_leaf;
++ }
+
-+ /* All used registers used must be saved */
-+ for (reg = 0; reg <= max_reg; reg++)
-+ if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
-+ || (save_all_call_used_regs
-+ && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
-+ save_reg_mask |= (1 << reg);
++ /* All used registers used must be saved */
++ for (reg = 0; reg <= max_reg; reg++)
++ if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
++ || (save_all_call_used_regs
++ && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
++ save_reg_mask |= (1 << reg);
+
-+ return save_reg_mask;
-+ }
++ return save_reg_mask;
++}
+
+/*Compute mask of registers which needs saving upon function entry */
+static unsigned long
+avr32_compute_save_reg_mask (int push)
-+ {
-+ unsigned long func_type;
-+ unsigned int save_reg_mask = 0;
-+ unsigned int reg;
-+
-+ func_type = avr32_current_func_type ();
-+
-+ if (IS_INTERRUPT (func_type))
-+ {
-+ unsigned int max_reg = 12;
++{
++ unsigned long func_type;
++ unsigned int save_reg_mask = 0;
++ unsigned int reg;
+
++ func_type = avr32_current_func_type ();
+
-+ /* Get the banking scheme for the interrupt */
-+ switch (func_type)
-+ {
-+ case AVR32_FT_ISR_FULL:
-+ max_reg = 0;
-+ break;
-+ case AVR32_FT_ISR_HALF:
-+ max_reg = 7;
-+ break;
-+ case AVR32_FT_ISR_NONE:
-+ max_reg = 12;
-+ break;
-+ }
++ if (IS_INTERRUPT (func_type))
++ {
++ unsigned int max_reg = 12;
++
++
++ /* Get the banking scheme for the interrupt */
++ switch (func_type)
++ {
++ case AVR32_FT_ISR_FULL:
++ max_reg = 0;
++ break;
++ case AVR32_FT_ISR_HALF:
++ max_reg = 7;
++ break;
++ case AVR32_FT_ISR_NONE:
++ max_reg = 12;
++ break;
++ }
+
-+ /* Interrupt functions must not corrupt any registers, even call
++ /* Interrupt functions must not corrupt any registers, even call
+ clobbered ones. If this is a leaf function we can just examine the
+ registers used by the RTL, but otherwise we have to assume that
+ whatever function is called might clobber anything, and so we have
+ to save all the call-clobbered registers as well. */
+
-+ /* Need not push the registers r8-r12 for AVR32A architectures, as this
++ /* Need not push the registers r8-r12 for AVR32A architectures, as this
+ is automatially done in hardware. We also do not have any shadow
+ registers. */
-+ if (avr32_arch->uarch_type == UARCH_TYPE_AVR32A)
-+ {
-+ max_reg = 7;
-+ func_type = AVR32_FT_ISR_NONE;
-+ }
++ if (TARGET_UARCH_AVR32A)
++ {
++ max_reg = 7;
++ func_type = AVR32_FT_ISR_NONE;
++ }
+
-+ /* All registers which are used and is not shadowed must be saved */
-+ for (reg = 0; reg <= max_reg; reg++)
-+ if (regs_ever_live[INTERNAL_REGNUM (reg)]
-+ || (!current_function_is_leaf
-+ && call_used_regs[INTERNAL_REGNUM (reg)]))
-+ save_reg_mask |= (1 << reg);
-+
-+ /* Check LR */
-+ if ((regs_ever_live[LR_REGNUM]
-+ || !current_function_is_leaf || frame_pointer_needed)
-+ /* Only non-shadowed register models */
-+ && (func_type == AVR32_FT_ISR_NONE))
-+ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
-+
-+ /* Make sure that the GOT register is pushed. */
-+ if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
-+ && current_function_uses_pic_offset_table)
-+ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
++ /* All registers which are used and are not shadowed must be saved */
++ for (reg = 0; reg <= max_reg; reg++)
++ if (regs_ever_live[INTERNAL_REGNUM (reg)]
++ || (!current_function_is_leaf
++ && call_used_regs[INTERNAL_REGNUM (reg)]))
++ save_reg_mask |= (1 << reg);
++
++ /* Check LR */
++ if ((regs_ever_live[LR_REGNUM]
++ || !current_function_is_leaf || frame_pointer_needed)
++ /* Only non-shadowed register models */
++ && (func_type == AVR32_FT_ISR_NONE))
++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
++
++ /* Make sure that the GOT register is pushed. */
++ if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
++ && current_function_uses_pic_offset_table)
++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
+
-+ }
-+ else
-+ {
-+ int use_pushm = optimize_size;
++ }
++ else
++ {
++ int use_pushm = optimize_size;
+
-+ /* In the normal case we only need to save those registers which are
++ /* In the normal case we only need to save those registers which are
+ call saved and which are used by this function. */
-+ for (reg = 0; reg <= 7; reg++)
-+ if (regs_ever_live[INTERNAL_REGNUM (reg)]
-+ && !call_used_regs[INTERNAL_REGNUM (reg)])
-+ save_reg_mask |= (1 << reg);
++ for (reg = 0; reg <= 7; reg++)
++ if (regs_ever_live[INTERNAL_REGNUM (reg)]
++ && !call_used_regs[INTERNAL_REGNUM (reg)])
++ save_reg_mask |= (1 << reg);
+
-+ /* Make sure that the GOT register is pushed. */
-+ if (current_function_uses_pic_offset_table)
-+ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
++ /* Make sure that the GOT register is pushed. */
++ if (current_function_uses_pic_offset_table)
++ save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
+
+
-+ /* If we optimize for size and do not have anonymous arguments: use
++ /* If we optimize for size and do not have anonymous arguments: use
+ popm/pushm always */
-+ if (use_pushm)
-+ {
-+ if ((save_reg_mask & (1 << 0))
-+ || (save_reg_mask & (1 << 1))
-+ || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
-+ save_reg_mask |= 0xf;
-+
-+ if ((save_reg_mask & (1 << 4))
-+ || (save_reg_mask & (1 << 5))
-+ || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
-+ save_reg_mask |= 0xf0;
-+
-+ if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
-+ save_reg_mask |= 0x300;
-+ }
++ if (use_pushm)
++ {
++ if ((save_reg_mask & (1 << 0))
++ || (save_reg_mask & (1 << 1))
++ || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
++ save_reg_mask |= 0xf;
++
++ if ((save_reg_mask & (1 << 4))
++ || (save_reg_mask & (1 << 5))
++ || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
++ save_reg_mask |= 0xf0;
++
++ if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
++ save_reg_mask |= 0x300;
++ }
+
+
-+ /* Check LR */
-+ if ((regs_ever_live[LR_REGNUM]
-+ || !current_function_is_leaf
-+ || (optimize_size
-+ && save_reg_mask
-+ && !current_function_calls_eh_return) || frame_pointer_needed))
-+ {
-+ if (push
-+ /* Never pop LR into PC for functions which
++ /* Check LR */
++ if ((regs_ever_live[LR_REGNUM]
++ || !current_function_is_leaf
++ || (optimize_size
++ && save_reg_mask
++ && !current_function_calls_eh_return) || frame_pointer_needed))
++ {
++ if (push
++ /* Never pop LR into PC for functions which
+ calls __builtin_eh_return, since we need to
+ fix the SP after the restoring of the registers
+ and before returning. */
-+ || current_function_calls_eh_return)
-+ {
-+ /* Push/Pop LR */
-+ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
-+ }
-+ else
-+ {
-+ /* Pop PC */
-+ save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
-+ }
-+ }
-+ }
-+
++ || current_function_calls_eh_return)
++ {
++ /* Push/Pop LR */
++ save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
++ }
++ else
++ {
++ /* Pop PC */
++ save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
++ }
++ }
++ }
+
-+ /* Save registers so the exception handler can modify them. */
-+ if (current_function_calls_eh_return)
-+ {
-+ unsigned int i;
+
-+ for (i = 0;; i++)
-+ {
-+ reg = EH_RETURN_DATA_REGNO (i);
-+ if (reg == INVALID_REGNUM)
-+ break;
-+ save_reg_mask |= 1 << ASM_REGNUM (reg);
-+ }
-+ }
++ /* Save registers so the exception handler can modify them. */
++ if (current_function_calls_eh_return)
++ {
++ unsigned int i;
++
++ for (i = 0;; i++)
++ {
++ reg = EH_RETURN_DATA_REGNO (i);
++ if (reg == INVALID_REGNUM)
++ break;
++ save_reg_mask |= 1 << ASM_REGNUM (reg);
++ }
++ }
+
-+ return save_reg_mask;
-+ }
++ return save_reg_mask;
++}
+
+/*Compute total size in bytes of all saved registers */
+static int
+avr32_get_reg_mask_size (int reg_mask)
-+ {
-+ int reg, size;
-+ size = 0;
++{
++ int reg, size;
++ size = 0;
+
-+ for (reg = 0; reg <= 15; reg++)
-+ if (reg_mask & (1 << reg))
-+ size += 4;
++ for (reg = 0; reg <= 15; reg++)
++ if (reg_mask & (1 << reg))
++ size += 4;
+
-+ return size;
-+ }
++ return size;
++}
+
+/*Get a register from one of the registers which are saved onto the stack
+ upon function entry */
+
+static int
+avr32_get_saved_reg (int save_reg_mask)
-+ {
-+ unsigned int reg;
++{
++ unsigned int reg;
+
-+ /* Find the first register which is saved in the saved_reg_mask */
-+ for (reg = 0; reg <= 15; reg++)
-+ if (save_reg_mask & (1 << reg))
-+ return reg;
++ /* Find the first register which is saved in the saved_reg_mask */
++ for (reg = 0; reg <= 15; reg++)
++ if (save_reg_mask & (1 << reg))
++ return reg;
+
-+ return -1;
-+ }
++ return -1;
++}
+
+/* Return 1 if it is possible to return using a single instruction. */
+int
+avr32_use_return_insn (int iscond)
-+ {
-+ unsigned int func_type = avr32_current_func_type ();
-+ unsigned long saved_int_regs;
-+ unsigned long saved_fp_regs;
++{
++ unsigned int func_type = avr32_current_func_type ();
++ unsigned long saved_int_regs;
++ unsigned long saved_fp_regs;
+
-+ /* Never use a return instruction before reload has run. */
-+ if (!reload_completed)
-+ return 0;
++ /* Never use a return instruction before reload has run. */
++ if (!reload_completed)
++ return 0;
+
-+ /* Must adjust the stack for vararg functions. */
-+ if (current_function_args_info.uses_anonymous_args)
-+ return 0;
++ /* Must adjust the stack for vararg functions. */
++ if (current_function_args_info.uses_anonymous_args)
++ return 0;
+
-+ /* If there a stack adjstment. */
-+ if (get_frame_size ())
-+ return 0;
++ /* If there is a stack adjustment. */
++ if (get_frame_size ())
++ return 0;
+
-+ saved_int_regs = avr32_compute_save_reg_mask (TRUE);
-+ saved_fp_regs = avr32_compute_save_fp_reg_mask ();
++ saved_int_regs = avr32_compute_save_reg_mask (TRUE);
++ saved_fp_regs = avr32_compute_save_fp_reg_mask ();
+
-+ /* Functions which have saved fp-regs on the stack can not be performed in
++ /* Functions which have saved fp-regs on the stack can not be performed in
+ one instruction */
-+ if (saved_fp_regs)
-+ return 0;
++ if (saved_fp_regs)
++ return 0;
+
-+ /* Conditional returns can not be performed in one instruction if we need
++ /* Conditional returns can not be performed in one instruction if we need
+ to restore registers from the stack */
-+ if (iscond && saved_int_regs)
-+ return 0;
++ if (iscond && saved_int_regs)
++ return 0;
+
-+ /* Conditional return can not be used for interrupt handlers. */
-+ if (iscond && IS_INTERRUPT (func_type))
-+ return 0;
++ /* Conditional return can not be used for interrupt handlers. */
++ if (iscond && IS_INTERRUPT (func_type))
++ return 0;
+
-+ /* For interrupt handlers which needs to pop registers */
-+ if (saved_int_regs && IS_INTERRUPT (func_type))
-+ return 0;
++ /* For interrupt handlers which need to pop registers */
++ if (saved_int_regs && IS_INTERRUPT (func_type))
++ return 0;
+
+
-+ /* If there are saved registers but the LR isn't saved, then we need two
++ /* If there are saved registers but the LR isn't saved, then we need two
+ instructions for the return. */
-+ if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
-+ return 0;
++ if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
++ return 0;
+
+
-+ return 1;
-+ }
++ return 1;
++}
+
+
+/*Generate some function prologue info in the assembly file*/
+
+void
+avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
-+ {
-+ if (IS_NAKED (avr32_current_func_type ()))
-+ fprintf (f,
-+ "\t# Function is naked: Prologue and epilogue provided by programmer\n");
++{
++ if (IS_NAKED (avr32_current_func_type ()))
++ fprintf (f,
++ "\t# Function is naked: Prologue and epilogue provided by programmer\n");
+
-+ if (IS_INTERRUPT (avr32_current_func_type ()))
-+ {
-+ switch (avr32_current_func_type ())
-+ {
-+ case AVR32_FT_ISR_FULL:
-+ fprintf (f,
-+ "\t# Interrupt Function: Fully shadowed register file\n");
-+ break;
-+ case AVR32_FT_ISR_HALF:
-+ fprintf (f,
-+ "\t# Interrupt Function: Half shadowed register file\n");
-+ break;
-+ default:
-+ case AVR32_FT_ISR_NONE:
-+ fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
-+ break;
-+ }
-+ }
++ if (IS_INTERRUPT (avr32_current_func_type ()))
++ {
++ switch (avr32_current_func_type ())
++ {
++ case AVR32_FT_ISR_FULL:
++ fprintf (f,
++ "\t# Interrupt Function: Fully shadowed register file\n");
++ break;
++ case AVR32_FT_ISR_HALF:
++ fprintf (f,
++ "\t# Interrupt Function: Half shadowed register file\n");
++ break;
++ default:
++ case AVR32_FT_ISR_NONE:
++ fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
++ break;
++ }
++ }
+
+
-+ fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
-+ current_function_args_size, frame_size,
-+ current_function_pretend_args_size);
++ fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
++ current_function_args_size, frame_size,
++ current_function_pretend_args_size);
+
-+ fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
-+ frame_pointer_needed, current_function_is_leaf);
++ fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
++ frame_pointer_needed, current_function_is_leaf);
+
-+ fprintf (f, "\t# uses_anonymous_args = %i\n",
-+ current_function_args_info.uses_anonymous_args);
-+ if (current_function_calls_eh_return)
-+ fprintf (f, "\t# Calls __builtin_eh_return.\n");
++ fprintf (f, "\t# uses_anonymous_args = %i\n",
++ current_function_args_info.uses_anonymous_args);
++ if (current_function_calls_eh_return)
++ fprintf (f, "\t# Calls __builtin_eh_return.\n");
+
-+ }
++}
+
+
+/* Generate and emit an insn that we will recognize as a pushm or stm.
+
+static rtx
+emit_multi_reg_push (int reglist, int usePUSHM)
-+ {
-+ rtx insn;
-+ rtx dwarf;
-+ rtx tmp;
-+ rtx reg;
-+ int i;
-+ int nr_regs;
-+ int index = 0;
++{
++ rtx insn;
++ rtx dwarf;
++ rtx tmp;
++ rtx reg;
++ int i;
++ int nr_regs;
++ int index = 0;
+
-+ if (usePUSHM)
-+ {
-+ insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
-+ reglist = avr32_convert_to_reglist16 (reglist);
-+ }
-+ else
-+ {
-+ insn = emit_insn (gen_stm (stack_pointer_rtx,
-+ gen_rtx_CONST_INT (SImode, reglist),
-+ gen_rtx_CONST_INT (SImode, 1)));
-+ }
++ if (usePUSHM)
++ {
++ insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
++ reglist = avr32_convert_to_reglist16 (reglist);
++ }
++ else
++ {
++ insn = emit_insn (gen_stm (stack_pointer_rtx,
++ gen_rtx_CONST_INT (SImode, reglist),
++ gen_rtx_CONST_INT (SImode, 1)));
++ }
+
-+ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
-+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
+
-+ for (i = 15; i >= 0; i--)
-+ {
-+ if (reglist & (1 << i))
-+ {
-+ reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
-+ tmp = gen_rtx_SET (VOIDmode,
-+ gen_rtx_MEM (SImode,
-+ plus_constant (stack_pointer_rtx,
-+ 4 * index)), reg);
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 1 + index++) = tmp;
-+ }
-+ }
++ for (i = 15; i >= 0; i--)
++ {
++ if (reglist & (1 << i))
++ {
++ reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
++ tmp = gen_rtx_SET (VOIDmode,
++ gen_rtx_MEM (SImode,
++ plus_constant (stack_pointer_rtx,
++ 4 * index)), reg);
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
++ }
++ }
+
-+ tmp = gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode,
-+ stack_pointer_rtx,
-+ GEN_INT (-4 * nr_regs)));
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 0) = tmp;
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
-+ REG_NOTES (insn));
-+ return insn;
-+ }
++ tmp = gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ GEN_INT (-4 * nr_regs)));
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 0) = tmp;
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
++ REG_NOTES (insn));
++ return insn;
++}
+
+
+static rtx
+emit_multi_fp_reg_push (int reglist)
-+ {
-+ rtx insn;
-+ rtx dwarf;
-+ rtx tmp;
-+ rtx reg;
-+ int i;
-+ int nr_regs;
-+ int index = 0;
++{
++ rtx insn;
++ rtx dwarf;
++ rtx tmp;
++ rtx reg;
++ int i;
++ int nr_regs;
++ int index = 0;
+
-+ insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
-+ gen_rtx_CONST_INT (SImode, reglist),
-+ gen_rtx_CONST_INT (SImode, 1)));
++ insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
++ gen_rtx_CONST_INT (SImode, reglist),
++ gen_rtx_CONST_INT (SImode, 1)));
+
-+ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
-+ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
++ nr_regs = avr32_get_reg_mask_size (reglist) / 4;
++ dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
+
-+ for (i = 15; i >= 0; i--)
-+ {
-+ if (reglist & (1 << i))
-+ {
-+ reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
-+ tmp = gen_rtx_SET (VOIDmode,
-+ gen_rtx_MEM (SImode,
-+ plus_constant (stack_pointer_rtx,
-+ 4 * index)), reg);
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 1 + index++) = tmp;
-+ }
-+ }
++ for (i = 15; i >= 0; i--)
++ {
++ if (reglist & (1 << i))
++ {
++ reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
++ tmp = gen_rtx_SET (VOIDmode,
++ gen_rtx_MEM (SImode,
++ plus_constant (stack_pointer_rtx,
++ 4 * index)), reg);
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 1 + index++) = tmp;
++ }
++ }
+
-+ tmp = gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode,
-+ stack_pointer_rtx,
-+ GEN_INT (-4 * nr_regs)));
-+ RTX_FRAME_RELATED_P (tmp) = 1;
-+ XVECEXP (dwarf, 0, 0) = tmp;
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
-+ REG_NOTES (insn));
-+ return insn;
-+ }
++ tmp = gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ GEN_INT (-4 * nr_regs)));
++ RTX_FRAME_RELATED_P (tmp) = 1;
++ XVECEXP (dwarf, 0, 0) = tmp;
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
++ REG_NOTES (insn));
++ return insn;
++}
+
+rtx
+avr32_gen_load_multiple (rtx * regs, int count, rtx from,
-+ int write_back, int in_struct_p, int scalar_p)
-+ {
++ int write_back, int in_struct_p, int scalar_p)
++{
+
-+ rtx result;
-+ int i = 0, j;
++ rtx result;
++ int i = 0, j;
+
-+ result =
-+ gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
++ result =
++ gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
+
-+ if (write_back)
-+ {
-+ XVECEXP (result, 0, 0)
-+ = gen_rtx_SET (GET_MODE (from), from,
-+ plus_constant (from, count * 4));
-+ i = 1;
-+ count++;
-+ }
++ if (write_back)
++ {
++ XVECEXP (result, 0, 0)
++ = gen_rtx_SET (GET_MODE (from), from,
++ plus_constant (from, count * 4));
++ i = 1;
++ count++;
++ }
+
+
-+ for (j = 0; i < count; i++, j++)
-+ {
-+ rtx unspec;
-+ rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
-+ MEM_IN_STRUCT_P (mem) = in_struct_p;
-+ MEM_SCALAR_P (mem) = scalar_p;
-+ unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
-+ XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
-+ }
++ for (j = 0; i < count; i++, j++)
++ {
++ rtx unspec;
++ rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
++ MEM_IN_STRUCT_P (mem) = in_struct_p;
++ MEM_SCALAR_P (mem) = scalar_p;
++ unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
++ XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
++ }
+
-+ return result;
-+ }
++ return result;
++}
+
+
+rtx
+avr32_gen_store_multiple (rtx * regs, int count, rtx to,
-+ int in_struct_p, int scalar_p)
-+ {
-+ rtx result;
-+ int i = 0, j;
++ int in_struct_p, int scalar_p)
++{
++ rtx result;
++ int i = 0, j;
+
-+ result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
++ result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
-+ for (j = 0; i < count; i++, j++)
-+ {
-+ rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
-+ MEM_IN_STRUCT_P (mem) = in_struct_p;
-+ MEM_SCALAR_P (mem) = scalar_p;
-+ XVECEXP (result, 0, i)
-+ = gen_rtx_SET (VOIDmode, mem,
-+ gen_rtx_UNSPEC (VOIDmode,
-+ gen_rtvec (1, regs[j]),
-+ UNSPEC_STORE_MULTIPLE));
-+ }
++ for (j = 0; i < count; i++, j++)
++ {
++ rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
++ MEM_IN_STRUCT_P (mem) = in_struct_p;
++ MEM_SCALAR_P (mem) = scalar_p;
++ XVECEXP (result, 0, i)
++ = gen_rtx_SET (VOIDmode, mem,
++ gen_rtx_UNSPEC (VOIDmode,
++ gen_rtvec (1, regs[j]),
++ UNSPEC_STORE_MULTIPLE));
++ }
+
-+ return result;
-+ }
++ return result;
++}
+
+
+/* Move a block of memory if it is word aligned or we support unaligned
+
+int
+avr32_gen_movmemsi (rtx * operands)
-+ {
-+ HOST_WIDE_INT bytes_to_go;
-+ rtx src, dst;
-+ rtx st_src, st_dst;
-+ int ptr_offset = 0;
-+ int block_size;
-+ int dst_in_struct_p, src_in_struct_p;
-+ int dst_scalar_p, src_scalar_p;
-+ int unaligned;
-+
-+ if (GET_CODE (operands[2]) != CONST_INT
-+ || GET_CODE (operands[3]) != CONST_INT
-+ || INTVAL (operands[2]) > 64
-+ || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
-+ return 0;
++{
++ HOST_WIDE_INT bytes_to_go;
++ rtx src, dst;
++ rtx st_src, st_dst;
++ int src_offset = 0, dst_offset = 0;
++ int block_size;
++ int dst_in_struct_p, src_in_struct_p;
++ int dst_scalar_p, src_scalar_p;
++ int unaligned;
++
++ if (GET_CODE (operands[2]) != CONST_INT
++ || GET_CODE (operands[3]) != CONST_INT
++ || INTVAL (operands[2]) > 64
++ || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
++ return 0;
+
-+ unaligned = (INTVAL (operands[3]) & 3) != 0;
++ unaligned = (INTVAL (operands[3]) & 3) != 0;
+
-+ block_size = 4;
++ block_size = 4;
+
-+ st_dst = XEXP (operands[0], 0);
-+ st_src = XEXP (operands[1], 0);
++ st_dst = XEXP (operands[0], 0);
++ st_src = XEXP (operands[1], 0);
+
-+ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
-+ dst_scalar_p = MEM_SCALAR_P (operands[0]);
-+ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
-+ src_scalar_p = MEM_SCALAR_P (operands[1]);
++ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
++ dst_scalar_p = MEM_SCALAR_P (operands[0]);
++ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
++ src_scalar_p = MEM_SCALAR_P (operands[1]);
+
-+ dst = copy_to_mode_reg (SImode, st_dst);
-+ src = copy_to_mode_reg (SImode, st_src);
++ dst = copy_to_mode_reg (SImode, st_dst);
++ src = copy_to_mode_reg (SImode, st_src);
+
-+ bytes_to_go = INTVAL (operands[2]);
++ bytes_to_go = INTVAL (operands[2]);
+
-+ while (bytes_to_go)
-+ {
-+ enum machine_mode move_mode;
-+ /* (Seems to be a problem with reloads for the movti pattern so this is
-+ disabled until that problem is resolved)
++ while (bytes_to_go)
++ {
++ enum machine_mode move_mode;
++ /* (Seems to be a problem with reloads for the movti pattern so this is
++ disabled until that problem is resolved)
+ UPDATE: Problem seems to be solved now.... */
-+ if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
-+ /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
-+ && avr32_arch->arch_type != ARCH_TYPE_AVR32_UC)
-+ move_mode = TImode;
-+ else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
-+ move_mode = DImode;
-+ else if (bytes_to_go >= GET_MODE_SIZE (SImode))
-+ move_mode = SImode;
-+ else
-+ move_mode = QImode;
++ if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
++ /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
++ && !TARGET_ARCH_UC)
++ move_mode = TImode;
++ else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
++ move_mode = DImode;
++ else if (bytes_to_go >= GET_MODE_SIZE (SImode))
++ move_mode = SImode;
++ else
++ move_mode = QImode;
+
++ {
++ rtx src_mem;
++ rtx dst_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_PLUS (SImode, dst,
++ GEN_INT (dst_offset)));
++ dst_offset += GET_MODE_SIZE (move_mode);
++ if ( 0 /* This causes an error in GCC. Think there is
++ something wrong in the gcse pass which causes REQ_EQUIV notes
++ to be wrong so disabling it for now. */
++ && move_mode == TImode
++ && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
+ {
-+ rtx dst_mem = gen_rtx_MEM (move_mode,
-+ gen_rtx_PLUS (SImode, dst,
-+ GEN_INT (ptr_offset)));
-+ rtx src_mem = gen_rtx_MEM (move_mode,
-+ gen_rtx_PLUS (SImode, src,
-+ GEN_INT (ptr_offset)));
-+ ptr_offset += GET_MODE_SIZE (move_mode);
-+ bytes_to_go -= GET_MODE_SIZE (move_mode);
-+
-+ MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
-+ MEM_SCALAR_P (dst_mem) = dst_scalar_p;
-+
-+ MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
-+ MEM_SCALAR_P (src_mem) = src_scalar_p;
-+ emit_move_insn (dst_mem, src_mem);
-+
++ src_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_POST_INC (SImode, src));
++ }
++ else
++ {
++ src_mem = gen_rtx_MEM (move_mode,
++ gen_rtx_PLUS (SImode, src,
++ GEN_INT (src_offset)));
++ src_offset += GET_MODE_SIZE (move_mode);
+ }
++
++ bytes_to_go -= GET_MODE_SIZE (move_mode);
++
++ MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
++ MEM_SCALAR_P (dst_mem) = dst_scalar_p;
++
++ MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
++ MEM_SCALAR_P (src_mem) = src_scalar_p;
++ emit_move_insn (dst_mem, src_mem);
++
+ }
++ }
+
-+ return 1;
-+ }
++ return 1;
++}
+
+
+
+/*Expand the prologue instruction*/
+void
+avr32_expand_prologue (void)
-+ {
-+ rtx insn, dwarf;
-+ unsigned long saved_reg_mask, saved_fp_reg_mask;
-+ int reglist8 = 0;
++{
++ rtx insn, dwarf;
++ unsigned long saved_reg_mask, saved_fp_reg_mask;
++ int reglist8 = 0;
+
-+ /* Naked functions does not have a prologue */
-+ if (IS_NAKED (avr32_current_func_type ()))
-+ return;
++ /* Naked functions do not have a prologue */
++ if (IS_NAKED (avr32_current_func_type ()))
++ return;
+
-+ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
+
-+ if (saved_reg_mask)
-+ {
-+ /* Must push used registers */
-+
-+ /* Should we use POPM or LDM? */
-+ int usePUSHM = TRUE;
-+ reglist8 = 0;
-+ if (((saved_reg_mask & (1 << 0)) ||
-+ (saved_reg_mask & (1 << 1)) ||
-+ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
-+ {
-+ /* One of R0-R3 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 0)) &&
-+ (saved_reg_mask & (1 << 1)) &&
-+ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
-+ {
-+ /* All should be pushed */
-+ reglist8 |= 0x01;
-+ }
-+ else
-+ {
-+ usePUSHM = FALSE;
-+ }
-+ }
++ if (saved_reg_mask)
++ {
++ /* Must push used registers */
++
++ /* Should we use POPM or LDM? */
++ int usePUSHM = TRUE;
++ reglist8 = 0;
++ if (((saved_reg_mask & (1 << 0)) ||
++ (saved_reg_mask & (1 << 1)) ||
++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
++ {
++ /* One of R0-R3 should at least be pushed */
++ if (((saved_reg_mask & (1 << 0)) &&
++ (saved_reg_mask & (1 << 1)) &&
++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
++ {
++ /* All should be pushed */
++ reglist8 |= 0x01;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 4)) ||
-+ (saved_reg_mask & (1 << 5)) ||
-+ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
-+ {
-+ /* One of R4-R7 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 4)) &&
-+ (saved_reg_mask & (1 << 5)) &&
-+ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
-+ {
-+ if (usePUSHM)
-+ /* All should be pushed */
-+ reglist8 |= 0x02;
-+ }
-+ else
-+ {
-+ usePUSHM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 4)) ||
++ (saved_reg_mask & (1 << 5)) ||
++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
++ {
++ /* One of R4-R7 should at least be pushed */
++ if (((saved_reg_mask & (1 << 4)) &&
++ (saved_reg_mask & (1 << 5)) &&
++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
++ {
++ if (usePUSHM)
++ /* All should be pushed */
++ reglist8 |= 0x02;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
-+ {
-+ /* One of R8-R9 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
-+ {
-+ if (usePUSHM)
-+ /* All should be pushed */
-+ reglist8 |= 0x04;
-+ }
-+ else
-+ {
-+ usePUSHM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
++ {
++ /* One of R8-R9 should at least be pushed */
++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
++ {
++ if (usePUSHM)
++ /* All should be pushed */
++ reglist8 |= 0x04;
++ }
++ else
++ {
++ usePUSHM = FALSE;
++ }
++ }
+
-+ if (saved_reg_mask & (1 << 10))
-+ reglist8 |= 0x08;
++ if (saved_reg_mask & (1 << 10))
++ reglist8 |= 0x08;
+
-+ if (saved_reg_mask & (1 << 11))
-+ reglist8 |= 0x10;
++ if (saved_reg_mask & (1 << 11))
++ reglist8 |= 0x10;
+
-+ if (saved_reg_mask & (1 << 12))
-+ reglist8 |= 0x20;
++ if (saved_reg_mask & (1 << 12))
++ reglist8 |= 0x20;
+
-+ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
-+ {
-+ /* Push LR */
-+ reglist8 |= 0x40;
-+ }
++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
++ {
++ /* Push LR */
++ reglist8 |= 0x40;
++ }
+
-+ if (usePUSHM)
-+ {
-+ insn = emit_multi_reg_push (reglist8, TRUE);
-+ }
-+ else
-+ {
-+ insn = emit_multi_reg_push (saved_reg_mask, FALSE);
-+ }
-+ RTX_FRAME_RELATED_P (insn) = 1;
++ if (usePUSHM)
++ {
++ insn = emit_multi_reg_push (reglist8, TRUE);
++ }
++ else
++ {
++ insn = emit_multi_reg_push (saved_reg_mask, FALSE);
++ }
++ RTX_FRAME_RELATED_P (insn) = 1;
+
-+ /* Prevent this instruction from being scheduled after any other
++ /* Prevent this instruction from being scheduled after any other
+ instructions. */
-+ emit_insn (gen_blockage ());
-+ }
++ emit_insn (gen_blockage ());
++ }
+
-+ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
-+ if (saved_fp_reg_mask)
-+ {
-+ insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
-+ RTX_FRAME_RELATED_P (insn) = 1;
++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
++ if (saved_fp_reg_mask)
++ {
++ insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
++ RTX_FRAME_RELATED_P (insn) = 1;
+
-+ /* Prevent this instruction from being scheduled after any other
++ /* Prevent this instruction from being scheduled after any other
+ instructions. */
-+ emit_insn (gen_blockage ());
-+ }
++ emit_insn (gen_blockage ());
++ }
+
-+ /* Set frame pointer */
-+ if (frame_pointer_needed)
-+ {
-+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+ }
++ /* Set frame pointer */
++ if (frame_pointer_needed)
++ {
++ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
+
-+ if (get_frame_size () > 0)
-+ {
-+ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
-+ {
-+ insn = emit_insn (gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_CONST_INT
-+ (SImode,
-+ -get_frame_size
-+ ()))));
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+ }
-+ else
-+ {
-+ /* Immediate is larger than k21 We must either check if we can use
++ if (get_frame_size () > 0)
++ {
++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
++ {
++ insn = emit_insn (gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_PLUS (SImode,
++ stack_pointer_rtx,
++ gen_rtx_CONST_INT
++ (SImode,
++ -get_frame_size
++ ()))));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ }
++ else
++ {
++ /* Immediate is larger than k21. We must either check if we can use
+ one of the pushed reegisters as temporary storage or we must
+ make us a temp register by pushing a register to the stack. */
-+ rtx temp_reg, const_pool_entry, insn;
-+ if (saved_reg_mask)
-+ {
-+ temp_reg =
-+ gen_rtx_REG (SImode,
-+ INTERNAL_REGNUM (avr32_get_saved_reg
-+ (saved_reg_mask)));
-+ }
-+ else
-+ {
-+ temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
-+ emit_move_insn (gen_rtx_MEM
-+ (SImode,
-+ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
-+ temp_reg);
-+ }
++ rtx temp_reg, const_pool_entry, insn;
++ if (saved_reg_mask)
++ {
++ temp_reg =
++ gen_rtx_REG (SImode,
++ INTERNAL_REGNUM (avr32_get_saved_reg
++ (saved_reg_mask)));
++ }
++ else
++ {
++ temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
++ emit_move_insn (gen_rtx_MEM
++ (SImode,
++ gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
++ temp_reg);
++ }
+
-+ const_pool_entry =
-+ force_const_mem (SImode,
-+ gen_rtx_CONST_INT (SImode, get_frame_size ()));
-+ emit_move_insn (temp_reg, const_pool_entry);
-+
-+ insn = emit_insn (gen_rtx_SET (SImode,
-+ stack_pointer_rtx,
-+ gen_rtx_MINUS (SImode,
-+ stack_pointer_rtx,
-+ temp_reg)));
-+
-+ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
-+ gen_rtx_PLUS (SImode, stack_pointer_rtx,
-+ GEN_INT (-get_frame_size ())));
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
-+ dwarf, REG_NOTES (insn));
-+ RTX_FRAME_RELATED_P (insn) = 1;
-+
-+ if (!saved_reg_mask)
-+ {
-+ insn =
-+ emit_move_insn (temp_reg,
-+ gen_rtx_MEM (SImode,
-+ gen_rtx_POST_INC (SImode,
-+ gen_rtx_REG
-+ (SImode,
-+ 13))));
-+ }
++ const_pool_entry =
++ force_const_mem (SImode,
++ gen_rtx_CONST_INT (SImode, get_frame_size ()));
++ emit_move_insn (temp_reg, const_pool_entry);
++
++ insn = emit_insn (gen_rtx_SET (SImode,
++ stack_pointer_rtx,
++ gen_rtx_MINUS (SImode,
++ stack_pointer_rtx,
++ temp_reg)));
++
++ dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
++ gen_rtx_PLUS (SImode, stack_pointer_rtx,
++ GEN_INT (-get_frame_size ())));
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ dwarf, REG_NOTES (insn));
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ if (!saved_reg_mask)
++ {
++ insn =
++ emit_move_insn (temp_reg,
++ gen_rtx_MEM (SImode,
++ gen_rtx_POST_INC (SImode,
++ gen_rtx_REG
++ (SImode,
++ 13))));
++ }
+
-+ /* Mark the temp register as dead */
-+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
-+ REG_NOTES (insn));
++ /* Mark the temp register as dead */
++ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
++ REG_NOTES (insn));
+
+
-+ }
++ }
+
-+ /* Prevent the the stack adjustment to be scheduled after any
++ /* Prevent the stack adjustment to be scheduled after any
+ instructions using the frame pointer. */
-+ emit_insn (gen_blockage ());
-+ }
++ emit_insn (gen_blockage ());
++ }
+
-+ /* Load GOT */
-+ if (flag_pic)
-+ {
-+ avr32_load_pic_register ();
++ /* Load GOT */
++ if (flag_pic)
++ {
++ avr32_load_pic_register ();
+
-+ /* gcc does not know that load or call instructions might use the pic
++ /* gcc does not know that load or call instructions might use the pic
+ register so it might schedule these instructions before the loading
+ of the pic register. To avoid this emit a barrier for now. TODO!
+ Find out a better way to let gcc know which instructions might use
+ the pic register. */
-+ emit_insn (gen_blockage ());
-+ }
-+ return;
-+ }
++ emit_insn (gen_blockage ());
++ }
++ return;
++}
+
+void
+avr32_set_return_address (rtx source, rtx scratch)
-+ {
-+ rtx addr;
-+ unsigned long saved_regs;
++{
++ rtx addr;
++ unsigned long saved_regs;
+
-+ saved_regs = avr32_compute_save_reg_mask (TRUE);
++ saved_regs = avr32_compute_save_reg_mask (TRUE);
+
-+ if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
-+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
-+ else
-+ {
-+ if (frame_pointer_needed)
-+ addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
-+ else
-+ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
-+ {
-+ addr = plus_constant (stack_pointer_rtx, get_frame_size ());
-+ }
-+ else
-+ {
-+ emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
-+ addr = scratch;
-+ }
-+ emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
-+ }
-+ }
++ if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
++ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
++ else
++ {
++ if (frame_pointer_needed)
++ addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
++ else
++ if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
++ {
++ addr = plus_constant (stack_pointer_rtx, get_frame_size ());
++ }
++ else
++ {
++ emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
++ addr = scratch;
++ }
++ emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
++ }
++}
+
+
+
+
+int
+avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
-+ int length ATTRIBUTE_UNUSED)
-+ {
-+ return length;
-+ }
++ int length ATTRIBUTE_UNUSED)
++{
++ return length;
++}
+
+void
+avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
-+ int iscond ATTRIBUTE_UNUSED,
-+ rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
-+ {
-+
-+ unsigned long saved_reg_mask, saved_fp_reg_mask;
-+ int insert_ret = TRUE;
-+ int reglist8 = 0;
-+ int stack_adjustment = get_frame_size ();
-+ unsigned int func_type = avr32_current_func_type ();
-+ FILE *f = asm_out_file;
++ int iscond ATTRIBUTE_UNUSED,
++ rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
++{
+
-+ /* Naked functions does not have an epilogue */
-+ if (IS_NAKED (func_type))
-+ return;
++ unsigned long saved_reg_mask, saved_fp_reg_mask;
++ int insert_ret = TRUE;
++ int reglist8 = 0;
++ int stack_adjustment = get_frame_size ();
++ unsigned int func_type = avr32_current_func_type ();
++ FILE *f = asm_out_file;
+
-+ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
++ /* Naked functions do not have an epilogue */
++ if (IS_NAKED (func_type))
++ return;
+
-+ saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+
-+ /* Reset frame pointer */
-+ if (stack_adjustment > 0)
-+ {
-+ if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
-+ {
-+ fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
-+ -stack_adjustment);
-+ }
-+ else
-+ {
-+ /* TODO! Is it safe to use r8 as scratch?? */
-+ fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
-+ -stack_adjustment);
-+ fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
-+ -stack_adjustment);
-+ fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
-+ }
-+ }
++ saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
+
-+ if (saved_fp_reg_mask)
-+ {
-+ char reglist[64]; /* 64 bytes should be enough... */
-+ avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
-+ fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
-+ if (saved_fp_reg_mask & ~0xff)
-+ {
-+ saved_fp_reg_mask &= ~0xff;
-+ avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
-+ fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
-+ }
-+ }
++ /* Reset frame pointer */
++ if (stack_adjustment > 0)
++ {
++ if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
++ {
++ fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
++ -stack_adjustment);
++ }
++ else
++ {
++ /* TODO! Is it safe to use r8 as scratch?? */
++ fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
++ -stack_adjustment);
++ fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
++ -stack_adjustment);
++ fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
++ }
++ }
+
-+ if (saved_reg_mask)
-+ {
-+ /* Must pop used registers */
++ if (saved_fp_reg_mask)
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
++ fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
++ if (saved_fp_reg_mask & ~0xff)
++ {
++ saved_fp_reg_mask &= ~0xff;
++ avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
++ fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
++ }
++ }
+
-+ /* Should we use POPM or LDM? */
-+ int usePOPM = TRUE;
-+ if (((saved_reg_mask & (1 << 0)) ||
-+ (saved_reg_mask & (1 << 1)) ||
-+ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
-+ {
-+ /* One of R0-R3 should at least be popped */
-+ if (((saved_reg_mask & (1 << 0)) &&
-+ (saved_reg_mask & (1 << 1)) &&
-+ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
-+ {
-+ /* All should be popped */
-+ reglist8 |= 0x01;
-+ }
-+ else
-+ {
-+ usePOPM = FALSE;
-+ }
-+ }
++ if (saved_reg_mask)
++ {
++ /* Must pop used registers */
++
++ /* Should we use POPM or LDM? */
++ int usePOPM = TRUE;
++ if (((saved_reg_mask & (1 << 0)) ||
++ (saved_reg_mask & (1 << 1)) ||
++ (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
++ {
++ /* One of R0-R3 should at least be popped */
++ if (((saved_reg_mask & (1 << 0)) &&
++ (saved_reg_mask & (1 << 1)) &&
++ (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
++ {
++ /* All should be popped */
++ reglist8 |= 0x01;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 4)) ||
-+ (saved_reg_mask & (1 << 5)) ||
-+ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
-+ {
-+ /* One of R0-R3 should at least be popped */
-+ if (((saved_reg_mask & (1 << 4)) &&
-+ (saved_reg_mask & (1 << 5)) &&
-+ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
-+ {
-+ if (usePOPM)
-+ /* All should be popped */
-+ reglist8 |= 0x02;
-+ }
-+ else
-+ {
-+ usePOPM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 4)) ||
++ (saved_reg_mask & (1 << 5)) ||
++ (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
++ {
++ /* One of R4-R7 should at least be popped */
++ if (((saved_reg_mask & (1 << 4)) &&
++ (saved_reg_mask & (1 << 5)) &&
++ (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
++ {
++ if (usePOPM)
++ /* All should be popped */
++ reglist8 |= 0x02;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
+
-+ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
-+ {
-+ /* One of R8-R9 should at least be pushed */
-+ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
-+ {
-+ if (usePOPM)
-+ /* All should be pushed */
-+ reglist8 |= 0x04;
-+ }
-+ else
-+ {
-+ usePOPM = FALSE;
-+ }
-+ }
++ if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
++ {
++ /* One of R8-R9 should at least be popped */
++ if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
++ {
++ if (usePOPM)
++ /* All should be popped */
++ reglist8 |= 0x04;
++ }
++ else
++ {
++ usePOPM = FALSE;
++ }
++ }
+
-+ if (saved_reg_mask & (1 << 10))
-+ reglist8 |= 0x08;
++ if (saved_reg_mask & (1 << 10))
++ reglist8 |= 0x08;
+
-+ if (saved_reg_mask & (1 << 11))
-+ reglist8 |= 0x10;
++ if (saved_reg_mask & (1 << 11))
++ reglist8 |= 0x10;
+
-+ if (saved_reg_mask & (1 << 12))
-+ reglist8 |= 0x20;
++ if (saved_reg_mask & (1 << 12))
++ reglist8 |= 0x20;
+
-+ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
-+ /* Pop LR */
-+ reglist8 |= 0x40;
++ if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
++ /* Pop LR */
++ reglist8 |= 0x40;
+
-+ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
-+ /* Pop LR into PC. */
-+ reglist8 |= 0x80;
++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
++ /* Pop LR into PC. */
++ reglist8 |= 0x80;
+
-+ if (usePOPM)
-+ {
-+ char reglist[64]; /* 64 bytes should be enough... */
-+ avr32_make_reglist8 (reglist8, (char *) reglist);
++ if (usePOPM)
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_reglist8 (reglist8, (char *) reglist);
+
-+ if (reglist8 & 0x80)
-+ /* This instruction is also a return */
-+ insert_ret = FALSE;
++ if (reglist8 & 0x80)
++ /* This instruction is also a return */
++ insert_ret = FALSE;
+
-+ if (r12_imm && !insert_ret)
-+ fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
-+ else
-+ fprintf (f, "\tpopm\t%s\n", reglist);
++ if (r12_imm && !insert_ret)
++ fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
++ else
++ fprintf (f, "\tpopm\t%s\n", reglist);
+
-+ }
-+ else
-+ {
-+ char reglist[64]; /* 64 bytes should be enough... */
-+ avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
-+ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
-+ /* This instruction is also a return */
-+ insert_ret = FALSE;
-+
-+ if (r12_imm && !insert_ret)
-+ fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
-+ INTVAL (r12_imm));
-+ else
-+ fprintf (f, "\tldm\tsp++, %s\n", reglist);
++ }
++ else
++ {
++ char reglist[64]; /* 64 bytes should be enough... */
++ avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
++ if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
++ /* This instruction is also a return */
++ insert_ret = FALSE;
++
++ if (r12_imm && !insert_ret)
++ fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
++ INTVAL (r12_imm));
++ else
++ fprintf (f, "\tldm\tsp++, %s\n", reglist);
+
-+ }
++ }
+
-+ }
++ }
+
-+ /* Stack adjustment for exception handler. */
-+ if (current_function_calls_eh_return)
-+ fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
++ /* Stack adjustment for exception handler. */
++ if (current_function_calls_eh_return)
++ fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
+
+
-+ if (IS_INTERRUPT (func_type))
-+ {
-+ fprintf (f, "\trete\n");
-+ }
-+ else if (insert_ret)
-+ {
-+ if (r12_imm)
-+ fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
-+ else
-+ fprintf (f, "\tretal\tr12\n");
-+ }
-+ }
++ if (IS_INTERRUPT (func_type))
++ {
++ fprintf (f, "\trete\n");
++ }
++ else if (insert_ret)
++ {
++ if (r12_imm)
++ fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
++ else
++ fprintf (f, "\tretal\tr12\n");
++ }
++}
+
+/* Function for converting a fp-register mask to a
+ reglistCPD8 register list string. */
+void
+avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
-+ {
-+ int i;
++{
++ int i;
+
-+ /* Make sure reglist_string is empty */
-+ reglist_string[0] = '\0';
++ /* Make sure reglist_string is empty */
++ reglist_string[0] = '\0';
+
-+ for (i = 0; i < NUM_FP_REGS; i += 2)
-+ {
-+ if (reglist_mask & (1 << i))
-+ {
-+ strlen (reglist_string) ?
-+ sprintf (reglist_string, "%s, %s-%s", reglist_string,
-+ reg_names[INTERNAL_FP_REGNUM (i)],
-+ reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
-+ sprintf (reglist_string, "%s-%s",
-+ reg_names[INTERNAL_FP_REGNUM (i)],
-+ reg_names[INTERNAL_FP_REGNUM (i + 1)]);
-+ }
-+ }
-+ }
++ for (i = 0; i < NUM_FP_REGS; i += 2)
++ {
++ if (reglist_mask & (1 << i))
++ {
++ strlen (reglist_string) ?
++ sprintf (reglist_string, "%s, %s-%s", reglist_string,
++ reg_names[INTERNAL_FP_REGNUM (i)],
++ reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
++ sprintf (reglist_string, "%s-%s",
++ reg_names[INTERNAL_FP_REGNUM (i)],
++ reg_names[INTERNAL_FP_REGNUM (i + 1)]);
++ }
++ }
++}
+
+/* Function for converting a fp-register mask to a
+ reglistCP8 register list string. */
+void
+avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
-+ {
-+ int i;
++{
++ int i;
+
-+ /* Make sure reglist_string is empty */
-+ reglist_string[0] = '\0';
++ /* Make sure reglist_string is empty */
++ reglist_string[0] = '\0';
+
-+ for (i = 0; i < NUM_FP_REGS; ++i)
-+ {
-+ if (reglist_mask & (1 << i))
-+ {
-+ strlen (reglist_string) ?
-+ sprintf (reglist_string, "%s, %s", reglist_string,
-+ reg_names[INTERNAL_FP_REGNUM (i)]) :
-+ sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
-+ }
-+ }
-+ }
++ for (i = 0; i < NUM_FP_REGS; ++i)
++ {
++ if (reglist_mask & (1 << i))
++ {
++ strlen (reglist_string) ?
++ sprintf (reglist_string, "%s, %s", reglist_string,
++ reg_names[INTERNAL_FP_REGNUM (i)]) :
++ sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
++ }
++ }
++}
+
+void
+avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
-+ {
-+ int i;
++{
++ int i;
+
-+ /* Make sure reglist16_string is empty */
-+ reglist16_string[0] = '\0';
++ /* Make sure reglist16_string is empty */
++ reglist16_string[0] = '\0';
+
-+ for (i = 0; i < 16; ++i)
-+ {
-+ if (reglist16_vect & (1 << i))
-+ {
-+ strlen (reglist16_string) ?
-+ sprintf (reglist16_string, "%s, %s", reglist16_string,
-+ reg_names[INTERNAL_REGNUM (i)]) :
-+ sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
-+ }
-+ }
-+ }
++ for (i = 0; i < 16; ++i)
++ {
++ if (reglist16_vect & (1 << i))
++ {
++ strlen (reglist16_string) ?
++ sprintf (reglist16_string, "%s, %s", reglist16_string,
++ reg_names[INTERNAL_REGNUM (i)]) :
++ sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
++ }
++ }
++}
+
+int
+avr32_convert_to_reglist16 (int reglist8_vect)
-+ {
-+ int reglist16_vect = 0;
-+ if (reglist8_vect & 0x1)
-+ reglist16_vect |= 0xF;
-+ if (reglist8_vect & 0x2)
-+ reglist16_vect |= 0xF0;
-+ if (reglist8_vect & 0x4)
-+ reglist16_vect |= 0x300;
-+ if (reglist8_vect & 0x8)
-+ reglist16_vect |= 0x400;
-+ if (reglist8_vect & 0x10)
-+ reglist16_vect |= 0x800;
-+ if (reglist8_vect & 0x20)
-+ reglist16_vect |= 0x1000;
-+ if (reglist8_vect & 0x40)
-+ reglist16_vect |= 0x4000;
-+ if (reglist8_vect & 0x80)
-+ reglist16_vect |= 0x8000;
-+
-+ return reglist16_vect;
-+ }
++{
++ int reglist16_vect = 0;
++ if (reglist8_vect & 0x1)
++ reglist16_vect |= 0xF;
++ if (reglist8_vect & 0x2)
++ reglist16_vect |= 0xF0;
++ if (reglist8_vect & 0x4)
++ reglist16_vect |= 0x300;
++ if (reglist8_vect & 0x8)
++ reglist16_vect |= 0x400;
++ if (reglist8_vect & 0x10)
++ reglist16_vect |= 0x800;
++ if (reglist8_vect & 0x20)
++ reglist16_vect |= 0x1000;
++ if (reglist8_vect & 0x40)
++ reglist16_vect |= 0x4000;
++ if (reglist8_vect & 0x80)
++ reglist16_vect |= 0x8000;
++
++ return reglist16_vect;
++}
+
+void
+avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
-+ {
-+ /* Make sure reglist8_string is empty */
-+ reglist8_string[0] = '\0';
-+
-+ if (reglist8_vect & 0x1)
-+ sprintf (reglist8_string, "r0-r3");
-+ if (reglist8_vect & 0x2)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r4-r7");
-+ if (reglist8_vect & 0x4)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r8-r9");
-+ if (reglist8_vect & 0x8)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r10");
-+ if (reglist8_vect & 0x10)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r11");
-+ if (reglist8_vect & 0x20)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "r12");
-+ if (reglist8_vect & 0x40)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "lr");
-+ if (reglist8_vect & 0x80)
-+ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
-+ reglist8_string) :
-+ sprintf (reglist8_string, "pc");
-+ }
++{
++ /* Make sure reglist8_string is empty */
++ reglist8_string[0] = '\0';
++
++ if (reglist8_vect & 0x1)
++ sprintf (reglist8_string, "r0-r3");
++ if (reglist8_vect & 0x2)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
++ reglist8_string) :
++ sprintf (reglist8_string, "r4-r7");
++ if (reglist8_vect & 0x4)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
++ reglist8_string) :
++ sprintf (reglist8_string, "r8-r9");
++ if (reglist8_vect & 0x8)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
++ reglist8_string) :
++ sprintf (reglist8_string, "r10");
++ if (reglist8_vect & 0x10)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
++ reglist8_string) :
++ sprintf (reglist8_string, "r11");
++ if (reglist8_vect & 0x20)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
++ reglist8_string) :
++ sprintf (reglist8_string, "r12");
++ if (reglist8_vect & 0x40)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
++ reglist8_string) :
++ sprintf (reglist8_string, "lr");
++ if (reglist8_vect & 0x80)
++ strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
++ reglist8_string) :
++ sprintf (reglist8_string, "pc");
++}
+
+int
+avr32_eh_return_data_regno (int n)
-+ {
-+ if (n >= 0 && n <= 3)
-+ return 8 + n;
-+ else
-+ return INVALID_REGNUM;
-+ }
++{
++ if (n >= 0 && n <= 3)
++ return 8 + n;
++ else
++ return INVALID_REGNUM;
++}
+
+/* Compute the distance from register FROM to register TO.
+ These can be the arg pointer, the frame pointer or
+
+int
+avr32_initial_elimination_offset (int from, int to)
-+ {
-+ int i;
-+ int call_saved_regs = 0;
-+ unsigned long saved_reg_mask, saved_fp_reg_mask;
-+ unsigned int local_vars = get_frame_size ();
++{
++ int i;
++ int call_saved_regs = 0;
++ unsigned long saved_reg_mask, saved_fp_reg_mask;
++ unsigned int local_vars = get_frame_size ();
+
-+ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
-+ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
++ saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
++ saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
+
-+ for (i = 0; i < 16; ++i)
-+ {
-+ if (saved_reg_mask & (1 << i))
-+ call_saved_regs += 4;
-+ }
++ for (i = 0; i < 16; ++i)
++ {
++ if (saved_reg_mask & (1 << i))
++ call_saved_regs += 4;
++ }
+
-+ for (i = 0; i < NUM_FP_REGS; ++i)
-+ {
-+ if (saved_fp_reg_mask & (1 << i))
-+ call_saved_regs += 4;
-+ }
++ for (i = 0; i < NUM_FP_REGS; ++i)
++ {
++ if (saved_fp_reg_mask & (1 << i))
++ call_saved_regs += 4;
++ }
+
-+ switch (from)
++ switch (from)
+ {
+ case ARG_POINTER_REGNUM:
+ switch (to)
-+ {
-+ case STACK_POINTER_REGNUM:
-+ return call_saved_regs + local_vars;
-+ case FRAME_POINTER_REGNUM:
-+ return call_saved_regs;
-+ default:
-+ abort ();
-+ }
++ {
++ case STACK_POINTER_REGNUM:
++ return call_saved_regs + local_vars;
++ case FRAME_POINTER_REGNUM:
++ return call_saved_regs;
++ default:
++ abort ();
++ }
+ case FRAME_POINTER_REGNUM:
+ switch (to)
-+ {
-+ case STACK_POINTER_REGNUM:
-+ return local_vars;
-+ default:
-+ abort ();
-+ }
++ {
++ case STACK_POINTER_REGNUM:
++ return local_vars;
++ default:
++ abort ();
++ }
+ default:
+ abort ();
+ }
-+ }
++}
+
+
+/*
+ Returns a rtx used when passing the next argument to a function.
+ avr32_init_cumulative_args() and avr32_function_arg_advance() sets witch
+ register to use.
-+ */
++*/
+rtx
+avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
-+ tree type, int named)
-+ {
-+ int index = -1;
++ tree type, int named)
++{
++ int index = -1;
+
-+ HOST_WIDE_INT arg_size, arg_rsize;
-+ if (type)
-+ {
-+ arg_size = int_size_in_bytes (type);
-+ }
-+ else
-+ {
-+ arg_size = GET_MODE_SIZE (mode);
-+ }
-+ arg_rsize = PUSH_ROUNDING (arg_size);
++ HOST_WIDE_INT arg_size, arg_rsize;
++ if (type)
++ {
++ arg_size = int_size_in_bytes (type);
++ }
++ else
++ {
++ arg_size = GET_MODE_SIZE (mode);
++ }
++ arg_rsize = PUSH_ROUNDING (arg_size);
+
-+ /*
++ /*
+ The last time this macro is called, it is called with mode == VOIDmode,
+ and its result is passed to the call or call_value pattern as operands 2
+ and 3 respectively. */
-+ if (mode == VOIDmode)
-+ {
-+ return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
-+ }
++ if (mode == VOIDmode)
++ {
++ return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
++ }
+
-+ if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
-+ {
-+ return NULL_RTX;
-+ }
++ if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
++ {
++ return NULL_RTX;
++ }
+
-+ if (arg_rsize == 8)
-+ {
-+ /* use r11:r10 or r9:r8. */
-+ if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
-+ index = 1;
-+ else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
-+ index = 3;
-+ else
-+ index = -1;
-+ }
-+ else if (arg_rsize == 4)
-+ { /* Use first available register */
-+ index = 0;
-+ while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
-+ index++;
-+ if (index > LAST_CUM_REG_INDEX)
-+ index = -1;
-+ }
++ if (arg_rsize == 8)
++ {
++ /* use r11:r10 or r9:r8. */
++ if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
++ index = 1;
++ else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
++ index = 3;
++ else
++ index = -1;
++ }
++ else if (arg_rsize == 4)
++ { /* Use first available register */
++ index = 0;
++ while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
++ index++;
++ if (index > LAST_CUM_REG_INDEX)
++ index = -1;
++ }
+
-+ SET_REG_INDEX (cum, index);
++ SET_REG_INDEX (cum, index);
+
-+ if (GET_REG_INDEX (cum) >= 0)
-+ return gen_rtx_REG (mode,
-+ avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
++ if (GET_REG_INDEX (cum) >= 0)
++ return gen_rtx_REG (mode,
++ avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
+
-+ return NULL_RTX;
-+ }
++ return NULL_RTX;
++}
+
+/*
+ Set the register used for passing the first argument to a function.
-+ */
++*/
+void
-+avr32_init_cumulative_args (CUMULATIVE_ARGS * cum, tree fntype,
++avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
++ tree fntype ATTRIBUTE_UNUSED,
+ rtx libname ATTRIBUTE_UNUSED,
+ tree fndecl ATTRIBUTE_UNUSED)
+ {
+ 0 r1 ||
+ 1 r0 _||_________
+
-+ */
++*/
+void
+avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
-+ tree type, int named ATTRIBUTE_UNUSED)
-+ {
-+ HOST_WIDE_INT arg_size, arg_rsize;
++ tree type, int named ATTRIBUTE_UNUSED)
++{
++ HOST_WIDE_INT arg_size, arg_rsize;
+
-+ if (type)
-+ {
-+ arg_size = int_size_in_bytes (type);
-+ }
-+ else
-+ {
-+ arg_size = GET_MODE_SIZE (mode);
-+ }
-+ arg_rsize = PUSH_ROUNDING (arg_size);
++ if (type)
++ {
++ arg_size = int_size_in_bytes (type);
++ }
++ else
++ {
++ arg_size = GET_MODE_SIZE (mode);
++ }
++ arg_rsize = PUSH_ROUNDING (arg_size);
+
-+ /* It the argument had to be passed in stack, no register is used. */
-+ if ((*targetm.calls.must_pass_in_stack) (mode, type))
-+ {
-+ cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
-+ return;
-+ }
++ /* If the argument had to be passed on the stack, no register is used. */
++ if ((*targetm.calls.must_pass_in_stack) (mode, type))
++ {
++ cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
++ return;
++ }
+
-+ /* Mark the used registers as "used". */
-+ if (GET_REG_INDEX (cum) >= 0)
-+ {
-+ SET_USED_INDEX (cum, GET_REG_INDEX (cum));
-+ if (arg_rsize == 8)
-+ {
-+ SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
-+ }
-+ }
-+ else
-+ {
-+ /* Had to use stack */
-+ cum->stack_pushed_args_size += arg_rsize;
-+ }
-+ }
++ /* Mark the used registers as "used". */
++ if (GET_REG_INDEX (cum) >= 0)
++ {
++ SET_USED_INDEX (cum, GET_REG_INDEX (cum));
++ if (arg_rsize == 8)
++ {
++ SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
++ }
++ }
++ else
++ {
++ /* Had to use stack */
++ cum->stack_pushed_args_size += arg_rsize;
++ }
++}
+
+/*
+ Defines witch direction to go to find the next register to use if the
+ size not a multiple of 4. */
+enum direction
+avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
-+ tree type)
-+ {
-+ /* Pad upward for all aggregates except byte and halfword sized aggregates
++ tree type)
++{
++ /* Pad upward for all aggregates except byte and halfword sized aggregates
+ which can be passed in registers. */
-+ if (type
-+ && AGGREGATE_TYPE_P (type)
-+ && (int_size_in_bytes (type) != 1)
-+ && !((int_size_in_bytes (type) == 2)
-+ && TYPE_ALIGN_UNIT (type) >= 2)
-+ && (int_size_in_bytes (type) & 0x3))
-+ {
-+ return upward;
-+ }
++ if (type
++ && AGGREGATE_TYPE_P (type)
++ && (int_size_in_bytes (type) != 1)
++ && !((int_size_in_bytes (type) == 2)
++ && TYPE_ALIGN_UNIT (type) >= 2)
++ && (int_size_in_bytes (type) & 0x3))
++ {
++ return upward;
++ }
+
-+ return downward;
-+ }
++ return downward;
++}
+
+/*
+ Return a rtx used for the return value from a function call.
-+ */
++*/
+rtx
+avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
-+ {
-+ if (avr32_return_in_memory (type, func))
-+ return NULL_RTX;
++{
++ if (avr32_return_in_memory (type, func))
++ return NULL_RTX;
+
-+ if (int_size_in_bytes (type) <= 4)
-+ if (avr32_return_in_msb (type))
-+ /* Aggregates of size less than a word which does align the data in the
++ if (int_size_in_bytes (type) <= 4)
++ if (avr32_return_in_msb (type))
++ /* Aggregates of size less than a word which does align the data in the
+ MSB must use SImode for r12. */
-+ return gen_rtx_REG (SImode, RET_REGISTER);
-+ else
-+ return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
-+ else if (int_size_in_bytes (type) <= 8)
-+ return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
++ return gen_rtx_REG (SImode, RET_REGISTER);
++ else
++ return gen_rtx_REG (TYPE_MODE (type), RET_REGISTER);
++ else if (int_size_in_bytes (type) <= 8)
++ return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
+
-+ return NULL_RTX;
-+ }
++ return NULL_RTX;
++}
+
+/*
+ Return a rtx used for the return value from a library function call.
-+ */
++*/
+rtx
+avr32_libcall_value (enum machine_mode mode)
-+ {
++{
+
-+ if (GET_MODE_SIZE (mode) <= 4)
-+ return gen_rtx_REG (mode, RET_REGISTER);
-+ else if (GET_MODE_SIZE (mode) <= 8)
-+ return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
-+ else
-+ return NULL_RTX;
-+ }
++ if (GET_MODE_SIZE (mode) <= 4)
++ return gen_rtx_REG (mode, RET_REGISTER);
++ else if (GET_MODE_SIZE (mode) <= 8)
++ return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
++ else
++ return NULL_RTX;
++}
+
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (rtx x)
-+ {
-+ const char *fmt;
-+ int i;
++{
++ const char *fmt;
++ int i;
+
-+ if (GET_CODE (x) == SYMBOL_REF)
-+ return 1;
++ if (GET_CODE (x) == SYMBOL_REF)
++ return 1;
+
-+ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
+
-+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
-+ {
-+ if (fmt[i] == 'E')
-+ {
-+ int j;
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
+
-+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
-+ if (symbol_mentioned_p (XVECEXP (x, i, j)))
-+ return 1;
-+ }
-+ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
-+ return 1;
-+ }
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (symbol_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (rtx x)
-+ {
-+ const char *fmt;
-+ int i;
++{
++ const char *fmt;
++ int i;
+
-+ if (GET_CODE (x) == LABEL_REF)
-+ return 1;
++ if (GET_CODE (x) == LABEL_REF)
++ return 1;
+
-+ fmt = GET_RTX_FORMAT (GET_CODE (x));
-+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
-+ {
-+ if (fmt[i] == 'E')
-+ {
-+ int j;
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
+
-+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
-+ if (label_mentioned_p (XVECEXP (x, i, j)))
-+ return 1;
-+ }
-+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
-+ return 1;
-+ }
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (label_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
++
++/* Return TRUE if X contains a MEM expression. */
++int
++mem_mentioned_p (rtx x)
++{
++ const char *fmt;
++ int i;
++
++ if (MEM_P (x))
++ return 1;
++
++ fmt = GET_RTX_FORMAT (GET_CODE (x));
++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
++ {
++ if (fmt[i] == 'E')
++ {
++ int j;
++
++ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
++ if (mem_mentioned_p (XVECEXP (x, i, j)))
++ return 1;
++ }
++ else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
++ return 1;
++ }
+
++ return 0;
++}
+
+int
+avr32_legitimate_pic_operand_p (rtx x)
-+ {
++{
+
-+ /* We can't have const, this must be broken down to a symbol. */
-+ if (GET_CODE (x) == CONST)
-+ return FALSE;
++ /* We can't have const, this must be broken down to a symbol. */
++ if (GET_CODE (x) == CONST)
++ return FALSE;
+
-+ /* Can't access symbols or labels via the constant pool either */
-+ if ((GET_CODE (x) == SYMBOL_REF
-+ && CONSTANT_POOL_ADDRESS_P (x)
-+ && (symbol_mentioned_p (get_pool_constant (x))
-+ || label_mentioned_p (get_pool_constant (x)))))
-+ return FALSE;
++ /* Can't access symbols or labels via the constant pool either */
++ if ((GET_CODE (x) == SYMBOL_REF
++ && CONSTANT_POOL_ADDRESS_P (x)
++ && (symbol_mentioned_p (get_pool_constant (x))
++ || label_mentioned_p (get_pool_constant (x)))))
++ return FALSE;
+
-+ return TRUE;
-+ }
++ return TRUE;
++}
+
+
+rtx
+legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
-+ rtx reg)
-+ {
++ rtx reg)
++{
+
-+ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
-+ {
-+ int subregs = 0;
++ if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
++ {
++ int subregs = 0;
+
-+ if (reg == 0)
-+ {
-+ if (no_new_pseudos)
-+ abort ();
-+ else
-+ reg = gen_reg_rtx (Pmode);
++ if (reg == 0)
++ {
++ if (no_new_pseudos)
++ abort ();
++ else
++ reg = gen_reg_rtx (Pmode);
+
-+ subregs = 1;
-+ }
++ subregs = 1;
++ }
+
-+ emit_move_insn (reg, orig);
++ emit_move_insn (reg, orig);
+
-+ /* Only set current function as using pic offset table if flag_pic is
++ /* Only set current function as using pic offset table if flag_pic is
+ set. This is because this function is also used if
+ TARGET_HAS_ASM_ADDR_PSEUDOS is set. */
-+ if (flag_pic)
-+ current_function_uses_pic_offset_table = 1;
++ if (flag_pic)
++ current_function_uses_pic_offset_table = 1;
+
-+ /* Put a REG_EQUAL note on this insn, so that it can be optimized by
++ /* Put a REG_EQUAL note on this insn, so that it can be optimized by
+ loop. */
-+ return reg;
-+ }
-+ else if (GET_CODE (orig) == CONST)
-+ {
-+ rtx base, offset;
-+
-+ if (flag_pic
-+ && GET_CODE (XEXP (orig, 0)) == PLUS
-+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
-+ return orig;
-+
-+ if (reg == 0)
-+ {
-+ if (no_new_pseudos)
-+ abort ();
-+ else
-+ reg = gen_reg_rtx (Pmode);
-+ }
++ return reg;
++ }
++ else if (GET_CODE (orig) == CONST)
++ {
++ rtx base, offset;
++
++ if (flag_pic
++ && GET_CODE (XEXP (orig, 0)) == PLUS
++ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
++ return orig;
++
++ if (reg == 0)
++ {
++ if (no_new_pseudos)
++ abort ();
++ else
++ reg = gen_reg_rtx (Pmode);
++ }
+
-+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
-+ {
-+ base =
-+ legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
-+ offset =
-+ legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
-+ base == reg ? 0 : reg);
-+ }
-+ else
-+ abort ();
++ if (GET_CODE (XEXP (orig, 0)) == PLUS)
++ {
++ base =
++ legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
++ offset =
++ legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
++ base == reg ? 0 : reg);
++ }
++ else
++ abort ();
+
-+ if (GET_CODE (offset) == CONST_INT)
-+ {
-+ /* The base register doesn't really matter, we only want to test
++ if (GET_CODE (offset) == CONST_INT)
++ {
++ /* The base register doesn't really matter, we only want to test
+ the index for the appropriate mode. */
-+ if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
-+ {
-+ if (!no_new_pseudos)
-+ offset = force_reg (Pmode, offset);
-+ else
-+ abort ();
-+ }
++ if (!avr32_const_ok_for_constraint_p (INTVAL (offset), 'I', "Is21"))
++ {
++ if (!no_new_pseudos)
++ offset = force_reg (Pmode, offset);
++ else
++ abort ();
++ }
+
-+ if (GET_CODE (offset) == CONST_INT)
-+ return plus_constant (base, INTVAL (offset));
-+ }
++ if (GET_CODE (offset) == CONST_INT)
++ return plus_constant (base, INTVAL (offset));
++ }
+
-+ return gen_rtx_PLUS (Pmode, base, offset);
-+ }
++ return gen_rtx_PLUS (Pmode, base, offset);
++ }
+
-+ return orig;
-+ }
++ return orig;
++}
+
+/* Generate code to load the PIC register. */
+void
+avr32_load_pic_register (void)
-+ {
-+ rtx l1, pic_tmp;
-+ rtx global_offset_table;
++{
++ rtx l1, pic_tmp;
++ rtx global_offset_table;
+
-+ if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
-+ return;
++ if ((current_function_uses_pic_offset_table == 0) || TARGET_NO_INIT_GOT)
++ return;
+
-+ if (!flag_pic)
-+ abort ();
++ if (!flag_pic)
++ abort ();
+
-+ l1 = gen_label_rtx ();
++ l1 = gen_label_rtx ();
+
-+ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
-+ pic_tmp =
-+ gen_rtx_CONST (Pmode,
-+ gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
-+ global_offset_table));
-+ emit_insn (gen_pic_load_addr
-+ (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
-+ emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
++ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
++ pic_tmp =
++ gen_rtx_CONST (Pmode,
++ gen_rtx_MINUS (SImode, gen_rtx_LABEL_REF (Pmode, l1),
++ global_offset_table));
++ emit_insn (gen_pic_load_addr
++ (pic_offset_table_rtx, force_const_mem (SImode, pic_tmp)));
++ emit_insn (gen_pic_compute_got_from_pc (pic_offset_table_rtx, l1));
+
-+ /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
++ /* Need to emit this whether or not we obey regdecls, since setjmp/longjmp
+ can cause life info to screw up. */
-+ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
-+ }
++ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
++}
+
+
+
+ 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */
+bool
+avr32_return_in_msb (tree type ATTRIBUTE_UNUSED)
-+ {
-+ /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
++{
++ /* if ( AGGREGATE_TYPE_P (type) ) if ((int_size_in_bytes(type) == 1) ||
+ ((int_size_in_bytes(type) == 2) && TYPE_ALIGN_UNIT(type) >= 2)) return
+ false; else return true; */
+
-+ return false;
-+ }
++ return false;
++}
+
+
+/*
+
+ BLKmode and all other modes that is larger than 64 bits are returned in
+ memory.
-+ */
++*/
+bool
+avr32_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
-+ {
-+ if (TYPE_MODE (type) == VOIDmode)
-+ return false;
++{
++ if (TYPE_MODE (type) == VOIDmode)
++ return false;
+
-+ if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
-+ || int_size_in_bytes (type) == -1)
-+ {
-+ return true;
-+ }
++ if (int_size_in_bytes (type) > (2 * UNITS_PER_WORD)
++ || int_size_in_bytes (type) == -1)
++ {
++ return true;
++ }
+
-+ /* If we have an aggregate then use the same mechanism as when checking if
++ /* If we have an aggregate then use the same mechanism as when checking if
+ it should be passed on the stack. */
-+ if (type
-+ && AGGREGATE_TYPE_P (type)
-+ && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
-+ return true;
++ if (type
++ && AGGREGATE_TYPE_P (type)
++ && (*targetm.calls.must_pass_in_stack) (TYPE_MODE (type), type))
++ return true;
+
-+ return false;
-+ }
++ return false;
++}
+
+
+/* Output the constant part of the trampoline.
+ ; filled in by avr32_initialize_trampoline()
+ .long 0 ; Address to subrutine,
+ ; filled in by avr32_initialize_trampoline()
-+ */
++*/
+void
+avr32_trampoline_template (FILE * file)
-+ {
-+ fprintf (file, "\tlddpc r0, pc[8]\n");
-+ fprintf (file, "\tlddpc pc, pc[8]\n");
-+ /* make room for the address of the static chain. */
-+ fprintf (file, "\t.long\t0\n");
-+ /* make room for the address to the subrutine. */
-+ fprintf (file, "\t.long\t0\n");
-+ }
++{
++ fprintf (file, "\tlddpc r0, pc[8]\n");
++ fprintf (file, "\tlddpc pc, pc[8]\n");
++ /* make room for the address of the static chain. */
++ fprintf (file, "\t.long\t0\n");
++ /* make room for the address to the subroutine. */
++ fprintf (file, "\t.long\t0\n");
++}
+
+
+/*
+ Initialize the variable parts of a trampoline.
-+ */
++*/
+void
+avr32_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
-+ {
-+ /* Store the address to the static chain. */
-+ emit_move_insn (gen_rtx_MEM
-+ (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
-+ static_chain);
-+
-+ /* Store the address to the function. */
-+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
-+ fnaddr);
-+
-+ emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
-+ gen_rtx_CONST_INT (SImode,
-+ AVR32_CACHE_INVALIDATE_ICACHE)));
-+ }
++{
++ /* Store the address to the static chain. */
++ emit_move_insn (gen_rtx_MEM
++ (SImode, plus_constant (addr, TRAMPOLINE_SIZE - 4)),
++ static_chain);
++
++ /* Store the address to the function. */
++ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, TRAMPOLINE_SIZE)),
++ fnaddr);
++
++ emit_insn (gen_cache (gen_rtx_REG (SImode, 13),
++ gen_rtx_CONST_INT (SImode,
++ AVR32_CACHE_INVALIDATE_ICACHE)));
++}
+
+/* Return nonzero if X is valid as an addressing register. */
+int
+avr32_address_register_rtx_p (rtx x, int strict_p)
-+ {
-+ int regno;
++{
++ int regno;
+
-+ if (!register_operand(x, GET_MODE(x)))
-+ return 0;
++ if (!register_operand(x, GET_MODE(x)))
++ return 0;
+
-+ /* If strict we require the register to be a hard register. */
-+ if (strict_p
-+ && !REG_P(x))
-+ return 0;
++ /* If strict we require the register to be a hard register. */
++ if (strict_p
++ && !REG_P(x))
++ return 0;
+
-+ regno = REGNO (x);
++ regno = REGNO (x);
+
-+ if (strict_p)
-+ return REGNO_OK_FOR_BASE_P (regno);
++ if (strict_p)
++ return REGNO_OK_FOR_BASE_P (regno);
+
-+ return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
-+ }
++ return (regno <= LAST_REGNUM || regno >= FIRST_PSEUDO_REGISTER);
++}
+
+/* Return nonzero if INDEX is valid for an address index operand. */
+int
+avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
-+ {
-+ enum rtx_code code = GET_CODE (index);
++{
++ enum rtx_code code = GET_CODE (index);
+
-+ if (GET_MODE_SIZE (mode) > 8)
-+ return 0;
++ if (GET_MODE_SIZE (mode) > 8)
++ return 0;
+
-+ /* Standard coprocessor addressing modes. */
-+ if (code == CONST_INT)
-+ {
-+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
-+ /* Coprocessor mem insns has a smaller reach than ordinary mem insns */
-+ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
-+ else
-+ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
-+ }
++ /* Standard coprocessor addressing modes. */
++ if (code == CONST_INT)
++ {
++ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
++ /* Coprocessor mem insns have a smaller reach than ordinary mem insns */
++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ku14");
++ else
++ return CONST_OK_FOR_CONSTRAINT_P (INTVAL (index), 'K', "Ks16");
++ }
+
-+ if (avr32_address_register_rtx_p (index, strict_p))
-+ return 1;
++ if (avr32_address_register_rtx_p (index, strict_p))
++ return 1;
+
-+ if (code == MULT)
-+ {
-+ rtx xiop0 = XEXP (index, 0);
-+ rtx xiop1 = XEXP (index, 1);
-+ return ((avr32_address_register_rtx_p (xiop0, strict_p)
-+ && power_of_two_operand (xiop1, SImode)
-+ && (INTVAL (xiop1) <= 8))
-+ || (avr32_address_register_rtx_p (xiop1, strict_p)
-+ && power_of_two_operand (xiop0, SImode)
-+ && (INTVAL (xiop0) <= 8)));
-+ }
-+ else if (code == ASHIFT)
-+ {
-+ rtx op = XEXP (index, 1);
++ if (code == MULT)
++ {
++ rtx xiop0 = XEXP (index, 0);
++ rtx xiop1 = XEXP (index, 1);
++ return ((avr32_address_register_rtx_p (xiop0, strict_p)
++ && power_of_two_operand (xiop1, SImode)
++ && (INTVAL (xiop1) <= 8))
++ || (avr32_address_register_rtx_p (xiop1, strict_p)
++ && power_of_two_operand (xiop0, SImode)
++ && (INTVAL (xiop0) <= 8)));
++ }
++ else if (code == ASHIFT)
++ {
++ rtx op = XEXP (index, 1);
+
-+ return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
-+ && GET_CODE (op) == CONST_INT
-+ && INTVAL (op) > 0 && INTVAL (op) <= 3);
-+ }
++ return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p)
++ && GET_CODE (op) == CONST_INT
++ && INTVAL (op) > 0 && INTVAL (op) <= 3);
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+/*
+ Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if
+
+ Returns NO_REGS if the address is not legatime, GENERAL_REGS or ALL_REGS
+ if it is.
-+ */
++*/
+
+/* Forward declaration*/
+int is_minipool_label (rtx label);
+
+int
+avr32_legitimate_address (enum machine_mode mode, rtx x, int strict)
-+ {
++{
+
-+ switch (GET_CODE (x))
++ switch (GET_CODE (x))
+ {
+ case REG:
+ return avr32_address_register_rtx_p (x, strict);
++ case CONST_INT:
++ return ((mode==SImode)
++ && CONST_OK_FOR_CONSTRAINT_P(INTVAL(x), 'K', "Ks17"));
+ case CONST:
-+ {
-+ rtx label = avr32_find_symbol (x);
-+ if (label
-+ &&
-+ ((CONSTANT_POOL_ADDRESS_P (label)
-+ && !(flag_pic
-+ && (symbol_mentioned_p (get_pool_constant (label))
-+ || label_mentioned_p (get_pool_constant (label)))))
-+ /* TODO! Can this ever happen??? */
-+ || ((GET_CODE (label) == LABEL_REF)
-+ && GET_CODE (XEXP (label, 0)) == CODE_LABEL
-+ && is_minipool_label (XEXP (label, 0)))))
-+ {
-+ return TRUE;
-+ }
-+ }
-+ break;
++ {
++ rtx label = avr32_find_symbol (x);
++ if (label
++ &&
++ ((CONSTANT_POOL_ADDRESS_P (label)
++ && !(flag_pic
++ && (symbol_mentioned_p (get_pool_constant (label))
++ || label_mentioned_p (get_pool_constant (label)))))
++ /* TODO! Can this ever happen??? */
++ || ((GET_CODE (label) == LABEL_REF)
++ && GET_CODE (XEXP (label, 0)) == CODE_LABEL
++ && is_minipool_label (XEXP (label, 0)))
++ /*|| ((GET_CODE (label) == SYMBOL_REF)
++ && mode == SImode
++ && SYMBOL_REF_RMW_ADDR(label))*/))
++ {
++ return TRUE;
++ }
++ }
++ break;
+ case LABEL_REF:
+ if (GET_CODE (XEXP (x, 0)) == CODE_LABEL
-+ && is_minipool_label (XEXP (x, 0)))
-+ {
-+ return TRUE;
-+ }
++ && is_minipool_label (XEXP (x, 0)))
++ {
++ return TRUE;
++ }
+ break;
+ case SYMBOL_REF:
-+ {
-+ if (CONSTANT_POOL_ADDRESS_P (x)
-+ && !(flag_pic
-+ && (symbol_mentioned_p (get_pool_constant (x))
-+ || label_mentioned_p (get_pool_constant (x)))))
-+ return TRUE;
-+ /*
-+ A symbol_ref is only legal if it is a function. If all of them are
-+ legal, a pseudo reg that is a constant will be replaced by a
-+ symbol_ref and make illegale code. SYMBOL_REF_FLAG is set by
-+ ENCODE_SECTION_INFO. */
-+ else if (SYMBOL_REF_RCALL_FUNCTION_P (x))
-+ return TRUE;
-+ break;
-+ }
++ {
++ if (CONSTANT_POOL_ADDRESS_P (x)
++ && !(flag_pic
++ && (symbol_mentioned_p (get_pool_constant (x))
++ || label_mentioned_p (get_pool_constant (x)))))
++ return TRUE;
++ else if (SYMBOL_REF_RCALL_FUNCTION_P (x)
++ || (mode == SImode
++ && SYMBOL_REF_RMW_ADDR (x)))
++ return TRUE;
++ break;
++ }
+ case PRE_DEC: /* (pre_dec (...)) */
+ case POST_INC: /* (post_inc (...)) */
+ return avr32_address_register_rtx_p (XEXP (x, 0), strict);
+ case PLUS: /* (plus (...) (...)) */
-+ {
-+ rtx xop0 = XEXP (x, 0);
-+ rtx xop1 = XEXP (x, 1);
++ {
++ rtx xop0 = XEXP (x, 0);
++ rtx xop1 = XEXP (x, 1);
+
-+ return ((avr32_address_register_rtx_p (xop0, strict)
-+ && avr32_legitimate_index_p (mode, xop1, strict))
-+ || (avr32_address_register_rtx_p (xop1, strict)
-+ && avr32_legitimate_index_p (mode, xop0, strict)));
-+ }
++ return ((avr32_address_register_rtx_p (xop0, strict)
++ && avr32_legitimate_index_p (mode, xop1, strict))
++ || (avr32_address_register_rtx_p (xop1, strict)
++ && avr32_legitimate_index_p (mode, xop0, strict)));
++ }
+ default:
+ break;
+ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+
+int
++avr32_const_ok_for_move (HOST_WIDE_INT c)
++{
++ if ( TARGET_V2_INSNS )
++ return ( avr32_const_ok_for_constraint_p (c, 'K', "Ks21")
++ /* movh instruction */
++ || avr32_hi16_immediate_operand (GEN_INT(c), VOIDmode) );
++ else
++ return avr32_const_ok_for_constraint_p (c, 'K', "Ks21");
++}
++
++int
+avr32_const_double_immediate (rtx value)
-+ {
-+ HOST_WIDE_INT hi, lo;
++{
++ HOST_WIDE_INT hi, lo;
+
-+ if (GET_CODE (value) != CONST_DOUBLE)
-+ return FALSE;
++ if (GET_CODE (value) != CONST_DOUBLE)
++ return FALSE;
+
-+ if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
-+ {
-+ HOST_WIDE_INT target_float[2];
-+ hi = lo = 0;
-+ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
-+ GET_MODE (value));
-+ lo = target_float[0];
-+ hi = target_float[1];
-+ }
-+ else
-+ {
-+ hi = CONST_DOUBLE_HIGH (value);
-+ lo = CONST_DOUBLE_LOW (value);
-+ }
++ if (SCALAR_FLOAT_MODE_P (GET_MODE (value)))
++ {
++ HOST_WIDE_INT target_float[2];
++ hi = lo = 0;
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (value),
++ GET_MODE (value));
++ lo = target_float[0];
++ hi = target_float[1];
++ }
++ else
++ {
++ hi = CONST_DOUBLE_HIGH (value);
++ lo = CONST_DOUBLE_LOW (value);
++ }
+
-+ if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
-+ && (GET_MODE (value) == SFmode
-+ || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
-+ {
-+ return TRUE;
-+ }
++ if (avr32_const_ok_for_constraint_p (lo, 'K', "Ks21")
++ && (GET_MODE (value) == SFmode
++ || avr32_const_ok_for_constraint_p (hi, 'K', "Ks21")))
++ {
++ return TRUE;
++ }
+
-+ return FALSE;
-+ }
++ return FALSE;
++}
+
+
+int
+avr32_legitimate_constant_p (rtx x)
-+ {
-+ switch (GET_CODE (x))
++{
++ switch (GET_CODE (x))
+ {
+ case CONST_INT:
+ /* Check if we should put large immediate into constant pool
+ if (!avr32_imm_in_const_pool)
+ return 1;
+
-+ return avr32_const_ok_for_constraint_p (INTVAL (x), 'K', "Ks21");
++ return avr32_const_ok_for_move (INTVAL (x));
+ case CONST_DOUBLE:
+ /* Check if we should put large immediate into constant pool
+ or load them directly with mov/orh.*/
+ return 1;
+
+ if (GET_MODE (x) == SFmode
-+ || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
-+ return avr32_const_double_immediate (x);
++ || GET_MODE (x) == DFmode || GET_MODE (x) == DImode)
++ return avr32_const_double_immediate (x);
+ else
-+ return 0;
++ return 0;
+ case LABEL_REF:
-+ return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
+ case SYMBOL_REF:
-+ return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS;
++ return avr32_find_symbol (x) && (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS);
+ case CONST:
+ case HIGH:
+ case CONST_VECTOR:
+ debug_rtx (x);
+ return 1;
+ }
-+ }
++}
+
+
+/* Strip any special encoding from labels */
+ while (1)
+ {
+ switch (stripped[0])
-+ {
-+ case '#':
-+ stripped = strchr (name + 1, '#') + 1;
-+ break;
-+ case '*':
-+ stripped = &stripped[1];
-+ break;
-+ default:
-+ return stripped;
-+ }
++ {
++ case '#':
++ stripped = strchr (name + 1, '#') + 1;
++ break;
++ case '*':
++ stripped = &stripped[1];
++ break;
++ default:
++ return stripped;
++ }
+ }
+}
+
+
+ machine->minipool_label_head = 0;
+ machine->minipool_label_tail = 0;
++ machine->ifcvt_after_reload = 0;
+ return machine;
+}
+
+void
+avr32_init_expanders (void)
-+ {
-+ /* Arrange to initialize and mark the machine per-function status. */
-+ init_machine_status = avr32_init_machine_status;
-+ }
++{
++ /* Arrange to initialize and mark the machine per-function status. */
++ init_machine_status = avr32_init_machine_status;
++}
+
+
+/* Return an RTX indicating where the return address to the
+
+rtx
+avr32_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
-+ {
-+ if (count != 0)
-+ return NULL_RTX;
++{
++ if (count != 0)
++ return NULL_RTX;
+
-+ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
-+ }
++ return get_hard_reg_initial_val (Pmode, LR_REGNUM);
++}
+
+
+void
+avr32_encode_section_info (tree decl, rtx rtl, int first)
-+ {
++{
++ default_encode_section_info(decl, rtl, first);
+
-+ if (first && DECL_P (decl))
-+ {
-+ /* Set SYMBOL_REG_FLAG for local functions */
-+ if (!TREE_PUBLIC (decl) && TREE_CODE (decl) == FUNCTION_DECL)
-+ {
-+ if ((*targetm.binds_local_p) (decl))
-+ {
-+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
-+ }
-+ }
-+ }
++ if ( TREE_CODE (decl) == VAR_DECL
++ && (GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
++ && (lookup_attribute ("rmw_addressable", DECL_ATTRIBUTES (decl))
++ || TARGET_RMW_ADDRESSABLE_DATA) ){
++ if ( !TARGET_RMW || flag_pic )
++ return;
++ SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
+ }
++}
++
++void
++avr32_asm_output_label (FILE * stream, const char *name)
++{
++ name = avr32_strip_name_encoding (name);
++
++ /* Print the label. */
++ assemble_name (stream, name);
++ fprintf (stream, ":\n");
++}
++
+
+
+void
-+avr32_asm_output_ascii (FILE * stream, char *ptr, int len)
-+ {
-+ int i, i_new = 0;
-+ char *new_ptr = xmalloc (4 * len);
-+ if (new_ptr == NULL)
-+ internal_error ("Out of memory.");
-+
-+ for (i = 0; i < len; i++)
-+ {
-+ if (ptr[i] == '\n')
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '0';
-+ new_ptr[i_new++] = '1';
-+ new_ptr[i_new++] = '2';
-+ }
-+ else if (ptr[i] == '\"')
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '\"';
-+ }
-+ else if (ptr[i] == '\\')
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '\\';
-+ }
-+ else if (ptr[i] == '\0' && i + 1 < len)
-+ {
-+ new_ptr[i_new++] = '\\';
-+ new_ptr[i_new++] = '0';
-+ }
-+ else
-+ {
-+ new_ptr[i_new++] = ptr[i];
-+ }
-+ }
-+
-+ /* Terminate new_ptr. */
-+ new_ptr[i_new] = '\0';
-+ fprintf (stream, "\t.ascii\t\"%s\"\n", new_ptr);
-+ free (new_ptr);
-+ }
-+
-+
-+void
-+avr32_asm_output_label (FILE * stream, const char *name)
-+ {
-+ name = avr32_strip_name_encoding (name);
-+
-+ /* Print the label. */
-+ assemble_name (stream, name);
-+ fprintf (stream, ":\n");
-+ }
-+
-+
-+
-+void
-+avr32_asm_weaken_label (FILE * stream, const char *name)
-+ {
-+ fprintf (stream, "\t.weak ");
-+ assemble_name (stream, name);
-+ fprintf (stream, "\n");
-+ }
++avr32_asm_weaken_label (FILE * stream, const char *name)
++{
++ fprintf (stream, "\t.weak ");
++ assemble_name (stream, name);
++ fprintf (stream, "\n");
++}
+
+/*
+ Checks if a labelref is equal to a reserved word in the assembler. If it is,
+ insert a '_' before the label name.
-+ */
++*/
+void
+avr32_asm_output_labelref (FILE * stream, const char *name)
-+ {
-+ int verbatim = FALSE;
-+ const char *stripped = name;
-+ int strip_finished = FALSE;
++{
++ int verbatim = FALSE;
++ const char *stripped = name;
++ int strip_finished = FALSE;
+
-+ while (!strip_finished)
-+ {
-+ switch (stripped[0])
-+ {
-+ case '#':
-+ stripped = strchr (name + 1, '#') + 1;
-+ break;
-+ case '*':
-+ stripped = &stripped[1];
-+ verbatim = TRUE;
-+ break;
-+ default:
-+ strip_finished = TRUE;
-+ break;
-+ }
-+ }
++ while (!strip_finished)
++ {
++ switch (stripped[0])
++ {
++ case '#':
++ stripped = strchr (name + 1, '#') + 1;
++ break;
++ case '*':
++ stripped = &stripped[1];
++ verbatim = TRUE;
++ break;
++ default:
++ strip_finished = TRUE;
++ break;
++ }
++ }
+
-+ if (verbatim)
-+ fputs (stripped, stream);
-+ else
-+ asm_fprintf (stream, "%U%s", stripped);
-+ }
++ if (verbatim)
++ fputs (stripped, stream);
++ else
++ asm_fprintf (stream, "%U%s", stripped);
++}
+
+
+
+ Returns NULL_RTX if the compare is not redundant
+ or the new condition to use in the conditional
+ instruction if the compare is redundant.
-+ */
++*/
+static rtx
+is_compare_redundant (rtx compare_exp, rtx next_cond)
-+ {
-+ int z_flag_valid = FALSE;
-+ int n_flag_valid = FALSE;
-+ rtx new_cond;
++{
++ int z_flag_valid = FALSE;
++ int n_flag_valid = FALSE;
++ rtx new_cond;
+
-+ if (GET_CODE (compare_exp) != COMPARE)
-+ return NULL_RTX;
++ if (GET_CODE (compare_exp) != COMPARE
++ && GET_CODE (compare_exp) != AND)
++ return NULL_RTX;
+
+
-+ if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
-+ {
-+ /* cc0 already contains the correct comparison -> delete cmp insn */
-+ return next_cond;
-+ }
++ if (rtx_equal_p (cc_prev_status.mdep.value, compare_exp))
++ {
++ /* cc0 already contains the correct comparison -> delete cmp insn */
++ return next_cond;
++ }
+
-+ if (GET_MODE (compare_exp) != SImode)
-+ return NULL_RTX;
++ if (GET_MODE (compare_exp) != SImode)
++ return NULL_RTX;
+
-+ switch (cc_prev_status.mdep.flags)
++ switch (cc_prev_status.mdep.flags)
+ {
+ case CC_SET_VNCZ:
+ case CC_SET_NCZ:
+ z_flag_valid = TRUE;
+ }
+
-+ if (cc_prev_status.mdep.value
-+ && REG_P (XEXP (compare_exp, 0))
-+ && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
-+ && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
-+ && next_cond != NULL_RTX)
-+ {
-+ if (INTVAL (XEXP (compare_exp, 1)) == 0
-+ && z_flag_valid
-+ && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
-+ /* We can skip comparison Z flag is already reflecting ops[0] */
-+ return next_cond;
-+ else if (n_flag_valid
-+ && ((INTVAL (XEXP (compare_exp, 1)) == 0
-+ && (GET_CODE (next_cond) == GE
-+ || GET_CODE (next_cond) == LT))
-+ || (INTVAL (XEXP (compare_exp, 1)) == -1
-+ && (GET_CODE (next_cond) == GT
-+ || GET_CODE (next_cond) == LE))))
-+ {
-+ /* We can skip comparison N flag is already reflecting ops[0],
++ if (cc_prev_status.mdep.value
++ && GET_CODE (compare_exp) == COMPARE
++ && REG_P (XEXP (compare_exp, 0))
++ && REGNO (XEXP (compare_exp, 0)) == REGNO (cc_prev_status.mdep.value)
++ && GET_CODE (XEXP (compare_exp, 1)) == CONST_INT
++ && next_cond != NULL_RTX)
++ {
++ if (INTVAL (XEXP (compare_exp, 1)) == 0
++ && z_flag_valid
++ && (GET_CODE (next_cond) == EQ || GET_CODE (next_cond) == NE))
++ /* We can skip comparison Z flag is already reflecting ops[0] */
++ return next_cond;
++ else if (n_flag_valid
++ && ((INTVAL (XEXP (compare_exp, 1)) == 0
++ && (GET_CODE (next_cond) == GE
++ || GET_CODE (next_cond) == LT))
++ || (INTVAL (XEXP (compare_exp, 1)) == -1
++ && (GET_CODE (next_cond) == GT
++ || GET_CODE (next_cond) == LE))))
++ {
++ /* We can skip comparison N flag is already reflecting ops[0],
+ which means that we can use the mi/pl conditions to check if
+ ops[0] is GE or LT 0. */
-+ if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
-+ new_cond =
-+ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
-+ UNSPEC_COND_PL);
-+ else
-+ new_cond =
-+ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
-+ UNSPEC_COND_MI);
-+ return new_cond;
-+ }
-+ }
-+ return NULL_RTX;
-+ }
++ if ((GET_CODE (next_cond) == GE) || (GET_CODE (next_cond) == GT))
++ new_cond =
++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
++ UNSPEC_COND_PL);
++ else
++ new_cond =
++ gen_rtx_UNSPEC (GET_MODE (next_cond), gen_rtvec (2, cc0_rtx, const0_rtx),
++ UNSPEC_COND_MI);
++ return new_cond;
++ }
++ }
++ return NULL_RTX;
++}
+
+/* Updates cc_status. */
+void
+avr32_notice_update_cc (rtx exp, rtx insn)
-+ {
-+ switch (get_attr_cc (insn))
++{
++ enum attr_cc attr_cc = get_attr_cc (insn);
++
++ if ( attr_cc == CC_SET_Z_IF_NOT_V2 )
++ if (TARGET_V2_INSNS)
++ attr_cc = CC_NONE;
++ else
++ attr_cc = CC_SET_Z;
++
++ switch (attr_cc)
+ {
+ case CC_CALL_SET:
+ CC_STATUS_INIT;
+ FPCC_STATUS_INIT;
+ /* Check if the function call returns a value in r12 */
+ if (REG_P (recog_data.operand[0])
-+ && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
-+ {
-+ cc_status.flags = 0;
-+ cc_status.mdep.value =
-+ gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
-+ cc_status.mdep.flags = CC_SET_VNCZ;
++ && REGNO (recog_data.operand[0]) == RETVAL_REGNUM)
++ {
++ cc_status.flags = 0;
++ cc_status.mdep.value =
++ gen_rtx_COMPARE (SImode, recog_data.operand[0], const0_rtx);
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+
-+ }
++ }
+ break;
+ case CC_COMPARE:
-+ /* Check that compare will not be optimized away if so nothing should
-+ be done */
-+ if (is_compare_redundant (SET_SRC (exp), get_next_insn_cond (insn)) ==
-+ NULL_RTX)
-+ {
++ {
++ /* Check that compare will not be optimized away if so nothing should
++ be done */
++ rtx compare_exp = SET_SRC (exp);
++ /* Check if we have a tst expression. If so convert it to a
++ compare with 0. */
++ if ( REG_P (SET_SRC (exp)) )
++ compare_exp = gen_rtx_COMPARE (GET_MODE (SET_SRC (exp)),
++ SET_SRC (exp),
++ const0_rtx);
++
++ if (is_compare_redundant (compare_exp, get_next_insn_cond (insn)) ==
++ NULL_RTX)
++ {
+
-+ /* Reset the nonstandard flag */
-+ CC_STATUS_INIT;
-+ cc_status.flags = 0;
-+ cc_status.mdep.value = SET_SRC (exp);
-+ cc_status.mdep.flags = CC_SET_VNCZ;
-+ }
++ /* Reset the nonstandard flag */
++ CC_STATUS_INIT;
++ cc_status.flags = 0;
++ cc_status.mdep.value = compare_exp;
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ }
++ }
+ break;
+ case CC_CMP_COND_INSN:
-+ {
-+ /* Conditional insn that emit the compare itself. */
-+ rtx cmp = gen_rtx_COMPARE (GET_MODE (recog_data.operand[4]),
-+ recog_data.operand[4],
-+ recog_data.operand[5]);
++ {
++ /* Conditional insn that emit the compare itself. */
++ rtx cmp;
++ rtx cmp_op0, cmp_op1;
++ rtx cond;
++ rtx dest;
++ rtx next_insn = next_nonnote_insn (insn);
++
++ if ( GET_CODE (exp) == COND_EXEC )
++ {
++ cmp_op0 = XEXP (COND_EXEC_TEST (exp), 0);
++ cmp_op1 = XEXP (COND_EXEC_TEST (exp), 1);
++ cond = COND_EXEC_TEST (exp);
++ dest = SET_DEST (COND_EXEC_CODE (exp));
++ }
++ else
++ {
++ /* If-then-else conditional. Compare operands are in operands
++ 4 and 5. */
++ cmp_op0 = recog_data.operand[4];
++ cmp_op1 = recog_data.operand[5];
++ cond = recog_data.operand[1];
++ dest = SET_DEST (exp);
++ }
+
-+ if (is_compare_redundant (cmp, recog_data.operand[1]) == NULL_RTX)
-+ {
++ if ( GET_CODE (cmp_op0) == AND )
++ cmp = cmp_op0;
++ else
++ cmp = gen_rtx_COMPARE (GET_MODE (cmp_op0),
++ cmp_op0,
++ cmp_op1);
++
++ /* Check if the conditional insn updates a register present
++ in the comparison, if so then we must reset the cc_status. */
++ if (REG_P (dest)
++ && (reg_mentioned_p (dest, cmp_op0)
++ || reg_mentioned_p (dest, cmp_op1))
++ && GET_CODE (exp) != COND_EXEC )
++ {
++ CC_STATUS_INIT;
++ }
++ else if (is_compare_redundant (cmp, cond) == NULL_RTX)
++ {
++ /* Reset the nonstandard flag */
++ CC_STATUS_INIT;
++ if ( GET_CODE (cmp_op0) == AND )
++ {
++ cc_status.flags = CC_INVERTED;
++ cc_status.mdep.flags = CC_SET_Z;
++ }
++ else
++ {
++ cc_status.flags = 0;
++ cc_status.mdep.flags = CC_SET_VNCZ;
++ }
++ cc_status.mdep.value = cmp;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
++ }
+
-+ /* Reset the nonstandard flag */
-+ CC_STATUS_INIT;
-+ cc_status.flags = 0;
-+ cc_status.mdep.value = cmp;
-+ cc_status.mdep.flags = CC_SET_VNCZ;
-+ }
-+ }
-+ break;
++
++ /* Check if we have a COND_EXEC insn which updates one
++ of the registers in the compare status. */
++ if (REG_P (dest)
++ && (reg_mentioned_p (dest, cmp_op0)
++ || reg_mentioned_p (dest, cmp_op1))
++ && GET_CODE (exp) == COND_EXEC )
++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
++
++ if ( cc_status.mdep.cond_exec_cmp_clobbered
++ && GET_CODE (exp) == COND_EXEC
++ && next_insn != NULL
++ && INSN_P (next_insn)
++ && !(GET_CODE (PATTERN (next_insn)) == COND_EXEC
++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 0), cmp_op0)
++ && rtx_equal_p (XEXP (COND_EXEC_TEST (PATTERN (next_insn)), 1), cmp_op1)
++ && (GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == GET_CODE (cond)
++ || GET_CODE (COND_EXEC_TEST (PATTERN (next_insn))) == reverse_condition (GET_CODE (cond)))) )
++ {
++ /* We have a sequence of conditional insns where the compare status has been clobbered,
++ since the compare no longer reflects the content of the values to compare. */
++ CC_STATUS_INIT;
++ cc_status.mdep.cond_exec_cmp_clobbered = 1;
++ }
++
++ }
++ break;
+ case CC_FPCOMPARE:
+ /* Check that floating-point compare will not be optimized away if so
+ nothing should be done */
+ if (!rtx_equal_p (cc_prev_status.mdep.fpvalue, SET_SRC (exp)))
-+ {
-+ /* cc0 already contains the correct comparison -> delete cmp insn */
-+ /* Reset the nonstandard flag */
-+ cc_status.mdep.fpvalue = SET_SRC (exp);
-+ cc_status.mdep.fpflags = CC_SET_CZ;
-+ }
++ {
++ /* cc0 already contains the correct comparison -> delete cmp insn */
++ /* Reset the nonstandard flag */
++ cc_status.mdep.fpvalue = SET_SRC (exp);
++ cc_status.mdep.fpflags = CC_SET_CZ;
++ }
+ break;
+ case CC_FROM_FPCC:
+ /* Flags are updated with flags from Floating-point coprocessor, set
+ cc_status.flags = CC_INVERTED;
+ cc_status.mdep.value = SET_SRC (exp);
+ cc_status.mdep.flags = CC_SET_Z;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+ case CC_NONE:
+ /* Insn does not affect CC at all. Check if the instruction updates
+ some of the register currently reflected in cc0 */
+
+ if ((GET_CODE (exp) == SET)
-+ && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
-+ && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
-+ || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
-+ || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
-+ {
-+ CC_STATUS_INIT;
-+ }
++ && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value)
++ && (reg_mentioned_p (SET_DEST (exp), cc_status.value1)
++ || reg_mentioned_p (SET_DEST (exp), cc_status.value2)
++ || reg_mentioned_p (SET_DEST (exp), cc_status.mdep.value)))
++ {
++ CC_STATUS_INIT;
++ }
+
+ /* If this is a parallel we must step through each of the parallel
+ expressions */
+ if (GET_CODE (exp) == PARALLEL)
-+ {
-+ int i;
-+ for (i = 0; i < XVECLEN (exp, 0); ++i)
-+ {
-+ rtx vec_exp = XVECEXP (exp, 0, i);
-+ if ((GET_CODE (vec_exp) == SET)
-+ && (cc_status.value1 || cc_status.value2
-+ || cc_status.mdep.value)
-+ && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
-+ || reg_mentioned_p (SET_DEST (vec_exp),
-+ cc_status.value2)
-+ || reg_mentioned_p (SET_DEST (vec_exp),
-+ cc_status.mdep.value)))
-+ {
-+ CC_STATUS_INIT;
-+ }
-+ }
-+ }
++ {
++ int i;
++ for (i = 0; i < XVECLEN (exp, 0); ++i)
++ {
++ rtx vec_exp = XVECEXP (exp, 0, i);
++ if ((GET_CODE (vec_exp) == SET)
++ && (cc_status.value1 || cc_status.value2
++ || cc_status.mdep.value)
++ && (reg_mentioned_p (SET_DEST (vec_exp), cc_status.value1)
++ || reg_mentioned_p (SET_DEST (vec_exp),
++ cc_status.value2)
++ || reg_mentioned_p (SET_DEST (vec_exp),
++ cc_status.mdep.value)))
++ {
++ CC_STATUS_INIT;
++ }
++ }
++ }
+
+ /* Check if we have memory opartions with post_inc or pre_dec on the
+ register currently reflected in cc0 */
+ if (GET_CODE (exp) == SET
-+ && GET_CODE (SET_SRC (exp)) == MEM
-+ && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
-+ || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
-+ &&
-+ (reg_mentioned_p
-+ (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
-+ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
-+ cc_status.value2)
-+ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
-+ cc_status.mdep.value)))
-+ CC_STATUS_INIT;
++ && GET_CODE (SET_SRC (exp)) == MEM
++ && (GET_CODE (XEXP (SET_SRC (exp), 0)) == POST_INC
++ || GET_CODE (XEXP (SET_SRC (exp), 0)) == PRE_DEC)
++ &&
++ (reg_mentioned_p
++ (XEXP (XEXP (SET_SRC (exp), 0), 0), cc_status.value1)
++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
++ cc_status.value2)
++ || reg_mentioned_p (XEXP (XEXP (SET_SRC (exp), 0), 0),
++ cc_status.mdep.value)))
++ CC_STATUS_INIT;
+
+ if (GET_CODE (exp) == SET
-+ && GET_CODE (SET_DEST (exp)) == MEM
-+ && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
-+ || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
-+ &&
-+ (reg_mentioned_p
-+ (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
-+ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
-+ cc_status.value2)
-+ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
-+ cc_status.mdep.value)))
-+ CC_STATUS_INIT;
++ && GET_CODE (SET_DEST (exp)) == MEM
++ && (GET_CODE (XEXP (SET_DEST (exp), 0)) == POST_INC
++ || GET_CODE (XEXP (SET_DEST (exp), 0)) == PRE_DEC)
++ &&
++ (reg_mentioned_p
++ (XEXP (XEXP (SET_DEST (exp), 0), 0), cc_status.value1)
++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
++ cc_status.value2)
++ || reg_mentioned_p (XEXP (XEXP (SET_DEST (exp), 0), 0),
++ cc_status.mdep.value)))
++ CC_STATUS_INIT;
+ break;
+
+ case CC_SET_VNCZ:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_VNCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_SET_NCZ:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_NCZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_SET_CZ:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_CZ;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_SET_Z:
+ CC_STATUS_INIT;
+ cc_status.mdep.value = recog_data.operand[0];
+ cc_status.mdep.flags = CC_SET_Z;
++ cc_status.mdep.cond_exec_cmp_clobbered = 0;
+ break;
+
+ case CC_CLOBBER:
+ default:
+ CC_STATUS_INIT;
+ }
-+ }
++}
+
+
+/*
+ Outputs to stdio stream stream the assembler syntax for an instruction
+ operand x. x is an RTL expression.
-+ */
++*/
+void
+avr32_print_operand (FILE * stream, rtx x, int code)
-+ {
-+ int error = 0;
++{
++ int error = 0;
++
++ if ( code == '?' )
++ {
++ /* Predicable instruction, print condition code */
++
++ /* If the insn should not be conditional then do nothing. */
++ if ( current_insn_predicate == NULL_RTX )
++ return;
++
++ /* Set x to the predicate to force printing
++ the condition later on. */
++ x = current_insn_predicate;
++
++ /* Reverse condition if using bld insn. */
++ if ( GET_CODE (XEXP(current_insn_predicate,0)) == AND )
++ x = reversed_condition (current_insn_predicate);
++ }
++ else if ( code == '!' )
++ {
++ /* Output compare for conditional insn if needed. */
++ rtx new_cond;
++ gcc_assert ( current_insn_predicate != NULL_RTX );
++ new_cond = avr32_output_cmp(current_insn_predicate,
++ GET_MODE(XEXP(current_insn_predicate,0)),
++ XEXP(current_insn_predicate,0),
++ XEXP(current_insn_predicate,1));
++
++ /* Check if the new condition is a special avr32 condition
++ specified using UNSPECs. If so we must handle it differently. */
++ if ( GET_CODE (new_cond) == UNSPEC )
++ {
++ current_insn_predicate =
++ gen_rtx_UNSPEC (CCmode,
++ gen_rtvec (2,
++ XEXP(current_insn_predicate,0),
++ XEXP(current_insn_predicate,1)),
++ XINT (new_cond, 1));
++ }
++ else
++ {
++ PUT_CODE(current_insn_predicate, GET_CODE(new_cond));
++ }
++ return;
++ }
+
-+ switch (GET_CODE (x))
++ switch (GET_CODE (x))
+ {
+ case UNSPEC:
+ switch (XINT (x, 1))
-+ {
-+ case UNSPEC_COND_PL:
-+ if (code == 'i')
-+ fputs ("mi", stream);
-+ else
-+ fputs ("pl", stream);
-+ break;
-+ case UNSPEC_COND_MI:
-+ if (code == 'i')
-+ fputs ("pl", stream);
-+ else
-+ fputs ("mi", stream);
-+ break;
-+ default:
-+ error = 1;
-+ }
++ {
++ case UNSPEC_COND_PL:
++ if (code == 'i')
++ fputs ("mi", stream);
++ else
++ fputs ("pl", stream);
++ break;
++ case UNSPEC_COND_MI:
++ if (code == 'i')
++ fputs ("pl", stream);
++ else
++ fputs ("mi", stream);
++ break;
++ default:
++ error = 1;
++ }
+ break;
+ case EQ:
+ if (code == 'i')
-+ fputs ("ne", stream);
++ fputs ("ne", stream);
+ else
-+ fputs ("eq", stream);
++ fputs ("eq", stream);
+ break;
+ case NE:
+ if (code == 'i')
-+ fputs ("eq", stream);
++ fputs ("eq", stream);
+ else
-+ fputs ("ne", stream);
++ fputs ("ne", stream);
+ break;
+ case GT:
+ if (code == 'i')
-+ fputs ("le", stream);
++ fputs ("le", stream);
+ else
-+ fputs ("gt", stream);
++ fputs ("gt", stream);
+ break;
+ case GTU:
+ if (code == 'i')
-+ fputs ("ls", stream);
++ fputs ("ls", stream);
+ else
-+ fputs ("hi", stream);
++ fputs ("hi", stream);
+ break;
+ case LT:
+ if (code == 'i')
-+ fputs ("ge", stream);
++ fputs ("ge", stream);
+ else
-+ fputs ("lt", stream);
++ fputs ("lt", stream);
+ break;
+ case LTU:
+ if (code == 'i')
-+ fputs ("hs", stream);
++ fputs ("hs", stream);
+ else
-+ fputs ("lo", stream);
++ fputs ("lo", stream);
+ break;
+ case GE:
+ if (code == 'i')
-+ fputs ("lt", stream);
++ fputs ("lt", stream);
+ else
-+ fputs ("ge", stream);
++ fputs ("ge", stream);
+ break;
+ case GEU:
+ if (code == 'i')
-+ fputs ("lo", stream);
++ fputs ("lo", stream);
+ else
-+ fputs ("hs", stream);
++ fputs ("hs", stream);
+ break;
+ case LE:
+ if (code == 'i')
-+ fputs ("gt", stream);
++ fputs ("gt", stream);
+ else
-+ fputs ("le", stream);
++ fputs ("le", stream);
+ break;
+ case LEU:
+ if (code == 'i')
-+ fputs ("hi", stream);
++ fputs ("hi", stream);
+ else
-+ fputs ("ls", stream);
++ fputs ("ls", stream);
+ break;
+ case CONST_INT:
-+ {
-+ HOST_WIDE_INT value = INTVAL (x);
++ {
++ HOST_WIDE_INT value = INTVAL (x);
+
-+ switch (code)
++ switch (code)
+ {
+ case 'm':
+ if ( HOST_BITS_PER_WIDE_INT > BITS_PER_WORD )
+ value = bitpos;
+ }
+ break;
++ case 'z':
++ {
++ /* Set to bit position of first bit cleared in immediate */
++ int i, bitpos = 32;
++ for (i = 0; i < 32; i++)
++ if (!(value & (1 << i)))
++ {
++ bitpos = i;
++ break;
++ }
++ value = bitpos;
++ }
++ break;
+ case 'r':
+ {
+ /* Reglist 8 */
+ char op[50];
+ op[0] = '\0';
-+
++
+ if (value & 0x01)
+ sprintf (op, "r0-r3");
+ if (value & 0x02)
+ strlen (op) ? sprintf (op, "%s, lr", op) : sprintf (op, "lr");
+ if (value & 0x80)
+ strlen (op) ? sprintf (op, "%s, pc", op) : sprintf (op, "pc");
-+
++
+ fputs (op, stream);
+ return;
+ }
+ int i;
+ reglist16_string[0] = '\0';
+
-+ for (i = 0; i < 16; ++i)
-+ {
-+ if (value & (1 << i))
-+ {
-+ strlen (reglist16_string) ? sprintf (reglist16_string,
-+ "%s, %s",
-+ reglist16_string,
-+ reg_names
-+ [INTERNAL_REGNUM
-+ (i)]) :
-+ sprintf (reglist16_string, "%s",
-+ reg_names[INTERNAL_REGNUM (i)]);
-+ }
-+ }
-+ fputs (reglist16_string, stream);
-+ return;
-+ }
-+ case 'C':
-+ {
-+ /* RegListCP8 */
-+ char reglist_string[100];
-+ avr32_make_fp_reglist_w (value, (char *) reglist_string);
-+ fputs (reglist_string, stream);
-+ return;
-+ }
-+ case 'D':
-+ {
-+ /* RegListCPD8 */
-+ char reglist_string[100];
-+ avr32_make_fp_reglist_d (value, (char *) reglist_string);
-+ fputs (reglist_string, stream);
-+ return;
-+ }
-+ case 'h':
-+ /* Print halfword part of word */
-+ fputs (value ? "b" : "t", stream);
-+ return;
-+ }
++ for (i = 0; i < 16; ++i)
++ {
++ if (value & (1 << i))
++ {
++ strlen (reglist16_string) ? sprintf (reglist16_string,
++ "%s, %s",
++ reglist16_string,
++ reg_names
++ [INTERNAL_REGNUM
++ (i)]) :
++ sprintf (reglist16_string, "%s",
++ reg_names[INTERNAL_REGNUM (i)]);
++ }
++ }
++ fputs (reglist16_string, stream);
++ return;
++ }
++ case 'C':
++ {
++ /* RegListCP8 */
++ char reglist_string[100];
++ avr32_make_fp_reglist_w (value, (char *) reglist_string);
++ fputs (reglist_string, stream);
++ return;
++ }
++ case 'D':
++ {
++ /* RegListCPD8 */
++ char reglist_string[100];
++ avr32_make_fp_reglist_d (value, (char *) reglist_string);
++ fputs (reglist_string, stream);
++ return;
++ }
++ case 'h':
++ /* Print halfword part of word */
++ fputs (value ? "b" : "t", stream);
++ return;
++ }
+
-+ /* Print Value */
-+ fprintf (stream, "%d", value);
-+ break;
-+ }
++ /* Print Value */
++ fprintf (stream, "%d", value);
++ break;
++ }
+ case CONST_DOUBLE:
-+ {
-+ HOST_WIDE_INT hi, lo;
-+ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
-+ {
-+ HOST_WIDE_INT target_float[2];
-+ hi = lo = 0;
-+ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
-+ GET_MODE (x));
-+ /* For doubles the most significant part starts at index 0. */
-+ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
-+ {
-+ hi = target_float[0];
-+ lo = target_float[1];
-+ }
-+ else
-+ {
-+ lo = target_float[0];
-+ }
-+ }
-+ else
-+ {
-+ hi = CONST_DOUBLE_HIGH (x);
-+ lo = CONST_DOUBLE_LOW (x);
-+ }
++ {
++ HOST_WIDE_INT hi, lo;
++ if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
++ {
++ HOST_WIDE_INT target_float[2];
++ hi = lo = 0;
++ real_to_target (target_float, CONST_DOUBLE_REAL_VALUE (x),
++ GET_MODE (x));
++ /* For doubles the most significant part starts at index 0. */
++ if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
++ {
++ hi = target_float[0];
++ lo = target_float[1];
++ }
++ else
++ {
++ lo = target_float[0];
++ }
++ }
++ else
++ {
++ hi = CONST_DOUBLE_HIGH (x);
++ lo = CONST_DOUBLE_LOW (x);
++ }
+
-+ if (code == 'm')
-+ fprintf (stream, "%ld", hi);
-+ else
-+ fprintf (stream, "%ld", lo);
++ if (code == 'm')
++ fprintf (stream, "%ld", hi);
++ else
++ fprintf (stream, "%ld", lo);
+
-+ break;
-+ }
++ break;
++ }
+ case CONST:
+ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
+ fprintf (stream, "+%ld", INTVAL (XEXP (XEXP (x, 0), 1)));
+ case REG:
+ /* Swap register name if the register is DImode or DFmode. */
+ if (GET_MODE (x) == DImode || GET_MODE (x) == DFmode)
-+ {
-+ /* Double register must have an even numbered address */
-+ gcc_assert (!(REGNO (x) % 2));
-+ if (code == 'm')
-+ fputs (reg_names[true_regnum (x)], stream);
-+ else
-+ fputs (reg_names[true_regnum (x) + 1], stream);
-+ }
++ {
++ /* Double register must have an even numbered address */
++ gcc_assert (!(REGNO (x) % 2));
++ if (code == 'm')
++ fputs (reg_names[true_regnum (x)], stream);
++ else
++ fputs (reg_names[true_regnum (x) + 1], stream);
++ }
+ else if (GET_MODE (x) == TImode)
-+ {
-+ switch (code)
-+ {
-+ case 'T':
-+ fputs (reg_names[true_regnum (x)], stream);
-+ break;
-+ case 'U':
-+ fputs (reg_names[true_regnum (x) + 1], stream);
-+ break;
-+ case 'L':
-+ fputs (reg_names[true_regnum (x) + 2], stream);
-+ break;
-+ case 'B':
-+ fputs (reg_names[true_regnum (x) + 3], stream);
-+ break;
-+ default:
-+ fprintf (stream, "%s, %s, %s, %s",
-+ reg_names[true_regnum (x) + 3],
-+ reg_names[true_regnum (x) + 2],
-+ reg_names[true_regnum (x) + 1],
-+ reg_names[true_regnum (x)]);
-+ break;
-+ }
-+ }
++ {
++ switch (code)
++ {
++ case 'T':
++ fputs (reg_names[true_regnum (x)], stream);
++ break;
++ case 'U':
++ fputs (reg_names[true_regnum (x) + 1], stream);
++ break;
++ case 'L':
++ fputs (reg_names[true_regnum (x) + 2], stream);
++ break;
++ case 'B':
++ fputs (reg_names[true_regnum (x) + 3], stream);
++ break;
++ default:
++ fprintf (stream, "%s, %s, %s, %s",
++ reg_names[true_regnum (x) + 3],
++ reg_names[true_regnum (x) + 2],
++ reg_names[true_regnum (x) + 1],
++ reg_names[true_regnum (x)]);
++ break;
++ }
++ }
+ else
-+ {
-+ fputs (reg_names[true_regnum (x)], stream);
-+ }
++ {
++ fputs (reg_names[true_regnum (x)], stream);
++ }
+ break;
+ case CODE_LABEL:
+ case LABEL_REF:
+ break;
+ case MEM:
+ switch (GET_CODE (XEXP (x, 0)))
-+ {
-+ case LABEL_REF:
-+ case SYMBOL_REF:
-+ output_addr_const (stream, XEXP (x, 0));
-+ break;
-+ case MEM:
-+ switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
-+ {
-+ case SYMBOL_REF:
-+ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
-+ break;
-+ default:
-+ error = 1;
-+ break;
-+ }
-+ break;
-+ case REG:
-+ avr32_print_operand (stream, XEXP (x, 0), 0);
-+ if (code != 'p')
-+ fputs ("[0]", stream);
-+ break;
-+ case PRE_DEC:
-+ fputs ("--", stream);
-+ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
-+ break;
-+ case POST_INC:
-+ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
-+ fputs ("++", stream);
-+ break;
-+ case PLUS:
-+ {
-+ rtx op0 = XEXP (XEXP (x, 0), 0);
-+ rtx op1 = XEXP (XEXP (x, 0), 1);
-+ rtx base = NULL_RTX, offset = NULL_RTX;
-+
-+ if (avr32_address_register_rtx_p (op0, 1))
-+ {
-+ base = op0;
-+ offset = op1;
-+ }
-+ else if (avr32_address_register_rtx_p (op1, 1))
-+ {
-+ /* Operands are switched. */
-+ base = op1;
-+ offset = op0;
-+ }
++ {
++ case LABEL_REF:
++ case SYMBOL_REF:
++ output_addr_const (stream, XEXP (x, 0));
++ break;
++ case MEM:
++ switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
++ {
++ case SYMBOL_REF:
++ output_addr_const (stream, XEXP (XEXP (x, 0), 0));
++ break;
++ default:
++ error = 1;
++ break;
++ }
++ break;
++ case REG:
++ avr32_print_operand (stream, XEXP (x, 0), 0);
++ if (code != 'p')
++ fputs ("[0]", stream);
++ break;
++ case PRE_DEC:
++ fputs ("--", stream);
++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
++ break;
++ case POST_INC:
++ avr32_print_operand (stream, XEXP (XEXP (x, 0), 0), 0);
++ fputs ("++", stream);
++ break;
++ case PLUS:
++ {
++ rtx op0 = XEXP (XEXP (x, 0), 0);
++ rtx op1 = XEXP (XEXP (x, 0), 1);
++ rtx base = NULL_RTX, offset = NULL_RTX;
++
++ if (avr32_address_register_rtx_p (op0, 1))
++ {
++ base = op0;
++ offset = op1;
++ }
++ else if (avr32_address_register_rtx_p (op1, 1))
++ {
++ /* Operands are switched. */
++ base = op1;
++ offset = op0;
++ }
+
-+ gcc_assert (base && offset
-+ && avr32_address_register_rtx_p (base, 1)
-+ && avr32_legitimate_index_p (GET_MODE (x), offset,
-+ 1));
++ gcc_assert (base && offset
++ && avr32_address_register_rtx_p (base, 1)
++ && avr32_legitimate_index_p (GET_MODE (x), offset,
++ 1));
+
-+ avr32_print_operand (stream, base, 0);
-+ fputs ("[", stream);
-+ avr32_print_operand (stream, offset, 0);
-+ fputs ("]", stream);
-+ break;
-+ }
-+ case CONST:
-+ output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
-+ fprintf (stream, " + %ld",
-+ INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
-+ break;
-+ default:
-+ error = 1;
-+ }
++ avr32_print_operand (stream, base, 0);
++ fputs ("[", stream);
++ avr32_print_operand (stream, offset, 0);
++ fputs ("]", stream);
++ break;
++ }
++ case CONST:
++ output_addr_const (stream, XEXP (XEXP (XEXP (x, 0), 0), 0));
++ fprintf (stream, " + %ld",
++ INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)));
++ break;
++ case CONST_INT:
++ avr32_print_operand (stream, XEXP (x, 0), 0);
++ break;
++ default:
++ error = 1;
++ }
+ break;
+ case MULT:
-+ {
-+ int value = INTVAL (XEXP (x, 1));
-+
-+ /* Convert immediate in multiplication into a shift immediate */
-+ switch (value)
-+ {
-+ case 2:
-+ value = 1;
-+ break;
-+ case 4:
-+ value = 2;
-+ break;
-+ case 8:
-+ value = 3;
-+ break;
-+ default:
-+ value = 0;
-+ }
-+ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
-+ value);
-+ break;
-+ }
++ {
++ int value = INTVAL (XEXP (x, 1));
++
++ /* Convert immediate in multiplication into a shift immediate */
++ switch (value)
++ {
++ case 2:
++ value = 1;
++ break;
++ case 4:
++ value = 2;
++ break;
++ case 8:
++ value = 3;
++ break;
++ default:
++ value = 0;
++ }
++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
++ value);
++ break;
++ }
+ case ASHIFT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
-+ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
-+ (int) INTVAL (XEXP (x, 1)));
++ fprintf (stream, "%s << %i", reg_names[true_regnum (XEXP (x, 0))],
++ (int) INTVAL (XEXP (x, 1)));
+ else if (REG_P (XEXP (x, 1)))
-+ fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
-+ reg_names[true_regnum (XEXP (x, 1))]);
++ fprintf (stream, "%s << %s", reg_names[true_regnum (XEXP (x, 0))],
++ reg_names[true_regnum (XEXP (x, 1))]);
+ else
-+ {
-+ error = 1;
-+ }
++ {
++ error = 1;
++ }
+ break;
+ case LSHIFTRT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
-+ fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
-+ (int) INTVAL (XEXP (x, 1)));
++ fprintf (stream, "%s >> %i", reg_names[true_regnum (XEXP (x, 0))],
++ (int) INTVAL (XEXP (x, 1)));
+ else if (REG_P (XEXP (x, 1)))
-+ fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
-+ reg_names[true_regnum (XEXP (x, 1))]);
++ fprintf (stream, "%s >> %s", reg_names[true_regnum (XEXP (x, 0))],
++ reg_names[true_regnum (XEXP (x, 1))]);
+ else
-+ {
-+ error = 1;
-+ }
++ {
++ error = 1;
++ }
+ fprintf (stream, ">>");
+ break;
+ case PARALLEL:
-+ {
-+ /* Load store multiple */
-+ int i;
-+ int count = XVECLEN (x, 0);
-+ int reglist16 = 0;
-+ char reglist16_string[100];
++ {
++ /* Load store multiple */
++ int i;
++ int count = XVECLEN (x, 0);
++ int reglist16 = 0;
++ char reglist16_string[100];
++
++ for (i = 0; i < count; ++i)
++ {
++ rtx vec_elm = XVECEXP (x, 0, i);
++ if (GET_MODE (vec_elm) != SET)
++ {
++ debug_rtx (vec_elm);
++ internal_error ("Unknown element in parallel expression!");
++ }
++ if (GET_MODE (XEXP (vec_elm, 0)) == REG)
++ {
++ /* Load multiple */
++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
++ }
++ else
++ {
++ /* Store multiple */
++ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
++ }
++ }
+
-+ for (i = 0; i < count; ++i)
-+ {
-+ rtx vec_elm = XVECEXP (x, 0, i);
-+ if (GET_MODE (vec_elm) != SET)
-+ {
-+ debug_rtx (vec_elm);
-+ internal_error ("Unknown element in parallel expression!");
-+ }
-+ if (GET_MODE (XEXP (vec_elm, 0)) == REG)
-+ {
-+ /* Load multiple */
-+ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 0)));
-+ }
-+ else
-+ {
-+ /* Store multiple */
-+ reglist16 |= 1 << ASM_REGNUM (REGNO (XEXP (vec_elm, 1)));
-+ }
-+ }
++ avr32_make_reglist16 (reglist16, reglist16_string);
++ fputs (reglist16_string, stream);
+
-+ avr32_make_reglist16 (reglist16, reglist16_string);
-+ fputs (reglist16_string, stream);
++ break;
++ }
+
-+ break;
-+ }
++ case PLUS:
++ {
++ rtx op0 = XEXP (x, 0);
++ rtx op1 = XEXP (x, 1);
++ rtx base = NULL_RTX, offset = NULL_RTX;
++
++ if (avr32_address_register_rtx_p (op0, 1))
++ {
++ base = op0;
++ offset = op1;
++ }
++ else if (avr32_address_register_rtx_p (op1, 1))
++ {
++ /* Operands are switched. */
++ base = op1;
++ offset = op0;
++ }
++
++ gcc_assert (base && offset
++ && avr32_address_register_rtx_p (base, 1)
++ && avr32_legitimate_index_p (GET_MODE (x), offset, 1));
++
++ avr32_print_operand (stream, base, 0);
++ fputs ("[", stream);
++ avr32_print_operand (stream, offset, 0);
++ fputs ("]", stream);
++ break;
++ }
+
+ default:
+ error = 1;
+ }
+
-+ if (error)
-+ {
-+ debug_rtx (x);
-+ internal_error ("Illegal expression for avr32_print_operand");
-+ }
-+ }
++ if (error)
++ {
++ debug_rtx (x);
++ internal_error ("Illegal expression for avr32_print_operand");
++ }
++}
+
+rtx
+avr32_get_note_reg_equiv (rtx insn)
-+ {
-+ rtx note;
++{
++ rtx note;
+
-+ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
++ note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
+
-+ if (note != NULL_RTX)
-+ return XEXP (note, 0);
-+ else
-+ return NULL_RTX;
-+ }
++ if (note != NULL_RTX)
++ return XEXP (note, 0);
++ else
++ return NULL_RTX;
++}
+
+/*
+ Outputs to stdio stream stream the assembler syntax for an instruction
+ expression.
+
+ ToDo: fixme.
-+ */
++*/
+void
+avr32_print_operand_address (FILE * stream, rtx x)
-+ {
-+ fprintf (stream, "(%d) /* address */", REGNO (x));
-+ }
++{
++ fprintf (stream, "(%d) /* address */", REGNO (x));
++}
+
+/* Return true if _GLOBAL_OFFSET_TABLE_ symbol is mentioned. */
+bool
+avr32_got_mentioned_p (rtx addr)
-+ {
-+ if (GET_CODE (addr) == MEM)
-+ addr = XEXP (addr, 0);
-+ while (GET_CODE (addr) == CONST)
-+ addr = XEXP (addr, 0);
-+ if (GET_CODE (addr) == SYMBOL_REF)
-+ {
-+ return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
-+ }
-+ if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
-+ {
-+ bool l1, l2;
++{
++ if (GET_CODE (addr) == MEM)
++ addr = XEXP (addr, 0);
++ while (GET_CODE (addr) == CONST)
++ addr = XEXP (addr, 0);
++ if (GET_CODE (addr) == SYMBOL_REF)
++ {
++ return streq (XSTR (addr, 0), "_GLOBAL_OFFSET_TABLE_");
++ }
++ if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
++ {
++ bool l1, l2;
+
-+ l1 = avr32_got_mentioned_p (XEXP (addr, 0));
-+ l2 = avr32_got_mentioned_p (XEXP (addr, 1));
-+ return l1 || l2;
-+ }
-+ return false;
-+ }
++ l1 = avr32_got_mentioned_p (XEXP (addr, 0));
++ l2 = avr32_got_mentioned_p (XEXP (addr, 1));
++ return l1 || l2;
++ }
++ return false;
++}
+
+
+/* Find the symbol in an address expression. */
+
+rtx
+avr32_find_symbol (rtx addr)
-+ {
-+ if (GET_CODE (addr) == MEM)
-+ addr = XEXP (addr, 0);
++{
++ if (GET_CODE (addr) == MEM)
++ addr = XEXP (addr, 0);
+
-+ while (GET_CODE (addr) == CONST)
-+ addr = XEXP (addr, 0);
++ while (GET_CODE (addr) == CONST)
++ addr = XEXP (addr, 0);
+
-+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
-+ return addr;
-+ if (GET_CODE (addr) == PLUS)
-+ {
-+ rtx l1, l2;
-+
-+ l1 = avr32_find_symbol (XEXP (addr, 0));
-+ l2 = avr32_find_symbol (XEXP (addr, 1));
-+ if (l1 != NULL_RTX && l2 == NULL_RTX)
-+ return l1;
-+ else if (l1 == NULL_RTX && l2 != NULL_RTX)
-+ return l2;
-+ }
++ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
++ return addr;
++ if (GET_CODE (addr) == PLUS)
++ {
++ rtx l1, l2;
++
++ l1 = avr32_find_symbol (XEXP (addr, 0));
++ l2 = avr32_find_symbol (XEXP (addr, 1));
++ if (l1 != NULL_RTX && l2 == NULL_RTX)
++ return l1;
++ else if (l1 == NULL_RTX && l2 != NULL_RTX)
++ return l2;
++ }
+
-+ return NULL_RTX;
-+ }
++ return NULL_RTX;
++}
+
+
+/* Routines for manipulation of the constant pool. */
+/* Fixes less than a word need padding out to a word boundary. */
+#define MINIPOOL_FIX_SIZE(mode, value) \
+ (IS_FORCE_MINIPOOL(value) ? 0 : \
-+ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
++ (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4))
+
+#define IS_FORCE_MINIPOOL(x) \
+ (GET_CODE(x) == UNSPEC && \
-+ XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
++ XINT(x, 1) == UNSPEC_FORCE_MINIPOOL)
+
+static Mnode *minipool_vector_head;
+static Mnode *minipool_vector_tail;
+ of the TABLE or NULL_RTX. */
+static rtx
+is_jump_table (rtx insn)
-+ {
-+ rtx table;
-+
-+ if (GET_CODE (insn) == JUMP_INSN
-+ && JUMP_LABEL (insn) != NULL
-+ && ((table = next_real_insn (JUMP_LABEL (insn)))
-+ == next_real_insn (insn))
-+ && table != NULL
-+ && GET_CODE (table) == JUMP_INSN
-+ && (GET_CODE (PATTERN (table)) == ADDR_VEC
-+ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
-+ return table;
-+
-+ return NULL_RTX;
-+ }
++{
++ rtx table;
++
++ if (GET_CODE (insn) == JUMP_INSN
++ && JUMP_LABEL (insn) != NULL
++ && ((table = next_real_insn (JUMP_LABEL (insn)))
++ == next_real_insn (insn))
++ && table != NULL
++ && GET_CODE (table) == JUMP_INSN
++ && (GET_CODE (PATTERN (table)) == ADDR_VEC
++ || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
++ return table;
++
++ return NULL_RTX;
++}
+
+static HOST_WIDE_INT
+get_jump_table_size (rtx insn)
-+ {
-+ /* ADDR_VECs only take room if read-only data does into the text section. */
-+ if (JUMP_TABLES_IN_TEXT_SECTION
++{
++ /* ADDR_VECs only take room if read-only data goes into the text section. */
++ if (JUMP_TABLES_IN_TEXT_SECTION
+#if !defined(READONLY_DATA_SECTION_ASM_OP)
-+ || 1
++ || 1
+#endif
+ )
-+ {
-+ rtx body = PATTERN (insn);
-+ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
++ {
++ rtx body = PATTERN (insn);
++ int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
+
-+ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
-+ }
++ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
++ }
+
-+ return 0;
-+ }
++ return 0;
++}
+
+/* Move a minipool fix MP from its current location to before MAX_MP.
+ If MAX_MP is NULL, then MP doesn't need moving, but the addressing
+ constraints may need updating. */
+static Mnode *
+move_minipool_fix_forward_ref (Mnode * mp, Mnode * max_mp,
-+ HOST_WIDE_INT max_address)
-+ {
-+ /* This should never be true and the code below assumes these are
++ HOST_WIDE_INT max_address)
++{
++ /* This should never be true and the code below assumes these are
+ different. */
-+ if (mp == max_mp)
-+ abort ();
++ if (mp == max_mp)
++ abort ();
+
-+ if (max_mp == NULL)
-+ {
-+ if (max_address < mp->max_address)
-+ mp->max_address = max_address;
-+ }
++ if (max_mp == NULL)
++ {
++ if (max_address < mp->max_address)
++ mp->max_address = max_address;
++ }
++ else
++ {
++ if (max_address > max_mp->max_address - mp->fix_size)
++ mp->max_address = max_mp->max_address - mp->fix_size;
+ else
-+ {
-+ if (max_address > max_mp->max_address - mp->fix_size)
-+ mp->max_address = max_mp->max_address - mp->fix_size;
-+ else
-+ mp->max_address = max_address;
++ mp->max_address = max_address;
+
-+ /* Unlink MP from its current position. Since max_mp is non-null,
++ /* Unlink MP from its current position. Since max_mp is non-null,
+ mp->prev must be non-null. */
-+ mp->prev->next = mp->next;
-+ if (mp->next != NULL)
-+ mp->next->prev = mp->prev;
-+ else
-+ minipool_vector_tail = mp->prev;
-+
-+ /* Re-insert it before MAX_MP. */
-+ mp->next = max_mp;
-+ mp->prev = max_mp->prev;
-+ max_mp->prev = mp;
++ mp->prev->next = mp->next;
++ if (mp->next != NULL)
++ mp->next->prev = mp->prev;
++ else
++ minipool_vector_tail = mp->prev;
+
-+ if (mp->prev != NULL)
-+ mp->prev->next = mp;
-+ else
-+ minipool_vector_head = mp;
-+ }
++ /* Re-insert it before MAX_MP. */
++ mp->next = max_mp;
++ mp->prev = max_mp->prev;
++ max_mp->prev = mp;
+
-+ /* Save the new entry. */
-+ max_mp = mp;
++ if (mp->prev != NULL)
++ mp->prev->next = mp;
++ else
++ minipool_vector_head = mp;
++ }
+
-+ /* Scan over the preceding entries and adjust their addresses as required.
-+ */
-+ while (mp->prev != NULL
-+ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
-+ {
-+ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
-+ mp = mp->prev;
-+ }
++ /* Save the new entry. */
++ max_mp = mp;
+
-+ return max_mp;
++ /* Scan over the preceding entries and adjust their addresses as required.
++ */
++ while (mp->prev != NULL
++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
++ {
++ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
++ mp = mp->prev;
+ }
+
++ return max_mp;
++}
++
+/* Add a constant to the minipool for a forward reference. Returns the
+ node added or NULL if the constant will not fit in this pool. */
+static Mnode *
+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
+ {
+ if (GET_CODE (fix->value) == GET_CODE (mp->value)
-+ && fix->mode == mp->mode
-+ && (GET_CODE (fix->value) != CODE_LABEL
-+ || (CODE_LABEL_NUMBER (fix->value)
-+ == CODE_LABEL_NUMBER (mp->value)))
-+ && rtx_equal_p (fix->value, mp->value))
-+ {
-+ /* More than one fix references this entry. */
-+ mp->refcount++;
-+ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
-+ }
++ && fix->mode == mp->mode
++ && (GET_CODE (fix->value) != CODE_LABEL
++ || (CODE_LABEL_NUMBER (fix->value)
++ == CODE_LABEL_NUMBER (mp->value)))
++ && rtx_equal_p (fix->value, mp->value))
++ {
++ /* More than one fix references this entry. */
++ mp->refcount++;
++ return move_minipool_fix_forward_ref (mp, max_mp, max_address);
++ }
+
+ /* Note the insertion point if necessary. */
+ if (max_mp == NULL && mp->max_address > max_address)
-+ max_mp = mp;
++ max_mp = mp;
+
+ }
+
+ mp->prev = minipool_vector_tail;
+
+ if (mp->prev == NULL)
-+ {
-+ minipool_vector_head = mp;
-+ minipool_vector_label = gen_label_rtx ();
-+ }
++ {
++ minipool_vector_head = mp;
++ minipool_vector_label = gen_label_rtx ();
++ }
+ else
-+ mp->prev->next = mp;
++ mp->prev->next = mp;
+
+ minipool_vector_tail = mp;
+ }
+ else
+ {
+ if (max_address > max_mp->max_address - mp->fix_size)
-+ mp->max_address = max_mp->max_address - mp->fix_size;
++ mp->max_address = max_mp->max_address - mp->fix_size;
+ else
-+ mp->max_address = max_address;
++ mp->max_address = max_address;
+
+ mp->next = max_mp;
+ mp->prev = max_mp->prev;
+ max_mp->prev = mp;
+ if (mp->prev != NULL)
-+ mp->prev->next = mp;
++ mp->prev->next = mp;
+ else
-+ minipool_vector_head = mp;
++ minipool_vector_head = mp;
+ }
+
+ /* Save the new entry. */
+ /* Scan over the preceding entries and adjust their addresses as required.
+ */
+ while (mp->prev != NULL
-+ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
++ && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
+ {
+ mp->prev->max_address = mp->max_address - mp->prev->fix_size;
+ mp = mp->prev;
+
+static Mnode *
+move_minipool_fix_backward_ref (Mnode * mp, Mnode * min_mp,
-+ HOST_WIDE_INT min_address)
-+ {
-+ HOST_WIDE_INT offset;
++ HOST_WIDE_INT min_address)
++{
++ HOST_WIDE_INT offset;
+
-+ /* This should never be true, and the code below assumes these are
++ /* This should never be true, and the code below assumes these are
+ different. */
-+ if (mp == min_mp)
-+ abort ();
++ if (mp == min_mp)
++ abort ();
+
-+ if (min_mp == NULL)
-+ {
-+ if (min_address > mp->min_address)
-+ mp->min_address = min_address;
-+ }
-+ else
-+ {
-+ /* We will adjust this below if it is too loose. */
-+ mp->min_address = min_address;
++ if (min_mp == NULL)
++ {
++ if (min_address > mp->min_address)
++ mp->min_address = min_address;
++ }
++ else
++ {
++ /* We will adjust this below if it is too loose. */
++ mp->min_address = min_address;
+
-+ /* Unlink MP from its current position. Since min_mp is non-null,
++ /* Unlink MP from its current position. Since min_mp is non-null,
+ mp->next must be non-null. */
-+ mp->next->prev = mp->prev;
-+ if (mp->prev != NULL)
-+ mp->prev->next = mp->next;
-+ else
-+ minipool_vector_head = mp->next;
-+
-+ /* Reinsert it after MIN_MP. */
-+ mp->prev = min_mp;
-+ mp->next = min_mp->next;
-+ min_mp->next = mp;
-+ if (mp->next != NULL)
-+ mp->next->prev = mp;
-+ else
-+ minipool_vector_tail = mp;
-+ }
++ mp->next->prev = mp->prev;
++ if (mp->prev != NULL)
++ mp->prev->next = mp->next;
++ else
++ minipool_vector_head = mp->next;
+
-+ min_mp = mp;
++ /* Reinsert it after MIN_MP. */
++ mp->prev = min_mp;
++ mp->next = min_mp->next;
++ min_mp->next = mp;
++ if (mp->next != NULL)
++ mp->next->prev = mp;
++ else
++ minipool_vector_tail = mp;
++ }
+
-+ offset = 0;
-+ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
-+ {
-+ mp->offset = offset;
-+ if (mp->refcount > 0)
-+ offset += mp->fix_size;
++ min_mp = mp;
+
-+ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
-+ mp->next->min_address = mp->min_address + mp->fix_size;
-+ }
++ offset = 0;
++ for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
++ {
++ mp->offset = offset;
++ if (mp->refcount > 0)
++ offset += mp->fix_size;
+
-+ return min_mp;
++ if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
++ mp->next->min_address = mp->min_address + mp->fix_size;
+ }
+
++ return min_mp;
++}
++
+/* Add a constant to the minipool for a backward reference. Returns the
+ node added or NULL if the constant will not fit in this pool.
+
+ range, then we don't try. This ensures that we can't fail later on. */
+ if (min_address >= minipool_barrier->address
+ || (minipool_vector_tail->min_address + fix->fix_size
-+ >= minipool_barrier->address))
++ >= minipool_barrier->address))
+ return NULL;
+
+ /* Scan the pool to see if a constant with the same value has already been
+ for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
+ {
+ if (GET_CODE (fix->value) == GET_CODE (mp->value)
-+ && fix->mode == mp->mode
-+ && (GET_CODE (fix->value) != CODE_LABEL
-+ || (CODE_LABEL_NUMBER (fix->value)
-+ == CODE_LABEL_NUMBER (mp->value)))
-+ && rtx_equal_p (fix->value, mp->value)
-+ /* Check that there is enough slack to move this entry to the end
++ && fix->mode == mp->mode
++ && (GET_CODE (fix->value) != CODE_LABEL
++ || (CODE_LABEL_NUMBER (fix->value)
++ == CODE_LABEL_NUMBER (mp->value)))
++ && rtx_equal_p (fix->value, mp->value)
++ /* Check that there is enough slack to move this entry to the end
+ of the table (this is conservative). */
-+ && (mp->max_address
-+ > (minipool_barrier->address
-+ + minipool_vector_tail->offset
-+ + minipool_vector_tail->fix_size)))
-+ {
-+ mp->refcount++;
-+ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
-+ }
++ && (mp->max_address
++ > (minipool_barrier->address
++ + minipool_vector_tail->offset
++ + minipool_vector_tail->fix_size)))
++ {
++ mp->refcount++;
++ return move_minipool_fix_backward_ref (mp, min_mp, min_address);
++ }
+
+ if (min_mp != NULL)
-+ mp->min_address += fix->fix_size;
++ mp->min_address += fix->fix_size;
+ else
-+ {
-+ /* Note the insertion point if necessary. */
-+ if (mp->min_address < min_address)
-+ {
-+ min_mp = mp;
-+ }
-+ else if (mp->max_address
-+ < minipool_barrier->address + mp->offset + fix->fix_size)
-+ {
-+ /* Inserting before this entry would push the fix beyond its
++ {
++ /* Note the insertion point if necessary. */
++ if (mp->min_address < min_address)
++ {
++ min_mp = mp;
++ }
++ else if (mp->max_address
++ < minipool_barrier->address + mp->offset + fix->fix_size)
++ {
++ /* Inserting before this entry would push the fix beyond its
+ maximum address (which can happen if we have re-located a
+ forwards fix); force the new fix to come after it. */
-+ min_mp = mp;
-+ min_address = mp->min_address + fix->fix_size;
-+ }
-+ }
++ min_mp = mp;
++ min_address = mp->min_address + fix->fix_size;
++ }
++ }
+ }
+
+ /* We need to create a new entry. */
+ mp->next = minipool_vector_head;
+
+ if (mp->next == NULL)
-+ {
-+ minipool_vector_tail = mp;
-+ minipool_vector_label = gen_label_rtx ();
-+ }
++ {
++ minipool_vector_tail = mp;
++ minipool_vector_label = gen_label_rtx ();
++ }
+ else
-+ mp->next->prev = mp;
++ mp->next->prev = mp;
+
+ minipool_vector_head = mp;
+ }
+ min_mp->next = mp;
+
+ if (mp->next != NULL)
-+ mp->next->prev = mp;
++ mp->next->prev = mp;
+ else
-+ minipool_vector_tail = mp;
++ minipool_vector_tail = mp;
+ }
+
+ /* Save the new entry. */