1 --- a/configure.in
2 +++ b/configure.in
3 @@ -503,6 +503,9 @@ case "${target}" in
4 arm-*-riscix*)
5 noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}"
6 ;;
7 + avr32-*-*)
8 + noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}"
9 + ;;
10 avr-*-*)
11 noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}"
12 ;;
13 --- a/gcc/builtins.c
14 +++ b/gcc/builtins.c
15 @@ -9223,7 +9223,7 @@ validate_arglist (tree arglist, ...)
16
17 do
18 {
19 - code = va_arg (ap, enum tree_code);
20 + code = va_arg (ap, int);
21 switch (code)
22 {
23 case 0:
24 --- a/gcc/calls.c
25 +++ b/gcc/calls.c
26 @@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
27 for (; count < nargs; count++)
28 {
29 rtx val = va_arg (p, rtx);
30 - enum machine_mode mode = va_arg (p, enum machine_mode);
31 + enum machine_mode mode = va_arg (p, int);
32
33 /* We cannot convert the arg value to the mode the library wants here;
34 must do it earlier where we know the signedness of the arg. */
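/* [Editor's sketch, not part of the patch] Why both hunks above replace
   `va_arg (ap, enum ...)' with `va_arg (ap, int)': values passed through
   `...' undergo the default argument promotions, so an enum arrives as an
   int and must be read back as one.  A minimal illustration with a
   hypothetical enum: */
#include <stdarg.h>
enum tree_code_example { EX_ZERO, EX_ONE };
static int first_code (int count, ...)
{
  va_list ap;
  int code;
  va_start (ap, count);
  code = va_arg (ap, int);   /* not: va_arg (ap, enum tree_code_example) */
  va_end (ap);
  return code;
}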
35 --- a/gcc/c-incpath.c
36 +++ b/gcc/c-incpath.c
37 @@ -347,6 +347,18 @@ add_path (char *path, int chain, int cxx
38 char* c;
39 for (c = path; *c; c++)
40 if (*c == '\\') *c = '/';
41 + /* Remove unnecessary trailing slashes. On some versions of MS
42 + Windows, trailing _forward_ slashes cause no problems for stat().
43 + On newer versions, stat() does not recognise a directory that ends
44 + in a '\\' or '/', unless it is a drive root dir, such as "c:/",
45 + where it is obligatory. */
46 + int pathlen = strlen (path);
47 + char* end = path + pathlen - 1;
48 + /* Preserve the lead '/' or lead "c:/". */
49 + char* start = path + (pathlen > 2 && path[1] == ':' ? 3 : 1);
50 +
51 + for (; end > start && IS_DIR_SEPARATOR (*end); end--)
52 + *end = 0;
53 #endif
54
55 p = XNEW (cpp_dir);
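/* [Editor's sketch, not part of the patch] The trailing-slash stripping
   added in the hunk above, extracted as a self-contained function for
   clarity.  IS_DIR_SEPARATOR is replaced by a plain test and the function
   name is illustrative. */
#include <string.h>
static void strip_trailing_slashes (char *path)
{
  size_t len = strlen (path);
  char *start, *end;
  if (len == 0)
    return;
  /* Keep a lone leading '/' or a drive root such as "c:/". */
  start = path + (len > 2 && path[1] == ':' ? 3 : 1);
  end = path + len - 1;
  for (; end > start && (*end == '/' || *end == '\\'); end--)
    *end = '\0';
}
/* strip_trailing_slashes ("c:/foo//") leaves "c:/foo";
   strip_trailing_slashes ("c:/") leaves the drive root untouched. */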
56 --- /dev/null
57 +++ b/gcc/config/avr32/avr32.c
58 @@ -0,0 +1,7915 @@
59 +/*
60 + Target hooks and helper functions for AVR32.
61 + Copyright 2003-2006 Atmel Corporation.
62 +
63 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
64 +   Initial porting by Anders Ådland.
65 +
66 + This file is part of GCC.
67 +
68 + This program is free software; you can redistribute it and/or modify
69 + it under the terms of the GNU General Public License as published by
70 + the Free Software Foundation; either version 2 of the License, or
71 + (at your option) any later version.
72 +
73 + This program is distributed in the hope that it will be useful,
74 + but WITHOUT ANY WARRANTY; without even the implied warranty of
75 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
76 + GNU General Public License for more details.
77 +
78 + You should have received a copy of the GNU General Public License
79 + along with this program; if not, write to the Free Software
80 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
81 +
82 +#include "config.h"
83 +#include "system.h"
84 +#include "coretypes.h"
85 +#include "tm.h"
86 +#include "rtl.h"
87 +#include "tree.h"
88 +#include "obstack.h"
89 +#include "regs.h"
90 +#include "hard-reg-set.h"
91 +#include "real.h"
92 +#include "insn-config.h"
93 +#include "conditions.h"
94 +#include "output.h"
95 +#include "insn-attr.h"
96 +#include "flags.h"
97 +#include "reload.h"
98 +#include "function.h"
99 +#include "expr.h"
100 +#include "optabs.h"
101 +#include "toplev.h"
102 +#include "recog.h"
103 +#include "ggc.h"
104 +#include "except.h"
105 +#include "c-pragma.h"
106 +#include "integrate.h"
107 +#include "tm_p.h"
108 +#include "langhooks.h"
109 +
110 +#include "target.h"
111 +#include "target-def.h"
112 +
113 +#include <ctype.h>
114 +
115 +/* Forward definitions of types. */
116 +typedef struct minipool_node Mnode;
117 +typedef struct minipool_fixup Mfix;
118 +
119 +/* Obstack for minipool constant handling. */
120 +static struct obstack minipool_obstack;
121 +static char *minipool_startobj;
122 +static rtx minipool_vector_label;
123 +
124 +/* True if we are currently building a constant table. */
125 +int making_const_table;
126 +
127 +/* Some forward function declarations */
128 +static unsigned long avr32_isr_value (tree);
129 +static unsigned long avr32_compute_func_type (void);
130 +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
131 +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
132 +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
133 + int flags, bool * no_add_attrs);
134 +static void avr32_reorg (void);
135 +bool avr32_return_in_msb (tree type);
136 +bool avr32_vector_mode_supported (enum machine_mode mode);
137 +static void avr32_init_libfuncs (void);
138 +
139 +
140 +static void
141 +avr32_add_gc_roots (void)
142 +{
143 + gcc_obstack_init (&minipool_obstack);
144 + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
145 +}
146 +
147 +
148 +/* List of all known AVR32 parts */
149 +static const struct part_type_s avr32_part_types[] = {
150 + /* name, part_type, architecture type, macro */
151 + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
152 + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
153 + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
154 + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
155 + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
156 + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2,
157 + "__AVR32_UC3A0128__"},
158 + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2,
159 + "__AVR32_UC3A0256__"},
160 + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2,
161 + "__AVR32_UC3A0512__"},
162 + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1,
163 + "__AVR32_UC3A0512ES__"},
164 + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2,
165 + "__AVR32_UC3A1128__"},
166 + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2,
167 + "__AVR32_UC3A1256__"},
168 + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2,
169 + "__AVR32_UC3A1512__"},
170 + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1,
171 + "__AVR32_UC3A1512ES__"},
172 + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL,
173 + "__AVR32_UC3A3256S__"},
174 + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2,
175 + "__AVR32_UC3A364__"},
176 + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2,
177 + "__AVR32_UC3A364S__"},
178 + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2,
179 + "__AVR32_UC3A3128__"},
180 + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2,
181 + "__AVR32_UC3A3128S__"},
182 + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2,
183 + "__AVR32_UC3A3256__"},
184 + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2,
185 + "__AVR32_UC3A3256S__"},
186 + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1,
187 + "__AVR32_UC3B064__"},
188 + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1,
189 + "__AVR32_UC3B0128__"},
190 + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1,
191 + "__AVR32_UC3B0256__"},
192 + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1,
193 + "__AVR32_UC3B0256ES__"},
194 + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1,
195 + "__AVR32_UC3B164__"},
196 + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1,
197 + "__AVR32_UC3B1128__"},
198 + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1,
199 + "__AVR32_UC3B1256__"},
200 + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1,
201 + "__AVR32_UC3B1256ES__"},
202 + {NULL, 0, 0, NULL}
203 +};
204 +
205 +/* List of all known AVR32 architectures */
206 +static const struct arch_type_s avr32_arch_types[] = {
207 + /* name, architecture type, microarchitecture type, feature flags, macro */
208 + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
209 + (FLAG_AVR32_HAS_DSP
210 + | FLAG_AVR32_HAS_SIMD
211 + | FLAG_AVR32_HAS_UNALIGNED_WORD
212 + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
213 + | FLAG_AVR32_HAS_CACHES),
214 + "__AVR32_AP__"},
215 + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
216 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
217 + "__AVR32_UC__=1"},
218 + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
219 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
220 + | FLAG_AVR32_HAS_V2_INSNS),
221 + "__AVR32_UC__=2"},
222 + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
223 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
224 + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
225 + "__AVR32_UC__=3"},
226 + {NULL, 0, 0, 0, NULL}
227 +};
228 +
229 +/* Default arch name */
230 +const char *avr32_arch_name = "none";
231 +const char *avr32_part_name = "none";
232 +
233 +const struct part_type_s *avr32_part;
234 +const struct arch_type_s *avr32_arch;
235 +
236 +
237 +/* Set default target_flags. */
238 +#undef TARGET_DEFAULT_TARGET_FLAGS
239 +#define TARGET_DEFAULT_TARGET_FLAGS \
240 + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
241 +
242 +void
243 +avr32_optimization_options (int level,
244 + int size){
245 + if (AVR32_ALWAYS_PIC)
246 + flag_pic = 1;
247 +
248 + /* Enable section anchors if optimization is enabled. */
249 + if (level > 0 || size)
250 + flag_section_anchors = 1;
251 +}
252 +
253 +/* Override command line options */
254 +void
255 +avr32_override_options (void)
256 +{
257 + const struct part_type_s *part;
258 + const struct arch_type_s *arch;
259 +
260 +  /* Add backward compatibility. */
261 + if (strcmp ("uc", avr32_arch_name)== 0)
262 + {
263 + fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
264 + "Please use '-march=ucr1' instead. "
265 + "Converting to arch 'ucr1'\n",
266 + avr32_arch_name);
267 + avr32_arch_name="ucr1";
268 + }
269 +
270 + /* Check if arch type is set. */
271 + for (arch = avr32_arch_types; arch->name; arch++)
272 + {
273 + if (strcmp (arch->name, avr32_arch_name) == 0)
274 + break;
275 + }
276 + avr32_arch = arch;
277 +
278 + if (!arch->name && strcmp("none", avr32_arch_name) != 0)
279 + {
280 + fprintf (stderr, "Unknown arch `%s' specified\n"
281 + "Known arch names:\n"
282 + "\tuc (deprecated)\n",
283 + avr32_arch_name);
284 + for (arch = avr32_arch_types; arch->name; arch++)
285 + fprintf (stderr, "\t%s\n", arch->name);
286 + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
287 + }
288 +
289 + /* Check if part type is set. */
290 + for (part = avr32_part_types; part->name; part++)
291 + if (strcmp (part->name, avr32_part_name) == 0)
292 + break;
293 +
294 + avr32_part = part;
295 + if (!part->name)
296 + {
297 + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
298 + avr32_part_name);
299 + for (part = avr32_part_types; part->name; part++)
300 + {
301 + if (strcmp("none", part->name) != 0)
302 + fprintf (stderr, "\t%s\n", part->name);
303 + }
304 + /* Set default to NONE*/
305 + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
306 + }
307 +
308 + /* NB! option -march= overrides option -mpart
309 + * if both are used at the same time */
310 + if (!arch->name)
311 + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
312 +
313 + /* If optimization level is two or greater, then align start of loops to a
314 + word boundary since this will allow folding the first insn of the loop.
315 + Do this only for targets supporting branch prediction. */
316 + if (optimize >= 2 && TARGET_BRANCH_PRED)
317 + align_loops = 2;
318 +
319 +
320 + /* Enable fast-float library if unsafe math optimizations
321 + are used. */
322 + if (flag_unsafe_math_optimizations)
323 + target_flags |= MASK_FAST_FLOAT;
324 +
325 + /* Check if we should set avr32_imm_in_const_pool
326 +     based on whether caches are present. */
327 + if ( avr32_imm_in_const_pool == -1 )
328 + {
329 + if ( TARGET_CACHES )
330 + avr32_imm_in_const_pool = 1;
331 + else
332 + avr32_imm_in_const_pool = 0;
333 + }
334 +
335 + if (TARGET_NO_PIC)
336 + flag_pic = 0;
337 +
338 + avr32_add_gc_roots ();
339 +}
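/* [Editor's illustration] How the option handling above composes: with
   `-mpart=uc3a0512' and no -march, the arch loop finds no match, the part
   loop matches the table entry, and since arch->name is still NULL the
   architecture falls back to the part's arch_type (ucr2 for that part).
   Adding `-march=ucr1' as well would win, since a matching -march leaves
   arch->name non-NULL. */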
340 +
341 +
342 +/*
343 +If defined, a function that outputs the assembler code for entry to a
344 +function. The prologue is responsible for setting up the stack frame,
345 +initializing the frame pointer register, saving registers that must be
346 +saved, and allocating size additional bytes of storage for the
347 +local variables. size is an integer. file is a stdio
348 +stream to which the assembler code should be output.
349 +
350 +The label for the beginning of the function need not be output by this
351 +macro. That has already been done when the macro is run.
352 +
353 +To determine which registers to save, the macro can refer to the array
354 +regs_ever_live: element r is nonzero if hard register
355 +r is used anywhere within the function. This implies the function
356 +prologue should save register r, provided it is not one of the
357 +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
358 +regs_ever_live.)
359 +
360 +On machines that have ``register windows'', the function entry code does
361 +not save on the stack the registers that are in the windows, even if
362 +they are supposed to be preserved by function calls; instead it takes
363 +appropriate steps to ``push'' the register stack, if any non-call-used
364 +registers are used in the function.
365 +
366 +On machines where functions may or may not have frame-pointers, the
367 +function entry code must vary accordingly; it must set up the frame
368 +pointer if one is wanted, and not otherwise. To determine whether a
369 +frame pointer is wanted, the macro can refer to the variable
370 +frame_pointer_needed. The variable's value will be 1 at run
371 +time in a function that needs a frame pointer. (see Elimination).
372 +
373 +The function entry code is responsible for allocating any stack space
374 +required for the function. This stack space consists of the regions
375 +listed below. In most cases, these regions are allocated in the
376 +order listed, with the last listed region closest to the top of the
377 +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
378 +the highest address if it is not defined). You can use a different order
379 +for a machine if doing so is more convenient or required for
380 +compatibility reasons. Except in cases where required by standard
381 +or by a debugger, there is no reason why the stack layout used by GCC
382 +need agree with that used by other compilers for a machine.
383 +*/
384 +
385 +#undef TARGET_ASM_FUNCTION_PROLOGUE
386 +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
387 +
388 +
389 +#undef TARGET_DEFAULT_SHORT_ENUMS
390 +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
391 +
392 +#undef TARGET_PROMOTE_FUNCTION_ARGS
393 +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
394 +
395 +#undef TARGET_PROMOTE_FUNCTION_RETURN
396 +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
397 +
398 +#undef TARGET_PROMOTE_PROTOTYPES
399 +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
400 +
401 +#undef TARGET_MUST_PASS_IN_STACK
402 +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
403 +
404 +#undef TARGET_PASS_BY_REFERENCE
405 +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
406 +
407 +#undef TARGET_STRICT_ARGUMENT_NAMING
408 +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
409 +
410 +#undef TARGET_VECTOR_MODE_SUPPORTED_P
411 +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
412 +
413 +#undef TARGET_RETURN_IN_MEMORY
414 +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
415 +
416 +#undef TARGET_RETURN_IN_MSB
417 +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
418 +
419 +#undef TARGET_ENCODE_SECTION_INFO
420 +#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
421 +
422 +#undef TARGET_ARG_PARTIAL_BYTES
423 +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
424 +
425 +#undef TARGET_STRIP_NAME_ENCODING
426 +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
427 +
428 +#define streq(string1, string2) (strcmp (string1, string2) == 0)
429 +
430 +#undef TARGET_NARROW_VOLATILE_BITFIELD
431 +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
432 +
433 +#undef TARGET_ATTRIBUTE_TABLE
434 +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
435 +
436 +#undef TARGET_COMP_TYPE_ATTRIBUTES
437 +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
438 +
439 +
440 +#undef TARGET_RTX_COSTS
441 +#define TARGET_RTX_COSTS avr32_rtx_costs
442 +
443 +#undef TARGET_CANNOT_FORCE_CONST_MEM
444 +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
445 +
446 +#undef TARGET_ASM_INTEGER
447 +#define TARGET_ASM_INTEGER avr32_assemble_integer
448 +
449 +#undef TARGET_FUNCTION_VALUE
450 +#define TARGET_FUNCTION_VALUE avr32_function_value
451 +
452 +#undef TARGET_MIN_ANCHOR_OFFSET
453 +#define TARGET_MIN_ANCHOR_OFFSET (0)
454 +
455 +#undef TARGET_MAX_ANCHOR_OFFSET
456 +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
457 +
458 +#undef TARGET_SECONDARY_RELOAD
459 +#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
460 +
461 +enum reg_class
462 +avr32_secondary_reload (bool in_p, rtx x, enum reg_class class ATTRIBUTE_UNUSED,
463 + enum machine_mode mode, secondary_reload_info *sri)
464 +{
465 +
466 + if ( avr32_rmw_memory_operand (x, mode) )
467 + {
468 + if (!in_p)
469 + sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
470 + else
471 + sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
472 + }
473 + return NO_REGS;
474 +
475 +}
476 +
477 +/*
478 + * Switches to the appropriate section for output of constant pool
479 + * entry x in mode. You can assume that x is some kind of constant in
480 + * RTL. The argument mode is redundant except in the case of a
481 + * const_int rtx. Select the section by calling readonly_data_section
482 + * or one of the alternatives for other sections. align is the
483 + * constant alignment in bits.
484 + *
485 + * The default version of this function takes care of putting symbolic
486 + * constants in flag_pic mode in data_section and everything else in
487 + * readonly_data_section.
488 + */
489 +//#undef TARGET_ASM_SELECT_RTX_SECTION
490 +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
491 +
492 +
493 +/*
494 + * If non-null, this hook performs a target-specific pass over the
495 + * instruction stream. The compiler will run it at all optimization
496 + * levels, just before the point at which it normally does
497 + * delayed-branch scheduling.
498 + *
499 + * The exact purpose of the hook varies from target to target. Some
500 + * use it to do transformations that are necessary for correctness,
501 + * such as laying out in-function constant pools or avoiding hardware
502 + * hazards. Others use it as an opportunity to do some
503 + * machine-dependent optimizations.
504 + *
505 + * You need not implement the hook if it has nothing to do. The
506 + * default definition is null.
507 + */
508 +#undef TARGET_MACHINE_DEPENDENT_REORG
509 +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
510 +
511 +/* Target hook for assembling integer objects.
512 +   We need to handle integer vectors. */
513 +static bool
514 +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
515 +{
516 + if (avr32_vector_mode_supported (GET_MODE (x)))
517 + {
518 + int i, units;
519 +
520 + if (GET_CODE (x) != CONST_VECTOR)
521 + abort ();
522 +
523 + units = CONST_VECTOR_NUNITS (x);
524 +
525 + switch (GET_MODE (x))
526 + {
527 + case V2HImode:
528 + size = 2;
529 + break;
530 + case V4QImode:
531 + size = 1;
532 + break;
533 + default:
534 + abort ();
535 + }
536 +
537 + for (i = 0; i < units; i++)
538 + {
539 + rtx elt;
540 +
541 + elt = CONST_VECTOR_ELT (x, i);
542 + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
543 + }
544 +
545 + return true;
546 + }
547 +
548 + return default_assemble_integer (x, size, aligned_p);
549 +}
550 +
551 +/*
552 + * This target hook describes the relative costs of RTL expressions.
553 + *
554 + * The cost may depend on the precise form of the expression, which is
555 + * available for examination in x, and the rtx code of the expression
556 + * in which it is contained, found in outer_code. code is the
557 + * expression code--redundant, since it can be obtained with GET_CODE
558 + * (x).
559 + *
560 + * In implementing this hook, you can use the construct COSTS_N_INSNS
561 + * (n) to specify a cost equal to n fast instructions.
562 + *
563 + * On entry to the hook, *total contains a default estimate for the
564 + * cost of the expression. The hook should modify this value as
565 + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
566 + * for multiplications, COSTS_N_INSNS (7) for division and modulus
567 + * operations, and COSTS_N_INSNS (1) for all other operations.
568 + *
569 + * When optimizing for code size, i.e. when optimize_size is non-zero,
570 + * this target hook should be used to estimate the relative size cost
571 + * of an expression, again relative to COSTS_N_INSNS.
572 + *
573 + * The hook returns true when all subexpressions of x have been
574 + * processed, and false when rtx_cost should recurse.
575 + */
576 +
577 +/* Worker routine for avr32_rtx_costs. */
578 +static inline int
579 +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
580 + enum rtx_code outer ATTRIBUTE_UNUSED)
581 +{
582 + enum machine_mode mode = GET_MODE (x);
583 +
584 + switch (GET_CODE (x))
585 + {
586 + case MEM:
587 +      /* Using pre-decrement / post-increment memory operations on the
588 + avr32_uc architecture means that two writebacks must be performed
589 + and hence two cycles are needed. */
590 + if (!optimize_size
591 + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
592 + && TARGET_ARCH_UC
593 + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
594 + || GET_CODE (XEXP (x, 0)) == POST_INC))
595 + return COSTS_N_INSNS (5);
596 +
597 + /* Memory costs quite a lot for the first word, but subsequent words
598 + load at the equivalent of a single insn each. */
599 + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
600 + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
601 +
602 + return COSTS_N_INSNS (4);
603 + case SYMBOL_REF:
604 + case CONST:
605 +      /* These are valid for the pseudo insns lda.w and call, which operate
606 +         on direct addresses. We assume that the cost of a lda.w is the same
607 + as the cost of a ld.w insn. */
608 + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
609 + case DIV:
610 + case MOD:
611 + case UDIV:
612 + case UMOD:
613 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
614 +
615 + case ROTATE:
616 + case ROTATERT:
617 + if (mode == TImode)
618 + return COSTS_N_INSNS (100);
619 +
620 + if (mode == DImode)
621 + return COSTS_N_INSNS (10);
622 + return COSTS_N_INSNS (4);
623 + case ASHIFT:
624 + case LSHIFTRT:
625 + case ASHIFTRT:
626 + case NOT:
627 + if (mode == TImode)
628 + return COSTS_N_INSNS (10);
629 +
630 + if (mode == DImode)
631 + return COSTS_N_INSNS (4);
632 + return COSTS_N_INSNS (1);
633 + case PLUS:
634 + case MINUS:
635 + case NEG:
636 + case COMPARE:
637 + case ABS:
638 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
639 + return COSTS_N_INSNS (100);
640 +
641 + if (mode == TImode)
642 + return COSTS_N_INSNS (50);
643 +
644 + if (mode == DImode)
645 + return COSTS_N_INSNS (2);
646 + return COSTS_N_INSNS (1);
647 +
648 + case MULT:
649 + {
650 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
651 + return COSTS_N_INSNS (300);
652 +
653 + if (mode == TImode)
654 + return COSTS_N_INSNS (16);
655 +
656 + if (mode == DImode)
657 + return COSTS_N_INSNS (4);
658 +
659 + if (mode == HImode)
660 + return COSTS_N_INSNS (2);
661 +
662 + return COSTS_N_INSNS (3);
663 + }
664 + case IF_THEN_ELSE:
665 + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
666 + return COSTS_N_INSNS (4);
667 + return COSTS_N_INSNS (1);
668 + case SIGN_EXTEND:
669 + case ZERO_EXTEND:
670 +      /* Sign/Zero extensions of registers cost quite a lot since these
671 +         instructions take only one register operand, which means that gcc
672 +         often must insert some move instructions. */
673 + if (mode == QImode || mode == HImode)
674 + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
675 + return COSTS_N_INSNS (4);
676 + case UNSPEC:
677 + /* divmod operations */
678 + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
679 + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
680 + {
681 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
682 + }
683 + /* Fallthrough */
684 + default:
685 + return COSTS_N_INSNS (1);
686 + }
687 +}
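/* [Editor's worked example] For (plus:DI x y) the switch above falls into
   the PLUS case with mode == DImode and returns COSTS_N_INSNS (2): a
   64-bit add is modelled as two fast word-sized instructions (an add plus
   an add-with-carry style pair). */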
688 +
689 +static bool
690 +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
691 +{
692 + *total = avr32_rtx_costs_1 (x, code, outer_code);
693 + return true;
694 +}
695 +
696 +
697 +bool
698 +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
699 +{
700 +  /* We do not want symbols in the constant pool when compiling PIC or when
701 +     using address pseudo instructions. */
702 + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
703 + && avr32_find_symbol (x) != NULL_RTX);
704 +}
705 +
706 +
707 +/* Table of machine attributes. */
708 +const struct attribute_spec avr32_attribute_table[] = {
709 + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
710 + /* Interrupt Service Routines have special prologue and epilogue
711 + requirements. */
712 + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
713 + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
714 + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
715 + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
716 + {"rmw_addressable", 0, 0, true, false, false, NULL},
717 + {NULL, 0, 0, false, false, false, NULL}
718 +};
719 +
720 +
721 +typedef struct
722 +{
723 + const char *const arg;
724 + const unsigned long return_value;
725 +}
726 +isr_attribute_arg;
727 +
728 +static const isr_attribute_arg isr_attribute_args[] = {
729 + {"FULL", AVR32_FT_ISR_FULL},
730 + {"full", AVR32_FT_ISR_FULL},
731 + {"HALF", AVR32_FT_ISR_HALF},
732 + {"half", AVR32_FT_ISR_HALF},
733 + {"NONE", AVR32_FT_ISR_NONE},
734 + {"none", AVR32_FT_ISR_NONE},
735 + {"UNDEF", AVR32_FT_ISR_NONE},
736 + {"undef", AVR32_FT_ISR_NONE},
737 + {"SWI", AVR32_FT_ISR_NONE},
738 + {"swi", AVR32_FT_ISR_NONE},
739 + {NULL, AVR32_FT_ISR_NONE}
740 +};
741 +
742 +/* Returns the (interrupt) function type of the current
743 + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
744 +
745 +static unsigned long
746 +avr32_isr_value (tree argument)
747 +{
748 + const isr_attribute_arg *ptr;
749 + const char *arg;
750 +
751 + /* No argument - default to ISR_NONE. */
752 + if (argument == NULL_TREE)
753 + return AVR32_FT_ISR_NONE;
754 +
755 + /* Get the value of the argument. */
756 + if (TREE_VALUE (argument) == NULL_TREE
757 + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
758 + return AVR32_FT_UNKNOWN;
759 +
760 + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
761 +
762 + /* Check it against the list of known arguments. */
763 + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
764 + if (streq (arg, ptr->arg))
765 + return ptr->return_value;
766 +
767 + /* An unrecognized interrupt type. */
768 + return AVR32_FT_UNKNOWN;
769 +}
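/* [Editor's sketch] How the attribute strings in isr_attribute_args are
   used from C source; the handler name below is hypothetical.  A
   declaration like this reaches avr32_handle_isr_attribute with the
   STRING_CST "full", which avr32_isr_value maps to AVR32_FT_ISR_FULL: */
void timer_irq (void) __attribute__ ((interrupt ("full")));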
770 +
771 +
772 +
773 +/*
774 +These hooks specify assembly directives for creating certain kinds
775 +of integer object. The TARGET_ASM_BYTE_OP directive creates a
776 +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
777 +aligned two-byte object, and so on. Any of the hooks may be
778 +NULL, indicating that no suitable directive is available.
779 +
780 +The compiler will print these strings at the start of a new line,
781 +followed immediately by the object's initial value. In most cases,
782 +the string should contain a tab, a pseudo-op, and then another tab.
783 +*/
784 +#undef TARGET_ASM_BYTE_OP
785 +#define TARGET_ASM_BYTE_OP "\t.byte\t"
786 +#undef TARGET_ASM_ALIGNED_HI_OP
787 +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
788 +#undef TARGET_ASM_ALIGNED_SI_OP
789 +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
790 +#undef TARGET_ASM_ALIGNED_DI_OP
791 +#define TARGET_ASM_ALIGNED_DI_OP NULL
792 +#undef TARGET_ASM_ALIGNED_TI_OP
793 +#define TARGET_ASM_ALIGNED_TI_OP NULL
794 +#undef TARGET_ASM_UNALIGNED_HI_OP
795 +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
796 +#undef TARGET_ASM_UNALIGNED_SI_OP
797 +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
798 +#undef TARGET_ASM_UNALIGNED_DI_OP
799 +#define TARGET_ASM_UNALIGNED_DI_OP NULL
800 +#undef TARGET_ASM_UNALIGNED_TI_OP
801 +#define TARGET_ASM_UNALIGNED_TI_OP NULL
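/* [Editor's illustration] With the directives above, assembling an aligned
   32-bit value 0x1234 would print roughly:

       .align 2
       .int   0x1234

   The .align argument here is the log2 of the alignment (2 -> 4 bytes),
   which is why the aligned SI op pairs ".align 2" with ".int" and the
   aligned HI op pairs ".align 1" with ".short". */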
802 +
803 +#undef TARGET_ASM_OUTPUT_MI_THUNK
804 +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
805 +
806 +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
807 +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
808 +
809 +static void
810 +avr32_output_mi_thunk (FILE * file,
811 + tree thunk ATTRIBUTE_UNUSED,
812 + HOST_WIDE_INT delta,
813 + HOST_WIDE_INT vcall_offset, tree function)
814 + {
815 + int mi_delta = delta;
816 + int this_regno =
817 + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
818 + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
819 +
820 +
821 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
822 + || vcall_offset)
823 + {
824 + fputs ("\tpushm\tlr\n", file);
825 + }
826 +
827 +
828 + if (mi_delta != 0)
829 + {
830 + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
831 + {
832 + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
833 + }
834 + else
835 + {
836 +	  /* The immediate does not fit in 21 bits (Is21), so build it in a
837 +	     temporary register; lr was pushed onto the stack above for this. */
838 + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
839 + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
840 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
841 + }
842 + }
843 +
844 +
845 + if (vcall_offset != 0)
846 + {
847 + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
848 + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
849 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
850 + }
851 +
852 +
853 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
854 + || vcall_offset)
855 + {
856 + fputs ("\tpopm\tlr\n", file);
857 + }
858 +
859 + /* Jump to the function. We assume that we can use an rjmp since the
860 + function to jump to is local and probably not too far away from
861 +     the thunk. If this assumption proves to be wrong, we could implement
862 +     this jump by calculating the offset between the jump source and destination,
863 +     putting it in the constant pool, and then performing an add to pc.
864 +     This would also be legitimate PIC code. But for now we hope that an rjmp
865 + will be sufficient...
866 + */
867 + fputs ("\trjmp\t", file);
868 + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
869 + fputc ('\n', file);
870 + }
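/* [Editor's sketch] Thunk output for a simple case, delta = 4 and no
   vcall offset (assembly reconstructed from the fprintf calls above;
   `target_function' is a placeholder):

       sub     r12, -4
       rjmp    target_function
*/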
871 +
872 +
873 +/* Implements target hook vector_mode_supported. */
874 +bool
875 +avr32_vector_mode_supported (enum machine_mode mode)
876 +{
877 + if ((mode == V2HImode) || (mode == V4QImode))
878 + return true;
879 +
880 + return false;
881 +}
882 +
883 +
884 +#undef TARGET_INIT_LIBFUNCS
885 +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
886 +
887 +#undef TARGET_INIT_BUILTINS
888 +#define TARGET_INIT_BUILTINS avr32_init_builtins
889 +
890 +#undef TARGET_EXPAND_BUILTIN
891 +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
892 +
893 +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
894 + void_ftype_ptr_int;
895 +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
896 +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
897 + short_ftype_short_short;
898 +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
899 +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
900 +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
901 +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
902 +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
903 +
904 +#define def_builtin(NAME, TYPE, CODE) \
905 + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
906 + BUILT_IN_MD, NULL, NULL_TREE)
907 +
908 +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
909 + do \
910 + { \
911 + if ((MASK)) \
912 + lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
913 + BUILT_IN_MD, NULL, NULL_TREE); \
914 + } \
915 + while (0)
916 +
917 +struct builtin_description
918 +{
919 + const unsigned int mask;
920 + const enum insn_code icode;
921 + const char *const name;
922 + const int code;
923 + const enum rtx_code comparison;
924 + const unsigned int flag;
925 + const tree *ftype;
926 +};
927 +
928 +static const struct builtin_description bdesc_2arg[] = {
929 +#define DSP_BUILTIN(code, builtin, ftype) \
930 + { 1, CODE_FOR_##code, "__builtin_" #code , \
931 + AVR32_BUILTIN_##builtin, 0, 0, ftype }
932 +
933 + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
934 + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
935 + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
936 + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
937 + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
938 + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
939 + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
940 + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
941 + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
942 + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
943 + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
944 +};
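/* [Editor's sketch] What the two-operand DSP builtins above look like at
   the C level once registered (operand values are illustrative):

       short a = 0x4000, b = 0x4000;
       short h = __builtin_mulsathh_h (a, b);           // saturating halfword mul
       int   w = __builtin_satadd_w (1 << 30, 1 << 30); // saturates instead of wrapping
*/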
945 +
946 +
947 +void
948 +avr32_init_builtins (void)
949 +{
950 + unsigned int i;
951 + const struct builtin_description *d;
952 + tree endlink = void_list_node;
953 + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
954 + tree longlong_endlink =
955 + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
956 + tree short_endlink =
957 + tree_cons (NULL_TREE, short_integer_type_node, endlink);
958 + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
959 +
960 + /* int func (int) */
961 + int_ftype_int = build_function_type (integer_type_node, int_endlink);
962 +
963 + /* short func (short) */
964 + short_ftype_short
965 + = build_function_type (short_integer_type_node, short_endlink);
966 +
967 + /* short func (short, short) */
968 + short_ftype_short_short
969 + = build_function_type (short_integer_type_node,
970 + tree_cons (NULL_TREE, short_integer_type_node,
971 + short_endlink));
972 +
973 + /* long long func (long long, short, short) */
974 + longlong_ftype_longlong_short_short
975 + = build_function_type (long_long_integer_type_node,
976 + tree_cons (NULL_TREE, long_long_integer_type_node,
977 + tree_cons (NULL_TREE,
978 + short_integer_type_node,
979 + short_endlink)));
980 +
981 + /* long long func (short, short) */
982 + longlong_ftype_short_short
983 + = build_function_type (long_long_integer_type_node,
984 + tree_cons (NULL_TREE, short_integer_type_node,
985 + short_endlink));
986 +
987 + /* int func (int, int) */
988 + int_ftype_int_int
989 + = build_function_type (integer_type_node,
990 + tree_cons (NULL_TREE, integer_type_node,
991 + int_endlink));
992 +
993 + /* long long func (int, int) */
994 + longlong_ftype_int_int
995 + = build_function_type (long_long_integer_type_node,
996 + tree_cons (NULL_TREE, integer_type_node,
997 + int_endlink));
998 +
999 + /* long long int func (long long, int, short) */
1000 + longlong_ftype_longlong_int_short
1001 + = build_function_type (long_long_integer_type_node,
1002 + tree_cons (NULL_TREE, long_long_integer_type_node,
1003 + tree_cons (NULL_TREE, integer_type_node,
1004 + short_endlink)));
1005 +
1006 + /* long long int func (int, short) */
1007 + longlong_ftype_int_short
1008 + = build_function_type (long_long_integer_type_node,
1009 + tree_cons (NULL_TREE, integer_type_node,
1010 + short_endlink));
1011 +
1012 + /* int func (int, short, short) */
1013 + int_ftype_int_short_short
1014 + = build_function_type (integer_type_node,
1015 + tree_cons (NULL_TREE, integer_type_node,
1016 + tree_cons (NULL_TREE,
1017 + short_integer_type_node,
1018 + short_endlink)));
1019 +
1020 + /* int func (short, short) */
1021 + int_ftype_short_short
1022 + = build_function_type (integer_type_node,
1023 + tree_cons (NULL_TREE, short_integer_type_node,
1024 + short_endlink));
1025 +
1026 + /* int func (int, short) */
1027 + int_ftype_int_short
1028 + = build_function_type (integer_type_node,
1029 + tree_cons (NULL_TREE, integer_type_node,
1030 + short_endlink));
1031 +
1032 + /* void func (int, int) */
1033 + void_ftype_int_int
1034 + = build_function_type (void_type_node,
1035 + tree_cons (NULL_TREE, integer_type_node,
1036 + int_endlink));
1037 +
1038 + /* void func (int, int, int) */
1039 + void_ftype_int_int_int
1040 + = build_function_type (void_type_node,
1041 + tree_cons (NULL_TREE, integer_type_node,
1042 + tree_cons (NULL_TREE, integer_type_node,
1043 + int_endlink)));
1044 +
1045 + /* void func (int, int, long long) */
1046 + void_ftype_int_int_longlong
1047 + = build_function_type (void_type_node,
1048 + tree_cons (NULL_TREE, integer_type_node,
1049 + tree_cons (NULL_TREE, integer_type_node,
1050 + longlong_endlink)));
1051 +
1052 + /* void func (int, int, int, int, int) */
1053 + void_ftype_int_int_int_int_int
1054 + = build_function_type (void_type_node,
1055 + tree_cons (NULL_TREE, integer_type_node,
1056 + tree_cons (NULL_TREE, integer_type_node,
1057 + tree_cons (NULL_TREE,
1058 + integer_type_node,
1059 + tree_cons
1060 + (NULL_TREE,
1061 + integer_type_node,
1062 + int_endlink)))));
1063 +
1064 + /* void func (void *, int) */
1065 + void_ftype_ptr_int
1066 + = build_function_type (void_type_node,
1067 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1068 +
1069 + /* void func (int) */
1070 + void_ftype_int = build_function_type (void_type_node, int_endlink);
1071 +
1072 + /* void func (void) */
1073 + void_ftype_void = build_function_type (void_type_node, void_endlink);
1074 +
1075 + /* int func (void) */
1076 + int_ftype_void = build_function_type (integer_type_node, void_endlink);
1077 +
1078 + /* int func (void *, int) */
1079 + int_ftype_ptr_int
1080 + = build_function_type (integer_type_node,
1081 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1082 +
1083 + /* int func (int, int, int) */
1084 + int_ftype_int_int_int
1085 + = build_function_type (integer_type_node,
1086 + tree_cons (NULL_TREE, integer_type_node,
1087 + tree_cons (NULL_TREE, integer_type_node,
1088 + int_endlink)));
1089 +
1090 + /* Initialize avr32 builtins. */
1091 + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
1092 + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
1093 + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
1094 + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
1095 + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
1096 + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
1097 + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
1098 + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
1099 + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
1100 + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
1101 + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
1102 + def_builtin ("__builtin_breakpoint", void_ftype_void,
1103 + AVR32_BUILTIN_BREAKPOINT);
1104 + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
1105 + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
1106 + def_builtin ("__builtin_bswap_16", short_ftype_short,
1107 + AVR32_BUILTIN_BSWAP16);
1108 + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
1109 + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
1110 + AVR32_BUILTIN_COP);
1111 + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
1112 + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
1113 + AVR32_BUILTIN_MVRC_W);
1114 + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
1115 + AVR32_BUILTIN_MVCR_D);
1116 + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
1117 + AVR32_BUILTIN_MVRC_D);
1118 + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
1119 + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
1120 + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
1121 + AVR32_BUILTIN_SATRNDS);
1122 + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
1123 + AVR32_BUILTIN_SATRNDU);
1124 + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
1125 + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
1126 + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
1127 + AVR32_BUILTIN_MACSATHH_W);
1128 + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
1129 + AVR32_BUILTIN_MACWH_D);
1130 + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
1131 + AVR32_BUILTIN_MACHH_D);
1132 + def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
1133 + def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
1134 + def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
1135 +
1136 + /* Add all builtins that are more or less simple operations on two
1137 + operands. */
1138 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1139 + {
1140 + /* Use one of the operands; the target can have a different mode for
1141 + mask-generating compares. */
1142 +
1143 + if (d->name == 0)
1144 + continue;
1145 +
1146 + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
1147 + }
1148 +}
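/* [Editor's sketch] Typical use of the system-register builtins registered
   above; note that avr32_expand_builtin below rejects non-constant register
   arguments.  The register offset 0 is illustrative only. */
static inline int read_sysreg0 (void) { return __builtin_mfsr (0); }
static inline void write_sysreg0 (int v) { __builtin_mtsr (0, v); }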
1149 +
1150 +
1151 +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
1152 +
1153 +static rtx
1154 +avr32_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
1155 +{
1156 + rtx pat;
1157 + tree arg0 = TREE_VALUE (arglist);
1158 + tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1159 + rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1160 + rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1161 + enum machine_mode tmode = insn_data[icode].operand[0].mode;
1162 + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1163 + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1164 +
1165 + if (!target
1166 + || GET_MODE (target) != tmode
1167 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1168 + target = gen_reg_rtx (tmode);
1169 +
1170 +  /* If an input operand does not satisfy the insn's predicate, convert
1171 +     or copy it into a register of the mode the insn expects. */
1172 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1173 + {
1174 + /* If op0 is already a reg we must cast it to the correct mode. */
1175 + if (REG_P (op0))
1176 + op0 = convert_to_mode (mode0, op0, 1);
1177 + else
1178 + op0 = copy_to_mode_reg (mode0, op0);
1179 + }
1180 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1181 + {
1182 + /* If op1 is already a reg we must cast it to the correct mode. */
1183 + if (REG_P (op1))
1184 + op1 = convert_to_mode (mode1, op1, 1);
1185 + else
1186 + op1 = copy_to_mode_reg (mode1, op1);
1187 + }
1188 + pat = GEN_FCN (icode) (target, op0, op1);
1189 + if (!pat)
1190 + return 0;
1191 + emit_insn (pat);
1192 + return target;
1193 +}
1194 +
1195 +/* Expand an expression EXP that calls a built-in function,
1196 + with result going to TARGET if that's convenient
1197 + (and in mode MODE if that's convenient).
1198 + SUBTARGET may be used as the target for computing one of EXP's operands.
1199 + IGNORE is nonzero if the value is to be ignored. */
1200 +
1201 +rtx
1202 +avr32_expand_builtin (tree exp,
1203 + rtx target,
1204 + rtx subtarget ATTRIBUTE_UNUSED,
1205 + enum machine_mode mode ATTRIBUTE_UNUSED,
1206 + int ignore ATTRIBUTE_UNUSED)
1207 +{
1208 + const struct builtin_description *d;
1209 + unsigned int i;
1210 + enum insn_code icode = 0;
1211 + tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
1212 + tree arglist = TREE_OPERAND (exp, 1);
1213 + tree arg0, arg1, arg2;
1214 + rtx op0, op1, op2, pat;
1215 + enum machine_mode tmode, mode0, mode1;
1216 + enum machine_mode arg0_mode;
1217 + int fcode = DECL_FUNCTION_CODE (fndecl);
1218 +
1219 + switch (fcode)
1220 + {
1221 + default:
1222 + break;
1223 +
1224 + case AVR32_BUILTIN_SATS:
1225 + case AVR32_BUILTIN_SATU:
1226 + case AVR32_BUILTIN_SATRNDS:
1227 + case AVR32_BUILTIN_SATRNDU:
1228 + {
1229 + const char *fname;
1230 + switch (fcode)
1231 + {
1232 + default:
1233 + case AVR32_BUILTIN_SATS:
1234 + icode = CODE_FOR_sats;
1235 + fname = "sats";
1236 + break;
1237 + case AVR32_BUILTIN_SATU:
1238 + icode = CODE_FOR_satu;
1239 + fname = "satu";
1240 + break;
1241 + case AVR32_BUILTIN_SATRNDS:
1242 + icode = CODE_FOR_satrnds;
1243 + fname = "satrnds";
1244 + break;
1245 + case AVR32_BUILTIN_SATRNDU:
1246 + icode = CODE_FOR_satrndu;
1247 + fname = "satrndu";
1248 + break;
1249 + }
1250 +
1251 + arg0 = TREE_VALUE (arglist);
1252 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1253 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1254 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1255 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1256 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1257 +
1258 + tmode = insn_data[icode].operand[0].mode;
1259 +
1260 +
1261 + if (target == 0
1262 + || GET_MODE (target) != tmode
1263 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1264 + target = gen_reg_rtx (tmode);
1265 +
1266 +
1267 + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
1268 + {
1269 + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
1270 + }
1271 +
1272 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1273 + {
1274 + error ("Parameter 2 to __builtin_%s should be a constant number.",
1275 + fname);
1276 + return NULL_RTX;
1277 + }
1278 +
1279 + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
1280 + {
1281 + error ("Parameter 3 to __builtin_%s should be a constant number.",
1282 + fname);
1283 + return NULL_RTX;
1284 + }
1285 +
1286 + emit_move_insn (target, op0);
1287 + pat = GEN_FCN (icode) (target, op1, op2);
1288 + if (!pat)
1289 + return 0;
1290 + emit_insn (pat);
1291 +
1292 + return target;
1293 + }
1294 + case AVR32_BUILTIN_MUSTR:
1295 + icode = CODE_FOR_mustr;
1296 + tmode = insn_data[icode].operand[0].mode;
1297 +
1298 + if (target == 0
1299 + || GET_MODE (target) != tmode
1300 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1301 + target = gen_reg_rtx (tmode);
1302 + pat = GEN_FCN (icode) (target);
1303 + if (!pat)
1304 + return 0;
1305 + emit_insn (pat);
1306 + return target;
1307 +
1308 + case AVR32_BUILTIN_MFSR:
1309 + icode = CODE_FOR_mfsr;
1310 + arg0 = TREE_VALUE (arglist);
1311 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1312 + tmode = insn_data[icode].operand[0].mode;
1313 + mode0 = insn_data[icode].operand[1].mode;
1314 +
1315 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1316 + {
1317 + error ("Parameter 1 to __builtin_mfsr must be a constant number");
1318 + }
1319 +
1320 + if (target == 0
1321 + || GET_MODE (target) != tmode
1322 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1323 + target = gen_reg_rtx (tmode);
1324 + pat = GEN_FCN (icode) (target, op0);
1325 + if (!pat)
1326 + return 0;
1327 + emit_insn (pat);
1328 + return target;
1329 + case AVR32_BUILTIN_MTSR:
1330 + icode = CODE_FOR_mtsr;
1331 + arg0 = TREE_VALUE (arglist);
1332 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1333 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1334 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1335 + mode0 = insn_data[icode].operand[0].mode;
1336 + mode1 = insn_data[icode].operand[1].mode;
1337 +
1338 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1339 + {
1340 + error ("Parameter 1 to __builtin_mtsr must be a constant number");
1341 + return gen_reg_rtx (mode0);
1342 + }
1343 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1344 + op1 = copy_to_mode_reg (mode1, op1);
1345 + pat = GEN_FCN (icode) (op0, op1);
1346 + if (!pat)
1347 + return 0;
1348 + emit_insn (pat);
1349 + return NULL_RTX;
1350 + case AVR32_BUILTIN_MFDR:
1351 + icode = CODE_FOR_mfdr;
1352 + arg0 = TREE_VALUE (arglist);
1353 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1354 + tmode = insn_data[icode].operand[0].mode;
1355 + mode0 = insn_data[icode].operand[1].mode;
1356 +
1357 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1358 + {
1359 + error ("Parameter 1 to __builtin_mfdr must be a constant number");
1360 + }
1361 +
1362 + if (target == 0
1363 + || GET_MODE (target) != tmode
1364 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1365 + target = gen_reg_rtx (tmode);
1366 + pat = GEN_FCN (icode) (target, op0);
1367 + if (!pat)
1368 + return 0;
1369 + emit_insn (pat);
1370 + return target;
1371 + case AVR32_BUILTIN_MTDR:
1372 + icode = CODE_FOR_mtdr;
1373 + arg0 = TREE_VALUE (arglist);
1374 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1375 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1376 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1377 + mode0 = insn_data[icode].operand[0].mode;
1378 + mode1 = insn_data[icode].operand[1].mode;
1379 +
1380 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1381 + {
1382 + error ("Parameter 1 to __builtin_mtdr must be a constant number");
1383 + return gen_reg_rtx (mode0);
1384 + }
1385 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1386 + op1 = copy_to_mode_reg (mode1, op1);
1387 + pat = GEN_FCN (icode) (op0, op1);
1388 + if (!pat)
1389 + return 0;
1390 + emit_insn (pat);
1391 + return NULL_RTX;
1392 + case AVR32_BUILTIN_CACHE:
1393 + icode = CODE_FOR_cache;
1394 + arg0 = TREE_VALUE (arglist);
1395 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1396 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1397 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1398 + mode0 = insn_data[icode].operand[0].mode;
1399 + mode1 = insn_data[icode].operand[1].mode;
1400 +
1401 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1402 + {
1403 + error ("Parameter 2 to __builtin_cache must be a constant number");
1404 + return gen_reg_rtx (mode1);
1405 + }
1406 +
1407 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1408 + op0 = copy_to_mode_reg (mode0, op0);
1409 +
1410 + pat = GEN_FCN (icode) (op0, op1);
1411 + if (!pat)
1412 + return 0;
1413 + emit_insn (pat);
1414 + return NULL_RTX;
1415 + case AVR32_BUILTIN_SYNC:
1416 + case AVR32_BUILTIN_MUSFR:
1417 + case AVR32_BUILTIN_SSRF:
1418 + case AVR32_BUILTIN_CSRF:
1419 + {
1420 + const char *fname;
1421 + switch (fcode)
1422 + {
1423 + default:
1424 + case AVR32_BUILTIN_SYNC:
1425 + icode = CODE_FOR_sync;
1426 + fname = "sync";
1427 + break;
1428 + case AVR32_BUILTIN_MUSFR:
1429 + icode = CODE_FOR_musfr;
1430 + fname = "musfr";
1431 + break;
1432 + case AVR32_BUILTIN_SSRF:
1433 + icode = CODE_FOR_ssrf;
1434 + fname = "ssrf";
1435 + break;
1436 + case AVR32_BUILTIN_CSRF:
1437 + icode = CODE_FOR_csrf;
1438 + fname = "csrf";
1439 + break;
1440 + }
1441 +
1442 + arg0 = TREE_VALUE (arglist);
1443 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1444 + mode0 = insn_data[icode].operand[0].mode;
1445 +
1446 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1447 + {
1448 + if (icode == CODE_FOR_musfr)
1449 + op0 = copy_to_mode_reg (mode0, op0);
1450 + else
1451 + {
1452 + error ("Parameter to __builtin_%s is illegal.", fname);
1453 + return gen_reg_rtx (mode0);
1454 + }
1455 + }
1456 + pat = GEN_FCN (icode) (op0);
1457 + if (!pat)
1458 + return 0;
1459 + emit_insn (pat);
1460 + return NULL_RTX;
1461 + }
1462 + case AVR32_BUILTIN_TLBR:
1463 + icode = CODE_FOR_tlbr;
1464 + pat = GEN_FCN (icode) (NULL_RTX);
1465 + if (!pat)
1466 + return 0;
1467 + emit_insn (pat);
1468 + return NULL_RTX;
1469 + case AVR32_BUILTIN_TLBS:
1470 + icode = CODE_FOR_tlbs;
1471 + pat = GEN_FCN (icode) (NULL_RTX);
1472 + if (!pat)
1473 + return 0;
1474 + emit_insn (pat);
1475 + return NULL_RTX;
1476 + case AVR32_BUILTIN_TLBW:
1477 + icode = CODE_FOR_tlbw;
1478 + pat = GEN_FCN (icode) (NULL_RTX);
1479 + if (!pat)
1480 + return 0;
1481 + emit_insn (pat);
1482 + return NULL_RTX;
1483 + case AVR32_BUILTIN_BREAKPOINT:
1484 + icode = CODE_FOR_breakpoint;
1485 + pat = GEN_FCN (icode) (NULL_RTX);
1486 + if (!pat)
1487 + return 0;
1488 + emit_insn (pat);
1489 + return NULL_RTX;
1490 + case AVR32_BUILTIN_XCHG:
1491 + icode = CODE_FOR_sync_lock_test_and_setsi;
1492 + arg0 = TREE_VALUE (arglist);
1493 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1494 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1495 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1496 + tmode = insn_data[icode].operand[0].mode;
1497 + mode0 = insn_data[icode].operand[1].mode;
1498 + mode1 = insn_data[icode].operand[2].mode;
1499 +
1500 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1501 + {
1502 + op1 = copy_to_mode_reg (mode1, op1);
1503 + }
1504 +
1505 + op0 = force_reg (GET_MODE (op0), op0);
1506 + op0 = gen_rtx_MEM (GET_MODE (op0), op0);
1507 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1508 + {
1509 + error
1510 + ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
1511 + }
1512 +
1513 + if (target == 0
1514 + || GET_MODE (target) != tmode
1515 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1516 + target = gen_reg_rtx (tmode);
1517 + pat = GEN_FCN (icode) (target, op0, op1);
1518 + if (!pat)
1519 + return 0;
1520 + emit_insn (pat);
1521 + return target;
1522 + case AVR32_BUILTIN_LDXI:
1523 + icode = CODE_FOR_ldxi;
1524 + arg0 = TREE_VALUE (arglist);
1525 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1526 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1527 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1528 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1529 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1530 + tmode = insn_data[icode].operand[0].mode;
1531 + mode0 = insn_data[icode].operand[1].mode;
1532 + mode1 = insn_data[icode].operand[2].mode;
1533 +
1534 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1535 + {
1536 + op0 = copy_to_mode_reg (mode0, op0);
1537 + }
1538 +
1539 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1540 + {
1541 + op1 = copy_to_mode_reg (mode1, op1);
1542 + }
1543 +
1544 + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
1545 + {
1546 + error
1547 + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
1548 + return gen_reg_rtx (mode0);
1549 + }
1550 +
1551 + if (target == 0
1552 + || GET_MODE (target) != tmode
1553 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1554 + target = gen_reg_rtx (tmode);
1555 + pat = GEN_FCN (icode) (target, op0, op1, op2);
1556 + if (!pat)
1557 + return 0;
1558 + emit_insn (pat);
1559 + return target;
1560 + case AVR32_BUILTIN_BSWAP16:
1561 + {
1562 + icode = CODE_FOR_bswap_16;
1563 + arg0 = TREE_VALUE (arglist);
1564 + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
1565 + mode0 = insn_data[icode].operand[1].mode;
1566 + if (arg0_mode != mode0)
1567 + arg0 = build1 (NOP_EXPR,
1568 + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
1569 +
1570 + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
1571 + tmode = insn_data[icode].operand[0].mode;
1572 +
1573 +
1574 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1575 + {
1576 + if ( CONST_INT_P (op0) )
1577 + {
1578 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
1579 + ((INTVAL (op0)&0xff00) >> 8) );
1580 + /* Sign extend 16-bit value to host wide int */
1581 + val <<= (HOST_BITS_PER_WIDE_INT - 16);
1582 + val >>= (HOST_BITS_PER_WIDE_INT - 16);
1583 + op0 = GEN_INT(val);
1584 + if (target == 0
1585 + || GET_MODE (target) != tmode
1586 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1587 + target = gen_reg_rtx (tmode);
1588 + emit_move_insn(target, op0);
1589 + return target;
1590 + }
1591 + else
1592 + op0 = copy_to_mode_reg (mode0, op0);
1593 + }
1594 +
1595 + if (target == 0
1596 + || GET_MODE (target) != tmode
1597 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1598 + {
1599 + target = gen_reg_rtx (tmode);
1600 + }
1601 +
1602 +
1603 + pat = GEN_FCN (icode) (target, op0);
1604 + if (!pat)
1605 + return 0;
1606 + emit_insn (pat);
1607 +
1608 + return target;
1609 + }
1610 + case AVR32_BUILTIN_BSWAP32:
1611 + {
1612 + icode = CODE_FOR_bswap_32;
1613 + arg0 = TREE_VALUE (arglist);
1614 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1615 + tmode = insn_data[icode].operand[0].mode;
1616 + mode0 = insn_data[icode].operand[1].mode;
1617 +
1618 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1619 + {
1620 + if ( CONST_INT_P (op0) )
1621 + {
1622 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
1623 + ((INTVAL (op0)&0x0000ff00) << 8) |
1624 + ((INTVAL (op0)&0x00ff0000) >> 8) |
1625 + ((INTVAL (op0)&0xff000000) >> 24) );
1626 + /* Sign extend 32-bit value to host wide int */
1627 + val <<= (HOST_BITS_PER_WIDE_INT - 32);
1628 + val >>= (HOST_BITS_PER_WIDE_INT - 32);
1629 + op0 = GEN_INT(val);
1630 + if (target == 0
1631 + || GET_MODE (target) != tmode
1632 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1633 + target = gen_reg_rtx (tmode);
1634 + emit_move_insn(target, op0);
1635 + return target;
1636 + }
1637 + else
1638 + op0 = copy_to_mode_reg (mode0, op0);
1639 + }
1640 +
1641 + if (target == 0
1642 + || GET_MODE (target) != tmode
1643 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1644 + target = gen_reg_rtx (tmode);
1645 +
1646 +
1647 + pat = GEN_FCN (icode) (target, op0);
1648 + if (!pat)
1649 + return 0;
1650 + emit_insn (pat);
1651 +
1652 + return target;
1653 + }
1654 + case AVR32_BUILTIN_MVCR_W:
1655 + case AVR32_BUILTIN_MVCR_D:
1656 + {
1657 + arg0 = TREE_VALUE (arglist);
1658 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1659 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1660 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1661 +
1662 + if (fcode == AVR32_BUILTIN_MVCR_W)
1663 + icode = CODE_FOR_mvcrsi;
1664 + else
1665 + icode = CODE_FOR_mvcrdi;
1666 +
1667 + tmode = insn_data[icode].operand[0].mode;
1668 +
1669 + if (target == 0
1670 + || GET_MODE (target) != tmode
1671 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1672 + target = gen_reg_rtx (tmode);
1673 +
1674 + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
1675 + {
1676 + error
1677 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1678 + error ("Number should be between 0 and 7.");
1679 + return NULL_RTX;
1680 + }
1681 +
1682 + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
1683 + {
1684 + error
1685 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1686 + error ("Number should be between 0 and 15.");
1687 + return NULL_RTX;
1688 + }
1689 +
1690 + pat = GEN_FCN (icode) (target, op0, op1);
1691 + if (!pat)
1692 + return 0;
1693 + emit_insn (pat);
1694 +
1695 + return target;
1696 + }
1697 + case AVR32_BUILTIN_MACSATHH_W:
1698 + case AVR32_BUILTIN_MACWH_D:
1699 + case AVR32_BUILTIN_MACHH_D:
1700 + {
1701 + arg0 = TREE_VALUE (arglist);
1702 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1703 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1704 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1705 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1706 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1707 +
1708 + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
1709 + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
1710 + CODE_FOR_machh_d);
1711 +
1712 + tmode = insn_data[icode].operand[0].mode;
1713 + mode0 = insn_data[icode].operand[1].mode;
1714 + mode1 = insn_data[icode].operand[2].mode;
1715 +
1716 +
1717 + if (!target
1718 + || GET_MODE (target) != tmode
1719 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1720 + target = gen_reg_rtx (tmode);
1721 +
1722 + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
1723 + {
1724 + /* If op0 is already a reg we must cast it to the correct mode. */
1725 + if (REG_P (op0))
1726 + op0 = convert_to_mode (tmode, op0, 1);
1727 + else
1728 + op0 = copy_to_mode_reg (tmode, op0);
1729 + }
1730 +
1731 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
1732 + {
1733 + /* If op1 is already a reg we must cast it to the correct mode. */
1734 + if (REG_P (op1))
1735 + op1 = convert_to_mode (mode0, op1, 1);
1736 + else
1737 + op1 = copy_to_mode_reg (mode0, op1);
1738 + }
1739 +
1740 + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
1741 + {
1742 + /* If op1 is already a reg we must cast it to the correct mode. */
1743 + if (REG_P (op2))
1744 + op2 = convert_to_mode (mode1, op2, 1);
1745 + else
1746 + op2 = copy_to_mode_reg (mode1, op2);
1747 + }
1748 +
1749 + emit_move_insn (target, op0);
1750 +
1751 + pat = GEN_FCN (icode) (target, op1, op2);
1752 + if (!pat)
1753 + return 0;
1754 + emit_insn (pat);
1755 + return target;
1756 + }
1757 + case AVR32_BUILTIN_MVRC_W:
1758 + case AVR32_BUILTIN_MVRC_D:
1759 + {
1760 + arg0 = TREE_VALUE (arglist);
1761 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1762 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1763 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1764 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1765 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1766 +
1767 + if (fcode == AVR32_BUILTIN_MVRC_W)
1768 + icode = CODE_FOR_mvrcsi;
1769 + else
1770 + icode = CODE_FOR_mvrcdi;
1771 +
1772 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1773 + {
1774 + error ("Parameter 1 is not a valid coprocessor number.");
1775 + error ("Number should be between 0 and 7.");
1776 + return NULL_RTX;
1777 + }
1778 +
1779 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1780 + {
1781 + error ("Parameter 2 is not a valid coprocessor register number.");
1782 + error ("Number should be between 0 and 15.");
1783 + return NULL_RTX;
1784 + }
1785 +
1786 + if (GET_CODE (op2) == CONST_INT
1787 + || GET_CODE (op2) == CONST
1788 + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
1789 + {
1790 + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
1791 + }
1792 +
1793 + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
1794 + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
1795 +
1796 +
1797 + pat = GEN_FCN (icode) (op0, op1, op2);
1798 + if (!pat)
1799 + return 0;
1800 + emit_insn (pat);
1801 +
1802 + return NULL_RTX;
1803 + }
1804 + case AVR32_BUILTIN_COP:
1805 + {
1806 + rtx op3, op4;
1807 + tree arg3, arg4;
1808 + icode = CODE_FOR_cop;
1809 + arg0 = TREE_VALUE (arglist);
1810 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1811 + arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
1812 + arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1813 + arg4 =
1814 + TREE_VALUE (TREE_CHAIN
1815 + (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist)))));
1816 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1817 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1818 + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
1819 + op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0);
1820 + op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0);
1821 +
1822 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1823 + {
1824 + error
1825 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1826 + error ("Number should be between 0 and 7.");
1827 + return NULL_RTX;
1828 + }
1829 +
1830 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1831 + {
1832 + error
1833 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1834 + error ("Number should be between 0 and 15.");
1835 + return NULL_RTX;
1836 + }
1837 +
1838 + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
1839 + {
1840 + error
1841 + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
1842 + error ("Number should be between 0 and 15.");
1843 + return NULL_RTX;
1844 + }
1845 +
1846 + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
1847 + {
1848 + error
1849 + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
1850 + error ("Number should be between 0 and 15.");
1851 + return NULL_RTX;
1852 + }
1853 +
1854 + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
1855 + {
1856 + error
1857 + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
1858 + error ("Number should be between 0 and 127.");
1859 + return NULL_RTX;
1860 + }
1861 +
1862 + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
1863 + if (!pat)
1864 + return 0;
1865 + emit_insn (pat);
1866 +
1867 + return target;
1868 + }
1869 + case AVR32_BUILTIN_MEMS:
1870 + case AVR32_BUILTIN_MEMC:
1871 + case AVR32_BUILTIN_MEMT:
1872 + {
1873 + if (!TARGET_RMW)
1874 + error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
1875 +
1876 + switch (fcode) {
1877 + case AVR32_BUILTIN_MEMS:
1878 + icode = CODE_FOR_iorsi3;
1879 + break;
1880 + case AVR32_BUILTIN_MEMC:
1881 + icode = CODE_FOR_andsi3;
1882 + break;
1883 + case AVR32_BUILTIN_MEMT:
1884 + icode = CODE_FOR_xorsi3;
1885 + break;
1886 + }
1887 +
1888 + arg0 = TREE_VALUE (arglist);
1889 + arg1 = TREE_VALUE (TREE_CHAIN (arglist));
1890 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1891 + if ( GET_CODE (op0) == SYMBOL_REF )
1892 +        /* This symbol must be RMW addressable.  */
1893 + SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
1894 + op0 = gen_rtx_MEM(SImode, op0);
1895 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1896 + mode0 = insn_data[icode].operand[1].mode;
1897 +
1898 +
1899 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1900 + {
1901 + error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
1902 + }
1903 +
1904 + if ( !CONST_INT_P (op1)
1905 + || INTVAL (op1) > 31
1906 + || INTVAL (op1) < 0 )
1907 + error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
1908 +
1909 + if ( fcode == AVR32_BUILTIN_MEMC )
1910 + op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
1911 + else
1912 + op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
1913 + pat = GEN_FCN (icode) (op0, op0, op1);
1914 + if (!pat)
1915 + return 0;
1916 + emit_insn (pat);
1917 + return op0;
1918 + }
1919 +
1920 + }
1921 +
1922 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1923 + if (d->code == fcode)
1924 + return avr32_expand_binop_builtin (d->icode, arglist, target);
1925 +
1926 +
1927 + /* @@@ Should really do something sensible here. */
1928 + return NULL_RTX;
1929 +}
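+
+/* Illustrative expansion of the RMW builtins handled above (example
+   names and values, not part of the builtin definitions): given an
+   RMW addressable int `flags',
+
+     __builtin_mems (&flags, 5);   sets bit 5    (iorsi3, mask 1 << 5)
+     __builtin_memc (&flags, 5);   clears bit 5  (andsi3, mask ~(1 << 5))
+     __builtin_memt (&flags, 5);   toggles bit 5 (xorsi3, mask 1 << 5)
+
+   Parameter 2 must be a constant in the range 0..31.  */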
1930 +
1931 +
1932 +/* Handle an "interrupt" or "isr" attribute;
1933 + arguments as in struct attribute_spec.handler. */
1934 +
1935 +static tree
1936 +avr32_handle_isr_attribute (tree * node, tree name, tree args,
1937 + int flags, bool * no_add_attrs)
1938 +{
1939 + if (DECL_P (*node))
1940 + {
1941 + if (TREE_CODE (*node) != FUNCTION_DECL)
1942 + {
1943 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1944 + IDENTIFIER_POINTER (name));
1945 + *no_add_attrs = true;
1946 + }
1947 + /* FIXME: the argument if any is checked for type attributes; should it
1948 + be checked for decl ones? */
1949 + }
1950 + else
1951 + {
1952 + if (TREE_CODE (*node) == FUNCTION_TYPE
1953 + || TREE_CODE (*node) == METHOD_TYPE)
1954 + {
1955 + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
1956 + {
1957 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1958 + *no_add_attrs = true;
1959 + }
1960 + }
1961 + else if (TREE_CODE (*node) == POINTER_TYPE
1962 + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
1963 + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
1964 + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
1965 + {
1966 + *node = build_variant_type_copy (*node);
1967 + TREE_TYPE (*node) = build_type_attribute_variant
1968 + (TREE_TYPE (*node),
1969 + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
1970 + *no_add_attrs = true;
1971 + }
1972 + else
1973 + {
1974 + /* Possibly pass this attribute on from the type to a decl. */
1975 + if (flags & ((int) ATTR_FLAG_DECL_NEXT
1976 + | (int) ATTR_FLAG_FUNCTION_NEXT
1977 + | (int) ATTR_FLAG_ARRAY_NEXT))
1978 + {
1979 + *no_add_attrs = true;
1980 + return tree_cons (name, args, NULL_TREE);
1981 + }
1982 + else
1983 + {
1984 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1985 + }
1986 + }
1987 + }
1988 +
1989 + return NULL_TREE;
1990 +}
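+
+/* Illustrative use of the attribute handled above (`my_handler' is an
+   example name; "interrupt" is accepted as a synonym for "isr", and
+   the optional argument, validated by avr32_isr_value, selects the
+   register shadowing scheme):
+
+     void __attribute__ ((isr)) my_handler (void);  */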
1991 +
1992 +/* Handle an attribute requiring a FUNCTION_DECL;
1993 + arguments as in struct attribute_spec.handler. */
1994 +static tree
1995 +avr32_handle_fndecl_attribute (tree * node, tree name,
1996 + tree args ATTRIBUTE_UNUSED,
1997 + int flags ATTRIBUTE_UNUSED,
1998 + bool * no_add_attrs)
1999 +{
2000 + if (TREE_CODE (*node) != FUNCTION_DECL)
2001 + {
2002 + warning (OPT_Wattributes,"%qs attribute only applies to functions",
2003 + IDENTIFIER_POINTER (name));
2004 + *no_add_attrs = true;
2005 + }
2006 +
2007 + return NULL_TREE;
2008 +}
2009 +
2010 +
2011 +/* Handle an acall attribute;
2012 + arguments as in struct attribute_spec.handler. */
2013 +
2014 +static tree
2015 +avr32_handle_acall_attribute (tree * node, tree name,
2016 + tree args ATTRIBUTE_UNUSED,
2017 + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
2018 +{
2019 + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
2020 + {
2021 + warning (OPT_Wattributes,"`%s' attribute not yet supported...",
2022 + IDENTIFIER_POINTER (name));
2023 + *no_add_attrs = true;
2024 + return NULL_TREE;
2025 + }
2026 +
2027 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
2028 + IDENTIFIER_POINTER (name));
2029 + *no_add_attrs = true;
2030 + return NULL_TREE;
2031 +}
2032 +
2033 +
2034 +/* Return 0 if the attributes for two types are incompatible, 1 if they
2035 + are compatible, and 2 if they are nearly compatible (which causes a
2036 + warning to be generated). */
2037 +
2038 +static int
2039 +avr32_comp_type_attributes (tree type1, tree type2)
2040 +{
2041 + int acall1, acall2, isr1, isr2, naked1, naked2;
2042 +
2043 + /* Check for mismatch of non-default calling convention. */
2044 + if (TREE_CODE (type1) != FUNCTION_TYPE)
2045 + return 1;
2046 +
2047 + /* Check for mismatched call attributes. */
2048 + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
2049 + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
2050 + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
2051 + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
2052 + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2053 + if (!isr1)
2054 + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2055 +
2056 + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2057 + if (!isr2)
2058 + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2059 +
2060 + if ((acall1 && isr2)
2061 + || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
2062 + return 0;
2063 +
2064 + return 1;
2065 +}
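+
+/* Example of the compatibility rules above: a function type carrying
+   "acall" or "naked" is incompatible with one carrying "isr" or
+   "interrupt" (the hook returns 0), so e.g. assigning an interrupt
+   handler to an acall-qualified function pointer is diagnosed.  All
+   other combinations compare as compatible here.  */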
2066 +
2067 +
2068 +/* Computes the type of the current function. */
2069 +
2070 +static unsigned long
2071 +avr32_compute_func_type (void)
2072 +{
2073 + unsigned long type = AVR32_FT_UNKNOWN;
2074 + tree a;
2075 + tree attr;
2076 +
2077 + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
2078 + abort ();
2079 +
2080 + /* Decide if the current function is volatile. Such functions never
2081 + return, and many memory cycles can be saved by not storing register
2082 + values that will never be needed again. This optimization was added to
2083 + speed up context switching in a kernel application. */
2084 + if (optimize > 0
2085 + && TREE_NOTHROW (current_function_decl)
2086 + && TREE_THIS_VOLATILE (current_function_decl))
2087 + type |= AVR32_FT_VOLATILE;
2088 +
2089 + if (cfun->static_chain_decl != NULL)
2090 + type |= AVR32_FT_NESTED;
2091 +
2092 + attr = DECL_ATTRIBUTES (current_function_decl);
2093 +
2094 + a = lookup_attribute ("isr", attr);
2095 + if (a == NULL_TREE)
2096 + a = lookup_attribute ("interrupt", attr);
2097 +
2098 + if (a == NULL_TREE)
2099 + type |= AVR32_FT_NORMAL;
2100 + else
2101 + type |= avr32_isr_value (TREE_VALUE (a));
2102 +
2103 +
2104 + a = lookup_attribute ("acall", attr);
2105 + if (a != NULL_TREE)
2106 + type |= AVR32_FT_ACALL;
2107 +
2108 + a = lookup_attribute ("naked", attr);
2109 + if (a != NULL_TREE)
2110 + type |= AVR32_FT_NAKED;
2111 +
2112 + return type;
2113 +}
2114 +
2115 +/* Returns the type of the current function. */
2116 +
2117 +static unsigned long
2118 +avr32_current_func_type (void)
2119 +{
2120 + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
2121 + cfun->machine->func_type = avr32_compute_func_type ();
2122 +
2123 + return cfun->machine->func_type;
2124 +}
2125 +
2126 +/*
2127 + This target hook should return true if we should not pass type solely
2128 + in registers. The file expr.h provides a default definition that is usually
2129 + appropriate; refer to expr.h for additional documentation.
2130 +*/
2131 +bool
2132 +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
2133 +{
2134 + if (type && AGGREGATE_TYPE_P (type)
2135 + /* If the alignment is less than the size then pass in the struct on
2136 + the stack. */
2137 + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
2138 + (unsigned int) int_size_in_bytes (type))
2139 + /* If we support unaligned word accesses then structs of size 4 and 8
2140 + can have any alignment and still be passed in registers. */
2141 + && !(TARGET_UNALIGNED_WORD
2142 + && (int_size_in_bytes (type) == 4
2143 + || int_size_in_bytes (type) == 8))
2144 + /* Double word structs need only a word alignment. */
2145 + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
2146 + return true;
2147 +
2148 + if (type && AGGREGATE_TYPE_P (type)
2149 +      /* Structs of size 3,5,6,7 are always passed on the stack. */
2150 + && (int_size_in_bytes (type) == 3
2151 + || int_size_in_bytes (type) == 5
2152 + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
2153 + return true;
2154 +
2155 +
2156 + return (type && TREE_ADDRESSABLE (type));
2157 +}
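+
+/* Examples of the rules above: a 12-byte struct with 4-byte alignment
+   (alignment smaller than size, no exception applies) must be passed
+   on the stack, while an 8-byte struct with 4-byte alignment falls
+   under the double-word exception and may still be passed in
+   registers, unless its type is TREE_ADDRESSABLE.  */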
2158 +
2159 +
2160 +bool
2161 +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
2162 +{
2163 + return true;
2164 +}
2165 +
2166 +/*
2167 + This target hook should return true if an argument at the position indicated
2168 + by cum should be passed by reference. This predicate is queried after target
2169 + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
2170 +
2171 + If the hook returns true, a copy of that argument is made in memory and a
2172 + pointer to the argument is passed instead of the argument itself. The pointer
2173 + is passed in whatever way is appropriate for passing a pointer to that type.
2174 +*/
2175 +bool
2176 +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
2177 + enum machine_mode mode ATTRIBUTE_UNUSED,
2178 + tree type, bool named ATTRIBUTE_UNUSED)
2179 +{
2180 + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
2181 +}
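+
+/* Only variable-sized types take this path: TYPE_SIZE is not an
+   INTEGER_CST e.g. for variable-length array types, which are
+   therefore passed by invisible reference.  */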
2182 +
2183 +static int
2184 +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
2185 + enum machine_mode mode ATTRIBUTE_UNUSED,
2186 + tree type ATTRIBUTE_UNUSED,
2187 + bool named ATTRIBUTE_UNUSED)
2188 +{
2189 + return 0;
2190 +}
2191 +
2192 +
2193 +struct gcc_target targetm = TARGET_INITIALIZER;
2194 +
2195 +/*
2196 +  Table used to convert between register numbers in the assembler
2197 +  instructions and the register numbers used in gcc.
2198 +*/
2199 +const int avr32_function_arg_reglist[] = {
2200 + INTERNAL_REGNUM (12),
2201 + INTERNAL_REGNUM (11),
2202 + INTERNAL_REGNUM (10),
2203 + INTERNAL_REGNUM (9),
2204 + INTERNAL_REGNUM (8)
2205 +};
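+
+/* The table is scanned in order, so the first integer argument is
+   passed in r12, the second in r11, and so on down to r8, giving
+   five argument registers in total.  */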
2206 +
2207 +rtx avr32_compare_op0 = NULL_RTX;
2208 +rtx avr32_compare_op1 = NULL_RTX;
2209 +rtx avr32_compare_operator = NULL_RTX;
2210 +rtx avr32_acc_cache = NULL_RTX;
2211 +
2212 +/*
2213 + Returns nonzero if it is allowed to store a value of mode mode in hard
2214 + register number regno.
2215 +*/
2216 +int
2217 +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
2218 +{
2219 + /* We allow only float modes in the fp-registers */
2220 + if (regnr >= FIRST_FP_REGNUM
2221 + && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
2222 + {
2223 + return 0;
2224 + }
2225 +
2226 + switch (mode)
2227 + {
2228 + case DImode: /* long long */
2229 + case DFmode: /* double */
2230 + case SCmode: /* __complex__ float */
2231 + case CSImode: /* __complex__ int */
2232 + if (regnr < 4)
2233 + { /* long long int not supported in r12, sp, lr
2234 + or pc. */
2235 + return 0;
2236 + }
2237 + else
2238 + {
2239 +	  if (regnr % 2)	/* long long int has to be placed in
2240 +				   even-numbered registers. */
2241 + return 0;
2242 + else
2243 + return 1;
2244 + }
2245 + case CDImode: /* __complex__ long long */
2246 + case DCmode: /* __complex__ double */
2247 + case TImode: /* 16 bytes */
2248 + if (regnr < 7)
2249 + return 0;
2250 + else if (regnr % 2)
2251 + return 0;
2252 + else
2253 + return 1;
2254 + default:
2255 + return 1;
2256 + }
2257 +}
2258 +
2259 +
2260 +int
2261 +avr32_rnd_operands (rtx add, rtx shift)
2262 +{
2263 + if (GET_CODE (shift) == CONST_INT &&
2264 + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
2265 + {
2266 + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
2267 + return TRUE;
2268 + }
2269 +
2270 + return FALSE;
2271 +}
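+
+/* True when `add' is the rounding constant for a right shift by
+   `shift' bits; e.g. shift == 8 and add == 128 match, since
+   1 << (8 - 1) == 128.  */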
2272 +
2273 +
2274 +
2275 +int
2276 +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
2277 +{
2278 + switch (c)
2279 + {
2280 + case 'K':
2281 + case 'I':
2282 + {
2283 + HOST_WIDE_INT min_value = 0, max_value = 0;
2284 + char size_str[3];
2285 + int const_size;
2286 +
2287 + size_str[0] = str[2];
2288 + size_str[1] = str[3];
2289 + size_str[2] = '\0';
2290 + const_size = atoi (size_str);
2291 +
2292 + if (toupper (str[1]) == 'U')
2293 + {
2294 + min_value = 0;
2295 + max_value = (1 << const_size) - 1;
2296 + }
2297 + else if (toupper (str[1]) == 'S')
2298 + {
2299 + min_value = -(1 << (const_size - 1));
2300 + max_value = (1 << (const_size - 1)) - 1;
2301 + }
2302 +
2303 + if (c == 'I')
2304 + {
2305 + value = -value;
2306 + }
2307 +
2308 + if (value >= min_value && value <= max_value)
2309 + {
2310 + return 1;
2311 + }
2312 + break;
2313 + }
2314 + case 'M':
2315 + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
2316 + case 'J':
2317 + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
2318 + case 'O':
2319 + return one_bit_set_operand (GEN_INT (value), VOIDmode);
2320 + case 'N':
2321 + return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
2322 + case 'L':
2323 + /* The lower 16-bits are set. */
2324 + return ((value & 0xffff) == 0xffff) ;
2325 + }
2326 +
2327 + return 0;
2328 +}
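+
+/* Example of the 'K'/'I' parsing above: for the constraint "Ks21",
+   str[1] is 's' and size_str becomes "21", giving the signed range
+   -(1 << 20) .. (1 << 20) - 1.  For 'I' constraints the value is
+   negated first, so "Is21" accepts values whose negation fits the
+   same range.  */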
2329 +
2330 +
2331 +/*Compute mask of which floating-point registers need saving upon
2332 + entry to this function*/
2333 +static unsigned long
2334 +avr32_compute_save_fp_reg_mask (void)
2335 +{
2336 + unsigned long func_type = avr32_current_func_type ();
2337 + unsigned int save_reg_mask = 0;
2338 + unsigned int reg;
2339 + unsigned int max_reg = 7;
2340 + int save_all_call_used_regs = FALSE;
2341 +
2342 +  /* This only applies to the hardware floating-point implementation. */
2343 + if (!TARGET_HARD_FLOAT)
2344 + return 0;
2345 +
2346 + if (IS_INTERRUPT (func_type))
2347 + {
2348 +
2349 + /* Interrupt functions must not corrupt any registers, even call
2350 + clobbered ones. If this is a leaf function we can just examine the
2351 + registers used by the RTL, but otherwise we have to assume that
2352 + whatever function is called might clobber anything, and so we have
2353 + to save all the call-clobbered registers as well. */
2354 + max_reg = 13;
2355 + save_all_call_used_regs = !current_function_is_leaf;
2356 + }
2357 +
2358 +  /* All registers used must be saved */
2359 + for (reg = 0; reg <= max_reg; reg++)
2360 + if (regs_ever_live[INTERNAL_FP_REGNUM (reg)]
2361 + || (save_all_call_used_regs
2362 + && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
2363 + save_reg_mask |= (1 << reg);
2364 +
2365 + return save_reg_mask;
2366 +}
2367 +
2368 +/*Compute mask of registers which need saving upon function entry */
2369 +static unsigned long
2370 +avr32_compute_save_reg_mask (int push)
2371 +{
2372 + unsigned long func_type;
2373 + unsigned int save_reg_mask = 0;
2374 + unsigned int reg;
2375 +
2376 + func_type = avr32_current_func_type ();
2377 +
2378 + if (IS_INTERRUPT (func_type))
2379 + {
2380 + unsigned int max_reg = 12;
2381 +
2382 +
2383 + /* Get the banking scheme for the interrupt */
2384 + switch (func_type)
2385 + {
2386 + case AVR32_FT_ISR_FULL:
2387 + max_reg = 0;
2388 + break;
2389 + case AVR32_FT_ISR_HALF:
2390 + max_reg = 7;
2391 + break;
2392 + case AVR32_FT_ISR_NONE:
2393 + max_reg = 12;
2394 + break;
2395 + }
2396 +
2397 + /* Interrupt functions must not corrupt any registers, even call
2398 + clobbered ones. If this is a leaf function we can just examine the
2399 + registers used by the RTL, but otherwise we have to assume that
2400 + whatever function is called might clobber anything, and so we have
2401 + to save all the call-clobbered registers as well. */
2402 +
2403 + /* Need not push the registers r8-r12 for AVR32A architectures, as this
2404 +         is automatically done in hardware. We also do not have any shadow
2405 + registers. */
2406 + if (TARGET_UARCH_AVR32A)
2407 + {
2408 + max_reg = 7;
2409 + func_type = AVR32_FT_ISR_NONE;
2410 + }
2411 +
2412 +      /* All registers which are used and are not shadowed must be saved */
2413 + for (reg = 0; reg <= max_reg; reg++)
2414 + if (regs_ever_live[INTERNAL_REGNUM (reg)]
2415 + || (!current_function_is_leaf
2416 + && call_used_regs[INTERNAL_REGNUM (reg)]))
2417 + save_reg_mask |= (1 << reg);
2418 +
2419 + /* Check LR */
2420 + if ((regs_ever_live[LR_REGNUM]
2421 + || !current_function_is_leaf || frame_pointer_needed)
2422 + /* Only non-shadowed register models */
2423 + && (func_type == AVR32_FT_ISR_NONE))
2424 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2425 +
2426 + /* Make sure that the GOT register is pushed. */
2427 + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
2428 + && current_function_uses_pic_offset_table)
2429 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2430 +
2431 + }
2432 + else
2433 + {
2434 + int use_pushm = optimize_size;
2435 +
2436 + /* In the normal case we only need to save those registers which are
2437 + call saved and which are used by this function. */
2438 + for (reg = 0; reg <= 7; reg++)
2439 + if (regs_ever_live[INTERNAL_REGNUM (reg)]
2440 + && !call_used_regs[INTERNAL_REGNUM (reg)])
2441 + save_reg_mask |= (1 << reg);
2442 +
2443 + /* Make sure that the GOT register is pushed. */
2444 + if (current_function_uses_pic_offset_table)
2445 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2446 +
2447 +
2448 +      /* If we are optimizing for size and do not have anonymous arguments,
2449 +         always use pushm/popm */
2450 + if (use_pushm)
2451 + {
2452 + if ((save_reg_mask & (1 << 0))
2453 + || (save_reg_mask & (1 << 1))
2454 + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
2455 + save_reg_mask |= 0xf;
2456 +
2457 + if ((save_reg_mask & (1 << 4))
2458 + || (save_reg_mask & (1 << 5))
2459 + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
2460 + save_reg_mask |= 0xf0;
2461 +
2462 + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
2463 + save_reg_mask |= 0x300;
2464 + }
2465 +
2466 +
2467 + /* Check LR */
2468 + if ((regs_ever_live[LR_REGNUM]
2469 + || !current_function_is_leaf
2470 + || (optimize_size
2471 + && save_reg_mask
2472 + && !current_function_calls_eh_return) || frame_pointer_needed))
2473 + {
2474 + if (push
2475 + /* Never pop LR into PC for functions which
2476 + calls __builtin_eh_return, since we need to
2477 + fix the SP after the restoring of the registers
2478 + and before returning. */
2479 + || current_function_calls_eh_return)
2480 + {
2481 + /* Push/Pop LR */
2482 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2483 + }
2484 + else
2485 + {
2486 + /* Pop PC */
2487 + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
2488 + }
2489 + }
2490 + }
2491 +
2492 +
2493 + /* Save registers so the exception handler can modify them. */
2494 + if (current_function_calls_eh_return)
2495 + {
2496 + unsigned int i;
2497 +
2498 + for (i = 0;; i++)
2499 + {
2500 + reg = EH_RETURN_DATA_REGNO (i);
2501 + if (reg == INVALID_REGNUM)
2502 + break;
2503 + save_reg_mask |= 1 << ASM_REGNUM (reg);
2504 + }
2505 + }
2506 +
2507 + return save_reg_mask;
2508 +}
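+
+/* Example of the pushm widening above: when optimizing for size, if
+   only r0 is live the mask is widened to r0-r3 (0xf) so a single
+   pushm/popm can be used instead of individual stores.  */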
2509 +
2510 +/*Compute total size in bytes of all saved registers */
2511 +static int
2512 +avr32_get_reg_mask_size (int reg_mask)
2513 +{
2514 + int reg, size;
2515 + size = 0;
2516 +
2517 + for (reg = 0; reg <= 15; reg++)
2518 + if (reg_mask & (1 << reg))
2519 + size += 4;
2520 +
2521 + return size;
2522 +}
2523 +
2524 +/*Get a register from one of the registers which are saved onto the stack
2525 + upon function entry */
2526 +
2527 +static int
2528 +avr32_get_saved_reg (int save_reg_mask)
2529 +{
2530 + unsigned int reg;
2531 +
2532 + /* Find the first register which is saved in the saved_reg_mask */
2533 + for (reg = 0; reg <= 15; reg++)
2534 + if (save_reg_mask & (1 << reg))
2535 + return reg;
2536 +
2537 + return -1;
2538 +}
2539 +
2540 +/* Return 1 if it is possible to return using a single instruction. */
2541 +int
2542 +avr32_use_return_insn (int iscond)
2543 +{
2544 + unsigned int func_type = avr32_current_func_type ();
2545 + unsigned long saved_int_regs;
2546 + unsigned long saved_fp_regs;
2547 +
2548 + /* Never use a return instruction before reload has run. */
2549 + if (!reload_completed)
2550 + return 0;
2551 +
2552 + /* Must adjust the stack for vararg functions. */
2553 + if (current_function_args_info.uses_anonymous_args)
2554 + return 0;
2555 +
2556 +  /* If there is a stack adjustment. */
2557 + if (get_frame_size ())
2558 + return 0;
2559 +
2560 + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
2561 + saved_fp_regs = avr32_compute_save_fp_reg_mask ();
2562 +
2563 +  /* A return cannot be performed in one instruction for functions which
2564 +     have saved fp-regs on the stack */
2565 + if (saved_fp_regs)
2566 + return 0;
2567 +
2568 + /* Conditional returns can not be performed in one instruction if we need
2569 + to restore registers from the stack */
2570 + if (iscond && saved_int_regs)
2571 + return 0;
2572 +
2573 + /* Conditional return can not be used for interrupt handlers. */
2574 + if (iscond && IS_INTERRUPT (func_type))
2575 + return 0;
2576 +
2577 +  /* For interrupt handlers which need to pop registers */
2578 + if (saved_int_regs && IS_INTERRUPT (func_type))
2579 + return 0;
2580 +
2581 +
2582 + /* If there are saved registers but the LR isn't saved, then we need two
2583 + instructions for the return. */
2584 + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2585 + return 0;
2586 +
2587 +
2588 + return 1;
2589 +}
2590 +
2591 +
2592 +/*Generate some function prologue info in the assembly file*/
2593 +
2594 +void
2595 +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
2596 +{
2597 + if (IS_NAKED (avr32_current_func_type ()))
2598 + fprintf (f,
2599 + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
2600 +
2601 + if (IS_INTERRUPT (avr32_current_func_type ()))
2602 + {
2603 + switch (avr32_current_func_type ())
2604 + {
2605 + case AVR32_FT_ISR_FULL:
2606 + fprintf (f,
2607 + "\t# Interrupt Function: Fully shadowed register file\n");
2608 + break;
2609 + case AVR32_FT_ISR_HALF:
2610 + fprintf (f,
2611 + "\t# Interrupt Function: Half shadowed register file\n");
2612 + break;
2613 + default:
2614 + case AVR32_FT_ISR_NONE:
2615 + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
2616 + break;
2617 + }
2618 + }
2619 +
2620 +
2621 + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
2622 + current_function_args_size, frame_size,
2623 + current_function_pretend_args_size);
2624 +
2625 + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
2626 + frame_pointer_needed, current_function_is_leaf);
2627 +
2628 + fprintf (f, "\t# uses_anonymous_args = %i\n",
2629 + current_function_args_info.uses_anonymous_args);
2630 + if (current_function_calls_eh_return)
2631 + fprintf (f, "\t# Calls __builtin_eh_return.\n");
2632 +
2633 +}
2634 +
2635 +
2636 +/* Generate and emit an insn that we will recognize as a pushm or stm.
2637 + Unfortunately, since this insn does not reflect very well the actual
2638 + semantics of the operation, we need to annotate the insn for the benefit
2639 + of DWARF2 frame unwind information. */
2640 +
2641 +int avr32_convert_to_reglist16 (int reglist8_vect);
2642 +
2643 +static rtx
2644 +emit_multi_reg_push (int reglist, int usePUSHM)
2645 +{
2646 + rtx insn;
2647 + rtx dwarf;
2648 + rtx tmp;
2649 + rtx reg;
2650 + int i;
2651 + int nr_regs;
2652 + int index = 0;
2653 +
2654 + if (usePUSHM)
2655 + {
2656 + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
2657 + reglist = avr32_convert_to_reglist16 (reglist);
2658 + }
2659 + else
2660 + {
2661 + insn = emit_insn (gen_stm (stack_pointer_rtx,
2662 + gen_rtx_CONST_INT (SImode, reglist),
2663 + gen_rtx_CONST_INT (SImode, 1)));
2664 + }
2665 +
2666 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2667 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2668 +
2669 + for (i = 15; i >= 0; i--)
2670 + {
2671 + if (reglist & (1 << i))
2672 + {
2673 + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
2674 + tmp = gen_rtx_SET (VOIDmode,
2675 + gen_rtx_MEM (SImode,
2676 + plus_constant (stack_pointer_rtx,
2677 + 4 * index)), reg);
2678 + RTX_FRAME_RELATED_P (tmp) = 1;
2679 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2680 + }
2681 + }
2682 +
2683 + tmp = gen_rtx_SET (SImode,
2684 + stack_pointer_rtx,
2685 + gen_rtx_PLUS (SImode,
2686 + stack_pointer_rtx,
2687 + GEN_INT (-4 * nr_regs)));
2688 + RTX_FRAME_RELATED_P (tmp) = 1;
2689 + XVECEXP (dwarf, 0, 0) = tmp;
2690 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2691 + REG_NOTES (insn));
2692 + return insn;
2693 +}
2694 +
2695 +
2696 +static rtx
2697 +emit_multi_fp_reg_push (int reglist)
2698 +{
2699 + rtx insn;
2700 + rtx dwarf;
2701 + rtx tmp;
2702 + rtx reg;
2703 + int i;
2704 + int nr_regs;
2705 + int index = 0;
2706 +
2707 + insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
2708 + gen_rtx_CONST_INT (SImode, reglist),
2709 + gen_rtx_CONST_INT (SImode, 1)));
2710 +
2711 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2712 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2713 +
2714 + for (i = 15; i >= 0; i--)
2715 + {
2716 + if (reglist & (1 << i))
2717 + {
2718 + reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
2719 + tmp = gen_rtx_SET (VOIDmode,
2720 + gen_rtx_MEM (SImode,
2721 + plus_constant (stack_pointer_rtx,
2722 + 4 * index)), reg);
2723 + RTX_FRAME_RELATED_P (tmp) = 1;
2724 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2725 + }
2726 + }
2727 +
2728 + tmp = gen_rtx_SET (SImode,
2729 + stack_pointer_rtx,
2730 + gen_rtx_PLUS (SImode,
2731 + stack_pointer_rtx,
2732 + GEN_INT (-4 * nr_regs)));
2733 + RTX_FRAME_RELATED_P (tmp) = 1;
2734 + XVECEXP (dwarf, 0, 0) = tmp;
2735 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2736 + REG_NOTES (insn));
2737 + return insn;
2738 +}
2739 +
2740 +rtx
2741 +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
2742 + int write_back, int in_struct_p, int scalar_p)
2743 +{
2744 +
2745 + rtx result;
2746 + int i = 0, j;
2747 +
2748 + result =
2749 + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
2750 +
2751 + if (write_back)
2752 + {
2753 + XVECEXP (result, 0, 0)
2754 + = gen_rtx_SET (GET_MODE (from), from,
2755 + plus_constant (from, count * 4));
2756 + i = 1;
2757 + count++;
2758 + }
2759 +
2760 +
2761 + for (j = 0; i < count; i++, j++)
2762 + {
2763 + rtx unspec;
2764 + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
2765 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2766 + MEM_SCALAR_P (mem) = scalar_p;
2767 + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
2768 + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
2769 + }
2770 +
2771 + return result;
2772 +}
2773 +
2774 +
2775 +rtx
2776 +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
2777 + int in_struct_p, int scalar_p)
2778 +{
2779 + rtx result;
2780 + int i = 0, j;
2781 +
2782 + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
2783 +
2784 + for (j = 0; i < count; i++, j++)
2785 + {
2786 + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
2787 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2788 + MEM_SCALAR_P (mem) = scalar_p;
2789 + XVECEXP (result, 0, i)
2790 + = gen_rtx_SET (VOIDmode, mem,
2791 + gen_rtx_UNSPEC (VOIDmode,
2792 + gen_rtvec (1, regs[j]),
2793 + UNSPEC_STORE_MULTIPLE));
2794 + }
2795 +
2796 + return result;
2797 +}
2798 +
2799 +
2800 +/* Move a block of memory if it is word aligned or we support unaligned
2801 +   word memory accesses. The size must be at most 64 bytes. */
2802 +
2803 +int
2804 +avr32_gen_movmemsi (rtx * operands)
2805 +{
2806 + HOST_WIDE_INT bytes_to_go;
2807 + rtx src, dst;
2808 + rtx st_src, st_dst;
2809 + int src_offset = 0, dst_offset = 0;
2810 + int block_size;
2811 + int dst_in_struct_p, src_in_struct_p;
2812 + int dst_scalar_p, src_scalar_p;
2813 + int unaligned;
2814 +
2815 + if (GET_CODE (operands[2]) != CONST_INT
2816 + || GET_CODE (operands[3]) != CONST_INT
2817 + || INTVAL (operands[2]) > 64
2818 + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
2819 + return 0;
2820 +
2821 + unaligned = (INTVAL (operands[3]) & 3) != 0;
2822 +
2823 + block_size = 4;
2824 +
2825 + st_dst = XEXP (operands[0], 0);
2826 + st_src = XEXP (operands[1], 0);
2827 +
2828 + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
2829 + dst_scalar_p = MEM_SCALAR_P (operands[0]);
2830 + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
2831 + src_scalar_p = MEM_SCALAR_P (operands[1]);
2832 +
2833 + dst = copy_to_mode_reg (SImode, st_dst);
2834 + src = copy_to_mode_reg (SImode, st_src);
2835 +
2836 + bytes_to_go = INTVAL (operands[2]);
2837 +
2838 + while (bytes_to_go)
2839 + {
2840 + enum machine_mode move_mode;
2841 + /* (Seems to be a problem with reloads for the movti pattern so this is
2842 + disabled until that problem is resolved)
2843 + UPDATE: Problem seems to be solved now.... */
2844 + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
2845 + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
2846 + && !TARGET_ARCH_UC)
2847 + move_mode = TImode;
2848 + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
2849 + move_mode = DImode;
2850 + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
2851 + move_mode = SImode;
2852 + else
2853 + move_mode = QImode;
2854 +
2855 + {
2856 + rtx src_mem;
2857 + rtx dst_mem = gen_rtx_MEM (move_mode,
2858 + gen_rtx_PLUS (SImode, dst,
2859 + GEN_INT (dst_offset)));
2860 + dst_offset += GET_MODE_SIZE (move_mode);
2861 +      if ( 0 /* This causes an error in GCC. There seems to be
2862 +                something wrong in the gcse pass which causes REG_EQUIV notes
2863 +                to be wrong, so this is disabled for now. */
2864 + && move_mode == TImode
2865 + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
2866 + {
2867 + src_mem = gen_rtx_MEM (move_mode,
2868 + gen_rtx_POST_INC (SImode, src));
2869 + }
2870 + else
2871 + {
2872 + src_mem = gen_rtx_MEM (move_mode,
2873 + gen_rtx_PLUS (SImode, src,
2874 + GEN_INT (src_offset)));
2875 + src_offset += GET_MODE_SIZE (move_mode);
2876 + }
2877 +
2878 + bytes_to_go -= GET_MODE_SIZE (move_mode);
2879 +
2880 + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
2881 + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
2882 +
2883 + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
2884 + MEM_SCALAR_P (src_mem) = src_scalar_p;
2885 + emit_move_insn (dst_mem, src_mem);
2886 +
2887 + }
2888 + }
2889 +
2890 + return 1;
2891 +}
2892 +
2893 +
2894 +
2895 +/*Expand the function prologue*/
2896 +void
2897 +avr32_expand_prologue (void)
2898 +{
2899 + rtx insn, dwarf;
2900 + unsigned long saved_reg_mask, saved_fp_reg_mask;
2901 + int reglist8 = 0;
2902 +
2903 +  /* Naked functions do not have a prologue */
2904 + if (IS_NAKED (avr32_current_func_type ()))
2905 + return;
2906 +
2907 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
2908 +
2909 + if (saved_reg_mask)
2910 + {
2911 + /* Must push used registers */
2912 +
2913 +      /* Should we use PUSHM or STM? */
2914 + int usePUSHM = TRUE;
2915 + reglist8 = 0;
2916 + if (((saved_reg_mask & (1 << 0)) ||
2917 + (saved_reg_mask & (1 << 1)) ||
2918 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
2919 + {
2920 + /* One of R0-R3 should at least be pushed */
2921 + if (((saved_reg_mask & (1 << 0)) &&
2922 + (saved_reg_mask & (1 << 1)) &&
2923 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
2924 + {
2925 + /* All should be pushed */
2926 + reglist8 |= 0x01;
2927 + }
2928 + else
2929 + {
2930 + usePUSHM = FALSE;
2931 + }
2932 + }
2933 +
2934 + if (((saved_reg_mask & (1 << 4)) ||
2935 + (saved_reg_mask & (1 << 5)) ||
2936 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
2937 + {
2938 + /* One of R4-R7 should at least be pushed */
2939 + if (((saved_reg_mask & (1 << 4)) &&
2940 + (saved_reg_mask & (1 << 5)) &&
2941 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
2942 + {
2943 + if (usePUSHM)
2944 + /* All should be pushed */
2945 + reglist8 |= 0x02;
2946 + }
2947 + else
2948 + {
2949 + usePUSHM = FALSE;
2950 + }
2951 + }
2952 +
2953 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
2954 + {
2955 + /* One of R8-R9 should at least be pushed */
2956 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
2957 + {
2958 + if (usePUSHM)
2959 + /* All should be pushed */
2960 + reglist8 |= 0x04;
2961 + }
2962 + else
2963 + {
2964 + usePUSHM = FALSE;
2965 + }
2966 + }
2967 +
2968 + if (saved_reg_mask & (1 << 10))
2969 + reglist8 |= 0x08;
2970 +
2971 + if (saved_reg_mask & (1 << 11))
2972 + reglist8 |= 0x10;
2973 +
2974 + if (saved_reg_mask & (1 << 12))
2975 + reglist8 |= 0x20;
2976 +
2977 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
2978 + {
2979 + /* Push LR */
2980 + reglist8 |= 0x40;
2981 + }
2982 +
2983 + if (usePUSHM)
2984 + {
2985 + insn = emit_multi_reg_push (reglist8, TRUE);
2986 + }
2987 + else
2988 + {
2989 + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
2990 + }
2991 + RTX_FRAME_RELATED_P (insn) = 1;
2992 +
2993 + /* Prevent this instruction from being scheduled after any other
2994 + instructions. */
2995 + emit_insn (gen_blockage ());
2996 + }
2997 +
2998 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
2999 + if (saved_fp_reg_mask)
3000 + {
3001 + insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
3002 + RTX_FRAME_RELATED_P (insn) = 1;
3003 +
3004 + /* Prevent this instruction from being scheduled after any other
3005 + instructions. */
3006 + emit_insn (gen_blockage ());
3007 + }
3008 +
3009 + /* Set frame pointer */
3010 + if (frame_pointer_needed)
3011 + {
3012 + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3013 + RTX_FRAME_RELATED_P (insn) = 1;
3014 + }
3015 +
3016 + if (get_frame_size () > 0)
3017 + {
3018 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
3019 + {
3020 + insn = emit_insn (gen_rtx_SET (SImode,
3021 + stack_pointer_rtx,
3022 + gen_rtx_PLUS (SImode,
3023 + stack_pointer_rtx,
3024 + gen_rtx_CONST_INT
3025 + (SImode,
3026 + -get_frame_size
3027 + ()))));
3028 + RTX_FRAME_RELATED_P (insn) = 1;
3029 + }
3030 + else
3031 + {
3032 +	  /* The immediate is larger than Ks21. We must either check if we can
3033 +	     use one of the pushed registers as temporary storage, or make
3034 +	     ourselves a temp register by pushing one onto the stack. */
3035 + rtx temp_reg, const_pool_entry, insn;
3036 + if (saved_reg_mask)
3037 + {
3038 + temp_reg =
3039 + gen_rtx_REG (SImode,
3040 + INTERNAL_REGNUM (avr32_get_saved_reg
3041 + (saved_reg_mask)));
3042 + }
3043 + else
3044 + {
3045 + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
3046 + emit_move_insn (gen_rtx_MEM
3047 + (SImode,
3048 + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
3049 + temp_reg);
3050 + }
3051 +
3052 + const_pool_entry =
3053 + force_const_mem (SImode,
3054 + gen_rtx_CONST_INT (SImode, get_frame_size ()));
3055 + emit_move_insn (temp_reg, const_pool_entry);
3056 +
3057 + insn = emit_insn (gen_rtx_SET (SImode,
3058 + stack_pointer_rtx,
3059 + gen_rtx_MINUS (SImode,
3060 + stack_pointer_rtx,
3061 + temp_reg)));
3062 +
3063 + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
3064 + gen_rtx_PLUS (SImode, stack_pointer_rtx,
3065 + GEN_INT (-get_frame_size ())));
3066 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3067 + dwarf, REG_NOTES (insn));
3068 + RTX_FRAME_RELATED_P (insn) = 1;
3069 +
3070 + if (!saved_reg_mask)
3071 + {
3072 + insn =
3073 + emit_move_insn (temp_reg,
3074 + gen_rtx_MEM (SImode,
3075 + gen_rtx_POST_INC (SImode,
3076 + gen_rtx_REG
3077 + (SImode,
3078 + 13))));
3079 + }
3080 +
3081 + /* Mark the temp register as dead */
3082 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
3083 + REG_NOTES (insn));
3084 +
3085 +
3086 + }
3087 +
3088 +      /* Prevent the stack adjustment from being scheduled after any
3089 +         instructions using the frame pointer. */
3090 + emit_insn (gen_blockage ());
3091 + }
3092 +
3093 + /* Load GOT */
3094 + if (flag_pic)
3095 + {
3096 + avr32_load_pic_register ();
3097 +
3098 +      /* gcc does not know that load or call instructions might use the pic
3099 +         register, so it might schedule these instructions before the pic
3100 +         register has been loaded. To avoid this, emit a barrier for now.
3101 +         TODO! Find a better way to let gcc know which instructions might
3102 +         use the pic register. */
3103 + emit_insn (gen_blockage ());
3104 + }
3105 + return;
3106 +}
3107 +
3108 +void
3109 +avr32_set_return_address (rtx source, rtx scratch)
3110 +{
3111 + rtx addr;
3112 + unsigned long saved_regs;
3113 +
3114 + saved_regs = avr32_compute_save_reg_mask (TRUE);
3115 +
3116 + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
3117 + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
3118 + else
3119 + {
3120 + if (frame_pointer_needed)
3121 + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
3122 + else
3123 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
3124 + {
3125 + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
3126 + }
3127 + else
3128 + {
3129 + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
3130 + addr = scratch;
3131 + }
3132 + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
3133 + }
3134 +}
3135 +
3136 +
3137 +
3138 +/* Return the length of INSN. LENGTH is the initial length computed by
3139 + attributes in the machine-description file. */
3140 +
3141 +int
3142 +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
3143 + int length ATTRIBUTE_UNUSED)
3144 +{
3145 + return length;
3146 +}
3147 +
3148 +void
3149 +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
3150 + int iscond ATTRIBUTE_UNUSED,
3151 + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
3152 +{
3153 +
3154 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3155 + int insert_ret = TRUE;
3156 + int reglist8 = 0;
3157 + int stack_adjustment = get_frame_size ();
3158 + unsigned int func_type = avr32_current_func_type ();
3159 + FILE *f = asm_out_file;
3160 +
3161 +  /* Naked functions do not have an epilogue */
3162 + if (IS_NAKED (func_type))
3163 + return;
3164 +
3165 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3166 +
3167 + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
3168 +
3169 + /* Reset frame pointer */
3170 + if (stack_adjustment > 0)
3171 + {
3172 + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
3173 + {
3174 + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
3175 + -stack_adjustment);
3176 + }
3177 + else
3178 + {
3179 + /* TODO! Is it safe to use r8 as scratch?? */
3180 + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
3181 + -stack_adjustment);
3182 + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
3183 + -stack_adjustment);
3184 + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
3185 + }
3186 + }
3187 +
3188 + if (saved_fp_reg_mask)
3189 + {
3190 + char reglist[64]; /* 64 bytes should be enough... */
3191 + avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
3192 + fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
3193 + if (saved_fp_reg_mask & ~0xff)
3194 + {
3195 + saved_fp_reg_mask &= ~0xff;
3196 + avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
3197 + fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
3198 + }
3199 + }
3200 +
3201 + if (saved_reg_mask)
3202 + {
3203 + /* Must pop used registers */
3204 +
3205 + /* Should we use POPM or LDM? */
3206 + int usePOPM = TRUE;
3207 + if (((saved_reg_mask & (1 << 0)) ||
3208 + (saved_reg_mask & (1 << 1)) ||
3209 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
3210 + {
3211 + /* One of R0-R3 should at least be popped */
3212 + if (((saved_reg_mask & (1 << 0)) &&
3213 + (saved_reg_mask & (1 << 1)) &&
3214 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
3215 + {
3216 + /* All should be popped */
3217 + reglist8 |= 0x01;
3218 + }
3219 + else
3220 + {
3221 + usePOPM = FALSE;
3222 + }
3223 + }
3224 +
3225 + if (((saved_reg_mask & (1 << 4)) ||
3226 + (saved_reg_mask & (1 << 5)) ||
3227 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
3228 + {
3229 +	  /* One of R4-R7 should at least be popped */
3230 + if (((saved_reg_mask & (1 << 4)) &&
3231 + (saved_reg_mask & (1 << 5)) &&
3232 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
3233 + {
3234 + if (usePOPM)
3235 + /* All should be popped */
3236 + reglist8 |= 0x02;
3237 + }
3238 + else
3239 + {
3240 + usePOPM = FALSE;
3241 + }
3242 + }
3243 +
3244 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
3245 + {
3246 +	  /* One of R8-R9 should at least be popped */
3247 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
3248 + {
3249 + if (usePOPM)
3250 +		/* All should be popped */
3251 + reglist8 |= 0x04;
3252 + }
3253 + else
3254 + {
3255 + usePOPM = FALSE;
3256 + }
3257 + }
3258 +
3259 + if (saved_reg_mask & (1 << 10))
3260 + reglist8 |= 0x08;
3261 +
3262 + if (saved_reg_mask & (1 << 11))
3263 + reglist8 |= 0x10;
3264 +
3265 + if (saved_reg_mask & (1 << 12))
3266 + reglist8 |= 0x20;
3267 +
3268 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
3269 + /* Pop LR */
3270 + reglist8 |= 0x40;
3271 +
3272 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3273 + /* Pop LR into PC. */
3274 + reglist8 |= 0x80;
3275 +
3276 + if (usePOPM)
3277 + {
3278 + char reglist[64]; /* 64 bytes should be enough... */
3279 + avr32_make_reglist8 (reglist8, (char *) reglist);
3280 +
3281 + if (reglist8 & 0x80)
3282 + /* This instruction is also a return */
3283 + insert_ret = FALSE;
3284 +
3285 + if (r12_imm && !insert_ret)
3286 + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
3287 + else
3288 + fprintf (f, "\tpopm\t%s\n", reglist);
3289 +
3290 + }
3291 + else
3292 + {
3293 + char reglist[64]; /* 64 bytes should be enough... */
3294 + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
3295 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3296 + /* This instruction is also a return */
3297 + insert_ret = FALSE;
3298 +
3299 + if (r12_imm && !insert_ret)
3300 + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
3301 + INTVAL (r12_imm));
3302 + else
3303 + fprintf (f, "\tldm\tsp++, %s\n", reglist);
3304 +
3305 + }
3306 +
3307 + }
3308 +
3309 + /* Stack adjustment for exception handler. */
3310 + if (current_function_calls_eh_return)
3311 + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
3312 +
3313 +
3314 + if (IS_INTERRUPT (func_type))
3315 + {
3316 + fprintf (f, "\trete\n");
3317 + }
3318 + else if (insert_ret)
3319 + {
3320 + if (r12_imm)
3321 + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
3322 + else
3323 + fprintf (f, "\tretal\tr12\n");
3324 + }
3325 +}
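+
+/* Example epilogue emitted above for a function that saved r4-r7 and
+   lr: with push == FALSE the saved lr is popped straight into pc
+   (reglist8 == 0x82), so the whole epilogue becomes
+
+     popm    r4-r7, pc
+
+   and no separate return instruction is inserted.  */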
3326 +
3327 +/* Function for converting an fp-register mask to a
3328 + reglistCPD8 register list string. */
3329 +void
3330 +avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
3331 +{
3332 + int i;
3333 +
3334 + /* Make sure reglist_string is empty */
3335 + reglist_string[0] = '\0';
3336 +
3337 + for (i = 0; i < NUM_FP_REGS; i += 2)
3338 + {
3339 + if (reglist_mask & (1 << i))
3340 + {
3341 + strlen (reglist_string) ?
3342 + sprintf (reglist_string, "%s, %s-%s", reglist_string,
3343 + reg_names[INTERNAL_FP_REGNUM (i)],
3344 + reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
3345 + sprintf (reglist_string, "%s-%s",
3346 + reg_names[INTERNAL_FP_REGNUM (i)],
3347 + reg_names[INTERNAL_FP_REGNUM (i + 1)]);
3348 + }
3349 + }
3350 +}
3351 +
3352 +/* Function for converting an fp-register mask to a
3353 + reglistCP8 register list string. */
3354 +void
3355 +avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
3356 +{
3357 + int i;
3358 +
3359 + /* Make sure reglist_string is empty */
3360 + reglist_string[0] = '\0';
3361 +
3362 + for (i = 0; i < NUM_FP_REGS; ++i)
3363 + {
3364 + if (reglist_mask & (1 << i))
3365 + {
3366 + strlen (reglist_string) ?
3367 + sprintf (reglist_string, "%s, %s", reglist_string,
3368 + reg_names[INTERNAL_FP_REGNUM (i)]) :
3369 + sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
3370 + }
3371 + }
3372 +}
3373 +
3374 +void
3375 +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
3376 +{
3377 + int i;
3378 +
3379 + /* Make sure reglist16_string is empty */
3380 + reglist16_string[0] = '\0';
3381 +
3382 + for (i = 0; i < 16; ++i)
3383 + {
3384 + if (reglist16_vect & (1 << i))
3385 + {
3386 + strlen (reglist16_string) ?
3387 + sprintf (reglist16_string, "%s, %s", reglist16_string,
3388 + reg_names[INTERNAL_REGNUM (i)]) :
3389 + sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
3390 + }
3391 + }
3392 +}
3393 +
3394 +int
3395 +avr32_convert_to_reglist16 (int reglist8_vect)
3396 +{
3397 + int reglist16_vect = 0;
3398 + if (reglist8_vect & 0x1)
3399 + reglist16_vect |= 0xF;
3400 + if (reglist8_vect & 0x2)
3401 + reglist16_vect |= 0xF0;
3402 + if (reglist8_vect & 0x4)
3403 + reglist16_vect |= 0x300;
3404 + if (reglist8_vect & 0x8)
3405 + reglist16_vect |= 0x400;
3406 + if (reglist8_vect & 0x10)
3407 + reglist16_vect |= 0x800;
3408 + if (reglist8_vect & 0x20)
3409 + reglist16_vect |= 0x1000;
3410 + if (reglist8_vect & 0x40)
3411 + reglist16_vect |= 0x4000;
3412 + if (reglist8_vect & 0x80)
3413 + reglist16_vect |= 0x8000;
3414 +
3415 + return reglist16_vect;
3416 +}
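+
+/* Example: reglist8 0x41 (the r0-r3 group plus lr) expands to the
+   16-bit mask 0x400f.  */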
3417 +
3418 +void
3419 +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
3420 +{
3421 + /* Make sure reglist8_string is empty */
3422 + reglist8_string[0] = '\0';
3423 +
3424 + if (reglist8_vect & 0x1)
3425 + sprintf (reglist8_string, "r0-r3");
3426 + if (reglist8_vect & 0x2)
3427 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
3428 + reglist8_string) :
3429 + sprintf (reglist8_string, "r4-r7");
3430 + if (reglist8_vect & 0x4)
3431 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
3432 + reglist8_string) :
3433 + sprintf (reglist8_string, "r8-r9");
3434 + if (reglist8_vect & 0x8)
3435 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
3436 + reglist8_string) :
3437 + sprintf (reglist8_string, "r10");
3438 + if (reglist8_vect & 0x10)
3439 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
3440 + reglist8_string) :
3441 + sprintf (reglist8_string, "r11");
3442 + if (reglist8_vect & 0x20)
3443 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
3444 + reglist8_string) :
3445 + sprintf (reglist8_string, "r12");
3446 + if (reglist8_vect & 0x40)
3447 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
3448 + reglist8_string) :
3449 + sprintf (reglist8_string, "lr");
3450 + if (reglist8_vect & 0x80)
3451 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
3452 + reglist8_string) :
3453 + sprintf (reglist8_string, "pc");
3454 +}
3455 +
3456 +int
3457 +avr32_eh_return_data_regno (int n)
3458 +{
3459 + if (n >= 0 && n <= 3)
3460 + return 8 + n;
3461 + else
3462 + return INVALID_REGNUM;
3463 +}
3464 +
3465 +/* Compute the distance from register FROM to register TO.
3466 + These can be the arg pointer, the frame pointer or
3467 + the stack pointer.
3468 + Typical stack layout looks like this:
3469 +
3470 + old stack pointer -> | |
3471 + ----
3472 + | | \
3473 + | | saved arguments for
3474 + | | vararg functions
3475 + arg_pointer -> | | /
3476 + --
3477 + | | \
3478 + | | call saved
3479 + | | registers
3480 + | | /
3481 + frame ptr -> --
3482 + | | \
3483 + | | local
3484 + | | variables
3485 + stack ptr --> | | /
3486 + --
3487 + | | \
3488 + | | outgoing
3489 + | | arguments
3490 + | | /
3491 + --
3492 +
3493 +  For a given function some or all of these stack components
3494 + may not be needed, giving rise to the possibility of
3495 + eliminating some of the registers.
3496 +
3497 + The values returned by this function must reflect the behaviour
3498 + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
3499 +
3500 + The sign of the number returned reflects the direction of stack
3501 + growth, so the values are positive for all eliminations except
3502 + from the soft frame pointer to the hard frame pointer. */
3503 +
3504 +
3505 +int
3506 +avr32_initial_elimination_offset (int from, int to)
3507 +{
3508 + int i;
3509 + int call_saved_regs = 0;
3510 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3511 + unsigned int local_vars = get_frame_size ();
3512 +
3513 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
3514 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3515 +
3516 + for (i = 0; i < 16; ++i)
3517 + {
3518 + if (saved_reg_mask & (1 << i))
3519 + call_saved_regs += 4;
3520 + }
3521 +
3522 + for (i = 0; i < NUM_FP_REGS; ++i)
3523 + {
3524 + if (saved_fp_reg_mask & (1 << i))
3525 + call_saved_regs += 4;
3526 + }
3527 +
3528 + switch (from)
3529 + {
3530 + case ARG_POINTER_REGNUM:
3531 + switch (to)
3532 + {
3533 + case STACK_POINTER_REGNUM:
3534 + return call_saved_regs + local_vars;
3535 + case FRAME_POINTER_REGNUM:
3536 + return call_saved_regs;
3537 + default:
3538 + abort ();
3539 + }
3540 + case FRAME_POINTER_REGNUM:
3541 + switch (to)
3542 + {
3543 + case STACK_POINTER_REGNUM:
3544 + return local_vars;
3545 + default:
3546 + abort ();
3547 + }
3548 + default:
3549 + abort ();
3550 + }
3551 +}
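+
+/* Example: with two call-saved registers (8 bytes) and 16 bytes of
+   local variables, the offsets above are
+
+     ARG_POINTER   -> STACK_POINTER : 24
+     ARG_POINTER   -> FRAME_POINTER :  8
+     FRAME_POINTER -> STACK_POINTER : 16  */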
3552 +
3553 +
3554 +/*
3555 +  Returns an rtx used when passing the next argument to a function.
3556 +  avr32_init_cumulative_args() and avr32_function_arg_advance() set which
3557 +  register to use.
3558 +*/
3559 +rtx
3560 +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3561 + tree type, int named)
3562 +{
3563 + int index = -1;
3564 +
3565 + HOST_WIDE_INT arg_size, arg_rsize;
3566 + if (type)
3567 + {
3568 + arg_size = int_size_in_bytes (type);
3569 + }
3570 + else
3571 + {
3572 + arg_size = GET_MODE_SIZE (mode);
3573 + }
3574 + arg_rsize = PUSH_ROUNDING (arg_size);
3575 +
3576 + /*
3577 + The last time this macro is called, it is called with mode == VOIDmode,
3578 + and its result is passed to the call or call_value pattern as operands 2
3579 + and 3 respectively. */
3580 + if (mode == VOIDmode)
3581 + {
3582 + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
3583 + }
3584 +
3585 + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
3586 + {
3587 + return NULL_RTX;
3588 + }
3589 +
3590 + if (arg_rsize == 8)