combine all avr32 patches into a single one
[openwrt/openwrt.git] / toolchain / gcc / patches / 4.3.5 / 930-avr32_support.patch
1 --- a/gcc/builtins.c
2 +++ b/gcc/builtins.c
3 @@ -10779,7 +10779,7 @@ validate_arglist (const_tree callexpr, .
4
5 do
6 {
7 - code = va_arg (ap, enum tree_code);
8 + code = va_arg (ap, int);
9 switch (code)
10 {
11 case 0:
12 --- a/gcc/calls.c
13 +++ b/gcc/calls.c
14 @@ -3496,7 +3496,7 @@ emit_library_call_value_1 (int retval, r
15 for (; count < nargs; count++)
16 {
17 rtx val = va_arg (p, rtx);
18 - enum machine_mode mode = va_arg (p, enum machine_mode);
19 + enum machine_mode mode = va_arg (p, int);
20
21 /* We cannot convert the arg value to the mode the library wants here;
22 must do it earlier where we know the signedness of the arg. */
23 --- /dev/null
24 +++ b/gcc/config/avr32/avr32.c
25 @@ -0,0 +1,7869 @@
26 +/*
27 + Target hooks and helper functions for AVR32.
28 + Copyright 2003-2006 Atmel Corporation.
29 +
30 + Written by Ronny Pedersen, Atmel Norway, <rpedersen@atmel.com>
31 +   Initial porting by Anders Ådland.
32 +
33 + This file is part of GCC.
34 +
35 + This program is free software; you can redistribute it and/or modify
36 + it under the terms of the GNU General Public License as published by
37 + the Free Software Foundation; either version 2 of the License, or
38 + (at your option) any later version.
39 +
40 + This program is distributed in the hope that it will be useful,
41 + but WITHOUT ANY WARRANTY; without even the implied warranty of
42 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
43 + GNU General Public License for more details.
44 +
45 + You should have received a copy of the GNU General Public License
46 + along with this program; if not, write to the Free Software
47 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
48 +
49 +#include "config.h"
50 +#include "system.h"
51 +#include "coretypes.h"
52 +#include "tm.h"
53 +#include "rtl.h"
54 +#include "tree.h"
55 +#include "obstack.h"
56 +#include "regs.h"
57 +#include "hard-reg-set.h"
58 +#include "real.h"
59 +#include "insn-config.h"
60 +#include "conditions.h"
61 +#include "output.h"
62 +#include "insn-attr.h"
63 +#include "flags.h"
64 +#include "reload.h"
65 +#include "function.h"
66 +#include "expr.h"
67 +#include "optabs.h"
68 +#include "toplev.h"
69 +#include "recog.h"
70 +#include "ggc.h"
71 +#include "except.h"
72 +#include "c-pragma.h"
73 +#include "integrate.h"
74 +#include "tm_p.h"
75 +#include "langhooks.h"
76 +#include "hooks.h"
77 +#include "df.h"
78 +
79 +#include "target.h"
80 +#include "target-def.h"
81 +
82 +#include <ctype.h>
83 +
84 +/* Forward definitions of types. */
85 +typedef struct minipool_node Mnode;
86 +typedef struct minipool_fixup Mfix;
87 +
88 +/* Obstack for minipool constant handling. */
89 +static struct obstack minipool_obstack;
90 +static char *minipool_startobj;
91 +static rtx minipool_vector_label;
92 +
93 +/* True if we are currently building a constant table. */
94 +int making_const_table;
95 +
96 +/* Some forward function declarations */
97 +static unsigned long avr32_isr_value (tree);
98 +static unsigned long avr32_compute_func_type (void);
99 +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
100 +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
101 +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
102 + int flags, bool * no_add_attrs);
103 +static void avr32_reorg (void);
104 +bool avr32_return_in_msb (tree type);
105 +bool avr32_vector_mode_supported (enum machine_mode mode);
106 +static void avr32_init_libfuncs (void);
107 +
108 +
109 +static void
110 +avr32_add_gc_roots (void)
111 +{
112 + gcc_obstack_init (&minipool_obstack);
113 + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
114 +}
115 +
116 +
117 +/* List of all known AVR32 parts */
118 +static const struct part_type_s avr32_part_types[] = {
119 + /* name, part_type, architecture type, macro */
120 + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
121 + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
122 + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
123 + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
124 + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
125 + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0128__"},
126 + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0256__"},
127 + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0512__"},
128 + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A0512ES__"},
129 + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1128__"},
130 + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1256__"},
131 + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1512__"},
132 + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A1512ES__"},
133 + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, "__AVR32_UC3A3256S__"},
134 + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364__"},
135 + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364S__"},
136 + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128__"},
137 + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128S__"},
138 + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256__"},
139 + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256S__"},
140 + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B064__"},
141 + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0128__"},
142 + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256__"},
143 + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256ES__"},
144 + {"uc3b0512revc", PART_TYPE_AVR32_UC3B0512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512REVC__"},
145 + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B164__"},
146 + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1128__"},
147 + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256__"},
148 + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256ES__"},
149 + {"uc3b1512revc", PART_TYPE_AVR32_UC3B1512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512REVC__"},
150 + {"uc3c0512c", PART_TYPE_AVR32_UC3C0512C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0512C__"},
151 + {"uc3c0256c", PART_TYPE_AVR32_UC3C0256C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0256C__"},
152 + {"uc3c0128c", PART_TYPE_AVR32_UC3C0128C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0128C__"},
153 + {"uc3c064c", PART_TYPE_AVR32_UC3C064C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C064C__"},
154 + {"uc3c1512c", PART_TYPE_AVR32_UC3C1512C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1512C__"},
155 + {"uc3c1256c", PART_TYPE_AVR32_UC3C1256C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1256C__"},
156 + {"uc3c1128c", PART_TYPE_AVR32_UC3C1128C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1128C__"},
157 + {"uc3c164c", PART_TYPE_AVR32_UC3C164C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C164C__"},
158 + {"uc3c2512c", PART_TYPE_AVR32_UC3C2512C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2512C__"},
159 + {"uc3c2256c", PART_TYPE_AVR32_UC3C2256C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2256C__"},
160 + {"uc3c2128c", PART_TYPE_AVR32_UC3C2128C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2128C__"},
161 + {"uc3c264c", PART_TYPE_AVR32_UC3C264C, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C264C__"},
162 + {"uc3l064", PART_TYPE_AVR32_UC3L064, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064__"},
163 + {"uc3l032", PART_TYPE_AVR32_UC3L032, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L032__"},
164 + {"uc3l016", PART_TYPE_AVR32_UC3L016, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L016__"},
165 + {NULL, 0, 0, NULL}
166 +};
167 +
168 +/* List of all known AVR32 architectures */
169 +static const struct arch_type_s avr32_arch_types[] = {
170 + /* name, architecture type, microarchitecture type, feature flags, macro */
171 + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
172 + (FLAG_AVR32_HAS_DSP
173 + | FLAG_AVR32_HAS_SIMD
174 + | FLAG_AVR32_HAS_UNALIGNED_WORD
175 + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
176 + | FLAG_AVR32_HAS_CACHES),
177 + "__AVR32_AP__"},
178 + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
179 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
180 + "__AVR32_UC__=1"},
181 + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
182 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
183 + | FLAG_AVR32_HAS_V2_INSNS),
184 + "__AVR32_UC__=2"},
185 + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
186 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
187 + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
188 + "__AVR32_UC__=2"},
189 + {"ucr3", ARCH_TYPE_AVR32_UCR3, UARCH_TYPE_AVR32A,
190 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
191 + | FLAG_AVR32_HAS_V2_INSNS),
192 + "__AVR32_UC__=3"},
193 + {NULL, 0, 0, 0, NULL}
194 +};
195 +
196 +/* Default arch name */
197 +const char *avr32_arch_name = "none";
198 +const char *avr32_part_name = "none";
199 +
200 +const struct part_type_s *avr32_part;
201 +const struct arch_type_s *avr32_arch;
202 +
203 +
204 +/* Set default target_flags. */
205 +#undef TARGET_DEFAULT_TARGET_FLAGS
206 +#define TARGET_DEFAULT_TARGET_FLAGS \
207 + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
208 +
209 +void
210 +avr32_optimization_options (int level,
211 + int size){
212 + if (AVR32_ALWAYS_PIC)
213 + flag_pic = 1;
214 +
215 + /* Enable section anchors if optimization is enabled. */
216 + if (level > 0 || size)
217 + flag_section_anchors = 1;
218 +}
219 +
220 +/* Override command line options */
221 +void
222 +avr32_override_options (void)
223 +{
224 + const struct part_type_s *part;
225 + const struct arch_type_s *arch;
226 +
227 +  /* Add backward compatibility */
228 + if (strcmp ("uc", avr32_arch_name)== 0)
229 + {
230 + fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
231 + "Please use '-march=ucr1' instead. "
232 + "Converting to arch 'ucr1'\n",
233 + avr32_arch_name);
234 + avr32_arch_name="ucr1";
235 + }
236 +
237 + /* Check if arch type is set. */
238 + for (arch = avr32_arch_types; arch->name; arch++)
239 + {
240 + if (strcmp (arch->name, avr32_arch_name) == 0)
241 + break;
242 + }
243 + avr32_arch = arch;
244 +
245 + if (!arch->name && strcmp("none", avr32_arch_name) != 0)
246 + {
247 + fprintf (stderr, "Unknown arch `%s' specified\n"
248 + "Known arch names:\n"
249 + "\tuc (deprecated)\n",
250 + avr32_arch_name);
251 + for (arch = avr32_arch_types; arch->name; arch++)
252 + fprintf (stderr, "\t%s\n", arch->name);
253 + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
254 + }
255 +
256 + /* Check if part type is set. */
257 + for (part = avr32_part_types; part->name; part++)
258 + if (strcmp (part->name, avr32_part_name) == 0)
259 + break;
260 +
261 + avr32_part = part;
262 + if (!part->name)
263 + {
264 + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
265 + avr32_part_name);
266 + for (part = avr32_part_types; part->name; part++)
267 + {
268 + if (strcmp("none", part->name) != 0)
269 + fprintf (stderr, "\t%s\n", part->name);
270 + }
271 + /* Set default to NONE*/
272 + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
273 + }
274 +
275 + /* NB! option -march= overrides option -mpart
276 + * if both are used at the same time */
277 + if (!arch->name)
278 + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
279 +
280 + /* If optimization level is two or greater, then align start of loops to a
281 + word boundary since this will allow folding the first insn of the loop.
282 + Do this only for targets supporting branch prediction. */
283 + if (optimize >= 2 && TARGET_BRANCH_PRED)
284 + align_loops = 2;
285 +
286 +
287 + /* Enable fast-float library if unsafe math optimizations
288 + are used. */
289 + if (flag_unsafe_math_optimizations)
290 + target_flags |= MASK_FAST_FLOAT;
291 +
292 + /* Check if we should set avr32_imm_in_const_pool
293 + based on if caches are present or not. */
294 + if ( avr32_imm_in_const_pool == -1 )
295 + {
296 + if ( TARGET_CACHES )
297 + avr32_imm_in_const_pool = 1;
298 + else
299 + avr32_imm_in_const_pool = 0;
300 + }
301 +
302 + if (TARGET_NO_PIC)
303 + flag_pic = 0;
304 +
305 + avr32_add_gc_roots ();
306 +}
307 +
308 +
309 +/*
310 +If defined, a function that outputs the assembler code for entry to a
311 +function. The prologue is responsible for setting up the stack frame,
312 +initializing the frame pointer register, saving registers that must be
313 +saved, and allocating size additional bytes of storage for the
314 +local variables. size is an integer. file is a stdio
315 +stream to which the assembler code should be output.
316 +
317 +The label for the beginning of the function need not be output by this
318 +macro. That has already been done when the macro is run.
319 +
320 +To determine which registers to save, the macro can refer to the array
321 +regs_ever_live: element r is nonzero if hard register
322 +r is used anywhere within the function. This implies the function
323 +prologue should save register r, provided it is not one of the
324 +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
325 +regs_ever_live.)
326 +
327 +On machines that have ``register windows'', the function entry code does
328 +not save on the stack the registers that are in the windows, even if
329 +they are supposed to be preserved by function calls; instead it takes
330 +appropriate steps to ``push'' the register stack, if any non-call-used
331 +registers are used in the function.
332 +
333 +On machines where functions may or may not have frame-pointers, the
334 +function entry code must vary accordingly; it must set up the frame
335 +pointer if one is wanted, and not otherwise. To determine whether a
336 +frame pointer is wanted, the macro can refer to the variable
337 +frame_pointer_needed. The variable's value will be 1 at run
338 +time in a function that needs a frame pointer. (see Elimination).
339 +
340 +The function entry code is responsible for allocating any stack space
341 +required for the function. This stack space consists of the regions
342 +listed below. In most cases, these regions are allocated in the
343 +order listed, with the last listed region closest to the top of the
344 +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
345 +the highest address if it is not defined). You can use a different order
346 +for a machine if doing so is more convenient or required for
347 +compatibility reasons. Except in cases where required by standard
348 +or by a debugger, there is no reason why the stack layout used by GCC
349 +need agree with that used by other compilers for a machine.
350 +*/
351 +
352 +#undef TARGET_ASM_FUNCTION_PROLOGUE
353 +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
354 +
355 +
356 +#undef TARGET_DEFAULT_SHORT_ENUMS
357 +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
358 +
359 +#undef TARGET_PROMOTE_FUNCTION_ARGS
360 +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
361 +
362 +#undef TARGET_PROMOTE_FUNCTION_RETURN
363 +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
364 +
365 +#undef TARGET_PROMOTE_PROTOTYPES
366 +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
367 +
368 +#undef TARGET_MUST_PASS_IN_STACK
369 +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
370 +
371 +#undef TARGET_PASS_BY_REFERENCE
372 +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
373 +
374 +#undef TARGET_STRICT_ARGUMENT_NAMING
375 +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
376 +
377 +#undef TARGET_VECTOR_MODE_SUPPORTED_P
378 +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
379 +
380 +#undef TARGET_RETURN_IN_MEMORY
381 +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
382 +
383 +#undef TARGET_RETURN_IN_MSB
384 +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
385 +
386 +#undef TARGET_ARG_PARTIAL_BYTES
387 +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
388 +
389 +#undef TARGET_STRIP_NAME_ENCODING
390 +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
391 +
392 +#define streq(string1, string2) (strcmp (string1, string2) == 0)
393 +
394 +#undef TARGET_NARROW_VOLATILE_BITFIELD
395 +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
396 +
397 +#undef TARGET_ATTRIBUTE_TABLE
398 +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
399 +
400 +#undef TARGET_COMP_TYPE_ATTRIBUTES
401 +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
402 +
403 +
404 +#undef TARGET_RTX_COSTS
405 +#define TARGET_RTX_COSTS avr32_rtx_costs
406 +
407 +#undef TARGET_CANNOT_FORCE_CONST_MEM
408 +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
409 +
410 +#undef TARGET_ASM_INTEGER
411 +#define TARGET_ASM_INTEGER avr32_assemble_integer
412 +
413 +#undef TARGET_FUNCTION_VALUE
414 +#define TARGET_FUNCTION_VALUE avr32_function_value
415 +
416 +#undef TARGET_MIN_ANCHOR_OFFSET
417 +#define TARGET_MIN_ANCHOR_OFFSET (0)
418 +
419 +#undef TARGET_MAX_ANCHOR_OFFSET
420 +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
421 +
422 +
423 +/*
424 + * Switches to the appropriate section for output of constant pool
425 + * entry x in mode. You can assume that x is some kind of constant in
426 + * RTL. The argument mode is redundant except in the case of a
427 + * const_int rtx. Select the section by calling readonly_data_ section
428 + * or one of the alternatives for other sections. align is the
429 + * constant alignment in bits.
430 + *
431 + * The default version of this function takes care of putting symbolic
432 + * constants in flag_ pic mode in data_section and everything else in
433 + * readonly_data_section.
434 + */
435 +//#undef TARGET_ASM_SELECT_RTX_SECTION
436 +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
437 +
438 +
439 +/*
440 + * If non-null, this hook performs a target-specific pass over the
441 + * instruction stream. The compiler will run it at all optimization
442 + * levels, just before the point at which it normally does
443 + * delayed-branch scheduling.
444 + *
445 + * The exact purpose of the hook varies from target to target. Some
446 + * use it to do transformations that are necessary for correctness,
447 + * such as laying out in-function constant pools or avoiding hardware
448 + * hazards. Others use it as an opportunity to do some
449 + * machine-dependent optimizations.
450 + *
451 + * You need not implement the hook if it has nothing to do. The
452 + * default definition is null.
453 + */
454 +#undef TARGET_MACHINE_DEPENDENT_REORG
455 +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
456 +
457 +/* Target hook for assembling integer objects.
458 + Need to handle integer vectors */
459 +static bool
460 +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
461 +{
462 + if (avr32_vector_mode_supported (GET_MODE (x)))
463 + {
464 + int i, units;
465 +
466 + if (GET_CODE (x) != CONST_VECTOR)
467 + abort ();
468 +
469 + units = CONST_VECTOR_NUNITS (x);
470 +
471 + switch (GET_MODE (x))
472 + {
473 + case V2HImode:
474 + size = 2;
475 + break;
476 + case V4QImode:
477 + size = 1;
478 + break;
479 + default:
480 + abort ();
481 + }
482 +
483 + for (i = 0; i < units; i++)
484 + {
485 + rtx elt;
486 +
487 + elt = CONST_VECTOR_ELT (x, i);
488 + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
489 + }
490 +
491 + return true;
492 + }
493 +
494 + return default_assemble_integer (x, size, aligned_p);
495 +}
496 +
497 +/*
498 + * This target hook describes the relative costs of RTL expressions.
499 + *
500 + * The cost may depend on the precise form of the expression, which is
501 + * available for examination in x, and the rtx code of the expression
502 + * in which it is contained, found in outer_code. code is the
503 + * expression code--redundant, since it can be obtained with GET_CODE
504 + * (x).
505 + *
506 + * In implementing this hook, you can use the construct COSTS_N_INSNS
507 + * (n) to specify a cost equal to n fast instructions.
508 + *
509 + * On entry to the hook, *total contains a default estimate for the
510 + * cost of the expression. The hook should modify this value as
511 + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
512 + * for multiplications, COSTS_N_INSNS (7) for division and modulus
513 + * operations, and COSTS_N_INSNS (1) for all other operations.
514 + *
515 + * When optimizing for code size, i.e. when optimize_size is non-zero,
516 + * this target hook should be used to estimate the relative size cost
517 + * of an expression, again relative to COSTS_N_INSNS.
518 + *
519 + * The hook returns true when all subexpressions of x have been
520 + * processed, and false when rtx_cost should recurse.
521 + */
522 +
523 +/* Worker routine for avr32_rtx_costs. */
524 +static inline int
525 +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
526 + enum rtx_code outer ATTRIBUTE_UNUSED)
527 +{
528 + enum machine_mode mode = GET_MODE (x);
529 +
530 + switch (GET_CODE (x))
531 + {
532 + case MEM:
533 + /* Using pre decrement / post increment memory operations on the
534 + avr32_uc architecture means that two writebacks must be performed
535 + and hence two cycles are needed. */
536 + if (!optimize_size
537 + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
538 + && TARGET_ARCH_UC
539 + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
540 + || GET_CODE (XEXP (x, 0)) == POST_INC))
541 + return COSTS_N_INSNS (5);
542 +
543 + /* Memory costs quite a lot for the first word, but subsequent words
544 + load at the equivalent of a single insn each. */
545 + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
546 + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
547 +
548 + return COSTS_N_INSNS (4);
549 + case SYMBOL_REF:
550 + case CONST:
551 + /* These are valid for the pseudo insns: lda.w and call which operates
552 + on direct addresses. We assume that the cost of a lda.w is the same
553 + as the cost of a ld.w insn. */
554 + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
555 + case DIV:
556 + case MOD:
557 + case UDIV:
558 + case UMOD:
559 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
560 +
561 + case ROTATE:
562 + case ROTATERT:
563 + if (mode == TImode)
564 + return COSTS_N_INSNS (100);
565 +
566 + if (mode == DImode)
567 + return COSTS_N_INSNS (10);
568 + return COSTS_N_INSNS (4);
569 + case ASHIFT:
570 + case LSHIFTRT:
571 + case ASHIFTRT:
572 + case NOT:
573 + if (mode == TImode)
574 + return COSTS_N_INSNS (10);
575 +
576 + if (mode == DImode)
577 + return COSTS_N_INSNS (4);
578 + return COSTS_N_INSNS (1);
579 + case PLUS:
580 + case MINUS:
581 + case NEG:
582 + case COMPARE:
583 + case ABS:
584 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
585 + return COSTS_N_INSNS (100);
586 +
587 + if (mode == TImode)
588 + return COSTS_N_INSNS (50);
589 +
590 + if (mode == DImode)
591 + return COSTS_N_INSNS (2);
592 + return COSTS_N_INSNS (1);
593 +
594 + case MULT:
595 + {
596 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
597 + return COSTS_N_INSNS (300);
598 +
599 + if (mode == TImode)
600 + return COSTS_N_INSNS (16);
601 +
602 + if (mode == DImode)
603 + return COSTS_N_INSNS (4);
604 +
605 + if (mode == HImode)
606 + return COSTS_N_INSNS (2);
607 +
608 + return COSTS_N_INSNS (3);
609 + }
610 + case IF_THEN_ELSE:
611 + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
612 + return COSTS_N_INSNS (4);
613 + return COSTS_N_INSNS (1);
614 + case SIGN_EXTEND:
615 + case ZERO_EXTEND:
616 + /* Sign/Zero extensions of registers cost quite much since these
617 +     instructions only take one register operand which means that gcc
618 +     often must insert some move instructions */
619 + if (mode == QImode || mode == HImode)
620 + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
621 + return COSTS_N_INSNS (4);
622 + case UNSPEC:
623 + /* divmod operations */
624 + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
625 + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
626 + {
627 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
628 + }
629 + /* Fallthrough */
630 + default:
631 + return COSTS_N_INSNS (1);
632 + }
633 +}
634 +
635 +static bool
636 +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
637 +{
638 + *total = avr32_rtx_costs_1 (x, code, outer_code);
639 + return true;
640 +}
641 +
642 +
643 +bool
644 +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
645 +{
646 + /* Do not want symbols in the constant pool when compiling pic or if using
647 + address pseudo instructions. */
648 + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
649 + && avr32_find_symbol (x) != NULL_RTX);
650 +}
651 +
652 +
653 +/* Table of machine attributes. */
654 +const struct attribute_spec avr32_attribute_table[] = {
655 + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
656 + /* Interrupt Service Routines have special prologue and epilogue
657 + requirements. */
658 + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
659 + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
660 + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
661 + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
662 + {NULL, 0, 0, false, false, false, NULL}
663 +};
664 +
665 +
666 +typedef struct
667 +{
668 + const char *const arg;
669 + const unsigned long return_value;
670 +}
671 +isr_attribute_arg;
672 +
673 +static const isr_attribute_arg isr_attribute_args[] = {
674 + {"FULL", AVR32_FT_ISR_FULL},
675 + {"full", AVR32_FT_ISR_FULL},
676 + {"HALF", AVR32_FT_ISR_HALF},
677 + {"half", AVR32_FT_ISR_HALF},
678 + {"NONE", AVR32_FT_ISR_NONE},
679 + {"none", AVR32_FT_ISR_NONE},
680 + {"UNDEF", AVR32_FT_ISR_NONE},
681 + {"undef", AVR32_FT_ISR_NONE},
682 + {"SWI", AVR32_FT_ISR_NONE},
683 + {"swi", AVR32_FT_ISR_NONE},
684 + {NULL, AVR32_FT_ISR_NONE}
685 +};
686 +
687 +/* Returns the (interrupt) function type of the current
688 + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
689 +
690 +static unsigned long
691 +avr32_isr_value (tree argument)
692 +{
693 + const isr_attribute_arg *ptr;
694 + const char *arg;
695 +
696 + /* No argument - default to ISR_NONE. */
697 + if (argument == NULL_TREE)
698 + return AVR32_FT_ISR_NONE;
699 +
700 + /* Get the value of the argument. */
701 + if (TREE_VALUE (argument) == NULL_TREE
702 + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
703 + return AVR32_FT_UNKNOWN;
704 +
705 + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
706 +
707 + /* Check it against the list of known arguments. */
708 + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
709 + if (streq (arg, ptr->arg))
710 + return ptr->return_value;
711 +
712 + /* An unrecognized interrupt type. */
713 + return AVR32_FT_UNKNOWN;
714 +}
715 +
716 +
717 +
718 +/*
719 +These hooks specify assembly directives for creating certain kinds
720 +of integer object. The TARGET_ASM_BYTE_OP directive creates a
721 +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
722 +aligned two-byte object, and so on. Any of the hooks may be
723 +NULL, indicating that no suitable directive is available.
724 +
725 +The compiler will print these strings at the start of a new line,
726 +followed immediately by the object's initial value. In most cases,
727 +the string should contain a tab, a pseudo-op, and then another tab.
728 +*/
729 +#undef TARGET_ASM_BYTE_OP
730 +#define TARGET_ASM_BYTE_OP "\t.byte\t"
731 +#undef TARGET_ASM_ALIGNED_HI_OP
732 +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
733 +#undef TARGET_ASM_ALIGNED_SI_OP
734 +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
735 +#undef TARGET_ASM_ALIGNED_DI_OP
736 +#define TARGET_ASM_ALIGNED_DI_OP NULL
737 +#undef TARGET_ASM_ALIGNED_TI_OP
738 +#define TARGET_ASM_ALIGNED_TI_OP NULL
739 +#undef TARGET_ASM_UNALIGNED_HI_OP
740 +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
741 +#undef TARGET_ASM_UNALIGNED_SI_OP
742 +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
743 +#undef TARGET_ASM_UNALIGNED_DI_OP
744 +#define TARGET_ASM_UNALIGNED_DI_OP NULL
745 +#undef TARGET_ASM_UNALIGNED_TI_OP
746 +#define TARGET_ASM_UNALIGNED_TI_OP NULL
747 +
748 +#undef TARGET_ASM_OUTPUT_MI_THUNK
749 +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
750 +
751 +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
752 +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
753 +
754 +static void
755 +avr32_output_mi_thunk (FILE * file,
756 + tree thunk ATTRIBUTE_UNUSED,
757 + HOST_WIDE_INT delta,
758 + HOST_WIDE_INT vcall_offset, tree function)
759 + {
760 + int mi_delta = delta;
761 + int this_regno =
762 + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
763 + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
764 +
765 +
766 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
767 + || vcall_offset)
768 + {
769 + fputs ("\tpushm\tlr\n", file);
770 + }
771 +
772 +
773 + if (mi_delta != 0)
774 + {
775 + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
776 + {
777 + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
778 + }
779 + else
780 + {
781 + /* Immediate is larger than k21 we must make us a temp register by
782 + pushing a register to the stack. */
783 + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
784 + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
785 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
786 + }
787 + }
788 +
789 +
790 + if (vcall_offset != 0)
791 + {
792 + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
793 + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
794 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
795 + }
796 +
797 +
798 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
799 + || vcall_offset)
800 + {
801 + fputs ("\tpopm\tlr\n", file);
802 + }
803 +
804 + /* Jump to the function. We assume that we can use an rjmp since the
805 + function to jump to is local and probably not too far away from
806 + the thunk. If this assumption proves to be wrong we could implement
807 + this jump by calculating the offset between the jump source and destination
808 + and put this in the constant pool and then perform an add to pc.
809 + This would also be legitimate PIC code. But for now we hope that an rjmp
810 + will be sufficient...
811 + */
812 + fputs ("\trjmp\t", file);
813 + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
814 + fputc ('\n', file);
815 + }
816 +
817 +
818 +/* Implements target hook vector_mode_supported. */
819 +bool
820 +avr32_vector_mode_supported (enum machine_mode mode)
821 +{
822 + if ((mode == V2HImode) || (mode == V4QImode))
823 + return true;
824 +
825 + return false;
826 +}
827 +
828 +
829 +#undef TARGET_INIT_LIBFUNCS
830 +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
831 +
832 +#undef TARGET_INIT_BUILTINS
833 +#define TARGET_INIT_BUILTINS avr32_init_builtins
834 +
835 +#undef TARGET_EXPAND_BUILTIN
836 +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
837 +
838 +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
839 + void_ftype_ptr_int;
840 +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int;
841 +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
842 + short_ftype_short_short;
843 +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
844 +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
845 +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
846 +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
847 +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
848 +
849 +#define def_builtin(NAME, TYPE, CODE) \
850 + add_builtin_function ((NAME), (TYPE), (CODE), \
851 + BUILT_IN_MD, NULL, NULL_TREE)
852 +
853 +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
854 + do \
855 + { \
856 + if ((MASK)) \
857 + add_builtin_function ((NAME), (TYPE), (CODE), \
858 + BUILT_IN_MD, NULL, NULL_TREE); \
859 + } \
860 + while (0)
861 +
862 +struct builtin_description
863 +{
864 + const unsigned int mask;
865 + const enum insn_code icode;
866 + const char *const name;
867 + const int code;
868 + const enum rtx_code comparison;
869 + const unsigned int flag;
870 + const tree *ftype;
871 +};
872 +
873 +static const struct builtin_description bdesc_2arg[] = {
874 +#define DSP_BUILTIN(code, builtin, ftype) \
875 + { 1, CODE_FOR_##code, "__builtin_" #code , \
876 + AVR32_BUILTIN_##builtin, 0, 0, ftype }
877 +
878 + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
879 + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
880 + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
881 + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
882 + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
883 + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
884 + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
885 + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
886 + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
887 + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
888 + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
889 +};
890 +
891 +
892 +void
893 +avr32_init_builtins (void)
894 +{
895 + unsigned int i;
896 + const struct builtin_description *d;
897 + tree endlink = void_list_node;
898 + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
899 + tree longlong_endlink =
900 + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
901 + tree short_endlink =
902 + tree_cons (NULL_TREE, short_integer_type_node, endlink);
903 + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
904 +
905 + /* int func (int) */
906 + int_ftype_int = build_function_type (integer_type_node, int_endlink);
907 +
908 + /* short func (short) */
909 + short_ftype_short
910 + = build_function_type (short_integer_type_node, short_endlink);
911 +
912 + /* short func (short, short) */
913 + short_ftype_short_short
914 + = build_function_type (short_integer_type_node,
915 + tree_cons (NULL_TREE, short_integer_type_node,
916 + short_endlink));
917 +
918 + /* long long func (long long, short, short) */
919 + longlong_ftype_longlong_short_short
920 + = build_function_type (long_long_integer_type_node,
921 + tree_cons (NULL_TREE, long_long_integer_type_node,
922 + tree_cons (NULL_TREE,
923 + short_integer_type_node,
924 + short_endlink)));
925 +
926 + /* long long func (short, short) */
927 + longlong_ftype_short_short
928 + = build_function_type (long_long_integer_type_node,
929 + tree_cons (NULL_TREE, short_integer_type_node,
930 + short_endlink));
931 +
932 + /* int func (int, int) */
933 + int_ftype_int_int
934 + = build_function_type (integer_type_node,
935 + tree_cons (NULL_TREE, integer_type_node,
936 + int_endlink));
937 +
938 + /* long long func (int, int) */
939 + longlong_ftype_int_int
940 + = build_function_type (long_long_integer_type_node,
941 + tree_cons (NULL_TREE, integer_type_node,
942 + int_endlink));
943 +
944 + /* long long int func (long long, int, short) */
945 + longlong_ftype_longlong_int_short
946 + = build_function_type (long_long_integer_type_node,
947 + tree_cons (NULL_TREE, long_long_integer_type_node,
948 + tree_cons (NULL_TREE, integer_type_node,
949 + short_endlink)));
950 +
951 + /* long long int func (int, short) */
952 + longlong_ftype_int_short
953 + = build_function_type (long_long_integer_type_node,
954 + tree_cons (NULL_TREE, integer_type_node,
955 + short_endlink));
956 +
957 + /* int func (int, short, short) */
958 + int_ftype_int_short_short
959 + = build_function_type (integer_type_node,
960 + tree_cons (NULL_TREE, integer_type_node,
961 + tree_cons (NULL_TREE,
962 + short_integer_type_node,
963 + short_endlink)));
964 +
965 + /* int func (short, short) */
966 + int_ftype_short_short
967 + = build_function_type (integer_type_node,
968 + tree_cons (NULL_TREE, short_integer_type_node,
969 + short_endlink));
970 +
971 + /* int func (int, short) */
972 + int_ftype_int_short
973 + = build_function_type (integer_type_node,
974 + tree_cons (NULL_TREE, integer_type_node,
975 + short_endlink));
976 +
977 + /* void func (int, int) */
978 + void_ftype_int_int
979 + = build_function_type (void_type_node,
980 + tree_cons (NULL_TREE, integer_type_node,
981 + int_endlink));
982 +
983 + /* void func (int, int, int) */
984 + void_ftype_int_int_int
985 + = build_function_type (void_type_node,
986 + tree_cons (NULL_TREE, integer_type_node,
987 + tree_cons (NULL_TREE, integer_type_node,
988 + int_endlink)));
989 +
990 + /* void func (int, int, long long) */
991 + void_ftype_int_int_longlong
992 + = build_function_type (void_type_node,
993 + tree_cons (NULL_TREE, integer_type_node,
994 + tree_cons (NULL_TREE, integer_type_node,
995 + longlong_endlink)));
996 +
997 + /* void func (int, int, int, int, int) */
998 + void_ftype_int_int_int_int_int
999 + = build_function_type (void_type_node,
1000 + tree_cons (NULL_TREE, integer_type_node,
1001 + tree_cons (NULL_TREE, integer_type_node,
1002 + tree_cons (NULL_TREE,
1003 + integer_type_node,
1004 + tree_cons
1005 + (NULL_TREE,
1006 + integer_type_node,
1007 + int_endlink)))));
1008 +
1009 + /* void func (void *, int) */
1010 + void_ftype_ptr_int
1011 + = build_function_type (void_type_node,
1012 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1013 +
1014 + /* void func (int) */
1015 + void_ftype_int = build_function_type (void_type_node, int_endlink);
1016 +
1017 + /* void func (void) */
1018 + void_ftype_void = build_function_type (void_type_node, void_endlink);
1019 +
1020 + /* int func (void) */
1021 + int_ftype_void = build_function_type (integer_type_node, void_endlink);
1022 +
1023 + /* int func (void *, int) */
1024 + int_ftype_ptr_int
1025 + = build_function_type (integer_type_node,
1026 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1027 +
1028 + /* int func (int, int, int) */
1029 + int_ftype_int_int_int
1030 + = build_function_type (integer_type_node,
1031 + tree_cons (NULL_TREE, integer_type_node,
1032 + tree_cons (NULL_TREE, integer_type_node,
1033 + int_endlink)));
1034 +
1035 + /* Initialize avr32 builtins. */
1036 + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
1037 + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
1038 + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
1039 + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
1040 + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
1041 + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
1042 + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
1043 + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
1044 + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
1045 + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
1046 + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
1047 + def_builtin ("__builtin_breakpoint", void_ftype_void,
1048 + AVR32_BUILTIN_BREAKPOINT);
1049 + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
1050 + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
1051 + def_builtin ("__builtin_bswap_16", short_ftype_short,
1052 + AVR32_BUILTIN_BSWAP16);
1053 + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
1054 + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
1055 + AVR32_BUILTIN_COP);
1056 + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
1057 + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
1058 + AVR32_BUILTIN_MVRC_W);
1059 + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
1060 + AVR32_BUILTIN_MVCR_D);
1061 + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
1062 + AVR32_BUILTIN_MVRC_D);
1063 + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
1064 + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
1065 + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
1066 + AVR32_BUILTIN_SATRNDS);
1067 + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
1068 + AVR32_BUILTIN_SATRNDU);
1069 + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
1070 + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
1071 + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
1072 + AVR32_BUILTIN_MACSATHH_W);
1073 + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
1074 + AVR32_BUILTIN_MACWH_D);
1075 + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
1076 + AVR32_BUILTIN_MACHH_D);
1077 +
1078 + /* Add all builtins that are more or less simple operations on two
1079 + operands. */
1080 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1081 + {
1082 + /* Use one of the operands; the target can have a different mode for
1083 + mask-generating compares. */
1084 +
1085 + if (d->name == 0)
1086 + continue;
1087 +
1088 + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
1089 + }
1090 +}
1091 +
1092 +
1093 +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
1094 +
1095 +static rtx
1096 +avr32_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
1097 +{
1098 + rtx pat;
1099 + tree arg0 = CALL_EXPR_ARG (exp,0);
1100 + tree arg1 = CALL_EXPR_ARG (exp,1);
1101 + rtx op0 = expand_normal (arg0);
1102 + rtx op1 = expand_normal (arg1);
1103 + enum machine_mode tmode = insn_data[icode].operand[0].mode;
1104 + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1105 + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1106 +
1107 + if (!target
1108 + || GET_MODE (target) != tmode
1109 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1110 + target = gen_reg_rtx (tmode);
1111 +
1112 + /* In case the insn wants input operands in modes different from the
1113 + result, convert them to the expected modes. */
1114 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1115 + {
1116 + /* If op0 is already a reg we must cast it to the correct mode. */
1117 + if (REG_P (op0))
1118 + op0 = convert_to_mode (mode0, op0, 1);
1119 + else
1120 + op0 = copy_to_mode_reg (mode0, op0);
1121 + }
1122 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1123 + {
1124 + /* If op1 is already a reg we must cast it to the correct mode. */
1125 + if (REG_P (op1))
1126 + op1 = convert_to_mode (mode1, op1, 1);
1127 + else
1128 + op1 = copy_to_mode_reg (mode1, op1);
1129 + }
1130 + pat = GEN_FCN (icode) (target, op0, op1);
1131 + if (!pat)
1132 + return 0;
1133 + emit_insn (pat);
1134 + return target;
1135 +}
1136 +
1137 +/* Expand an expression EXP that calls a built-in function,
1138 + with result going to TARGET if that's convenient
1139 + (and in mode MODE if that's convenient).
1140 + SUBTARGET may be used as the target for computing one of EXP's operands.
1141 + IGNORE is nonzero if the value is to be ignored. */
1142 +
1143 +rtx
1144 +avr32_expand_builtin (tree exp,
1145 + rtx target,
1146 + rtx subtarget ATTRIBUTE_UNUSED,
1147 + enum machine_mode mode ATTRIBUTE_UNUSED,
1148 + int ignore ATTRIBUTE_UNUSED)
1149 +{
1150 + const struct builtin_description *d;
1151 + unsigned int i;
1152 + enum insn_code icode = 0;
1153 + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
1154 + tree arg0, arg1, arg2;
1155 + rtx op0, op1, op2, pat;
1156 + enum machine_mode tmode, mode0, mode1;
1157 + enum machine_mode arg0_mode;
1158 + int fcode = DECL_FUNCTION_CODE (fndecl);
1159 +
1160 + switch (fcode)
1161 + {
1162 + default:
1163 + break;
1164 +
1165 + case AVR32_BUILTIN_SATS:
1166 + case AVR32_BUILTIN_SATU:
1167 + case AVR32_BUILTIN_SATRNDS:
1168 + case AVR32_BUILTIN_SATRNDU:
1169 + {
1170 + const char *fname;
1171 + switch (fcode)
1172 + {
1173 + default:
1174 + case AVR32_BUILTIN_SATS:
1175 + icode = CODE_FOR_sats;
1176 + fname = "sats";
1177 + break;
1178 + case AVR32_BUILTIN_SATU:
1179 + icode = CODE_FOR_satu;
1180 + fname = "satu";
1181 + break;
1182 + case AVR32_BUILTIN_SATRNDS:
1183 + icode = CODE_FOR_satrnds;
1184 + fname = "satrnds";
1185 + break;
1186 + case AVR32_BUILTIN_SATRNDU:
1187 + icode = CODE_FOR_satrndu;
1188 + fname = "satrndu";
1189 + break;
1190 + }
1191 +
1192 + arg0 = CALL_EXPR_ARG (exp,0);
1193 + arg1 = CALL_EXPR_ARG (exp,1);
1194 + arg2 = CALL_EXPR_ARG (exp,2);
1195 + op0 = expand_normal (arg0);
1196 + op1 = expand_normal (arg1);
1197 + op2 = expand_normal (arg2);
1198 +
1199 + tmode = insn_data[icode].operand[0].mode;
1200 +
1201 +
1202 + if (target == 0
1203 + || GET_MODE (target) != tmode
1204 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1205 + target = gen_reg_rtx (tmode);
1206 +
1207 +
1208 + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
1209 + {
1210 + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
1211 + }
1212 +
1213 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1214 + {
1215 + error ("Parameter 2 to __builtin_%s should be a constant number.",
1216 + fname);
1217 + return NULL_RTX;
1218 + }
1219 +
1220 + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
1221 + {
1222 + error ("Parameter 3 to __builtin_%s should be a constant number.",
1223 + fname);
1224 + return NULL_RTX;
1225 + }
1226 +
1227 + emit_move_insn (target, op0);
1228 + pat = GEN_FCN (icode) (target, op1, op2);
1229 + if (!pat)
1230 + return 0;
1231 + emit_insn (pat);
1232 +
1233 + return target;
1234 + }
1235 + case AVR32_BUILTIN_MUSTR:
1236 + icode = CODE_FOR_mustr;
1237 + tmode = insn_data[icode].operand[0].mode;
1238 +
1239 + if (target == 0
1240 + || GET_MODE (target) != tmode
1241 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1242 + target = gen_reg_rtx (tmode);
1243 + pat = GEN_FCN (icode) (target);
1244 + if (!pat)
1245 + return 0;
1246 + emit_insn (pat);
1247 + return target;
1248 +
1249 + case AVR32_BUILTIN_MFSR:
1250 + icode = CODE_FOR_mfsr;
1251 + arg0 = CALL_EXPR_ARG (exp,0);
1252 + op0 = expand_normal (arg0);
1253 + tmode = insn_data[icode].operand[0].mode;
1254 + mode0 = insn_data[icode].operand[1].mode;
1255 +
1256 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1257 + {
1258 + error ("Parameter 1 to __builtin_mfsr must be a constant number");
1259 + }
1260 +
1261 + if (target == 0
1262 + || GET_MODE (target) != tmode
1263 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1264 + target = gen_reg_rtx (tmode);
1265 + pat = GEN_FCN (icode) (target, op0);
1266 + if (!pat)
1267 + return 0;
1268 + emit_insn (pat);
1269 + return target;
1270 + case AVR32_BUILTIN_MTSR:
1271 + icode = CODE_FOR_mtsr;
1272 + arg0 = CALL_EXPR_ARG (exp,0);
1273 + arg1 = CALL_EXPR_ARG (exp,1);
1274 + op0 = expand_normal (arg0);
1275 + op1 = expand_normal (arg1);
1276 + mode0 = insn_data[icode].operand[0].mode;
1277 + mode1 = insn_data[icode].operand[1].mode;
1278 +
1279 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1280 + {
1281 + error ("Parameter 1 to __builtin_mtsr must be a constant number");
1282 + return gen_reg_rtx (mode0);
1283 + }
1284 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1285 + op1 = copy_to_mode_reg (mode1, op1);
1286 + pat = GEN_FCN (icode) (op0, op1);
1287 + if (!pat)
1288 + return 0;
1289 + emit_insn (pat);
1290 + return NULL_RTX;
1291 + case AVR32_BUILTIN_MFDR:
1292 + icode = CODE_FOR_mfdr;
1293 + arg0 = CALL_EXPR_ARG (exp,0);
1294 + op0 = expand_normal (arg0);
1295 + tmode = insn_data[icode].operand[0].mode;
1296 + mode0 = insn_data[icode].operand[1].mode;
1297 +
1298 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1299 + {
1300 + error ("Parameter 1 to __builtin_mfdr must be a constant number");
1301 + }
1302 +
1303 + if (target == 0
1304 + || GET_MODE (target) != tmode
1305 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1306 + target = gen_reg_rtx (tmode);
1307 + pat = GEN_FCN (icode) (target, op0);
1308 + if (!pat)
1309 + return 0;
1310 + emit_insn (pat);
1311 + return target;
1312 + case AVR32_BUILTIN_MTDR:
1313 + icode = CODE_FOR_mtdr;
1314 + arg0 = CALL_EXPR_ARG (exp,0);
1315 + arg1 = CALL_EXPR_ARG (exp,1);
1316 + op0 = expand_normal (arg0);
1317 + op1 = expand_normal (arg1);
1318 + mode0 = insn_data[icode].operand[0].mode;
1319 + mode1 = insn_data[icode].operand[1].mode;
1320 +
1321 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1322 + {
1323 + error ("Parameter 1 to __builtin_mtdr must be a constant number");
1324 + return gen_reg_rtx (mode0);
1325 + }
1326 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1327 + op1 = copy_to_mode_reg (mode1, op1);
1328 + pat = GEN_FCN (icode) (op0, op1);
1329 + if (!pat)
1330 + return 0;
1331 + emit_insn (pat);
1332 + return NULL_RTX;
1333 + case AVR32_BUILTIN_CACHE:
1334 + icode = CODE_FOR_cache;
1335 + arg0 = CALL_EXPR_ARG (exp,0);
1336 + arg1 = CALL_EXPR_ARG (exp,1);
1337 + op0 = expand_normal (arg0);
1338 + op1 = expand_normal (arg1);
1339 + mode0 = insn_data[icode].operand[0].mode;
1340 + mode1 = insn_data[icode].operand[1].mode;
1341 +
1342 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1343 + {
1344 + error ("Parameter 2 to __builtin_cache must be a constant number");
1345 + return gen_reg_rtx (mode1);
1346 + }
1347 +
1348 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1349 + op0 = copy_to_mode_reg (mode0, op0);
1350 +
1351 + pat = GEN_FCN (icode) (op0, op1);
1352 + if (!pat)
1353 + return 0;
1354 + emit_insn (pat);
1355 + return NULL_RTX;
1356 + case AVR32_BUILTIN_SYNC:
1357 + case AVR32_BUILTIN_MUSFR:
1358 + case AVR32_BUILTIN_SSRF:
1359 + case AVR32_BUILTIN_CSRF:
1360 + {
1361 + const char *fname;
1362 + switch (fcode)
1363 + {
1364 + default:
1365 + case AVR32_BUILTIN_SYNC:
1366 + icode = CODE_FOR_sync;
1367 + fname = "sync";
1368 + break;
1369 + case AVR32_BUILTIN_MUSFR:
1370 + icode = CODE_FOR_musfr;
1371 + fname = "musfr";
1372 + break;
1373 + case AVR32_BUILTIN_SSRF:
1374 + icode = CODE_FOR_ssrf;
1375 + fname = "ssrf";
1376 + break;
1377 + case AVR32_BUILTIN_CSRF:
1378 + icode = CODE_FOR_csrf;
1379 + fname = "csrf";
1380 + break;
1381 + }
1382 +
1383 + arg0 = CALL_EXPR_ARG (exp,0);
1384 + op0 = expand_normal (arg0);
1385 + mode0 = insn_data[icode].operand[0].mode;
1386 +
1387 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1388 + {
1389 + if (icode == CODE_FOR_musfr)
1390 + op0 = copy_to_mode_reg (mode0, op0);
1391 + else
1392 + {
1393 + error ("Parameter to __builtin_%s is illegal.", fname);
1394 + return gen_reg_rtx (mode0);
1395 + }
1396 + }
1397 + pat = GEN_FCN (icode) (op0);
1398 + if (!pat)
1399 + return 0;
1400 + emit_insn (pat);
1401 + return NULL_RTX;
1402 + }
1403 + case AVR32_BUILTIN_TLBR:
1404 + icode = CODE_FOR_tlbr;
1405 + pat = GEN_FCN (icode) (NULL_RTX);
1406 + if (!pat)
1407 + return 0;
1408 + emit_insn (pat);
1409 + return NULL_RTX;
1410 + case AVR32_BUILTIN_TLBS:
1411 + icode = CODE_FOR_tlbs;
1412 + pat = GEN_FCN (icode) (NULL_RTX);
1413 + if (!pat)
1414 + return 0;
1415 + emit_insn (pat);
1416 + return NULL_RTX;
1417 + case AVR32_BUILTIN_TLBW:
1418 + icode = CODE_FOR_tlbw;
1419 + pat = GEN_FCN (icode) (NULL_RTX);
1420 + if (!pat)
1421 + return 0;
1422 + emit_insn (pat);
1423 + return NULL_RTX;
1424 + case AVR32_BUILTIN_BREAKPOINT:
1425 + icode = CODE_FOR_breakpoint;
1426 + pat = GEN_FCN (icode) (NULL_RTX);
1427 + if (!pat)
1428 + return 0;
1429 + emit_insn (pat);
1430 + return NULL_RTX;
1431 + case AVR32_BUILTIN_XCHG:
1432 + icode = CODE_FOR_sync_lock_test_and_setsi;
1433 + arg0 = CALL_EXPR_ARG (exp,0);
1434 + arg1 = CALL_EXPR_ARG (exp,1);
1435 + op0 = expand_normal (arg0);
1436 + op1 = expand_normal (arg1);
1437 + tmode = insn_data[icode].operand[0].mode;
1438 + mode0 = insn_data[icode].operand[1].mode;
1439 + mode1 = insn_data[icode].operand[2].mode;
1440 +
1441 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1442 + {
1443 + op1 = copy_to_mode_reg (mode1, op1);
1444 + }
1445 +
1446 + op0 = force_reg (GET_MODE (op0), op0);
1447 + op0 = gen_rtx_MEM (GET_MODE (op0), op0);
1448 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1449 + {
1450 + error
1451 + ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
1452 + }
1453 +
1454 + if (target == 0
1455 + || GET_MODE (target) != tmode
1456 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1457 + target = gen_reg_rtx (tmode);
1458 + pat = GEN_FCN (icode) (target, op0, op1);
1459 + if (!pat)
1460 + return 0;
1461 + emit_insn (pat);
1462 + return target;
1463 + case AVR32_BUILTIN_LDXI:
1464 + icode = CODE_FOR_ldxi;
1465 + arg0 = CALL_EXPR_ARG (exp,0);
1466 + arg1 = CALL_EXPR_ARG (exp,1);
1467 + arg2 = CALL_EXPR_ARG (exp,2);
1468 + op0 = expand_normal (arg0);
1469 + op1 = expand_normal (arg1);
1470 + op2 = expand_normal (arg2);
1471 + tmode = insn_data[icode].operand[0].mode;
1472 + mode0 = insn_data[icode].operand[1].mode;
1473 + mode1 = insn_data[icode].operand[2].mode;
1474 +
1475 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1476 + {
1477 + op0 = copy_to_mode_reg (mode0, op0);
1478 + }
1479 +
1480 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1481 + {
1482 + op1 = copy_to_mode_reg (mode1, op1);
1483 + }
1484 +
1485 + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
1486 + {
1487 + error
1488 + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
1489 + return gen_reg_rtx (mode0);
1490 + }
1491 +
1492 + if (target == 0
1493 + || GET_MODE (target) != tmode
1494 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1495 + target = gen_reg_rtx (tmode);
1496 + pat = GEN_FCN (icode) (target, op0, op1, op2);
1497 + if (!pat)
1498 + return 0;
1499 + emit_insn (pat);
1500 + return target;
1501 + case AVR32_BUILTIN_BSWAP16:
1502 + {
1503 + icode = CODE_FOR_bswap_16;
1504 + arg0 = CALL_EXPR_ARG (exp,0);
1505 + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
1506 + mode0 = insn_data[icode].operand[1].mode;
1507 + if (arg0_mode != mode0)
1508 + arg0 = build1 (NOP_EXPR,
1509 + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
1510 +
1511 + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
1512 + tmode = insn_data[icode].operand[0].mode;
1513 +
1514 +
1515 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1516 + {
1517 + if ( CONST_INT_P (op0) )
1518 + {
1519 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
1520 + ((INTVAL (op0)&0xff00) >> 8) );
1521 + /* Sign extend 16-bit value to host wide int */
1522 + val <<= (HOST_BITS_PER_WIDE_INT - 16);
1523 + val >>= (HOST_BITS_PER_WIDE_INT - 16);
1524 + op0 = GEN_INT(val);
1525 + if (target == 0
1526 + || GET_MODE (target) != tmode
1527 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1528 + target = gen_reg_rtx (tmode);
1529 + emit_move_insn(target, op0);
1530 + return target;
1531 + }
1532 + else
1533 + op0 = copy_to_mode_reg (mode0, op0);
1534 + }
1535 +
1536 + if (target == 0
1537 + || GET_MODE (target) != tmode
1538 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1539 + {
1540 + target = gen_reg_rtx (tmode);
1541 + }
1542 +
1543 +
1544 + pat = GEN_FCN (icode) (target, op0);
1545 + if (!pat)
1546 + return 0;
1547 + emit_insn (pat);
1548 +
1549 + return target;
1550 + }
1551 + case AVR32_BUILTIN_BSWAP32:
1552 + {
1553 + icode = CODE_FOR_bswap_32;
1554 + arg0 = CALL_EXPR_ARG (exp,0);
1555 + op0 = expand_normal (arg0);
1556 + tmode = insn_data[icode].operand[0].mode;
1557 + mode0 = insn_data[icode].operand[1].mode;
1558 +
1559 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1560 + {
1561 + if ( CONST_INT_P (op0) )
1562 + {
1563 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
1564 + ((INTVAL (op0)&0x0000ff00) << 8) |
1565 + ((INTVAL (op0)&0x00ff0000) >> 8) |
1566 + ((INTVAL (op0)&0xff000000) >> 24) );
1567 + /* Sign extend 32-bit value to host wide int */
1568 + val <<= (HOST_BITS_PER_WIDE_INT - 32);
1569 + val >>= (HOST_BITS_PER_WIDE_INT - 32);
1570 + op0 = GEN_INT(val);
1571 + if (target == 0
1572 + || GET_MODE (target) != tmode
1573 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1574 + target = gen_reg_rtx (tmode);
1575 + emit_move_insn(target, op0);
1576 + return target;
1577 + }
1578 + else
1579 + op0 = copy_to_mode_reg (mode0, op0);
1580 + }
1581 +
1582 + if (target == 0
1583 + || GET_MODE (target) != tmode
1584 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1585 + target = gen_reg_rtx (tmode);
1586 +
1587 +
1588 + pat = GEN_FCN (icode) (target, op0);
1589 + if (!pat)
1590 + return 0;
1591 + emit_insn (pat);
1592 +
1593 + return target;
1594 + }
1595 + case AVR32_BUILTIN_MVCR_W:
1596 + case AVR32_BUILTIN_MVCR_D:
1597 + {
1598 + arg0 = CALL_EXPR_ARG (exp,0);
1599 + arg1 = CALL_EXPR_ARG (exp,1);
1600 + op0 = expand_normal (arg0);
1601 + op1 = expand_normal (arg1);
1602 +
1603 + if (fcode == AVR32_BUILTIN_MVCR_W)
1604 + icode = CODE_FOR_mvcrsi;
1605 + else
1606 + icode = CODE_FOR_mvcrdi;
1607 +
1608 + tmode = insn_data[icode].operand[0].mode;
1609 +
1610 + if (target == 0
1611 + || GET_MODE (target) != tmode
1612 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1613 + target = gen_reg_rtx (tmode);
1614 +
1615 + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
1616 + {
1617 + error
1618 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1619 + error ("Number should be between 0 and 7.");
1620 + return NULL_RTX;
1621 + }
1622 +
1623 + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
1624 + {
1625 + error
1626 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1627 + error ("Number should be between 0 and 15.");
1628 + return NULL_RTX;
1629 + }
1630 +
1631 + pat = GEN_FCN (icode) (target, op0, op1);
1632 + if (!pat)
1633 + return 0;
1634 + emit_insn (pat);
1635 +
1636 + return target;
1637 + }
1638 + case AVR32_BUILTIN_MACSATHH_W:
1639 + case AVR32_BUILTIN_MACWH_D:
1640 + case AVR32_BUILTIN_MACHH_D:
1641 + {
1642 + arg0 = CALL_EXPR_ARG (exp,0);
1643 + arg1 = CALL_EXPR_ARG (exp,1);
1644 + arg2 = CALL_EXPR_ARG (exp,2);
1645 + op0 = expand_normal (arg0);
1646 + op1 = expand_normal (arg1);
1647 + op2 = expand_normal (arg2);
1648 +
1649 + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
1650 + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
1651 + CODE_FOR_machh_d);
1652 +
1653 + tmode = insn_data[icode].operand[0].mode;
1654 + mode0 = insn_data[icode].operand[1].mode;
1655 + mode1 = insn_data[icode].operand[2].mode;
1656 +
1657 +
1658 + if (!target
1659 + || GET_MODE (target) != tmode
1660 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1661 + target = gen_reg_rtx (tmode);
1662 +
1663 + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
1664 + {
1665 + /* If op0 is already a reg we must cast it to the correct mode. */
1666 + if (REG_P (op0))
1667 + op0 = convert_to_mode (tmode, op0, 1);
1668 + else
1669 + op0 = copy_to_mode_reg (tmode, op0);
1670 + }
1671 +
1672 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
1673 + {
1674 + /* If op1 is already a reg we must cast it to the correct mode. */
1675 + if (REG_P (op1))
1676 + op1 = convert_to_mode (mode0, op1, 1);
1677 + else
1678 + op1 = copy_to_mode_reg (mode0, op1);
1679 + }
1680 +
1681 + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
1682 + {
1683 + /* If op2 is already a reg we must cast it to the correct mode. */
1684 + if (REG_P (op2))
1685 + op2 = convert_to_mode (mode1, op2, 1);
1686 + else
1687 + op2 = copy_to_mode_reg (mode1, op2);
1688 + }
1689 +
1690 + emit_move_insn (target, op0);
1691 +
1692 + pat = GEN_FCN (icode) (target, op1, op2);
1693 + if (!pat)
1694 + return 0;
1695 + emit_insn (pat);
1696 + return target;
1697 + }
1698 + case AVR32_BUILTIN_MVRC_W:
1699 + case AVR32_BUILTIN_MVRC_D:
1700 + {
1701 + arg0 = CALL_EXPR_ARG (exp,0);
1702 + arg1 = CALL_EXPR_ARG (exp,1);
1703 + arg2 = CALL_EXPR_ARG (exp,2);
1704 + op0 = expand_normal (arg0);
1705 + op1 = expand_normal (arg1);
1706 + op2 = expand_normal (arg2);
1707 +
1708 + if (fcode == AVR32_BUILTIN_MVRC_W)
1709 + icode = CODE_FOR_mvrcsi;
1710 + else
1711 + icode = CODE_FOR_mvrcdi;
1712 +
1713 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1714 + {
1715 + error ("Parameter 1 is not a valid coprocessor number.");
1716 + error ("Number should be between 0 and 7.");
1717 + return NULL_RTX;
1718 + }
1719 +
1720 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1721 + {
1722 + error ("Parameter 2 is not a valid coprocessor register number.");
1723 + error ("Number should be between 0 and 15.");
1724 + return NULL_RTX;
1725 + }
1726 +
1727 + if (GET_CODE (op2) == CONST_INT
1728 + || GET_CODE (op2) == CONST
1729 + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
1730 + {
1731 + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
1732 + }
1733 +
1734 + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
1735 + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
1736 +
1737 +
1738 + pat = GEN_FCN (icode) (op0, op1, op2);
1739 + if (!pat)
1740 + return 0;
1741 + emit_insn (pat);
1742 +
1743 + return NULL_RTX;
1744 + }
1745 + case AVR32_BUILTIN_COP:
1746 + {
1747 + rtx op3, op4;
1748 + tree arg3, arg4;
1749 + icode = CODE_FOR_cop;
1750 + arg0 = CALL_EXPR_ARG (exp,0);
1751 + arg1 = CALL_EXPR_ARG (exp,1);
1752 + arg2 = CALL_EXPR_ARG (exp,2);
1753 + arg3 = CALL_EXPR_ARG (exp,3);
1754 + arg4 = CALL_EXPR_ARG (exp,4);
1755 + op0 = expand_normal (arg0);
1756 + op1 = expand_normal (arg1);
1757 + op2 = expand_normal (arg2);
1758 + op3 = expand_normal (arg3);
1759 + op4 = expand_normal (arg4);
1760 +
1761 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1762 + {
1763 + error
1764 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1765 + error ("Number should be between 0 and 7.");
1766 + return NULL_RTX;
1767 + }
1768 +
1769 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1770 + {
1771 + error
1772 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1773 + error ("Number should be between 0 and 15.");
1774 + return NULL_RTX;
1775 + }
1776 +
1777 + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
1778 + {
1779 + error
1780 + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
1781 + error ("Number should be between 0 and 15.");
1782 + return NULL_RTX;
1783 + }
1784 +
1785 + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
1786 + {
1787 + error
1788 + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
1789 + error ("Number should be between 0 and 15.");
1790 + return NULL_RTX;
1791 + }
1792 +
1793 + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
1794 + {
1795 + error
1796 + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
1797 + error ("Number should be between 0 and 127.");
1798 + return NULL_RTX;
1799 + }
1800 +
1801 + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
1802 + if (!pat)
1803 + return 0;
1804 + emit_insn (pat);
1805 +
1806 + return target;
1807 + }
1808 +
1809 + }
1810 +
1811 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1812 + if (d->code == fcode)
1813 + return avr32_expand_binop_builtin (d->icode, exp, target);
1814 +
1815 +
1816 + /* @@@ Should really do something sensible here. */
1817 + return NULL_RTX;
1818 +}
1819 +
1820 +
1821 +/* Handle an "interrupt" or "isr" attribute;
1822 + arguments as in struct attribute_spec.handler. */
1823 +
1824 +static tree
1825 +avr32_handle_isr_attribute (tree * node, tree name, tree args,
1826 + int flags, bool * no_add_attrs)
1827 +{
1828 + if (DECL_P (*node))
1829 + {
1830 + if (TREE_CODE (*node) != FUNCTION_DECL)
1831 + {
1832 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1833 + IDENTIFIER_POINTER (name));
1834 + *no_add_attrs = true;
1835 + }
1836 + /* FIXME: the argument if any is checked for type attributes; should it
1837 + be checked for decl ones? */
1838 + }
1839 + else
1840 + {
1841 + if (TREE_CODE (*node) == FUNCTION_TYPE
1842 + || TREE_CODE (*node) == METHOD_TYPE)
1843 + {
1844 + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
1845 + {
1846 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1847 + *no_add_attrs = true;
1848 + }
1849 + }
1850 + else if (TREE_CODE (*node) == POINTER_TYPE
1851 + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
1852 + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
1853 + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
1854 + {
1855 + *node = build_variant_type_copy (*node);
1856 + TREE_TYPE (*node) = build_type_attribute_variant
1857 + (TREE_TYPE (*node),
1858 + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
1859 + *no_add_attrs = true;
1860 + }
1861 + else
1862 + {
1863 + /* Possibly pass this attribute on from the type to a decl. */
1864 + if (flags & ((int) ATTR_FLAG_DECL_NEXT
1865 + | (int) ATTR_FLAG_FUNCTION_NEXT
1866 + | (int) ATTR_FLAG_ARRAY_NEXT))
1867 + {
1868 + *no_add_attrs = true;
1869 + return tree_cons (name, args, NULL_TREE);
1870 + }
1871 + else
1872 + {
1873 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
1874 + }
1875 + }
1876 + }
1877 +
1878 + return NULL_TREE;
1879 +}
1880 +
1881 +/* Handle an attribute requiring a FUNCTION_DECL;
1882 + arguments as in struct attribute_spec.handler. */
1883 +static tree
1884 +avr32_handle_fndecl_attribute (tree * node, tree name,
1885 + tree args ATTRIBUTE_UNUSED,
1886 + int flags ATTRIBUTE_UNUSED,
1887 + bool * no_add_attrs)
1888 +{
1889 + if (TREE_CODE (*node) != FUNCTION_DECL)
1890 + {
1891 + warning (OPT_Wattributes,"%qs attribute only applies to functions",
1892 + IDENTIFIER_POINTER (name));
1893 + *no_add_attrs = true;
1894 + }
1895 +
1896 + return NULL_TREE;
1897 +}
1898 +
1899 +
1900 +/* Handle an acall attribute;
1901 + arguments as in struct attribute_spec.handler. */
1902 +
1903 +static tree
1904 +avr32_handle_acall_attribute (tree * node, tree name,
1905 + tree args ATTRIBUTE_UNUSED,
1906 + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
1907 +{
1908 + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
1909 + {
1910 + warning (OPT_Wattributes,"`%s' attribute not yet supported...",
1911 + IDENTIFIER_POINTER (name));
1912 + *no_add_attrs = true;
1913 + return NULL_TREE;
1914 + }
1915 +
1916 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
1917 + IDENTIFIER_POINTER (name));
1918 + *no_add_attrs = true;
1919 + return NULL_TREE;
1920 +}
1921 +
1922 +
1923 +/* Return 0 if the attributes for two types are incompatible, 1 if they
1924 + are compatible, and 2 if they are nearly compatible (which causes a
1925 + warning to be generated). */
1926 +
1927 +static int
1928 +avr32_comp_type_attributes (tree type1, tree type2)
1929 +{
1930 + int acall1, acall2, isr1, isr2, naked1, naked2;
1931 +
1932 + /* Check for mismatch of non-default calling convention. */
1933 + if (TREE_CODE (type1) != FUNCTION_TYPE)
1934 + return 1;
1935 +
1936 + /* Check for mismatched call attributes. */
1937 + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
1938 + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
1939 + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
1940 + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
1941 + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
1942 + if (!isr1)
1943 + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
1944 +
1945 + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
1946 + if (!isr2)
1947 + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
1948 +
1949 + if ((acall1 && isr2)
1950 + || (acall2 && isr1) || (naked1 && isr2) || (naked2 && isr1))
1951 + return 0;
1952 +
1953 + return 1;
1954 +}
1955 +
1956 +
1957 +/* Computes the type of the current function. */
1958 +
1959 +static unsigned long
1960 +avr32_compute_func_type (void)
1961 +{
1962 + unsigned long type = AVR32_FT_UNKNOWN;
1963 + tree a;
1964 + tree attr;
1965 +
1966 + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
1967 + abort ();
1968 +
1969 + /* Decide if the current function is volatile. Such functions never
1970 + return, and many memory cycles can be saved by not storing register
1971 + values that will never be needed again. This optimization was added to
1972 + speed up context switching in a kernel application. */
1973 + if (optimize > 0
1974 + && TREE_NOTHROW (current_function_decl)
1975 + && TREE_THIS_VOLATILE (current_function_decl))
1976 + type |= AVR32_FT_VOLATILE;
1977 +
1978 + if (cfun->static_chain_decl != NULL)
1979 + type |= AVR32_FT_NESTED;
1980 +
1981 + attr = DECL_ATTRIBUTES (current_function_decl);
1982 +
1983 + a = lookup_attribute ("isr", attr);
1984 + if (a == NULL_TREE)
1985 + a = lookup_attribute ("interrupt", attr);
1986 +
1987 + if (a == NULL_TREE)
1988 + type |= AVR32_FT_NORMAL;
1989 + else
1990 + type |= avr32_isr_value (TREE_VALUE (a));
1991 +
1992 +
1993 + a = lookup_attribute ("acall", attr);
1994 + if (a != NULL_TREE)
1995 + type |= AVR32_FT_ACALL;
1996 +
1997 + a = lookup_attribute ("naked", attr);
1998 + if (a != NULL_TREE)
1999 + type |= AVR32_FT_NAKED;
2000 +
2001 + return type;
2002 +}
2003 +
2004 +/* Returns the type of the current function. */
2005 +
2006 +static unsigned long
2007 +avr32_current_func_type (void)
2008 +{
2009 + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
2010 + cfun->machine->func_type = avr32_compute_func_type ();
2011 +
2012 + return cfun->machine->func_type;
2013 +}
2014 +
2015 +/*
2016 + This target hook should return true if we should not pass type solely
2017 + in registers. The file expr.h defines a definition that is usually appropriate,
2018 + refer to expr.h for additional documentation.
2019 +*/
2020 +bool
2021 +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
2022 +{
2023 + if (type && AGGREGATE_TYPE_P (type)
2024 + /* If the alignment is less than the size then pass in the struct on
2025 + the stack. */
2026 + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
2027 + (unsigned int) int_size_in_bytes (type))
2028 + /* If we support unaligned word accesses then structs of size 4 and 8
2029 + can have any alignment and still be passed in registers. */
2030 + && !(TARGET_UNALIGNED_WORD
2031 + && (int_size_in_bytes (type) == 4
2032 + || int_size_in_bytes (type) == 8))
2033 + /* Double word structs need only a word alignment. */
2034 + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
2035 + return true;
2036 +
2037 + if (type && AGGREGATE_TYPE_P (type)
2038 + /* Structs of size 3,5,6,7 are always passed in registers. */
2039 + && (int_size_in_bytes (type) == 3
2040 + || int_size_in_bytes (type) == 5
2041 + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
2042 + return true;
2043 +
2044 +
2045 + return (type && TREE_ADDRESSABLE (type));
2046 +}
2047 +
2048 +
2049 +bool
2050 +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
2051 +{
2052 + return true;
2053 +}
2054 +
2055 +/*
2056 + This target hook should return true if an argument at the position indicated
2057 + by cum should be passed by reference. This predicate is queried after target
2058 + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
2059 +
2060 + If the hook returns true, a copy of that argument is made in memory and a
2061 + pointer to the argument is passed instead of the argument itself. The pointer
2062 + is passed in whatever way is appropriate for passing a pointer to that type.
2063 +*/
2064 +bool
2065 +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
2066 + enum machine_mode mode ATTRIBUTE_UNUSED,
2067 + tree type, bool named ATTRIBUTE_UNUSED)
2068 +{
2069 + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
2070 +}
2071 +
2072 +static int
2073 +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
2074 + enum machine_mode mode ATTRIBUTE_UNUSED,
2075 + tree type ATTRIBUTE_UNUSED,
2076 + bool named ATTRIBUTE_UNUSED)
2077 +{
2078 + return 0;
2079 +}
2080 +
2081 +
2082 +struct gcc_target targetm = TARGET_INITIALIZER;
2083 +
2084 +/*
2085 + Table used to convert from register number in the assembler instructions and
2086 + the register numbers used in gcc.
2087 +*/
2088 +const int avr32_function_arg_reglist[] = {
2089 + INTERNAL_REGNUM (12),
2090 + INTERNAL_REGNUM (11),
2091 + INTERNAL_REGNUM (10),
2092 + INTERNAL_REGNUM (9),
2093 + INTERNAL_REGNUM (8)
2094 +};
2095 +
2096 +rtx avr32_compare_op0 = NULL_RTX;
2097 +rtx avr32_compare_op1 = NULL_RTX;
2098 +rtx avr32_compare_operator = NULL_RTX;
2099 +rtx avr32_acc_cache = NULL_RTX;
2100 +
2101 +/*
2102 + Returns nonzero if it is allowed to store a value of mode mode in hard
2103 + register number regno.
2104 +*/
2105 +int
2106 +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
2107 +{
2108 + /* We allow only float modes in the fp-registers */
2109 + if (regnr >= FIRST_FP_REGNUM
2110 + && regnr <= LAST_FP_REGNUM && GET_MODE_CLASS (mode) != MODE_FLOAT)
2111 + {
2112 + return 0;
2113 + }
2114 +
2115 + switch (mode)
2116 + {
2117 + case DImode: /* long long */
2118 + case DFmode: /* double */
2119 + case SCmode: /* __complex__ float */
2120 + case CSImode: /* __complex__ int */
2121 + if (regnr < 4)
2122 + { /* long long int not supported in r12, sp, lr
2123 + or pc. */
2124 + return 0;
2125 + }
2126 + else
2127 + {
2128 +	  if (regnr % 2)	/* long long int has to be referred to in even
2129 + registers. */
2130 + return 0;
2131 + else
2132 + return 1;
2133 + }
2134 + case CDImode: /* __complex__ long long */
2135 + case DCmode: /* __complex__ double */
2136 + case TImode: /* 16 bytes */
2137 + if (regnr < 7)
2138 + return 0;
2139 + else if (regnr % 2)
2140 + return 0;
2141 + else
2142 + return 1;
2143 + default:
2144 + return 1;
2145 + }
2146 +}
2147 +
2148 +
2149 +int
2150 +avr32_rnd_operands (rtx add, rtx shift)
2151 +{
2152 + if (GET_CODE (shift) == CONST_INT &&
2153 + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
2154 + {
2155 + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
2156 + return TRUE;
2157 + }
2158 +
2159 + return FALSE;
2160 +}
2161 +
2162 +
2163 +
2164 +int
2165 +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
2166 +{
2167 + switch (c)
2168 + {
2169 + case 'K':
2170 + case 'I':
2171 + {
2172 + HOST_WIDE_INT min_value = 0, max_value = 0;
2173 + char size_str[3];
2174 + int const_size;
2175 +
2176 + size_str[0] = str[2];
2177 + size_str[1] = str[3];
2178 + size_str[2] = '\0';
2179 + const_size = atoi (size_str);
2180 +
2181 + if (toupper (str[1]) == 'U')
2182 + {
2183 + min_value = 0;
2184 + max_value = (1 << const_size) - 1;
2185 + }
2186 + else if (toupper (str[1]) == 'S')
2187 + {
2188 + min_value = -(1 << (const_size - 1));
2189 + max_value = (1 << (const_size - 1)) - 1;
2190 + }
2191 +
2192 + if (c == 'I')
2193 + {
2194 + value = -value;
2195 + }
2196 +
2197 + if (value >= min_value && value <= max_value)
2198 + {
2199 + return 1;
2200 + }
2201 + break;
2202 + }
2203 + case 'M':
2204 + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
2205 + case 'J':
2206 + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
2207 + }
2208 +
2209 + return 0;
2210 +}
2211 +
2212 +
2213 +/*Compute mask of which floating-point registers needs saving upon
2214 + entry to this function*/
2215 +static unsigned long
2216 +avr32_compute_save_fp_reg_mask (void)
2217 +{
2218 + unsigned long func_type = avr32_current_func_type ();
2219 + unsigned int save_reg_mask = 0;
2220 + unsigned int reg;
2221 + unsigned int max_reg = 7;
2222 + int save_all_call_used_regs = FALSE;
2223 +
2224 + /* This only applies for hardware floating-point implementation. */
2225 + if (!TARGET_HARD_FLOAT)
2226 + return 0;
2227 +
2228 + if (IS_INTERRUPT (func_type))
2229 + {
2230 +
2231 + /* Interrupt functions must not corrupt any registers, even call
2232 + clobbered ones. If this is a leaf function we can just examine the
2233 + registers used by the RTL, but otherwise we have to assume that
2234 + whatever function is called might clobber anything, and so we have
2235 + to save all the call-clobbered registers as well. */
2236 + max_reg = 13;
2237 + save_all_call_used_regs = !current_function_is_leaf;
2238 + }
2239 +
2240 +  /* All used registers must be saved */
2241 + for (reg = 0; reg <= max_reg; reg++)
2242 + if (df_regs_ever_live_p (INTERNAL_FP_REGNUM (reg))
2243 + || (save_all_call_used_regs
2244 + && call_used_regs[INTERNAL_FP_REGNUM (reg)]))
2245 + save_reg_mask |= (1 << reg);
2246 +
2247 + return save_reg_mask;
2248 +}
2249 +
2250 +/*Compute mask of registers which needs saving upon function entry */
2251 +static unsigned long
2252 +avr32_compute_save_reg_mask (int push)
2253 +{
2254 + unsigned long func_type;
2255 + unsigned int save_reg_mask = 0;
2256 + unsigned int reg;
2257 +
2258 + func_type = avr32_current_func_type ();
2259 +
2260 + if (IS_INTERRUPT (func_type))
2261 + {
2262 + unsigned int max_reg = 12;
2263 +
2264 +
2265 + /* Get the banking scheme for the interrupt */
2266 + switch (func_type)
2267 + {
2268 + case AVR32_FT_ISR_FULL:
2269 + max_reg = 0;
2270 + break;
2271 + case AVR32_FT_ISR_HALF:
2272 + max_reg = 7;
2273 + break;
2274 + case AVR32_FT_ISR_NONE:
2275 + max_reg = 12;
2276 + break;
2277 + }
2278 +
2279 + /* Interrupt functions must not corrupt any registers, even call
2280 + clobbered ones. If this is a leaf function we can just examine the
2281 + registers used by the RTL, but otherwise we have to assume that
2282 + whatever function is called might clobber anything, and so we have
2283 + to save all the call-clobbered registers as well. */
2284 +
2285 + /* Need not push the registers r8-r12 for AVR32A architectures, as this
2286 +         is automatically done in hardware. We also do not have any shadow
2287 + registers. */
2288 + if (TARGET_UARCH_AVR32A)
2289 + {
2290 + max_reg = 7;
2291 + func_type = AVR32_FT_ISR_NONE;
2292 + }
2293 +
2294 +      /* All registers which are used and are not shadowed must be saved */
2295 + for (reg = 0; reg <= max_reg; reg++)
2296 + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
2297 + || (!current_function_is_leaf
2298 + && call_used_regs[INTERNAL_REGNUM (reg)]))
2299 + save_reg_mask |= (1 << reg);
2300 +
2301 + /* Check LR */
2302 + if ((df_regs_ever_live_p (LR_REGNUM)
2303 + || !current_function_is_leaf || frame_pointer_needed)
2304 + /* Only non-shadowed register models */
2305 + && (func_type == AVR32_FT_ISR_NONE))
2306 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2307 +
2308 + /* Make sure that the GOT register is pushed. */
2309 + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
2310 + && current_function_uses_pic_offset_table)
2311 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2312 +
2313 + }
2314 + else
2315 + {
2316 + int use_pushm = optimize_size;
2317 +
2318 + /* In the normal case we only need to save those registers which are
2319 + call saved and which are used by this function. */
2320 + for (reg = 0; reg <= 7; reg++)
2321 + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
2322 + && !call_used_regs[INTERNAL_REGNUM (reg)])
2323 + save_reg_mask |= (1 << reg);
2324 +
2325 + /* Make sure that the GOT register is pushed. */
2326 + if (current_function_uses_pic_offset_table)
2327 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2328 +
2329 +
2330 + /* If we optimize for size and do not have anonymous arguments: use
2331 + popm/pushm always */
2332 + if (use_pushm)
2333 + {
2334 + if ((save_reg_mask & (1 << 0))
2335 + || (save_reg_mask & (1 << 1))
2336 + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
2337 + save_reg_mask |= 0xf;
2338 +
2339 + if ((save_reg_mask & (1 << 4))
2340 + || (save_reg_mask & (1 << 5))
2341 + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
2342 + save_reg_mask |= 0xf0;
2343 +
2344 + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
2345 + save_reg_mask |= 0x300;
2346 + }
2347 +
2348 +
2349 + /* Check LR */
2350 + if ((df_regs_ever_live_p (LR_REGNUM)
2351 + || !current_function_is_leaf
2352 + || (optimize_size
2353 + && save_reg_mask
2354 + && !current_function_calls_eh_return) || frame_pointer_needed))
2355 + {
2356 + if (push
2357 + /* Never pop LR into PC for functions which
2358 + calls __builtin_eh_return, since we need to
2359 + fix the SP after the restoring of the registers
2360 + and before returning. */
2361 + || current_function_calls_eh_return)
2362 + {
2363 + /* Push/Pop LR */
2364 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2365 + }
2366 + else
2367 + {
2368 + /* Pop PC */
2369 + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
2370 + }
2371 + }
2372 + }
2373 +
2374 +
2375 + /* Save registers so the exception handler can modify them. */
2376 + if (current_function_calls_eh_return)
2377 + {
2378 + unsigned int i;
2379 +
2380 + for (i = 0;; i++)
2381 + {
2382 + reg = EH_RETURN_DATA_REGNO (i);
2383 + if (reg == INVALID_REGNUM)
2384 + break;
2385 + save_reg_mask |= 1 << ASM_REGNUM (reg);
2386 + }
2387 + }
2388 +
2389 + return save_reg_mask;
2390 +}
2391 +
2392 +/*Compute total size in bytes of all saved registers */
2393 +static int
2394 +avr32_get_reg_mask_size (int reg_mask)
2395 +{
2396 + int reg, size;
2397 + size = 0;
2398 +
2399 + for (reg = 0; reg <= 15; reg++)
2400 + if (reg_mask & (1 << reg))
2401 + size += 4;
2402 +
2403 + return size;
2404 +}
2405 +
2406 +/*Get a register from one of the registers which are saved onto the stack
2407 + upon function entry */
2408 +
2409 +static int
2410 +avr32_get_saved_reg (int save_reg_mask)
2411 +{
2412 + unsigned int reg;
2413 +
2414 + /* Find the first register which is saved in the saved_reg_mask */
2415 + for (reg = 0; reg <= 15; reg++)
2416 + if (save_reg_mask & (1 << reg))
2417 + return reg;
2418 +
2419 + return -1;
2420 +}
2421 +
2422 +/* Return 1 if it is possible to return using a single instruction. */
2423 +int
2424 +avr32_use_return_insn (int iscond)
2425 +{
2426 + unsigned int func_type = avr32_current_func_type ();
2427 + unsigned long saved_int_regs;
2428 + unsigned long saved_fp_regs;
2429 +
2430 + /* Never use a return instruction before reload has run. */
2431 + if (!reload_completed)
2432 + return 0;
2433 +
2434 + /* Must adjust the stack for vararg functions. */
2435 + if (current_function_args_info.uses_anonymous_args)
2436 + return 0;
2437 +
2438 +  /* If there is a stack adjustment. */
2439 + if (get_frame_size ())
2440 + return 0;
2441 +
2442 + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
2443 + saved_fp_regs = avr32_compute_save_fp_reg_mask ();
2444 +
2445 +  /* Functions which have saved fp-regs on the stack cannot return in
2446 +     one instruction */
2447 + if (saved_fp_regs)
2448 + return 0;
2449 +
2450 + /* Conditional returns can not be performed in one instruction if we need
2451 + to restore registers from the stack */
2452 + if (iscond && saved_int_regs)
2453 + return 0;
2454 +
2455 + /* Conditional return can not be used for interrupt handlers. */
2456 + if (iscond && IS_INTERRUPT (func_type))
2457 + return 0;
2458 +
2459 + /* For interrupt handlers which needs to pop registers */
2460 + if (saved_int_regs && IS_INTERRUPT (func_type))
2461 + return 0;
2462 +
2463 +
2464 + /* If there are saved registers but the LR isn't saved, then we need two
2465 + instructions for the return. */
2466 + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2467 + return 0;
2468 +
2469 +
2470 + return 1;
2471 +}
2472 +
2473 +
2474 +/*Generate some function prologue info in the assembly file*/
2475 +
2476 +void
2477 +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
2478 +{
2479 + if (IS_NAKED (avr32_current_func_type ()))
2480 + fprintf (f,
2481 + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
2482 +
2483 + if (IS_INTERRUPT (avr32_current_func_type ()))
2484 + {
2485 + switch (avr32_current_func_type ())
2486 + {
2487 + case AVR32_FT_ISR_FULL:
2488 + fprintf (f,
2489 + "\t# Interrupt Function: Fully shadowed register file\n");
2490 + break;
2491 + case AVR32_FT_ISR_HALF:
2492 + fprintf (f,
2493 + "\t# Interrupt Function: Half shadowed register file\n");
2494 + break;
2495 + default:
2496 + case AVR32_FT_ISR_NONE:
2497 + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
2498 + break;
2499 + }
2500 + }
2501 +
2502 +
2503 + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
2504 + current_function_args_size, frame_size,
2505 + current_function_pretend_args_size);
2506 +
2507 + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
2508 + frame_pointer_needed, current_function_is_leaf);
2509 +
2510 + fprintf (f, "\t# uses_anonymous_args = %i\n",
2511 + current_function_args_info.uses_anonymous_args);
2512 + if (current_function_calls_eh_return)
2513 + fprintf (f, "\t# Calls __builtin_eh_return.\n");
2514 +
2515 +}
2516 +
2517 +
2518 +/* Generate and emit an insn that we will recognize as a pushm or stm.
2519 + Unfortunately, since this insn does not reflect very well the actual
2520 + semantics of the operation, we need to annotate the insn for the benefit
2521 + of DWARF2 frame unwind information. */
2522 +
2523 +int avr32_convert_to_reglist16 (int reglist8_vect);
2524 +
2525 +static rtx
2526 +emit_multi_reg_push (int reglist, int usePUSHM)
2527 +{
2528 + rtx insn;
2529 + rtx dwarf;
2530 + rtx tmp;
2531 + rtx reg;
2532 + int i;
2533 + int nr_regs;
2534 + int index = 0;
2535 +
2536 + if (usePUSHM)
2537 + {
2538 + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
2539 + reglist = avr32_convert_to_reglist16 (reglist);
2540 + }
2541 + else
2542 + {
2543 + insn = emit_insn (gen_stm (stack_pointer_rtx,
2544 + gen_rtx_CONST_INT (SImode, reglist),
2545 + gen_rtx_CONST_INT (SImode, 1)));
2546 + }
2547 +
2548 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2549 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2550 +
2551 + for (i = 15; i >= 0; i--)
2552 + {
2553 + if (reglist & (1 << i))
2554 + {
2555 + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
2556 + tmp = gen_rtx_SET (VOIDmode,
2557 + gen_rtx_MEM (SImode,
2558 + plus_constant (stack_pointer_rtx,
2559 + 4 * index)), reg);
2560 + RTX_FRAME_RELATED_P (tmp) = 1;
2561 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2562 + }
2563 + }
2564 +
2565 + tmp = gen_rtx_SET (SImode,
2566 + stack_pointer_rtx,
2567 + gen_rtx_PLUS (SImode,
2568 + stack_pointer_rtx,
2569 + GEN_INT (-4 * nr_regs)));
2570 + RTX_FRAME_RELATED_P (tmp) = 1;
2571 + XVECEXP (dwarf, 0, 0) = tmp;
2572 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2573 + REG_NOTES (insn));
2574 + return insn;
2575 +}
2576 +
2577 +
2578 +static rtx
2579 +emit_multi_fp_reg_push (int reglist)
2580 +{
2581 + rtx insn;
2582 + rtx dwarf;
2583 + rtx tmp;
2584 + rtx reg;
2585 + int i;
2586 + int nr_regs;
2587 + int index = 0;
2588 +
2589 + insn = emit_insn (gen_stm_fp (stack_pointer_rtx,
2590 + gen_rtx_CONST_INT (SImode, reglist),
2591 + gen_rtx_CONST_INT (SImode, 1)));
2592 +
2593 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2594 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2595 +
2596 + for (i = 15; i >= 0; i--)
2597 + {
2598 + if (reglist & (1 << i))
2599 + {
2600 + reg = gen_rtx_REG (SImode, INTERNAL_FP_REGNUM (i));
2601 + tmp = gen_rtx_SET (VOIDmode,
2602 + gen_rtx_MEM (SImode,
2603 + plus_constant (stack_pointer_rtx,
2604 + 4 * index)), reg);
2605 + RTX_FRAME_RELATED_P (tmp) = 1;
2606 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2607 + }
2608 + }
2609 +
2610 + tmp = gen_rtx_SET (SImode,
2611 + stack_pointer_rtx,
2612 + gen_rtx_PLUS (SImode,
2613 + stack_pointer_rtx,
2614 + GEN_INT (-4 * nr_regs)));
2615 + RTX_FRAME_RELATED_P (tmp) = 1;
2616 + XVECEXP (dwarf, 0, 0) = tmp;
2617 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2618 + REG_NOTES (insn));
2619 + return insn;
2620 +}
2621 +
2622 +rtx
2623 +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
2624 + int write_back, int in_struct_p, int scalar_p)
2625 +{
2626 +
2627 + rtx result;
2628 + int i = 0, j;
2629 +
2630 + result =
2631 + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
2632 +
2633 + if (write_back)
2634 + {
2635 + XVECEXP (result, 0, 0)
2636 + = gen_rtx_SET (GET_MODE (from), from,
2637 + plus_constant (from, count * 4));
2638 + i = 1;
2639 + count++;
2640 + }
2641 +
2642 +
2643 + for (j = 0; i < count; i++, j++)
2644 + {
2645 + rtx unspec;
2646 + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
2647 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2648 + MEM_SCALAR_P (mem) = scalar_p;
2649 + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
2650 + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
2651 + }
2652 +
2653 + return result;
2654 +}
2655 +
2656 +
2657 +rtx
2658 +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
2659 + int in_struct_p, int scalar_p)
2660 +{
2661 + rtx result;
2662 + int i = 0, j;
2663 +
2664 + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
2665 +
2666 + for (j = 0; i < count; i++, j++)
2667 + {
2668 + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
2669 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2670 + MEM_SCALAR_P (mem) = scalar_p;
2671 + XVECEXP (result, 0, i)
2672 + = gen_rtx_SET (VOIDmode, mem,
2673 + gen_rtx_UNSPEC (VOIDmode,
2674 + gen_rtvec (1, regs[j]),
2675 + UNSPEC_STORE_MULTIPLE));
2676 + }
2677 +
2678 + return result;
2679 +}
2680 +
2681 +
2682 +/* Move a block of memory if it is word aligned or we support unaligned
2683 + word memory accesses. The size must be maximum 64 bytes. */
2684 +
2685 +int
2686 +avr32_gen_movmemsi (rtx * operands)
2687 +{
2688 + HOST_WIDE_INT bytes_to_go;
2689 + rtx src, dst;
2690 + rtx st_src, st_dst;
2691 + int src_offset = 0, dst_offset = 0;
2692 + int block_size;
2693 + int dst_in_struct_p, src_in_struct_p;
2694 + int dst_scalar_p, src_scalar_p;
2695 + int unaligned;
2696 +
2697 + if (GET_CODE (operands[2]) != CONST_INT
2698 + || GET_CODE (operands[3]) != CONST_INT
2699 + || INTVAL (operands[2]) > 64
2700 + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
2701 + return 0;
2702 +
2703 + unaligned = (INTVAL (operands[3]) & 3) != 0;
2704 +
2705 + block_size = 4;
2706 +
2707 + st_dst = XEXP (operands[0], 0);
2708 + st_src = XEXP (operands[1], 0);
2709 +
2710 + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
2711 + dst_scalar_p = MEM_SCALAR_P (operands[0]);
2712 + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
2713 + src_scalar_p = MEM_SCALAR_P (operands[1]);
2714 +
2715 + dst = copy_to_mode_reg (SImode, st_dst);
2716 + src = copy_to_mode_reg (SImode, st_src);
2717 +
2718 + bytes_to_go = INTVAL (operands[2]);
2719 +
2720 + while (bytes_to_go)
2721 + {
2722 + enum machine_mode move_mode;
2723 + /* (Seems to be a problem with reloads for the movti pattern so this is
2724 + disabled until that problem is resolved)
2725 + UPDATE: Problem seems to be solved now.... */
2726 + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
2727 + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
2728 + && !TARGET_ARCH_UC)
2729 + move_mode = TImode;
2730 + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
2731 + move_mode = DImode;
2732 + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
2733 + move_mode = SImode;
2734 + else
2735 + move_mode = QImode;
2736 +
2737 + {
2738 + rtx src_mem;
2739 + rtx dst_mem = gen_rtx_MEM (move_mode,
2740 + gen_rtx_PLUS (SImode, dst,
2741 + GEN_INT (dst_offset)));
2742 + dst_offset += GET_MODE_SIZE (move_mode);
2743 + if ( 0 /* This causes an error in GCC. Think there is
2744 + something wrong in the gcse pass which causes REQ_EQUIV notes
2745 + to be wrong so disabling it for now. */
2746 + && move_mode == TImode
2747 + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
2748 + {
2749 + src_mem = gen_rtx_MEM (move_mode,
2750 + gen_rtx_POST_INC (SImode, src));
2751 + }
2752 + else
2753 + {
2754 + src_mem = gen_rtx_MEM (move_mode,
2755 + gen_rtx_PLUS (SImode, src,
2756 + GEN_INT (src_offset)));
2757 + src_offset += GET_MODE_SIZE (move_mode);
2758 + }
2759 +
2760 + bytes_to_go -= GET_MODE_SIZE (move_mode);
2761 +
2762 + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
2763 + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
2764 +
2765 + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
2766 + MEM_SCALAR_P (src_mem) = src_scalar_p;
2767 + emit_move_insn (dst_mem, src_mem);
2768 +
2769 + }
2770 + }
2771 +
2772 + return 1;
2773 +}
2774 +
2775 +
2776 +
2777 +/*Expand the prologue instruction*/
2778 +void
2779 +avr32_expand_prologue (void)
2780 +{
2781 + rtx insn, dwarf;
2782 + unsigned long saved_reg_mask, saved_fp_reg_mask;
2783 + int reglist8 = 0;
2784 +
2785 +  /* Naked functions do not have a prologue */
2786 + if (IS_NAKED (avr32_current_func_type ()))
2787 + return;
2788 +
2789 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
2790 +
2791 + if (saved_reg_mask)
2792 + {
2793 + /* Must push used registers */
2794 +
2795 +      /* Should we use PUSHM or STM? */
2796 + int usePUSHM = TRUE;
2797 + reglist8 = 0;
2798 + if (((saved_reg_mask & (1 << 0)) ||
2799 + (saved_reg_mask & (1 << 1)) ||
2800 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
2801 + {
2802 + /* One of R0-R3 should at least be pushed */
2803 + if (((saved_reg_mask & (1 << 0)) &&
2804 + (saved_reg_mask & (1 << 1)) &&
2805 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
2806 + {
2807 + /* All should be pushed */
2808 + reglist8 |= 0x01;
2809 + }
2810 + else
2811 + {
2812 + usePUSHM = FALSE;
2813 + }
2814 + }
2815 +
2816 + if (((saved_reg_mask & (1 << 4)) ||
2817 + (saved_reg_mask & (1 << 5)) ||
2818 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
2819 + {
2820 + /* One of R4-R7 should at least be pushed */
2821 + if (((saved_reg_mask & (1 << 4)) &&
2822 + (saved_reg_mask & (1 << 5)) &&
2823 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
2824 + {
2825 + if (usePUSHM)
2826 + /* All should be pushed */
2827 + reglist8 |= 0x02;
2828 + }
2829 + else
2830 + {
2831 + usePUSHM = FALSE;
2832 + }
2833 + }
2834 +
2835 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
2836 + {
2837 + /* One of R8-R9 should at least be pushed */
2838 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
2839 + {
2840 + if (usePUSHM)
2841 + /* All should be pushed */
2842 + reglist8 |= 0x04;
2843 + }
2844 + else
2845 + {
2846 + usePUSHM = FALSE;
2847 + }
2848 + }
2849 +
2850 + if (saved_reg_mask & (1 << 10))
2851 + reglist8 |= 0x08;
2852 +
2853 + if (saved_reg_mask & (1 << 11))
2854 + reglist8 |= 0x10;
2855 +
2856 + if (saved_reg_mask & (1 << 12))
2857 + reglist8 |= 0x20;
2858 +
2859 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
2860 + {
2861 + /* Push LR */
2862 + reglist8 |= 0x40;
2863 + }
2864 +
2865 + if (usePUSHM)
2866 + {
2867 + insn = emit_multi_reg_push (reglist8, TRUE);
2868 + }
2869 + else
2870 + {
2871 + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
2872 + }
2873 + RTX_FRAME_RELATED_P (insn) = 1;
2874 +
2875 + /* Prevent this instruction from being scheduled after any other
2876 + instructions. */
2877 + emit_insn (gen_blockage ());
2878 + }
2879 +
2880 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
2881 + if (saved_fp_reg_mask)
2882 + {
2883 + insn = emit_multi_fp_reg_push (saved_fp_reg_mask);
2884 + RTX_FRAME_RELATED_P (insn) = 1;
2885 +
2886 + /* Prevent this instruction from being scheduled after any other
2887 + instructions. */
2888 + emit_insn (gen_blockage ());
2889 + }
2890 +
2891 + /* Set frame pointer */
2892 + if (frame_pointer_needed)
2893 + {
2894 + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
2895 + RTX_FRAME_RELATED_P (insn) = 1;
2896 + }
2897 +
2898 + if (get_frame_size () > 0)
2899 + {
2900 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
2901 + {
2902 + insn = emit_insn (gen_rtx_SET (SImode,
2903 + stack_pointer_rtx,
2904 + gen_rtx_PLUS (SImode,
2905 + stack_pointer_rtx,
2906 + gen_rtx_CONST_INT
2907 + (SImode,
2908 + -get_frame_size
2909 + ()))));
2910 + RTX_FRAME_RELATED_P (insn) = 1;
2911 + }
2912 + else
2913 + {
2914 +	  /* Immediate is larger than k21. We must either check if we can use
2915 +	     one of the pushed registers as temporary storage or we must
2916 +	     make us a temp register by pushing a register to the stack. */
2917 + rtx temp_reg, const_pool_entry, insn;
2918 + if (saved_reg_mask)
2919 + {
2920 + temp_reg =
2921 + gen_rtx_REG (SImode,
2922 + INTERNAL_REGNUM (avr32_get_saved_reg
2923 + (saved_reg_mask)));
2924 + }
2925 + else
2926 + {
2927 + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
2928 + emit_move_insn (gen_rtx_MEM
2929 + (SImode,
2930 + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
2931 + temp_reg);
2932 + }
2933 +
2934 + const_pool_entry =
2935 + force_const_mem (SImode,
2936 + gen_rtx_CONST_INT (SImode, get_frame_size ()));
2937 + emit_move_insn (temp_reg, const_pool_entry);
2938 +
2939 + insn = emit_insn (gen_rtx_SET (SImode,
2940 + stack_pointer_rtx,
2941 + gen_rtx_MINUS (SImode,
2942 + stack_pointer_rtx,
2943 + temp_reg)));
2944 +
2945 + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
2946 + gen_rtx_PLUS (SImode, stack_pointer_rtx,
2947 + GEN_INT (-get_frame_size ())));
2948 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2949 + dwarf, REG_NOTES (insn));
2950 + RTX_FRAME_RELATED_P (insn) = 1;
2951 +
2952 + if (!saved_reg_mask)
2953 + {
2954 + insn =
2955 + emit_move_insn (temp_reg,
2956 + gen_rtx_MEM (SImode,
2957 + gen_rtx_POST_INC (SImode,
2958 + gen_rtx_REG
2959 + (SImode,
2960 + 13))));
2961 + }
2962 +
2963 + /* Mark the temp register as dead */
2964 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
2965 + REG_NOTES (insn));
2966 +
2967 +
2968 + }
2969 +
2970 +      /* Prevent the stack adjustment from being scheduled after any
2971 + instructions using the frame pointer. */
2972 + emit_insn (gen_blockage ());
2973 + }
2974 +
2975 + /* Load GOT */
2976 + if (flag_pic)
2977 + {
2978 + avr32_load_pic_register ();
2979 +
2980 + /* gcc does not know that load or call instructions might use the pic
2981 + register so it might schedule these instructions before the loading
2982 + of the pic register. To avoid this emit a barrier for now. TODO!
2983 + Find out a better way to let gcc know which instructions might use
2984 + the pic register. */
2985 + emit_insn (gen_blockage ());
2986 + }
2987 + return;
2988 +}
2989 +
2990 +void
2991 +avr32_set_return_address (rtx source, rtx scratch)
2992 +{
2993 + rtx addr;
2994 + unsigned long saved_regs;
2995 +
2996 + saved_regs = avr32_compute_save_reg_mask (TRUE);
2997 +
2998 + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2999 + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
3000 + else
3001 + {
3002 + if (frame_pointer_needed)
3003 + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
3004 + else
3005 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
3006 + {
3007 + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
3008 + }
3009 + else
3010 + {
3011 + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
3012 + addr = scratch;
3013 + }
3014 + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
3015 + }
3016 +}
3017 +
3018 +
3019 +
3020 +/* Return the length of INSN. LENGTH is the initial length computed by
3021 + attributes in the machine-description file. */
3022 +
3023 +int
3024 +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
3025 + int length ATTRIBUTE_UNUSED)
3026 +{
3027 + return length;
3028 +}
3029 +
3030 +void
3031 +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
3032 + int iscond ATTRIBUTE_UNUSED,
3033 + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
3034 +{
3035 +
3036 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3037 + int insert_ret = TRUE;
3038 + int reglist8 = 0;
3039 + int stack_adjustment = get_frame_size ();
3040 + unsigned int func_type = avr32_current_func_type ();
3041 + FILE *f = asm_out_file;
3042 +
3043 + /* Naked functions does not have an epilogue */
3044 + if (IS_NAKED (func_type))
3045 + return;
3046 +
3047 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3048 +
3049 + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
3050 +
3051 + /* Reset frame pointer */
3052 + if (stack_adjustment > 0)
3053 + {
3054 + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
3055 + {
3056 + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
3057 + -stack_adjustment);
3058 + }
3059 + else
3060 + {
3061 + /* TODO! Is it safe to use r8 as scratch?? */
3062 + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
3063 + -stack_adjustment);
3064 + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
3065 + -stack_adjustment);
3066 + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
3067 + }
3068 + }
3069 +
3070 + if (saved_fp_reg_mask)
3071 + {
3072 + char reglist[64]; /* 64 bytes should be enough... */
3073 + avr32_make_fp_reglist_w (saved_fp_reg_mask, (char *) reglist);
3074 + fprintf (f, "\tldcm.w\tcp0, sp++, %s\n", reglist);
3075 + if (saved_fp_reg_mask & ~0xff)
3076 + {
3077 + saved_fp_reg_mask &= ~0xff;
3078 + avr32_make_fp_reglist_d (saved_fp_reg_mask, (char *) reglist);
3079 + fprintf (f, "\tldcm.d\tcp0, sp++, %s\n", reglist);
3080 + }
3081 + }
3082 +
3083 + if (saved_reg_mask)
3084 + {
3085 + /* Must pop used registers */
3086 +
3087 + /* Should we use POPM or LDM? */
3088 + int usePOPM = TRUE;
3089 + if (((saved_reg_mask & (1 << 0)) ||
3090 + (saved_reg_mask & (1 << 1)) ||
3091 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
3092 + {
3093 + /* One of R0-R3 should at least be popped */
3094 + if (((saved_reg_mask & (1 << 0)) &&
3095 + (saved_reg_mask & (1 << 1)) &&
3096 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
3097 + {
3098 + /* All should be popped */
3099 + reglist8 |= 0x01;
3100 + }
3101 + else
3102 + {
3103 + usePOPM = FALSE;
3104 + }
3105 + }
3106 +
3107 + if (((saved_reg_mask & (1 << 4)) ||
3108 + (saved_reg_mask & (1 << 5)) ||
3109 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
3110 + {
3111 +	  /* One of R4-R7 should at least be popped */
3112 + if (((saved_reg_mask & (1 << 4)) &&
3113 + (saved_reg_mask & (1 << 5)) &&
3114 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
3115 + {
3116 + if (usePOPM)
3117 + /* All should be popped */
3118 + reglist8 |= 0x02;
3119 + }
3120 + else
3121 + {
3122 + usePOPM = FALSE;
3123 + }
3124 + }
3125 +
3126 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
3127 + {
3128 +	  /* One of R8-R9 should at least be popped */
3129 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
3130 + {
3131 + if (usePOPM)
3132 +	      /* All should be popped */
3133 + reglist8 |= 0x04;
3134 + }
3135 + else
3136 + {
3137 + usePOPM = FALSE;
3138 + }
3139 + }
3140 +
3141 + if (saved_reg_mask & (1 << 10))
3142 + reglist8 |= 0x08;
3143 +
3144 + if (saved_reg_mask & (1 << 11))
3145 + reglist8 |= 0x10;
3146 +
3147 + if (saved_reg_mask & (1 << 12))
3148 + reglist8 |= 0x20;
3149 +
3150 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
3151 + /* Pop LR */
3152 + reglist8 |= 0x40;
3153 +
3154 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3155 + /* Pop LR into PC. */
3156 + reglist8 |= 0x80;
3157 +
3158 + if (usePOPM)
3159 + {
3160 + char reglist[64]; /* 64 bytes should be enough... */
3161 + avr32_make_reglist8 (reglist8, (char *) reglist);
3162 +
3163 + if (reglist8 & 0x80)
3164 + /* This instruction is also a return */
3165 + insert_ret = FALSE;
3166 +
3167 + if (r12_imm && !insert_ret)
3168 + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
3169 + else
3170 + fprintf (f, "\tpopm\t%s\n", reglist);
3171 +
3172 + }
3173 + else
3174 + {
3175 + char reglist[64]; /* 64 bytes should be enough... */
3176 + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
3177 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3178 + /* This instruction is also a return */
3179 + insert_ret = FALSE;
3180 +
3181 + if (r12_imm && !insert_ret)
3182 + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
3183 + INTVAL (r12_imm));
3184 + else
3185 + fprintf (f, "\tldm\tsp++, %s\n", reglist);
3186 +
3187 + }
3188 +
3189 + }
3190 +
3191 + /* Stack adjustment for exception handler. */
3192 + if (current_function_calls_eh_return)
3193 + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
3194 +
3195 +
3196 + if (IS_INTERRUPT (func_type))
3197 + {
3198 + fprintf (f, "\trete\n");
3199 + }
3200 + else if (insert_ret)
3201 + {
3202 + if (r12_imm)
3203 + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
3204 + else
3205 + fprintf (f, "\tretal\tr12\n");
3206 + }
3207 +}
3208 +
3209 +/* Function for converting a fp-register mask to a
3210 + reglistCPD8 register list string. */
3211 +void
3212 +avr32_make_fp_reglist_d (int reglist_mask, char *reglist_string)
3213 +{
3214 + int i;
3215 +
3216 + /* Make sure reglist_string is empty */
3217 + reglist_string[0] = '\0';
3218 +
3219 + for (i = 0; i < NUM_FP_REGS; i += 2)
3220 + {
3221 + if (reglist_mask & (1 << i))
3222 + {
3223 + strlen (reglist_string) ?
3224 + sprintf (reglist_string, "%s, %s-%s", reglist_string,
3225 + reg_names[INTERNAL_FP_REGNUM (i)],
3226 + reg_names[INTERNAL_FP_REGNUM (i + 1)]) :
3227 + sprintf (reglist_string, "%s-%s",
3228 + reg_names[INTERNAL_FP_REGNUM (i)],
3229 + reg_names[INTERNAL_FP_REGNUM (i + 1)]);
3230 + }
3231 + }
3232 +}
3233 +
3234 +/* Function for converting a fp-register mask to a
3235 + reglistCP8 register list string. */
3236 +void
3237 +avr32_make_fp_reglist_w (int reglist_mask, char *reglist_string)
3238 +{
3239 + int i;
3240 +
3241 + /* Make sure reglist_string is empty */
3242 + reglist_string[0] = '\0';
3243 +
3244 + for (i = 0; i < NUM_FP_REGS; ++i)
3245 + {
3246 + if (reglist_mask & (1 << i))
3247 + {
3248 + strlen (reglist_string) ?
3249 + sprintf (reglist_string, "%s, %s", reglist_string,
3250 + reg_names[INTERNAL_FP_REGNUM (i)]) :
3251 + sprintf (reglist_string, "%s", reg_names[INTERNAL_FP_REGNUM (i)]);
3252 + }
3253 + }
3254 +}
3255 +
3256 +void
3257 +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
3258 +{
3259 + int i;
3260 +
3261 + /* Make sure reglist16_string is empty */
3262 + reglist16_string[0] = '\0';
3263 +
3264 + for (i = 0; i < 16; ++i)
3265 + {
3266 + if (reglist16_vect & (1 << i))
3267 + {
3268 + strlen (reglist16_string) ?
3269 + sprintf (reglist16_string, "%s, %s", reglist16_string,
3270 + reg_names[INTERNAL_REGNUM (i)]) :
3271 + sprintf (reglist16_string, "%s", reg_names[INTERNAL_REGNUM (i)]);
3272 + }
3273 + }
3274 +}
3275 +
3276 +int
3277 +avr32_convert_to_reglist16 (int reglist8_vect)
3278 +{
3279 + int reglist16_vect = 0;
3280 + if (reglist8_vect & 0x1)
3281 + reglist16_vect |= 0xF;
3282 + if (reglist8_vect & 0x2)
3283 + reglist16_vect |= 0xF0;
3284 + if (reglist8_vect & 0x4)
3285 + reglist16_vect |= 0x300;
3286 + if (reglist8_vect & 0x8)
3287 + reglist16_vect |= 0x400;
3288 + if (reglist8_vect & 0x10)
3289 + reglist16_vect |= 0x800;
3290 + if (reglist8_vect & 0x20)
3291 + reglist16_vect |= 0x1000;
3292 + if (reglist8_vect & 0x40)
3293 + reglist16_vect |= 0x4000;
3294 + if (reglist8_vect & 0x80)
3295 + reglist16_vect |= 0x8000;
3296 +
3297 + return reglist16_vect;
3298 +}
3299 +
3300 +void
3301 +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
3302 +{
3303 + /* Make sure reglist8_string is empty */
3304 + reglist8_string[0] = '\0';
3305 +
3306 + if (reglist8_vect & 0x1)
3307 + sprintf (reglist8_string, "r0-r3");
3308 + if (reglist8_vect & 0x2)
3309 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r4-r7",
3310 + reglist8_string) :
3311 + sprintf (reglist8_string, "r4-r7");
3312 + if (reglist8_vect & 0x4)
3313 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r8-r9",
3314 + reglist8_string) :
3315 + sprintf (reglist8_string, "r8-r9");
3316 + if (reglist8_vect & 0x8)
3317 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r10",
3318 + reglist8_string) :
3319 + sprintf (reglist8_string, "r10");
3320 + if (reglist8_vect & 0x10)
3321 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r11",
3322 + reglist8_string) :
3323 + sprintf (reglist8_string, "r11");
3324 + if (reglist8_vect & 0x20)
3325 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, r12",
3326 + reglist8_string) :
3327 + sprintf (reglist8_string, "r12");
3328 + if (reglist8_vect & 0x40)
3329 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, lr",
3330 + reglist8_string) :
3331 + sprintf (reglist8_string, "lr");
3332 + if (reglist8_vect & 0x80)
3333 + strlen (reglist8_string) ? sprintf (reglist8_string, "%s, pc",
3334 + reglist8_string) :
3335 + sprintf (reglist8_string, "pc");
3336 +}
3337 +
3338 +int
3339 +avr32_eh_return_data_regno (int n)
3340 +{
3341 + if (n >= 0 && n <= 3)
3342 + return 8 + n;
3343 + else
3344 + return INVALID_REGNUM;
3345 +}
3346 +
3347 +/* Compute the distance from register FROM to register TO.
3348 + These can be the arg pointer, the frame pointer or
3349 + the stack pointer.
3350 + Typical stack layout looks like this:
3351 +
3352 + old stack pointer -> | |
3353 + ----
3354 + | | \
3355 + | | saved arguments for
3356 + | | vararg functions
3357 + arg_pointer -> | | /
3358 + --
3359 + | | \
3360 + | | call saved
3361 + | | registers
3362 + | | /
3363 + frame ptr -> --
3364 + | | \
3365 + | | local
3366 + | | variables
3367 + stack ptr --> | | /
3368 + --
3369 + | | \
3370 + | | outgoing
3371 + | | arguments
3372 + | | /
3373 + --
3374 +
3375 +  For a given function some or all of these stack components
3376 + may not be needed, giving rise to the possibility of
3377 + eliminating some of the registers.
3378 +
3379 + The values returned by this function must reflect the behaviour
3380 + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
3381 +
3382 + The sign of the number returned reflects the direction of stack
3383 + growth, so the values are positive for all eliminations except
3384 + from the soft frame pointer to the hard frame pointer. */
3385 +
3386 +
3387 +int
3388 +avr32_initial_elimination_offset (int from, int to)
3389 +{
3390 + int i;
3391 + int call_saved_regs = 0;
3392 + unsigned long saved_reg_mask, saved_fp_reg_mask;
3393 + unsigned int local_vars = get_frame_size ();
3394 +
3395 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
3396 + saved_fp_reg_mask = avr32_compute_save_fp_reg_mask ();
3397 +
3398 + for (i = 0; i < 16; ++i)
3399 + {
3400 + if (saved_reg_mask & (1 << i))
3401 + call_saved_regs += 4;
3402 + }
3403 +
3404 + for (i = 0; i < NUM_FP_REGS; ++i)
3405 + {
3406 + if (saved_fp_reg_mask & (1 << i))
3407 + call_saved_regs += 4;
3408 + }
3409 +
3410 + switch (from)
3411 + {
3412 + case ARG_POINTER_REGNUM:
3413 + switch (to)
3414 + {
3415 + case STACK_POINTER_REGNUM:
3416 + return call_saved_regs + local_vars;
3417 + case FRAME_POINTER_REGNUM:
3418 + return call_saved_regs;
3419 + default:
3420 + abort ();
3421 + }
3422 + case FRAME_POINTER_REGNUM:
3423 + switch (to)
3424 + {
3425 + case STACK_POINTER_REGNUM:
3426 + return local_vars;
3427 + default:
3428 + abort ();
3429 + }
3430 + default:
3431 + abort ();
3432 + }
3433 +}
3434 +
3435 +
3436 +/*
3437 + Returns a rtx used when passing the next argument to a function.
3438 +  avr32_init_cumulative_args() and avr32_function_arg_advance() set which
3439 + register to use.
3440 +*/
3441 +rtx
3442 +avr32_function_arg (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3443 + tree type, int named)
3444 +{
3445 + int index = -1;
3446 +
3447 + HOST_WIDE_INT arg_size, arg_rsize;
3448 + if (type)
3449 + {
3450 + arg_size = int_size_in_bytes (type);
3451 + }
3452 + else
3453 + {
3454 + arg_size = GET_MODE_SIZE (mode);
3455 + }
3456 + arg_rsize = PUSH_ROUNDING (arg_size);
3457 +
3458 + /*
3459 + The last time this macro is called, it is called with mode == VOIDmode,
3460 + and its result is passed to the call or call_value pattern as operands 2
3461 + and 3 respectively. */
3462 + if (mode == VOIDmode)
3463 + {
3464 + return gen_rtx_CONST_INT (SImode, 22); /* ToDo: fixme. */
3465 + }
3466 +
3467 + if ((*targetm.calls.must_pass_in_stack) (mode, type) || !named)
3468 + {
3469 + return NULL_RTX;
3470 + }
3471 +
3472 + if (arg_rsize == 8)
3473 + {
3474 + /* use r11:r10 or r9:r8. */
3475 + if (!(GET_USED_INDEX (cum, 1) || GET_USED_INDEX (cum, 2)))
3476 + index = 1;
3477 + else if (!(GET_USED_INDEX (cum, 3) || GET_USED_INDEX (cum, 4)))
3478 + index = 3;
3479 + else
3480 + index = -1;
3481 + }
3482 + else if (arg_rsize == 4)
3483 + { /* Use first available register */
3484 + index = 0;
3485 + while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX (cum, index))
3486 + index++;
3487 + if (index > LAST_CUM_REG_INDEX)
3488 + index = -1;
3489 + }
3490 +
3491 + SET_REG_INDEX (cum, index);
3492 +
3493 + if (GET_REG_INDEX (cum) >= 0)
3494 + return gen_rtx_REG (mode,
3495 + avr32_function_arg_reglist[GET_REG_INDEX (cum)]);
3496 +
3497 + return NULL_RTX;
3498 +}
3499 +
3500 +/*
3501 + Set the register used for passing the first argument to a function.
3502 +*/
3503 +void
3504 +avr32_init_cumulative_args (CUMULATIVE_ARGS * cum,
3505 + tree fntype ATTRIBUTE_UNUSED,
3506 + rtx libname ATTRIBUTE_UNUSED,
3507 + tree fndecl ATTRIBUTE_UNUSED)
3508 + {
3509 + /* Set all registers as unused. */
3510 + SET_INDEXES_UNUSED (cum);
3511 +
3512 + /* Reset uses_anonymous_args */
3513 + cum->uses_anonymous_args = 0;
3514 +
3515 + /* Reset size of stack pushed arguments */
3516 + cum->stack_pushed_args_size = 0;
3517 + }
3518 +
3519 +/*
3520 + Set register used for passing the next argument to a function. Only the
3521 + Scratch Registers are used.
3522 +
3523 + number name
3524 + 15 r15 PC
3525 + 14 r14 LR
3526 + 13 r13 _SP_________
3527 + FIRST_CUM_REG 12 r12 _||_
3528 + 10 r11 ||
3529 + 11 r10 _||_ Scratch Registers
3530 + 8 r9 ||
3531 + LAST_SCRATCH_REG 9 r8 _\/_________
3532 + 6 r7 /\
3533 + 7 r6 ||
3534 + 4 r5 ||
3535 + 5 r4 ||
3536 + 2 r3 ||
3537 + 3 r2 ||
3538 + 0 r1 ||
3539 + 1 r0 _||_________
3540 +
3541 +*/
3542 +void
3543 +avr32_function_arg_advance (CUMULATIVE_ARGS * cum, enum machine_mode mode,
3544 + tree type, int named ATTRIBUTE_UNUSED)
3545 +{
3546 + HOST_WIDE_INT arg_size, arg_rsize;
3547 +
3548 + if (type)
3549 + {
3550 + arg_size = int_size_in_bytes (type);
3551 + }
3552 + else
3553 + {
3554 + arg_size = GET_MODE_SIZE (mode);
3555 + }
3556 + arg_rsize = PUSH_ROUNDING (arg_size);
3557 +
3558 +  /* If the argument has to be passed on the stack, no register is used. */
3559 + if ((*targetm.calls.must_pass_in_stack) (mode, type))
3560 + {
3561 + cum->stack_pushed_args_size += PUSH_ROUNDING (int_size_in_bytes (type));
3562 + return;
3563 + }
3564 +
3565 + /* Mark the used registers as "used". */
3566 + if (GET_REG_INDEX (cum) >= 0)
3567 + {
3568 + SET_USED_INDEX (cum, GET_REG_INDEX (cum));
3569 + if (arg_rsize == 8)
3570 + {
3571 + SET_USED_INDEX (cum, (GET_REG_INDEX (cum) + 1));
3572 + }
3573 + }
3574 + else
3575 + {
3576 + /* Had to use stack */
3577 + cum->stack_pushed_args_size += arg_rsize;
3578 + }
3579 +}
3580 +
3581 +/*
3582 +  Defines which direction to go to find the next register to use if the
3583 +  argument is larger than one register or for arguments shorter than an
3584 + int which is not promoted, such as the last part of structures with
3585 + size not a multiple of 4. */
3586 +enum direction
3587 +avr32_function_arg_padding (enum machine_mode mode ATTRIBUTE_UNUSED,
3588 + tree type)
3589 +{
3590 + /* Pad upward for all aggregates except byte and halfword sized aggregates
3591 + which can be passed in registers. */
3592 + if (type
3593 + && AGGREGATE_TYPE_P (type)
3594 + && (int_size_in_bytes (type) != 1)
3595 + && !((int_size_in_bytes (type) == 2)
3596 + && TYPE_ALIGN_UNIT (type) >= 2)
3597 + && (int_size_in_bytes (type) & 0x3))
3598 + {
3599 + return upward;
3600 + }
3601 +
3602 + return downward;
3603 +}
3604 +
3605 +/*
3606 + Return a rtx used for the return value from a function call.
3607 +*/
3608 +rtx
3609 +avr32_function_value (tree type, tree func, bool outgoing ATTRIBUTE_UNUSED)
3610 +{
3611 + if (avr32_return_in_memory (type, func))
3612 + return NULL_RTX;
3613 +
3614 + if (int_size_in_bytes (type) <= 4)
3615 + {
3616 + enum machine_mode mode = TYPE_MODE (type);
3617 + int unsignedp = 0;
3618 + PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
3619 + return gen_rtx_REG (mode, RET_REGISTER);
3620 + }
3621 + else if (int_size_in_bytes (type) <= 8)
3622 + return gen_rtx_REG (TYPE_MODE (type), INTERNAL_REGNUM (11));
3623 +
3624 + return NULL_RTX;
3625 +}
3626 +
3627 +/*
3628 + Return a rtx used for the return value from a library function call.
3629 +*/
3630 +rtx
3631 +avr32_libcall_value (enum machine_mode mode)
3632 +{
3633 +
3634 + if (GET_MODE_SIZE (mode) <= 4)
3635 + return gen_rtx_REG (mode, RET_REGISTER);
3636 + else if (GET_MODE_SIZE (mode) <= 8)
3637 + return gen_rtx_REG (mode, INTERNAL_REGNUM (11));
3638 + else
3639 + return NULL_RTX;
3640 +}
3641 +
3642 +/* Return TRUE if X references a SYMBOL_REF. */
3643 +int
3644 +symbol_mentioned_p (rtx x)
3645 +{
3646 + const char *fmt;
3647 + int i;
3648 +
3649 + if (GET_CODE (x) == SYMBOL_REF)
3650 + return 1;
3651 +
3652 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3653 +
3654 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3655 + {
3656 + if (fmt[i] == 'E')
3657 + {
3658 + int j;
3659 +
3660 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3661 + if (symbol_mentioned_p (XVECEXP (x, i, j)))
3662 + return 1;
3663 + }
3664 + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
3665 + return 1;
3666 + }
3667 +
3668 + return 0;
3669 +}
3670 +
3671 +/* Return TRUE if X references a LABEL_REF. */
3672 +int
3673 +label_mentioned_p (rtx x)
3674 +{
3675 + const char *fmt;
3676 + int i;
3677 +
3678 + if (GET_CODE (x) == LABEL_REF)
3679 + return 1;
3680 +
3681 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3682 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3683 + {
3684 + if (fmt[i] == 'E')
3685 + {
3686 + int j;
3687 +
3688 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3689 + if (label_mentioned_p (XVECEXP (x, i, j)))
3690 + return 1;
3691 + }
3692 + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
3693 + return 1;
3694 + }
3695 +
3696 + return 0;
3697 +}
3698 +
3699 +/* Return TRUE if X contains a MEM expression. */
3700 +int
3701 +mem_mentioned_p (rtx x)
3702 +{
3703 + const char *fmt;
3704 + int i;
3705 +
3706 + if (MEM_P (x))
3707 + return 1;
3708 +
3709 + fmt = GET_RTX_FORMAT (GET_CODE (x));
3710 + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3711 + {
3712 + if (fmt[i] == 'E')
3713 + {
3714 + int j;
3715 +
3716 + for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3717 + if (mem_mentioned_p (XVECEXP (x, i, j)))
3718 + return 1;
3719 + }
3720 + else if (fmt[i] == 'e' && mem_mentioned_p (XEXP (x, i)))
3721 + return 1;
3722 + }
3723 +
3724 + return 0;
3725 +}
3726 +
3727 +int
3728 +avr32_legitimate_pic_operand_p (rtx x)
3729 +{
3730 +
3731 + /* We can't have const, this must be broken down to a symbol. */
3732 + if (GET_CODE (x) == CONST)
3733 + return FALSE;
3734 +
3735 + /* Can't access symbols or labels via the constant pool either */
3736 + if ((GET_CODE (x) == SYMBOL_REF
3737 + && CONSTANT_POOL_ADDRESS_P (x)
3738 + && (symbol_mentioned_p (get_pool_constant (x))
3739 + || label_mentioned_p (get_pool_constant (x)))))
3740 + return FALSE;
3741 +
3742 + return TRUE;
3743 +}
3744 +
3745 +