[toolchain/gcc]: bump the 4.4 flavour to 4.4.6
[openwrt/svn-archive/archive.git] / toolchain / gcc / patches / 4.4.6 / 930-avr32_support.patch
1 --- a/gcc/builtins.c
2 +++ b/gcc/builtins.c
3 @@ -11108,7 +11108,7 @@ validate_gimple_arglist (const_gimple ca
4
5 do
6 {
7 - code = va_arg (ap, enum tree_code);
8 + code = va_arg (ap, int);
9 switch (code)
10 {
11 case 0:
12 --- a/gcc/calls.c
13 +++ b/gcc/calls.c
14 @@ -3447,7 +3447,7 @@ emit_library_call_value_1 (int retval, r
15 for (; count < nargs; count++)
16 {
17 rtx val = va_arg (p, rtx);
18 - enum machine_mode mode = va_arg (p, enum machine_mode);
19 + enum machine_mode mode = va_arg (p, int);
20
21 /* We cannot convert the arg value to the mode the library wants here;
22 must do it earlier where we know the signedness of the arg. */
23 --- /dev/null
24 +++ b/gcc/config/avr32/avr32.c
25 @@ -0,0 +1,8060 @@
26 +/*
27 + Target hooks and helper functions for AVR32.
28 + Copyright 2003,2004,2005,2006,2007,2008,2009,2010 Atmel Corporation.
29 +
30 + This file is part of GCC.
31 +
32 + This program is free software; you can redistribute it and/or modify
33 + it under the terms of the GNU General Public License as published by
34 + the Free Software Foundation; either version 2 of the License, or
35 + (at your option) any later version.
36 +
37 + This program is distributed in the hope that it will be useful,
38 + but WITHOUT ANY WARRANTY; without even the implied warranty of
39 + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40 + GNU General Public License for more details.
41 +
42 + You should have received a copy of the GNU General Public License
43 + along with this program; if not, write to the Free Software
44 + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
45 +
46 +#include "config.h"
47 +#include "system.h"
48 +#include "coretypes.h"
49 +#include "tm.h"
50 +#include "rtl.h"
51 +#include "tree.h"
52 +#include "obstack.h"
53 +#include "regs.h"
54 +#include "hard-reg-set.h"
55 +#include "real.h"
56 +#include "insn-config.h"
57 +#include "conditions.h"
58 +#include "output.h"
59 +#include "insn-attr.h"
60 +#include "flags.h"
61 +#include "reload.h"
62 +#include "function.h"
63 +#include "expr.h"
64 +#include "optabs.h"
65 +#include "toplev.h"
66 +#include "recog.h"
67 +#include "ggc.h"
68 +#include "except.h"
69 +#include "c-pragma.h"
70 +#include "integrate.h"
71 +#include "tm_p.h"
72 +#include "langhooks.h"
73 +#include "hooks.h"
74 +#include "df.h"
75 +
76 +#include "target.h"
77 +#include "target-def.h"
78 +
79 +#include <ctype.h>
80 +
81 +
82 +
83 +/* Global variables. */
84 +typedef struct minipool_node Mnode;
85 +typedef struct minipool_fixup Mfix;
86 +
87 +/* Obstack for minipool constant handling. */
88 +static struct obstack minipool_obstack;
89 +static char *minipool_startobj;
90 +static rtx minipool_vector_label;
91 +
92 +/* True if we are currently building a constant table. */
93 +int making_const_table;
94 +
95 +tree fndecl_attribute_args = NULL_TREE;
96 +
97 +
98 +/* Function prototypes. */
99 +static unsigned long avr32_isr_value (tree);
100 +static unsigned long avr32_compute_func_type (void);
101 +static tree avr32_handle_isr_attribute (tree *, tree, tree, int, bool *);
102 +static tree avr32_handle_acall_attribute (tree *, tree, tree, int, bool *);
103 +static tree avr32_handle_fndecl_attribute (tree * node, tree name, tree args,
104 + int flags, bool * no_add_attrs);
105 +static void avr32_reorg (void);
106 +bool avr32_return_in_msb (tree type);
107 +bool avr32_vector_mode_supported (enum machine_mode mode);
108 +static void avr32_init_libfuncs (void);
109 +static void avr32_file_end (void);
110 +static void flashvault_decl_list_add (unsigned int vector_num, const char *name);
111 +
112 +
113 +
114 +static void
115 +avr32_add_gc_roots (void)
116 +{
117 + gcc_obstack_init (&minipool_obstack);
118 + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
119 +}
120 +
121 +
122 +/* List of all known AVR32 parts */
123 +static const struct part_type_s avr32_part_types[] = {
124 + /* name, part_type, architecture type, macro */
125 + {"none", PART_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"},
126 + {"ap7000", PART_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"},
127 + {"ap7001", PART_TYPE_AVR32_AP7001, ARCH_TYPE_AVR32_AP, "__AVR32_AP7001__"},
128 + {"ap7002", PART_TYPE_AVR32_AP7002, ARCH_TYPE_AVR32_AP, "__AVR32_AP7002__"},
129 + {"ap7200", PART_TYPE_AVR32_AP7200, ARCH_TYPE_AVR32_AP, "__AVR32_AP7200__"},
130 + {"uc3a0128", PART_TYPE_AVR32_UC3A0128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0128__"},
131 + {"uc3a0256", PART_TYPE_AVR32_UC3A0256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0256__"},
132 + {"uc3a0512", PART_TYPE_AVR32_UC3A0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A0512__"},
133 + {"uc3a0512es", PART_TYPE_AVR32_UC3A0512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A0512ES__"},
134 + {"uc3a1128", PART_TYPE_AVR32_UC3A1128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1128__"},
135 + {"uc3a1256", PART_TYPE_AVR32_UC3A1256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1256__"},
136 + {"uc3a1512", PART_TYPE_AVR32_UC3A1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A1512__"},
137 + {"uc3a1512es", PART_TYPE_AVR32_UC3A1512ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3A1512ES__"},
138 + {"uc3a3revd", PART_TYPE_AVR32_UC3A3REVD, ARCH_TYPE_AVR32_UCR2NOMUL, "__AVR32_UC3A3256S__"},
139 + {"uc3a364", PART_TYPE_AVR32_UC3A364, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364__"},
140 + {"uc3a364s", PART_TYPE_AVR32_UC3A364S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A364S__"},
141 + {"uc3a3128", PART_TYPE_AVR32_UC3A3128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128__"},
142 + {"uc3a3128s", PART_TYPE_AVR32_UC3A3128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3128S__"},
143 + {"uc3a3256", PART_TYPE_AVR32_UC3A3256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256__"},
144 + {"uc3a3256s", PART_TYPE_AVR32_UC3A3256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A3256S__"},
145 + {"uc3a464", PART_TYPE_AVR32_UC3A464, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464__"},
146 + {"uc3a464s", PART_TYPE_AVR32_UC3A464S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A464S__"},
147 + {"uc3a4128", PART_TYPE_AVR32_UC3A4128, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128__"},
148 + {"uc3a4128s", PART_TYPE_AVR32_UC3A4128S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4128S__"},
149 + {"uc3a4256", PART_TYPE_AVR32_UC3A4256, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256__"},
150 + {"uc3a4256s", PART_TYPE_AVR32_UC3A4256S, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3A4256S__"},
151 + {"uc3b064", PART_TYPE_AVR32_UC3B064, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B064__"},
152 + {"uc3b0128", PART_TYPE_AVR32_UC3B0128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0128__"},
153 + {"uc3b0256", PART_TYPE_AVR32_UC3B0256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256__"},
154 + {"uc3b0256es", PART_TYPE_AVR32_UC3B0256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B0256ES__"},
155 + {"uc3b0512", PART_TYPE_AVR32_UC3B0512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512__"},
156 + {"uc3b0512revc", PART_TYPE_AVR32_UC3B0512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B0512REVC__"},
157 + {"uc3b164", PART_TYPE_AVR32_UC3B164, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B164__"},
158 + {"uc3b1128", PART_TYPE_AVR32_UC3B1128, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1128__"},
159 + {"uc3b1256", PART_TYPE_AVR32_UC3B1256, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256__"},
160 + {"uc3b1256es", PART_TYPE_AVR32_UC3B1256ES, ARCH_TYPE_AVR32_UCR1, "__AVR32_UC3B1256ES__"},
161 + {"uc3b1512", PART_TYPE_AVR32_UC3B1512, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512__"},
162 + {"uc3b1512revc", PART_TYPE_AVR32_UC3B1512REVC, ARCH_TYPE_AVR32_UCR2, "__AVR32_UC3B1512REVC__"},
163 + {"uc64d3", PART_TYPE_AVR32_UC64D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D3__"},
164 + {"uc128d3", PART_TYPE_AVR32_UC128D3, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D3__"},
165 + {"uc64d4", PART_TYPE_AVR32_UC64D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64D4__"},
166 + {"uc128d4", PART_TYPE_AVR32_UC128D4, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128D4__"},
167 + {"uc3c0512crevc", PART_TYPE_AVR32_UC3C0512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C0512CREVC__"},
168 + {"uc3c1512crevc", PART_TYPE_AVR32_UC3C1512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C1512CREVC__"},
169 + {"uc3c2512crevc", PART_TYPE_AVR32_UC3C2512CREVC, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3C2512CREVC__"},
170 + {"uc3l0256", PART_TYPE_AVR32_UC3L0256, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0256__"},
171 + {"uc3l0128", PART_TYPE_AVR32_UC3L0128, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L0128__"},
172 + {"uc3l064", PART_TYPE_AVR32_UC3L064, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064__"},
173 + {"uc3l032", PART_TYPE_AVR32_UC3L032, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L032__"},
174 + {"uc3l016", PART_TYPE_AVR32_UC3L016, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L016__"},
175 + {"uc3l064revb", PART_TYPE_AVR32_UC3L064REVB, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC3L064REVB__"},
176 + {"uc64l3u", PART_TYPE_AVR32_UC64L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L3U__"},
177 + {"uc128l3u", PART_TYPE_AVR32_UC128L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L3U__"},
178 + {"uc256l3u", PART_TYPE_AVR32_UC256L3U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L3U__"},
179 + {"uc64l4u", PART_TYPE_AVR32_UC64L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC64L4U__"},
180 + {"uc128l4u", PART_TYPE_AVR32_UC128L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC128L4U__"},
181 + {"uc256l4u", PART_TYPE_AVR32_UC256L4U, ARCH_TYPE_AVR32_UCR3, "__AVR32_UC256L4U__"},
182 + {"uc3c064c", PART_TYPE_AVR32_UC3C064C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C064C__"},
183 + {"uc3c0128c", PART_TYPE_AVR32_UC3C0128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0128C__"},
184 + {"uc3c0256c", PART_TYPE_AVR32_UC3C0256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0256C__"},
185 + {"uc3c0512c", PART_TYPE_AVR32_UC3C0512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C0512C__"},
186 + {"uc3c164c", PART_TYPE_AVR32_UC3C164C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C164C__"},
187 + {"uc3c1128c", PART_TYPE_AVR32_UC3C1128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1128C__"},
188 + {"uc3c1256c", PART_TYPE_AVR32_UC3C1256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1256C__"},
189 + {"uc3c1512c", PART_TYPE_AVR32_UC3C1512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C1512C__"},
190 + {"uc3c264c", PART_TYPE_AVR32_UC3C264C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C264C__"},
191 + {"uc3c2128c", PART_TYPE_AVR32_UC3C2128C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2128C__"},
192 + {"uc3c2256c", PART_TYPE_AVR32_UC3C2256C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2256C__"},
193 + {"uc3c2512c", PART_TYPE_AVR32_UC3C2512C, ARCH_TYPE_AVR32_UCR3FP, "__AVR32_UC3C2512C__"},
194 + {"mxt768e", PART_TYPE_AVR32_MXT768E, ARCH_TYPE_AVR32_UCR3, "__AVR32_MXT768E__"},
195 + {NULL, 0, 0, NULL}
196 +};
197 +
198 +/* List of all known AVR32 architectures */
199 +static const struct arch_type_s avr32_arch_types[] = {
200 + /* name, architecture type, microarchitecture type, feature flags, macro */
201 + {"ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B,
202 + (FLAG_AVR32_HAS_DSP
203 + | FLAG_AVR32_HAS_SIMD
204 + | FLAG_AVR32_HAS_UNALIGNED_WORD
205 + | FLAG_AVR32_HAS_BRANCH_PRED | FLAG_AVR32_HAS_RETURN_STACK
206 + | FLAG_AVR32_HAS_CACHES),
207 + "__AVR32_AP__"},
208 + {"ucr1", ARCH_TYPE_AVR32_UCR1, UARCH_TYPE_AVR32A,
209 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW),
210 + "__AVR32_UC__=1"},
211 + {"ucr2", ARCH_TYPE_AVR32_UCR2, UARCH_TYPE_AVR32A,
212 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
213 + | FLAG_AVR32_HAS_V2_INSNS),
214 + "__AVR32_UC__=2"},
215 + {"ucr2nomul", ARCH_TYPE_AVR32_UCR2NOMUL, UARCH_TYPE_AVR32A,
216 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
217 + | FLAG_AVR32_HAS_V2_INSNS | FLAG_AVR32_HAS_NO_MUL_INSNS),
218 + "__AVR32_UC__=2"},
219 + {"ucr3", ARCH_TYPE_AVR32_UCR3, UARCH_TYPE_AVR32A,
220 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW
221 + | FLAG_AVR32_HAS_V2_INSNS),
222 + "__AVR32_UC__=3"},
223 + {"ucr3fp", ARCH_TYPE_AVR32_UCR3FP, UARCH_TYPE_AVR32A,
224 + (FLAG_AVR32_HAS_DSP | FLAG_AVR32_HAS_RMW | FLAG_AVR32_HAS_FPU
225 + | FLAG_AVR32_HAS_V2_INSNS),
226 + "__AVR32_UC__=3"},
227 + {NULL, 0, 0, 0, NULL}
228 +};
229 +
230 +/* Default arch name */
231 +const char *avr32_arch_name = "none";
232 +const char *avr32_part_name = "none";
233 +
234 +const struct part_type_s *avr32_part;
235 +const struct arch_type_s *avr32_arch;
236 +
237 +
238 +/* FIXME: needs to use GC. */
239 +struct flashvault_decl_list
240 +{
241 + struct flashvault_decl_list *next;
242 + unsigned int vector_num;
243 + const char *name;
244 +};
245 +
246 +static struct flashvault_decl_list *flashvault_decl_list_head = NULL;
247 +
248 +
249 +/* Set default target_flags. */
250 +#undef TARGET_DEFAULT_TARGET_FLAGS
251 +#define TARGET_DEFAULT_TARGET_FLAGS \
252 + (MASK_HAS_ASM_ADDR_PSEUDOS | MASK_MD_REORG_OPTIMIZATION | MASK_COND_EXEC_BEFORE_RELOAD)
253 +
254 +void
255 +avr32_optimization_options (int level, int size)
256 +{
257 + if (AVR32_ALWAYS_PIC)
258 + flag_pic = 1;
259 +
260 + /* Enable section anchors if optimization is enabled. */
261 + if (level > 0 || size)
262 + flag_section_anchors = 2;
263 +}
264 +
265 +
266 +/* Override command line options */
267 +void
268 +avr32_override_options (void)
269 +{
270 + const struct part_type_s *part;
271 + const struct arch_type_s *arch;
272 +
273 + /*Add backward compability*/
274 + if (strcmp ("uc", avr32_arch_name)== 0)
275 + {
276 + fprintf (stderr, "Warning: Deprecated arch `%s' specified. "
277 + "Please use '-march=ucr1' instead. "
278 + "Converting to arch 'ucr1'\n",
279 + avr32_arch_name);
280 + avr32_arch_name="ucr1";
281 + }
282 +
283 + /* Check if arch type is set. */
284 + for (arch = avr32_arch_types; arch->name; arch++)
285 + {
286 + if (strcmp (arch->name, avr32_arch_name) == 0)
287 + break;
288 + }
289 + avr32_arch = arch;
290 +
291 + if (!arch->name && strcmp("none", avr32_arch_name) != 0)
292 + {
293 + fprintf (stderr, "Unknown arch `%s' specified\n"
294 + "Known arch names:\n"
295 + "\tuc (deprecated)\n",
296 + avr32_arch_name);
297 + for (arch = avr32_arch_types; arch->name; arch++)
298 + fprintf (stderr, "\t%s\n", arch->name);
299 + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP];
300 + }
301 +
302 + /* Check if part type is set. */
303 + for (part = avr32_part_types; part->name; part++)
304 + if (strcmp (part->name, avr32_part_name) == 0)
305 + break;
306 +
307 + avr32_part = part;
308 + if (!part->name)
309 + {
310 + fprintf (stderr, "Unknown part `%s' specified\nKnown part names:\n",
311 + avr32_part_name);
312 + for (part = avr32_part_types; part->name; part++)
313 + {
314 + if (strcmp("none", part->name) != 0)
315 + fprintf (stderr, "\t%s\n", part->name);
316 + }
317 + /* Set default to NONE*/
318 + avr32_part = &avr32_part_types[PART_TYPE_AVR32_NONE];
319 + }
320 +
321 + /* NB! option -march= overrides option -mpart
322 + * if both are used at the same time */
323 + if (!arch->name)
324 + avr32_arch = &avr32_arch_types[avr32_part->arch_type];
325 +
326 + /* If optimization level is two or greater, then align start of loops to a
327 + word boundary since this will allow folding the first insn of the loop.
328 + Do this only for targets supporting branch prediction. */
329 + if (optimize >= 2 && TARGET_BRANCH_PRED)
330 + align_loops = 2;
331 +
332 +
333 + /* Enable fast-float library if unsafe math optimizations
334 + are used. */
335 + if (flag_unsafe_math_optimizations)
336 + target_flags |= MASK_FAST_FLOAT;
337 +
338 + /* Check if we should set avr32_imm_in_const_pool
339 + based on if caches are present or not. */
340 + if ( avr32_imm_in_const_pool == -1 )
341 + {
342 + if ( TARGET_CACHES )
343 + avr32_imm_in_const_pool = 1;
344 + else
345 + avr32_imm_in_const_pool = 0;
346 + }
347 +
348 + if (TARGET_NO_PIC)
349 + flag_pic = 0;
350 + avr32_add_gc_roots ();
351 +}
352 +
353 +
354 +/*
355 +If defined, a function that outputs the assembler code for entry to a
356 +function. The prologue is responsible for setting up the stack frame,
357 +initializing the frame pointer register, saving registers that must be
358 +saved, and allocating size additional bytes of storage for the
359 +local variables. size is an integer. file is a stdio
360 +stream to which the assembler code should be output.
361 +
362 +The label for the beginning of the function need not be output by this
363 +macro. That has already been done when the macro is run.
364 +
365 +To determine which registers to save, the macro can refer to the array
366 +regs_ever_live: element r is nonzero if hard register
367 +r is used anywhere within the function. This implies the function
368 +prologue should save register r, provided it is not one of the
369 +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use
370 +regs_ever_live.)
371 +
372 +On machines that have ``register windows'', the function entry code does
373 +not save on the stack the registers that are in the windows, even if
374 +they are supposed to be preserved by function calls; instead it takes
375 +appropriate steps to ``push'' the register stack, if any non-call-used
376 +registers are used in the function.
377 +
378 +On machines where functions may or may not have frame-pointers, the
379 +function entry code must vary accordingly; it must set up the frame
380 +pointer if one is wanted, and not otherwise. To determine whether a
381 +frame pointer is in wanted, the macro can refer to the variable
382 +frame_pointer_needed. The variable's value will be 1 at run
383 +time in a function that needs a frame pointer. (see Elimination).
384 +
385 +The function entry code is responsible for allocating any stack space
386 +required for the function. This stack space consists of the regions
387 +listed below. In most cases, these regions are allocated in the
388 +order listed, with the last listed region closest to the top of the
389 +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and
390 +the highest address if it is not defined). You can use a different order
391 +for a machine if doing so is more convenient or required for
392 +compatibility reasons. Except in cases where required by standard
393 +or by a debugger, there is no reason why the stack layout used by GCC
394 +need agree with that used by other compilers for a machine.
395 +*/
396 +
397 +#undef TARGET_ASM_FUNCTION_PROLOGUE
398 +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue
399 +
400 +#undef TARGET_ASM_FILE_END
401 +#define TARGET_ASM_FILE_END avr32_file_end
402 +
403 +#undef TARGET_DEFAULT_SHORT_ENUMS
404 +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false
405 +
406 +#undef TARGET_PROMOTE_FUNCTION_ARGS
407 +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
408 +
409 +#undef TARGET_PROMOTE_FUNCTION_RETURN
410 +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
411 +
412 +#undef TARGET_PROMOTE_PROTOTYPES
413 +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
414 +
415 +#undef TARGET_MUST_PASS_IN_STACK
416 +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack
417 +
418 +#undef TARGET_PASS_BY_REFERENCE
419 +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference
420 +
421 +#undef TARGET_STRICT_ARGUMENT_NAMING
422 +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming
423 +
424 +#undef TARGET_VECTOR_MODE_SUPPORTED_P
425 +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported
426 +
427 +#undef TARGET_RETURN_IN_MEMORY
428 +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory
429 +
430 +#undef TARGET_RETURN_IN_MSB
431 +#define TARGET_RETURN_IN_MSB avr32_return_in_msb
432 +
433 +#undef TARGET_ENCODE_SECTION_INFO
434 +#define TARGET_ENCODE_SECTION_INFO avr32_encode_section_info
435 +
436 +#undef TARGET_ARG_PARTIAL_BYTES
437 +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes
438 +
439 +#undef TARGET_STRIP_NAME_ENCODING
440 +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding
441 +
442 +#define streq(string1, string2) (strcmp (string1, string2) == 0)
443 +
444 +#undef TARGET_NARROW_VOLATILE_BITFIELD
445 +#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
446 +
447 +#undef TARGET_ATTRIBUTE_TABLE
448 +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table
449 +
450 +#undef TARGET_COMP_TYPE_ATTRIBUTES
451 +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes
452 +
453 +
454 +#undef TARGET_RTX_COSTS
455 +#define TARGET_RTX_COSTS avr32_rtx_costs
456 +
457 +#undef TARGET_CANNOT_FORCE_CONST_MEM
458 +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem
459 +
460 +#undef TARGET_ASM_INTEGER
461 +#define TARGET_ASM_INTEGER avr32_assemble_integer
462 +
463 +#undef TARGET_FUNCTION_VALUE
464 +#define TARGET_FUNCTION_VALUE avr32_function_value
465 +
466 +#undef TARGET_MIN_ANCHOR_OFFSET
467 +#define TARGET_MIN_ANCHOR_OFFSET (0)
468 +
469 +#undef TARGET_MAX_ANCHOR_OFFSET
470 +#define TARGET_MAX_ANCHOR_OFFSET ((1 << 15) - 1)
471 +#undef TARGET_SECONDARY_RELOAD
472 +#define TARGET_SECONDARY_RELOAD avr32_secondary_reload
473 +
474 +
475 +/*
476 + * Defining the option, -mlist-devices to list the devices supported by gcc.
477 + * This option should be used while printing target-help to list all the
478 + * supported devices.
479 + */
480 +#undef TARGET_HELP
481 +#define TARGET_HELP avr32_target_help
482 +
483 +void avr32_target_help ()
484 +{
485 + if (avr32_list_supported_parts)
486 + {
487 + const struct part_type_s *list;
488 + fprintf (stdout, "List of parts supported by avr32-gcc:\n");
489 + for (list = avr32_part_types; list->name; list++)
490 + {
491 + if (strcmp("none", list->name) != 0)
492 + fprintf (stdout, "%-20s%s\n", list->name, list->macro);
493 + }
494 + fprintf (stdout, "\n\n");
495 + }
496 +}
497 +
498 +enum reg_class
499 +avr32_secondary_reload (bool in_p, rtx x, enum reg_class class,
500 + enum machine_mode mode, secondary_reload_info *sri)
501 +{
502 +
503 + if ( avr32_rmw_memory_operand (x, mode) )
504 + {
505 + if (!in_p)
506 + sri->icode = CODE_FOR_reload_out_rmw_memory_operand;
507 + else
508 + sri->icode = CODE_FOR_reload_in_rmw_memory_operand;
509 + }
510 + return NO_REGS;
511 +
512 +}
513 +/*
514 + * Switches to the appropriate section for output of constant pool
515 + * entry x in mode. You can assume that x is some kind of constant in
516 + * RTL. The argument mode is redundant except in the case of a
517 + * const_int rtx. Select the section by calling readonly_data_ section
518 + * or one of the alternatives for other sections. align is the
519 + * constant alignment in bits.
520 + *
521 + * The default version of this function takes care of putting symbolic
522 + * constants in flag_ pic mode in data_section and everything else in
523 + * readonly_data_section.
524 + */
525 +//#undef TARGET_ASM_SELECT_RTX_SECTION
526 +//#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section
527 +
528 +
529 +/*
530 + * If non-null, this hook performs a target-specific pass over the
531 + * instruction stream. The compiler will run it at all optimization
532 + * levels, just before the point at which it normally does
533 + * delayed-branch scheduling.
534 + *
535 + * The exact purpose of the hook varies from target to target. Some
536 + * use it to do transformations that are necessary for correctness,
537 + * such as laying out in-function constant pools or avoiding hardware
538 + * hazards. Others use it as an opportunity to do some
539 + * machine-dependent optimizations.
540 + *
541 + * You need not implement the hook if it has nothing to do. The
542 + * default definition is null.
543 + */
544 +#undef TARGET_MACHINE_DEPENDENT_REORG
545 +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg
546 +
547 +/* Target hook for assembling integer objects.
548 + Need to handle integer vectors */
549 +static bool
550 +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p)
551 +{
552 + if (avr32_vector_mode_supported (GET_MODE (x)))
553 + {
554 + int i, units;
555 +
556 + if (GET_CODE (x) != CONST_VECTOR)
557 + abort ();
558 +
559 + units = CONST_VECTOR_NUNITS (x);
560 +
561 + switch (GET_MODE (x))
562 + {
563 + case V2HImode:
564 + size = 2;
565 + break;
566 + case V4QImode:
567 + size = 1;
568 + break;
569 + default:
570 + abort ();
571 + }
572 +
573 + for (i = 0; i < units; i++)
574 + {
575 + rtx elt;
576 +
577 + elt = CONST_VECTOR_ELT (x, i);
578 + assemble_integer (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1);
579 + }
580 +
581 + return true;
582 + }
583 +
584 + return default_assemble_integer (x, size, aligned_p);
585 +}
586 +
587 +
588 +/*
589 + * This target hook describes the relative costs of RTL expressions.
590 + *
591 + * The cost may depend on the precise form of the expression, which is
592 + * available for examination in x, and the rtx code of the expression
593 + * in which it is contained, found in outer_code. code is the
594 + * expression code--redundant, since it can be obtained with GET_CODE
595 + * (x).
596 + *
597 + * In implementing this hook, you can use the construct COSTS_N_INSNS
598 + * (n) to specify a cost equal to n fast instructions.
599 + *
600 + * On entry to the hook, *total contains a default estimate for the
601 + * cost of the expression. The hook should modify this value as
602 + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5)
603 + * for multiplications, COSTS_N_INSNS (7) for division and modulus
604 + * operations, and COSTS_N_INSNS (1) for all other operations.
605 + *
606 + * When optimizing for code size, i.e. when optimize_size is non-zero,
607 + * this target hook should be used to estimate the relative size cost
608 + * of an expression, again relative to COSTS_N_INSNS.
609 + *
610 + * The hook returns true when all subexpressions of x have been
611 + * processed, and false when rtx_cost should recurse.
612 + */
613 +
614 +/* Worker routine for avr32_rtx_costs. */
615 +static inline int
616 +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED,
617 + enum rtx_code outer ATTRIBUTE_UNUSED)
618 +{
619 + enum machine_mode mode = GET_MODE (x);
620 +
621 + switch (GET_CODE (x))
622 + {
623 + case MEM:
624 + /* Using pre decrement / post increment memory operations on the
625 + avr32_uc architecture means that two writebacks must be performed
626 + and hence two cycles are needed. */
627 + if (!optimize_size
628 + && GET_MODE_SIZE (mode) <= 2 * UNITS_PER_WORD
629 + && TARGET_ARCH_UC
630 + && (GET_CODE (XEXP (x, 0)) == PRE_DEC
631 + || GET_CODE (XEXP (x, 0)) == POST_INC))
632 + return COSTS_N_INSNS (5);
633 +
634 + /* Memory costs quite a lot for the first word, but subsequent words
635 + load at the equivalent of a single insn each. */
636 + if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
637 + return COSTS_N_INSNS (3 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD));
638 +
639 + return COSTS_N_INSNS (4);
640 + case SYMBOL_REF:
641 + case CONST:
642 + /* These are valid for the pseudo insns: lda.w and call which operates
643 + on direct addresses. We assume that the cost of a lda.w is the same
644 + as the cost of a ld.w insn. */
645 + return (outer == SET) ? COSTS_N_INSNS (4) : COSTS_N_INSNS (1);
646 + case DIV:
647 + case MOD:
648 + case UDIV:
649 + case UMOD:
650 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
651 +
652 + case ROTATE:
653 + case ROTATERT:
654 + if (mode == TImode)
655 + return COSTS_N_INSNS (100);
656 +
657 + if (mode == DImode)
658 + return COSTS_N_INSNS (10);
659 + return COSTS_N_INSNS (4);
660 + case ASHIFT:
661 + case LSHIFTRT:
662 + case ASHIFTRT:
663 + case NOT:
664 + if (mode == TImode)
665 + return COSTS_N_INSNS (10);
666 +
667 + if (mode == DImode)
668 + return COSTS_N_INSNS (4);
669 + return COSTS_N_INSNS (1);
670 + case PLUS:
671 + case MINUS:
672 + case NEG:
673 + case COMPARE:
674 + case ABS:
675 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
676 + return COSTS_N_INSNS (100);
677 +
678 + if (mode == TImode)
679 + return COSTS_N_INSNS (50);
680 +
681 + if (mode == DImode)
682 + return COSTS_N_INSNS (2);
683 + return COSTS_N_INSNS (1);
684 +
685 + case MULT:
686 + {
687 + if (GET_MODE_CLASS (mode) == MODE_FLOAT)
688 + return COSTS_N_INSNS (300);
689 +
690 + if (mode == TImode)
691 + return COSTS_N_INSNS (16);
692 +
693 + if (mode == DImode)
694 + return COSTS_N_INSNS (4);
695 +
696 + if (mode == HImode)
697 + return COSTS_N_INSNS (2);
698 +
699 + return COSTS_N_INSNS (3);
700 + }
701 + case IF_THEN_ELSE:
702 + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
703 + return COSTS_N_INSNS (4);
704 + return COSTS_N_INSNS (1);
705 + case SIGN_EXTEND:
706 + case ZERO_EXTEND:
707 + /* Sign/Zero extensions of registers cost quite much since these
708 + instrcutions only take one register operand which means that gcc
709 + often must insert some move instrcutions */
710 + if (mode == QImode || mode == HImode)
711 + return (COSTS_N_INSNS (GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1));
712 + return COSTS_N_INSNS (4);
713 + case UNSPEC:
714 + /* divmod operations */
715 + if (XINT (x, 1) == UNSPEC_UDIVMODSI4_INTERNAL
716 + || XINT (x, 1) == UNSPEC_DIVMODSI4_INTERNAL)
717 + {
718 + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS (16);
719 + }
720 + /* Fallthrough */
721 + default:
722 + return COSTS_N_INSNS (1);
723 + }
724 +}
725 +
726 +
727 +static bool
728 +avr32_rtx_costs (rtx x, int code, int outer_code, int *total)
729 +{
730 + *total = avr32_rtx_costs_1 (x, code, outer_code);
731 + return true;
732 +}
733 +
734 +
735 +bool
736 +avr32_cannot_force_const_mem (rtx x ATTRIBUTE_UNUSED)
737 +{
738 + /* Do not want symbols in the constant pool when compiling pic or if using
739 + address pseudo instructions. */
740 + return ((flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS)
741 + && avr32_find_symbol (x) != NULL_RTX);
742 +}
743 +
744 +
745 +/* Table of machine attributes. */
746 +const struct attribute_spec avr32_attribute_table[] = {
747 + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
748 + /* Interrupt Service Routines have special prologue and epilogue
749 + requirements. */
750 + {"isr", 0, 1, false, false, false, avr32_handle_isr_attribute},
751 + {"interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute},
752 + {"acall", 0, 1, false, true, true, avr32_handle_acall_attribute},
753 + {"naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute},
754 + {"rmw_addressable", 0, 0, true, false, false, NULL},
755 + {"flashvault", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
756 + {"flashvault_impl", 0, 1, true, false, false, avr32_handle_fndecl_attribute},
757 + {NULL, 0, 0, false, false, false, NULL}
758 +};
759 +
760 +
761 +typedef struct
762 +{
763 + const char *const arg;
764 + const unsigned long return_value;
765 +}
766 +isr_attribute_arg;
767 +
768 +
769 +static const isr_attribute_arg isr_attribute_args[] = {
770 + {"FULL", AVR32_FT_ISR_FULL},
771 + {"full", AVR32_FT_ISR_FULL},
772 + {"HALF", AVR32_FT_ISR_HALF},
773 + {"half", AVR32_FT_ISR_HALF},
774 + {"NONE", AVR32_FT_ISR_NONE},
775 + {"none", AVR32_FT_ISR_NONE},
776 + {"UNDEF", AVR32_FT_ISR_NONE},
777 + {"undef", AVR32_FT_ISR_NONE},
778 + {"SWI", AVR32_FT_ISR_NONE},
779 + {"swi", AVR32_FT_ISR_NONE},
780 + {NULL, AVR32_FT_ISR_NONE}
781 +};
782 +
783 +
784 +/* Returns the (interrupt) function type of the current
785 + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */
786 +static unsigned long
787 +avr32_isr_value (tree argument)
788 +{
789 + const isr_attribute_arg *ptr;
790 + const char *arg;
791 +
792 + /* No argument - default to ISR_NONE. */
793 + if (argument == NULL_TREE)
794 + return AVR32_FT_ISR_NONE;
795 +
796 + /* Get the value of the argument. */
797 + if (TREE_VALUE (argument) == NULL_TREE
798 + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
799 + return AVR32_FT_UNKNOWN;
800 +
801 + arg = TREE_STRING_POINTER (TREE_VALUE (argument));
802 +
803 + /* Check it against the list of known arguments. */
804 + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
805 + if (streq (arg, ptr->arg))
806 + return ptr->return_value;
807 +
808 + /* An unrecognized interrupt type. */
809 + return AVR32_FT_UNKNOWN;
810 +}
811 +
812 +
813 +/*
814 +These hooks specify assembly directives for creating certain kinds
815 +of integer object. The TARGET_ASM_BYTE_OP directive creates a
816 +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an
817 +aligned two-byte object, and so on. Any of the hooks may be
818 +NULL, indicating that no suitable directive is available.
819 +
820 +The compiler will print these strings at the start of a new line,
821 +followed immediately by the object's initial value. In most cases,
822 +the string should contain a tab, a pseudo-op, and then another tab.
823 +*/
824 +#undef TARGET_ASM_BYTE_OP
825 +#define TARGET_ASM_BYTE_OP "\t.byte\t"
826 +#undef TARGET_ASM_ALIGNED_HI_OP
827 +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t"
828 +#undef TARGET_ASM_ALIGNED_SI_OP
829 +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t"
830 +#undef TARGET_ASM_ALIGNED_DI_OP
831 +#define TARGET_ASM_ALIGNED_DI_OP NULL
832 +#undef TARGET_ASM_ALIGNED_TI_OP
833 +#define TARGET_ASM_ALIGNED_TI_OP NULL
834 +#undef TARGET_ASM_UNALIGNED_HI_OP
835 +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
836 +#undef TARGET_ASM_UNALIGNED_SI_OP
837 +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t"
838 +#undef TARGET_ASM_UNALIGNED_DI_OP
839 +#define TARGET_ASM_UNALIGNED_DI_OP NULL
840 +#undef TARGET_ASM_UNALIGNED_TI_OP
841 +#define TARGET_ASM_UNALIGNED_TI_OP NULL
842 +
843 +#undef TARGET_ASM_OUTPUT_MI_THUNK
844 +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk
845 +
846 +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
847 +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
848 +
849 +
850 +static void
851 +avr32_output_mi_thunk (FILE * file,
852 + tree thunk ATTRIBUTE_UNUSED,
853 + HOST_WIDE_INT delta,
854 + HOST_WIDE_INT vcall_offset, tree function)
855 + {
856 + int mi_delta = delta;
857 + int this_regno =
858 + (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ?
859 + INTERNAL_REGNUM (11) : INTERNAL_REGNUM (12));
860 +
861 +
862 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
863 + || vcall_offset)
864 + {
865 + fputs ("\tpushm\tlr\n", file);
866 + }
867 +
868 +
869 + if (mi_delta != 0)
870 + {
871 + if (avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21"))
872 + {
873 + fprintf (file, "\tsub\t%s, %d\n", reg_names[this_regno], -mi_delta);
874 + }
875 + else
876 + {
877 + /* Immediate is larger than k21 we must make us a temp register by
878 + pushing a register to the stack. */
879 + fprintf (file, "\tmov\tlr, lo(%d)\n", mi_delta);
880 + fprintf (file, "\torh\tlr, hi(%d)\n", mi_delta);
881 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
882 + }
883 + }
884 +
885 +
886 + if (vcall_offset != 0)
887 + {
888 + fprintf (file, "\tld.w\tlr, %s[0]\n", reg_names[this_regno]);
889 + fprintf (file, "\tld.w\tlr, lr[%i]\n", (int) vcall_offset);
890 + fprintf (file, "\tadd\t%s, lr\n", reg_names[this_regno]);
891 + }
892 +
893 +
894 + if (!avr32_const_ok_for_constraint_p (mi_delta, 'I', "Is21")
895 + || vcall_offset)
896 + {
897 + fputs ("\tpopm\tlr\n", file);
898 + }
899 +
900 + /* Jump to the function. We assume that we can use an rjmp since the
901 + function to jump to is local and probably not too far away from
902 + the thunk. If this assumption proves to be wrong we could implement
903 + this jump by calculating the offset between the jump source and destination
904 + and put this in the constant pool and then perform an add to pc.
905 + This would also be legitimate PIC code. But for now we hope that an rjmp
906 + will be sufficient...
907 + */
908 + fputs ("\trjmp\t", file);
909 + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
910 + fputc ('\n', file);
911 + }
912 +
913 +
914 +/* Implements target hook vector_mode_supported. */
915 +bool
916 +avr32_vector_mode_supported (enum machine_mode mode)
917 +{
918 + if ((mode == V2HImode) || (mode == V4QImode))
919 + return true;
920 +
921 + return false;
922 +}
923 +
924 +
925 +#undef TARGET_INIT_LIBFUNCS
926 +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs
927 +
928 +#undef TARGET_INIT_BUILTINS
929 +#define TARGET_INIT_BUILTINS avr32_init_builtins
930 +
931 +#undef TARGET_EXPAND_BUILTIN
932 +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin
933 +
934 +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int,
935 + void_ftype_ptr_int;
936 +tree void_ftype_int, void_ftype_ulong, void_ftype_void, int_ftype_ptr_int;
937 +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short,
938 + short_ftype_short_short;
939 +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short;
940 +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int;
941 +tree longlong_ftype_int_int, void_ftype_int_int_longlong;
942 +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short;
943 +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short;
944 +
945 +#define def_builtin(NAME, TYPE, CODE) \
946 + add_builtin_function ((NAME), (TYPE), (CODE), \
947 + BUILT_IN_MD, NULL, NULL_TREE)
948 +
949 +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \
950 + do \
951 + { \
952 + if ((MASK)) \
953 + add_builtin_function ((NAME), (TYPE), (CODE), \
954 + BUILT_IN_MD, NULL, NULL_TREE); \
955 + } \
956 + while (0)
957 +
958 +struct builtin_description
959 +{
960 + const unsigned int mask;
961 + const enum insn_code icode;
962 + const char *const name;
963 + const int code;
964 + const enum rtx_code comparison;
965 + const unsigned int flag;
966 + const tree *ftype;
967 +};
968 +
969 +static const struct builtin_description bdesc_2arg[] = {
970 +
971 +#define DSP_BUILTIN(code, builtin, ftype) \
972 + { 1, CODE_FOR_##code, "__builtin_" #code , \
973 + AVR32_BUILTIN_##builtin, 0, 0, ftype }
974 +
975 + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short),
976 + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short),
977 + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short),
978 + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short),
979 + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short),
980 + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short),
981 + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short),
982 + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int),
983 + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int),
984 + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short),
985 + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short)
986 +};
987 +
988 +
989 +void
990 +avr32_init_builtins (void)
991 +{
992 + unsigned int i;
993 + const struct builtin_description *d;
994 + tree endlink = void_list_node;
995 + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
996 + tree longlong_endlink =
997 + tree_cons (NULL_TREE, long_long_integer_type_node, endlink);
998 + tree short_endlink =
999 + tree_cons (NULL_TREE, short_integer_type_node, endlink);
1000 + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink);
1001 +
1002 + /* int func (int) */
1003 + int_ftype_int = build_function_type (integer_type_node, int_endlink);
1004 +
1005 + /* short func (short) */
1006 + short_ftype_short
1007 + = build_function_type (short_integer_type_node, short_endlink);
1008 +
1009 + /* short func (short, short) */
1010 + short_ftype_short_short
1011 + = build_function_type (short_integer_type_node,
1012 + tree_cons (NULL_TREE, short_integer_type_node,
1013 + short_endlink));
1014 +
1015 + /* long long func (long long, short, short) */
1016 + longlong_ftype_longlong_short_short
1017 + = build_function_type (long_long_integer_type_node,
1018 + tree_cons (NULL_TREE, long_long_integer_type_node,
1019 + tree_cons (NULL_TREE,
1020 + short_integer_type_node,
1021 + short_endlink)));
1022 +
1023 + /* long long func (short, short) */
1024 + longlong_ftype_short_short
1025 + = build_function_type (long_long_integer_type_node,
1026 + tree_cons (NULL_TREE, short_integer_type_node,
1027 + short_endlink));
1028 +
1029 + /* int func (int, int) */
1030 + int_ftype_int_int
1031 + = build_function_type (integer_type_node,
1032 + tree_cons (NULL_TREE, integer_type_node,
1033 + int_endlink));
1034 +
1035 + /* long long func (int, int) */
1036 + longlong_ftype_int_int
1037 + = build_function_type (long_long_integer_type_node,
1038 + tree_cons (NULL_TREE, integer_type_node,
1039 + int_endlink));
1040 +
1041 + /* long long int func (long long, int, short) */
1042 + longlong_ftype_longlong_int_short
1043 + = build_function_type (long_long_integer_type_node,
1044 + tree_cons (NULL_TREE, long_long_integer_type_node,
1045 + tree_cons (NULL_TREE, integer_type_node,
1046 + short_endlink)));
1047 +
1048 + /* long long int func (int, short) */
1049 + longlong_ftype_int_short
1050 + = build_function_type (long_long_integer_type_node,
1051 + tree_cons (NULL_TREE, integer_type_node,
1052 + short_endlink));
1053 +
1054 + /* int func (int, short, short) */
1055 + int_ftype_int_short_short
1056 + = build_function_type (integer_type_node,
1057 + tree_cons (NULL_TREE, integer_type_node,
1058 + tree_cons (NULL_TREE,
1059 + short_integer_type_node,
1060 + short_endlink)));
1061 +
1062 + /* int func (short, short) */
1063 + int_ftype_short_short
1064 + = build_function_type (integer_type_node,
1065 + tree_cons (NULL_TREE, short_integer_type_node,
1066 + short_endlink));
1067 +
1068 + /* int func (int, short) */
1069 + int_ftype_int_short
1070 + = build_function_type (integer_type_node,
1071 + tree_cons (NULL_TREE, integer_type_node,
1072 + short_endlink));
1073 +
1074 + /* void func (int, int) */
1075 + void_ftype_int_int
1076 + = build_function_type (void_type_node,
1077 + tree_cons (NULL_TREE, integer_type_node,
1078 + int_endlink));
1079 +
1080 + /* void func (int, int, int) */
1081 + void_ftype_int_int_int
1082 + = build_function_type (void_type_node,
1083 + tree_cons (NULL_TREE, integer_type_node,
1084 + tree_cons (NULL_TREE, integer_type_node,
1085 + int_endlink)));
1086 +
1087 + /* void func (int, int, long long) */
1088 + void_ftype_int_int_longlong
1089 + = build_function_type (void_type_node,
1090 + tree_cons (NULL_TREE, integer_type_node,
1091 + tree_cons (NULL_TREE, integer_type_node,
1092 + longlong_endlink)));
1093 +
1094 + /* void func (int, int, int, int, int) */
1095 + void_ftype_int_int_int_int_int
1096 + = build_function_type (void_type_node,
1097 + tree_cons (NULL_TREE, integer_type_node,
1098 + tree_cons (NULL_TREE, integer_type_node,
1099 + tree_cons (NULL_TREE,
1100 + integer_type_node,
1101 + tree_cons
1102 + (NULL_TREE,
1103 + integer_type_node,
1104 + int_endlink)))));
1105 +
1106 + /* void func (void *, int) */
1107 + void_ftype_ptr_int
1108 + = build_function_type (void_type_node,
1109 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1110 +
1111 + /* void func (int) */
1112 + void_ftype_int = build_function_type (void_type_node, int_endlink);
1113 +
1114 + /* void func (ulong) */
1115 + void_ftype_ulong = build_function_type_list (void_type_node,
1116 + long_unsigned_type_node, NULL_TREE);
1117 +
1118 + /* void func (void) */
1119 + void_ftype_void = build_function_type (void_type_node, void_endlink);
1120 +
1121 + /* int func (void) */
1122 + int_ftype_void = build_function_type (integer_type_node, void_endlink);
1123 +
1124 + /* int func (void *, int) */
1125 + int_ftype_ptr_int
1126 + = build_function_type (integer_type_node,
1127 + tree_cons (NULL_TREE, ptr_type_node, int_endlink));
1128 +
1129 + /* int func (int, int, int) */
1130 + int_ftype_int_int_int
1131 + = build_function_type (integer_type_node,
1132 + tree_cons (NULL_TREE, integer_type_node,
1133 + tree_cons (NULL_TREE, integer_type_node,
1134 + int_endlink)));
1135 +
1136 + /* Initialize avr32 builtins. */
1137 + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR);
1138 + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR);
1139 + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR);
1140 + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR);
1141 + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE);
1142 + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC);
1143 + def_builtin ("__builtin_ssrf", void_ftype_int, AVR32_BUILTIN_SSRF);
1144 + def_builtin ("__builtin_csrf", void_ftype_int, AVR32_BUILTIN_CSRF);
1145 + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR);
1146 + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS);
1147 + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW);
1148 + def_builtin ("__builtin_breakpoint", void_ftype_void,
1149 + AVR32_BUILTIN_BREAKPOINT);
1150 + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG);
1151 + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI);
1152 + def_builtin ("__builtin_bswap_16", short_ftype_short,
1153 + AVR32_BUILTIN_BSWAP16);
1154 + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32);
1155 + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int,
1156 + AVR32_BUILTIN_COP);
1157 + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W);
1158 + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int,
1159 + AVR32_BUILTIN_MVRC_W);
1160 + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int,
1161 + AVR32_BUILTIN_MVCR_D);
1162 + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong,
1163 + AVR32_BUILTIN_MVRC_D);
1164 + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS);
1165 + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU);
1166 + def_builtin ("__builtin_satrnds", int_ftype_int_int_int,
1167 + AVR32_BUILTIN_SATRNDS);
1168 + def_builtin ("__builtin_satrndu", int_ftype_int_int_int,
1169 + AVR32_BUILTIN_SATRNDU);
1170 + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR);
1171 + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR);
1172 + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short,
1173 + AVR32_BUILTIN_MACSATHH_W);
1174 + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short,
1175 + AVR32_BUILTIN_MACWH_D);
1176 + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short,
1177 + AVR32_BUILTIN_MACHH_D);
1178 + def_builtin ("__builtin_mems", void_ftype_ptr_int, AVR32_BUILTIN_MEMS);
1179 + def_builtin ("__builtin_memt", void_ftype_ptr_int, AVR32_BUILTIN_MEMT);
1180 + def_builtin ("__builtin_memc", void_ftype_ptr_int, AVR32_BUILTIN_MEMC);
1181 + def_builtin ("__builtin_sleep", void_ftype_int, AVR32_BUILTIN_SLEEP);
1182 + def_builtin ("__builtin_avr32_delay_cycles", void_ftype_int, AVR32_BUILTIN_DELAY_CYCLES);
1183 +
1184 + /* Add all builtins that are more or less simple operations on two
1185 + operands. */
1186 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
1187 + {
1188 + /* Use one of the operands; the target can have a different mode for
1189 + mask-generating compares. */
1190 +
1191 + if (d->name == 0)
1192 + continue;
1193 +
1194 + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code);
1195 + }
1196 +}
1197 +
1198 +
1199 +/* Subroutine of avr32_expand_builtin to take care of binop insns. */
1200 +static rtx
1201 +avr32_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
1202 +{
1203 + rtx pat;
1204 + tree arg0 = CALL_EXPR_ARG (exp,0);
1205 + tree arg1 = CALL_EXPR_ARG (exp,1);
1206 + rtx op0 = expand_normal (arg0);
1207 + rtx op1 = expand_normal (arg1);
1208 + enum machine_mode tmode = insn_data[icode].operand[0].mode;
1209 + enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1210 + enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1211 +
1212 + if (!target
1213 + || GET_MODE (target) != tmode
1214 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1215 + target = gen_reg_rtx (tmode);
1216 +
1217 + /* In case the insn wants input operands in modes different from the
1218 + result, abort. */
1219 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1220 + {
1221 + /* If op0 is already a reg we must cast it to the correct mode. */
1222 + if (REG_P (op0))
1223 + op0 = convert_to_mode (mode0, op0, 1);
1224 + else
1225 + op0 = copy_to_mode_reg (mode0, op0);
1226 + }
1227 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1228 + {
1229 + /* If op1 is already a reg we must cast it to the correct mode. */
1230 + if (REG_P (op1))
1231 + op1 = convert_to_mode (mode1, op1, 1);
1232 + else
1233 + op1 = copy_to_mode_reg (mode1, op1);
1234 + }
1235 + pat = GEN_FCN (icode) (target, op0, op1);
1236 + if (!pat)
1237 + return 0;
1238 + emit_insn (pat);
1239 + return target;
1240 +}
1241 +
1242 +
1243 +/* Expand an expression EXP that calls a built-in function,
1244 + with result going to TARGET if that's convenient
1245 + (and in mode MODE if that's convenient).
1246 + SUBTARGET may be used as the target for computing one of EXP's operands.
1247 + IGNORE is nonzero if the value is to be ignored. */
1248 +rtx
1249 +avr32_expand_builtin (tree exp,
1250 + rtx target,
1251 + rtx subtarget ATTRIBUTE_UNUSED,
1252 + enum machine_mode mode ATTRIBUTE_UNUSED,
1253 + int ignore ATTRIBUTE_UNUSED)
1254 +{
1255 + const struct builtin_description *d;
1256 + unsigned int i;
1257 + enum insn_code icode = 0;
1258 + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
1259 + tree arg0, arg1, arg2;
1260 + rtx op0, op1, op2, pat;
1261 + enum machine_mode tmode, mode0, mode1;
1262 + enum machine_mode arg0_mode;
1263 + int fcode = DECL_FUNCTION_CODE (fndecl);
1264 +
1265 + switch (fcode)
1266 + {
1267 + default:
1268 + break;
1269 +
1270 + case AVR32_BUILTIN_SATS:
1271 + case AVR32_BUILTIN_SATU:
1272 + case AVR32_BUILTIN_SATRNDS:
1273 + case AVR32_BUILTIN_SATRNDU:
1274 + {
1275 + const char *fname;
1276 + switch (fcode)
1277 + {
1278 + default:
1279 + case AVR32_BUILTIN_SATS:
1280 + icode = CODE_FOR_sats;
1281 + fname = "sats";
1282 + break;
1283 + case AVR32_BUILTIN_SATU:
1284 + icode = CODE_FOR_satu;
1285 + fname = "satu";
1286 + break;
1287 + case AVR32_BUILTIN_SATRNDS:
1288 + icode = CODE_FOR_satrnds;
1289 + fname = "satrnds";
1290 + break;
1291 + case AVR32_BUILTIN_SATRNDU:
1292 + icode = CODE_FOR_satrndu;
1293 + fname = "satrndu";
1294 + break;
1295 + }
1296 +
1297 + arg0 = CALL_EXPR_ARG (exp,0);
1298 + arg1 = CALL_EXPR_ARG (exp,1);
1299 + arg2 = CALL_EXPR_ARG (exp,2);
1300 + op0 = expand_normal (arg0);
1301 + op1 = expand_normal (arg1);
1302 + op2 = expand_normal (arg2);
1303 +
1304 + tmode = insn_data[icode].operand[0].mode;
1305 +
1306 +
1307 + if (target == 0
1308 + || GET_MODE (target) != tmode
1309 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1310 + target = gen_reg_rtx (tmode);
1311 +
1312 +
1313 + if (!(*insn_data[icode].operand[0].predicate) (op0, GET_MODE (op0)))
1314 + {
1315 + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0);
1316 + }
1317 +
1318 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1319 + {
1320 + error ("Parameter 2 to __builtin_%s should be a constant number.",
1321 + fname);
1322 + return NULL_RTX;
1323 + }
1324 +
1325 + if (!(*insn_data[icode].operand[1].predicate) (op2, SImode))
1326 + {
1327 + error ("Parameter 3 to __builtin_%s should be a constant number.",
1328 + fname);
1329 + return NULL_RTX;
1330 + }
1331 +
1332 + emit_move_insn (target, op0);
1333 + pat = GEN_FCN (icode) (target, op1, op2);
1334 + if (!pat)
1335 + return 0;
1336 + emit_insn (pat);
1337 +
1338 + return target;
1339 + }
1340 + case AVR32_BUILTIN_MUSTR:
1341 + icode = CODE_FOR_mustr;
1342 + tmode = insn_data[icode].operand[0].mode;
1343 +
1344 + if (target == 0
1345 + || GET_MODE (target) != tmode
1346 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1347 + target = gen_reg_rtx (tmode);
1348 + pat = GEN_FCN (icode) (target);
1349 + if (!pat)
1350 + return 0;
1351 + emit_insn (pat);
1352 + return target;
1353 +
1354 + case AVR32_BUILTIN_MFSR:
1355 + icode = CODE_FOR_mfsr;
1356 + arg0 = CALL_EXPR_ARG (exp,0);
1357 + op0 = expand_normal (arg0);
1358 + tmode = insn_data[icode].operand[0].mode;
1359 + mode0 = insn_data[icode].operand[1].mode;
1360 +
1361 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1362 + {
1363 + error ("Parameter 1 to __builtin_mfsr must be a constant number");
1364 + }
1365 +
1366 + if (target == 0
1367 + || GET_MODE (target) != tmode
1368 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1369 + target = gen_reg_rtx (tmode);
1370 + pat = GEN_FCN (icode) (target, op0);
1371 + if (!pat)
1372 + return 0;
1373 + emit_insn (pat);
1374 + return target;
1375 + case AVR32_BUILTIN_MTSR:
1376 + icode = CODE_FOR_mtsr;
1377 + arg0 = CALL_EXPR_ARG (exp,0);
1378 + arg1 = CALL_EXPR_ARG (exp,1);
1379 + op0 = expand_normal (arg0);
1380 + op1 = expand_normal (arg1);
1381 + mode0 = insn_data[icode].operand[0].mode;
1382 + mode1 = insn_data[icode].operand[1].mode;
1383 +
1384 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1385 + {
1386 + error ("Parameter 1 to __builtin_mtsr must be a constant number");
1387 + return gen_reg_rtx (mode0);
1388 + }
1389 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1390 + op1 = copy_to_mode_reg (mode1, op1);
1391 + pat = GEN_FCN (icode) (op0, op1);
1392 + if (!pat)
1393 + return 0;
1394 + emit_insn (pat);
1395 + return NULL_RTX;
1396 + case AVR32_BUILTIN_MFDR:
1397 + icode = CODE_FOR_mfdr;
1398 + arg0 = CALL_EXPR_ARG (exp,0);
1399 + op0 = expand_normal (arg0);
1400 + tmode = insn_data[icode].operand[0].mode;
1401 + mode0 = insn_data[icode].operand[1].mode;
1402 +
1403 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1404 + {
1405 + error ("Parameter 1 to __builtin_mfdr must be a constant number");
1406 + }
1407 +
1408 + if (target == 0
1409 + || GET_MODE (target) != tmode
1410 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1411 + target = gen_reg_rtx (tmode);
1412 + pat = GEN_FCN (icode) (target, op0);
1413 + if (!pat)
1414 + return 0;
1415 + emit_insn (pat);
1416 + return target;
1417 + case AVR32_BUILTIN_MTDR:
1418 + icode = CODE_FOR_mtdr;
1419 + arg0 = CALL_EXPR_ARG (exp,0);
1420 + arg1 = CALL_EXPR_ARG (exp,1);
1421 + op0 = expand_normal (arg0);
1422 + op1 = expand_normal (arg1);
1423 + mode0 = insn_data[icode].operand[0].mode;
1424 + mode1 = insn_data[icode].operand[1].mode;
1425 +
1426 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1427 + {
1428 + error ("Parameter 1 to __builtin_mtdr must be a constant number");
1429 + return gen_reg_rtx (mode0);
1430 + }
1431 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1432 + op1 = copy_to_mode_reg (mode1, op1);
1433 + pat = GEN_FCN (icode) (op0, op1);
1434 + if (!pat)
1435 + return 0;
1436 + emit_insn (pat);
1437 + return NULL_RTX;
1438 + case AVR32_BUILTIN_CACHE:
1439 + icode = CODE_FOR_cache;
1440 + arg0 = CALL_EXPR_ARG (exp,0);
1441 + arg1 = CALL_EXPR_ARG (exp,1);
1442 + op0 = expand_normal (arg0);
1443 + op1 = expand_normal (arg1);
1444 + mode0 = insn_data[icode].operand[0].mode;
1445 + mode1 = insn_data[icode].operand[1].mode;
1446 +
1447 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode1))
1448 + {
1449 + error ("Parameter 2 to __builtin_cache must be a constant number");
1450 + return gen_reg_rtx (mode1);
1451 + }
1452 +
1453 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1454 + op0 = copy_to_mode_reg (mode0, op0);
1455 +
1456 + pat = GEN_FCN (icode) (op0, op1);
1457 + if (!pat)
1458 + return 0;
1459 + emit_insn (pat);
1460 + return NULL_RTX;
1461 + case AVR32_BUILTIN_SYNC:
1462 + case AVR32_BUILTIN_MUSFR:
1463 + case AVR32_BUILTIN_SSRF:
1464 + case AVR32_BUILTIN_CSRF:
1465 + {
1466 + const char *fname;
1467 + switch (fcode)
1468 + {
1469 + default:
1470 + case AVR32_BUILTIN_SYNC:
1471 + icode = CODE_FOR_sync;
1472 + fname = "sync";
1473 + break;
1474 + case AVR32_BUILTIN_MUSFR:
1475 + icode = CODE_FOR_musfr;
1476 + fname = "musfr";
1477 + break;
1478 + case AVR32_BUILTIN_SSRF:
1479 + icode = CODE_FOR_ssrf;
1480 + fname = "ssrf";
1481 + break;
1482 + case AVR32_BUILTIN_CSRF:
1483 + icode = CODE_FOR_csrf;
1484 + fname = "csrf";
1485 + break;
1486 + }
1487 +
1488 + arg0 = CALL_EXPR_ARG (exp,0);
1489 + op0 = expand_normal (arg0);
1490 + mode0 = insn_data[icode].operand[0].mode;
1491 +
1492 + if (!(*insn_data[icode].operand[0].predicate) (op0, mode0))
1493 + {
1494 + if (icode == CODE_FOR_musfr)
1495 + op0 = copy_to_mode_reg (mode0, op0);
1496 + else
1497 + {
1498 + error ("Parameter to __builtin_%s is illegal.", fname);
1499 + return gen_reg_rtx (mode0);
1500 + }
1501 + }
1502 + pat = GEN_FCN (icode) (op0);
1503 + if (!pat)
1504 + return 0;
1505 + emit_insn (pat);
1506 + return NULL_RTX;
1507 + }
1508 + case AVR32_BUILTIN_TLBR:
1509 + icode = CODE_FOR_tlbr;
1510 + pat = GEN_FCN (icode) (NULL_RTX);
1511 + if (!pat)
1512 + return 0;
1513 + emit_insn (pat);
1514 + return NULL_RTX;
1515 + case AVR32_BUILTIN_TLBS:
1516 + icode = CODE_FOR_tlbs;
1517 + pat = GEN_FCN (icode) (NULL_RTX);
1518 + if (!pat)
1519 + return 0;
1520 + emit_insn (pat);
1521 + return NULL_RTX;
1522 + case AVR32_BUILTIN_TLBW:
1523 + icode = CODE_FOR_tlbw;
1524 + pat = GEN_FCN (icode) (NULL_RTX);
1525 + if (!pat)
1526 + return 0;
1527 + emit_insn (pat);
1528 + return NULL_RTX;
1529 + case AVR32_BUILTIN_BREAKPOINT:
1530 + icode = CODE_FOR_breakpoint;
1531 + pat = GEN_FCN (icode) (NULL_RTX);
1532 + if (!pat)
1533 + return 0;
1534 + emit_insn (pat);
1535 + return NULL_RTX;
1536 + case AVR32_BUILTIN_XCHG:
1537 + icode = CODE_FOR_sync_lock_test_and_setsi;
1538 + arg0 = CALL_EXPR_ARG (exp,0);
1539 + arg1 = CALL_EXPR_ARG (exp,1);
1540 + op0 = expand_normal (arg0);
1541 + op1 = expand_normal (arg1);
1542 + tmode = insn_data[icode].operand[0].mode;
1543 + mode0 = insn_data[icode].operand[1].mode;
1544 + mode1 = insn_data[icode].operand[2].mode;
1545 +
1546 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1547 + {
1548 + op1 = copy_to_mode_reg (mode1, op1);
1549 + }
1550 +
1551 + op0 = force_reg (GET_MODE (op0), op0);
1552 + op0 = gen_rtx_MEM (GET_MODE (op0), op0);
1553 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1554 + {
1555 + error
1556 + ("Parameter 1 to __builtin_xchg must be a pointer to an integer.");
1557 + }
1558 +
1559 + if (target == 0
1560 + || GET_MODE (target) != tmode
1561 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1562 + target = gen_reg_rtx (tmode);
1563 + pat = GEN_FCN (icode) (target, op0, op1);
1564 + if (!pat)
1565 + return 0;
1566 + emit_insn (pat);
1567 + return target;
1568 + case AVR32_BUILTIN_LDXI:
1569 + icode = CODE_FOR_ldxi;
1570 + arg0 = CALL_EXPR_ARG (exp,0);
1571 + arg1 = CALL_EXPR_ARG (exp,1);
1572 + arg2 = CALL_EXPR_ARG (exp,2);
1573 + op0 = expand_normal (arg0);
1574 + op1 = expand_normal (arg1);
1575 + op2 = expand_normal (arg2);
1576 + tmode = insn_data[icode].operand[0].mode;
1577 + mode0 = insn_data[icode].operand[1].mode;
1578 + mode1 = insn_data[icode].operand[2].mode;
1579 +
1580 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1581 + {
1582 + op0 = copy_to_mode_reg (mode0, op0);
1583 + }
1584 +
1585 + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
1586 + {
1587 + op1 = copy_to_mode_reg (mode1, op1);
1588 + }
1589 +
1590 + if (!(*insn_data[icode].operand[3].predicate) (op2, SImode))
1591 + {
1592 + error
1593 + ("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)");
1594 + return gen_reg_rtx (mode0);
1595 + }
1596 +
1597 + if (target == 0
1598 + || GET_MODE (target) != tmode
1599 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1600 + target = gen_reg_rtx (tmode);
1601 + pat = GEN_FCN (icode) (target, op0, op1, op2);
1602 + if (!pat)
1603 + return 0;
1604 + emit_insn (pat);
1605 + return target;
1606 + case AVR32_BUILTIN_BSWAP16:
1607 + {
1608 + icode = CODE_FOR_bswap_16;
1609 + arg0 = CALL_EXPR_ARG (exp,0);
1610 + arg0_mode = TYPE_MODE (TREE_TYPE (arg0));
1611 + mode0 = insn_data[icode].operand[1].mode;
1612 + if (arg0_mode != mode0)
1613 + arg0 = build1 (NOP_EXPR,
1614 + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0);
1615 +
1616 + op0 = expand_expr (arg0, NULL_RTX, HImode, 0);
1617 + tmode = insn_data[icode].operand[0].mode;
1618 +
1619 +
1620 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1621 + {
1622 + if ( CONST_INT_P (op0) )
1623 + {
1624 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x00ff) << 8) |
1625 + ((INTVAL (op0)&0xff00) >> 8) );
1626 + /* Sign extend 16-bit value to host wide int */
1627 + val <<= (HOST_BITS_PER_WIDE_INT - 16);
1628 + val >>= (HOST_BITS_PER_WIDE_INT - 16);
1629 + op0 = GEN_INT(val);
1630 + if (target == 0
1631 + || GET_MODE (target) != tmode
1632 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1633 + target = gen_reg_rtx (tmode);
1634 + emit_move_insn(target, op0);
1635 + return target;
1636 + }
1637 + else
1638 + op0 = copy_to_mode_reg (mode0, op0);
1639 + }
1640 +
1641 + if (target == 0
1642 + || GET_MODE (target) != tmode
1643 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1644 + {
1645 + target = gen_reg_rtx (tmode);
1646 + }
1647 +
1648 +
1649 + pat = GEN_FCN (icode) (target, op0);
1650 + if (!pat)
1651 + return 0;
1652 + emit_insn (pat);
1653 +
1654 + return target;
1655 + }
1656 + case AVR32_BUILTIN_BSWAP32:
1657 + {
1658 + icode = CODE_FOR_bswap_32;
1659 + arg0 = CALL_EXPR_ARG (exp,0);
1660 + op0 = expand_normal (arg0);
1661 + tmode = insn_data[icode].operand[0].mode;
1662 + mode0 = insn_data[icode].operand[1].mode;
1663 +
1664 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1665 + {
1666 + if ( CONST_INT_P (op0) )
1667 + {
1668 + HOST_WIDE_INT val = ( ((INTVAL (op0)&0x000000ff) << 24) |
1669 + ((INTVAL (op0)&0x0000ff00) << 8) |
1670 + ((INTVAL (op0)&0x00ff0000) >> 8) |
1671 + ((INTVAL (op0)&0xff000000) >> 24) );
1672 + /* Sign extend 32-bit value to host wide int */
1673 + val <<= (HOST_BITS_PER_WIDE_INT - 32);
1674 + val >>= (HOST_BITS_PER_WIDE_INT - 32);
1675 + op0 = GEN_INT(val);
1676 + if (target == 0
1677 + || GET_MODE (target) != tmode
1678 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1679 + target = gen_reg_rtx (tmode);
1680 + emit_move_insn(target, op0);
1681 + return target;
1682 + }
1683 + else
1684 + op0 = copy_to_mode_reg (mode0, op0);
1685 + }
1686 +
1687 + if (target == 0
1688 + || GET_MODE (target) != tmode
1689 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1690 + target = gen_reg_rtx (tmode);
1691 +
1692 +
1693 + pat = GEN_FCN (icode) (target, op0);
1694 + if (!pat)
1695 + return 0;
1696 + emit_insn (pat);
1697 +
1698 + return target;
1699 + }
1700 + case AVR32_BUILTIN_MVCR_W:
1701 + case AVR32_BUILTIN_MVCR_D:
1702 + {
1703 + arg0 = CALL_EXPR_ARG (exp,0);
1704 + arg1 = CALL_EXPR_ARG (exp,1);
1705 + op0 = expand_normal (arg0);
1706 + op1 = expand_normal (arg1);
1707 +
1708 + if (fcode == AVR32_BUILTIN_MVCR_W)
1709 + icode = CODE_FOR_mvcrsi;
1710 + else
1711 + icode = CODE_FOR_mvcrdi;
1712 +
1713 + tmode = insn_data[icode].operand[0].mode;
1714 +
1715 + if (target == 0
1716 + || GET_MODE (target) != tmode
1717 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1718 + target = gen_reg_rtx (tmode);
1719 +
1720 + if (!(*insn_data[icode].operand[1].predicate) (op0, SImode))
1721 + {
1722 + error
1723 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1724 + error ("Number should be between 0 and 7.");
1725 + return NULL_RTX;
1726 + }
1727 +
1728 + if (!(*insn_data[icode].operand[2].predicate) (op1, SImode))
1729 + {
1730 + error
1731 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1732 + error ("Number should be between 0 and 15.");
1733 + return NULL_RTX;
1734 + }
1735 +
1736 + pat = GEN_FCN (icode) (target, op0, op1);
1737 + if (!pat)
1738 + return 0;
1739 + emit_insn (pat);
1740 +
1741 + return target;
1742 + }
1743 + case AVR32_BUILTIN_MACSATHH_W:
1744 + case AVR32_BUILTIN_MACWH_D:
1745 + case AVR32_BUILTIN_MACHH_D:
1746 + {
1747 + arg0 = CALL_EXPR_ARG (exp,0);
1748 + arg1 = CALL_EXPR_ARG (exp,1);
1749 + arg2 = CALL_EXPR_ARG (exp,2);
1750 + op0 = expand_normal (arg0);
1751 + op1 = expand_normal (arg1);
1752 + op2 = expand_normal (arg2);
1753 +
1754 + icode = ((fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w :
1755 + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d :
1756 + CODE_FOR_machh_d);
1757 +
1758 + tmode = insn_data[icode].operand[0].mode;
1759 + mode0 = insn_data[icode].operand[1].mode;
1760 + mode1 = insn_data[icode].operand[2].mode;
1761 +
1762 +
1763 + if (!target
1764 + || GET_MODE (target) != tmode
1765 + || !(*insn_data[icode].operand[0].predicate) (target, tmode))
1766 + target = gen_reg_rtx (tmode);
1767 +
1768 + if (!(*insn_data[icode].operand[0].predicate) (op0, tmode))
1769 + {
1770 + /* If op0 is already a reg we must cast it to the correct mode. */
1771 + if (REG_P (op0))
1772 + op0 = convert_to_mode (tmode, op0, 1);
1773 + else
1774 + op0 = copy_to_mode_reg (tmode, op0);
1775 + }
1776 +
1777 + if (!(*insn_data[icode].operand[1].predicate) (op1, mode0))
1778 + {
1779 + /* If op1 is already a reg we must cast it to the correct mode. */
1780 + if (REG_P (op1))
1781 + op1 = convert_to_mode (mode0, op1, 1);
1782 + else
1783 + op1 = copy_to_mode_reg (mode0, op1);
1784 + }
1785 +
1786 + if (!(*insn_data[icode].operand[2].predicate) (op2, mode1))
1787 + {
1788 + /* If op1 is already a reg we must cast it to the correct mode. */
1789 + if (REG_P (op2))
1790 + op2 = convert_to_mode (mode1, op2, 1);
1791 + else
1792 + op2 = copy_to_mode_reg (mode1, op2);
1793 + }
1794 +
1795 + emit_move_insn (target, op0);
1796 +
1797 + pat = GEN_FCN (icode) (target, op1, op2);
1798 + if (!pat)
1799 + return 0;
1800 + emit_insn (pat);
1801 + return target;
1802 + }
1803 + case AVR32_BUILTIN_MVRC_W:
1804 + case AVR32_BUILTIN_MVRC_D:
1805 + {
1806 + arg0 = CALL_EXPR_ARG (exp,0);
1807 + arg1 = CALL_EXPR_ARG (exp,1);
1808 + arg2 = CALL_EXPR_ARG (exp,2);
1809 + op0 = expand_normal (arg0);
1810 + op1 = expand_normal (arg1);
1811 + op2 = expand_normal (arg2);
1812 +
1813 + if (fcode == AVR32_BUILTIN_MVRC_W)
1814 + icode = CODE_FOR_mvrcsi;
1815 + else
1816 + icode = CODE_FOR_mvrcdi;
1817 +
1818 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1819 + {
1820 + error ("Parameter 1 is not a valid coprocessor number.");
1821 + error ("Number should be between 0 and 7.");
1822 + return NULL_RTX;
1823 + }
1824 +
1825 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1826 + {
1827 + error ("Parameter 2 is not a valid coprocessor register number.");
1828 + error ("Number should be between 0 and 15.");
1829 + return NULL_RTX;
1830 + }
1831 +
1832 + if (GET_CODE (op2) == CONST_INT
1833 + || GET_CODE (op2) == CONST
1834 + || GET_CODE (op2) == SYMBOL_REF || GET_CODE (op2) == LABEL_REF)
1835 + {
1836 + op2 = force_const_mem (insn_data[icode].operand[2].mode, op2);
1837 + }
1838 +
1839 + if (!(*insn_data[icode].operand[2].predicate) (op2, GET_MODE (op2)))
1840 + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
1841 +
1842 +
1843 + pat = GEN_FCN (icode) (op0, op1, op2);
1844 + if (!pat)
1845 + return 0;
1846 + emit_insn (pat);
1847 +
1848 + return NULL_RTX;
1849 + }
1850 + case AVR32_BUILTIN_COP:
1851 + {
1852 + rtx op3, op4;
1853 + tree arg3, arg4;
1854 + icode = CODE_FOR_cop;
1855 + arg0 = CALL_EXPR_ARG (exp,0);
1856 + arg1 = CALL_EXPR_ARG (exp,1);
1857 + arg2 = CALL_EXPR_ARG (exp,2);
1858 + arg3 = CALL_EXPR_ARG (exp,3);
1859 + arg4 = CALL_EXPR_ARG (exp,4);
1860 + op0 = expand_normal (arg0);
1861 + op1 = expand_normal (arg1);
1862 + op2 = expand_normal (arg2);
1863 + op3 = expand_normal (arg3);
1864 + op4 = expand_normal (arg4);
1865 +
1866 + if (!(*insn_data[icode].operand[0].predicate) (op0, SImode))
1867 + {
1868 + error
1869 + ("Parameter 1 to __builtin_cop is not a valid coprocessor number.");
1870 + error ("Number should be between 0 and 7.");
1871 + return NULL_RTX;
1872 + }
1873 +
1874 + if (!(*insn_data[icode].operand[1].predicate) (op1, SImode))
1875 + {
1876 + error
1877 + ("Parameter 2 to __builtin_cop is not a valid coprocessor register number.");
1878 + error ("Number should be between 0 and 15.");
1879 + return NULL_RTX;
1880 + }
1881 +
1882 + if (!(*insn_data[icode].operand[2].predicate) (op2, SImode))
1883 + {
1884 + error
1885 + ("Parameter 3 to __builtin_cop is not a valid coprocessor register number.");
1886 + error ("Number should be between 0 and 15.");
1887 + return NULL_RTX;
1888 + }
1889 +
1890 + if (!(*insn_data[icode].operand[3].predicate) (op3, SImode))
1891 + {
1892 + error
1893 + ("Parameter 4 to __builtin_cop is not a valid coprocessor register number.");
1894 + error ("Number should be between 0 and 15.");
1895 + return NULL_RTX;
1896 + }
1897 +
1898 + if (!(*insn_data[icode].operand[4].predicate) (op4, SImode))
1899 + {
1900 + error
1901 + ("Parameter 5 to __builtin_cop is not a valid coprocessor operation.");
1902 + error ("Number should be between 0 and 127.");
1903 + return NULL_RTX;
1904 + }
1905 +
1906 + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4);
1907 + if (!pat)
1908 + return 0;
1909 + emit_insn (pat);
1910 +
1911 + return target;
1912 + }
1913 +
1914 + case AVR32_BUILTIN_MEMS:
1915 + case AVR32_BUILTIN_MEMC:
1916 + case AVR32_BUILTIN_MEMT:
1917 + {
1918 + if (!TARGET_RMW)
1919 + error ("Trying to use __builtin_mem(s/c/t) when target does not support RMW insns.");
1920 +
1921 + switch (fcode) {
1922 + case AVR32_BUILTIN_MEMS:
1923 + icode = CODE_FOR_iorsi3;
1924 + break;
1925 + case AVR32_BUILTIN_MEMC:
1926 + icode = CODE_FOR_andsi3;
1927 + break;
1928 + case AVR32_BUILTIN_MEMT:
1929 + icode = CODE_FOR_xorsi3;
1930 + break;
1931 + }
1932 + arg0 = CALL_EXPR_ARG (exp,0);
1933 + arg1 = CALL_EXPR_ARG (exp,1);
1934 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1935 + if ( GET_CODE (op0) == SYMBOL_REF )
1936 + // This symbol must be RMW addressable
1937 + SYMBOL_REF_FLAGS (op0) |= (1 << SYMBOL_FLAG_RMW_ADDR_SHIFT);
1938 + op0 = gen_rtx_MEM(SImode, op0);
1939 + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
1940 + mode0 = insn_data[icode].operand[1].mode;
1941 +
1942 +
1943 + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
1944 + {
1945 + error ("Parameter 1 to __builtin_mem(s/c/t) must be a Ks15<<2 address or a rmw addressable symbol.");
1946 + }
1947 +
1948 + if ( !CONST_INT_P (op1)
1949 + || INTVAL (op1) > 31
1950 + || INTVAL (op1) < 0 )
1951 + error ("Parameter 2 to __builtin_mem(s/c/t) must be a constant between 0 and 31.");
1952 +
1953 + if ( fcode == AVR32_BUILTIN_MEMC )
1954 + op1 = GEN_INT((~(1 << INTVAL(op1)))&0xffffffff);
1955 + else
1956 + op1 = GEN_INT((1 << INTVAL(op1))&0xffffffff);
1957 + pat = GEN_FCN (icode) (op0, op0, op1);
1958 + if (!pat)
1959 + return 0;
1960 + emit_insn (pat);
1961 + return op0;
1962 + }
1963 +
1964 + case AVR32_BUILTIN_SLEEP:
1965 + {
1966 + arg0 = CALL_EXPR_ARG (exp, 0);
1967 + op0 = expand_normal (arg0);
1968 + int intval = INTVAL(op0); /* NOTE(review): assumes op0 is a CONST_INT; only diagnosed below. */
1969 +
1970 + /* Check that the argument is an integer and that its value
1971 + is greater than 0. */
1972 +
1973 + if (!CONSTANT_P (op0))
1974 + error ("Parameter 1 to __builtin_sleep() is not a valid integer.");
1975 + if (intval < 0 )
1976 + error ("Parameter 1 to __builtin_sleep() should be an integer greater than 0.");
1977 +
1978 + int strncmpval = strncmp (avr32_part_name,"uc3l", 4);
1979 +
1980 + /* Check if op0 is less than 7 for uc3l* and less than 6 for other
1981 + devices. By this check we are avoiding if operand is less than
1982 + 256. For more devices, add more such checks. */
1983 +
1984 + if ( strncmpval == 0 && intval >= 7)
1985 + error ("Parameter 1 to __builtin_sleep() should be less than or equal to 7.");
1986 + else if ( strncmpval != 0 && intval >= 6)
1987 + error ("Parameter 1 to __builtin_sleep() should be less than or equal to 6.");
1988 +
1989 + emit_insn (gen_sleep(op0));
1990 + return target;
1991 +
1992 + }
1993 + case AVR32_BUILTIN_DELAY_CYCLES:
1994 + {
1995 + arg0 = CALL_EXPR_ARG (exp, 0);
1996 + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
1997 +
1998 + if (TARGET_ARCH_AP)
1999 + error (" __builtin_avr32_delay_cycles() not supported for \'%s\' architecture.", avr32_arch_name);
2000 + if (!CONSTANT_P (op0))
2001 + error ("Parameter 1 to __builtin_avr32_delay_cycles() should be an integer.");
2002 + emit_insn (gen_delay_cycles (op0));
2003 + return 0;
2004 +
2005 + }
2006 +
2007 + }
2008 +
2009 + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
2010 + if (d->code == fcode)
2011 + return avr32_expand_binop_builtin (d->icode, exp, target);
2012 +
2013 +
2014 + /* @@@ Should really do something sensible here. */
2015 + return NULL_RTX;
2016 +}
2017 +
2018 +
2019 +/* Handle an "interrupt" or "isr" attribute;
2020 + arguments as in struct attribute_spec.handler. */
2021 +static tree
2022 +avr32_handle_isr_attribute (tree * node, tree name, tree args,
2023 + int flags, bool * no_add_attrs)
2024 +{
2025 + if (DECL_P (*node))
2026 + {
2027 + if (TREE_CODE (*node) != FUNCTION_DECL)
2028 + {
2029 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
2030 + IDENTIFIER_POINTER (name));
2031 + *no_add_attrs = true;
2032 + }
2033 + /* FIXME: the argument if any is checked for type attributes; should it
2034 + be checked for decl ones? */
2035 + }
2036 + else
2037 + {
2038 + if (TREE_CODE (*node) == FUNCTION_TYPE
2039 + || TREE_CODE (*node) == METHOD_TYPE)
2040 + {
2041 + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN)
2042 + {
2043 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
2044 + *no_add_attrs = true;
2045 + }
2046 + }
2047 + else if (TREE_CODE (*node) == POINTER_TYPE
2048 + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
2049 + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
2050 + && avr32_isr_value (args) != AVR32_FT_UNKNOWN)
2051 + {
2052 + *node = build_variant_type_copy (*node);
2053 + TREE_TYPE (*node) = build_type_attribute_variant
2054 + (TREE_TYPE (*node),
2055 + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
2056 + *no_add_attrs = true;
2057 + }
2058 + else
2059 + {
2060 + /* Possibly pass this attribute on from the type to a decl. */
2061 + if (flags & ((int) ATTR_FLAG_DECL_NEXT
2062 + | (int) ATTR_FLAG_FUNCTION_NEXT
2063 + | (int) ATTR_FLAG_ARRAY_NEXT))
2064 + {
2065 + *no_add_attrs = true;
2066 + return tree_cons (name, args, NULL_TREE);
2067 + }
2068 + else
2069 + {
2070 + warning (OPT_Wattributes,"`%s' attribute ignored", IDENTIFIER_POINTER (name));
2071 + }
2072 + }
2073 + }
2074 +
2075 + return NULL_TREE;
2076 +}
2077 +
2078 +
2079 +/* Handle an attribute requiring a FUNCTION_DECL;
2080 + arguments as in struct attribute_spec.handler. */
2081 +static tree
2082 +avr32_handle_fndecl_attribute (tree * node, tree name,
2083 + tree args,
2084 + int flags ATTRIBUTE_UNUSED,
2085 + bool * no_add_attrs)
2086 +{
2087 + if (TREE_CODE (*node) != FUNCTION_DECL)
2088 + {
2089 + warning (OPT_Wattributes,"%qs attribute only applies to functions",
2090 + IDENTIFIER_POINTER (name));
2091 + *no_add_attrs = true;
2092 + return NULL_TREE;
2093 + }
2094 +
2095 + fndecl_attribute_args = args;
2096 + if (args == NULL_TREE)
2097 + return NULL_TREE;
2098 +
2099 + tree value = TREE_VALUE (args);
2100 + if (TREE_CODE (value) != INTEGER_CST)
2101 + {
2102 + warning (OPT_Wattributes,
2103 + "argument of %qs attribute is not an integer constant",
2104 + IDENTIFIER_POINTER (name));
2105 + *no_add_attrs = true;
2106 + }
2107 +
2108 + return NULL_TREE;
2109 +}
2110 +
2111 +
2112 +/* Handle an acall attribute;
2113 + arguments as in struct attribute_spec.handler. */
2114 +
2115 +static tree
2116 +avr32_handle_acall_attribute (tree * node, tree name,
2117 + tree args ATTRIBUTE_UNUSED,
2118 + int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
2119 +{
2120 + if (TREE_CODE (*node) == FUNCTION_TYPE || TREE_CODE (*node) == METHOD_TYPE)
2121 + {
2122 + warning (OPT_Wattributes,"`%s' attribute not yet supported...",
2123 + IDENTIFIER_POINTER (name));
2124 + *no_add_attrs = true;
2125 + return NULL_TREE;
2126 + }
2127 +
2128 + warning (OPT_Wattributes,"`%s' attribute only applies to functions",
2129 + IDENTIFIER_POINTER (name));
2130 + *no_add_attrs = true;
2131 + return NULL_TREE;
2132 +}
2133 +
2134 +
2135 +bool
2136 +avr32_flashvault_call(tree decl)
2137 +{
2138 + tree attributes;
2139 + tree fv_attribute;
2140 + tree vector_tree;
2141 + unsigned int vector;
2142 +
2143 + if (decl && TREE_CODE (decl) == FUNCTION_DECL)
2144 + {
2145 + attributes = DECL_ATTRIBUTES(decl);
2146 + fv_attribute = lookup_attribute ("flashvault", attributes);
2147 + if (fv_attribute != NULL_TREE)
2148 + {
2149 + /* Get attribute parameter, for the function vector number. */
2150 + /*
2151 + There is probably an easier, standard way to retrieve the
2152 + attribute parameter which needs to be done here.
2153 + */
2154 + vector_tree = TREE_VALUE(fv_attribute);
2155 + if (vector_tree != NULL_TREE)
2156 + {
2157 + vector = (unsigned int)TREE_INT_CST_LOW(TREE_VALUE(vector_tree));
2158 + fprintf (asm_out_file,
2159 + "\tmov\tr8, lo(%i)\t# Load vector number for sscall.\n",
2160 + vector);
2161 + }
2162 +
2163 + fprintf (asm_out_file,
2164 + "\tsscall\t# Secure system call.\n");
2165 +
2166 + return true;
2167 + }
2168 + }
2169 +
2170 + return false;
2171 +}
2172 +
2173 +
2174 +static bool has_attribute_p (tree decl, const char *name) /* True iff FUNCTION_DECL decl carries attribute name. */
2175 +{
2176 + if (decl && TREE_CODE (decl) == FUNCTION_DECL)
2177 + {
2178 + return (lookup_attribute (name, DECL_ATTRIBUTES(decl)) != NULL_TREE);
2179 + }
2180 + return false;
2181 +}
2182 +
2183 +
2184 +/* Return 0 if the attributes for two types are incompatible, 1 if they
2185 + are compatible, and 2 if they are nearly compatible (which causes a
2186 + warning to be generated). */
2187 +static int
2188 +avr32_comp_type_attributes (tree type1, tree type2)
2189 +{
2190 + bool acall1, acall2, isr1, isr2, naked1, naked2, fv1, fv2, fvimpl1, fvimpl2;
2191 +
2192 + /* Check for mismatch of non-default calling convention. */
2193 + if (TREE_CODE (type1) != FUNCTION_TYPE)
2194 + return 1;
2195 +
2196 + /* Check for mismatched call attributes. */
2197 + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL;
2198 + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL;
2199 + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL;
2200 + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL;
2201 + fv1 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type1)) != NULL;
2202 + fv2 = lookup_attribute ("flashvault", TYPE_ATTRIBUTES (type2)) != NULL;
2203 + fvimpl1 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type1)) != NULL;
2204 + fvimpl2 = lookup_attribute ("flashvault_impl", TYPE_ATTRIBUTES (type2)) != NULL;
2205 + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
2206 + if (!isr1)
2207 + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
2208 +
2209 + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
2210 + if (!isr2)
2211 + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
2212 +
2213 + if ((acall1 && isr2)
2214 + || (acall2 && isr1)
2215 + || (naked1 && isr2)
2216 + || (naked2 && isr1)
2217 + || (fv1 && isr2)
2218 + || (fv2 && isr1)
2219 + || (fvimpl1 && isr2)
2220 + || (fvimpl2 && isr1)
2221 + || (fv1 && fvimpl2)
2222 + || (fv2 && fvimpl1)
2223 + )
2224 + return 0;
2225 +
2226 + return 1;
2227 +}
2228 +
2229 +
2230 +/* Computes the type of the current function. */
2231 +static unsigned long
2232 +avr32_compute_func_type (void)
2233 +{
2234 + unsigned long type = AVR32_FT_UNKNOWN;
2235 + tree a;
2236 + tree attr;
2237 +
2238 + if (TREE_CODE (current_function_decl) != FUNCTION_DECL)
2239 + abort ();
2240 +
2241 + /* Decide if the current function is volatile. Such functions never
2242 + return, and many memory cycles can be saved by not storing register
2243 + values that will never be needed again. This optimization was added to
2244 + speed up context switching in a kernel application. */
2245 + if (optimize > 0
2246 + && TREE_NOTHROW (current_function_decl)
2247 + && TREE_THIS_VOLATILE (current_function_decl))
2248 + type |= AVR32_FT_VOLATILE;
2249 +
2250 + if (cfun->static_chain_decl != NULL)
2251 + type |= AVR32_FT_NESTED;
2252 +
2253 + attr = DECL_ATTRIBUTES (current_function_decl);
2254 +
2255 + a = lookup_attribute ("isr", attr);
2256 + if (a == NULL_TREE)
2257 + a = lookup_attribute ("interrupt", attr);
2258 +
2259 + if (a == NULL_TREE)
2260 + type |= AVR32_FT_NORMAL;
2261 + else
2262 + type |= avr32_isr_value (TREE_VALUE (a));
2263 +
2264 +
2265 + a = lookup_attribute ("acall", attr);
2266 + if (a != NULL_TREE)
2267 + type |= AVR32_FT_ACALL;
2268 +
2269 + a = lookup_attribute ("naked", attr);
2270 + if (a != NULL_TREE)
2271 + type |= AVR32_FT_NAKED;
2272 +
2273 + a = lookup_attribute ("flashvault", attr);
2274 + if (a != NULL_TREE)
2275 + type |= AVR32_FT_FLASHVAULT;
2276 +
2277 + a = lookup_attribute ("flashvault_impl", attr);
2278 + if (a != NULL_TREE)
2279 + type |= AVR32_FT_FLASHVAULT_IMPL;
2280 +
2281 + return type;
2282 +}
2283 +
2284 +
2285 +/* Returns the type of the current function. */
2286 +static unsigned long
2287 +avr32_current_func_type (void)
2288 +{
2289 + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN)
2290 + cfun->machine->func_type = avr32_compute_func_type ();
2291 +
2292 + return cfun->machine->func_type;
2293 +}
2294 +
2295 +
2296 +/*
2297 +This target hook should return true if we should not pass type solely
2298 +in registers. The file expr.h defines a definition that is usually appropriate,
2299 +refer to expr.h for additional documentation.
2300 +*/
2301 +bool
2302 +avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
2303 +{
2304 + if (type && AGGREGATE_TYPE_P (type)
2305 + /* If the alignment is less than the size then pass in the struct on
2306 + the stack. */
2307 + && ((unsigned int) TYPE_ALIGN_UNIT (type) <
2308 + (unsigned int) int_size_in_bytes (type))
2309 + /* If we support unaligned word accesses then structs of size 4 and 8
2310 + can have any alignment and still be passed in registers. */
2311 + && !(TARGET_UNALIGNED_WORD
2312 + && (int_size_in_bytes (type) == 4
2313 + || int_size_in_bytes (type) == 8))
2314 + /* Double word structs need only a word alignment. */
2315 + && !(int_size_in_bytes (type) == 8 && TYPE_ALIGN_UNIT (type) >= 4))
2316 + return true;
2317 +
2318 + if (type && AGGREGATE_TYPE_P (type)
2319 + /* Structs of size 3,5,6,7 are always passed in registers. */
2320 + && (int_size_in_bytes (type) == 3
2321 + || int_size_in_bytes (type) == 5
2322 + || int_size_in_bytes (type) == 6 || int_size_in_bytes (type) == 7))
2323 + return true;
2324 +
2325 +
2326 + return (type && TREE_ADDRESSABLE (type));
2327 +}
2328 +
2329 +
2330 +bool
2331 +avr32_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
2332 +{
2333 + return true; /* Named args stay named — presumably installed as TARGET_STRICT_ARGUMENT_NAMING; verify. */
2334 +}
2335 +
2336 +
2337 +/*
2338 + This target hook should return true if an argument at the position indicated
2339 + by cum should be passed by reference. This predicate is queried after target
2340 + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type).
2341 +
2342 + If the hook returns true, a copy of that argument is made in memory and a
2343 + pointer to the argument is passed instead of the argument itself. The pointer
2344 + is passed in whatever way is appropriate for passing a pointer to that type.
2345 +*/
2346 +bool
2347 +avr32_pass_by_reference (CUMULATIVE_ARGS * cum ATTRIBUTE_UNUSED,
2348 + enum machine_mode mode ATTRIBUTE_UNUSED,
2349 + tree type, bool named ATTRIBUTE_UNUSED)
2350 +{
2351 + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST));
2352 +}
2353 +
2354 +
2355 +static int
2356 +avr32_arg_partial_bytes (CUMULATIVE_ARGS * pcum ATTRIBUTE_UNUSED,
2357 + enum machine_mode mode ATTRIBUTE_UNUSED,
2358 + tree type ATTRIBUTE_UNUSED,
2359 + bool named ATTRIBUTE_UNUSED)
2360 +{
2361 + return 0; /* 0 => no argument is ever split between registers and the stack. */
2362 +}
2363 +
2364 +
2365 +struct gcc_target targetm = TARGET_INITIALIZER;
2366 +
2367 +/*
2368 + Table used to convert from register number in the assembler instructions and
2369 + the register numbers used in gcc.
2370 +*/
2371 +const int avr32_function_arg_reglist[] = {
2372 + INTERNAL_REGNUM (12),
2373 + INTERNAL_REGNUM (11),
2374 + INTERNAL_REGNUM (10),
2375 + INTERNAL_REGNUM (9),
2376 + INTERNAL_REGNUM (8)
2377 +};
2378 +
2379 +
2380 +rtx avr32_compare_op0 = NULL_RTX;
2381 +rtx avr32_compare_op1 = NULL_RTX;
2382 +rtx avr32_compare_operator = NULL_RTX;
2383 +rtx avr32_acc_cache = NULL_RTX;
2384 +/* type of branch to use */
2385 +enum avr32_cmp_type avr32_branch_type;
2386 +
2387 +
2388 +/*
2389 + Returns nonzero if it is allowed to store a value of mode mode in hard
2390 + register number regno.
2391 +*/
2392 +int
2393 +avr32_hard_regno_mode_ok (int regnr, enum machine_mode mode)
2394 +{
2395 + switch (mode)
2396 + {
2397 + case DImode: /* long long */
2398 + case DFmode: /* double */
2399 + case SCmode: /* __complex__ float */
2400 + case CSImode: /* __complex__ int */
2401 + if (regnr < 4)
2402 + { /* long long int not supported in r12, sp, lr or pc. */
2403 + return 0;
2404 + }
2405 + else
2406 + {
2407 + /* long long int has to be referred in even registers. */
2408 + if (regnr % 2)
2409 + return 0;
2410 + else
2411 + return 1;
2412 + }
2413 + case CDImode: /* __complex__ long long */
2414 + case DCmode: /* __complex__ double */
2415 + case TImode: /* 16 bytes */
2416 + if (regnr < 7)
2417 + return 0;
2418 + else if (regnr % 2)
2419 + return 0;
2420 + else
2421 + return 1;
2422 + default:
2423 + return 1;
2424 + }
2425 +}
2426 +
2427 +
2428 +int
2429 +avr32_rnd_operands (rtx add, rtx shift)
2430 +{
2431 + if (GET_CODE (shift) == CONST_INT &&
2432 + GET_CODE (add) == CONST_INT && INTVAL (shift) > 0)
2433 + {
2434 + if ((1 << (INTVAL (shift) - 1)) == INTVAL (add))
2435 + return TRUE;
2436 + }
2437 +
2438 + return FALSE;
2439 +}
2440 +
2441 +
2442 +int
2443 +avr32_const_ok_for_constraint_p (HOST_WIDE_INT value, char c, const char *str)
2444 +{
2445 + switch (c)
2446 + {
2447 + case 'K':
2448 + case 'I':
2449 + {
2450 + HOST_WIDE_INT min_value = 0, max_value = 0;
2451 + char size_str[3];
2452 + int const_size;
2453 +
2454 + size_str[0] = str[2];
2455 + size_str[1] = str[3];
2456 + size_str[2] = '\0';
2457 + const_size = atoi (size_str);
2458 +
2459 + if (TOUPPER (str[1]) == 'U')
2460 + {
2461 + min_value = 0;
2462 + max_value = (1 << const_size) - 1;
2463 + }
2464 + else if (TOUPPER (str[1]) == 'S')
2465 + {
2466 + min_value = -(1 << (const_size - 1));
2467 + max_value = (1 << (const_size - 1)) - 1;
2468 + }
2469 +
2470 + if (c == 'I')
2471 + {
2472 + value = -value;
2473 + }
2474 +
2475 + if (value >= min_value && value <= max_value)
2476 + {
2477 + return 1;
2478 + }
2479 + break;
2480 + }
2481 + case 'M':
2482 + return avr32_mask_upper_bits_operand (GEN_INT (value), VOIDmode);
2483 + case 'J':
2484 + return avr32_hi16_immediate_operand (GEN_INT (value), VOIDmode);
2485 + case 'O':
2486 + return one_bit_set_operand (GEN_INT (value), VOIDmode);
2487 + case 'N':
2488 + return one_bit_cleared_operand (GEN_INT (value), VOIDmode);
2489 + case 'L':
2490 + /* The lower 16-bits are set. */
2491 + return ((value & 0xffff) == 0xffff) ;
2492 + }
2493 +
2494 + return 0;
2495 +}
2496 +
2497 +
2498 +/* Compute mask of registers which needs saving upon function entry. */
2499 +static unsigned long
2500 +avr32_compute_save_reg_mask (int push)
2501 +{
2502 + unsigned long func_type;
2503 + unsigned int save_reg_mask = 0;
2504 + unsigned int reg;
2505 +
2506 + func_type = avr32_current_func_type ();
2507 +
2508 + if (IS_INTERRUPT (func_type))
2509 + {
2510 + unsigned int max_reg = 12;
2511 +
2512 + /* Get the banking scheme for the interrupt */
2513 + switch (func_type)
2514 + {
2515 + case AVR32_FT_ISR_FULL:
2516 + max_reg = 0;
2517 + break;
2518 + case AVR32_FT_ISR_HALF:
2519 + max_reg = 7;
2520 + break;
2521 + case AVR32_FT_ISR_NONE:
2522 + max_reg = 12;
2523 + break;
2524 + }
2525 +
2526 + /* Interrupt functions must not corrupt any registers, even call
2527 + clobbered ones. If this is a leaf function we can just examine the
2528 + registers used by the RTL, but otherwise we have to assume that
2529 + whatever function is called might clobber anything, and so we have
2530 + to save all the call-clobbered registers as well. */
2531 +
2532 + /* Need not push the registers r8-r12 for AVR32A architectures, as this
2533 + is automatially done in hardware. We also do not have any shadow
2534 + registers. */
2535 + if (TARGET_UARCH_AVR32A)
2536 + {
2537 + max_reg = 7;
2538 + func_type = AVR32_FT_ISR_NONE;
2539 + }
2540 +
2541 + /* All registers which are used and are not shadowed must be saved. */
2542 + for (reg = 0; reg <= max_reg; reg++)
2543 + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
2544 + || (!current_function_is_leaf
2545 + && call_used_regs[INTERNAL_REGNUM (reg)]))
2546 + save_reg_mask |= (1 << reg);
2547 +
2548 + /* Check LR */
2549 + if ((df_regs_ever_live_p (LR_REGNUM)
2550 + || !current_function_is_leaf || frame_pointer_needed)
2551 + /* Only non-shadowed register models */
2552 + && (func_type == AVR32_FT_ISR_NONE))
2553 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2554 +
2555 + /* Make sure that the GOT register is pushed. */
2556 + if (max_reg >= ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM)
2557 + && crtl->uses_pic_offset_table)
2558 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2559 +
2560 + }
2561 + else
2562 + {
2563 + int use_pushm = optimize_size;
2564 +
2565 + /* In the normal case we only need to save those registers which are
2566 + call saved and which are used by this function. */
2567 + for (reg = 0; reg <= 7; reg++)
2568 + if (df_regs_ever_live_p (INTERNAL_REGNUM (reg))
2569 + && !call_used_regs[INTERNAL_REGNUM (reg)])
2570 + save_reg_mask |= (1 << reg);
2571 +
2572 + /* Make sure that the GOT register is pushed. */
2573 + if (crtl->uses_pic_offset_table)
2574 + save_reg_mask |= (1 << ASM_REGNUM (PIC_OFFSET_TABLE_REGNUM));
2575 +
2576 +
2577 + /* If we optimize for size and do not have anonymous arguments: use
2578 + pushm/popm always. */
2579 + if (use_pushm)
2580 + {
2581 + if ((save_reg_mask & (1 << 0))
2582 + || (save_reg_mask & (1 << 1))
2583 + || (save_reg_mask & (1 << 2)) || (save_reg_mask & (1 << 3)))
2584 + save_reg_mask |= 0xf;
2585 +
2586 + if ((save_reg_mask & (1 << 4))
2587 + || (save_reg_mask & (1 << 5))
2588 + || (save_reg_mask & (1 << 6)) || (save_reg_mask & (1 << 7)))
2589 + save_reg_mask |= 0xf0;
2590 +
2591 + if ((save_reg_mask & (1 << 8)) || (save_reg_mask & (1 << 9)))
2592 + save_reg_mask |= 0x300;
2593 + }
2594 +
2595 +
2596 + /* Check LR */
2597 + if ((df_regs_ever_live_p (LR_REGNUM)
2598 + || !current_function_is_leaf
2599 + || (optimize_size
2600 + && save_reg_mask
2601 + && !crtl->calls_eh_return)
2602 + || frame_pointer_needed)
2603 + && !IS_FLASHVAULT (func_type))
2604 + {
2605 + if (push
2606 + /* Never pop LR into PC for functions which
2607 + calls __builtin_eh_return, since we need to
2608 + fix the SP after the restoring of the registers
2609 + and before returning. */
2610 + || crtl->calls_eh_return)
2611 + {
2612 + /* Push/Pop LR */
2613 + save_reg_mask |= (1 << ASM_REGNUM (LR_REGNUM));
2614 + }
2615 + else
2616 + {
2617 + /* Pop PC */
2618 + save_reg_mask |= (1 << ASM_REGNUM (PC_REGNUM));
2619 + }
2620 + }
2621 + }
2622 +
2623 +
2624 + /* Save registers so the exception handler can modify them. */
2625 + if (crtl->calls_eh_return)
2626 + {
2627 + unsigned int i;
2628 +
2629 + for (i = 0;; i++)
2630 + {
2631 + reg = EH_RETURN_DATA_REGNO (i);
2632 + if (reg == INVALID_REGNUM)
2633 + break;
2634 + save_reg_mask |= 1 << ASM_REGNUM (reg);
2635 + }
2636 + }
2637 +
2638 + return save_reg_mask;
2639 +}
2640 +
2641 +
2642 +/* Compute total size in bytes of all saved registers. */
2643 +static int
2644 +avr32_get_reg_mask_size (int reg_mask)
2645 +{
2646 + int reg, size;
2647 + size = 0;
2648 +
2649 + for (reg = 0; reg <= 15; reg++)
2650 + if (reg_mask & (1 << reg))
2651 + size += 4;
2652 +
2653 + return size;
2654 +}
2655 +
2656 +
2657 +/* Return the lowest-numbered register (0..15, assembler numbering) that is
2658 + marked as saved in SAVE_REG_MASK, or -1 when the mask is empty. */
2659 +static int
2660 +avr32_get_saved_reg (int save_reg_mask)
2661 +{
2662 + unsigned int reg;
2663 +
2664 + /* Find the first register which is saved in the saved_reg_mask */
2665 + for (reg = 0; reg <= 15; reg++)
2666 + if (save_reg_mask & (1 << reg))
2667 + return reg;
2668 +
2669 + return -1; /* No register is saved. */
2670 +}
2671 +
2672 +
2673 +/* Return 1 if it is possible to return using a single instruction. */
2674 +int
2675 +avr32_use_return_insn (int iscond)
2676 +{
2677 + unsigned int func_type = avr32_current_func_type ();
2678 + unsigned long saved_int_regs;
2679 +
2680 + /* Never use a return instruction before reload has run. */
2681 + if (!reload_completed)
2682 + return 0;
2683 +
2684 + /* Must adjust the stack for vararg functions. */
2685 + if (crtl->args.info.uses_anonymous_args)
2686 + return 0;
2687 +
2688 + /* If there is a stack adjustment, a single-insn return is not possible. */
2689 + if (get_frame_size ())
2690 + return 0;
2691 +
2692 + saved_int_regs = avr32_compute_save_reg_mask (TRUE);
2693 +
2694 + /* Conditional returns can not be performed in one instruction if we need
2695 + to restore registers from the stack */
2696 + if (iscond && saved_int_regs)
2697 + return 0;
2698 +
2699 + /* Conditional return can not be used for interrupt handlers. */
2700 + if (iscond && IS_INTERRUPT (func_type))
2701 + return 0;
2702 +
2703 + /* For interrupt handlers which needs to pop registers */
2704 + if (saved_int_regs && IS_INTERRUPT (func_type))
2705 + return 0;
2706 +
2707 +
2708 + /* If there are saved registers but the LR isn't saved, then we need two
2709 + instructions for the return. */
2710 + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM (LR_REGNUM))))
2711 + return 0;
2712 +
2713 +
2714 + return 1;
2715 +}
2716 +
2717 +
2718 +/* Generate some function prologue info in the assembly file. */
2719 +void
2720 +avr32_target_asm_function_prologue (FILE * f, HOST_WIDE_INT frame_size)
2721 +{
2722 + unsigned long func_type = avr32_current_func_type ();
2723 +
2724 + if (IS_NAKED (func_type))
2725 + fprintf (f,
2726 + "\t# Function is naked: Prologue and epilogue provided by programmer\n");
2727 +
2728 + if (IS_FLASHVAULT (func_type))
2729 + {
2730 + fprintf(f,
2731 + "\t.ident \"flashvault\"\n\t# Function is defined with flashvault attribute.\n");
2732 + }
2733 +
2734 + if (IS_FLASHVAULT_IMPL (func_type))
2735 + {
2736 + fprintf(f,
2737 + "\t.ident \"flashvault\"\n\t# Function is defined with flashvault_impl attribute.\n");
2738 +
2739 + /* Save information on flashvault function declaration. */
2740 + tree fv_attribute = lookup_attribute ("flashvault_impl", DECL_ATTRIBUTES(current_function_decl));
2741 + if (fv_attribute != NULL_TREE)
2742 + {
2743 + tree vector_tree = TREE_VALUE(fv_attribute);
2744 + if (vector_tree != NULL_TREE)
2745 + {
2746 + unsigned int vector_num;
2747 + const char * name;
2748 +
2749 + vector_num = (unsigned int) TREE_INT_CST_LOW (TREE_VALUE (vector_tree));
2750 +
2751 + name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
2752 +
2753 + flashvault_decl_list_add (vector_num, name);
2754 + }
2755 + }
2756 + }
2757 +
2758 + if (IS_INTERRUPT (func_type))
2759 + {
2760 + switch (func_type)
2761 + {
2762 + case AVR32_FT_ISR_FULL:
2763 + fprintf (f,
2764 + "\t# Interrupt Function: Fully shadowed register file\n");
2765 + break;
2766 + case AVR32_FT_ISR_HALF:
2767 + fprintf (f,
2768 + "\t# Interrupt Function: Half shadowed register file\n");
2769 + break;
2770 + default:
2771 + case AVR32_FT_ISR_NONE:
2772 + fprintf (f, "\t# Interrupt Function: No shadowed register file\n");
2773 + break;
2774 + }
2775 + }
2776 +
2777 +
2778 + fprintf (f, "\t# args = %i, frame = %li, pretend = %i\n",
2779 + crtl->args.size, frame_size,
2780 + crtl->args.pretend_args_size);
2781 +
2782 + fprintf (f, "\t# frame_needed = %i, leaf_function = %i\n",
2783 + frame_pointer_needed, current_function_is_leaf);
2784 +
2785 + fprintf (f, "\t# uses_anonymous_args = %i\n",
2786 + crtl->args.info.uses_anonymous_args);
2787 +
2788 + if (crtl->calls_eh_return)
2789 + fprintf (f, "\t# Calls __builtin_eh_return.\n");
2790 +
2791 +}
2792 +
2793 +
2794 +/* Generate and emit an insn that we will recognize as a pushm or stm.
2795 + Unfortunately, since this insn does not reflect very well the actual
2796 + semantics of the operation, we need to annotate the insn for the benefit
2797 + of DWARF2 frame unwind information. */
2798 +
2799 +int avr32_convert_to_reglist16 (int reglist8_vect);
2800 +
2801 +static rtx
2802 +emit_multi_reg_push (int reglist, int usePUSHM)
2803 +{
2804 + rtx insn;
2805 + rtx dwarf;
2806 + rtx tmp;
2807 + rtx reg;
2808 + int i;
2809 + int nr_regs;
2810 + int index = 0;
2811 +
2812 + if (usePUSHM)
2813 + {
2814 + insn = emit_insn (gen_pushm (gen_rtx_CONST_INT (SImode, reglist)));
2815 + reglist = avr32_convert_to_reglist16 (reglist);
2816 + }
2817 + else
2818 + {
2819 + insn = emit_insn (gen_stm (stack_pointer_rtx,
2820 + gen_rtx_CONST_INT (SImode, reglist),
2821 + gen_rtx_CONST_INT (SImode, 1)));
2822 + }
2823 +
2824 + nr_regs = avr32_get_reg_mask_size (reglist) / 4;
2825 + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1));
2826 +
2827 + for (i = 15; i >= 0; i--)
2828 + {
2829 + if (reglist & (1 << i))
2830 + {
2831 + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (i));
2832 + tmp = gen_rtx_SET (VOIDmode,
2833 + gen_rtx_MEM (SImode,
2834 + plus_constant (stack_pointer_rtx,
2835 + 4 * index)), reg);
2836 + RTX_FRAME_RELATED_P (tmp) = 1;
2837 + XVECEXP (dwarf, 0, 1 + index++) = tmp;
2838 + }
2839 + }
2840 +
2841 + tmp = gen_rtx_SET (SImode,
2842 + stack_pointer_rtx,
2843 + gen_rtx_PLUS (SImode,
2844 + stack_pointer_rtx,
2845 + GEN_INT (-4 * nr_regs)));
2846 + RTX_FRAME_RELATED_P (tmp) = 1;
2847 + XVECEXP (dwarf, 0, 0) = tmp;
2848 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
2849 + REG_NOTES (insn));
2850 + return insn;
2851 +}
2852 +
2853 +rtx
2854 +avr32_gen_load_multiple (rtx * regs, int count, rtx from,
2855 + int write_back, int in_struct_p, int scalar_p)
2856 +{
2857 +
2858 + rtx result;
2859 + int i = 0, j;
2860 +
2861 + result =
2862 + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 1 : 0)));
2863 +
2864 + if (write_back)
2865 + {
2866 + XVECEXP (result, 0, 0)
2867 + = gen_rtx_SET (GET_MODE (from), from,
2868 + plus_constant (from, count * 4));
2869 + i = 1;
2870 + count++;
2871 + }
2872 +
2873 +
2874 + for (j = 0; i < count; i++, j++)
2875 + {
2876 + rtx unspec;
2877 + rtx mem = gen_rtx_MEM (SImode, plus_constant (from, j * 4));
2878 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2879 + MEM_SCALAR_P (mem) = scalar_p;
2880 + unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, mem), UNSPEC_LDM);
2881 + XVECEXP (result, 0, i) = gen_rtx_SET (VOIDmode, regs[j], unspec);
2882 + }
2883 +
2884 + return result;
2885 +}
2886 +
2887 +
2888 +rtx
2889 +avr32_gen_store_multiple (rtx * regs, int count, rtx to,
2890 + int in_struct_p, int scalar_p)
2891 +{
2892 + rtx result;
2893 + int i = 0, j;
2894 +
2895 + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
2896 +
2897 + for (j = 0; i < count; i++, j++)
2898 + {
2899 + rtx mem = gen_rtx_MEM (SImode, plus_constant (to, j * 4));
2900 + MEM_IN_STRUCT_P (mem) = in_struct_p;
2901 + MEM_SCALAR_P (mem) = scalar_p;
2902 + XVECEXP (result, 0, i)
2903 + = gen_rtx_SET (VOIDmode, mem,
2904 + gen_rtx_UNSPEC (VOIDmode,
2905 + gen_rtvec (1, regs[j]),
2906 + UNSPEC_STORE_MULTIPLE));
2907 + }
2908 +
2909 + return result;
2910 +}
2911 +
2912 +
2913 +/* Move a block of memory if it is word aligned or we support unaligned
2914 + word memory accesses. The size must be maximum 64 bytes. */
2915 +int
2916 +avr32_gen_movmemsi (rtx * operands)
2917 +{
2918 + HOST_WIDE_INT bytes_to_go;
2919 + rtx src, dst;
2920 + rtx st_src, st_dst;
2921 + int src_offset = 0, dst_offset = 0;
2922 + int block_size;
2923 + int dst_in_struct_p, src_in_struct_p;
2924 + int dst_scalar_p, src_scalar_p;
2925 + int unaligned;
2926 +
2927 + if (GET_CODE (operands[2]) != CONST_INT
2928 + || GET_CODE (operands[3]) != CONST_INT
2929 + || INTVAL (operands[2]) > 64
2930 + || ((INTVAL (operands[3]) & 3) && !TARGET_UNALIGNED_WORD))
2931 + return 0;
2932 +
2933 + unaligned = (INTVAL (operands[3]) & 3) != 0;
2934 +
2935 + block_size = 4;
2936 +
2937 + st_dst = XEXP (operands[0], 0);
2938 + st_src = XEXP (operands[1], 0);
2939 +
2940 + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
2941 + dst_scalar_p = MEM_SCALAR_P (operands[0]);
2942 + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
2943 + src_scalar_p = MEM_SCALAR_P (operands[1]);
2944 +
2945 + dst = copy_to_mode_reg (SImode, st_dst);
2946 + src = copy_to_mode_reg (SImode, st_src);
2947 +
2948 + bytes_to_go = INTVAL (operands[2]);
2949 +
2950 + while (bytes_to_go)
2951 + {
2952 + enum machine_mode move_mode;
2953 + /* (Seems to be a problem with reloads for the movti pattern so this is
2954 + disabled until that problem is resolved)
2955 + UPDATE: Problem seems to be solved now.... */
2956 + if (bytes_to_go >= GET_MODE_SIZE (TImode) && !unaligned
2957 + /* Do not emit ldm/stm for UC3 as ld.d/st.d is more optimal. */
2958 + && !TARGET_ARCH_UC)
2959 + move_mode = TImode;
2960 + else if ((bytes_to_go >= GET_MODE_SIZE (DImode)) && !unaligned)
2961 + move_mode = DImode;
2962 + else if (bytes_to_go >= GET_MODE_SIZE (SImode))
2963 + move_mode = SImode;
2964 + else
2965 + move_mode = QImode;
2966 +
2967 + {
2968 + rtx src_mem;
2969 + rtx dst_mem = gen_rtx_MEM (move_mode,
2970 + gen_rtx_PLUS (SImode, dst,
2971 + GEN_INT (dst_offset)));
2972 + dst_offset += GET_MODE_SIZE (move_mode);
2973 + if ( 0 /* This causes an error in GCC. Think there is
2974 + something wrong in the gcse pass which causes REQ_EQUIV notes
2975 + to be wrong so disabling it for now. */
2976 + && move_mode == TImode
2977 + && INTVAL (operands[2]) > GET_MODE_SIZE (TImode) )
2978 + {
2979 + src_mem = gen_rtx_MEM (move_mode,
2980 + gen_rtx_POST_INC (SImode, src));
2981 + }
2982 + else
2983 + {
2984 + src_mem = gen_rtx_MEM (move_mode,
2985 + gen_rtx_PLUS (SImode, src,
2986 + GEN_INT (src_offset)));
2987 + src_offset += GET_MODE_SIZE (move_mode);
2988 + }
2989 +
2990 + bytes_to_go -= GET_MODE_SIZE (move_mode);
2991 +
2992 + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p;
2993 + MEM_SCALAR_P (dst_mem) = dst_scalar_p;
2994 +
2995 + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p;
2996 + MEM_SCALAR_P (src_mem) = src_scalar_p;
2997 + emit_move_insn (dst_mem, src_mem);
2998 +
2999 + }
3000 + }
3001 +
3002 + return 1;
3003 +}
3004 +
3005 +
3006 +/* Expand the prologue instruction. */
3007 +void
3008 +avr32_expand_prologue (void)
3009 +{
3010 + rtx insn, dwarf;
3011 + unsigned long saved_reg_mask;
3012 + int reglist8 = 0;
3013 +
3014 + /* Naked functions do not have a prologue. */
3015 + if (IS_NAKED (avr32_current_func_type ()))
3016 + return;
3017 +
3018 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
3019 +
3020 + if (saved_reg_mask)
3021 + {
3022 + /* Must push used registers. */
3023 +
3026 3024 + /* Should we use PUSHM or STM? */
3025 + int usePUSHM = TRUE;
3026 + reglist8 = 0;
3027 + if (((saved_reg_mask & (1 << 0)) ||
3028 + (saved_reg_mask & (1 << 1)) ||
3029 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
3030 + {
3031 + /* One of R0-R3 should at least be pushed. */
3032 + if (((saved_reg_mask & (1 << 0)) &&
3033 + (saved_reg_mask & (1 << 1)) &&
3034 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
3035 + {
3036 + /* All should be pushed. */
3037 + reglist8 |= 0x01;
3038 + }
3039 + else
3040 + {
3041 + usePUSHM = FALSE;
3042 + }
3043 + }
3044 +
3045 + if (((saved_reg_mask & (1 << 4)) ||
3046 + (saved_reg_mask & (1 << 5)) ||
3047 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
3048 + {
3049 + /* One of R4-R7 should at least be pushed */
3050 + if (((saved_reg_mask & (1 << 4)) &&
3051 + (saved_reg_mask & (1 << 5)) &&
3052 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
3053 + {
3054 + if (usePUSHM)
3055 + /* All should be pushed */
3056 + reglist8 |= 0x02;
3057 + }
3058 + else
3059 + {
3060 + usePUSHM = FALSE;
3061 + }
3062 + }
3063 +
3064 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
3065 + {
3066 + /* One of R8-R9 should at least be pushed. */
3067 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
3068 + {
3069 + if (usePUSHM)
3070 + /* All should be pushed. */
3071 + reglist8 |= 0x04;
3072 + }
3073 + else
3074 + {
3075 + usePUSHM = FALSE;
3076 + }
3077 + }
3078 +
3079 + if (saved_reg_mask & (1 << 10))
3080 + reglist8 |= 0x08;
3081 +
3082 + if (saved_reg_mask & (1 << 11))
3083 + reglist8 |= 0x10;
3084 +
3085 + if (saved_reg_mask & (1 << 12))
3086 + reglist8 |= 0x20;
3087 +
3088 + if ((saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
3089 + && !IS_FLASHVAULT (avr32_current_func_type ()))
3090 + {
3091 + /* Push LR */
3092 + reglist8 |= 0x40;
3093 + }
3094 +
3095 + if (usePUSHM)
3096 + {
3097 + insn = emit_multi_reg_push (reglist8, TRUE);
3098 + }
3099 + else
3100 + {
3101 + insn = emit_multi_reg_push (saved_reg_mask, FALSE);
3102 + }
3103 + RTX_FRAME_RELATED_P (insn) = 1;
3104 +
3105 + /* Prevent this instruction from being scheduled after any other
3106 + instructions. */
3107 + emit_insn (gen_blockage ());
3108 + }
3109 +
3110 + /* Set frame pointer */
3111 + if (frame_pointer_needed)
3112 + {
3113 + insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3114 + RTX_FRAME_RELATED_P (insn) = 1;
3115 + }
3116 +
3117 + if (get_frame_size () > 0)
3118 + {
3119 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks21"))
3120 + {
3121 + insn = emit_insn (gen_rtx_SET (SImode,
3122 + stack_pointer_rtx,
3123 + gen_rtx_PLUS (SImode,
3124 + stack_pointer_rtx,
3125 + gen_rtx_CONST_INT
3126 + (SImode,
3127 + -get_frame_size
3128 + ()))));
3129 + RTX_FRAME_RELATED_P (insn) = 1;
3130 + }
3131 + else
3132 + {
3135 3133 + /* Immediate is larger than k21. We must either check if we can use
3136 3134 + one of the pushed registers as temporary storage or we must
3135 + make us a temp register by pushing a register to the stack. */
3136 + rtx temp_reg, const_pool_entry, insn;
3137 + if (saved_reg_mask)
3138 + {
3139 + temp_reg =
3140 + gen_rtx_REG (SImode,
3141 + INTERNAL_REGNUM (avr32_get_saved_reg
3142 + (saved_reg_mask)));
3143 + }
3144 + else
3145 + {
3146 + temp_reg = gen_rtx_REG (SImode, INTERNAL_REGNUM (7));
3147 + emit_move_insn (gen_rtx_MEM
3148 + (SImode,
3149 + gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)),
3150 + temp_reg);
3151 + }
3152 +
3153 + const_pool_entry =
3154 + force_const_mem (SImode,
3155 + gen_rtx_CONST_INT (SImode, get_frame_size ()));
3156 + emit_move_insn (temp_reg, const_pool_entry);
3157 +
3158 + insn = emit_insn (gen_rtx_SET (SImode,
3159 + stack_pointer_rtx,
3160 + gen_rtx_MINUS (SImode,
3161 + stack_pointer_rtx,
3162 + temp_reg)));
3163 +
3164 + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
3165 + gen_rtx_PLUS (SImode, stack_pointer_rtx,
3166 + GEN_INT (-get_frame_size ())));
3167 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3168 + dwarf, REG_NOTES (insn));
3169 + RTX_FRAME_RELATED_P (insn) = 1;
3170 +
3171 + if (!saved_reg_mask)
3172 + {
3173 + insn =
3174 + emit_move_insn (temp_reg,
3175 + gen_rtx_MEM (SImode,
3176 + gen_rtx_POST_INC (SImode,
3177 + gen_rtx_REG
3178 + (SImode,
3179 + 13))));
3180 + }
3181 +
3182 + /* Mark the temp register as dead */
3183 + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg,
3184 + REG_NOTES (insn));
3185 +
3186 +
3187 + }
3188 +
3191 3189 + /* Prevent the stack adjustment from being scheduled after any
3190 + instructions using the frame pointer. */
3191 + emit_insn (gen_blockage ());
3192 + }
3193 +
3194 + /* Load GOT */
3195 + if (flag_pic)
3196 + {
3197 + avr32_load_pic_register ();
3198 +
3199 + /* gcc does not know that load or call instructions might use the pic
3200 + register so it might schedule these instructions before the loading
3201 + of the pic register. To avoid this emit a barrier for now. TODO!
3202 + Find out a better way to let gcc know which instructions might use
3203 + the pic register. */
3204 + emit_insn (gen_blockage ());
3205 + }
3206 + return;
3207 +}
3208 +
3209 +
3210 +void
3211 +avr32_set_return_address (rtx source, rtx scratch)
3212 +{
3213 + rtx addr;
3214 + unsigned long saved_regs;
3215 +
3216 + saved_regs = avr32_compute_save_reg_mask (TRUE);
3217 +
3218 + if (!(saved_regs & (1 << ASM_REGNUM (LR_REGNUM))))
3219 + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
3220 + else
3221 + {
3222 + if (frame_pointer_needed)
3223 + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM);
3224 + else
3225 + if (avr32_const_ok_for_constraint_p (get_frame_size (), 'K', "Ks16"))
3226 + {
3227 + addr = plus_constant (stack_pointer_rtx, get_frame_size ());
3228 + }
3229 + else
3230 + {
3231 + emit_insn (gen_movsi (scratch, GEN_INT (get_frame_size ())));
3232 + addr = scratch;
3233 + }
3234 + emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
3235 + }
3236 +}
3237 +
3238 +
3239 +/* Return the length of INSN. LENGTH is the initial length computed by
3240 + attributes in the machine-description file. */
3241 +int
3242 +avr32_adjust_insn_length (rtx insn ATTRIBUTE_UNUSED,
3243 + int length ATTRIBUTE_UNUSED)
3244 +{
3245 + return length;
3246 +}
3247 +
3248 +
3249 +void
3250 +avr32_output_return_instruction (int single_ret_inst ATTRIBUTE_UNUSED,
3251 + int iscond ATTRIBUTE_UNUSED,
3252 + rtx cond ATTRIBUTE_UNUSED, rtx r12_imm)
3253 +{
3254 +
3255 + unsigned long saved_reg_mask;
3256 + int insert_ret = TRUE;
3257 + int reglist8 = 0;
3258 + int stack_adjustment = get_frame_size ();
3259 + unsigned int func_type = avr32_current_func_type ();
3260 + FILE *f = asm_out_file;
3261 +
3264 3262 + /* Naked functions do not have an epilogue. */
3263 + if (IS_NAKED (func_type))
3264 + return;
3265 +
3266 + saved_reg_mask = avr32_compute_save_reg_mask (FALSE);
3267 +
3268 + /* Reset frame pointer */
3269 + if (stack_adjustment > 0)
3270 + {
3271 + if (avr32_const_ok_for_constraint_p (stack_adjustment, 'I', "Is21"))
3272 + {
3273 + fprintf (f, "\tsub\tsp, %i # Reset Frame Pointer\n",
3274 + -stack_adjustment);
3275 + }
3276 + else
3277 + {
3278 + /* TODO! Is it safe to use r8 as scratch?? */
3279 + fprintf (f, "\tmov\tr8, lo(%i) # Reset Frame Pointer\n",
3280 + -stack_adjustment);
3281 + fprintf (f, "\torh\tr8, hi(%i) # Reset Frame Pointer\n",
3282 + -stack_adjustment);
3283 + fprintf (f, "\tadd\tsp, r8 # Reset Frame Pointer\n");
3284 + }
3285 + }
3286 +
3287 + if (saved_reg_mask)
3288 + {
3289 + /* Must pop used registers */
3290 +
3291 + /* Should we use POPM or LDM? */
3292 + int usePOPM = TRUE;
3293 + if (((saved_reg_mask & (1 << 0)) ||
3294 + (saved_reg_mask & (1 << 1)) ||
3295 + (saved_reg_mask & (1 << 2)) || (saved_reg_mask & (1 << 3))))
3296 + {
3297 + /* One of R0-R3 should at least be popped */
3298 + if (((saved_reg_mask & (1 << 0)) &&
3299 + (saved_reg_mask & (1 << 1)) &&
3300 + (saved_reg_mask & (1 << 2)) && (saved_reg_mask & (1 << 3))))
3301 + {
3302 + /* All should be popped */
3303 + reglist8 |= 0x01;
3304 + }
3305 + else
3306 + {
3307 + usePOPM = FALSE;
3308 + }
3309 + }
3310 +
3311 + if (((saved_reg_mask & (1 << 4)) ||
3312 + (saved_reg_mask & (1 << 5)) ||
3313 + (saved_reg_mask & (1 << 6)) || (saved_reg_mask & (1 << 7))))
3314 + {
3317 3315 + /* One of R4-R7 should at least be popped */
3316 + if (((saved_reg_mask & (1 << 4)) &&
3317 + (saved_reg_mask & (1 << 5)) &&
3318 + (saved_reg_mask & (1 << 6)) && (saved_reg_mask & (1 << 7))))
3319 + {
3320 + if (usePOPM)
3321 + /* All should be popped */
3322 + reglist8 |= 0x02;
3323 + }
3324 + else
3325 + {
3326 + usePOPM = FALSE;
3327 + }
3328 + }
3329 +
3330 + if (((saved_reg_mask & (1 << 8)) || (saved_reg_mask & (1 << 9))))
3331 + {
3334 3332 + /* One of R8-R9 should at least be popped */
3333 + if (((saved_reg_mask & (1 << 8)) && (saved_reg_mask & (1 << 9))))
3334 + {
3335 + if (usePOPM)
3338 3336 + /* All should be popped */
3337 + reglist8 |= 0x04;
3338 + }
3339 + else
3340 + {
3341 + usePOPM = FALSE;
3342 + }
3343 + }
3344 +
3345 + if (saved_reg_mask & (1 << 10))
3346 + reglist8 |= 0x08;
3347 +
3348 + if (saved_reg_mask & (1 << 11))
3349 + reglist8 |= 0x10;
3350 +
3351 + if (saved_reg_mask & (1 << 12))
3352 + reglist8 |= 0x20;
3353 +
3354 + if (saved_reg_mask & (1 << ASM_REGNUM (LR_REGNUM)))
3355 + /* Pop LR */
3356 + reglist8 |= 0x40;
3357 +
3358 + if ((saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3359 + && !IS_FLASHVAULT_IMPL (func_type))
3360 + /* Pop LR into PC. */
3361 + reglist8 |= 0x80;
3362 +
3363 + if (usePOPM)
3364 + {
3365 + char reglist[64]; /* 64 bytes should be enough... */
3366 + avr32_make_reglist8 (reglist8, (char *) reglist);
3367 +
3368 + if (reglist8 & 0x80)
3369 + /* This instruction is also a return */
3370 + insert_ret = FALSE;
3371 +
3372 + if (r12_imm && !insert_ret)
3373 + fprintf (f, "\tpopm\t%s, r12=%li\n", reglist, INTVAL (r12_imm));
3374 + else
3375 + fprintf (f, "\tpopm\t%s\n", reglist);
3376 +
3377 + }
3378 + else
3379 + {
3380 + char reglist[64]; /* 64 bytes should be enough... */
3381 + avr32_make_reglist16 (saved_reg_mask, (char *) reglist);
3382 + if (saved_reg_mask & (1 << ASM_REGNUM (PC_REGNUM)))
3383 + /* This instruction is also a return */
3384 + insert_ret = FALSE;
3385 +
3386 + if (r12_imm && !insert_ret)
3387 + fprintf (f, "\tldm\tsp++, %s, r12=%li\n", reglist,
3388 + INTVAL (r12_imm));
3389 + else
3390 + fprintf (f, "\tldm\tsp++, %s\n", reglist);
3391 +
3392 + }
3393 +
3394 + }
3395 +
3396 + /* Stack adjustment for exception handler. */
3397 + if (crtl->calls_eh_return)
3398 + fprintf (f, "\tadd\tsp, r%d\n", ASM_REGNUM (EH_RETURN_STACKADJ_REGNO));
3399 +
3400 +
3401 + if (IS_INTERRUPT (func_type))
3402 + {
3403 + fprintf (f, "\trete\n");
3404 + }
3405 + else if (IS_FLASHVAULT (func_type))
3406 + {
3407 + /* Normal return from Secure System call, increment SS_RAR before
3408 + returning. Use R8 as scratch. */
3409 + fprintf (f,
3410 + "\t# Normal return from sscall.\n"
3411 + "\t# Increment SS_RAR before returning.\n"
3412 + "\t# Use R8 as scratch.\n"
3413 + "\tmfsr\tr8, 440\n"
3414 + "\tsub\tr8, -2\n"
3415 + "\tmtsr\t440, r8\n"
3416 + "\tretss\n");
3417 + }
3418 + else if (insert_ret)
3419 + {
3420 + if (r12_imm)
3421 + fprintf (f, "\tretal\t%li\n", INTVAL (r12_imm));
3422 + else
3423 + fprintf (f, "\tretal\tr12\n");
3424 + }
3425 +}
3426 +
3427 +void
3428 +avr32_make_reglist16 (int reglist16_vect, char *reglist16_string)
3429 +{
3430 + int i;
3431 + bool first_reg = true;
3432 + /* Make sure reglist16_string is empty. */
3433 + reglist16_string[0] = '\0';
3434 +
3435 + for (i = 0; i < 16; ++i)
3436 + {
3437 + if (reglist16_vect & (1 << i))
3438 + {
3439 + first_reg == true ? first_reg = false : strcat(reglist16_string,", ");
3440 + strcat (reglist16_string, reg_names[INTERNAL_REGNUM (i)]);
3441 + }
3442 + }
3443 +}
3444 +
3445 +int
3446 +avr32_convert_to_reglist16 (int reglist8_vect)
3447 +{
3448 + int reglist16_vect = 0;
3449 + if (reglist8_vect & 0x1)
3450 + reglist16_vect |= 0xF;
3451 + if (reglist8_vect & 0x2)
3452 + reglist16_vect |= 0xF0;
3453 + if (reglist8_vect & 0x4)
3454 + reglist16_vect |= 0x300;
3455 + if (reglist8_vect & 0x8)
3456 + reglist16_vect |= 0x400;
3457 + if (reglist8_vect & 0x10)
3458 + reglist16_vect |= 0x800;
3459 + if (reglist8_vect & 0x20)
3460 + reglist16_vect |= 0x1000;
3461 + if (reglist8_vect & 0x40)
3462 + reglist16_vect |= 0x4000;
3463 + if (reglist8_vect & 0x80)
3464 + reglist16_vect |= 0x8000;
3465 +
3466 + return reglist16_vect;
3467 +}
3468 +
3469 +void
3470 +avr32_make_reglist8 (int reglist8_vect, char *reglist8_string)
3471 +{
3472 + /* Make sure reglist8_string is empty. */
3473 + reglist8_string[0] = '\0';
3474 +
3475 + if (reglist8_vect & 0x1)
3476 + strcpy (reglist8_string, "r0-r3");
3477 + if (reglist8_vect & 0x2)
3478 + strlen (reglist8_string) ? strcat (reglist8_string, ", r4-r7") :
3479 + strcpy (reglist8_string, "r4-r7");
3480 + if (reglist8_vect & 0x4)
3481 + strlen (reglist8_string) ? strcat (reglist8_string, ", r8-r9") :
3482 + strcpy (reglist8_string, "r8-r9");
3483 + if (reglist8_vect & 0x8)
3484 + strlen (reglist8_string) ? strcat (reglist8_string, ", r10") :
3485 + strcpy (reglist8_string, "r10");
3486 + if (reglist8_vect & 0x10)
3487 + strlen (reglist8_string) ? strcat (reglist8_string, ", r11") :
3488 + strcpy (reglist8_string, "r11");
3489 + if (reglist8_vect & 0x20)
3490 + strlen (reglist8_string) ? strcat (reglist8_string, ", r12") :
3491 + strcpy (reglist8_string, "r12");
3492 + if (reglist8_vect & 0x40)
3493 + strlen (reglist8_string) ? strcat (reglist8_string, ", lr") :
3494 + strcpy (reglist8_string, "lr");
3495 + if (reglist8_vect & 0x80)
3496 + strlen (reglist8_string) ? strcat (reglist8_string, ", pc") :
3497 + strcpy (reglist8_string, "pc");
3498 +}
3499 +
3500 +
3501 +int
3502 +avr32_eh_return_data_regno (int n)
3503 +{
3504 + if (n >= 0 && n <= 3)
3505 + return 8 + n;
3506 + else
3507 + return INVALID_REGNUM;
3508 +}
3509 +
3510 +
3511 +/* Compute the distance from register FROM to register TO.
3512 + These can be the arg pointer, the frame pointer or
3513 + the stack pointer.
3514 + Typical stack layout looks like this:
3515 +
3516 + old stack pointer -> | |
3517 + ----
3518 + | | \
3519 + | | saved arguments for
3520 + | | vararg functions
3521 + arg_pointer -> | | /
3522 + --
3523 + | | \
3524 + | | call saved
3525 + | | registers
3526 + | | /
3527 + frame ptr -> --
3528 + | | \
3529 + | | local
3530 + | | variables
3531 + stack ptr --> | | /
3532 + --
3533 + | | \
3534 + | | outgoing
3535 + | | arguments
3536 + | | /
3537 + --
3538 +
3541 3539 + For a given function some or all of these stack components
3540 + may not be needed, giving rise to the possibility of
3541 + eliminating some of the registers.
3542 +
3543 + The values returned by this function must reflect the behaviour
3544 + of avr32_expand_prologue() and avr32_compute_save_reg_mask().
3545 +
3546 + The sign of the number returned reflects the direction of stack
3547 + growth, so the values are positive for all eliminations except
3548 + from the soft frame pointer to the hard frame pointer. */
3549 +int
3550 +avr32_initial_elimination_offset (int from, int to)
3551 +{
3552 + int i;
3553 + int call_saved_regs = 0;
3554 + unsigned long saved_reg_mask;
3555 + unsigned int local_vars = get_frame_size ();
3556 +
3557 + saved_reg_mask = avr32_compute_save_reg_mask (TRUE);
3558 +
3559 + for (i = 0; i < 16; ++i)
3560 + {
3561 + if (saved_reg_mask & (1 << i))
3562 + call_saved_regs += 4;
3563 + }
3564 +
3565 + switch (from)
3566 + {
3567 + case ARG_POINTER_REGNUM:
3568 + switch (to)
3569 + {
3570 + case STACK_POINTER_REGNUM:
<