diff -Nur gcc-4.0.2/config.sub gcc-4.0.2-avr32/config.sub --- gcc-4.0.2/config.sub 2005-04-25 12:36:56.000000000 +0200 +++ gcc-4.0.2-avr32/config.sub 2006-06-20 13:09:22.000000000 +0200 @@ -230,7 +230,7 @@ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ | bfin \ | c4x | clipper \ | d10v | d30v | dlx | dsp16xx \ @@ -299,7 +299,7 @@ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* \ + | avr-* | avr32-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ | clipper-* | craynv-* | cydra-* \ diff -Nur gcc-4.0.2/configure.in gcc-4.0.2-avr32/configure.in --- gcc-4.0.2/configure.in 2005-09-13 09:01:28.000000000 +0200 +++ gcc-4.0.2-avr32/configure.in 2006-06-20 13:09:22.000000000 +0200 @@ -493,6 +493,9 @@ arm-*-riscix*) noconfigdirs="$noconfigdirs ld target-libgloss ${libgcj}" ;; + avr32-*-*) + noconfigdirs="$noconfigdirs target-libiberty target-libmudflap target-libffi ${libgcj}" + ;; avr-*-*) noconfigdirs="$noconfigdirs target-libiberty target-libstdc++-v3 ${libgcj}" ;; diff -Nur gcc-4.0.2/gcc/builtins.c gcc-4.0.2-avr32/gcc/builtins.c --- gcc-4.0.2/gcc/builtins.c 2005-08-28 13:08:55.000000000 +0200 +++ gcc-4.0.2-avr32/gcc/builtins.c 2006-06-20 13:09:22.000000000 +0200 @@ -8387,7 +8387,7 @@ do { - code = va_arg (ap, enum tree_code); + code = va_arg (ap, int); switch (code) { case 0: diff -Nur gcc-4.0.2/gcc/calls.c gcc-4.0.2-avr32/gcc/calls.c --- gcc-4.0.2/gcc/calls.c 2005-07-25 18:36:33.000000000 +0200 +++ gcc-4.0.2-avr32/gcc/calls.c 2006-06-20 13:09:22.000000000 +0200 @@ -3353,7 +3353,7 @@ for (; count < nargs; count++) { rtx val = va_arg (p, rtx); - enum machine_mode mode = va_arg (p, enum machine_mode); + enum machine_mode mode = va_arg (p, int); /* We cannot convert the arg value to the mode the library wants here; must do it earlier where we know the signedness of the arg. */ diff -Nur gcc-4.0.2/gcc/config/avr32/avr32.c gcc-4.0.2-avr32/gcc/config/avr32/avr32.c --- gcc-4.0.2/gcc/config/avr32/avr32.c 1970-01-01 01:00:00.000000000 +0100 +++ gcc-4.0.2-avr32/gcc/config/avr32/avr32.c 2006-06-20 14:18:12.000000000 +0200 @@ -0,0 +1,6672 @@ +/* + Target hooks and helper functions for AVR32. + Copyright 2003-2006 Atmel Corporation. + + Written by Ronny Pedersen, Atmel Norway, + Initial porting by Anders Ådland. + + This file is part of GCC. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "rtl.h" +#include "tree.h" +#include "obstack.h" +#include "regs.h" +#include "hard-reg-set.h" +#include "real.h" +#include "insn-config.h" +#include "conditions.h" +#include "output.h" +#include "insn-attr.h" +#include "flags.h" +#include "reload.h" +#include "function.h" +#include "expr.h" +#include "optabs.h" +#include "toplev.h" +#include "recog.h" +#include "ggc.h" +#include "except.h" +#include "c-pragma.h" +#include "integrate.h" +#include "tm_p.h" +#include "langhooks.h" + +#include "target.h" +#include "target-def.h" + +#include + +/* Forward definitions of types. */ +typedef struct minipool_node Mnode; +typedef struct minipool_fixup Mfix; + +/* Obstack for minipool constant handling. */ +static struct obstack minipool_obstack; +static char * minipool_startobj; +static rtx minipool_vector_label; + +/* True if we are currently building a constant table. */ +int making_const_table; + +/* Some forward function declarations */ +static unsigned long avr32_isr_value PARAMS ((tree)); +static unsigned long avr32_compute_func_type PARAMS ((void)); +static tree avr32_handle_isr_attribute PARAMS ((tree *, tree, tree, int, bool *)); +static tree avr32_handle_acall_attribute PARAMS ((tree *, tree, tree, int, bool *)); +static tree avr32_handle_fndecl_attribute (tree *node, tree name, tree args, + int flags, bool *no_add_attrs); +static void avr32_reorg (void); +rtx get_next_insn_cond(rtx cur_insn); +int set_next_insn_cond(rtx cur_insn, rtx cond); +bool avr32_return_in_msb(tree type); +bool avr32_vector_mode_supported (enum machine_mode mode); +static void avr32_init_libfuncs (void); +void avr32_load_pic_register (void); +void avr32_override_options (void); + + +static void +avr32_add_gc_roots (void) +{ + gcc_obstack_init(&minipool_obstack); + minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0); +} + + +/* List of all known AVR32 cpus */ +static const struct cpu_type_s avr32_cpu_types[] = { + /* name, cpu_type, architecture type, macro */ + { "none", CPU_TYPE_AVR32_NONE, ARCH_TYPE_AVR32_AP, "__AVR32__"}, + { "ap7000", CPU_TYPE_AVR32_AP7000, ARCH_TYPE_AVR32_AP, "__AVR32_AP7000__"}, + { NULL, 0, 0, NULL } +}; + +/* List of all known AVR32 architectures */ +static const struct arch_type_s avr32_arch_types[] = { + /* name, architecture type, microarchitecture type, feature flags, macro */ + { "avr32_ap", ARCH_TYPE_AVR32_AP, UARCH_TYPE_AVR32B, FLAG_AVR32_HAS_DSP| + FLAG_AVR32_HAS_SIMD|FLAG_AVR32_HAS_UNALIGNED_WORD|FLAG_AVR32_HAS_BRANCH_PRED, "__AVR32_AP__"}, + { NULL, 0, 0, 0, NULL } +}; + +/* Default arch name */ +const char *avr32_arch_name = "avr32_ap"; +const char *avr32_cpu_name = "none"; + +const struct cpu_type_s *avr32_cpu; +const struct arch_type_s *avr32_arch; + + +/* Override command line options */ +void +avr32_override_options (void){ + + const struct cpu_type_s *cpu; + const struct arch_type_s *arch; + + /* Check if cpu type is set. */ + for (cpu = avr32_cpu_types; cpu->name; cpu++) + if (strcmp (cpu->name, avr32_cpu_name) == 0) + break; + + avr32_cpu = cpu; + + if (!cpu->name) + { + fprintf (stderr, "Unknown CPU `%s' specified\nKnown CPU names:\n", + avr32_cpu_name); + for (cpu = avr32_cpu_types; cpu->name; cpu++) + fprintf (stderr,"\t%s\n", cpu->name); + avr32_cpu = &avr32_cpu_types[CPU_TYPE_AVR32_NONE]; + } + + avr32_arch = &avr32_arch_types[avr32_cpu->arch_type]; + + /* If cpu was set to "none" then check if arch was set. 
*/ + if ( strcmp (avr32_cpu->name, "none") == 0 ){ + /* Check if cpu type is set. */ + for (arch = avr32_arch_types; arch->name; arch++) + if (strcmp (arch->name, avr32_arch_name) == 0) + break; + + avr32_arch = arch; + + if (!arch->name) + { + fprintf (stderr, "Unknown arch `%s' specified\nKnown arch names:\n", + avr32_arch_name); + for (arch = avr32_arch_types; arch->name; arch++) + fprintf (stderr,"\t%s\n", arch->name); + avr32_arch = &avr32_arch_types[ARCH_TYPE_AVR32_AP]; + } + } + + /* If optimization level is two or greater, then + align start of loops to a word boundary since + this will allow folding the first insn of the loop. + Do this only for targets supporting branch prediction. + */ + if ( optimize >= 2 + && TARGET_BRANCH_PRED ) + align_loops = 2; + + if (AVR32_ALWAYS_PIC) + flag_pic = 1; + + if (target_flags & AVR32_FLAG_NO_PIC) + flag_pic = 0; + + avr32_add_gc_roots(); +} + + +/* +If defined, a function that outputs the assembler code for entry to a +function. The prologue is responsible for setting up the stack frame, +initializing the frame pointer register, saving registers that must be +saved, and allocating size additional bytes of storage for the +local variables. size is an integer. file is a stdio +stream to which the assembler code should be output. + +The label for the beginning of the function need not be output by this +macro. That has already been done when the macro is run. + +To determine which registers to save, the macro can refer to the array +regs_ever_live: element r is nonzero if hard register +r is used anywhere within the function. This implies the function +prologue should save register r, provided it is not one of the +call-used registers. (TARGET_ASM_FUNCTION_EPILOGUE must likewise use +regs_ever_live.) + +On machines that have ``register windows'', the function entry code does +not save on the stack the registers that are in the windows, even if +they are supposed to be preserved by function calls; instead it takes +appropriate steps to ``push'' the register stack, if any non-call-used +registers are used in the function. + +On machines where functions may or may not have frame-pointers, the +function entry code must vary accordingly; it must set up the frame +pointer if one is wanted, and not otherwise. To determine whether a +frame pointer is in wanted, the macro can refer to the variable +frame_pointer_needed. The variable's value will be 1 at run +time in a function that needs a frame pointer. (see Elimination). + +The function entry code is responsible for allocating any stack space +required for the function. This stack space consists of the regions +listed below. In most cases, these regions are allocated in the +order listed, with the last listed region closest to the top of the +stack (the lowest address if STACK_GROWS_DOWNWARD is defined, and +the highest address if it is not defined). You can use a different order +for a machine if doing so is more convenient or required for +compatibility reasons. Except in cases where required by standard +or by a debugger, there is no reason why the stack layout used by GCC +need agree with that used by other compilers for a machine. 
+*/ + +#undef TARGET_ASM_FUNCTION_PROLOGUE +#define TARGET_ASM_FUNCTION_PROLOGUE avr32_target_asm_function_prologue + + +#undef TARGET_DEFAULT_SHORT_ENUMS +#define TARGET_DEFAULT_SHORT_ENUMS hook_bool_tree_true + +#undef TARGET_PROMOTE_FUNCTION_ARGS +#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true + +#undef TARGET_PROMOTE_FUNCTION_RETURN +#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true + +#undef TARGET_PROMOTE_PROTOTYPES +#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true + +#undef TARGET_MUST_PASS_IN_STACK +#define TARGET_MUST_PASS_IN_STACK avr32_must_pass_in_stack + +#undef TARGET_PASS_BY_REFERENCE +#define TARGET_PASS_BY_REFERENCE avr32_pass_by_reference + +#undef TARGET_STRICT_ARGUMENT_NAMING +#define TARGET_STRICT_ARGUMENT_NAMING avr32_strict_argument_naming + +#undef TARGET_VECTOR_MODE_SUPPORTED_P +#define TARGET_VECTOR_MODE_SUPPORTED_P avr32_vector_mode_supported + +#undef TARGET_RETURN_IN_MEMORY +#define TARGET_RETURN_IN_MEMORY avr32_return_in_memory + +#undef TARGET_RETURN_IN_MSB +#define TARGET_RETURN_IN_MSB avr32_return_in_msb + +#undef TARGET_ARG_PARTIAL_BYTES +#define TARGET_ARG_PARTIAL_BYTES avr32_arg_partial_bytes + +#undef TARGET_STRIP_NAME_ENCODING +#define TARGET_STRIP_NAME_ENCODING avr32_strip_name_encoding + +#define streq(string1, string2) (strcmp (string1, string2) == 0) + +#undef TARGET_ATTRIBUTE_TABLE +#define TARGET_ATTRIBUTE_TABLE avr32_attribute_table + +#undef TARGET_COMP_TYPE_ATTRIBUTES +#define TARGET_COMP_TYPE_ATTRIBUTES avr32_comp_type_attributes + + +#undef TARGET_RTX_COSTS +#define TARGET_RTX_COSTS avr32_rtx_costs + +#undef TARGET_CANNOT_FORCE_CONST_MEM +#define TARGET_CANNOT_FORCE_CONST_MEM avr32_cannot_force_const_mem + +#undef TARGET_ASM_INTEGER +#define TARGET_ASM_INTEGER avr32_assemble_integer + +/* + * Switches to the appropriate section for output of constant pool + * entry x in mode. You can assume that x is some kind of constant in + * RTL. The argument mode is redundant except in the case of a + * const_int rtx. Select the section by calling readonly_data_ section + * or one of the alternatives for other sections. align is the + * constant alignment in bits. + * + * The default version of this function takes care of putting symbolic + * constants in flag_ pic mode in data_section and everything else in + * readonly_data_section. + */ +#undef TARGET_ASM_SELECT_RTX_SECTION +#define TARGET_ASM_SELECT_RTX_SECTION avr32_select_rtx_section + + +/* + * If non-null, this hook performs a target-specific pass over the + * instruction stream. The compiler will run it at all optimization + * levels, just before the point at which it normally does + * delayed-branch scheduling. + * + * The exact purpose of the hook varies from target to target. Some + * use it to do transformations that are necessary for correctness, + * such as laying out in-function constant pools or avoiding hardware + * hazards. Others use it as an opportunity to do some + * machine-dependent optimizations. + * + * You need not implement the hook if it has nothing to do. The + * default definition is null. + */ +#undef TARGET_MACHINE_DEPENDENT_REORG +#define TARGET_MACHINE_DEPENDENT_REORG avr32_reorg + +/* Target hook for assembling integer objects. 
+ Need to handle integer vectors */ +static bool +avr32_assemble_integer (rtx x, unsigned int size, int aligned_p) +{ + if (avr32_vector_mode_supported (GET_MODE (x))) + { + int i, units; + + if (GET_CODE (x) != CONST_VECTOR) + abort (); + + units = CONST_VECTOR_NUNITS (x); + + switch (GET_MODE (x)) + { + case V2HImode: size = 2; break; + case V4QImode: size = 1; break; + default: + abort (); + } + + for (i = 0; i < units; i++) + { + rtx elt; + + elt = CONST_VECTOR_ELT (x, i); + assemble_integer + (elt, size, i == 0 ? 32 : size * BITS_PER_UNIT, 1); + } + + return true; + } + + return default_assemble_integer (x, size, aligned_p); +} + +/* + * This target hook describes the relative costs of RTL expressions. + * + * The cost may depend on the precise form of the expression, which is + * available for examination in x, and the rtx code of the expression + * in which it is contained, found in outer_code. code is the + * expression code--redundant, since it can be obtained with GET_CODE + * (x). + * + * In implementing this hook, you can use the construct COSTS_N_INSNS + * (n) to specify a cost equal to n fast instructions. + * + * On entry to the hook, *total contains a default estimate for the + * cost of the expression. The hook should modify this value as + * necessary. Traditionally, the default costs are COSTS_N_INSNS (5) + * for multiplications, COSTS_N_INSNS (7) for division and modulus + * operations, and COSTS_N_INSNS (1) for all other operations. + * + * When optimizing for code size, i.e. when optimize_size is non-zero, + * this target hook should be used to estimate the relative size cost + * of an expression, again relative to COSTS_N_INSNS. + * + * The hook returns true when all subexpressions of x have been + * processed, and false when rtx_cost should recurse. + */ + +/* Worker routine for avr32_rtx_costs. */ +static inline int +avr32_rtx_costs_1 (rtx x, enum rtx_code code ATTRIBUTE_UNUSED, enum rtx_code outer ATTRIBUTE_UNUSED) +{ + enum machine_mode mode = GET_MODE (x); + + switch (GET_CODE(x)) + { + case MEM: + /* Memory costs quite a lot for the first word, but subsequent words + load at the equivalent of a single insn each. */ + if ( GET_MODE_SIZE(mode) > UNITS_PER_WORD ) + return COSTS_N_INSNS(2 + (GET_MODE_SIZE (mode) / UNITS_PER_WORD)); + + return COSTS_N_INSNS(3); + case SYMBOL_REF: + case CONST: + /* These are valid for the pseudo insns: lda.w and call which operates + on direct addresses. We assume that the cost of a lda.w is the same + as the cost of a ld.w insn.*/ + return (outer == SET) ? COSTS_N_INSNS(3) : COSTS_N_INSNS(1); + case DIV: + case MOD: + case UDIV: + case UMOD: + return optimize_size ? 
COSTS_N_INSNS (1) : COSTS_N_INSNS(16); + + case ROTATE: + case ROTATERT: + if (mode == TImode) + return COSTS_N_INSNS(100); + + if (mode == DImode) + return COSTS_N_INSNS(10); + return COSTS_N_INSNS(4); + case ASHIFT: case LSHIFTRT: case ASHIFTRT: + case NOT: + if (mode == TImode) + return COSTS_N_INSNS(10); + + if (mode == DImode) + return COSTS_N_INSNS(4); + return COSTS_N_INSNS(1); + case PLUS: + case MINUS: + case NEG: + case COMPARE: + case ABS: + if (GET_MODE_CLASS (mode) == MODE_FLOAT) + return COSTS_N_INSNS(100); + + if (mode == TImode) + return COSTS_N_INSNS(50); + + if (mode == DImode) + return COSTS_N_INSNS(2); + return COSTS_N_INSNS(1); + + case MULT: + { + if (GET_MODE_CLASS (mode) == MODE_FLOAT) + return COSTS_N_INSNS(300); + + if (mode == TImode) + return COSTS_N_INSNS(16); + + if (mode == DImode) + return COSTS_N_INSNS(4); + + if (mode == HImode) + return COSTS_N_INSNS(2); + + return COSTS_N_INSNS(3); + } + case IF_THEN_ELSE: + if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC) + return COSTS_N_INSNS(4); + return COSTS_N_INSNS(1); + case SIGN_EXTEND: + case ZERO_EXTEND: + /* Sign/Zero extensions of registers cost quite much + since these instrcutions only take one register operand + which means that gcc often must insert some move instrcutions */ + if ( mode == QImode || mode == HImode ) + return (COSTS_N_INSNS(GET_CODE (XEXP (x, 0)) == MEM ? 0 : 1)); + return COSTS_N_INSNS(4); + case UNSPEC: + /* divmod operations */ + if ( XINT(x, 1) == UNSPEC_UDIVMODSI4_INTERNAL + || XINT(x, 1) == UNSPEC_DIVMODSI4_INTERNAL ){ + return optimize_size ? COSTS_N_INSNS (1) : COSTS_N_INSNS(16); + } + /* Fallthrough */ + default: + return COSTS_N_INSNS(1); + } +} + +static bool +avr32_rtx_costs (rtx x, int code, int outer_code, int *total) +{ + *total = avr32_rtx_costs_1 (x, code, outer_code); + return true; +} + + +bool avr32_cannot_force_const_mem(rtx x ATTRIBUTE_UNUSED){ + /* Do not want symbols in the constant pool when compiling + pic or if using address pseudo instructions. */ + return ( (flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS) + && avr32_find_symbol(x) != NULL_RTX ); +} + + +/* Table of machine attributes. */ +const struct attribute_spec avr32_attribute_table[] = +{ + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ + /* Interrupt Service Routines have special prologue and epilogue requirements. */ + { "isr", 0, 1, false, false, false, avr32_handle_isr_attribute }, + { "interrupt", 0, 1, false, false, false, avr32_handle_isr_attribute }, + { "acall", 0, 1, false, true, true, avr32_handle_acall_attribute }, + { "naked", 0, 0, true, false, false, avr32_handle_fndecl_attribute }, + { NULL, 0, 0, false, false, false, NULL } +}; + + +typedef struct +{ + const char *const arg; + const unsigned long return_value; +} +isr_attribute_arg; + +static const isr_attribute_arg isr_attribute_args [] = +{ + { "FULL", AVR32_FT_ISR_FULL }, + { "full", AVR32_FT_ISR_FULL }, + { "HALF", AVR32_FT_ISR_HALF }, + { "half", AVR32_FT_ISR_HALF }, + { "NONE", AVR32_FT_ISR_NONE }, + { "none", AVR32_FT_ISR_NONE }, + { "UNDEF", AVR32_FT_ISR_NONE }, + { "undef", AVR32_FT_ISR_NONE }, + { "SWI", AVR32_FT_ISR_NONE }, + { "swi", AVR32_FT_ISR_NONE }, + { NULL, AVR32_FT_ISR_NONE } +}; + +/* Returns the (interrupt) function type of the current + function, or AVR32_FT_UNKNOWN if the type cannot be determined. */ + +static unsigned long +avr32_isr_value (argument) + tree argument; +{ + const isr_attribute_arg * ptr; + const char * arg; + + /* No argument - default to ISR_NONE. 
*/ + if (argument == NULL_TREE) + return AVR32_FT_ISR_NONE; + + /* Get the value of the argument. */ + if (TREE_VALUE (argument) == NULL_TREE + || TREE_CODE (TREE_VALUE (argument)) != STRING_CST) + return AVR32_FT_UNKNOWN; + + arg = TREE_STRING_POINTER (TREE_VALUE (argument)); + + /* Check it against the list of known arguments. */ + for (ptr = isr_attribute_args; ptr->arg != NULL; ptr ++) + if (streq (arg, ptr->arg)) + return ptr->return_value; + + /* An unrecognized interrupt type. */ + return AVR32_FT_UNKNOWN; +} + + + +/* +These hooks specify assembly directives for creating certain kinds +of integer object. The TARGET_ASM_BYTE_OP directive creates a +byte-sized object, the TARGET_ASM_ALIGNED_HI_OP one creates an +aligned two-byte object, and so on. Any of the hooks may be +NULL, indicating that no suitable directive is available. + +The compiler will print these strings at the start of a new line, +followed immediately by the object's initial value. In most cases, +the string should contain a tab, a pseudo-op, and then another tab. +*/ +#undef TARGET_ASM_BYTE_OP +#define TARGET_ASM_BYTE_OP "\t.byte\t" +#undef TARGET_ASM_ALIGNED_HI_OP +#define TARGET_ASM_ALIGNED_HI_OP "\t.align 1\n\t.short\t" +#undef TARGET_ASM_ALIGNED_SI_OP +#define TARGET_ASM_ALIGNED_SI_OP "\t.align 2\n\t.int\t" +#undef TARGET_ASM_ALIGNED_DI_OP +#define TARGET_ASM_ALIGNED_DI_OP NULL +#undef TARGET_ASM_ALIGNED_TI_OP +#define TARGET_ASM_ALIGNED_TI_OP NULL +#undef TARGET_ASM_UNALIGNED_HI_OP +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t" +#undef TARGET_ASM_UNALIGNED_SI_OP +#define TARGET_ASM_UNALIGNED_SI_OP "\t.int\t" +#undef TARGET_ASM_UNALIGNED_DI_OP +#define TARGET_ASM_UNALIGNED_DI_OP NULL +#undef TARGET_ASM_UNALIGNED_TI_OP +#define TARGET_ASM_UNALIGNED_TI_OP NULL + +#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE +#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE avr32_sched_use_dfa_pipeline_interface + +#undef TARGET_ASM_OUTPUT_MI_THUNK +#define TARGET_ASM_OUTPUT_MI_THUNK avr32_output_mi_thunk + + +static void +avr32_output_mi_thunk ( FILE *file, + tree thunk ATTRIBUTE_UNUSED, + HOST_WIDE_INT delta, + HOST_WIDE_INT vcall_offset, + tree function ) +{ + int mi_delta = delta; + int this_regno = (avr32_return_in_memory (DECL_RESULT (function), TREE_TYPE(function)) + ? INTERNAL_REGNUM(11) : INTERNAL_REGNUM(12)); + + + if ( !avr32_const_ok_for_constraint_p( mi_delta, 'I', "Is21") + || vcall_offset ){ + fprintf (file, "\tpushm\tr10\n"); + } + + + if ( mi_delta != 0 ){ + if ( avr32_const_ok_for_constraint_p( mi_delta, 'I', "Is21") ){ + fprintf (file, "\tsub\t%s, -0x%x\n", reg_names[this_regno], mi_delta); + } else { + /*Immediate is larger than k21 + we must make us a temp register by pushing a register + to the stack.*/ + fprintf (file, "\tmov\tr10, lo(%x)\n", mi_delta); + fprintf (file, "\torh\tr10, hi(%x)\n", mi_delta); + fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]); + } + } + + + if ( vcall_offset != 0 ){ + fprintf (file, "\tld.w\tr10, %s[0]\n", reg_names[this_regno]); + fprintf (file, "\tld.w\tr10, r10[%i]\n", (int)vcall_offset); + fprintf (file, "\tadd\t%s, r10\n", reg_names[this_regno]); + } + + + if ( !avr32_const_ok_for_constraint_p( mi_delta, 'I', "Is21") + || vcall_offset ){ + fprintf (file, "\tpopm\tr10\n"); + } + + if ( flag_pic ){ + /* Don't know how we should do this!!! For now we'll just use + an extended branch instruction and hope that the function + will be reached. 
*/ + fprintf (file, "\tbral\t"); + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0)); + fputc ('\n', file); + } else { + fprintf (file, "\tlddpc\tpc, 0f\n"); + fprintf (file, "\t.align 2\n"); + fputs ("0:\t.long\t", file); + assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0)); + fputc ('\n', file); + } +} + +/* Implements target hook vector_mode_supported. */ +bool +avr32_vector_mode_supported (enum machine_mode mode) +{ + if ((mode == V2HImode) + || (mode == V4QImode)) + return true; + + return false; +} + + +#undef TARGET_INIT_LIBFUNCS +#define TARGET_INIT_LIBFUNCS avr32_init_libfuncs + +#undef TARGET_INIT_BUILTINS +#define TARGET_INIT_BUILTINS avr32_init_builtins + +#undef TARGET_EXPAND_BUILTIN +#define TARGET_EXPAND_BUILTIN avr32_expand_builtin + +tree int_ftype_int, int_ftype_void, short_ftype_short, void_ftype_int_int, void_ftype_ptr_int; +tree void_ftype_int, void_ftype_void, int_ftype_ptr_int; +tree short_ftype_short, int_ftype_int_short, int_ftype_short_short, short_ftype_short_short; +tree int_ftype_int_int, longlong_ftype_int_short, longlong_ftype_short_short; +tree void_ftype_int_int_int_int_int, void_ftype_int_int_int; +tree longlong_ftype_int_int, void_ftype_int_int_longlong; +tree int_ftype_int_int_int, longlong_ftype_longlong_int_short; +tree longlong_ftype_longlong_short_short, int_ftype_int_short_short; + +#define def_builtin(NAME, TYPE, CODE) \ + builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE) + +#define def_mbuiltin(MASK, NAME, TYPE, CODE) \ + do \ + { \ + if ((MASK)) \ + builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, NULL_TREE); \ + } \ + while (0) + +struct builtin_description +{ + const unsigned int mask; + const enum insn_code icode; + const char * const name; + const int code; + const enum rtx_code comparison; + const unsigned int flag; + const tree *ftype; +}; + +static const struct builtin_description bdesc_2arg[] = +{ +#define DSP_BUILTIN(code, builtin, ftype) \ + { 1, CODE_FOR_##code, "__builtin_" #code , \ + AVR32_BUILTIN_##builtin, 0, 0, ftype }, + + DSP_BUILTIN (mulsathh_h, MULSATHH_H, &short_ftype_short_short) + DSP_BUILTIN (mulsathh_w, MULSATHH_W, &int_ftype_short_short) + DSP_BUILTIN (mulsatrndhh_h, MULSATRNDHH_H, &short_ftype_short_short) + DSP_BUILTIN (mulsatrndwh_w, MULSATRNDWH_W, &int_ftype_int_short) + DSP_BUILTIN (mulsatwh_w, MULSATWH_W, &int_ftype_int_short) + DSP_BUILTIN (satadd_h, SATADD_H, &short_ftype_short_short) + DSP_BUILTIN (satsub_h, SATSUB_H, &short_ftype_short_short) + DSP_BUILTIN (satadd_w, SATADD_W, &int_ftype_int_int) + DSP_BUILTIN (satsub_w, SATSUB_W, &int_ftype_int_int) + DSP_BUILTIN (mulwh_d, MULWH_D, &longlong_ftype_int_short) + DSP_BUILTIN (mulnwh_d, MULNWH_D, &longlong_ftype_int_short) +}; + + +void avr32_init_builtins (void){ + unsigned int i; + const struct builtin_description * d; + tree endlink = void_list_node; + tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink); + tree longlong_endlink = tree_cons (NULL_TREE, long_long_integer_type_node, endlink); + tree short_endlink = tree_cons (NULL_TREE, short_integer_type_node, endlink); + tree void_endlink = tree_cons (NULL_TREE, void_type_node, endlink); + + + /* int func (int) */ + int_ftype_int + = build_function_type (integer_type_node, + int_endlink); + + /* short func (short) */ + short_ftype_short + = build_function_type (short_integer_type_node, + short_endlink); + + /* short func (short, short) */ + short_ftype_short_short + = build_function_type (short_integer_type_node, + tree_cons(NULL_TREE, 
short_integer_type_node, + short_endlink)); + + /* long long func (long long, short, short) */ + longlong_ftype_longlong_short_short + = build_function_type (long_long_integer_type_node, + tree_cons(NULL_TREE, long_long_integer_type_node, + tree_cons(NULL_TREE, short_integer_type_node, + short_endlink))); + + /* long long func (short, short) */ + longlong_ftype_short_short + = build_function_type (long_long_integer_type_node, + tree_cons(NULL_TREE, short_integer_type_node, + short_endlink)); + + /* int func (int, int) */ + int_ftype_int_int + = build_function_type (integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + int_endlink)); + + /* long long func (int, int) */ + longlong_ftype_int_int + = build_function_type (long_long_integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + int_endlink)); + + /* long long int func (long long, int, short) */ + longlong_ftype_longlong_int_short + = build_function_type (long_long_integer_type_node, + tree_cons(NULL_TREE, long_long_integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + short_endlink))); + + /* long long int func (int, short) */ + longlong_ftype_int_short + = build_function_type (long_long_integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + short_endlink)); + + /* int func (int, short, short) */ + int_ftype_int_short_short + = build_function_type (integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, short_integer_type_node, + short_endlink))); + + /* int func (short, short) */ + int_ftype_short_short + = build_function_type (integer_type_node, + tree_cons(NULL_TREE, short_integer_type_node, + short_endlink)); + + /* int func (int, short) */ + int_ftype_int_short + = build_function_type (integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + short_endlink)); + + /* void func (int, int) */ + void_ftype_int_int + = build_function_type (void_type_node, + tree_cons(NULL_TREE,integer_type_node, + int_endlink)); + + /* void func (int, int, int) */ + void_ftype_int_int_int + = build_function_type (void_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + int_endlink))); + + /* void func (int, int, long long) */ + void_ftype_int_int_longlong + = build_function_type (void_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + longlong_endlink))); + + /* void func (int, int, int, int, int) */ + void_ftype_int_int_int_int_int + = build_function_type (void_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + int_endlink))))); + + /* void func (void *, int) */ + void_ftype_ptr_int + = build_function_type (void_type_node, + tree_cons(NULL_TREE, ptr_type_node, + int_endlink)); + + /* void func (int) */ + void_ftype_int + = build_function_type (void_type_node, int_endlink); + + /* void func (void) */ + void_ftype_void + = build_function_type (void_type_node, void_endlink); + + /* int func (void) */ + int_ftype_void + = build_function_type (integer_type_node, void_endlink); + + /* int func (void *, int) */ + int_ftype_ptr_int + = build_function_type (integer_type_node, + tree_cons(NULL_TREE, ptr_type_node, + int_endlink)); + + /* int func (int, int, int) */ + int_ftype_int_int_int + = build_function_type (integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + tree_cons(NULL_TREE, integer_type_node, + int_endlink))); + + /* Initialize avr32 builtins. 
*/ + def_builtin ("__builtin_mfsr", int_ftype_int, AVR32_BUILTIN_MFSR); + def_builtin ("__builtin_mtsr", void_ftype_int_int, AVR32_BUILTIN_MTSR); + def_builtin ("__builtin_mfdr", int_ftype_int, AVR32_BUILTIN_MFDR); + def_builtin ("__builtin_mtdr", void_ftype_int_int, AVR32_BUILTIN_MTDR); + def_builtin ("__builtin_cache", void_ftype_ptr_int, AVR32_BUILTIN_CACHE); + def_builtin ("__builtin_sync", void_ftype_int, AVR32_BUILTIN_SYNC); + def_builtin ("__builtin_tlbr", void_ftype_void, AVR32_BUILTIN_TLBR); + def_builtin ("__builtin_tlbs", void_ftype_void, AVR32_BUILTIN_TLBS); + def_builtin ("__builtin_tlbw", void_ftype_void, AVR32_BUILTIN_TLBW); + def_builtin ("__builtin_breakpoint", void_ftype_void, AVR32_BUILTIN_BREAKPOINT); + def_builtin ("__builtin_xchg", int_ftype_ptr_int, AVR32_BUILTIN_XCHG); + def_builtin ("__builtin_ldxi", int_ftype_ptr_int, AVR32_BUILTIN_LDXI); + def_builtin ("__builtin_bswap_16", short_ftype_short, AVR32_BUILTIN_BSWAP16); + def_builtin ("__builtin_bswap_32", int_ftype_int, AVR32_BUILTIN_BSWAP32); + def_builtin ("__builtin_cop", void_ftype_int_int_int_int_int, AVR32_BUILTIN_COP); + def_builtin ("__builtin_mvcr_w", int_ftype_int_int, AVR32_BUILTIN_MVCR_W); + def_builtin ("__builtin_mvrc_w", void_ftype_int_int_int, AVR32_BUILTIN_MVRC_W); + def_builtin ("__builtin_mvcr_d", longlong_ftype_int_int, AVR32_BUILTIN_MVCR_D); + def_builtin ("__builtin_mvrc_d", void_ftype_int_int_longlong, AVR32_BUILTIN_MVRC_D); + def_builtin ("__builtin_sats", int_ftype_int_int_int, AVR32_BUILTIN_SATS); + def_builtin ("__builtin_satu", int_ftype_int_int_int, AVR32_BUILTIN_SATU); + def_builtin ("__builtin_satrnds", int_ftype_int_int_int, AVR32_BUILTIN_SATRNDS); + def_builtin ("__builtin_satrndu", int_ftype_int_int_int, AVR32_BUILTIN_SATRNDU); + def_builtin ("__builtin_musfr", void_ftype_int, AVR32_BUILTIN_MUSFR); + def_builtin ("__builtin_mustr", int_ftype_void, AVR32_BUILTIN_MUSTR); + def_builtin ("__builtin_macsathh_w", int_ftype_int_short_short, AVR32_BUILTIN_MACSATHH_W); + def_builtin ("__builtin_macwh_d", longlong_ftype_longlong_int_short, AVR32_BUILTIN_MACWH_D); + def_builtin ("__builtin_machh_d", longlong_ftype_longlong_short_short, AVR32_BUILTIN_MACHH_D); + + /* Add all builtins that are more or less simple operations on two + operands. */ + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) + { + /* Use one of the operands; the target can have a different mode for + mask-generating compares. */ + + if (d->name == 0) + continue; + + def_mbuiltin (d->mask, d->name, *(d->ftype), d->code); + } +} + + +/* Subroutine of avr32_expand_builtin to take care of binop insns. */ + +static rtx +avr32_expand_binop_builtin (enum insn_code icode, + tree arglist, rtx target) +{ + rtx pat; + tree arg0 = TREE_VALUE (arglist); + tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); + rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + enum machine_mode tmode = insn_data[icode].operand[0].mode; + enum machine_mode mode0 = insn_data[icode].operand[1].mode; + enum machine_mode mode1 = insn_data[icode].operand[2].mode; + + + if (! target + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + /* In case the insn wants input operands in modes different from + the result, abort. */ + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)){ + /* If op0 is already a reg we must cast it to the correct mode. 
*/ + if (REG_P(op0)) + op0 = convert_to_mode(mode0, op0, 1); + else + op0 = copy_to_mode_reg (mode0, op0); + } + if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)){ + /* If op1 is already a reg we must cast it to the correct mode. */ + if (REG_P(op1)) + op1 = convert_to_mode(mode1, op1, 1); + else + op1 = copy_to_mode_reg (mode1, op1); + } + pat = GEN_FCN (icode) (target, op0, op1); + if (! pat) + return 0; + emit_insn (pat); + return target; +} + +/* Expand an expression EXP that calls a built-in function, + with result going to TARGET if that's convenient + (and in mode MODE if that's convenient). + SUBTARGET may be used as the target for computing one of EXP's operands. + IGNORE is nonzero if the value is to be ignored. */ + +rtx +avr32_expand_builtin (tree exp, + rtx target, + rtx subtarget ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) +{ + const struct builtin_description * d; + unsigned int i; + enum insn_code icode; + tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); + tree arglist = TREE_OPERAND (exp, 1); + tree arg0,arg1,arg2; + rtx op0, op1, op2, pat; + enum machine_mode tmode, mode0,mode1; + enum machine_mode arg0_mode; + int fcode = DECL_FUNCTION_CODE (fndecl); + + switch (fcode) + { + default: + break; + + case AVR32_BUILTIN_SATS: + case AVR32_BUILTIN_SATU: + case AVR32_BUILTIN_SATRNDS: + case AVR32_BUILTIN_SATRNDU: + { + const char *fname; + switch (fcode){ + default: + case AVR32_BUILTIN_SATS: + icode = CODE_FOR_sats; + fname = "sats"; + break; + case AVR32_BUILTIN_SATU: + icode = CODE_FOR_satu; + fname = "satu"; + break; + case AVR32_BUILTIN_SATRNDS: + icode = CODE_FOR_satrnds; + fname = "satrnds"; + break; + case AVR32_BUILTIN_SATRNDU: + icode = CODE_FOR_satrndu; + fname = "satrndu"; + break; + } + + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + arg2 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(arglist))); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); + + tmode = insn_data[icode].operand[0].mode; + + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + + if (! (*insn_data[icode].operand[0].predicate) (op0, GET_MODE(op0))){ + op0 = copy_to_mode_reg (insn_data[icode].operand[0].mode, op0); + } + + if (! (*insn_data[icode].operand[1].predicate) (op1, SImode)){ + error("Parameter 2 to __builtin_%s should be a constant number.", fname); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[1].predicate) (op2, SImode)){ + error("Parameter 3 to __builtin_%s should be a constant number.", fname); + return NULL_RTX; + } + + emit_move_insn(target, op0); + pat = GEN_FCN (icode) (target, op1, op2); + if (! pat) + return 0; + emit_insn (pat); + + return target; + } + case AVR32_BUILTIN_MUSTR: + icode = CODE_FOR_mustr; + tmode = insn_data[icode].operand[0].mode; + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + pat = GEN_FCN (icode) (target); + if (! pat) + return 0; + emit_insn (pat); + return target; + + case AVR32_BUILTIN_MFSR: + icode = CODE_FOR_mfsr; + arg0 = TREE_VALUE (arglist); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + tmode = insn_data[icode].operand[0].mode; + mode0 = insn_data[icode].operand[1].mode; + + if (! 
(*insn_data[icode].operand[1].predicate) (op0, mode0)){ + error("Parameter 1 to __builtin_mfsr must be a constant number"); + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + pat = GEN_FCN (icode) (target, op0); + if (! pat) + return 0; + emit_insn (pat); + return target; + case AVR32_BUILTIN_MTSR: + icode = CODE_FOR_mtsr; + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + mode0 = insn_data[icode].operand[0].mode; + mode1 = insn_data[icode].operand[1].mode; + + if (! (*insn_data[icode].operand[0].predicate) (op0, mode0)){ + error("Parameter 1 to __builtin_mtsr must be a constant number"); + return gen_reg_rtx(mode0); + } + if (! (*insn_data[icode].operand[1].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + pat = GEN_FCN (icode) (op0, op1); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_MFDR: + icode = CODE_FOR_mfdr; + arg0 = TREE_VALUE (arglist); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + tmode = insn_data[icode].operand[0].mode; + mode0 = insn_data[icode].operand[1].mode; + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)){ + error("Parameter 1 to __builtin_mfdr must be a constant number"); + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + pat = GEN_FCN (icode) (target, op0); + if (! pat) + return 0; + emit_insn (pat); + return target; + case AVR32_BUILTIN_MTDR: + icode = CODE_FOR_mtdr; + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + mode0 = insn_data[icode].operand[0].mode; + mode1 = insn_data[icode].operand[1].mode; + + if (! (*insn_data[icode].operand[0].predicate) (op0, mode0)){ + error("Parameter 1 to __builtin_mtdr must be a constant number"); + return gen_reg_rtx(mode0); + } + if (! (*insn_data[icode].operand[1].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + pat = GEN_FCN (icode) (op0, op1); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_CACHE: + icode = CODE_FOR_cache; + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + mode0 = insn_data[icode].operand[0].mode; + mode1 = insn_data[icode].operand[1].mode; + + if (! (*insn_data[icode].operand[1].predicate) (op1, mode1)){ + error("Parameter 2 to __builtin_cache must be a constant number"); + return gen_reg_rtx(mode1); + } + + if (! (*insn_data[icode].operand[0].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + + pat = GEN_FCN (icode) (op0, op1); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_SYNC: + case AVR32_BUILTIN_MUSFR:{ + const char *fname; + switch ( fcode ){ + default: + case AVR32_BUILTIN_SYNC: + icode = CODE_FOR_sync; + fname = "sync"; + break; + case AVR32_BUILTIN_MUSFR: + icode = CODE_FOR_musfr; + fname = "musfr"; + break; + } + + arg0 = TREE_VALUE (arglist); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + mode0 = insn_data[icode].operand[0].mode; + + if (! 
(*insn_data[icode].operand[0].predicate) (op0, mode0)){ + if ( icode == CODE_FOR_musfr ) + op0 = copy_to_mode_reg (mode0, op0); + else { + error("Parameter to __builtin_%s is illegal.", fname); + return gen_reg_rtx(mode0); + } + } + pat = GEN_FCN (icode) (op0); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + } + case AVR32_BUILTIN_TLBR: + icode = CODE_FOR_tlbr; + pat = GEN_FCN (icode) (NULL_RTX); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_TLBS: + icode = CODE_FOR_tlbs; + pat = GEN_FCN (icode) (NULL_RTX); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_TLBW: + icode = CODE_FOR_tlbw; + pat = GEN_FCN (icode) (NULL_RTX); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_BREAKPOINT: + icode = CODE_FOR_breakpoint; + pat = GEN_FCN (icode) (NULL_RTX); + if (! pat) + return 0; + emit_insn (pat); + return NULL_RTX; + case AVR32_BUILTIN_XCHG: + icode = CODE_FOR_xchg; + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + tmode = insn_data[icode].operand[0].mode; + mode0 = insn_data[icode].operand[1].mode; + mode1 = insn_data[icode].operand[3].mode; + + if (! (*insn_data[icode].operand[3].predicate) (op1, mode1)){ + op1 = copy_to_mode_reg (mode1, op1); + } + + if (! (*insn_data[icode].operand[2].predicate) (op0, mode0)){ + op0 = copy_to_mode_reg (mode0, op0); + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + pat = GEN_FCN (icode) (target, op0, op0, op1); + if (! pat) + return 0; + emit_insn (pat); + return target; + case AVR32_BUILTIN_LDXI: + icode = CODE_FOR_ldxi; + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + arg2 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(arglist))); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); + tmode = insn_data[icode].operand[0].mode; + mode0 = insn_data[icode].operand[1].mode; + mode1 = insn_data[icode].operand[2].mode; + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)){ + op0 = copy_to_mode_reg (mode0, op0); + } + + if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)){ + op1 = copy_to_mode_reg (mode1, op1); + } + + if (! (*insn_data[icode].operand[3].predicate) (op2, SImode)){ + error("Parameter 3 to __builtin_ldxi must be a valid extract shift operand: (0|8|16|24)"); + return gen_reg_rtx(mode0); + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + pat = GEN_FCN (icode) (target, op0, op1, op2); + if (! pat) + return 0; + emit_insn (pat); + return target; + case AVR32_BUILTIN_BSWAP16: + { + icode = CODE_FOR_bswap_16; + arg0 = TREE_VALUE (arglist); + arg0_mode = TYPE_MODE (TREE_TYPE (arg0)); + mode0 = insn_data[icode].operand[1].mode; + if (arg0_mode != mode0) + arg0 = build1 (NOP_EXPR, + (*lang_hooks.types.type_for_mode) (mode0, 0), arg0); + + op0 = expand_expr (arg0, NULL_RTX, HImode, 0); + tmode = insn_data[icode].operand[0].mode; + + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)){ + op0 = copy_to_mode_reg (mode0, op0); + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! 
(*insn_data[icode].operand[0].predicate) (target, tmode)){ + target = gen_reg_rtx (tmode); + } + + + pat = GEN_FCN (icode) (target, op0); + if (! pat) + return 0; + emit_insn (pat); + + return target; + } + case AVR32_BUILTIN_BSWAP32: + { + icode = CODE_FOR_bswap_32; + arg0 = TREE_VALUE (arglist); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + tmode = insn_data[icode].operand[0].mode; + mode0 = insn_data[icode].operand[1].mode; + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)){ + op0 = copy_to_mode_reg (mode0, op0); + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + + pat = GEN_FCN (icode) (target, op0); + if (! pat) + return 0; + emit_insn (pat); + + return target; + } + case AVR32_BUILTIN_MVCR_W: + case AVR32_BUILTIN_MVCR_D: + { + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + + if ( fcode == AVR32_BUILTIN_MVCR_W ) + icode = CODE_FOR_mvcrsi; + else + icode = CODE_FOR_mvcrdi; + + tmode = insn_data[icode].operand[0].mode; + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + if (! (*insn_data[icode].operand[1].predicate) (op0, SImode)){ + error("Parameter 1 to __builtin_cop is not a valid coprocessor number."); + error("Number should be between 0 and 7."); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[2].predicate) (op1, SImode)){ + error("Parameter 2 to __builtin_cop is not a valid coprocessor register number."); + error("Number should be between 0 and 15."); + return NULL_RTX; + } + + pat = GEN_FCN (icode) (target, op0, op1); + if (! pat) + return 0; + emit_insn (pat); + + return target; + } + case AVR32_BUILTIN_MACSATHH_W: + case AVR32_BUILTIN_MACWH_D: + case AVR32_BUILTIN_MACHH_D: + { + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + arg2 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(arglist))); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); + + icode = ( (fcode == AVR32_BUILTIN_MACSATHH_W) ? CODE_FOR_macsathh_w : + (fcode == AVR32_BUILTIN_MACWH_D) ? CODE_FOR_macwh_d : + CODE_FOR_machh_d ); + + tmode = insn_data[icode].operand[0].mode; + mode0 = insn_data[icode].operand[1].mode; + mode1 = insn_data[icode].operand[2].mode; + + + if (! target + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + if (! (*insn_data[icode].operand[0].predicate) (op0, tmode)){ + /* If op0 is already a reg we must cast it to the correct mode. */ + if (REG_P(op0)) + op0 = convert_to_mode(tmode, op0, 1); + else + op0 = copy_to_mode_reg (tmode, op0); + } + + if (! (*insn_data[icode].operand[1].predicate) (op1, mode0)){ + /* If op1 is already a reg we must cast it to the correct mode. */ + if (REG_P(op1)) + op1 = convert_to_mode(mode0, op1, 1); + else + op1 = copy_to_mode_reg (mode0, op1); + } + + if (! (*insn_data[icode].operand[2].predicate) (op2, mode1)){ + /* If op1 is already a reg we must cast it to the correct mode. */ + if (REG_P(op2)) + op2 = convert_to_mode(mode1, op2, 1); + else + op2 = copy_to_mode_reg (mode1, op2); + } + + emit_move_insn(target, op0); + + pat = GEN_FCN (icode) (target, op1, op2); + if (! 
pat) + return 0; + emit_insn (pat); + return target; + } + case AVR32_BUILTIN_MVRC_W: + case AVR32_BUILTIN_MVRC_D: + { + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + arg2 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(arglist))); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); + + if ( fcode == AVR32_BUILTIN_MVRC_W ) + icode = CODE_FOR_mvrcsi; + else + icode = CODE_FOR_mvrcdi; + + if (! (*insn_data[icode].operand[0].predicate) (op0, SImode)){ + error("Parameter 1 is not a valid coprocessor number."); + error("Number should be between 0 and 7."); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[1].predicate) (op1, SImode)){ + error("Parameter 2 is not a valid coprocessor register number."); + error("Number should be between 0 and 15."); + return NULL_RTX; + } + + if ( GET_CODE(op2) == CONST_INT + || GET_CODE(op2) == CONST + || GET_CODE(op2) == SYMBOL_REF + || GET_CODE(op2) == LABEL_REF ){ + op2 = force_const_mem(insn_data[icode].operand[2].mode, op2); + } + + if ( ! (*insn_data[icode].operand[2].predicate) (op2, GET_MODE(op2)) ) + op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); + + + pat = GEN_FCN (icode) (op0, op1, op2); + if (! pat) + return 0; + emit_insn (pat); + + return NULL_RTX; + } + case AVR32_BUILTIN_COP: + { + rtx op3, op4; + tree arg3, arg4; + icode = CODE_FOR_cop; + arg0 = TREE_VALUE (arglist); + arg1 = TREE_VALUE (TREE_CHAIN(arglist)); + arg2 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(arglist))); + arg3 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(TREE_CHAIN(arglist)))); + arg4 = TREE_VALUE (TREE_CHAIN(TREE_CHAIN(TREE_CHAIN(TREE_CHAIN(arglist))))); + op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); + op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); + op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); + op3 = expand_expr (arg3, NULL_RTX, VOIDmode, 0); + op4 = expand_expr (arg4, NULL_RTX, VOIDmode, 0); + + if (! (*insn_data[icode].operand[0].predicate) (op0, SImode)){ + error("Parameter 1 to __builtin_cop is not a valid coprocessor number."); + error("Number should be between 0 and 7."); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[1].predicate) (op1, SImode)){ + error("Parameter 2 to __builtin_cop is not a valid coprocessor register number."); + error("Number should be between 0 and 15."); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[2].predicate) (op2, SImode)){ + error("Parameter 3 to __builtin_cop is not a valid coprocessor register number."); + error("Number should be between 0 and 15."); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[3].predicate) (op3, SImode)){ + error("Parameter 4 to __builtin_cop is not a valid coprocessor register number."); + error("Number should be between 0 and 15."); + return NULL_RTX; + } + + if (! (*insn_data[icode].operand[4].predicate) (op4, SImode)){ + error("Parameter 5 to __builtin_cop is not a valid coprocessor operation."); + error("Number should be between 0 and 127."); + return NULL_RTX; + } + + pat = GEN_FCN (icode) (op0, op1, op2, op3, op4); + if (! pat) + return 0; + emit_insn (pat); + + return target; + } + } + + for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) + if (d->code == fcode) + return avr32_expand_binop_builtin (d->icode, arglist, target); + + + /* @@@ Should really do something sensible here. */ + return NULL_RTX; +} + + +/* Handle an "interrupt" or "isr" attribute; + arguments as in struct attribute_spec.handler. 
*/ + +static tree +avr32_handle_isr_attribute (node, name, args, flags, no_add_attrs) + tree * node; + tree name; + tree args; + int flags; + bool * no_add_attrs; +{ + if (DECL_P (*node)) + { + if (TREE_CODE (*node) != FUNCTION_DECL) + { + warning ("`%s' attribute only applies to functions", + IDENTIFIER_POINTER (name)); + *no_add_attrs = true; + } + /* FIXME: the argument if any is checked for type attributes; + should it be checked for decl ones? */ + } + else + { + if (TREE_CODE (*node) == FUNCTION_TYPE + || TREE_CODE (*node) == METHOD_TYPE) + { + if (avr32_isr_value (args) == AVR32_FT_UNKNOWN) + { + warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); + *no_add_attrs = true; + } + } + else if (TREE_CODE (*node) == POINTER_TYPE + && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE + || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE) + && avr32_isr_value (args) != AVR32_FT_UNKNOWN) + { + *node = build_variant_type_copy (*node); + TREE_TYPE (*node) = build_type_attribute_variant + (TREE_TYPE (*node), + tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node)))); + *no_add_attrs = true; + } + else + { + /* Possibly pass this attribute on from the type to a decl. */ + if (flags & ((int) ATTR_FLAG_DECL_NEXT + | (int) ATTR_FLAG_FUNCTION_NEXT + | (int) ATTR_FLAG_ARRAY_NEXT)) + { + *no_add_attrs = true; + return tree_cons (name, args, NULL_TREE); + } + else + { + warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); + } + } + } + + return NULL_TREE; +} + +/* Handle an attribute requiring a FUNCTION_DECL; + arguments as in struct attribute_spec.handler. */ +static tree +avr32_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, + int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) +{ + if (TREE_CODE (*node) != FUNCTION_DECL) + { + warning ("%qs attribute only applies to functions", + IDENTIFIER_POINTER (name)); + *no_add_attrs = true; + } + + return NULL_TREE; +} + + +/* Handle an acall attribute; + arguments as in struct attribute_spec.handler. */ + +static tree +avr32_handle_acall_attribute (node, name, args, flags, no_add_attrs) + tree * node; + tree name; + tree args ATTRIBUTE_UNUSED; + int flags ATTRIBUTE_UNUSED; + bool * no_add_attrs; +{ + + if (TREE_CODE (*node) == FUNCTION_TYPE + || TREE_CODE (*node) == METHOD_TYPE) + { + warning ("`%s' attribute not yet supported...", + IDENTIFIER_POINTER (name)); + *no_add_attrs = true; + return NULL_TREE; + } + + warning ("`%s' attribute only applies to functions", + IDENTIFIER_POINTER (name)); + *no_add_attrs = true; + return NULL_TREE; +} + + +/* Return 0 if the attributes for two types are incompatible, 1 if they + are compatible, and 2 if they are nearly compatible (which causes a + warning to be generated). */ + +static int +avr32_comp_type_attributes (tree type1, + tree type2) +{ + int acall1, acall2, isr1, isr2, naked1, naked2; + + /* Check for mismatch of non-default calling convention. */ + if (TREE_CODE (type1) != FUNCTION_TYPE) + return 1; + + /* Check for mismatched call attributes. */ + acall1 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type1)) != NULL; + acall2 = lookup_attribute ("acall", TYPE_ATTRIBUTES (type2)) != NULL; + naked1 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type1)) != NULL; + naked2 = lookup_attribute ("naked", TYPE_ATTRIBUTES (type2)) != NULL; + isr1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL; + if (! isr1) + isr1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL; + + isr2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL; + if (! 
isr2) + isr2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL; + + if ( (acall1 && isr2) + || (acall2 && isr1) + || (naked1 && isr2) + || (naked2 && isr1)) + return 0; + + return 1; +} + + +/* Computes the type of the current function. */ + +static unsigned long +avr32_compute_func_type () +{ + unsigned long type = AVR32_FT_UNKNOWN; + tree a; + tree attr; + + if (TREE_CODE (current_function_decl) != FUNCTION_DECL) + abort (); + + /* Decide if the current function is volatile. Such functions + never return, and many memory cycles can be saved by not storing + register values that will never be needed again. This optimization + was added to speed up context switching in a kernel application. */ + if (optimize > 0 + && TREE_NOTHROW (current_function_decl) + && TREE_THIS_VOLATILE (current_function_decl)) + type |= AVR32_FT_VOLATILE; + + if (cfun->static_chain_decl != NULL) + type |= AVR32_FT_NESTED; + + attr = DECL_ATTRIBUTES (current_function_decl); + + a = lookup_attribute ("isr", attr); + if (a == NULL_TREE) + a = lookup_attribute ("interrupt", attr); + + if (a == NULL_TREE) + type |= AVR32_FT_NORMAL; + else + type |= avr32_isr_value(TREE_VALUE (a)); + + + a = lookup_attribute ("acall", attr); + if (a != NULL_TREE) + type |= AVR32_FT_ACALL; + + a = lookup_attribute ("naked", attr); + if (a != NULL_TREE) + type |= AVR32_FT_NAKED; + + return type; +} + +/* Returns the type of the current function. */ + +static unsigned long avr32_current_func_type (void) +{ + if (AVR32_FUNC_TYPE (cfun->machine->func_type) == AVR32_FT_UNKNOWN) + cfun->machine->func_type = avr32_compute_func_type (); + + return cfun->machine->func_type; +} + +/* + This target hook should return true if we should not pass type solely + in registers. The file expr.h defines a definition that is usually appropriate, + refer to expr.h for additional documentation. +*/ +bool avr32_must_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type){ + + + if ( type + && AGGREGATE_TYPE_P (type) + /* If the alignment is less than the size then pass in + the struct on the stack. */ + && ((unsigned int)TYPE_ALIGN_UNIT(type) < (unsigned int)int_size_in_bytes(type)) + /* If we support unaligned word accesses then structs of + size 4 and 8 can have any alignment and still be passed in + registers. */ + && !(TARGET_UNALIGNED_WORD + && ( int_size_in_bytes(type) == 4 + || int_size_in_bytes(type) == 8 )) + /* Double word structs need only a word alignment. */ + && !(int_size_in_bytes(type) == 8 + && TYPE_ALIGN_UNIT(type) >= 4) ) + return true; + + if ( type + && AGGREGATE_TYPE_P (type) + /* Structs of size 3,5,6,7 are always passed in registers. */ + && ( int_size_in_bytes(type) == 3 + || int_size_in_bytes(type) == 5 + || int_size_in_bytes(type) == 6 + || int_size_in_bytes(type) == 7 ) ) + return true; + + + return (type && + TREE_ADDRESSABLE (type)); +} + + +bool avr32_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED){ + return true; +} + +/* + This target hook should return true if an argument at the position indicated + by cum should be passed by reference. This predicate is queried after target + independent reasons for being passed by reference, such as TREE_ADDRESSABLE (type). + + If the hook returns true, a copy of that argument is made in memory and a + pointer to the argument is passed instead of the argument itself. The pointer + is passed in whatever way is appropriate for passing a pointer to that type. 
+*/ +bool avr32_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + tree type, + bool named ATTRIBUTE_UNUSED){ + return (type && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)); +} + +static int +avr32_arg_partial_bytes (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED, + enum machine_mode mode ATTRIBUTE_UNUSED, + tree type ATTRIBUTE_UNUSED, + bool named ATTRIBUTE_UNUSED) +{ + return 0; +} + + +struct gcc_target targetm = TARGET_INITIALIZER; + + +/* + Table used to convert between register numbers in the assembler instructions and + the register numbers used in gcc. +*/ +const int avr32_function_arg_reglist[] = {INTERNAL_REGNUM(12), + INTERNAL_REGNUM(11), + INTERNAL_REGNUM(10), + INTERNAL_REGNUM(9), + INTERNAL_REGNUM(8)}; + +rtx avr32_compare_op0 = NULL_RTX; +rtx avr32_compare_op1 = NULL_RTX; +rtx avr32_compare_operator = NULL_RTX; +rtx avr32_acc_cache = NULL_RTX; + +/* + Returns nonzero if it is allowed to store a value of mode mode in hard + register number regno. +*/ +int +avr32_hard_regno_mode_ok(regnr, mode) + int regnr; + enum machine_mode mode; +{ + + switch (mode) { + case DImode: /* long long */ + case DFmode: /* double */ + case SCmode: /* __complex__ float */ + case CSImode: /* __complex__ int */ + if (regnr < 4) { /* long long int not supported in r12, sp, lr or pc. */ + return 0; + } else { + if (regnr % 2) /* long long int has to be placed in even-numbered registers. */ + return 0; + else + return 1; + } + case CDImode: /* __complex__ long long */ + case DCmode: /* __complex__ double */ + case TImode: /* 16 bytes */ + if (regnr < 7) + return 0; + else if (regnr % 2) + return 0; + else + return 1; + default: + return 1; + } +} + + +int avr32_rnd_operands(rtx add, + rtx shift) +{ + + if ( GET_CODE(shift) == CONST_INT && + GET_CODE(add) == CONST_INT && + INTVAL(shift) > 0){ + if ( (1 << (INTVAL(shift) - 1)) == INTVAL(add) ) + return TRUE; + } + + return FALSE; +} + + + +int avr32_const_ok_for_constraint_p (int value, char c, const char *str){ + + + switch (c){ + case 'K': + case 'I': + { + int min_value = 0, max_value = 0; + char size_str[3] = { str[2], str[3], '\0' }; + int const_size = atoi(size_str); + if ( toupper(str[1]) == 'U' ){ + min_value = 0; + max_value = (1 << const_size) - 1; + } else if ( toupper(str[1]) == 'S' ){ + min_value = -(1 << (const_size-1)); + max_value = (1 << (const_size-1)) - 1; + } + + if ( c == 'I' ){ + value = -value; + } + + if ( value >= min_value + && value <= max_value ){ + return 1; + } + break; + } + case 'M': + return avr32_mask_upper_bits_operand(GEN_INT(value), VOIDmode); + } + + return 0; +} + + +/* Compute mask of registers which need saving upon function entry */ +static unsigned long +avr32_compute_save_reg_mask (int push) +{ + unsigned long func_type = avr32_current_func_type(); + unsigned int save_reg_mask = 0 ; + unsigned int reg; + + if ( IS_INTERRUPT(func_type) ){ + unsigned int max_reg = 12; + + + /* Get the banking scheme for the interrupt */ + switch ( func_type ){ + case AVR32_FT_ISR_FULL: + max_reg = 0; + break; + case AVR32_FT_ISR_HALF: + max_reg = 7; + break; + case AVR32_FT_ISR_NONE: + max_reg = 12; + break; + } + + /* Interrupt functions must not corrupt any registers, + even call clobbered ones. If this is a leaf function + we can just examine the registers used by the RTL, but + otherwise we have to assume that whatever function is + called might clobber anything, and so we have to save + all the call-clobbered registers as well.
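+ For example, under this scheme a half-shadowed ISR that calls another function must save both the registers it uses and all call-clobbered ones in r0-r7 (r8-r12 being shadowed), while a leaf ISR only needs the registers its RTL actually mentions.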
*/ + + /* Need not push the registers r8-r12 for AVR32A architectures, + as this is automatically done in hardware. We also do not have + any shadow registers. */ + if ( avr32_arch->uarch_type == UARCH_TYPE_AVR32A ){ + max_reg = 0; + func_type = AVR32_FT_ISR_NONE; + } + + /* All registers which are used and are not shadowed + must be saved */ + for (reg = 0; reg <= max_reg; reg++) + if (regs_ever_live[INTERNAL_REGNUM(reg)] + || (! current_function_is_leaf && call_used_regs [INTERNAL_REGNUM(reg)])) + save_reg_mask |= (1 << reg); + + /* Check LR */ + if ((regs_ever_live[LR_REGNUM] || !current_function_is_leaf + || frame_pointer_needed) + && (func_type == AVR32_FT_ISR_NONE) /* Only non-shadowed register models */ ) + save_reg_mask |= (1 << ASM_REGNUM(LR_REGNUM)); + + /* Make sure that the GOT register is pushed. */ + if ( max_reg >= ASM_REGNUM(PIC_OFFSET_TABLE_REGNUM) + && current_function_uses_pic_offset_table ) + save_reg_mask |= (1 << ASM_REGNUM(PIC_OFFSET_TABLE_REGNUM)); + + } else { + int use_pushm = optimize_size; + + /* In the normal case we only need to save those registers + which are call saved and which are used by this function. */ + for (reg = 0; reg <= 7; reg++) + if (regs_ever_live[INTERNAL_REGNUM(reg)] && ! call_used_regs [INTERNAL_REGNUM(reg)]) + save_reg_mask |= (1 << reg); + + /* Make sure that the GOT register is pushed. */ + if ( current_function_uses_pic_offset_table ) + save_reg_mask |= (1 << ASM_REGNUM(PIC_OFFSET_TABLE_REGNUM)); + + + /* If we optimize for size and + do not have anonymous arguments: + always use pushm/popm */ + if ( use_pushm ){ + if ( (save_reg_mask & (1 << 0) ) + || (save_reg_mask & (1 << 1)) + || (save_reg_mask & (1 << 2)) + || (save_reg_mask & (1 << 3)) ) + save_reg_mask |= 0xf; + + if ( (save_reg_mask & (1 << 4) ) + || (save_reg_mask & (1 << 5)) + || (save_reg_mask & (1 << 6)) + || (save_reg_mask & (1 << 7)) ) + save_reg_mask |= 0xf0; + + if ( (save_reg_mask & (1 << 8) ) + || (save_reg_mask & (1 << 9))) + save_reg_mask |= 0x300; + } + + + //Check LR + if ( (regs_ever_live[LR_REGNUM] || !current_function_is_leaf || + (optimize_size && save_reg_mask) || frame_pointer_needed) ) { + if ( push ){ + //Push/Pop LR + save_reg_mask |= (1 << ASM_REGNUM(LR_REGNUM)); + } else { + //Pop PC + save_reg_mask |= (1 << ASM_REGNUM(PC_REGNUM)); + } + } + } + + return save_reg_mask; +} + +/* Compute total size in bytes of all saved registers */ +static int +avr32_get_reg_mask_size (int reg_mask) +{ + int reg, size; + size = 0; + for (reg = 0; reg <= 15; reg++) + if (reg_mask & (1 << reg)) + size += 4; + + return size; +} + +/* Get a register from one of the registers which are saved onto the stack + upon function entry */ + +static int +avr32_get_saved_reg (int save_reg_mask) +{ + unsigned int reg; + + /* Find the first register which is saved in the saved_reg_mask */ + for (reg = 0; reg <= 15; reg++) + if (save_reg_mask & (1 << reg)) + return reg; + + return -1; +} + +/* Return 1 if it is possible to return using a single instruction. */ +int +avr32_use_return_insn (int iscond) +{ + unsigned int func_type = avr32_current_func_type(); + unsigned long saved_int_regs; + + /* Never use a return instruction before reload has run. */ + if (!reload_completed) + return 0; + + /* Must adjust the stack for vararg functions. */ + if ( current_function_args_info.uses_anonymous_args ) + return 0; + + /* If there is a stack adjustment
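+ (i.e. get_frame_size () is nonzero), the stack pointer has to be rewound first, so the return cannot be done in one instruction.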
*/ + if (get_frame_size ()) + return 0; + + saved_int_regs = avr32_compute_save_reg_mask (TRUE); + + + /* Conditional returns can not be performed in one instruction + if we need to restore registers from the stack */ + if (iscond && saved_int_regs ) + return 0; + + /* Conditional return can not be used for interrupt handlers. */ + if (iscond && IS_INTERRUPT(func_type) ) + return 0; + + /* For interrupt handlers which needs to pop registers */ + if (saved_int_regs && IS_INTERRUPT(func_type) ) + return 0; + + + /* If there are saved registers but the LR isn't saved, then we need + two instructions for the return. */ + if (saved_int_regs && !(saved_int_regs & (1 << ASM_REGNUM(LR_REGNUM)))) + return 0; + + + return 1; +} + + +/*Generate some function prologue info in the assembly file*/ + +void +avr32_target_asm_function_prologue (f, frame_size) + FILE * f; + HOST_WIDE_INT frame_size; +{ + + if (IS_NAKED(avr32_current_func_type())) + fprintf( f, "\t# Function is naked: Prologue and epilogue provided by programmer\n"); + + if (IS_INTERRUPT(avr32_current_func_type())){ + switch ( avr32_current_func_type() ){ + case AVR32_FT_ISR_FULL: + fprintf( f, "\t# Interrupt Function: Fully shadowed register file\n"); + break; + case AVR32_FT_ISR_HALF: + fprintf( f, "\t# Interrupt Function: Half shadowed register file\n"); + break; + default: + case AVR32_FT_ISR_NONE: + fprintf( f, "\t# Interrupt Function: No shadowed register file\n"); + break; + } + } + + + fprintf( f, "\t# args = %i, frame = %li, pretend = %i\n", + current_function_args_size, frame_size, + current_function_pretend_args_size); + + fprintf( f, "\t# frame_needed = %i, leaf_function = %i\n", + frame_pointer_needed, current_function_is_leaf ); + + fprintf( f, "\t# uses_anonymous_args = %i\n", + current_function_args_info.uses_anonymous_args ); +} + + +/* Generate and emit an insn that we will recognize as a pushm or stm. + Unfortunately, since this insn does not reflect very well the actual + semantics of the operation, we need to annotate the insn for the benefit + of DWARF2 frame unwind information. */ + +int +avr32_convert_to_reglist16(int reglist8_vect); + +static rtx +emit_multi_reg_push (int reglist, + int usePUSHM) +{ + rtx insn; + rtx dwarf; + rtx tmp; + rtx reg; + int i; + int nr_regs; + int index = 0; + + if ( usePUSHM ){ + insn = emit_insn(gen_pushm(gen_rtx_CONST_INT(SImode, reglist))); + reglist = avr32_convert_to_reglist16(reglist); + } else { + insn = emit_insn(gen_stm(stack_pointer_rtx, + gen_rtx_CONST_INT(SImode, reglist), + gen_rtx_CONST_INT(SImode, 1))); + } + + nr_regs = avr32_get_reg_mask_size(reglist)/4; + dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nr_regs + 1)); + + for (i = 15; i >= 0; i--) { + if (reglist & (1 << i)) { + reg = gen_rtx_REG (SImode, INTERNAL_REGNUM(i)); + tmp = gen_rtx_SET (VOIDmode, + gen_rtx_MEM (SImode, + plus_constant (stack_pointer_rtx, + 4 * index)), + reg); + RTX_FRAME_RELATED_P (tmp) = 1; + XVECEXP (dwarf, 0, 1 + index++) = tmp; + } + } + + tmp = gen_rtx_SET (SImode, + stack_pointer_rtx, + gen_rtx_PLUS (SImode, + stack_pointer_rtx, + GEN_INT (-4 * nr_regs))); + RTX_FRAME_RELATED_P (tmp) = 1; + XVECEXP (dwarf, 0, 0) = tmp; + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf, + REG_NOTES (insn)); + return insn; +} + + +rtx +avr32_gen_load_multiple( rtx *regs, int count, rtx from, + int write_back, int in_struct_p, + int scalar_p ){ + + rtx result; + int i = 0,j; + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + (write_back ? 
1 : 0))); + + if (write_back) + { + XVECEXP (result, 0, 0) + = gen_rtx_SET (GET_MODE (from), from, + plus_constant (from, count * 4)); + i = 1; + count++; + } + + + for (j = 0; i < count; i++, j++) + { + rtx unspec; + rtx mem = gen_rtx_MEM (SImode, plus_constant(from,j*4)); + MEM_IN_STRUCT_P (mem) = in_struct_p; + MEM_SCALAR_P (mem) = scalar_p; + unspec = gen_rtx_UNSPEC(VOIDmode, + gen_rtvec(1, mem), + UNSPEC_LDM); + XVECEXP (result, 0, i) + = gen_rtx_SET (VOIDmode, regs[j], unspec); + } + + return result; +} + + +rtx +avr32_gen_store_multiple( rtx *regs, int count, rtx to, + int in_struct_p, + int scalar_p ){ + + rtx result; + int i = 0,j; + result = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); + + for (j = 0; i < count; i++, j++) + { + rtx mem = gen_rtx_MEM (SImode, plus_constant(to, j*4)); + MEM_IN_STRUCT_P (mem) = in_struct_p; + MEM_SCALAR_P (mem) = scalar_p; + XVECEXP (result, 0, i) + = gen_rtx_SET (VOIDmode, mem, + gen_rtx_UNSPEC(VOIDmode, + gen_rtvec(1, regs[j]), + UNSPEC_STORE_MULTIPLE)); + } + + return result; +} + + +/* Move a block of memory if it is word aligned or we support unaligned + word memory accesses. The size must be at most 64 bytes. */ + +int avr32_gen_movmemsi (rtx *operands) +{ + HOST_WIDE_INT bytes_to_go; + rtx src, dst; + rtx st_src, st_dst; + int ptr_offset = 0; + int block_size; + int dst_in_struct_p, src_in_struct_p; + int dst_scalar_p, src_scalar_p; + int unaligned; + + if (GET_CODE (operands[2]) != CONST_INT + || GET_CODE (operands[3]) != CONST_INT + || INTVAL (operands[2]) > 64 + || ((INTVAL (operands[3]) & 3) + && !TARGET_UNALIGNED_WORD) ) + return 0; + + unaligned = (INTVAL (operands[3]) & 3) != 0; + + block_size = 4; + + st_dst = XEXP (operands[0], 0); + st_src = XEXP (operands[1], 0); + + dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]); + dst_scalar_p = MEM_SCALAR_P (operands[0]); + src_in_struct_p = MEM_IN_STRUCT_P (operands[1]); + src_scalar_p = MEM_SCALAR_P (operands[1]); + + dst = copy_to_mode_reg (SImode, st_dst); + src = copy_to_mode_reg (SImode, st_src); + + bytes_to_go = INTVAL (operands[2]); + + while (bytes_to_go) + { + enum machine_mode move_mode; + /* There seems to be a problem with reloads for the movti pattern, + so this is disabled until that problem is resolved */ + + /*if ( bytes_to_go >= GET_MODE_SIZE(TImode) ) + move_mode = TImode; + else */ + if ( (bytes_to_go >= GET_MODE_SIZE(DImode)) + && !unaligned) + move_mode = DImode; + else if ( bytes_to_go >= GET_MODE_SIZE(SImode) ) + move_mode = SImode; + else + move_mode = QImode; + + { + rtx dst_mem = gen_rtx_MEM (move_mode, gen_rtx_PLUS(SImode, dst, GEN_INT(ptr_offset))); + rtx src_mem = gen_rtx_MEM (move_mode, gen_rtx_PLUS(SImode, src, GEN_INT(ptr_offset))); + ptr_offset += GET_MODE_SIZE(move_mode); + bytes_to_go -= GET_MODE_SIZE(move_mode); + + MEM_IN_STRUCT_P (dst_mem) = dst_in_struct_p; + MEM_SCALAR_P (dst_mem) = dst_scalar_p; + + MEM_IN_STRUCT_P (src_mem) = src_in_struct_p; + MEM_SCALAR_P (src_mem) = src_scalar_p; + emit_move_insn( dst_mem, + src_mem); + + } + } + + return 1; +} + + + +/* Expand the function prologue. */ +void avr32_expand_prologue(void){ + rtx insn, dwarf; + unsigned long saved_reg_mask; + int reglist8 = 0; + + /* Naked functions do not have a prologue */ + if (IS_NAKED(avr32_current_func_type())) + return; + + saved_reg_mask = avr32_compute_save_reg_mask (TRUE); + + if ( saved_reg_mask ){ + /* Must push used registers */ + + /* Should we use PUSHM or STM? */ + int usePUSHM = TRUE; + reglist8 = 0; + if ( ((saved_reg_mask & (1<<0)) || + (saved_reg_mask & (1<<1)) || +
(saved_reg_mask & (1<<2)) || + (saved_reg_mask & (1<<3))) ){ + /* One of R0-R3 should at least be pushed */ + if ( ((saved_reg_mask & (1<<0)) && + (saved_reg_mask & (1<<1)) && + (saved_reg_mask & (1<<2)) && + (saved_reg_mask & (1<<3))) ){ + /*All should be pushed */ + reglist8 |= 0x01; + } else { + usePUSHM = FALSE; + } + } + + if ( ((saved_reg_mask & (1<<4)) || + (saved_reg_mask & (1<<5)) || + (saved_reg_mask & (1<<6)) || + (saved_reg_mask & (1<<7))) ){ + /* One of R4-R7 should at least be pushed */ + if ( ((saved_reg_mask & (1<<4)) && + (saved_reg_mask & (1<<5)) && + (saved_reg_mask & (1<<6)) && + (saved_reg_mask & (1<<7))) ){ + if ( usePUSHM ) + /*All should be pushed */ + reglist8 |= 0x02; + } else { + usePUSHM = FALSE; + } + } + + if ( ((saved_reg_mask & (1<<8)) || + (saved_reg_mask & (1<<9))) ){ + /* One of R8-R9 should at least be pushed */ + if ( ((saved_reg_mask & (1<<8)) && + (saved_reg_mask & (1<<9))) ){ + if ( usePUSHM ) + /*All should be pushed */ + reglist8 |= 0x04; + } else { + usePUSHM = FALSE; + } + } + + if (saved_reg_mask & (1<<10)) + reglist8 |= 0x08; + + if (saved_reg_mask & (1<<11)) + reglist8 |= 0x10; + + if (saved_reg_mask & (1<<12)) + reglist8 |= 0x20; + + if (saved_reg_mask & (1 << ASM_REGNUM(LR_REGNUM))){ + /* Push LR */ + reglist8 |= 0x40; + } + + if ( usePUSHM ){ + insn = emit_multi_reg_push(reglist8, TRUE); + } else { + insn = emit_multi_reg_push(saved_reg_mask, FALSE); + } + RTX_FRAME_RELATED_P(insn) = 1; + + //Prevent this instruction from being scheduled after any other instructions + emit_insn (gen_blockage ()); + } + + + /* Set frame pointer */ + if (frame_pointer_needed ) { + insn = emit_move_insn(frame_pointer_rtx, stack_pointer_rtx); + RTX_FRAME_RELATED_P (insn) = 1; + } + + if (get_frame_size() > 0) { + if ( avr32_const_ok_for_constraint_p( get_frame_size(), 'K', "Ks21") ){ + insn = emit_insn(gen_rtx_SET(SImode, + stack_pointer_rtx, + gen_rtx_PLUS(SImode, + stack_pointer_rtx, + gen_rtx_CONST_INT(SImode, + -get_frame_size())))); + RTX_FRAME_RELATED_P (insn) = 1; + } else { + /* Immediate is larger than k21. We must either check if we can use + one of the pushed registers as temporary storage, or make a + temporary register available by pushing a register to the stack. */ + rtx temp_reg, const_pool_entry, insn; + if ( saved_reg_mask ){ + temp_reg = gen_rtx_REG(SImode, INTERNAL_REGNUM(avr32_get_saved_reg(saved_reg_mask))); + } else { + temp_reg = gen_rtx_REG(SImode, INTERNAL_REGNUM(7)); + emit_move_insn(gen_rtx_MEM(SImode, gen_rtx_PRE_DEC(SImode, stack_pointer_rtx)), temp_reg); + } + + const_pool_entry = force_const_mem(SImode, gen_rtx_CONST_INT(SImode, get_frame_size())); + emit_move_insn(temp_reg, const_pool_entry); + + insn = emit_insn(gen_rtx_SET( SImode, + stack_pointer_rtx, + gen_rtx_MINUS( SImode, + stack_pointer_rtx, + temp_reg + ))); + + dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx, + gen_rtx_PLUS (SImode, stack_pointer_rtx, + GEN_INT (-get_frame_size()))); + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, + dwarf, REG_NOTES (insn)); + RTX_FRAME_RELATED_P (insn) = 1; + + if ( !saved_reg_mask ){ + insn = emit_move_insn(temp_reg, gen_rtx_MEM(SImode, gen_rtx_POST_INC( SImode, gen_rtx_REG(SImode, 13)))); + } + + //Mark the temp register as dead + REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, temp_reg, + REG_NOTES (insn)); + + + } + + //Prevent the stack adjustment from being scheduled + //after any instructions using the frame pointer.
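+ //Rationale (added commentary, a sketch of the intent): local-variable accesses + //must not be reordered ahead of the sp adjustment, since data below sp may be + //clobbered at any time, e.g. by an interrupt.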
+ emit_insn (gen_blockage ()); + } + + /* Load GOT */ + if ( flag_pic ){ + avr32_load_pic_register (); + + /* gcc does not know that load or call instructions might use + the pic register so it might schedule these instructions before + the pic register has been loaded. To avoid this, emit a barrier + for now. TODO! Find out a better way to let gcc know which + instructions might use the pic register. */ + emit_insn (gen_blockage ()); + } + return; +} + +void avr32_set_return_address (rtx source) +{ + rtx addr; + unsigned long saved_regs; + + saved_regs = avr32_compute_save_reg_mask(TRUE); + + if (!(saved_regs & (1 << ASM_REGNUM(LR_REGNUM)))) + emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source); + else + { + if (frame_pointer_needed) + addr = gen_rtx_REG (Pmode, FRAME_POINTER_REGNUM); + else + /* FIXME: Need to use scratch register if frame is large */ + addr = plus_constant (stack_pointer_rtx, get_frame_size()); + + emit_move_insn (gen_rtx_MEM (Pmode, addr), source); + } +} + + + +/* Return the length of INSN. LENGTH is the initial length computed by + attributes in the machine-description file. */ + +int avr32_adjust_insn_length(rtx insn ATTRIBUTE_UNUSED, + int length ATTRIBUTE_UNUSED) +{ + return length; +} + +void avr32_output_return_instruction( int single_ret_inst ATTRIBUTE_UNUSED, + int iscond ATTRIBUTE_UNUSED, + rtx cond ATTRIBUTE_UNUSED, + rtx r12_imm ) +{ + + unsigned long saved_reg_mask; + int insert_ret = TRUE; + int reglist8 = 0; + int stack_adjustment = get_frame_size(); + unsigned int func_type = avr32_current_func_type(); + FILE *f = asm_out_file; + + + /* Naked functions do not have an epilogue */ + if (IS_NAKED(func_type)) + return; + + saved_reg_mask = avr32_compute_save_reg_mask (FALSE); + + + /* Reset frame pointer */ + if ( stack_adjustment > 0 ) { + if ( avr32_const_ok_for_constraint_p( stack_adjustment, 'I', "Is21") ){ + fprintf(f, "\tsub sp, %i # Reset Frame Pointer\n", -stack_adjustment); + } else { + //TODO! Is it safe to use r8 as scratch??
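+ //Illustration (added commentary, not in the original sources): for frames beyond + //the 21-bit signed immediate range, -stack_adjustment is built in r8 with a + //mov/orh pair and added to sp, e.g. for a 4 MB frame: + //mov r8, lo(-4194304); orh r8, hi(-4194304); add sp, r8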
+ fprintf(f, "\tmov r8, lo(%i) # Reset Frame Pointer\n", -stack_adjustment); + fprintf(f, "\torh r8, hi(%i) # Reset Frame Pointer\n", -stack_adjustment); + fprintf(f, "\tadd sp,r8 # Reset Frame Pointer\n"); + } + } + + + if ( saved_reg_mask ){ + /* Must pop used registers */ + + /*Should we use POPM or LDM?*/ + int usePOPM = TRUE; + if ( ((saved_reg_mask & (1<<0)) || + (saved_reg_mask & (1<<1)) || + (saved_reg_mask & (1<<2)) || + (saved_reg_mask & (1<<3))) ){ + /* One of R0-R3 should at least be popped */ + if ( ((saved_reg_mask & (1<<0)) && + (saved_reg_mask & (1<<1)) && + (saved_reg_mask & (1<<2)) && + (saved_reg_mask & (1<<3))) ){ + /*All should be popped */ + reglist8 |= 0x01; + } else { + usePOPM = FALSE; + } + } + + if ( ((saved_reg_mask & (1<<4)) || + (saved_reg_mask & (1<<5)) || + (saved_reg_mask & (1<<6)) || + (saved_reg_mask & (1<<7))) ){ + /* One of R4-R7 should at least be popped */ + if ( ((saved_reg_mask & (1<<4)) && + (saved_reg_mask & (1<<5)) && + (saved_reg_mask & (1<<6)) && + (saved_reg_mask & (1<<7))) ){ + if ( usePOPM ) + /*All should be popped */ + reglist8 |= 0x02; + } else { + usePOPM = FALSE; + } + } + + if ( ((saved_reg_mask & (1<<8)) || + (saved_reg_mask & (1<<9))) ){ + /* One of R8-R9 should at least be popped */ + if ( ((saved_reg_mask & (1<<8)) && + (saved_reg_mask & (1<<9))) ){ + if ( usePOPM ) + /*All should be popped */ + reglist8 |= 0x04; + } else { + usePOPM = FALSE; + } + } + + if (saved_reg_mask & (1<<10)) + reglist8 |= 0x08; + + if (saved_reg_mask & (1<<11)) + reglist8 |= 0x10; + + if (saved_reg_mask & (1<<12)) + reglist8 |= 0x20; + + if (saved_reg_mask & (1<= 0 && n <= 3) + return 8+n; + else + return INVALID_REGNUM; +} + +/* Compute the distance from register FROM to register TO. + These can be the arg pointer, the frame pointer or + the stack pointer. + Typical stack layout looks like this: + + old stack pointer -> | | + ---- + | | \ + | | saved arguments for + | | vararg functions + arg_pointer -> | | / + -- + | | \ + | | call saved + | | registers + | | / + frame ptr -> -- + | | \ + | | local + | | variables + stack ptr --> | | / + -- + | | \ + | | outgoing + | | arguments + | | / + -- + + For a given function some or all of these stack components + may not be needed, giving rise to the possibility of + eliminating some of the registers. + + The values returned by this function must reflect the behaviour + of avr32_expand_prologue() and avr32_compute_save_reg_mask(). + + The sign of the number returned reflects the direction of stack + growth, so the values are positive for all eliminations except + from the soft frame pointer to the hard frame pointer. */ + + +int +avr32_initial_elimination_offset(from, to) + const int from; + const int to; +{ + int i; + int call_saved_regs = 0; + unsigned long saved_reg_mask; + unsigned int local_vars = get_frame_size (); + + saved_reg_mask = avr32_compute_save_reg_mask (TRUE); + + for (i = 0; i < 16; ++i){ + if ( saved_reg_mask & ( 1 << i ) ) + call_saved_regs += 4; + } + + + switch ( from ){ + case ARG_POINTER_REGNUM: + switch (to){ + case STACK_POINTER_REGNUM: + return call_saved_regs + local_vars; + case FRAME_POINTER_REGNUM: + return call_saved_regs; + default: + abort(); + } + case FRAME_POINTER_REGNUM: + switch (to){ + case STACK_POINTER_REGNUM: + return local_vars; + default: + abort(); + } + default: + abort(); + } + +} + + +/* + Returns an rtx used when passing the next argument to a function. + avr32_init_cumulative_args() and avr32_function_arg_advance() set which + register to use.
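+ For example, with the r12..r8 order in avr32_function_arg_reglist, the first word-sized argument goes in r12 and a doubleword argument takes the r11:r10 pair, falling back to r9:r8 when that pair is already taken.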
+*/ +rtx +avr32_function_arg(cum, mode, type, named) + CUMULATIVE_ARGS *cum; + enum machine_mode mode; + tree type; + int named; +{ + int index = -1; + + HOST_WIDE_INT arg_size, arg_rsize; + if ( type ){ + arg_size = int_size_in_bytes (type); + } else { + arg_size = GET_MODE_SIZE(mode); + } + arg_rsize = PUSH_ROUNDING(arg_size); + + /* + The last time this macro is called, it is called with mode == VOIDmode, + and its result is passed to the call or call_value + pattern as operands 2 and 3 respectively. + */ + if (mode == VOIDmode) { + return gen_rtx_CONST_INT(SImode, 22); // ToDo: fixme. + } + + if ((*targetm.calls.must_pass_in_stack)(mode, type) + || !named) { + return NULL_RTX; + } + + if ( arg_rsize == 8 ) { + /* use r11:r10 or r9:r8. */ + if ( !(GET_USED_INDEX(cum, 1) || GET_USED_INDEX(cum, 2)) ) + index = 1; + else if ( !(GET_USED_INDEX(cum, 3) || GET_USED_INDEX(cum, 4)) ) + index = 3; + else + index = -1; + } else if ( arg_rsize == 4 ){ /* Use first available register */ + index = 0; + while (index <= LAST_CUM_REG_INDEX && GET_USED_INDEX(cum, index)) + index++; + if (index > LAST_CUM_REG_INDEX) + index = -1; + } + + SET_REG_INDEX(cum, index); + + if (GET_REG_INDEX(cum) >= 0) + return gen_rtx_REG(mode, avr32_function_arg_reglist[GET_REG_INDEX(cum)]); + + return NULL_RTX; +} + +/* + Set the register used for passing the first argument to a function. +*/ +void +avr32_init_cumulative_args(cum, fntype, libname, fndecl) + CUMULATIVE_ARGS *cum; + tree fntype; + rtx libname ATTRIBUTE_UNUSED; + tree fndecl ATTRIBUTE_UNUSED; +{ + /* Set all registers as unused. */ + SET_INDEXES_UNUSED(cum); + + /* Reset uses_anonymous_args */ + cum->uses_anonymous_args = 0; + + /* Reset size of stack pushed arguments */ + cum->stack_pushed_args_size = 0; + + /* If the function is returning a value passed in memory, + r12 is used as a Return Value Pointer. + */ + + if (fntype != 0 + && avr32_return_in_memory(TREE_TYPE(fntype), fntype)){ + SET_REG_INDEX(cum, 0); + SET_USED_INDEX(cum, GET_REG_INDEX(cum)); + } +} + +/* + Set register used for passing the next argument to a function. Only the + Scratch Registers are used. + + number name + 15 r15 PC + 14 r14 LR + 13 r13 _SP_________ + FIRST_CUM_REG 12 r12 _||_ + 10 r11 || + 11 r10 _||_ Scratch Registers + 8 r9 || + LAST_SCRATCH_REG 9 r8 _\/_________ + 6 r7 /\ + 7 r6 || + 4 r5 || + 5 r4 || + 2 r3 || + 3 r2 || + 0 r1 || + 1 r0 _||_________ + +*/ +void +avr32_function_arg_advance(cum, mode, type, named) + CUMULATIVE_ARGS *cum; + enum machine_mode mode; + tree type; + int named ATTRIBUTE_UNUSED; +{ + HOST_WIDE_INT arg_size, arg_rsize; + if ( type ){ + arg_size = int_size_in_bytes (type); + } else { + arg_size = GET_MODE_SIZE(mode); + } + arg_rsize = PUSH_ROUNDING(arg_size) ; + + /* If the argument has to be passed on the stack, no register is used. */ + if ((*targetm.calls.must_pass_in_stack)(mode, type)){ + cum->stack_pushed_args_size += PUSH_ROUNDING(int_size_in_bytes(type)); + return; + } + + /* Mark the used registers as "used". */ + if (GET_REG_INDEX(cum) >= 0) { + SET_USED_INDEX(cum, GET_REG_INDEX(cum)); + if ( arg_rsize == 8 ){ + SET_USED_INDEX(cum, (GET_REG_INDEX(cum)+1)); + } + } else { + /* Had to use stack */ + cum->stack_pushed_args_size += arg_rsize; + } + +} + +/* + Defines which direction to go to find the next register to use if the + argument is larger than one register, or for arguments shorter than an + int which are not promoted, such as the last part of structures with + size not a multiple of 4.
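+ For instance, a 6-byte struct is padded upward, so its contents sit at the most significant end of the registers, while scalars and byte- or naturally aligned halfword-sized aggregates are padded downward.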
*/ +enum direction +avr32_function_arg_padding(mode, type) + enum machine_mode mode ATTRIBUTE_UNUSED; + tree type; +{ + + + /* Pad upward for all aggregates except byte + and halfword sized aggregates which can be + passed in registers. */ + + if (type + && AGGREGATE_TYPE_P (type) + && (int_size_in_bytes(type) != 1) + && !((int_size_in_bytes(type) == 2) + && TYPE_ALIGN_UNIT(type) >= 2) + && (int_size_in_bytes(type) & 0x3)){ + return upward; + } + + return downward; +} + +/* + Return a rtx used for the return value from a function call. +*/ +rtx +avr32_function_value(tree type, tree func) +{ + + if ( avr32_return_in_memory(type, func) ) + return NULL_RTX; + + + if ( int_size_in_bytes(type) <= 4 ) + if ( avr32_return_in_msb(type) ) + /* Aggregates of size less than a word which does + align the data in the MSB must use SImode for + r12. */ + return gen_rtx_REG( SImode, RET_REGISTER); + else + return gen_rtx_REG( TYPE_MODE(type), RET_REGISTER); + else if ( int_size_in_bytes(type) <= 8 ) + return gen_rtx_REG( TYPE_MODE(type), INTERNAL_REGNUM(11)); + + return NULL_RTX; +} + +/* + Return a rtx used for the return value from a library function call. +*/ +rtx +avr32_libcall_value(mode) + enum machine_mode mode; +{ + + if ( GET_MODE_SIZE(mode) <= 4 ) + return gen_rtx_REG( mode, RET_REGISTER); + else if ( GET_MODE_SIZE(mode) <= 8 ) + return gen_rtx_REG( mode, INTERNAL_REGNUM(11)); + else + return NULL_RTX; +} + +/* Return TRUE if X references a SYMBOL_REF. */ +int +symbol_mentioned_p (rtx x) +{ + const char * fmt; + int i; + + if (GET_CODE (x) == SYMBOL_REF) + return 1; + + fmt = GET_RTX_FORMAT (GET_CODE (x)); + + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) + { + if (fmt[i] == 'E') + { + int j; + + for (j = XVECLEN (x, i) - 1; j >= 0; j--) + if (symbol_mentioned_p (XVECEXP (x, i, j))) + return 1; + } + else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i))) + return 1; + } + + return 0; +} + +/* Return TRUE if X references a LABEL_REF. */ +int +label_mentioned_p (rtx x) +{ + const char * fmt; + int i; + + if (GET_CODE (x) == LABEL_REF) + return 1; + + fmt = GET_RTX_FORMAT (GET_CODE (x)); + for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) + { + if (fmt[i] == 'E') + { + int j; + + for (j = XVECLEN (x, i) - 1; j >= 0; j--) + if (label_mentioned_p (XVECEXP (x, i, j))) + return 1; + } + else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i))) + return 1; + } + + return 0; +} + + +int +avr32_legitimate_pic_operand_p (rtx x) +{ + + /* We can't have const, this must be broken down to a symbol. */ + if ( GET_CODE (x) == CONST ) + return FALSE; + + /* Can't access symbols or labels via the constant pool either */ + if ( (GET_CODE (x) == SYMBOL_REF + && CONSTANT_POOL_ADDRESS_P (x) + && (symbol_mentioned_p (get_pool_constant (x)) + || label_mentioned_p (get_pool_constant (x)))) ) + return FALSE; + + return TRUE; +} + + +rtx +legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED, rtx reg) +{ + + if ( GET_CODE (orig) == SYMBOL_REF + || GET_CODE (orig) == LABEL_REF) + { + int subregs = 0; + + if (reg == 0) + { + if (no_new_pseudos) + abort (); + else + reg = gen_reg_rtx (Pmode); + + subregs = 1; + } + + emit_move_insn(reg, orig); + + /* Only set current function as using pic offset table if + flag_pic is set. This is because this function is also + used if TARGET_HAS_ASM_ADDR_PSEUDOS is set. */ + if ( flag_pic ) + current_function_uses_pic_offset_table = 1; + + /* Put a REG_EQUAL note on this insn, so that it can be optimized + by loop. 
*/ + return reg; + } + else if (GET_CODE (orig) == CONST) + { + rtx base, offset; + + if (flag_pic + && GET_CODE (XEXP (orig, 0)) == PLUS + && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx) + return orig; + + if (reg == 0) + { + if (no_new_pseudos) + abort (); + else + reg = gen_reg_rtx (Pmode); + } + + if (GET_CODE (XEXP (orig, 0)) == PLUS) + { + base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg); + offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode, + base == reg ? 0 : reg); + } + else + abort (); + + if (GET_CODE (offset) == CONST_INT) + { + /* The base register doesn't really matter, we only want to + test the index for the appropriate mode. */ + if (!avr32_const_ok_for_constraint_p(INTVAL(offset), 'I', "Is21")) + { + if (!no_new_pseudos) + offset = force_reg (Pmode, offset); + else + abort (); + } + + if (GET_CODE (offset) == CONST_INT) + return plus_constant (base, INTVAL (offset)); + } + + return gen_rtx_PLUS (Pmode, base, offset); + } + + return orig; +} + +/* Generate code to load the PIC register. */ +void +avr32_load_pic_register () +{ + rtx l1, pic_tmp; + rtx global_offset_table; + + if ( (current_function_uses_pic_offset_table == 0) + || TARGET_NO_INIT_GOT ) + return; + + if (!flag_pic) + abort (); + + l1 = gen_label_rtx (); + + global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_"); + pic_tmp = gen_rtx_CONST(Pmode, gen_rtx_MINUS(SImode, gen_rtx_LABEL_REF (Pmode, l1), + global_offset_table)); + emit_insn (gen_pic_load_addr(pic_offset_table_rtx, force_const_mem(SImode, pic_tmp))); + emit_insn (gen_pic_compute_got_from_pc(pic_offset_table_rtx, l1)); + + /* Need to emit this whether or not we obey regdecls, + since setjmp/longjmp can cause life info to screw up. */ + emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx)); +} + + + +/* This hook should return true if values of type type are returned at the most significant + end of a register (in other words, if they are padded at the least significant end). You + can assume that type is returned in a register; the caller is required to check this. + Note that the register provided by FUNCTION_VALUE must be able to hold the complete + return value. For example, if a 1-, 2- or 3-byte structure is returned at the most + significant end of a 4-byte register, FUNCTION_VALUE should provide an SImode rtx. */ +bool +avr32_return_in_msb(tree type ATTRIBUTE_UNUSED) +{ + /* + if ( AGGREGATE_TYPE_P (type) ) + if ((int_size_in_bytes(type) == 1) + || ((int_size_in_bytes(type) == 2) + && TYPE_ALIGN_UNIT(type) >= 2)) + return false; + else + return true;*/ + + return false; +} + + +/* + Returns one if a certain function value is going to be returned in memory + and zero if it is going to be returned in a register. + + BLKmode and all other modes that is larger than 64 bits are returned in + memory. +*/ +int +avr32_return_in_memory(tree type, tree fntype ATTRIBUTE_UNUSED) +{ + if (TYPE_MODE(type) == VOIDmode) + return false; + + if (int_size_in_bytes(type) > (2 * UNITS_PER_WORD) + || int_size_in_bytes(type) == -1){ + return true; + } + + /* If we have an aggregate then use the same mechanism + as when checking if it should be passed on the stack. */ + if ( type + && AGGREGATE_TYPE_P (type) + && (*targetm.calls.must_pass_in_stack)(TYPE_MODE(type), type) ) + return true; + + return false; +} + + +/* Output the constant part of the trampoline. 
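+ The two lddpc slots below are the fixed part; avr32_initialize_trampoline() patches the two .long words at run time and then issues a cache operation to invalidate the icache, so that the freshly written code is fetched correctly.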
+ lddpc r0, pc[0x8:e] ; load static chain register + lddpc pc, pc[0x8:e] ; jump to subroutine + .long 0 ; Address to static chain, + ; filled in by avr32_initialize_trampoline() + .long 0 ; Address to subroutine, + ; filled in by avr32_initialize_trampoline() +*/ +void +avr32_trampoline_template(file) + FILE *file; +{ + fprintf(file, "\tlddpc r0, pc[8]\n"); + fprintf(file, "\tlddpc pc, pc[8]\n"); + /* make room for the address of the static chain. */ + fprintf(file, "\t.long\t0\n"); + /* make room for the address to the subroutine. */ + fprintf(file, "\t.long\t0\n"); +} + + +/* + Initialize the variable parts of a trampoline. +*/ +void +avr32_initialize_trampoline(addr, fnaddr, static_chain) + rtx addr; + rtx fnaddr; + rtx static_chain; +{ + /* Store the address to the static chain. */ + emit_move_insn(gen_rtx_MEM( SImode, plus_constant(addr, TRAMPOLINE_SIZE-4)), + static_chain); + + /* Store the address to the function. */ + emit_move_insn(gen_rtx_MEM( SImode, plus_constant(addr, TRAMPOLINE_SIZE)), + fnaddr); + + emit_insn(gen_cache(gen_rtx_REG(SImode, 13), + gen_rtx_CONST_INT(SImode, AVR32_CACHE_INVALIDATE_ICACHE))); + +} + +/* Return nonzero if X is valid as an addressing register. */ +int +avr32_address_register_rtx_p (rtx x, int strict_p) +{ + int regno; + + if (GET_CODE (x) != REG) + return 0; + + regno = REGNO (x); + + if (strict_p) + return REGNO_OK_FOR_BASE_P (regno); + + return (regno <= LAST_REGNUM + || regno >= FIRST_PSEUDO_REGISTER); +} + +/* Return nonzero if INDEX is valid for an address index operand. */ +int +avr32_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p) +{ + enum rtx_code code = GET_CODE (index); + + if ( mode == TImode ) + return 0; + + /* Standard coprocessor addressing modes. */ + if ( code == CONST_INT ){ + if (TARGET_HARD_FLOAT + && GET_MODE_CLASS (mode) == MODE_FLOAT) + /* Coprocessor mem insns have a smaller reach than ordinary mem insns */ + return CONST_OK_FOR_CONSTRAINT_P(INTVAL(index), 'K', "Ku14"); + else + return CONST_OK_FOR_CONSTRAINT_P(INTVAL(index), 'K', "Ks16"); + } + + if ( avr32_address_register_rtx_p (index, strict_p) ) + return 1; + + if (code == MULT) { + rtx xiop0 = XEXP (index, 0); + rtx xiop1 = XEXP (index, 1); + return ((avr32_address_register_rtx_p (xiop0, strict_p) + && power_of_two_operand (xiop1, SImode) + && (INTVAL(xiop1) <= 8) ) + || (avr32_address_register_rtx_p (xiop1, strict_p) + && power_of_two_operand (xiop0, SImode) + && (INTVAL(xiop0) <= 8))); + } else if (code == ASHIFT){ + rtx op = XEXP (index, 1); + + return (avr32_address_register_rtx_p (XEXP (index, 0), strict_p) + && GET_CODE (op) == CONST_INT + && INTVAL (op) > 0 + && INTVAL (op) <= 3); + } + + return 0; +} + +/* + Used in the GO_IF_LEGITIMATE_ADDRESS macro. Returns a nonzero value if + the RTX x is a legitimate memory address. + + Returns NO_REGS if the address is not legitimate, GENERAL_REGS or ALL_REGS + if it is. +*/ + +/* Forward declaration */ +int is_minipool_label(rtx label); + +int +avr32_legitimate_address(mode, x, strict) + enum machine_mode mode ATTRIBUTE_UNUSED; + rtx x; + int strict; +{ + + switch ( GET_CODE(x) ){ + case REG: + return avr32_address_register_rtx_p(x, strict); + case CONST: + { + rtx label = avr32_find_symbol(x); + if ( label + && ( (GET_CODE(XEXP(label, 0)) == CODE_LABEL + && is_minipool_label(XEXP(label, 0))) + || (CONSTANT_POOL_ADDRESS_P (label) + && !
(flag_pic + && (symbol_mentioned_p (get_pool_constant (label)) + || label_mentioned_p (get_pool_constant (label))))) ) + ){ + return TRUE; + } + } + break; + case LABEL_REF: + if ( GET_CODE(XEXP(x, 0)) == CODE_LABEL + && is_minipool_label(XEXP(x, 0)) ){ + return TRUE; + } + break; + case SYMBOL_REF:{ + if ( CONSTANT_POOL_ADDRESS_P (x) + && ! (flag_pic + && (symbol_mentioned_p (get_pool_constant (x)) + || label_mentioned_p (get_pool_constant (x)))) ) + return TRUE; + /* + A symbol_ref is only legal if it is a function. If all of them are legal, a + pseudo reg that is a constant will be replaced by a symbol_ref and make + illegale code. SYMBOL_REF_FLAG is set by ENCODE_SECTION_INFO. + */ + else if (SYMBOL_REF_RCALL_FUNCTION_P(x)) + return TRUE; + break;} + case PRE_DEC: /* (pre_dec (...)) */ + case POST_INC: /* (post_inc (...)) */ + return avr32_address_register_rtx_p(XEXP(x, 0), strict); + case PLUS: /* (plus (...) (...)) */ + { + rtx xop0 = XEXP (x, 0); + rtx xop1 = XEXP (x, 1); + + return ((avr32_address_register_rtx_p (xop0, strict) + && avr32_legitimate_index_p (mode, xop1, strict)) + || (avr32_address_register_rtx_p (xop1, strict) + && avr32_legitimate_index_p (mode, xop0, strict))); + } + default: + break; + } + + return FALSE; +} + + +int avr32_const_double_immediate(rtx value) +{ + HOST_WIDE_INT hi, lo; + + if ( GET_CODE(value) != CONST_DOUBLE ) + return FALSE; + + if (GET_MODE(value) == DImode){ + hi = CONST_DOUBLE_HIGH(value); + lo = CONST_DOUBLE_LOW(value); + } else { + HOST_WIDE_INT target_float[2]; + hi = lo = 0; + real_to_target(target_float, CONST_DOUBLE_REAL_VALUE(value), GET_MODE(value) ); + lo = target_float[0]; + hi = target_float[1]; + } + if ( avr32_const_ok_for_constraint_p(lo, 'K', "Ks21") + && ((GET_MODE(value) == SFmode) + || avr32_const_ok_for_constraint_p(hi, 'K', "Ks21")) ){ + return TRUE; + } + + return FALSE; +} + + +int +avr32_legitimate_constant_p(rtx x) +{ + switch (GET_CODE(x)) { + case CONST_INT: + return avr32_const_ok_for_constraint_p(INTVAL(x), 'K', "Ks21"); + case CONST_DOUBLE: + if ( GET_MODE(x) == SFmode + || GET_MODE(x) == DFmode + || GET_MODE(x) == DImode ) + return avr32_const_double_immediate(x); + else + return 0; + case LABEL_REF: + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS; + case SYMBOL_REF: + return flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS; + case CONST: + /* We must handle this one in the movsi expansion in order + for gcc not to put it in the constant pool. */ + return 0/*flag_pic || TARGET_HAS_ASM_ADDR_PSEUDOS*/; + case HIGH: + case CONST_VECTOR: + return 0; + default: + printf("%s():\n", __FUNCTION__); + debug_rtx(x); + return 1; + } +} + + +/* Strip any special encoding from labels */ +const char * +avr32_strip_name_encoding (const char * name) +{ + const char *stripped = name; + + while (1){ + switch ( stripped[0] ){ + case '#': + stripped = strchr(name + 1, '#') + 1; + break; + case '*': + stripped = &stripped[1]; + break; + default: + return stripped; + } + } +} + + + +/* Do anything needed before RTL is emitted for each function. */ +static struct machine_function *avr32_init_machine_status (void) +{ + struct machine_function *machine; + machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function)); + +#if AVR32_FT_UNKNOWN != 0 + machine->func_type = AVR32_FT_UNKNOWN; +#endif + + machine->minipool_label_head = 0; + machine->minipool_label_tail = 0; + return machine; +} + +void avr32_init_expanders (void) +{ + /* Arrange to initialize and mark the machine per-function status. 
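+ (Added note: init_machine_status is invoked lazily by the middle end when a function's cfun is allocated, so each function starts out as AVR32_FT_UNKNOWN and is classified on the first call to avr32_current_func_type().)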
*/ + init_machine_status = avr32_init_machine_status; +} + + +/* Return an RTX indicating where the return address to the + calling function can be found. */ + +rtx +avr32_return_addr ( int count, + rtx frame ATTRIBUTE_UNUSED) +{ + if (count != 0) + return NULL_RTX; + + return get_hard_reg_initial_val (Pmode, LR_REGNUM); +} + + +void +avr32_encode_section_info (tree decl, rtx rtl, int first) +{ + + if ( first && DECL_P (decl ) ){ + /* Set SYMBOL_REG_FLAG for local functions */ + if (! TREE_PUBLIC (decl) + && TREE_CODE(decl) == FUNCTION_DECL){ + if ( (*targetm.binds_local_p) (decl) ){ + SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; + } + } + } +} + + +void +avr32_asm_output_ascii(stream, ptr, len) + FILE *stream; + char *ptr; + int len; +{ + int i, i_new = 0; + char *new_ptr = xmalloc(4*len); + // char new_ptr[4000]; + if (new_ptr == NULL) + internal_error("Out of memory."); + + for (i=0; i delete cmp insn */ + return next_cond; + } + + switch ( cc_prev_status.mdep.flags ){ + case CC_SET_VNCZ: + case CC_SET_NCZ: + n_flag_valid = TRUE; + case CC_SET_CZ: + case CC_SET_Z: + z_flag_valid = TRUE; + } + + if ( cc_prev_status.mdep.value + && REG_P(XEXP(compare_exp, 0)) + && REGNO(XEXP(compare_exp, 0)) == REGNO(cc_prev_status.mdep.value) + && GET_CODE(XEXP(compare_exp, 1)) == CONST_INT + && next_cond != NULL_RTX ) { + if ( INTVAL(XEXP(compare_exp, 1)) == 0 + && z_flag_valid + && ( GET_CODE(next_cond) == EQ + || GET_CODE(next_cond) == NE ) ) + /* We can skip comparison Z flag is already + reflecting ops[0]*/ + return next_cond; + else if ( n_flag_valid + && ( (INTVAL(XEXP(compare_exp, 1)) == 0 + && ( GET_CODE(next_cond) == GE + || GET_CODE(next_cond) == LT )) + || (INTVAL(XEXP(compare_exp, 1)) == -1 + && ( GET_CODE(next_cond) == GT + || GET_CODE(next_cond) == LE ))) ){ + /* We can skip comparison N flag is already + reflecting ops[0], which means that we + can use the mi/pl conditions to check if + ops[0] is GE or LT 0. */ + if ( (GET_CODE(next_cond) == GE) || (GET_CODE(next_cond) == GT) ) + new_cond = gen_rtx_UNSPEC(CCmode, gen_rtvec(2, cc0_rtx, const0_rtx), UNSPEC_COND_PL); + else + new_cond = gen_rtx_UNSPEC(CCmode, gen_rtvec(2, cc0_rtx, const0_rtx), UNSPEC_COND_MI); + return new_cond; + } + } + return NULL_RTX; +} + + + +/* + Updates cc_status. + +*/ +void +avr32_notice_update_cc(exp, insn) + rtx exp; + rtx insn; +{ + + switch (get_attr_cc(insn)) + { + case CC_CALL_SET: + CC_STATUS_INIT; + /* Check if the function call returns a value in r12 */ + if ( REG_P(recog_data.operand[0]) + && REGNO(recog_data.operand[0]) == RETVAL_REGNUM ){ + cc_status.flags = 0; + cc_status.mdep.value = gen_rtx_COMPARE(SImode, recog_data.operand[0], const0_rtx); + cc_status.mdep.flags = CC_SET_VNCZ; + + } + break; + case CC_COMPARE: + /* Check that compare will not be optimized away + if so nothing should be done */ + if ( is_compare_redundant(SET_SRC(exp), get_next_insn_cond(insn)) + == NULL_RTX ){ + + /* Reset the nonstandard flag */ + CC_STATUS_INIT; + cc_status.flags = 0; + cc_status.mdep.value = SET_SRC(exp); + cc_status.mdep.flags = CC_SET_VNCZ; + } + break; + case CC_BLD: + /* Bit load is kind of like an inverted testsi, because the Z flag is inverted */ + CC_STATUS_INIT; + cc_status.flags = CC_INVERTED; + cc_status.mdep.value = SET_SRC(exp); + cc_status.mdep.flags = CC_SET_Z; + break; + case CC_NONE: + /* Insn does not affect CC at all. 
+ Check if the instruction updates some of the registers currently reflected in cc0 */ + + if ( (GET_CODE(exp) == SET) && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value) + && (reg_mentioned_p(SET_DEST(exp), cc_status.value1) + || reg_mentioned_p(SET_DEST(exp), cc_status.value2) + || reg_mentioned_p(SET_DEST(exp), cc_status.mdep.value)) ){ + CC_STATUS_INIT; + } + + /* If this is a parallel we must step through each of the parallel expressions */ + if ( GET_CODE(exp) == PARALLEL ){ + int i; + for ( i = 0; i < XVECLEN(exp, 0); ++i ){ + rtx vec_exp = XVECEXP(exp, 0, i); + if ( (GET_CODE(vec_exp) == SET) && (cc_status.value1 || cc_status.value2 || cc_status.mdep.value) + && (reg_mentioned_p(SET_DEST(vec_exp), cc_status.value1) + || reg_mentioned_p(SET_DEST(vec_exp), cc_status.value2) + || reg_mentioned_p(SET_DEST(vec_exp), cc_status.mdep.value)) ){ + CC_STATUS_INIT; + } + } + } + + /* Check if we have memory operations with post_inc or pre_dec on the register + currently reflected in cc0 */ + if ( GET_CODE(exp) == SET + && GET_CODE(SET_SRC(exp)) == MEM + && (GET_CODE(XEXP(SET_SRC(exp), 0)) == POST_INC + || GET_CODE(XEXP(SET_SRC(exp), 0)) == PRE_DEC) + && ( reg_mentioned_p(XEXP(XEXP(SET_SRC(exp), 0), 0), cc_status.value1) + || reg_mentioned_p(XEXP(XEXP(SET_SRC(exp), 0), 0), cc_status.value2) + || reg_mentioned_p(XEXP(XEXP(SET_SRC(exp), 0), 0), cc_status.mdep.value)) ) + CC_STATUS_INIT; + + if ( GET_CODE(exp) == SET + && GET_CODE(SET_DEST(exp)) == MEM + && (GET_CODE(XEXP(SET_DEST(exp), 0)) == POST_INC + || GET_CODE(XEXP(SET_DEST(exp), 0)) == PRE_DEC) + && ( reg_mentioned_p(XEXP(XEXP(SET_DEST(exp), 0), 0), cc_status.value1) + || reg_mentioned_p(XEXP(XEXP(SET_DEST(exp), 0), 0), cc_status.value2) + || reg_mentioned_p(XEXP(XEXP(SET_DEST(exp), 0), 0), cc_status.mdep.value)) ) + CC_STATUS_INIT; + break; + + case CC_SET_VNCZ: + CC_STATUS_INIT; + cc_status.mdep.value = recog_data.operand[0]; + cc_status.mdep.flags = CC_SET_VNCZ; + break; + + case CC_SET_NCZ: + CC_STATUS_INIT; + cc_status.mdep.value = recog_data.operand[0]; + cc_status.mdep.flags = CC_SET_NCZ; + break; + + case CC_SET_CZ: + CC_STATUS_INIT; + cc_status.mdep.value = recog_data.operand[0]; + cc_status.mdep.flags = CC_SET_CZ; + break; + + case CC_SET_Z: + CC_STATUS_INIT; + cc_status.mdep.value = recog_data.operand[0]; + cc_status.mdep.flags = CC_SET_Z; + break; + + case CC_CLOBBER: + CC_STATUS_INIT; + break; + + + + default: + PDEBUG("default\n"); + CC_STATUS_INIT; + } +} + + +/* + Outputs to the stdio stream STREAM the assembler syntax for an instruction + operand X. X is an RTL expression.
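+ code selects a variant of the output: 'i' prints the inverted condition (eq becomes ne), 'p' the position of the first set bit in a constant, 'd' a constant in decimal, 'h' the halfword selector ("b" or "t"), and 'r'/'s' print 8- and 16-bit register lists; e.g. a reglist8 value of 0x43 prints as "r0-r3, r4-r7, lr".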
+*/ +void +avr32_print_operand(stream, x, code) + FILE *stream; + rtx x; + int code; +{ + int error = 0; + + switch (GET_CODE(x)) + { + case UNSPEC: + switch ( XINT(x, 1) ){ + case UNSPEC_COND_PL: + if ( code == 'i' ) + fputs("mi", stream); + else + fputs("pl", stream); + break; + case UNSPEC_COND_MI: + if ( code == 'i' ) + fputs("pl", stream); + else + fputs("mi", stream); + break; + default: + error = 1; + } + break; + case EQ: + if ( code == 'i' ) + fputs("ne", stream); + else + fputs("eq", stream); + break; + case NE: + if ( code == 'i' ) + fputs("eq", stream); + else + fputs("ne", stream); + break; + case GT: + if ( code == 'i' ) + fputs("le", stream); + else + fputs("gt", stream); + break; + case GTU: + if ( code == 'i' ) + fputs("ls", stream); + else + fputs("hi", stream); + break; + case LT: + if ( code == 'i' ) + fputs("ge", stream); + else + fputs("lt", stream); + break; + case LTU: + if ( code == 'i' ) + fputs("hs", stream); + else + fputs("lo", stream); + break; + case GE: + if ( code == 'i' ) + fputs("lt", stream); + else + fputs("ge", stream); + break; + case GEU: + if ( code == 'i' ) + fputs("lo", stream); + else + fputs("hs", stream); + break; + case LE: + if ( code == 'i' ) + fputs("gt", stream); + else + fputs("le", stream); + break; + case LEU: + if ( code == 'i' ) + fputs("hi", stream); + else + fputs("ls", stream); + break; + case CONST_INT:{ + int value = INTVAL(x); + + if (code == 'i'){ + value++; + } + + if (code == 'p'){ + /* Set to bit position of first bit set in immediate */ + int i, bitpos = 32; + for ( i = 0; i < 32; i++ ) + if ( value & (1 << i) ){ + bitpos = i; + break; + } + value = bitpos; + } + + if ( code == 'r' ){ + //Reglist 8 + char op[50]; + op[0] = '\0'; + + if (value & 0x01) + sprintf(op, "r0-r3"); + if (value & 0x02) + strlen(op) ? sprintf(op, "%s, r4-r7", op) : sprintf(op, "r4-r7"); + if (value & 0x04) + strlen(op) ? sprintf(op, "%s, r8-r9", op) : sprintf(op, "r8-r9"); + if (value & 0x08) + strlen(op) ? sprintf(op, "%s, r10", op) : sprintf(op, "r10"); + if (value & 0x10) + strlen(op) ? sprintf(op, "%s, r11", op) : sprintf(op, "r11"); + if (value & 0x20) + strlen(op) ? sprintf(op, "%s, r12", op) : sprintf(op, "r12"); + if (value & 0x40) + strlen(op) ? sprintf(op, "%s, lr", op) : sprintf(op, "lr"); + if (value & 0x80) + strlen(op) ? sprintf(op, "%s, pc", op) : sprintf(op, "pc"); + + fputs(op, stream); + } else if ( code == 's' ) { + //Reglist 16 + char reglist16_string[100]; + int i; + reglist16_string[0] = '\0'; + + for ( i = 0; i < 16; ++i ){ + if ( value & ( 1 << i ) ){ + strlen(reglist16_string) ? sprintf(reglist16_string, "%s, %s", reglist16_string, + reg_names[INTERNAL_REGNUM(i)]) : sprintf(reglist16_string, "%s", reg_names[INTERNAL_REGNUM(i)]); + } + } + fputs(reglist16_string, stream); + } else if ( code == 'd' ){ + //Print in decimal format + fprintf(stream, "%d", value); + } else if ( code == 'h' ){ + //Print halfword part of word + fputs( value ? "b" : "t", stream ); + } else { + //Normal constant + fprintf(stream, "%d"