diff --git a/software/gcc/gcc/config/rs6000/ppc-auxv.h b/software/gcc/gcc/config/rs6000/ppc-auxv.h
new file mode 100644
index 0000000..c0e0700
--- /dev/null
+++ b/software/gcc/gcc/config/rs6000/ppc-auxv.h
@@ -0,0 +1,115 @@
+/* PowerPC support for accessing the AUXV AT_PLATFORM, AT_HWCAP and AT_HWCAP2
+   values from the Thread Control Block (TCB).
+
+   Copyright (C) 2016-2021 Free Software Foundation, Inc.
+   Contributed by Peter Bergner.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _PPC_AUXV_H
+#define _PPC_AUXV_H
+
+/* The PLATFORM value stored in the TCB is offset by _DL_FIRST_PLATFORM.  */
+#define _DL_FIRST_PLATFORM 32
+
+/* AT_PLATFORM bits.  These must match the values defined in GLIBC.  */
+#define PPC_PLATFORM_POWER4 0
+#define PPC_PLATFORM_PPC970 1
+#define PPC_PLATFORM_POWER5 2
+#define PPC_PLATFORM_POWER5_PLUS 3
+#define PPC_PLATFORM_POWER6 4
+#define PPC_PLATFORM_CELL_BE 5
+#define PPC_PLATFORM_POWER6X 6
+#define PPC_PLATFORM_POWER7 7
+#define PPC_PLATFORM_PPCA2 8
+#define PPC_PLATFORM_PPC405 9
+#define PPC_PLATFORM_PPC440 10
+#define PPC_PLATFORM_PPC464 11
+#define PPC_PLATFORM_PPC476 12
+#define PPC_PLATFORM_POWER8 13
+#define PPC_PLATFORM_POWER9 14
+
+/* This is not yet official.  */
+#define PPC_PLATFORM_POWER10 15
+
+/* AT_HWCAP bits.  These must match the values defined in the Linux kernel.  */
+#define PPC_FEATURE_32 0x80000000
+#define PPC_FEATURE_64 0x40000000
+#define PPC_FEATURE_601_INSTR 0x20000000
+#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
+#define PPC_FEATURE_HAS_FPU 0x08000000
+#define PPC_FEATURE_HAS_MMU 0x04000000
+#define PPC_FEATURE_HAS_4xxMAC 0x02000000
+#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
+#define PPC_FEATURE_HAS_SPE 0x00800000
+#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
+#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
+#define PPC_FEATURE_NO_TB 0x00100000
+#define PPC_FEATURE_POWER4 0x00080000
+#define PPC_FEATURE_POWER5 0x00040000
+#define PPC_FEATURE_POWER5_PLUS 0x00020000
+#define PPC_FEATURE_CELL_BE 0x00010000
+#define PPC_FEATURE_BOOKE 0x00008000
+#define PPC_FEATURE_SMT 0x00004000
+#define PPC_FEATURE_ICACHE_SNOOP 0x00002000
+#define PPC_FEATURE_ARCH_2_05 0x00001000
+#define PPC_FEATURE_PA6T 0x00000800
+#define PPC_FEATURE_HAS_DFP 0x00000400
+#define PPC_FEATURE_POWER6_EXT 0x00000200
+#define PPC_FEATURE_ARCH_2_06 0x00000100
+#define PPC_FEATURE_HAS_VSX 0x00000080
+#define PPC_FEATURE_PERFMON_COMPAT 0x00000040
+#define PPC_FEATURE_TRUE_LE 0x00000002
+#define PPC_FEATURE_PPC_LE 0x00000001
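+
+/* A minimal usage sketch (editorial illustration, not part of the original
+   file): application code normally reaches these bits through GCC's CPU
+   builtins rather than by reading the TCB directly.  The builtins compare
+   against the AT_PLATFORM/AT_HWCAP/AT_HWCAP2 words cached in the TCB at
+   the offsets defined below, e.g.
+
+     if (__builtin_cpu_is ("power9") || __builtin_cpu_supports ("arch_3_00"))
+       run_isa_3_0_path ();   /* hypothetical function name  */
+
+   The string arguments must name entries in the cpu_is_info and
+   cpu_supports_info tables in rs6000-call.c.  */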
+
+/* AT_HWCAP2 bits.  These must match the values defined in the Linux kernel.  */
+#define PPC_FEATURE2_ARCH_2_07 0x80000000
+#define PPC_FEATURE2_HAS_HTM 0x40000000
+#define PPC_FEATURE2_HAS_DSCR 0x20000000
+#define PPC_FEATURE2_HAS_EBB 0x10000000
+#define PPC_FEATURE2_HAS_ISEL 0x08000000
+#define PPC_FEATURE2_HAS_TAR 0x04000000
+#define PPC_FEATURE2_HAS_VEC_CRYPTO 0x02000000
+#define PPC_FEATURE2_HTM_NOSC 0x01000000
+#define PPC_FEATURE2_ARCH_3_00 0x00800000
+#define PPC_FEATURE2_HAS_IEEE128 0x00400000
+#define PPC_FEATURE2_DARN 0x00200000
+#define PPC_FEATURE2_SCV 0x00100000
+#define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000
+
+/* These are not yet official.  */
+#define PPC_FEATURE2_ARCH_3_1 0x00040000
+#define PPC_FEATURE2_MMA 0x00020000
+
+/* Thread Control Block (TCB) offsets of the AT_PLATFORM, AT_HWCAP and
+   AT_HWCAP2 values.  These must match the values defined in GLIBC.  */
+#define TCB_PLATFORM_OFFSET ((TARGET_64BIT) ? -28764 : -28724)
+#define TCB_HWCAP_BASE_OFFSET ((TARGET_64BIT) ? -28776 : -28736)
+#define TCB_HWCAP1_OFFSET \
+  ((BYTES_BIG_ENDIAN) ? TCB_HWCAP_BASE_OFFSET : TCB_HWCAP_BASE_OFFSET+4)
+#define TCB_HWCAP2_OFFSET \
+  ((BYTES_BIG_ENDIAN) ? TCB_HWCAP_BASE_OFFSET+4 : TCB_HWCAP_BASE_OFFSET)
+#define TCB_HWCAP_OFFSET(ID) \
+  (((ID) == 0) ? TCB_HWCAP1_OFFSET : TCB_HWCAP2_OFFSET)
+
+#endif /* _PPC_AUXV_H */
diff --git a/software/gcc/gcc/config/rs6000/readme.md b/software/gcc/gcc/config/rs6000/readme.md
new file mode 100644
index 0000000..cc89454
--- /dev/null
+++ b/software/gcc/gcc/config/rs6000/readme.md
@@ -0,0 +1,14 @@
+# GCC
+
+## Links
+
+* https://gcc.gnu.org/onlinedocs/gccint/Machine-Desc.html#Machine-Desc
+
+* https://gcc.gnu.org/onlinedocs/gccint/Back-End.html
+
+* https://www.cse.iitb.ac.in/grc/slides/cgotut-gcc/topic5-md-intro.pdf
+
+## Notes
+
+* gcc/gcc/config/rs6000/ contains all the Power target code, up to P10 (Power10)
+
diff --git a/software/gcc/gcc/config/rs6000/rs6000-call.c b/software/gcc/gcc/config/rs6000/rs6000-call.c
new file mode 100644
index 0000000..cdf545f
--- /dev/null
+++ b/software/gcc/gcc/config/rs6000/rs6000-call.c
@@ -0,0 +1,18358 @@
+/* Subroutines used to generate function calls and handle built-in
+   instructions on IBM RS/6000.
+   Copyright (C) 1991-2021 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with GCC; see the file COPYING3.  If not see
+   <http://www.gnu.org/licenses/>.  */
+
+#define IN_TARGET_CODE 1
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "tree.h"
+#include "memmodel.h"
+#include "gimple.h"
+#include "cfghooks.h"
+#include "cfgloop.h"
+#include "df.h"
+#include "tm_p.h"
+#include "stringpool.h"
+#include "expmed.h"
+#include "optabs.h"
+#include "regs.h"
+#include "ira.h"
+#include "recog.h"
+#include "cgraph.h"
+#include "diagnostic-core.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "alias.h"
+#include "fold-const.h"
+#include "attribs.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "print-tree.h"
+#include "varasm.h"
+#include "explow.h"
+#include "expr.h"
+#include "output.h"
+#include "common/common-target.h"
+#include "langhooks.h"
+#include "gimplify.h"
+#include "gimple-fold.h"
+#include "gimple-iterator.h"
+#include "ssa.h"
+#include "tree-ssa-propagate.h"
+#include "builtins.h"
+#include "tree-vector-builder.h"
+#if TARGET_XCOFF
+#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
+#endif
+#include "ppc-auxv.h"
+#include "targhooks.h"
+#include "opts.h"
+
+#include "rs6000-internal.h"
+#include "rs6000-builtins.h"
+
+#if TARGET_MACHO
+#include "gstab.h"  /* for N_SLINE */
+#include "dbxout.h"  /* dbxout_ */
+#endif
+
+#ifndef TARGET_PROFILE_KERNEL
+#define TARGET_PROFILE_KERNEL 0
+#endif
+
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
+# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
+# endif
+#endif
+
+#ifndef TARGET_NO_PROTOTYPE
+#define TARGET_NO_PROTOTYPE 0
+#endif
+
+struct builtin_compatibility
+{
+  const enum rs6000_builtins code;
+  const char *const name;
+};
+
+struct builtin_description
+{
+  const HOST_WIDE_INT mask;
+  const enum insn_code icode;
+  const char *const name;
+  const enum rs6000_builtins code;
+};
+
+/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
+static const struct
+{
+  const char *cpu;
+  unsigned int cpuid;
+} cpu_is_info[] = {
+  { "power10", PPC_PLATFORM_POWER10 },
+  { "power9", PPC_PLATFORM_POWER9 },
+  { "power8", PPC_PLATFORM_POWER8 },
+  { "power7", PPC_PLATFORM_POWER7 },
+  { "power6x", PPC_PLATFORM_POWER6X },
+  { "power6", PPC_PLATFORM_POWER6 },
+  { "power5+", PPC_PLATFORM_POWER5_PLUS },
+  { "power5", PPC_PLATFORM_POWER5 },
+  { "ppc970", PPC_PLATFORM_PPC970 },
+  { "power4", PPC_PLATFORM_POWER4 },
+  { "ppca2", PPC_PLATFORM_PPCA2 },
+  { "ppc476", PPC_PLATFORM_PPC476 },
+  { "ppc464", PPC_PLATFORM_PPC464 },
+  { "ppc440", PPC_PLATFORM_PPC440 },
+  { "ppc405", PPC_PLATFORM_PPC405 },
+  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
+};
+
+/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
+static const struct
+{
+  const char *hwcap;
+  int mask;
+  unsigned int id;
+} cpu_supports_info[] = {
+  /* AT_HWCAP masks.
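+     Each string below is a valid first argument to __builtin_cpu_supports().
+     Entries with id 0 are tested against the cached AT_HWCAP word, and
+     entries with id 1 (the "AT_HWCAP2 masks" group below) against
+     AT_HWCAP2; compare TCB_HWCAP_OFFSET in ppc-auxv.h.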
*/ + { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 }, + { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 }, + { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 }, + { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 }, + { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 }, + { "booke", PPC_FEATURE_BOOKE, 0 }, + { "cellbe", PPC_FEATURE_CELL_BE, 0 }, + { "dfp", PPC_FEATURE_HAS_DFP, 0 }, + { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 }, + { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 }, + { "fpu", PPC_FEATURE_HAS_FPU, 0 }, + { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 }, + { "mmu", PPC_FEATURE_HAS_MMU, 0 }, + { "notb", PPC_FEATURE_NO_TB, 0 }, + { "pa6t", PPC_FEATURE_PA6T, 0 }, + { "power4", PPC_FEATURE_POWER4, 0 }, + { "power5", PPC_FEATURE_POWER5, 0 }, + { "power5+", PPC_FEATURE_POWER5_PLUS, 0 }, + { "power6x", PPC_FEATURE_POWER6_EXT, 0 }, + { "ppc32", PPC_FEATURE_32, 0 }, + { "ppc601", PPC_FEATURE_601_INSTR, 0 }, + { "ppc64", PPC_FEATURE_64, 0 }, + { "ppcle", PPC_FEATURE_PPC_LE, 0 }, + { "smt", PPC_FEATURE_SMT, 0 }, + { "spe", PPC_FEATURE_HAS_SPE, 0 }, + { "true_le", PPC_FEATURE_TRUE_LE, 0 }, + { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 }, + { "vsx", PPC_FEATURE_HAS_VSX, 0 }, + + /* AT_HWCAP2 masks. */ + { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 }, + { "dscr", PPC_FEATURE2_HAS_DSCR, 1 }, + { "ebb", PPC_FEATURE2_HAS_EBB, 1 }, + { "htm", PPC_FEATURE2_HAS_HTM, 1 }, + { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 }, + { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 }, + { "isel", PPC_FEATURE2_HAS_ISEL, 1 }, + { "tar", PPC_FEATURE2_HAS_TAR, 1 }, + { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 }, + { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 }, + { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 }, + { "darn", PPC_FEATURE2_DARN, 1 }, + { "scv", PPC_FEATURE2_SCV, 1 }, + { "arch_3_1", PPC_FEATURE2_ARCH_3_1, 1 }, + { "mma", PPC_FEATURE2_MMA, 1 }, +}; + +static void altivec_init_builtins (void); +static tree builtin_function_type (machine_mode, machine_mode, + machine_mode, machine_mode, + enum rs6000_builtins, const char *name); +static void rs6000_common_init_builtins (void); +static void htm_init_builtins (void); +static void mma_init_builtins (void); +static rtx rs6000_expand_new_builtin (tree, rtx, rtx, machine_mode, int); +static bool rs6000_gimple_fold_new_builtin (gimple_stmt_iterator *gsi); + + +/* Hash table to keep track of the argument types for builtin functions. */ + +struct GTY((for_user)) builtin_hash_struct +{ + tree type; + machine_mode mode[4]; /* return value + 3 arguments. */ + unsigned char uns_p[4]; /* and whether the types are unsigned. */ +}; + +struct builtin_hasher : ggc_ptr_hash +{ + static hashval_t hash (builtin_hash_struct *); + static bool equal (builtin_hash_struct *, builtin_hash_struct *); +}; + +static GTY (()) hash_table *builtin_hash_table; + +/* Hash function for builtin functions with up to 3 arguments and a return + type. */ +hashval_t +builtin_hasher::hash (builtin_hash_struct *bh) +{ + unsigned ret = 0; + int i; + + for (i = 0; i < 4; i++) + { + ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]); + ret = (ret * 2) + bh->uns_p[i]; + } + + return ret; +} + +/* Compare builtin hash entries H1 and H2 for equivalence. 
*/ +bool +builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2) +{ + return ((p1->mode[0] == p2->mode[0]) + && (p1->mode[1] == p2->mode[1]) + && (p1->mode[2] == p2->mode[2]) + && (p1->mode[3] == p2->mode[3]) + && (p1->uns_p[0] == p2->uns_p[0]) + && (p1->uns_p[1] == p2->uns_p[1]) + && (p1->uns_p[2] == p2->uns_p[2]) + && (p1->uns_p[3] == p2->uns_p[3])); +} + + +/* Table that classifies rs6000 builtin functions (pure, const, etc.). */ +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \ + { NAME, ICODE, MASK, ATTR }, + +struct rs6000_builtin_info_type { + const char *name; + const enum insn_code icode; + const HOST_WIDE_INT mask; + const unsigned attr; +}; + +static const struct rs6000_builtin_info_type rs6000_builtin_info[] = +{ +#include "rs6000-builtin.def" +}; + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +const struct altivec_builtin_types altivec_overloaded_builtins[] = { + /* Unary AltiVec/VSX builtins. 
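+     Each entry maps an overloaded builtin code to one type-specific
+     instance: the fields are the overloaded code, the specific code, the
+     result type, and up to three operand types (unused slots are 0).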
*/ + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, P8V_BUILTIN_ABS_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABS, VSX_BUILTIN_XVABSDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_CEIL, ALTIVEC_BUILTIN_VRFIP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_CEIL, VSX_BUILTIN_XVRDPIP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_EXPTE, ALTIVEC_BUILTIN_VEXPTEFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_FLOOR, VSX_BUILTIN_XVRDPIM, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_FLOOR, ALTIVEC_BUILTIN_VRFIM, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_LOGE, ALTIVEC_BUILTIN_VLOGEFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RE, ALTIVEC_BUILTIN_VREFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RE, VSX_BUILTIN_XVREDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_ROUND, VSX_BUILTIN_XVRDPI, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RECIP, ALTIVEC_BUILTIN_VRECIPFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_RECIP, VSX_BUILTIN_RECIP_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRT, ALTIVEC_BUILTIN_VRSQRTFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRT, VSX_BUILTIN_RSQRT_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRTE, ALTIVEC_BUILTIN_VRSQRTEFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RSQRTE, VSX_BUILTIN_XVRSQRTEDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { 
ALTIVEC_BUILTIN_VEC_TRUNC, ALTIVEC_BUILTIN_VRFIZ, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_TRUNC, VSX_BUILTIN_XVRDPIZ, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, P8V_BUILTIN_VUPKHSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, P8V_BUILTIN_VUPKHSW, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKH, VSX_BUILTIN_DOUBLEH_V4SF, + RS6000_BTI_V2DF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSH, P8V_BUILTIN_VUPKHSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSH, P8V_BUILTIN_VUPKHSW, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, P8V_BUILTIN_VUPKLSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, P8V_BUILTIN_VUPKLSW, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_UNPACKL, VSX_BUILTIN_DOUBLEL_V4SF, + RS6000_BTI_V2DF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 }, + + /* Binary AltiVec/VSX builtins. 
*/ + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUDM, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, VSX_BUILTIN_XVADDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUQM, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADD, P8V_BUILTIN_VADDUQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDFP, ALTIVEC_BUILTIN_VADDFP, + 
RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDC, ALTIVEC_BUILTIN_VADDCUW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDC, ALTIVEC_BUILTIN_VADDCUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDC, P8V_BUILTIN_VADDCUQ, + RS6000_BTI_unsigned_V1TI, 
RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDC, P8V_BUILTIN_VADDCUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDEC, P8V_BUILTIN_VADDECUQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + { ALTIVEC_BUILTIN_VEC_ADDEC, P8V_BUILTIN_VADDECUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, 
RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SF, + RS6000_BTI_V4SF, 
RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 
RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGSW, ALTIVEC_BUILTIN_VAVGSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGUW, ALTIVEC_BUILTIN_VAVGUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGSH, ALTIVEC_BUILTIN_VAVGSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGUH, ALTIVEC_BUILTIN_VAVGUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGSB, ALTIVEC_BUILTIN_VAVGSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VAVGUB, ALTIVEC_BUILTIN_VAVGUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPB, ALTIVEC_BUILTIN_VCMPBFP, + RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, P8V_BUILTIN_VCMPEQUD, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_CMPEQ, P8V_BUILTIN_VCMPEQUD, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, P8V_BUILTIN_VCMPEQUD, + RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, P10V_BUILTIN_VCMPEQUT, + RS6000_BTI_bool_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, P10V_BUILTIN_VCMPEQUT, + RS6000_BTI_bool_V1TI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPEQ, VSX_BUILTIN_XVCMPEQDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQFP, ALTIVEC_BUILTIN_VCMPEQFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + + { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + + { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + + { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_CMPGE, ALTIVEC_BUILTIN_VCMPGEFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_XVCMPGEDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_U16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_U8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_U4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, VSX_BUILTIN_CMPGE_U2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0}, + + { ALTIVEC_BUILTIN_VEC_CMPGE, P10V_BUILTIN_CMPGE_1TI, + RS6000_BTI_bool_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGE, P10V_BUILTIN_CMPGE_U1TI, + RS6000_BTI_bool_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 
RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, P8V_BUILTIN_VCMPGTUD, + RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, P10V_BUILTIN_VCMPGTUT, + RS6000_BTI_bool_V1TI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, P8V_BUILTIN_VCMPGTSD, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, P10V_BUILTIN_VCMPGTST, + RS6000_BTI_bool_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPGT, VSX_BUILTIN_XVCMPGTDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTFP, ALTIVEC_BUILTIN_VCMPGTFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLE, ALTIVEC_BUILTIN_VCMPGEFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_XVCMPGEDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_U16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_U8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_U4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, VSX_BUILTIN_CMPLE_U2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, P10V_BUILTIN_CMPLE_1TI, + RS6000_BTI_bool_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLE, P10V_BUILTIN_CMPLE_U1TI, + RS6000_BTI_bool_V1TI, RS6000_BTI_unsigned_V1TI, + 
RS6000_BTI_unsigned_V1TI, 0}, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, P8V_BUILTIN_VCMPGTUD, + RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, P8V_BUILTIN_VCMPGTSD, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTFP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPLT, VSX_BUILTIN_XVCMPGTDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_COPYSIGN, VSX_BUILTIN_CPSGNDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_COPYSIGN, ALTIVEC_BUILTIN_COPYSIGN_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFUX, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFSX, + RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTF, VSX_BUILTIN_XVCVSXDDP_SCALE, + RS6000_BTI_V2DF, RS6000_BTI_V2DI, RS6000_BTI_INTSI, 0}, + { ALTIVEC_BUILTIN_VEC_CTF, VSX_BUILTIN_XVCVUXDDP_SCALE, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, 0}, + { ALTIVEC_BUILTIN_VEC_VCFSX, ALTIVEC_BUILTIN_VCFSX, + RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VCFUX, ALTIVEC_BUILTIN_VCFUX, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTS, ALTIVEC_BUILTIN_VCTSXS, + RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTS, VSX_BUILTIN_XVCVDPSXDS_SCALE, + RS6000_BTI_V2DI, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTU, ALTIVEC_BUILTIN_VCTUXS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_CTU, VSX_BUILTIN_XVCVDPUXDS_SCALE, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 }, + + { P8V_BUILTIN_VEC_BCDADD, MISC_BUILTIN_BCDADD_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD, MISC_BUILTIN_BCDADD_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_LT, MISC_BUILTIN_BCDADD_LT_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_LT, MISC_BUILTIN_BCDADD_LT_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_EQ, MISC_BUILTIN_BCDADD_EQ_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_EQ, MISC_BUILTIN_BCDADD_EQ_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_GT, MISC_BUILTIN_BCDADD_GT_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_GT, MISC_BUILTIN_BCDADD_GT_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_OV, MISC_BUILTIN_BCDADD_OV_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDADD_OV, MISC_BUILTIN_BCDADD_OV_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDINVALID, MISC_BUILTIN_BCDINVALID_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, 0, 0 }, + { P8V_BUILTIN_VEC_BCDINVALID, MISC_BUILTIN_BCDINVALID_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P9V_BUILTIN_VEC_BCDMUL10, P9V_BUILTIN_BCDMUL10_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_BCDDIV10, P9V_BUILTIN_BCDDIV10_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P8V_BUILTIN_VEC_DENBCD, MISC_BUILTIN_DENBCD_V16QI, + RS6000_BTI_dfloat128, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P8V_BUILTIN_VEC_BCDSUB, MISC_BUILTIN_BCDSUB_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB, MISC_BUILTIN_BCDSUB_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_LT, MISC_BUILTIN_BCDSUB_LT_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_LT, MISC_BUILTIN_BCDSUB_LT_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_LE, MISC_BUILTIN_BCDSUB_LE_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_LE, MISC_BUILTIN_BCDSUB_LE_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_EQ, MISC_BUILTIN_BCDSUB_EQ_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_EQ, MISC_BUILTIN_BCDSUB_EQ_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_GT, MISC_BUILTIN_BCDSUB_GT_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_GT, MISC_BUILTIN_BCDSUB_GT_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_GE, MISC_BUILTIN_BCDSUB_GE_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_GE, MISC_BUILTIN_BCDSUB_GE_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_OV, MISC_BUILTIN_BCDSUB_OV_V1TI, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_INTSI }, + { P8V_BUILTIN_VEC_BCDSUB_OV, MISC_BUILTIN_BCDSUB_OV_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + + + { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_XVDIVDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_DIV_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 
RS6000_BTI_V2DI, 0 }, + { VSX_BUILTIN_VEC_DIV, VSX_BUILTIN_UDIV_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { VSX_BUILTIN_VEC_DIV, P10V_BUILTIN_DIVS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { VSX_BUILTIN_VEC_DIV, P10V_BUILTIN_DIVU_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { VSX_BUILTIN_VEC_DIV, P10V_BUILTIN_DIVS_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { VSX_BUILTIN_VEC_DIV, P10V_BUILTIN_DIVU_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { VSX_BUILTIN_VEC_DIV, P10V_BUILTIN_DIV_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { VSX_BUILTIN_VEC_DIV, P10V_BUILTIN_UDIV_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { P10_BUILTIN_VEC_DIVE, P10V_BUILTIN_DIVES_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P10_BUILTIN_VEC_DIVE, P10V_BUILTIN_DIVEU_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P10_BUILTIN_VEC_DIVE, P10V_BUILTIN_DIVES_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P10_BUILTIN_VEC_DIVE, P10V_BUILTIN_DIVEU_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P10_BUILTIN_VEC_DIVE, P10V_BUILTIN_DIVES_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P10_BUILTIN_VEC_DIVE, P10V_BUILTIN_DIVEU_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { P10_BUILTIN_VEC_MOD, P10V_BUILTIN_MODS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P10_BUILTIN_VEC_MOD, P10V_BUILTIN_MODU_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P10_BUILTIN_VEC_MOD, P10V_BUILTIN_MODS_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P10_BUILTIN_VEC_MOD, P10V_BUILTIN_MODU_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P10_BUILTIN_VEC_MOD, P10V_BUILTIN_MODS_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P10_BUILTIN_VEC_MOD, P10V_BUILTIN_MODU_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { VSX_BUILTIN_VEC_DOUBLE, VSX_BUILTIN_XVCVSXDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLE, VSX_BUILTIN_XVCVUXDDP, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { VSX_BUILTIN_VEC_DOUBLEE, VSX_BUILTIN_DOUBLEE_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEE, VSX_BUILTIN_UNS_DOUBLEE_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEE, VSX_BUILTIN_DOUBLEE_V4SF, + RS6000_BTI_V2DF, RS6000_BTI_V4SF, 0, 0 }, + + { VSX_BUILTIN_VEC_DOUBLEO, VSX_BUILTIN_DOUBLEO_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEO, VSX_BUILTIN_UNS_DOUBLEO_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEO, VSX_BUILTIN_DOUBLEO_V4SF, + RS6000_BTI_V2DF, RS6000_BTI_V4SF, 0, 0 }, + + { VSX_BUILTIN_VEC_DOUBLEH, VSX_BUILTIN_DOUBLEH_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEH, VSX_BUILTIN_UNS_DOUBLEH_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEH, VSX_BUILTIN_DOUBLEH_V4SF, + RS6000_BTI_V2DF, RS6000_BTI_V4SF, 0, 0 }, + + { VSX_BUILTIN_VEC_DOUBLEL, VSX_BUILTIN_DOUBLEL_V4SI, + 
RS6000_BTI_V2DF, RS6000_BTI_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEL, VSX_BUILTIN_UNS_DOUBLEL_V4SI, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_DOUBLEL, VSX_BUILTIN_DOUBLEL_V4SF, + RS6000_BTI_V2DF, RS6000_BTI_V4SF, 0, 0 }, + + { VSX_BUILTIN_VEC_FLOAT, VSX_BUILTIN_XVCVSXWSP_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SI, 0, 0 }, + { VSX_BUILTIN_VEC_FLOAT, VSX_BUILTIN_XVCVUXWSP_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_FLOAT2, P8V_BUILTIN_FLOAT2_V2DF, + RS6000_BTI_V4SF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { P8V_BUILTIN_VEC_FLOAT2, P8V_BUILTIN_FLOAT2_V2DI, + RS6000_BTI_V4SF, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_FLOAT2, P8V_BUILTIN_UNS_FLOAT2_V2DI, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { VSX_BUILTIN_VEC_FLOATE, VSX_BUILTIN_FLOATE_V2DF, + RS6000_BTI_V4SF, RS6000_BTI_V2DF, 0, 0 }, + { VSX_BUILTIN_VEC_FLOATE, VSX_BUILTIN_FLOATE_V2DI, + RS6000_BTI_V4SF, RS6000_BTI_V2DI, 0, 0 }, + { VSX_BUILTIN_VEC_FLOATE, VSX_BUILTIN_UNS_FLOATE_V2DI, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { VSX_BUILTIN_VEC_FLOATO, VSX_BUILTIN_FLOATO_V2DF, + RS6000_BTI_V4SF, RS6000_BTI_V2DF, 0, 0 }, + { VSX_BUILTIN_VEC_FLOATO, VSX_BUILTIN_FLOATO_V2DI, + RS6000_BTI_V4SF, RS6000_BTI_V2DI, 0, 0 }, + { VSX_BUILTIN_VEC_FLOATO, VSX_BUILTIN_UNS_FLOATO_V2DI, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTTI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTTI, 0 }, + + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_unsigned_V4SI, 
RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 
~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + + /* vector signed __int128 vec_xl_sext (signed long long, signed char *); + vector signed __int128 vec_xl_sext (signed long long, signed short *); + vector signed __int128 vec_xl_sext (signed long long, signed int *); + vector signed __int128 vec_xl_sext (signed long long, signed long long *); */ + { P10_BUILTIN_VEC_SE_LXVRX, P10_BUILTIN_SE_LXVRBX, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { P10_BUILTIN_VEC_SE_LXVRX, P10_BUILTIN_SE_LXVRHX, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { P10_BUILTIN_VEC_SE_LXVRX, P10_BUILTIN_SE_LXVRWX, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { P10_BUILTIN_VEC_SE_LXVRX, P10_BUILTIN_SE_LXVRDX, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI, 0 }, + { P10_BUILTIN_VEC_SE_LXVRX, P10_BUILTIN_SE_LXVRDX, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + + /* vector unsigned __int128 vec_xl_zext (signed long long, unsigned char *); + vector unsigned __int128 vec_xl_zext (signed long long, unsigned short *); + vector unsigned __int128 vec_xl_zext (signed long long, unsigned int *); + vector unsigned __int128 vec_xl_zext (signed long long, unsigned long long *); */ + { P10_BUILTIN_VEC_ZE_LXVRX, P10_BUILTIN_ZE_LXVRBX, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { P10_BUILTIN_VEC_ZE_LXVRX, P10_BUILTIN_ZE_LXVRHX, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { P10_BUILTIN_VEC_ZE_LXVRX, P10_BUILTIN_ZE_LXVRWX, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { P10_BUILTIN_VEC_ZE_LXVRX, P10_BUILTIN_ZE_LXVRDX, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI, 0 }, + { P10_BUILTIN_VEC_ZE_LXVRX, P10_BUILTIN_ZE_LXVRDX, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long_long, 0 }, + + /* void vec_xst_trunc (vector signed __int128, signed long long, signed char *); + void vec_xst_trunc (vector unsigned __int128, signed long long, unsigned char *); + void vec_xst_trunc (vector signed __int128, signed long long, signed short *); + void vec_xst_trunc (vector unsigned __int128, signed long long, unsigned short *); + void vec_xst_trunc (vector signed __int128, signed long long, signed int *); + void vec_xst_trunc (vector unsigned __int128, signed long long, unsigned int *); + void vec_xst_trunc (vector signed __int128, signed long long, signed long long *); + void vec_xst_trunc (vector unsigned __int128, signed long long, unsigned long long *); */ + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRBX, RS6000_BTI_void, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRBX, RS6000_BTI_void, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRHX, RS6000_BTI_void, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRHX, RS6000_BTI_void, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRWX, RS6000_BTI_void, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRWX, RS6000_BTI_void, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
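+ + /* Usage sketch (our illustration, not part of the upstream table): these + overloads resolve on the pointee type of the address argument, so the + rows above pick the element width of the Power10 load/store, e.g. + + signed short s = -2; + vector signed __int128 v = vec_xl_sext (0, &s); // SE_LXVRHX row + unsigned int u = 7; + vector unsigned __int128 w = vec_xl_zext (0, &u); // ZE_LXVRWX row + vec_xst_trunc (v, 0, &s); // TR_STXVRHX row + + The doubleword (TR_STXVRDX) rows of the same group follow below. */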
+ { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRDX, RS6000_BTI_void, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRDX, RS6000_BTI_void, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long_long }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRDX, RS6000_BTI_void, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI }, + { P10_BUILTIN_VEC_TR_STXVRX, P10_BUILTIN_TR_STXVRDX, RS6000_BTI_void, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI }, + + /* vector float vec_ldl (int, vector float *); + vector float vec_ldl (int, float *); */ + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + + /* vector bool int vec_ldl (int, vector bool int *); + vector bool int vec_ldl (int, bool int *); + vector int vec_ldl (int, vector int *); + vector int vec_ldl (int, int *); + vector unsigned int vec_ldl (int, vector unsigned int *); + vector unsigned int vec_ldl (int, unsigned int *); */ + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_int, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + + /* vector bool short vec_ldl (int, vector bool short *); + vector bool short vec_ldl (int, bool short *); + vector pixel vec_ldl (int, vector pixel *); + vector short vec_ldl (int, vector short *); + vector short vec_ldl (int, short *); + vector unsigned short vec_ldl (int, vector unsigned short *); + vector unsigned short vec_ldl (int, unsigned short *); */ + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_short, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + + /* vector bool char vec_ldl (int, vector bool char *); + vector bool char vec_ldl (int, bool char *); + vector char vec_ldl (int, vector char *); + vector char vec_ldl (int, char *); + vector unsigned char vec_ldl (int, vector unsigned char *); + vector unsigned char vec_ldl (int, unsigned char *); */ + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI, + RS6000_BTI_bool_V16QI, 
RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_char, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + + /* vector double vec_ldl (int, vector double *); + vector double vec_ldl (int, double *); */ + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + + /* vector long long vec_ldl (int, vector long long *); + vector long long vec_ldl (int, long long *); + vector unsigned long long vec_ldl (int, vector unsigned long long *); + vector unsigned long long vec_ldl (int, unsigned long long *); + vector bool long long vec_ldl (int, vector bool long long *); + vector bool long long vec_ldl (int, bool long long *); */ + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_long_long, 0 }, + + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, 
ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V16QI, 
RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, P8V_BUILTIN_VMAXUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, P8V_BUILTIN_VMAXUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, P8V_BUILTIN_VMAXUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, P8V_BUILTIN_VMAXSD, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, P8V_BUILTIN_VMAXSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, P8V_BUILTIN_VMAXSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MAX, VSX_BUILTIN_XVMAXDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXFP, ALTIVEC_BUILTIN_VMAXFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSW, 
ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + 
RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEH, VSX_BUILTIN_VEC_MERGEH_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MERGEL, VSX_BUILTIN_VEC_MERGEL_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_unsigned_V16QI, 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, P8V_BUILTIN_VMINUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, P8V_BUILTIN_VMINUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, P8V_BUILTIN_VMINUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, P8V_BUILTIN_VMINSD, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, P8V_BUILTIN_VMINSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, P8V_BUILTIN_VMINSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_MIN, VSX_BUILTIN_XVMINDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINFP, ALTIVEC_BUILTIN_VMINFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSW, 
ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P10_BUILTIN_VEC_MULH, P10V_BUILTIN_MULHS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P10_BUILTIN_VEC_MULH, P10V_BUILTIN_MULHU_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P10_BUILTIN_VEC_MULH, P10V_BUILTIN_MULHS_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P10_BUILTIN_VEC_MULH, P10V_BUILTIN_MULHU_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + 
RS6000_BTI_unsigned_V2DI, 0 }, + + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, P8V_BUILTIN_VMULESW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, P8V_BUILTIN_VMULEUW, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, P10V_BUILTIN_VMULESD, + RS6000_BTI_V1TI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULE, P10V_BUILTIN_VMULEUD, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULEUB, ALTIVEC_BUILTIN_VMULEUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULESB, ALTIVEC_BUILTIN_VMULESB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULEUH, ALTIVEC_BUILTIN_VMULEUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULESH, ALTIVEC_BUILTIN_VMULESH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULEUW, P8V_BUILTIN_VMULEUW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULESW, P8V_BUILTIN_VMULESW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, P8V_BUILTIN_VMULOSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, P8V_BUILTIN_VMULOUW, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, P10V_BUILTIN_VMULOSD, + RS6000_BTI_V1TI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, P10V_BUILTIN_VMULOUD, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOSH, ALTIVEC_BUILTIN_VMULOSH, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOUH, ALTIVEC_BUILTIN_VMULOUH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOSB, ALTIVEC_BUILTIN_VMULOSB, + RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOUB, ALTIVEC_BUILTIN_VMULOUB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOUW, P8V_BUILTIN_VMULOUW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VMULOSW, P8V_BUILTIN_VMULOSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + + { 
ALTIVEC_BUILTIN_VEC_NABS, ALTIVEC_BUILTIN_NABS_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NABS, ALTIVEC_BUILTIN_NABS_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NABS, ALTIVEC_BUILTIN_NABS_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NABS, ALTIVEC_BUILTIN_NABS_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NABS, ALTIVEC_BUILTIN_NABS_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NABS, VSX_BUILTIN_XVNABSDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NEARBYINT, VSX_BUILTIN_XVRDPI, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_NEARBYINT, VSX_BUILTIN_XVRSPI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, P10V_BUILTIN_VNOR_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_bool_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, P10V_BUILTIN_VNOR_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_bool_V1TI, RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, P10V_BUILTIN_VNOR_V1TI_UNS, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, P10V_BUILTIN_VNOR_V1TI_UNS, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_bool_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, P10V_BUILTIN_VNOR_V1TI_UNS, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_bool_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V16QI_UNS, + 
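+  /* Reader note (my annotation, not in the upstream table): vec_nabs is a
+     negated absolute value and vec_nor computes ~(a | b), so
+       vector float r = vec_nor (x, x);   // bitwise NOT of x
+     goes through the VNOR_V4SF row above.  */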
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, 
RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, P8V_BUILTIN_VPKUDUM, + RS6000_BTI_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, P8V_BUILTIN_VPKUDUM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, P8V_BUILTIN_VPKUDUM, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACK, P8V_BUILTIN_FLOAT2_V2DF, + RS6000_BTI_V4SF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + { P8V_BUILTIN_VEC_NEG, P8V_BUILTIN_NEG_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_NEG, P8V_BUILTIN_NEG_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_NEG, P8V_BUILTIN_NEG_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_NEG, P8V_BUILTIN_NEG_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_NEG, P8V_BUILTIN_NEG_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { P8V_BUILTIN_VEC_NEG, P8V_BUILTIN_NEG_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + + { P9V_BUILTIN_VEC_CONVERT_4F32_8I16, P9V_BUILTIN_CONVERT_4F32_8I16, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P9V_BUILTIN_VEC_CONVERT_4F32_8F16, P9V_BUILTIN_CONVERT_4F32_8F16, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + + { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, 
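+  /* Reader note (my annotation, not upstream): the vec_first_match_index
+     family starting here returns, as an unsigned int, the element index of
+     the first position where the two inputs compare equal (the number of
+     elements if there is none); the *OREOS variants also stop at an
+     end-of-string (zero) element in either input.  */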
P9V_BUILTIN_VFIRSTMATCHINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHINDEX, P9V_BUILTIN_VFIRSTMATCHINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMATCHOREOSINDEX, P9V_BUILTIN_VFIRSTMATCHOREOSINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHINDEX, P9V_BUILTIN_VFIRSTMISMATCHINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + + { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX, + P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V16QI, + RS6000_BTI_UINTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX, + P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V16QI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX, + P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX, + P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V8HI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX, + P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VFIRSTMISMATCHOREOSINDEX, + P9V_BUILTIN_VFIRSTMISMATCHOREOSINDEX_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + + { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, 
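+  /* Reader note (my annotation, not upstream): vec_pack narrows by simply
+     dropping the high half of each element, while the vec_packs rows below
+     saturate on overflow and vec_packsu saturate to an unsigned result,
+     e.g. vec_packs on two vector signed int yields vector signed short.  */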
RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKPX, ALTIVEC_BUILTIN_VPKPX, + RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSHSS, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSWSS, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, P8V_BUILTIN_VPKUDUS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKS, P8V_BUILTIN_VPKSDSS, + RS6000_BTI_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSWSS, ALTIVEC_BUILTIN_VPKSWSS, + RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUWUS, ALTIVEC_BUILTIN_VPKUWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSHSS, ALTIVEC_BUILTIN_VPKSHSS, + RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKUHUS, ALTIVEC_BUILTIN_VPKUHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, P8V_BUILTIN_VPKSDUS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_PACKSU, P8V_BUILTIN_VPKUDUS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSWUS, ALTIVEC_BUILTIN_VPKSWUS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VPKSHUS, ALTIVEC_BUILTIN_VPKSHUS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_RINT, VSX_BUILTIN_XVRDPIC, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RINT, VSX_BUILTIN_XVRSPIC, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 
RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, P8V_BUILTIN_VRLD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, P8V_BUILTIN_VRLD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, P10V_BUILTIN_VRLQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_RL, P10V_BUILTIN_VRLQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_RLMI, P9V_BUILTIN_VRLWMI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { P9V_BUILTIN_VEC_RLMI, P9V_BUILTIN_VRLDMI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { P9V_BUILTIN_VEC_RLMI, P10V_BUILTIN_VRLQMI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_unsigned_V1TI }, + { P9V_BUILTIN_VEC_RLMI, P10V_BUILTIN_VRLQMI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + { P9V_BUILTIN_VEC_RLNM, P9V_BUILTIN_VRLWNM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_RLNM, P9V_BUILTIN_VRLDNM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P9V_BUILTIN_VEC_RLNM, P10V_BUILTIN_VRLQNM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { P9V_BUILTIN_VEC_RLNM, P10V_BUILTIN_VRLQNM, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, P8V_BUILTIN_VSLD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { 
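+  /* Reader note (my annotation, not upstream): vec_rl above rotates each
+     element left by the corresponding unsigned count, and the vec_rlmi /
+     vec_rlnm rows wire up the rotate-then-mask-insert (vrlwmi/vrldmi) and
+     rotate-then-AND-with-mask (vrlwnm/vrldnm) forms, plus their P10
+     quadword variants.  */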
ALTIVEC_BUILTIN_VEC_SL, P8V_BUILTIN_VSLD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, P10V_BUILTIN_VSLQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SL, P10V_BUILTIN_VSLQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SQRT, VSX_BUILTIN_XVSQRTDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_SQRT, VSX_BUILTIN_XVSQRTSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + 
RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V8HI, 0 }, + + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_pixel_V8HI, 
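+  /* Reader note (my annotation, not upstream): unlike vec_sl, which shifts
+     each element separately, vec_sll above shifts the whole 128-bit
+     register left by a bit count (0-7) taken from the low-order bits of
+     the shift operand, and the vec_slo rows here shift it left by whole
+     bytes.  */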
RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SPLAT, VSX_BUILTIN_XXSPLTD_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, 
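+  /* Reader note (my annotation, not upstream): vec_splat takes a literal
+     element index (the INTSI operand above) and replicates that element
+     across the whole vector, e.g.
+       vector signed int all2 = vec_splat (v, 2);   // copy element 2
+  */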
ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, P8V_BUILTIN_VSRD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, P8V_BUILTIN_VSRD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, P10V_BUILTIN_VSRQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SR, P10V_BUILTIN_VSRQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, 
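+  /* Reader note (my annotation, not upstream): vec_sr above is a logical
+     (zero-filling) per-element right shift, while the vec_sra rows here
+     are algebraic and replicate the sign bit; both take an unsigned vector
+     of per-element shift counts.  */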
ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, P8V_BUILTIN_VSRAD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, P8V_BUILTIN_VSRAD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, P10V_BUILTIN_VSRAQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRA, P10V_BUILTIN_VSRAQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, 
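+  /* Reader note (my annotation, not upstream): vec_srl above shifts the
+     entire 128-bit register right by bits and the vec_sro rows here shift
+     it by whole bytes, mirroring the vec_sll/vec_slo pair earlier in this
+     table.  */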
ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_unsigned_V2DI, 
RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, VSX_BUILTIN_XVSUBDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUQM, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUB, P8V_BUILTIN_VSUBUQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBFP, ALTIVEC_BUILTIN_VSUBFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { 
ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBC, P8V_BUILTIN_VSUBCUQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBC, P8V_BUILTIN_VSUBCUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 
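+  /* Reader note (my annotation, not upstream): vec_subs saturates instead
+     of wrapping, so e.g. for vector unsigned char a, b,
+       vector unsigned char d = vec_subs (a, b);   // clamps at 0
+     resolves to VSUBUBS, whereas vec_sub above is modular.  */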
RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4UBS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SBS, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUM4SHS, ALTIVEC_BUILTIN_VSUM4SHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_VSUM4SBS, ALTIVEC_BUILTIN_VSUM4SBS, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, 
RS6000_BTI_V4SI, 0 },
+  { ALTIVEC_BUILTIN_VEC_VSUM4UBS, ALTIVEC_BUILTIN_VSUM4UBS,
+    RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+  { ALTIVEC_BUILTIN_VEC_SUM2S, ALTIVEC_BUILTIN_VSUM2SWS,
+    RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+  { ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS,
+    RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DF,
+    RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DF,
+    RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V1TI,
+    RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTTI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V1TI,
+    RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_V1TI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V1TI,
+    RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTTI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
+    RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
+    RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
+    RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_INTDI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
+    RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
+    ~RS6000_BTI_unsigned_V2DI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
+    RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI,
+    ~RS6000_BTI_unsigned_long_long, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVD2X_V2DI,
+    RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTDI, 0 },
+
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SF,
+    RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SF,
+    RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
+    RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
+    RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
+    RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V4SI,
+    RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
+    RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
+    RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
+    RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V8HI,
+    RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
+    RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
+    RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
+    RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI,
+    ~RS6000_BTI_unsigned_V16QI, 0 },
+  { VSX_BUILTIN_VEC_XL, VSX_BUILTIN_LXVW4X_V16QI,
+    RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+
+  { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DF,
+    RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 },
+  { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DF,
+    RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 },
+  {
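+  /* Reader note (my annotation, not upstream): in these vec_xl rows the
+     complemented type code (~RS6000_BTI_*) marks an argument that is a
+     pointer to that type rather than a value, so
+       vector double v = vec_xl (0, (double *) p);   // p: some double *
+     matches the ~RS6000_BTI_double row; vec_xl_be loads in big-endian
+     element order regardless of the target's endianness.  */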
VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTTI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTTI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI, 0 }, + { VSX_BUILTIN_VEC_XL_BE, VSX_BUILTIN_LD_ELEMREV_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { 
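  /* The _UNS instances below map unsigned and bool operand combinations
     to the unsigned instance of vxor; the bitwise operation itself is
     the same for all signednesses.  */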
ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 }, + + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, 
RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 }, + + /* Ternary AltiVec/VSX builtins. */ + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, 
~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { 
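  /* All of the dst/dstt/dstst/dststt rows share one shape: a pointer to
     the data to prefetch, an int control word, and an int stream id.
     Only the pointee type of the first argument varies.  */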
ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT, + RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI }, + { 
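  /* vec_madd resolves on the element type: VMADDFP for float, XVMADDDP
     for double, and VMLADDUHM (modular multiply-low-and-add) for 16-bit
     integers.  Illustrative use, names only an example:

       vector float r = vec_madd (a, b, c);   per-element a*b + c

     The integer rows below also fix how mixed signedness resolves.  */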
ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMADDFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_MADD, VSX_BUILTIN_XVMADDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_MADDS, ALTIVEC_BUILTIN_VMHADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_MRADDS, ALTIVEC_BUILTIN_VMHRADDSHS, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { VSX_BUILTIN_VEC_MSUB, VSX_BUILTIN_XVMSUBSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_MSUB, VSX_BUILTIN_XVMSUBDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUBM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMMBM, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUHM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMSHM, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUDM, + RS6000_BTI_V1TI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V1TI }, + { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUDM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V1TI }, + + { ALTIVEC_BUILTIN_VEC_VMSUMSHM, ALTIVEC_BUILTIN_VMSUMSHM, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMUHM, ALTIVEC_BUILTIN_VMSUMUHM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMMBM, ALTIVEC_BUILTIN_VMSUMMBM, + RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMUBM, ALTIVEC_BUILTIN_VMSUMUBM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMUHS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 
RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMSHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMSHS, ALTIVEC_BUILTIN_VMSUMSHS, + RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VMSUMUHS, ALTIVEC_BUILTIN_VMSUMUHS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI }, + { VSX_BUILTIN_VEC_NMADD, VSX_BUILTIN_XVNMADDSP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_NMADD, VSX_BUILTIN_XVNMADDDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_NMSUB, ALTIVEC_BUILTIN_VNMSUBFP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_NMSUB, VSX_BUILTIN_XVNMSUBDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI }, + + { 
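  /* vec_vpermxor (ISA 2.07): the three rows that follow map the bool,
     signed and unsigned V16QI operand combinations to the single
     VPERMXOR instance.  */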
P8V_BUILTIN_VEC_VPERMXOR, P8V_BUILTIN_VPERMXOR, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, + RS6000_BTI_bool_V16QI }, + { P8V_BUILTIN_VEC_VPERMXOR, P8V_BUILTIN_VPERMXOR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { P8V_BUILTIN_VEC_VPERMXOR, P8V_BUILTIN_VPERMXOR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { 
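  /* The remaining vec_sel rows repeat the same pattern for the V8HI and
     V16QI modes: the selector argument may be the bool or the unsigned
     vector of matching width.  */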
ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, 
ALTIVEC_BUILTIN_VSLDOI_2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI }, + + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, + RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_SLDW, VSX_BUILTIN_XXSLDWI_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI }, + + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_long_long }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + 
RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX_V8HI, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + 
RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_V16QI, 
RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V8HI, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STL, 
ALTIVEC_BUILTIN_STVXL_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, 
ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVRX, 
ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { 
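  /* vec_xst mirrors vec_xl: STXVD2X for 64-bit element types and
     STXVW4X for 32-bit and narrower element types, with the pointee
     type of the last operand selecting the row.  */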
VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI, RS6000_BTI_void, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long_long }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, 
RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_XST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V1TI, + RS6000_BTI_void, RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTTI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V1TI, + RS6000_BTI_void, RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTTI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_long_long }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long_long }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V4SI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V8HI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI }, + { VSX_BUILTIN_VEC_XST_BE, VSX_BUILTIN_ST_ELEMREV_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_XXSLDWI, 
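+ /* The ST_ELEMREV rows above implement vec_xst_be, which stores the
+    vector in big-endian element order; on a little-endian target the
+    elements are reversed on the way out, so memory ends up with the
+    layout a big-endian build would produce.  A minimal sketch,
+    assuming -mvsx:
+
+        #include <altivec.h>
+        void store_be (vector int v, int *p)
+        {
+          vec_xst_be (v, 0, p);
+        }
+  */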
VSX_BUILTIN_XXSLDWI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_INTSI }, + + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_XXPERMDI, VSX_BUILTIN_XXPERMDI_16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_INTSI }, + + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_INTTI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_long_long, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTTI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + 
~RS6000_BTI_unsigned_V2DI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long_long, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVD2X_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V2DI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V4SI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_long, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V8HI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI, 0 }, + { VSX_BUILTIN_VEC_LD, VSX_BUILTIN_LXVW4X_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 }, + + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_V2DF }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DF, + RS6000_BTI_void, RS6000_BTI_V2DF, RS6000_BTI_INTSI, ~RS6000_BTI_double }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTDI, + ~RS6000_BTI_long_long }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTDI, + ~RS6000_BTI_unsigned_long_long }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V1TI, + RS6000_BTI_void, RS6000_BTI_V1TI, RS6000_BTI_INTDI, ~RS6000_BTI_INTTI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V1TI, + RS6000_BTI_void, RS6000_BTI_unsigned_V1TI, RS6000_BTI_INTDI, ~RS6000_BTI_UINTTI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_V2DI, RS6000_BTI_INTSI, ~RS6000_BTI_V2DI }, + { 
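+ /* These VSX_BUILTIN_VEC_LD rows do not change what vec_ld means; they
+    only extend the overload set to the VSX types (vector double,
+    vector long long, vector __int128).  A minimal sketch, assuming
+    -mvsx:
+
+        #include <altivec.h>
+        vector double load2 (const double *p)
+        {
+          return vec_ld (0, p);
+        }
+  */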
VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V2DI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVD2X_V2DI, + RS6000_BTI_void, RS6000_BTI_bool_V2DI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V2DI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SF, + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V4SI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V4SI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V4SI, + RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, + ~RS6000_BTI_INTSI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V8HI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V8HI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V8HI, + RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_INTHI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_unsigned_V16QI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_bool_V16QI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_UINTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, + ~RS6000_BTI_INTQI }, + { VSX_BUILTIN_VEC_ST, VSX_BUILTIN_STXVW4X_V16QI, + RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, + ~RS6000_BTI_pixel_V8HI }, + + /* Predicates. 
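+    Each *_P row resolves one of the vec_all_* / vec_any_* predicates,
+    which return an int rather than a vector mask.  The extra leading
+    RS6000_BTI_INTSI operand is the CR6 condition selector that the
+    altivec.h wrappers pass (e.g. __CR6_LT).  A minimal sketch,
+    assuming -maltivec:
+
+        #include <altivec.h>
+        int all_gt (vector signed int a, vector signed int b)
+        {
+          return vec_all_gt (a, b);
+        }
+
+    which resolves to the dot-form vcmpgtsw. instruction and reads the
+    result back from CR6.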
*/ + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P10V_BUILTIN_VCMPGTUT_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTSD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P8V_BUILTIN_VCMPGTSD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, 
P8V_BUILTIN_VCMPGTSD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, P10V_BUILTIN_VCMPGTST_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTFP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_VCMPGT_P, VSX_BUILTIN_XVCMPGTDP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + + + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, 
RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P8V_BUILTIN_VCMPEQUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P10V_BUILTIN_VCMPEQUT_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, P10V_BUILTIN_VCMPEQUT_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQFP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_VCMPEQ_P, VSX_BUILTIN_XVCMPEQDP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + + + /* cmpge is the same as cmpgt for all cases except floating point. + There is further code to deal with this special case in + altivec_build_resolved_builtin. 
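+    Roughly: there is no integer vcmpge* instruction, so for integer
+    operands the resolver rewrites the predicate in terms of the
+    corresponding vcmpgt*_p built-in with the operands swapped and the
+    CR6 sense adjusted, while V4SF and V2DF resolve to the genuine
+    vcmpgefp/xvcmpgedp compares.  A minimal sketch, assuming -maltivec:
+
+        #include <altivec.h>
+        int ge_i (vector signed int a, vector signed int b)
+        {
+          return vec_all_ge (a, b);
+        }
+        int ge_f (vector float a, vector float b)
+        {
+          return vec_all_ge (a, b);
+        }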
*/ + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P8V_BUILTIN_VCMPGTUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P8V_BUILTIN_VCMPGTUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P8V_BUILTIN_VCMPGTUD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P10V_BUILTIN_VCMPGTUT_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P8V_BUILTIN_VCMPGTSD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P8V_BUILTIN_VCMPGTSD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, 
P8V_BUILTIN_VCMPGTSD_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_V2DI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, P10V_BUILTIN_VCMPGTST_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGEFP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF }, + { ALTIVEC_BUILTIN_VEC_VCMPGE_P, VSX_BUILTIN_XVCMPGEDP_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF }, + + /* Power8 vector overloaded functions. */ + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI_UNS, + RS6000_BTI_bool_V2DI, 
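+ /* vec_eqv is bitwise equivalence, the same bits as ~(a ^ b), mapped
+    to the Power8 xxleqv instruction.  A minimal sketch, assuming
+    -mpower8-vector:
+
+        #include <altivec.h>
+        vector unsigned int eqv_u (vector unsigned int a,
+                                   vector unsigned int b)
+        {
+          return vec_eqv (a, b);
+        }
+  */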
RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P8V_BUILTIN_VEC_EQV, P8V_BUILTIN_EQV_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, 
P8V_BUILTIN_NAND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P8V_BUILTIN_VEC_NAND, P8V_BUILTIN_NAND_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI_UNS, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V16QI_UNS, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI_UNS, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V8HI_UNS, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI_UNS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SI_UNS, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { 
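+ /* vec_nand (a, b) is ~(a & b) (xxlnand) and vec_orc (a, b) is a | ~b
+    (xxlorc), completing the Power8 VSX logical set.  A minimal sketch,
+    assuming -mpower8-vector:
+
+        #include <altivec.h>
+        vector unsigned int orc_u (vector unsigned int a,
+                                   vector unsigned int b)
+        {
+          return vec_orc (a, b);
+        }
+  */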
P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI_UNS, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DI_UNS, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P8V_BUILTIN_VEC_ORC, P8V_BUILTIN_ORC_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + { P8V_BUILTIN_VEC_VADDCUQ, P8V_BUILTIN_VADDCUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P8V_BUILTIN_VEC_VADDCUQ, P8V_BUILTIN_VADDCUQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { P8V_BUILTIN_VEC_VADDUDM, P8V_BUILTIN_VADDUDM, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VADDUDM, P8V_BUILTIN_VADDUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VADDUDM, P8V_BUILTIN_VADDUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VADDUDM, P8V_BUILTIN_VADDUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VADDUDM, P8V_BUILTIN_VADDUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VADDUDM, P8V_BUILTIN_VADDUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VADDUQM, P8V_BUILTIN_VADDUQM, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P8V_BUILTIN_VEC_VADDUQM, P8V_BUILTIN_VADDUQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { P9V_BUILTIN_VEC_VBPERM, P9V_BUILTIN_VBPERMD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VBPERM, P8V_BUILTIN_VBPERMQ, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VBPERM, P8V_BUILTIN_VBPERMQ2, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P8V_BUILTIN_VEC_VBPERMQ, P8V_BUILTIN_VBPERMQ, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_VBPERMQ, P8V_BUILTIN_VBPERMQ, + RS6000_BTI_V2DI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P8V_BUILTIN_VEC_VBPERMQ, P8V_BUILTIN_VBPERMQ, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_VBPERMQ, P8V_BUILTIN_VBPERMQ, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, 
P8V_BUILTIN_VCLZW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZ, P8V_BUILTIN_VCLZD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P8V_BUILTIN_VEC_VCLZB, P8V_BUILTIN_VCLZB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZB, P8V_BUILTIN_VCLZB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P8V_BUILTIN_VEC_VCLZH, P8V_BUILTIN_VCLZH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZH, P8V_BUILTIN_VCLZH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + + { P8V_BUILTIN_VEC_VCLZW, P8V_BUILTIN_VCLZW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZW, P8V_BUILTIN_VCLZW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + + { P8V_BUILTIN_VEC_VCLZD, P8V_BUILTIN_VCLZD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_VCLZD, P8V_BUILTIN_VCLZD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_LT, MISC_BUILTIN_TSTSFI_LT_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_LT, MISC_BUILTIN_TSTSFI_LT_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_LT_TD, MISC_BUILTIN_TSTSFI_LT_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_LT_DD, MISC_BUILTIN_TSTSFI_LT_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_EQ, MISC_BUILTIN_TSTSFI_EQ_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_EQ, MISC_BUILTIN_TSTSFI_EQ_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_EQ_TD, MISC_BUILTIN_TSTSFI_EQ_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_EQ_DD, MISC_BUILTIN_TSTSFI_EQ_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_GT, MISC_BUILTIN_TSTSFI_GT_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_GT, MISC_BUILTIN_TSTSFI_GT_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_GT_TD, MISC_BUILTIN_TSTSFI_GT_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_GT_DD, MISC_BUILTIN_TSTSFI_GT_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_OV, MISC_BUILTIN_TSTSFI_OV_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_OV, MISC_BUILTIN_TSTSFI_OV_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9_BUILTIN_DFP_TSTSFI_OV_TD, MISC_BUILTIN_TSTSFI_OV_TD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat128, 0 }, + { P9_BUILTIN_DFP_TSTSFI_OV_DD, MISC_BUILTIN_TSTSFI_OV_DD, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, RS6000_BTI_dfloat64, 0 }, + + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZH, + RS6000_BTI_unsigned_V8HI, 
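+ /* vec_cntlz (Power8 vclz*) and vec_cnttz (Power9 vctz*) count leading
+    and trailing zero bits in each element.  A minimal sketch, assuming
+    -mcpu=power9:
+
+        #include <altivec.h>
+        vector unsigned int lz (vector unsigned int v)
+        {
+          return vec_cntlz (v);
+        }
+  */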
RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZ, P9V_BUILTIN_VCTZD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P9V_BUILTIN_VEC_VCTZB, P9V_BUILTIN_VCTZB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZB, P9V_BUILTIN_VCTZB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P9V_BUILTIN_VEC_VCTZH, P9V_BUILTIN_VCTZH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZH, P9V_BUILTIN_VCTZH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + + { P9V_BUILTIN_VEC_VCTZW, P9V_BUILTIN_VCTZW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZW, P9V_BUILTIN_VCTZW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + + { P9V_BUILTIN_VEC_VCTZD, P9V_BUILTIN_VCTZD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZD, P9V_BUILTIN_VCTZD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P9V_BUILTIN_VEC_VADU, P9V_BUILTIN_VADUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VADU, P9V_BUILTIN_VADUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VADU, P9V_BUILTIN_VADUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + + { P9V_BUILTIN_VEC_VADUB, P9V_BUILTIN_VADUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P9V_BUILTIN_VEC_VADUH, P9V_BUILTIN_VADUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + + { P9V_BUILTIN_VEC_VADUW, P9V_BUILTIN_VADUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + + { P9V_BUILTIN_VEC_VES, P9V_BUILTIN_VESSP, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, 0, 0 }, + { P9V_BUILTIN_VEC_VES, P9V_BUILTIN_VESDP, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, 0, 0 }, + + { P9V_BUILTIN_VEC_VESSP, P9V_BUILTIN_VESSP, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, 0, 0 }, + { P9V_BUILTIN_VEC_VESDP, P9V_BUILTIN_VESDP, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, 0, 0 }, + + { P9V_BUILTIN_VEC_VEE, P9V_BUILTIN_VEESP, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, 0, 0 }, + { P9V_BUILTIN_VEC_VEE, P9V_BUILTIN_VEEDP, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, 0, 0 }, + + { P9V_BUILTIN_VEC_VEESP, P9V_BUILTIN_VEESP, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, 0, 0 }, + { P9V_BUILTIN_VEC_VEEDP, P9V_BUILTIN_VEEDP, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, 0, 0 }, + + { P9V_BUILTIN_VEC_VTDC, P9V_BUILTIN_VTDCSP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { P9V_BUILTIN_VEC_VTDC, P9V_BUILTIN_VTDCDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 }, + + { P9V_BUILTIN_VEC_VTDCSP, P9V_BUILTIN_VTDCSP, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 }, + { P9V_BUILTIN_VEC_VTDCDP, P9V_BUILTIN_VTDCDP, + RS6000_BTI_bool_V2DI, RS6000_BTI_V2DF, RS6000_BTI_INTSI, 0 }, + + { P9V_BUILTIN_VEC_VIE, P9V_BUILTIN_VIESP, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VIE, P9V_BUILTIN_VIESP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, 0 }, + + { P9V_BUILTIN_VEC_VIE, 
P9V_BUILTIN_VIEDP, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P9V_BUILTIN_VEC_VIE, P9V_BUILTIN_VIEDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, 0 }, + + { P9V_BUILTIN_VEC_VIESP, P9V_BUILTIN_VIESP, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VIESP, P9V_BUILTIN_VIESP, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, 0 }, + + { P9V_BUILTIN_VEC_VIEDP, P9V_BUILTIN_VIEDP, + RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P9V_BUILTIN_VEC_VIEDP, P9V_BUILTIN_VIEDP, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_unsigned_V2DI, 0 }, + + { P9V_BUILTIN_VEC_VSTDC, P9V_BUILTIN_VSTDCSP, + RS6000_BTI_bool_int, RS6000_BTI_float, RS6000_BTI_INTSI, 0 }, + { P9V_BUILTIN_VEC_VSTDC, P9V_BUILTIN_VSTDCDP, + RS6000_BTI_bool_int, RS6000_BTI_double, RS6000_BTI_INTSI, 0 }, + { P9V_BUILTIN_VEC_VSTDC, P9V_BUILTIN_VSTDCQP, + RS6000_BTI_bool_int, RS6000_BTI_ieee128_float, RS6000_BTI_INTSI, 0 }, + + { P9V_BUILTIN_VEC_VSTDCSP, P9V_BUILTIN_VSTDCSP, + RS6000_BTI_bool_int, RS6000_BTI_float, RS6000_BTI_INTSI, 0 }, + { P9V_BUILTIN_VEC_VSTDCDP, P9V_BUILTIN_VSTDCDP, + RS6000_BTI_bool_int, RS6000_BTI_double, RS6000_BTI_INTSI, 0 }, + { P9V_BUILTIN_VEC_VSTDCQP, P9V_BUILTIN_VSTDCQP, + RS6000_BTI_bool_int, RS6000_BTI_ieee128_float, RS6000_BTI_INTSI, 0 }, + + { P9V_BUILTIN_VEC_VSTDCN, P9V_BUILTIN_VSTDCNSP, + RS6000_BTI_bool_int, RS6000_BTI_float, 0, 0 }, + { P9V_BUILTIN_VEC_VSTDCN, P9V_BUILTIN_VSTDCNDP, + RS6000_BTI_bool_int, RS6000_BTI_double, 0, 0 }, + { P9V_BUILTIN_VEC_VSTDCN, P9V_BUILTIN_VSTDCNQP, + RS6000_BTI_bool_int, RS6000_BTI_ieee128_float, 0, 0 }, + + { P9V_BUILTIN_VEC_VSTDCNSP, P9V_BUILTIN_VSTDCNSP, + RS6000_BTI_bool_int, RS6000_BTI_float, 0, 0 }, + { P9V_BUILTIN_VEC_VSTDCNDP, P9V_BUILTIN_VSTDCNDP, + RS6000_BTI_bool_int, RS6000_BTI_double, 0, 0 }, + { P9V_BUILTIN_VEC_VSTDCNQP, P9V_BUILTIN_VSTDCNQP, + RS6000_BTI_bool_int, RS6000_BTI_ieee128_float, 0, 0 }, + + { P9V_BUILTIN_VEC_VSEEDP, P9V_BUILTIN_VSEEDP, + RS6000_BTI_UINTSI, RS6000_BTI_double, 0, 0 }, + { P9V_BUILTIN_VEC_VSEEDP, P9V_BUILTIN_VSEEQP, + RS6000_BTI_UINTDI, RS6000_BTI_ieee128_float, 0, 0 }, + + { P9V_BUILTIN_VEC_VSESDP, P9V_BUILTIN_VSESDP, + RS6000_BTI_UINTDI, RS6000_BTI_double, 0, 0 }, + { P9V_BUILTIN_VEC_VSESDP, P9V_BUILTIN_VSESQP, + RS6000_BTI_UINTTI, RS6000_BTI_ieee128_float, 0, 0 }, + + { P9V_BUILTIN_VEC_VSIEDP, P9V_BUILTIN_VSIEDP, + RS6000_BTI_double, RS6000_BTI_UINTDI, RS6000_BTI_UINTDI, 0 }, + { P9V_BUILTIN_VEC_VSIEDP, P9V_BUILTIN_VSIEDPF, + RS6000_BTI_double, RS6000_BTI_double, RS6000_BTI_UINTDI, 0 }, + + { P9V_BUILTIN_VEC_VSIEDP, P9V_BUILTIN_VSIEQP, + RS6000_BTI_ieee128_float, RS6000_BTI_UINTTI, RS6000_BTI_UINTDI, 0 }, + { P9V_BUILTIN_VEC_VSIEDP, P9V_BUILTIN_VSIEQPF, + RS6000_BTI_ieee128_float, RS6000_BTI_ieee128_float, RS6000_BTI_UINTDI, 0 }, + + { P9V_BUILTIN_VEC_VSCEGT, P9V_BUILTIN_VSCEDPGT, + RS6000_BTI_INTSI, RS6000_BTI_double, RS6000_BTI_double, 0 }, + { P9V_BUILTIN_VEC_VSCEGT, P9V_BUILTIN_VSCEQPGT, + RS6000_BTI_INTSI, RS6000_BTI_ieee128_float, RS6000_BTI_ieee128_float, 0 }, + { P9V_BUILTIN_VEC_VSCELT, P9V_BUILTIN_VSCEDPLT, + RS6000_BTI_INTSI, RS6000_BTI_double, RS6000_BTI_double, 0 }, + { P9V_BUILTIN_VEC_VSCELT, P9V_BUILTIN_VSCEQPLT, + RS6000_BTI_INTSI, RS6000_BTI_ieee128_float, RS6000_BTI_ieee128_float, 0 }, + { P9V_BUILTIN_VEC_VSCEEQ, P9V_BUILTIN_VSCEDPEQ, + RS6000_BTI_INTSI, RS6000_BTI_double, RS6000_BTI_double, 0 }, + { P9V_BUILTIN_VEC_VSCEEQ, P9V_BUILTIN_VSCEQPEQ, + RS6000_BTI_INTSI, 
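+ /* The VSEE/VSES/VSIE/VSCE rows cover the ISA 3.0 scalar exponent and
+    significand operations (xsxexpdp, xsxsigdp, xsiexpdp, xscmpexpdp
+    and their IEEE-128 forms).  A minimal sketch; the user-level name
+    follows the PVIPR spelling and assumes -mcpu=power9:
+
+        #include <altivec.h>
+        unsigned int exp_of (double d)
+        {
+          return scalar_extract_exp (d);
+        }
+  */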
RS6000_BTI_ieee128_float, RS6000_BTI_ieee128_float, 0 }, + { P9V_BUILTIN_VEC_VSCEUO, P9V_BUILTIN_VSCEDPUO, + RS6000_BTI_INTSI, RS6000_BTI_double, RS6000_BTI_double, 0 }, + { P9V_BUILTIN_VEC_VSCEUO, P9V_BUILTIN_VSCEQPUO, + RS6000_BTI_INTSI, RS6000_BTI_ieee128_float, RS6000_BTI_ieee128_float, 0 }, + + { P9V_BUILTIN_VEC_XL_LEN_R, P9V_BUILTIN_XL_LEN_R, + RS6000_BTI_unsigned_V16QI, ~RS6000_BTI_UINTQI, + RS6000_BTI_unsigned_long_long, 0 }, + + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V16QI, ~RS6000_BTI_INTQI, + RS6000_BTI_unsigned_long_long, 0 }, + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_unsigned_V16QI, ~RS6000_BTI_UINTQI, + RS6000_BTI_unsigned_long_long, 0 }, + + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V4SI, ~RS6000_BTI_INTSI, + RS6000_BTI_unsigned_long_long, 0 }, + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_unsigned_V4SI, ~RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_long_long, 0 }, + + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V1TI, ~RS6000_BTI_INTTI, + RS6000_BTI_unsigned_long_long, 0 }, + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_unsigned_V1TI, ~RS6000_BTI_UINTTI, + RS6000_BTI_unsigned_long_long, 0 }, + + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V2DI, ~RS6000_BTI_long_long, + RS6000_BTI_unsigned_long_long, 0 }, + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_unsigned_V2DI, ~RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_long_long, 0 }, + + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V8HI, ~RS6000_BTI_INTHI, + RS6000_BTI_unsigned_long_long, 0 }, + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_unsigned_V8HI, ~RS6000_BTI_UINTHI, + RS6000_BTI_unsigned_long_long, 0 }, + + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V2DF, ~RS6000_BTI_double, + RS6000_BTI_unsigned_long_long, 0 }, + { P9V_BUILTIN_VEC_LXVL, P9V_BUILTIN_LXVL, + RS6000_BTI_V4SF, ~RS6000_BTI_float, + RS6000_BTI_unsigned_long_long, 0 }, + /* At an appropriate future time, add support for the + RS6000_BTI_Float16 (exact name to be determined) type here. 
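+    Until then, the LXVL rows above already give vec_xl_len, a
+    length-bounded load: only the first N bytes (at most 16) are read
+    and, as we read the lxvl semantics, the remainder of the register
+    is cleared.  A minimal sketch, assuming -mcpu=power9 and
+    <stddef.h>:
+
+        #include <stddef.h>
+        #include <altivec.h>
+        vector unsigned char head (unsigned char *p, size_t n)
+        {
+          return vec_xl_len (p, n);
+        }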
*/ + + { P9V_BUILTIN_VEC_XST_LEN_R, P9V_BUILTIN_XST_LEN_R, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, + ~RS6000_BTI_UINTQI, RS6000_BTI_unsigned_long_long}, + + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V16QI, ~RS6000_BTI_INTQI, + RS6000_BTI_unsigned_long_long }, + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, ~RS6000_BTI_UINTQI, + RS6000_BTI_unsigned_long_long }, + + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V4SI, ~RS6000_BTI_INTSI, + RS6000_BTI_unsigned_long_long }, + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, ~RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_long_long }, + + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V1TI, ~RS6000_BTI_INTTI, + RS6000_BTI_unsigned_long_long }, + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_unsigned_V1TI, ~RS6000_BTI_UINTTI, + RS6000_BTI_unsigned_long_long }, + + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V2DI, ~RS6000_BTI_long_long, + RS6000_BTI_unsigned_long_long }, + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_unsigned_V2DI, ~RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_long_long }, + + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V8HI, ~RS6000_BTI_INTHI, + RS6000_BTI_unsigned_long_long }, + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, ~RS6000_BTI_UINTHI, + RS6000_BTI_unsigned_long_long }, + + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V2DF, ~RS6000_BTI_double, + RS6000_BTI_unsigned_long_long }, + { P9V_BUILTIN_VEC_STXVL, P9V_BUILTIN_STXVL, + RS6000_BTI_void, RS6000_BTI_V4SF, ~RS6000_BTI_float, + RS6000_BTI_unsigned_long_long }, + /* At an appropriate future time, add support for the + RS6000_BTI_Float16 (exact name to be determined) type here. */ + + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEB, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_V16QI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEH, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_V8HI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEW, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P9V_BUILTIN_CMPNEW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P10V_BUILTIN_CMPNET, + RS6000_BTI_bool_V1TI, RS6000_BTI_V1TI, + RS6000_BTI_V1TI, 0 }, + { ALTIVEC_BUILTIN_VEC_CMPNE, P10V_BUILTIN_CMPNET, + RS6000_BTI_bool_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + /* The following 2 entries have been deprecated. 
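+    These are the combinations that mix a bool vector with a signed or
+    unsigned vector; they remain only for source compatibility, and new
+    code should pass both operands with the same type, e.g.
+    vec_all_ne (a, b) with a and b both vector unsigned char.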
*/ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, + RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEB_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, + RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEH_P, + RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, + RS6000_BTI_pixel_V8HI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, + RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEW_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 + }, + + /* The following 2 entries have been deprecated. 
*/ + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, + RS6000_BTI_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNED_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P10V_BUILTIN_VCMPNET_P, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P10V_BUILTIN_VCMPNET_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEFP_P, + RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P9V_BUILTIN_VEC_VCMPNE_P, P9V_BUILTIN_VCMPNEDP_P, + RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, + RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEB_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, + RS6000_BTI_bool_V16QI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, + RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, + RS6000_BTI_bool_V8HI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEH_P, + RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, + RS6000_BTI_pixel_V8HI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + + /* The following 2 entries have been deprecated. 
*/ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, + RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEW_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, + RS6000_BTI_bool_V4SI, 0 }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 + }, + + /* The following 2 entries have been deprecated. */ + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, + RS6000_BTI_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAED_P, + RS6000_BTI_INTSI, RS6000_BTI_bool_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P10V_BUILTIN_VCMPAET_P, + RS6000_BTI_INTSI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P10V_BUILTIN_VCMPAET_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEFP_P, + RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P9V_BUILTIN_VEC_VCMPAE_P, P9V_BUILTIN_VCMPAEDP_P, + RS6000_BTI_INTSI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + { P9V_BUILTIN_VEC_VCMPNEZ_P, P9V_BUILTIN_VCMPNEZB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI }, + { P9V_BUILTIN_VEC_VCMPNEZ_P, P9V_BUILTIN_VCMPNEZB_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI }, + + { P9V_BUILTIN_VEC_VCMPNEZ_P, P9V_BUILTIN_VCMPNEZH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI }, + { P9V_BUILTIN_VEC_VCMPNEZ_P, P9V_BUILTIN_VCMPNEZH_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI }, + + { P9V_BUILTIN_VEC_VCMPNEZ_P, P9V_BUILTIN_VCMPNEZW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI }, + { P9V_BUILTIN_VEC_VCMPNEZ_P, P9V_BUILTIN_VCMPNEZW_P, + RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI }, + + { P9V_BUILTIN_VEC_CMPNEZ, P9V_BUILTIN_CMPNEZB, + RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_CMPNEZ, P9V_BUILTIN_CMPNEZB, + RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P9V_BUILTIN_VEC_CMPNEZ, P9V_BUILTIN_CMPNEZH, + RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_CMPNEZ, P9V_BUILTIN_CMPNEZH, + RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + + { P9V_BUILTIN_VEC_CMPNEZ, P9V_BUILTIN_CMPNEZW, + RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_CMPNEZ, P9V_BUILTIN_CMPNEZW, + RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + + { P9V_BUILTIN_VEC_VCLZLSBB, 
P9V_BUILTIN_VCLZLSBB_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_VCLZLSBB, P9V_BUILTIN_VCLZLSBB_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V16QI, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V8HI, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, 0, 0 }, + { P9V_BUILTIN_VEC_VCTZLSBB, P9V_BUILTIN_VCTZLSBB_V4SI, + RS6000_BTI_INTSI, RS6000_BTI_V4SI, 0, 0 }, + + { P9V_BUILTIN_VEC_EXTRACT4B, P9V_BUILTIN_EXTRACT4B, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 }, + + { P9V_BUILTIN_VEC_VEXTRACT_FP_FROM_SHORTH, P9V_BUILTIN_VEXTRACT_FP_FROM_SHORTH, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P9V_BUILTIN_VEC_VEXTRACT_FP_FROM_SHORTL, P9V_BUILTIN_VEXTRACT_FP_FROM_SHORTL, + RS6000_BTI_V4SF, RS6000_BTI_unsigned_V8HI, 0, 0 }, + + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUBLX, + RS6000_BTI_INTQI, RS6000_BTI_UINTSI, + RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUBLX, + RS6000_BTI_UINTQI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUHLX, + RS6000_BTI_INTHI, RS6000_BTI_UINTSI, + RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUHLX, + RS6000_BTI_UINTHI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V8HI, 0 }, + + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUWLX, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, + RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUWLX, + RS6000_BTI_UINTSI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VEXTULX, P9V_BUILTIN_VEXTUWLX, + RS6000_BTI_float, RS6000_BTI_UINTSI, + RS6000_BTI_V4SF, 0 }, + + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUBRX, + RS6000_BTI_INTQI, RS6000_BTI_UINTSI, + RS6000_BTI_V16QI, 0 }, + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUBRX, + RS6000_BTI_UINTQI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUHRX, + RS6000_BTI_INTHI, RS6000_BTI_UINTSI, + RS6000_BTI_V8HI, 0 }, + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUHRX, + RS6000_BTI_UINTHI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V8HI, 0 }, + + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUWRX, + RS6000_BTI_INTSI, RS6000_BTI_UINTSI, + RS6000_BTI_V4SI, 0 }, + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUWRX, + RS6000_BTI_UINTSI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P9V_BUILTIN_VEC_VEXTURX, P9V_BUILTIN_VEXTUWRX, + RS6000_BTI_float, RS6000_BTI_UINTSI, + RS6000_BTI_V4SF, 0 }, + + { P8V_BUILTIN_VEC_VGBBD, P8V_BUILTIN_VGBBD, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VGBBD, P8V_BUILTIN_VGBBD, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P9V_BUILTIN_VEC_INSERT4B, P9V_BUILTIN_INSERT4B, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + { P9V_BUILTIN_VEC_INSERT4B, P9V_BUILTIN_INSERT4B, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI }, + + { P8V_BUILTIN_VEC_VADDECUQ, P8V_BUILTIN_VADDECUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { P8V_BUILTIN_VEC_VADDECUQ, P8V_BUILTIN_VADDECUQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + + { P8V_BUILTIN_VEC_VADDEUQM, P8V_BUILTIN_VADDEUQM, + 
RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { P8V_BUILTIN_VEC_VADDEUQM, P8V_BUILTIN_VADDEUQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + + { P8V_BUILTIN_VEC_VSUBECUQ, P8V_BUILTIN_VSUBECUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { P8V_BUILTIN_VEC_VSUBECUQ, P8V_BUILTIN_VSUBECUQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + + { P8V_BUILTIN_VEC_VSUBEUQM, P8V_BUILTIN_VSUBEUQM, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI }, + { P8V_BUILTIN_VEC_VSUBEUQM, P8V_BUILTIN_VSUBEUQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + + { P8V_BUILTIN_VEC_VMINSD, P8V_BUILTIN_VMINSD, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMINSD, P8V_BUILTIN_VMINSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMINSD, P8V_BUILTIN_VMINSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VMAXSD, P8V_BUILTIN_VMAXSD, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMAXSD, P8V_BUILTIN_VMAXSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMAXSD, P8V_BUILTIN_VMAXSD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VMINUD, P8V_BUILTIN_VMINUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMINUD, P8V_BUILTIN_VMINUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMINUD, P8V_BUILTIN_VMINUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VMAXUD, P8V_BUILTIN_VMAXUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMAXUD, P8V_BUILTIN_VMAXUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMAXUD, P8V_BUILTIN_VMAXUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_VMRGEW, P8V_BUILTIN_VMRGEW_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V4SI, + 
RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + { P8V_BUILTIN_VEC_VMRGOW, P8V_BUILTIN_VMRGOW_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 }, + + { P8V_BUILTIN_VEC_VPMSUM, P8V_BUILTIN_VPMSUMB, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P8V_BUILTIN_VEC_VPMSUM, P8V_BUILTIN_VPMSUMH, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, 0 }, + { P8V_BUILTIN_VEC_VPMSUM, P8V_BUILTIN_VPMSUMW, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, 0 }, + { P8V_BUILTIN_VEC_VPMSUM, P8V_BUILTIN_VPMSUMD, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNT, P8V_BUILTIN_VPOPCNTD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTB, P8V_BUILTIN_VPOPCNTB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTB, P8V_BUILTIN_VPOPCNTB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTU, P8V_BUILTIN_VPOPCNTUD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTH, P8V_BUILTIN_VPOPCNTH, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTH, P8V_BUILTIN_VPOPCNTH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTW, P8V_BUILTIN_VPOPCNTW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTW, P8V_BUILTIN_VPOPCNTW, + 
RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + + { P8V_BUILTIN_VEC_VPOPCNTD, P8V_BUILTIN_VPOPCNTD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_VPOPCNTD, P8V_BUILTIN_VPOPCNTD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_INTTI, RS6000_BTI_INTTI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYB, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_UINTTI, RS6000_BTI_UINTTI, 0, 0 }, + + { P9V_BUILTIN_VEC_VPRTYBW, P9V_BUILTIN_VPRTYBW, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYBW, P9V_BUILTIN_VPRTYBW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + + { P9V_BUILTIN_VEC_VPRTYBD, P9V_BUILTIN_VPRTYBD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYBD, P9V_BUILTIN_VPRTYBD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + + { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_INTTI, RS6000_BTI_INTTI, 0, 0 }, + { P9V_BUILTIN_VEC_VPRTYBQ, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_UINTTI, RS6000_BTI_UINTTI, 0, 0 }, + + { P9V_BUILTIN_VEC_VPARITY_LSBB, P9V_BUILTIN_VPRTYBW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VPARITY_LSBB, P9V_BUILTIN_VPRTYBW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P9V_BUILTIN_VEC_VPARITY_LSBB, P9V_BUILTIN_VPRTYBD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VPARITY_LSBB, P9V_BUILTIN_VPRTYBD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { P9V_BUILTIN_VEC_VPARITY_LSBB, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_V1TI, 0, 0 }, + { P9V_BUILTIN_VEC_VPARITY_LSBB, P9V_BUILTIN_VPRTYBQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 }, + + { P9_BUILTIN_CMPRB, P9_BUILTIN_SCALAR_CMPRB, + RS6000_BTI_INTSI, RS6000_BTI_UINTQI, RS6000_BTI_UINTSI, 0 }, + { P9_BUILTIN_CMPRB2, P9_BUILTIN_SCALAR_CMPRB2, + RS6000_BTI_INTSI, RS6000_BTI_UINTQI, RS6000_BTI_UINTSI, 0 }, + { P9_BUILTIN_CMPEQB, P9_BUILTIN_SCALAR_CMPEQB, + RS6000_BTI_INTSI, RS6000_BTI_UINTQI, RS6000_BTI_UINTDI, 0 }, + + { P8V_BUILTIN_VEC_VPKUDUM, P8V_BUILTIN_VPKUDUM, + RS6000_BTI_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VPKUDUM, P8V_BUILTIN_VPKUDUM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VPKUDUM, P8V_BUILTIN_VPKUDUM, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VPKSDSS, P8V_BUILTIN_VPKSDSS, + RS6000_BTI_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VPKUDUS, P8V_BUILTIN_VPKUDUS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VPKSDUS, 
P8V_BUILTIN_VPKSDUS, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VRLD, P8V_BUILTIN_VRLD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VRLD, P8V_BUILTIN_VRLD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VSLD, P8V_BUILTIN_VSLD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSLD, P8V_BUILTIN_VSLD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VSRD, P8V_BUILTIN_VSRD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSRD, P8V_BUILTIN_VSRD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VSRAD, P8V_BUILTIN_VSRAD, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSRAD, P8V_BUILTIN_VSRAD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VSUBCUQ, P8V_BUILTIN_VSUBCUQ, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P8V_BUILTIN_VEC_VSUBCUQ, P8V_BUILTIN_VSUBCUQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { P8V_BUILTIN_VEC_VSUBUDM, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSUBUDM, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSUBUDM, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSUBUDM, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSUBUDM, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_bool_V2DI, 0 }, + { P8V_BUILTIN_VEC_VSUBUDM, P8V_BUILTIN_VSUBUDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0 }, + + { P8V_BUILTIN_VEC_VSUBUQM, P8V_BUILTIN_VSUBUQM, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0 }, + { P8V_BUILTIN_VEC_VSUBUQM, P8V_BUILTIN_VSUBUQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, 0 }, + + { P6_OV_BUILTIN_CMPB, P6_BUILTIN_CMPB_32, + RS6000_BTI_UINTSI, RS6000_BTI_UINTSI, RS6000_BTI_UINTSI, 0 }, + { P6_OV_BUILTIN_CMPB, P6_BUILTIN_CMPB, + RS6000_BTI_UINTDI, RS6000_BTI_UINTDI, RS6000_BTI_UINTDI, 0 }, + + { P8V_BUILTIN_VEC_VUPKHSW, P8V_BUILTIN_VUPKHSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VUPKHSW, P8V_BUILTIN_VUPKHSW, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V4SI, 0, 0 }, + + { P8V_BUILTIN_VEC_VUPKLSW, P8V_BUILTIN_VUPKLSW, + RS6000_BTI_V2DI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_VUPKLSW, P8V_BUILTIN_VUPKLSW, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V4SI, 0, 0 }, + + { P9V_BUILTIN_VEC_VSLV, P9V_BUILTIN_VSLV, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + { P9V_BUILTIN_VEC_VSRV, P9V_BUILTIN_VSRV, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, 0 }, + + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V1TI, + RS6000_BTI_V1TI, RS6000_BTI_V1TI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DI, + 
RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { P8V_BUILTIN_VEC_REVB, P8V_BUILTIN_REVB_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V2DI, + RS6000_BTI_bool_V2DI, RS6000_BTI_bool_V2DI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V4SI, + RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V8HI, + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V16QI, + RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { ALTIVEC_BUILTIN_VEC_VREVE, ALTIVEC_BUILTIN_VREVE_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { VSX_BUILTIN_VEC_VSIGNED, VSX_BUILTIN_VEC_VSIGNED_V4SF, + RS6000_BTI_V4SI, RS6000_BTI_V4SF, 0, 0 }, + { VSX_BUILTIN_VEC_VSIGNED, VSX_BUILTIN_VEC_VSIGNED_V2DF, + RS6000_BTI_V2DI, RS6000_BTI_V2DF, 0, 0 }, + { VSX_BUILTIN_VEC_VSIGNEDE, VSX_BUILTIN_VEC_VSIGNEDE_V2DF, + RS6000_BTI_V4SI, RS6000_BTI_V2DF, 0, 0 }, + { VSX_BUILTIN_VEC_VSIGNEDO, VSX_BUILTIN_VEC_VSIGNEDO_V2DF, + RS6000_BTI_V4SI, RS6000_BTI_V2DF, 0, 0 }, + { P8V_BUILTIN_VEC_VSIGNED2, P8V_BUILTIN_VEC_VSIGNED2_V2DF, + RS6000_BTI_V4SI, RS6000_BTI_V2DF, RS6000_BTI_V2DF, 0 }, + + { VSX_BUILTIN_VEC_VUNSIGNED, VSX_BUILTIN_VEC_VUNSIGNED_V4SF, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, 0, 0 }, + { VSX_BUILTIN_VEC_VUNSIGNED, VSX_BUILTIN_VEC_VUNSIGNED_V2DF, + 
RS6000_BTI_unsigned_V2DI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VUNSIGNEDE, VSX_BUILTIN_VEC_VUNSIGNEDE_V2DF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DF, 0, 0 },
+ { VSX_BUILTIN_VEC_VUNSIGNEDO, VSX_BUILTIN_VEC_VUNSIGNEDO_V2DF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DF, 0, 0 },
+ { P8V_BUILTIN_VEC_VUNSIGNED2, P8V_BUILTIN_VEC_VUNSIGNED2_V2DF,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V2DF,
+ RS6000_BTI_V2DF, 0 },
+
+ /* Crypto builtins. */
+ { CRYPTO_BUILTIN_VPERMXOR, CRYPTO_BUILTIN_VPERMXOR_V16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { CRYPTO_BUILTIN_VPERMXOR, CRYPTO_BUILTIN_VPERMXOR_V8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { CRYPTO_BUILTIN_VPERMXOR, CRYPTO_BUILTIN_VPERMXOR_V4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { CRYPTO_BUILTIN_VPERMXOR, CRYPTO_BUILTIN_VPERMXOR_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI },
+
+ { CRYPTO_BUILTIN_VPMSUM, CRYPTO_BUILTIN_VPMSUMB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI,
+ RS6000_BTI_unsigned_V16QI, 0 },
+ { CRYPTO_BUILTIN_VPMSUM, CRYPTO_BUILTIN_VPMSUMH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI,
+ RS6000_BTI_unsigned_V8HI, 0 },
+ { CRYPTO_BUILTIN_VPMSUM, CRYPTO_BUILTIN_VPMSUMW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI,
+ RS6000_BTI_unsigned_V4SI, 0 },
+ { CRYPTO_BUILTIN_VPMSUM, CRYPTO_BUILTIN_VPMSUMD,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI,
+ RS6000_BTI_unsigned_V2DI, 0 },
+
+ { CRYPTO_BUILTIN_VSHASIGMA, CRYPTO_BUILTIN_VSHASIGMAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { CRYPTO_BUILTIN_VSHASIGMA, CRYPTO_BUILTIN_VSHASIGMAD,
+ RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+
+ /* Sign extend builtins that work on ISA 3.0, but were not added until
+ ISA 3.1. */
+ { P9V_BUILTIN_VEC_VSIGNEXTI, P9V_BUILTIN_VSIGNEXTSB2W,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0, 0 },
+ { P9V_BUILTIN_VEC_VSIGNEXTI, P9V_BUILTIN_VSIGNEXTSH2W,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+
+ { P9V_BUILTIN_VEC_VSIGNEXTLL, P9V_BUILTIN_VSIGNEXTSB2D,
+ RS6000_BTI_V2DI, RS6000_BTI_V16QI, 0, 0 },
+ { P9V_BUILTIN_VEC_VSIGNEXTLL, P9V_BUILTIN_VSIGNEXTSH2D,
+ RS6000_BTI_V2DI, RS6000_BTI_V8HI, 0, 0 },
+ { P9V_BUILTIN_VEC_VSIGNEXTLL, P9V_BUILTIN_VSIGNEXTSW2D,
+ RS6000_BTI_V2DI, RS6000_BTI_V4SI, 0, 0 },
+
+ /* Overloaded built-in functions for ISA 3.1 (power10).
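+
+ As a usage sketch for the first group below (vec_clrl as the
+ user-level name follows the usual altivec.h mapping and is an
+ assumption here): per the table it takes a signed or unsigned
+ vector char plus an unsigned int byte count and returns the same
+ vector type,
+
+   vector unsigned char r = vec_clrl (v, count);
+
+ the clearing semantics themselves come from the vclrlb instruction,
+ not from this table.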
*/ + { P10_BUILTIN_VEC_CLRL, P10V_BUILTIN_VCLRLB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_UINTSI, 0 }, + { P10_BUILTIN_VEC_CLRL, P10V_BUILTIN_VCLRLB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_UINTSI, 0 }, + { P10_BUILTIN_VEC_CLRR, P10V_BUILTIN_VCLRRB, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_UINTSI, 0 }, + { P10_BUILTIN_VEC_CLRR, P10V_BUILTIN_VCLRRB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_UINTSI, 0 }, + + { P10_BUILTIN_VEC_GNB, P10V_BUILTIN_VGNB, RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_UINTQI, 0 }, + { P10_BUILTIN_VEC_XXGENPCVM, P10V_BUILTIN_XXGENPCVM_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, RS6000_BTI_INTSI, 0 }, + { P10_BUILTIN_VEC_XXGENPCVM, P10V_BUILTIN_XXGENPCVM_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 }, + { P10_BUILTIN_VEC_XXGENPCVM, P10V_BUILTIN_XXGENPCVM_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 }, + { P10_BUILTIN_VEC_XXGENPCVM, P10V_BUILTIN_XXGENPCVM_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_INTSI, 0 }, + + /* The overloaded XXEVAL definitions are handled specially because the + fourth unsigned char operand is not encoded in this table. */ + { P10_BUILTIN_VEC_XXEVAL, P10V_BUILTIN_XXEVAL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXEVAL, P10V_BUILTIN_XXEVAL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { P10_BUILTIN_VEC_XXEVAL, P10V_BUILTIN_XXEVAL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { P10_BUILTIN_VEC_XXEVAL, P10V_BUILTIN_XXEVAL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { P10_BUILTIN_VEC_XXEVAL, P10V_BUILTIN_XXEVAL, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI }, + + /* The overloaded XXPERMX definitions are handled specially because the + fourth unsigned char operand is not encoded in this table. 
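+
+ For illustration (vec_permx as the user-level name is an assumption
+ from the usual intrinsic mapping): a call such as
+
+   vector unsigned char r = vec_permx (a, b, pcv, 0);
+
+ supplies that fourth operand as a literal constant, which the
+ expansion code validates against the small immediate range
+ separately.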
*/ + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, + RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, + RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXPERMX, P10V_BUILTIN_VXXPERMX, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, + RS6000_BTI_unsigned_V16QI }, + + { P10_BUILTIN_VEC_EXTRACTL, P10V_BUILTIN_VEXTRACTBL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_EXTRACTL, P10V_BUILTIN_VEXTRACTHL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_EXTRACTL, P10V_BUILTIN_VEXTRACTWL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_EXTRACTL, P10V_BUILTIN_VEXTRACTDL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTQI }, + + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTGPRBL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTGPRHL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTHI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTGPRWL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTGPRDL, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTDI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTVPRBL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTVPRHL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_INSERTL, P10V_BUILTIN_VINSERTVPRWL, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI }, + + { P10_BUILTIN_VEC_EXTRACTH, P10V_BUILTIN_VEXTRACTBR, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_EXTRACTH, P10V_BUILTIN_VEXTRACTHR, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_EXTRACTH, 
P10V_BUILTIN_VEXTRACTWR, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_EXTRACTH, P10V_BUILTIN_VEXTRACTDR, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTQI }, + + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTGPRBR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTGPRHR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTHI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTGPRWR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTSI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTGPRDR, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTDI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTVPRBR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTVPRHR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_INSERTH, P10V_BUILTIN_VINSERTVPRWR, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI }, + + { P10_BUILTIN_VEC_REPLACE_ELT, P10V_BUILTIN_VREPLACE_ELT_UV4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_REPLACE_ELT, P10V_BUILTIN_VREPLACE_ELT_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_REPLACE_ELT, P10V_BUILTIN_VREPLACE_ELT_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_float, RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_REPLACE_ELT, P10V_BUILTIN_VREPLACE_ELT_UV2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_UINTDI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_REPLACE_ELT, P10V_BUILTIN_VREPLACE_ELT_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTDI, RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_REPLACE_ELT, P10V_BUILTIN_VREPLACE_ELT_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_double, RS6000_BTI_INTQI }, + + { P10_BUILTIN_VEC_REPLACE_UN, P10V_BUILTIN_VREPLACE_UN_UV4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_UINTSI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_REPLACE_UN, P10V_BUILTIN_VREPLACE_UN_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_REPLACE_UN, P10V_BUILTIN_VREPLACE_UN_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_float, RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_REPLACE_UN, P10V_BUILTIN_VREPLACE_UN_UV2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_UINTDI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_REPLACE_UN, P10V_BUILTIN_VREPLACE_UN_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_INTDI, RS6000_BTI_INTQI }, + { P10_BUILTIN_VEC_REPLACE_UN, P10V_BUILTIN_VREPLACE_UN_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_double, RS6000_BTI_INTQI }, + + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_UINTQI }, + { 
P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SLDB, P10V_BUILTIN_VSLDB_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTQI }, + + { P10_BUILTIN_VEC_XXSPLTIW, P10V_BUILTIN_VXXSPLTIW_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0, 0 }, + { P10_BUILTIN_VEC_XXSPLTIW, P10V_BUILTIN_VXXSPLTIW_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_float, 0, 0 }, + + { P10_BUILTIN_VEC_XXSPLTID, P10V_BUILTIN_VXXSPLTID, + RS6000_BTI_V2DF, RS6000_BTI_float, 0, 0 }, + + { P10_BUILTIN_VEC_XXSPLTI32DX, P10V_BUILTIN_VXXSPLTI32DX_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_UINTQI, RS6000_BTI_INTSI }, + { P10_BUILTIN_VEC_XXSPLTI32DX, P10V_BUILTIN_VXXSPLTI32DX_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI, + RS6000_BTI_UINTSI }, + { P10_BUILTIN_VEC_XXSPLTI32DX, P10V_BUILTIN_VXXSPLTI32DX_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_UINTQI, RS6000_BTI_float }, + + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_unsigned_V8HI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_unsigned_V4SI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, RS6000_BTI_V2DI, + RS6000_BTI_unsigned_V2DI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V4SF, + RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, + RS6000_BTI_unsigned_V4SI }, + { P10_BUILTIN_VEC_XXBLEND, P10V_BUILTIN_VXXBLEND_V2DF, + RS6000_BTI_V2DF, RS6000_BTI_V2DF, RS6000_BTI_V2DF, + RS6000_BTI_unsigned_V2DI }, + + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, + RS6000_BTI_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, + RS6000_BTI_V8HI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI 
}, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_V4SI, + RS6000_BTI_V4SI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_V2DI, + RS6000_BTI_V2DI, RS6000_BTI_UINTQI }, + { P10_BUILTIN_VEC_SRDB, P10V_BUILTIN_VSRDB_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTQI }, + + { P10_BUILTIN_VEC_VSTRIL, P10V_BUILTIN_VSTRIBL, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIL, P10V_BUILTIN_VSTRIBL, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIL, P10V_BUILTIN_VSTRIHL, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIL, P10V_BUILTIN_VSTRIHL, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIL_P, P10V_BUILTIN_VSTRIBL_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIL_P, P10V_BUILTIN_VSTRIBL_P, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIL_P, P10V_BUILTIN_VSTRIHL_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIL_P, P10V_BUILTIN_VSTRIHL_P, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIR, P10V_BUILTIN_VSTRIBR, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIR, P10V_BUILTIN_VSTRIBR, + RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIR, P10V_BUILTIN_VSTRIHR, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIR, P10V_BUILTIN_VSTRIHR, + RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIR_P, P10V_BUILTIN_VSTRIBR_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIR_P, P10V_BUILTIN_VSTRIBR_P, + RS6000_BTI_INTSI, RS6000_BTI_V16QI, 0, 0 }, + + { P10_BUILTIN_VEC_VSTRIR_P, P10V_BUILTIN_VSTRIHR_P, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P10_BUILTIN_VEC_VSTRIR_P, P10V_BUILTIN_VSTRIHR_P, + RS6000_BTI_INTSI, RS6000_BTI_V8HI, 0, 0 }, + + { P10_BUILTIN_VEC_MTVSRBM, P10V_BUILTIN_MTVSRBM, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTDI, 0, 0 }, + { P10_BUILTIN_VEC_MTVSRHM, P10V_BUILTIN_MTVSRHM, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTDI, 0, 0 }, + { P10_BUILTIN_VEC_MTVSRWM, P10V_BUILTIN_MTVSRWM, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTDI, 0, 0 }, + { P10_BUILTIN_VEC_MTVSRDM, P10V_BUILTIN_MTVSRDM, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTDI, 0, 0 }, + { P10_BUILTIN_VEC_MTVSRQM, P10V_BUILTIN_MTVSRQM, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_UINTDI, 0, 0 }, + + { P10_BUILTIN_VEC_VCNTM, P10V_BUILTIN_VCNTMBB, + RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_UINTQI, 0 }, + { P10_BUILTIN_VEC_VCNTM, P10V_BUILTIN_VCNTMBH, + RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_UINTQI, 0 }, + { P10_BUILTIN_VEC_VCNTM, P10V_BUILTIN_VCNTMBW, + RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_UINTQI, 0 }, + { P10_BUILTIN_VEC_VCNTM, P10V_BUILTIN_VCNTMBD, + RS6000_BTI_unsigned_long_long, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_UINTQI, 0 }, + + { P10_BUILTIN_VEC_VEXPANDM, P10V_BUILTIN_VEXPANDMB, + RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_VEXPANDM, P10V_BUILTIN_VEXPANDMH, + RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + 
{ P10_BUILTIN_VEC_VEXPANDM, P10V_BUILTIN_VEXPANDMW, + RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P10_BUILTIN_VEC_VEXPANDM, P10V_BUILTIN_VEXPANDMD, + RS6000_BTI_unsigned_V2DI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { P10_BUILTIN_VEC_VEXPANDM, P10V_BUILTIN_VEXPANDMQ, + RS6000_BTI_unsigned_V1TI, RS6000_BTI_unsigned_V1TI, 0, 0 }, + + { P10_BUILTIN_VEC_VEXTRACTM, P10V_BUILTIN_VEXTRACTMB, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_VEXTRACTM, P10V_BUILTIN_VEXTRACTMH, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, 0, 0 }, + { P10_BUILTIN_VEC_VEXTRACTM, P10V_BUILTIN_VEXTRACTMW, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, 0, 0 }, + { P10_BUILTIN_VEC_VEXTRACTM, P10V_BUILTIN_VEXTRACTMD, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V2DI, 0, 0 }, + { P10_BUILTIN_VEC_VEXTRACTM, P10V_BUILTIN_VEXTRACTMQ, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V1TI, 0, 0 }, + + { P10_BUILTIN_VEC_XVTLSBB_ZEROS, P10V_BUILTIN_XVTLSBB_ZEROS, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + { P10_BUILTIN_VEC_XVTLSBB_ONES, P10V_BUILTIN_XVTLSBB_ONES, + RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, 0, 0 }, + + { P10_BUILTIN_VEC_SIGNEXT, P10V_BUILTIN_VSIGNEXTSD2Q, + RS6000_BTI_V1TI, RS6000_BTI_V2DI, 0, 0 }, + + { RS6000_BUILTIN_NONE, RS6000_BUILTIN_NONE, 0, 0, 0, 0 } +}; + +/* Nonzero if we can use a floating-point register to pass this arg. */ +#define USE_FP_FOR_ARG_P(CUM,MODE) \ + (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \ + && (CUM)->fregno <= FP_ARG_MAX_REG \ + && TARGET_HARD_FLOAT) + +/* Nonzero if we can use an AltiVec register to pass this arg. */ +#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \ + (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \ + && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \ + && TARGET_ALTIVEC_ABI \ + && (NAMED)) + +/* Walk down the type tree of TYPE counting consecutive base elements. + If *MODEP is VOIDmode, then set it to the first valid floating point + or vector type. If a non-floating point or vector type is found, or + if a floating point or vector type that doesn't match a non-VOIDmode + *MODEP is found, then return -1, otherwise return the count in the + sub-tree. + + There have been some ABI snafus along the way with C++. Modify + EMPTY_BASE_SEEN to a nonzero value iff a C++ empty base class makes + an appearance; separate flag bits indicate whether or not such a + field is marked "no unique address". Modify ZERO_WIDTH_BF_SEEN + to 1 iff a C++ zero-length bitfield makes an appearance, but + in this case otherwise treat this as still being a homogeneous + aggregate. */ + +static int +rs6000_aggregate_candidate (const_tree type, machine_mode *modep, + int *empty_base_seen, int *zero_width_bf_seen) +{ + machine_mode mode; + HOST_WIDE_INT size; + + switch (TREE_CODE (type)) + { + case REAL_TYPE: + mode = TYPE_MODE (type); + if (!SCALAR_FLOAT_MODE_P (mode)) + return -1; + + if (*modep == VOIDmode) + *modep = mode; + + if (*modep == mode) + return 1; + + break; + + case COMPLEX_TYPE: + mode = TYPE_MODE (TREE_TYPE (type)); + if (!SCALAR_FLOAT_MODE_P (mode)) + return -1; + + if (*modep == VOIDmode) + *modep = mode; + + if (*modep == mode) + return 2; + + break; + + case VECTOR_TYPE: + if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC) + return -1; + + /* Use V4SImode as representative of all 128-bit vector types. 
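+ For the homogeneity walk this means any two 16-byte vector fields
+ compare equal in mode: e.g. a struct with one vector float member
+ and one vector double member still counts as two V4SImode
+ elements, because only the 16-byte size is checked here.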
*/ + size = int_size_in_bytes (type); + switch (size) + { + case 16: + mode = V4SImode; + break; + default: + return -1; + } + + if (*modep == VOIDmode) + *modep = mode; + + /* Vector modes are considered to be opaque: two vectors are + equivalent for the purposes of being homogeneous aggregates + if they are the same size. */ + if (*modep == mode) + return 1; + + break; + + case ARRAY_TYPE: + { + int count; + tree index = TYPE_DOMAIN (type); + + /* Can't handle incomplete types nor sizes that are not + fixed. */ + if (!COMPLETE_TYPE_P (type) + || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) + return -1; + + count = rs6000_aggregate_candidate (TREE_TYPE (type), modep, + empty_base_seen, + zero_width_bf_seen); + if (count == -1 + || !index + || !TYPE_MAX_VALUE (index) + || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index)) + || !TYPE_MIN_VALUE (index) + || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)) + || count < 0) + return -1; + + count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) + - tree_to_uhwi (TYPE_MIN_VALUE (index))); + + /* There must be no padding. */ + if (wi::to_wide (TYPE_SIZE (type)) + != count * GET_MODE_BITSIZE (*modep)) + return -1; + + return count; + } + + case RECORD_TYPE: + { + int count = 0; + int sub_count; + tree field; + + /* Can't handle incomplete types nor sizes that are not + fixed. */ + if (!COMPLETE_TYPE_P (type) + || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) + return -1; + + for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) + { + if (TREE_CODE (field) != FIELD_DECL) + continue; + + if (DECL_FIELD_CXX_ZERO_WIDTH_BIT_FIELD (field)) + { + /* GCC 11 and earlier generated incorrect code in a rare + corner case for C++. When a RECORD_TYPE looks like a + homogeneous aggregate, except that it also contains + one or more zero-width bit fields, these earlier + compilers would incorrectly pass the fields in FPRs + or VSRs. This occurred because the front end wrongly + removed these bitfields from the RECORD_TYPE. In + GCC 12 and later, the front end flaw was corrected. + We want to diagnose this case. To do this, we pretend + that we don't see the zero-width bit fields (hence + the continue statement here), but pass back a flag + indicating what happened. The caller then diagnoses + the issue and rejects the RECORD_TYPE as a homogeneous + aggregate. */ + *zero_width_bf_seen = 1; + continue; + } + + if (DECL_FIELD_ABI_IGNORED (field)) + { + if (lookup_attribute ("no_unique_address", + DECL_ATTRIBUTES (field))) + *empty_base_seen |= 2; + else + *empty_base_seen |= 1; + continue; + } + + sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep, + empty_base_seen, + zero_width_bf_seen); + if (sub_count < 0) + return -1; + count += sub_count; + } + + /* There must be no padding. */ + if (wi::to_wide (TYPE_SIZE (type)) + != count * GET_MODE_BITSIZE (*modep)) + return -1; + + return count; + } + + case UNION_TYPE: + case QUAL_UNION_TYPE: + { + /* These aren't very interesting except in a degenerate case. */ + int count = 0; + int sub_count; + tree field; + + /* Can't handle incomplete types nor sizes that are not + fixed. */ + if (!COMPLETE_TYPE_P (type) + || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) + return -1; + + for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) + { + if (TREE_CODE (field) != FIELD_DECL) + continue; + + sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep, + empty_base_seen, + zero_width_bf_seen); + if (sub_count < 0) + return -1; + count = count > sub_count ? 
count : sub_count; + } + + /* There must be no padding. */ + if (wi::to_wide (TYPE_SIZE (type)) + != count * GET_MODE_BITSIZE (*modep)) + return -1; + + return count; + } + + default: + break; + } + + return -1; +} + +/* If an argument, whose type is described by TYPE and MODE, is a homogeneous + float or vector aggregate that shall be passed in FP/vector registers + according to the ELFv2 ABI, return the homogeneous element mode in + *ELT_MODE and the number of elements in *N_ELTS, and return TRUE. + + Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */ + +bool +rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type, + machine_mode *elt_mode, + int *n_elts) +{ + /* Note that we do not accept complex types at the top level as + homogeneous aggregates; these types are handled via the + targetm.calls.split_complex_arg mechanism. Complex types + can be elements of homogeneous aggregates, however. */ + if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type + && AGGREGATE_TYPE_P (type)) + { + machine_mode field_mode = VOIDmode; + int empty_base_seen = 0; + int zero_width_bf_seen = 0; + int field_count = rs6000_aggregate_candidate (type, &field_mode, + &empty_base_seen, + &zero_width_bf_seen); + + if (field_count > 0) + { + int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8; + int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size); + + /* The ELFv2 ABI allows homogeneous aggregates to occupy + up to AGGR_ARG_NUM_REG registers. */ + if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size) + { + if (elt_mode) + *elt_mode = field_mode; + if (n_elts) + *n_elts = field_count; + if (empty_base_seen && warn_psabi) + { + static unsigned last_reported_type_uid; + unsigned uid = TYPE_UID (TYPE_MAIN_VARIANT (type)); + if (uid != last_reported_type_uid) + { + const char *url + = CHANGES_ROOT_URL "gcc-10/changes.html#empty_base"; + if (empty_base_seen & 1) + inform (input_location, + "parameter passing for argument of type %qT " + "when C++17 is enabled changed to match C++14 " + "%{in GCC 10.1%}", type, url); + else + inform (input_location, + "parameter passing for argument of type %qT " + "with %<[[no_unique_address]]%> members " + "changed %{in GCC 10.1%}", type, url); + last_reported_type_uid = uid; + } + } + if (zero_width_bf_seen && warn_psabi) + { + static unsigned last_reported_type_uid; + unsigned uid = TYPE_UID (TYPE_MAIN_VARIANT (type)); + if (uid != last_reported_type_uid) + { + inform (input_location, + "ELFv2 parameter passing for an argument " + "containing zero-width bit fields but that is " + "otherwise a homogeneous aggregate was " + "corrected in GCC 12"); + last_reported_type_uid = uid; + } + if (elt_mode) + *elt_mode = mode; + if (n_elts) + *n_elts = 1; + return false; + } + return true; + } + } + } + + if (elt_mode) + *elt_mode = mode; + if (n_elts) + *n_elts = 1; + return false; +} + +/* Return a nonzero value to say to return the function value in + memory, just as large structures are always returned. TYPE will be + the data type of the value, and FNTYPE will be the type of the + function doing the returning, or @code{NULL} for libcalls. + + The AIX ABI for the RS/6000 specifies that all structures are + returned in memory. The Darwin ABI does the same. + + For the Darwin 64 Bit ABI, a function result can be returned in + registers or in memory, depending on the size of the return data + type. 
If it is returned in registers, the value occupies the same + registers as it would if it were the first and only function + argument. Otherwise, the function places its result in memory at + the location pointed to by GPR3. + + The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4, + but a draft put them in memory, and GCC used to implement the draft + instead of the final standard. Therefore, aix_struct_return + controls this instead of DEFAULT_ABI; V.4 targets needing backward + compatibility can change DRAFT_V4_STRUCT_RET to override the + default, and -m switches get the final word. See + rs6000_option_override_internal for more details. + + The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit + long double support is enabled. These values are returned in memory. + + int_size_in_bytes returns -1 for variable size objects, which go in + memory always. The cast to unsigned makes -1 > 8. */ + +bool +rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED) +{ + /* We do not allow MMA types being used as return values. Only report + the invalid return value usage the first time we encounter it. */ + if (cfun + && !cfun->machine->mma_return_type_error + && TREE_TYPE (cfun->decl) == fntype + && (TYPE_MODE (type) == OOmode || TYPE_MODE (type) == XOmode)) + { + /* Record we have now handled function CFUN, so the next time we + are called, we do not re-report the same error. */ + cfun->machine->mma_return_type_error = true; + if (TYPE_CANONICAL (type) != NULL_TREE) + type = TYPE_CANONICAL (type); + error ("invalid use of MMA type %qs as a function return value", + IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)))); + } + + /* For the Darwin64 ABI, test if we can fit the return value in regs. */ + if (TARGET_MACHO + && rs6000_darwin64_abi + && TREE_CODE (type) == RECORD_TYPE + && int_size_in_bytes (type) > 0) + { + CUMULATIVE_ARGS valcum; + rtx valret; + + valcum.words = 0; + valcum.fregno = FP_ARG_MIN_REG; + valcum.vregno = ALTIVEC_ARG_MIN_REG; + /* Do a trial code generation as if this were going to be passed + as an argument; if any part goes in memory, we return NULL. */ + valret = rs6000_darwin64_record_arg (&valcum, type, true, true); + if (valret) + return false; + /* Otherwise fall through to more conventional ABI rules. */ + } + + /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */ + if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type, + NULL, NULL)) + return false; + + /* The ELFv2 ABI returns aggregates up to 16B in registers */ + if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type) + && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16) + return false; + + if (AGGREGATE_TYPE_P (type) + && (aix_struct_return + || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8)) + return true; + + /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector + modes only exist for GCC vector types if -maltivec. */ + if (TARGET_32BIT && !TARGET_ALTIVEC_ABI + && ALTIVEC_VECTOR_MODE (TYPE_MODE (type))) + return false; + + /* Return synthetic vectors in memory. */ + if (TREE_CODE (type) == VECTOR_TYPE + && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 
16 : 8)) + { + static bool warned_for_return_big_vectors = false; + if (!warned_for_return_big_vectors) + { + warning (OPT_Wpsabi, "GCC vector returned by reference: " + "non-standard ABI extension with no compatibility " + "guarantee"); + warned_for_return_big_vectors = true; + } + return true; + } + + if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD + && FLOAT128_IEEE_P (TYPE_MODE (type))) + return true; + + return false; +} + +/* Specify whether values returned in registers should be at the most + significant end of a register. We want aggregates returned by + value to match the way aggregates are passed to functions. */ + +bool +rs6000_return_in_msb (const_tree valtype) +{ + return (DEFAULT_ABI == ABI_ELFv2 + && BYTES_BIG_ENDIAN + && AGGREGATE_TYPE_P (valtype) + && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype) + == PAD_UPWARD)); +} + +#ifdef HAVE_AS_GNU_ATTRIBUTE +/* Return TRUE if a call to function FNDECL may be one that + potentially affects the function calling ABI of the object file. */ + +static bool +call_ABI_of_interest (tree fndecl) +{ + if (rs6000_gnu_attr && symtab->state == EXPANSION) + { + struct cgraph_node *c_node; + + /* Libcalls are always interesting. */ + if (fndecl == NULL_TREE) + return true; + + /* Any call to an external function is interesting. */ + if (DECL_EXTERNAL (fndecl)) + return true; + + /* Interesting functions that we are emitting in this object file. */ + c_node = cgraph_node::get (fndecl); + c_node = c_node->ultimate_alias_target (); + return !c_node->only_called_directly_p (); + } + return false; +} +#endif + +/* Initialize a variable CUM of type CUMULATIVE_ARGS + for a call to a function whose data type is FNTYPE. + For a library call, FNTYPE is 0 and RETURN_MODE the return value mode. + + For incoming args we set the number of arguments in the prototype large + so we never return a PARALLEL. */ + +void +init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype, + rtx libname ATTRIBUTE_UNUSED, int incoming, + int libcall, int n_named_args, + tree fndecl, + machine_mode return_mode ATTRIBUTE_UNUSED) +{ + static CUMULATIVE_ARGS zero_cumulative; + + *cum = zero_cumulative; + cum->words = 0; + cum->fregno = FP_ARG_MIN_REG; + cum->vregno = ALTIVEC_ARG_MIN_REG; + cum->prototype = (fntype && prototype_p (fntype)); + cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall) + ? CALL_LIBCALL : CALL_NORMAL); + cum->sysv_gregno = GP_ARG_MIN_REG; + cum->stdarg = stdarg_p (fntype); + cum->libcall = libcall; + + cum->nargs_prototype = 0; + if (incoming || cum->prototype) + cum->nargs_prototype = n_named_args; + + /* Check for a longcall attribute. 
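As an illustration, a hypothetical declaration (not part of this file) + that would set CALL_LONG here: + + void far_func (void) __attribute__ ((longcall)); + + The "shortcall" attribute overrides this in the other direction, and + -mlongcall makes long calls the default. 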
*/ + if ((!fntype && rs6000_default_long_calls) + || (fntype + && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype)) + && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype)))) + cum->call_cookie |= CALL_LONG; + else if (DEFAULT_ABI != ABI_DARWIN) + { + bool is_local = (fndecl + && !DECL_EXTERNAL (fndecl) + && !DECL_WEAK (fndecl) + && (*targetm.binds_local_p) (fndecl)); + if (is_local) + ; + else if (flag_plt) + { + if (fntype + && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype))) + cum->call_cookie |= CALL_LONG; + } + else + { + if (!(fntype + && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype)))) + cum->call_cookie |= CALL_LONG; + } + } + + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "\ninit_cumulative_args:"); + if (fntype) + { + tree ret_type = TREE_TYPE (fntype); + fprintf (stderr, " ret code = %s,", + get_tree_code_name (TREE_CODE (ret_type))); + } + + if (cum->call_cookie & CALL_LONG) + fprintf (stderr, " longcall,"); + + fprintf (stderr, " proto = %d, nargs = %d\n", + cum->prototype, cum->nargs_prototype); + } + +#ifdef HAVE_AS_GNU_ATTRIBUTE + if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)) + { + cum->escapes = call_ABI_of_interest (fndecl); + if (cum->escapes) + { + tree return_type; + + if (fntype) + { + return_type = TREE_TYPE (fntype); + return_mode = TYPE_MODE (return_type); + } + else + return_type = lang_hooks.types.type_for_mode (return_mode, 0); + + if (return_type != NULL) + { + if (TREE_CODE (return_type) == RECORD_TYPE + && TYPE_TRANSPARENT_AGGR (return_type)) + { + return_type = TREE_TYPE (first_field (return_type)); + return_mode = TYPE_MODE (return_type); + } + if (AGGREGATE_TYPE_P (return_type) + && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type) + <= 8)) + rs6000_returns_struct = true; + } + if (SCALAR_FLOAT_MODE_P (return_mode)) + { + rs6000_passes_float = true; + if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT) + && (FLOAT128_IBM_P (return_mode) + || FLOAT128_IEEE_P (return_mode) + || (return_type != NULL + && (TYPE_MAIN_VARIANT (return_type) + == long_double_type_node)))) + rs6000_passes_long_double = true; + + /* Note if we pass or return an IEEE 128-bit type. We changed + the mangling for these types, and we may need to make an alias + with the old mangling. */ + if (FLOAT128_IEEE_P (return_mode)) + rs6000_passes_ieee128 = true; + } + if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)) + rs6000_passes_vector = true; + } + } +#endif + + if (fntype + && !TARGET_ALTIVEC + && TARGET_ALTIVEC_ABI + && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype)))) + { + error ("cannot return value in vector register because" + " altivec instructions are disabled, use %qs" + " to enable them", "-maltivec"); + } +} + + +/* On rs6000, function arguments are promoted, as are function return + values. */ + +machine_mode +rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED, + machine_mode mode, + int *punsignedp ATTRIBUTE_UNUSED, + const_tree, int for_return ATTRIBUTE_UNUSED) +{ + if (GET_MODE_CLASS (mode) == MODE_INT + && GET_MODE_SIZE (mode) < (TARGET_32BIT ? 4 : 8)) + mode = TARGET_32BIT ? SImode : DImode; + + return mode; +} + +/* Return true if TYPE must be passed on the stack and not in registers. 
*/ + +bool +rs6000_must_pass_in_stack (const function_arg_info &arg) +{ + if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT) + return must_pass_in_stack_var_size (arg); + else + return must_pass_in_stack_var_size_or_pad (arg); +} + +static inline bool +is_complex_IBM_long_double (machine_mode mode) +{ + return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode)); +} + +/* Whether ABI_V4 passes MODE args to a function in floating point + registers. */ + +static bool +abi_v4_pass_in_fpr (machine_mode mode, bool named) +{ + if (!TARGET_HARD_FLOAT) + return false; + if (mode == DFmode) + return true; + if (mode == SFmode && named) + return true; + /* ABI_V4 passes complex IBM long double in 8 gprs. + Stupid, but we can't change the ABI now. */ + if (is_complex_IBM_long_double (mode)) + return false; + if (FLOAT128_2REG_P (mode)) + return true; + if (DECIMAL_FLOAT_MODE_P (mode)) + return true; + return false; +} + +/* Implement TARGET_FUNCTION_ARG_PADDING. + + For the AIX ABI structs are always stored left shifted in their + argument slot. */ + +pad_direction +rs6000_function_arg_padding (machine_mode mode, const_tree type) +{ +#ifndef AGGREGATE_PADDING_FIXED +#define AGGREGATE_PADDING_FIXED 0 +#endif +#ifndef AGGREGATES_PAD_UPWARD_ALWAYS +#define AGGREGATES_PAD_UPWARD_ALWAYS 0 +#endif + + if (!AGGREGATE_PADDING_FIXED) + { + /* GCC used to pass structures of the same size as integer types as + if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING. + i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were + passed padded downward, except that -mstrict-align further + muddied the water in that multi-component structures of 2 and 4 + bytes in size were passed padded upward. + + The following arranges for best compatibility with previous + versions of gcc, but removes the -mstrict-align dependency. */ + if (BYTES_BIG_ENDIAN) + { + HOST_WIDE_INT size = 0; + + if (mode == BLKmode) + { + if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) + size = int_size_in_bytes (type); + } + else + size = GET_MODE_SIZE (mode); + + if (size == 1 || size == 2 || size == 4) + return PAD_DOWNWARD; + } + return PAD_UPWARD; + } + + if (AGGREGATES_PAD_UPWARD_ALWAYS) + { + if (type != 0 && AGGREGATE_TYPE_P (type)) + return PAD_UPWARD; + } + + /* Fall back to the default. */ + return default_function_arg_padding (mode, type); +} + +/* If defined, a C expression that gives the alignment boundary, in bits, + of an argument with the specified mode and type. If it is not defined, + PARM_BOUNDARY is used for all arguments. + + V.4 wants long longs and doubles to be double word aligned. Just + testing the mode size is a boneheaded way to do this as it means + that other types such as complex int are also double word aligned. + However, we're stuck with this because changing the ABI might break + existing library interfaces. + + Quadword align Altivec/VSX vectors. + Quadword align large synthetic vector types. 
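For instance, under ELFv2 a hypothetical over-aligned aggregate such as + + struct aligned_pair { int a, b; } __attribute__ ((aligned (16))); + + has TYPE_ALIGN > 64 and therefore starts on a quadword boundary in + the parameter save area, while a plain long long under V.4 is only + doubleword aligned. 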
*/ + +unsigned int +rs6000_function_arg_boundary (machine_mode mode, const_tree type) +{ + machine_mode elt_mode; + int n_elts; + + rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts); + + if (DEFAULT_ABI == ABI_V4 + && (GET_MODE_SIZE (mode) == 8 + || (TARGET_HARD_FLOAT + && !is_complex_IBM_long_double (mode) + && FLOAT128_2REG_P (mode)))) + return 64; + else if (FLOAT128_VECTOR_P (mode)) + return 128; + else if (type && TREE_CODE (type) == VECTOR_TYPE + && int_size_in_bytes (type) >= 8 + && int_size_in_bytes (type) < 16) + return 64; + else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode) + || (type && TREE_CODE (type) == VECTOR_TYPE + && int_size_in_bytes (type) >= 16)) + return 128; + + /* Aggregate types that need > 8 byte alignment are quadword-aligned + in the parameter area in the ELFv2 ABI, and in the AIX ABI unless + -mcompat-align-parm is used. */ + if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm) + || DEFAULT_ABI == ABI_ELFv2) + && type && TYPE_ALIGN (type) > 64) + { + /* "Aggregate" means any AGGREGATE_TYPE except for single-element + or homogeneous float/vector aggregates here. We already handled + vector aggregates above, but still need to check for float here. */ + if (AGGREGATE_TYPE_P (type) + && !SCALAR_FLOAT_MODE_P (elt_mode)) + return 128; + } + + /* Similar for the Darwin64 ABI. Note that for historical reasons we + implement the "aggregate type" check as a BLKmode check here; this + means certain aggregate types are in fact not aligned. */ + if (TARGET_MACHO && rs6000_darwin64_abi + && mode == BLKmode + && type && TYPE_ALIGN (type) > 64) + return 128; + + return PARM_BOUNDARY; +} + +/* The offset in words to the start of the parameter save area. */ + +static unsigned int +rs6000_parm_offset (void) +{ + return (DEFAULT_ABI == ABI_V4 ? 2 + : DEFAULT_ABI == ABI_ELFv2 ? 4 + : 6); +} + +/* For a function parm of MODE and TYPE, return the starting word in + the parameter area. NWORDS of the parameter area are already used. */ + +static unsigned int +rs6000_parm_start (machine_mode mode, const_tree type, + unsigned int nwords) +{ + unsigned int align; + + align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1; + return nwords + (-(rs6000_parm_offset () + nwords) & align); +} + +/* Compute the size (in words) of a function argument. */ + +static unsigned long +rs6000_arg_size (machine_mode mode, const_tree type) +{ + unsigned long size; + + if (mode != BLKmode) + size = GET_MODE_SIZE (mode); + else + size = int_size_in_bytes (type); + + if (TARGET_32BIT) + return (size + 3) >> 2; + else + return (size + 7) >> 3; +} + +/* Use this to flush pending int fields. */ + +static void +rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum, + HOST_WIDE_INT bitpos, int final) +{ + unsigned int startbit, endbit; + int intregs, intoffset; + + /* Handle the situations where a float is taking up the first half + of the GPR, and the other half is empty (typically due to + alignment restrictions). We can detect this by an 8-byte-aligned + int field, or by seeing that this is the final flush for this + argument. Count the word and continue on. 
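For example, in a hypothetical record such as + + struct sf_pair { float x; float y; }; + + both fields go to FPRs, yet together they occupy a single GPR-sized + doubleword of the argument area, so only one word is counted. 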
*/ + if (cum->floats_in_gpr == 1 + && (cum->intoffset % 64 == 0 + || (cum->intoffset == -1 && final))) + { + cum->words++; + cum->floats_in_gpr = 0; + } + + if (cum->intoffset == -1) + return; + + intoffset = cum->intoffset; + cum->intoffset = -1; + cum->floats_in_gpr = 0; + + if (intoffset % BITS_PER_WORD != 0) + { + unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD; + if (!int_mode_for_size (bits, 0).exists ()) + { + /* We couldn't find an appropriate mode, which happens, + e.g., in packed structs when there are 3 bytes to load. + Back intoffset back to the beginning of the word in this + case. */ + intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD); + } + } + + startbit = ROUND_DOWN (intoffset, BITS_PER_WORD); + endbit = ROUND_UP (bitpos, BITS_PER_WORD); + intregs = (endbit - startbit) / BITS_PER_WORD; + cum->words += intregs; + /* words should be unsigned. */ + if ((unsigned)cum->words < (endbit/BITS_PER_WORD)) + { + int pad = (endbit/BITS_PER_WORD) - cum->words; + cum->words += pad; + } +} + +/* The darwin64 ABI calls for us to recurse down through structs, + looking for elements passed in registers. Unfortunately, we have + to track int register count here also because of misalignments + in powerpc alignment mode. */ + +static void +rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum, + const_tree type, + HOST_WIDE_INT startbitpos) +{ + tree f; + + for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f)) + if (TREE_CODE (f) == FIELD_DECL) + { + HOST_WIDE_INT bitpos = startbitpos; + tree ftype = TREE_TYPE (f); + machine_mode mode; + if (ftype == error_mark_node) + continue; + mode = TYPE_MODE (ftype); + + if (DECL_SIZE (f) != 0 + && tree_fits_uhwi_p (bit_position (f))) + bitpos += int_bit_position (f); + + /* ??? FIXME: else assume zero offset. */ + + if (TREE_CODE (ftype) == RECORD_TYPE) + rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos); + else if (USE_FP_FOR_ARG_P (cum, mode)) + { + unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3; + rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0); + cum->fregno += n_fpregs; + /* Single-precision floats present a special problem for + us, because they are smaller than an 8-byte GPR, and so + the structure-packing rules combined with the standard + varargs behavior mean that we want to pack float/float + and float/int combinations into a single register's + space. This is complicated by the arg advance flushing, + which works on arbitrarily large groups of int-type + fields. */ + if (mode == SFmode) + { + if (cum->floats_in_gpr == 1) + { + /* Two floats in a word; count the word and reset + the float count. */ + cum->words++; + cum->floats_in_gpr = 0; + } + else if (bitpos % 64 == 0) + { + /* A float at the beginning of an 8-byte word; + count it and put off adjusting cum->words until + we see if an arg advance flush is going to do it + for us. */ + cum->floats_in_gpr++; + } + else + { + /* The float is at the end of a word, preceded + by integer fields, so the arg advance flush + just above has already set cum->words and + everything is taken care of. */ + } + } + else + cum->words += n_fpregs; + } + else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1)) + { + rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0); + cum->vregno++; + cum->words += 2; + } + else if (cum->intoffset == -1) + cum->intoffset = bitpos; + } +} + +/* Check for an item that needs to be considered specially under the darwin 64 + bit ABI. These are record types where the mode is BLK or the structure is + 8 bytes in size. 
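E.g., hypothetical records such as struct { int a, b; } (exactly 8 + bytes) and struct { double d; int i; } (BLKmode) both take this special + path, while a 4-byte struct { int a; } does not. 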
*/ +int +rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type) +{ + return rs6000_darwin64_abi + && ((mode == BLKmode + && TREE_CODE (type) == RECORD_TYPE + && int_size_in_bytes (type) > 0) + || (type && TREE_CODE (type) == RECORD_TYPE + && int_size_in_bytes (type) == 8)) ? 1 : 0; +} + +/* Update the data in CUM to advance over an argument + of mode MODE and data type TYPE. + (TYPE is null for libcalls where that information may not be available.) + + Note that for args passed by reference, function_arg will be called + with MODE and TYPE set to that of the pointer to the arg, not the arg + itself. */ + +static void +rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode, + const_tree type, bool named, int depth) +{ + machine_mode elt_mode; + int n_elts; + + rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts); + + /* Only tick off an argument if we're not recursing. */ + if (depth == 0) + cum->nargs_prototype--; + +#ifdef HAVE_AS_GNU_ATTRIBUTE + if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4) + && cum->escapes) + { + if (SCALAR_FLOAT_MODE_P (mode)) + { + rs6000_passes_float = true; + if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT) + && (FLOAT128_IBM_P (mode) + || FLOAT128_IEEE_P (mode) + || (type != NULL + && TYPE_MAIN_VARIANT (type) == long_double_type_node))) + rs6000_passes_long_double = true; + + /* Note if we pass or return an IEEE 128-bit type. We changed the + mangling for these types, and we may need to make an alias with + the old mangling. */ + if (FLOAT128_IEEE_P (mode)) + rs6000_passes_ieee128 = true; + } + if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode)) + rs6000_passes_vector = true; + } +#endif + + if (TARGET_ALTIVEC_ABI + && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode) + || (type && TREE_CODE (type) == VECTOR_TYPE + && int_size_in_bytes (type) == 16))) + { + bool stack = false; + + if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named)) + { + cum->vregno += n_elts; + + if (!TARGET_ALTIVEC) + error ("cannot pass argument in vector register because" + " altivec instructions are disabled, use %qs" + " to enable them", "-maltivec"); + + /* PowerPC64 Linux and AIX allocate GPRs for a vector argument + even if it is going to be passed in a vector register. + Darwin does the same for variable-argument functions. */ + if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) + && TARGET_64BIT) + || (cum->stdarg && DEFAULT_ABI != ABI_V4)) + stack = true; + } + else + stack = true; + + if (stack) + { + int align; + + /* Vector parameters must be 16-byte aligned. In 32-bit + mode this means we need to take into account the offset + to the parameter save area. In 64-bit mode, they just + have to start on an even word, since the parameter save + area is 16-byte aligned. */ + if (TARGET_32BIT) + align = -(rs6000_parm_offset () + cum->words) & 3; + else + align = cum->words & 1; + cum->words += align + rs6000_arg_size (mode, type); + + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "function_adv: words = %2d, align=%d, ", + cum->words, align); + fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n", + cum->nargs_prototype, cum->prototype, + GET_MODE_NAME (mode)); + } + } + } + else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type)) + { + int size = int_size_in_bytes (type); + /* Variable sized types have size == -1 and are + treated as if consisting entirely of ints. + Pad to 16 byte boundary if needed. 
*/ + if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD + && (cum->words % 2) != 0) + cum->words++; + /* For varargs, we can just go up by the size of the struct. */ + if (!named) + cum->words += (size + 7) / 8; + else + { + /* It is tempting to say int register count just goes up by + sizeof(type)/8, but this is wrong in a case such as + { int; double; int; } [powerpc alignment]. We have to + grovel through the fields for these too. */ + cum->intoffset = 0; + cum->floats_in_gpr = 0; + rs6000_darwin64_record_arg_advance_recurse (cum, type, 0); + rs6000_darwin64_record_arg_advance_flush (cum, + size * BITS_PER_UNIT, 1); + } + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d", + cum->words, TYPE_ALIGN (type), size); + fprintf (stderr, + "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n", + cum->nargs_prototype, cum->prototype, + GET_MODE_NAME (mode)); + } + } + else if (DEFAULT_ABI == ABI_V4) + { + if (abi_v4_pass_in_fpr (mode, named)) + { + /* _Decimal128 must use an even/odd register pair. This assumes + that the register number is odd when fregno is odd. */ + if (mode == TDmode && (cum->fregno % 2) == 1) + cum->fregno++; + + if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0) + <= FP_ARG_V4_MAX_REG) + cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3; + else + { + cum->fregno = FP_ARG_V4_MAX_REG + 1; + if (mode == DFmode || FLOAT128_IBM_P (mode) + || mode == DDmode || mode == TDmode) + cum->words += cum->words & 1; + cum->words += rs6000_arg_size (mode, type); + } + } + else + { + int n_words = rs6000_arg_size (mode, type); + int gregno = cum->sysv_gregno; + + /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10). + As does any other 2 word item such as complex int due to a + historical mistake. */ + if (n_words == 2) + gregno += (1 - gregno) & 1; + + /* Multi-reg args are not split between registers and stack. */ + if (gregno + n_words - 1 > GP_ARG_MAX_REG) + { + /* Long long is aligned on the stack. So are other 2 word + items such as complex int due to a historical mistake. */ + if (n_words == 2) + cum->words += cum->words & 1; + cum->words += n_words; + } + + /* Note: continuing to accumulate gregno past when we've started + spilling to the stack indicates the fact that we've started + spilling to the stack to expand_builtin_saveregs. */ + cum->sysv_gregno = gregno + n_words; + } + + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ", + cum->words, cum->fregno); + fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ", + cum->sysv_gregno, cum->nargs_prototype, cum->prototype); + fprintf (stderr, "mode = %4s, named = %d\n", + GET_MODE_NAME (mode), named); + } + } + else + { + int n_words = rs6000_arg_size (mode, type); + int start_words = cum->words; + int align_words = rs6000_parm_start (mode, type, start_words); + + cum->words = align_words + n_words; + + if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT) + { + /* _Decimal128 must be passed in an even/odd float register pair. + This assumes that the register number is odd when fregno is + odd. 
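A worked example (hypothetical): for f (double a, _Decimal128 b), A + occupies FPR 1 and leaves fregno at the even register 2, so B takes the + pair FPR 2 / FPR 3 unadjusted; for f (_Decimal128 b) alone, fregno + starts at the odd register 1 and is bumped first. 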
*/ + if (elt_mode == TDmode && (cum->fregno % 2) == 1) + cum->fregno++; + cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3); + } + + if (TARGET_DEBUG_ARG) + { + fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ", + cum->words, cum->fregno); + fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ", + cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode)); + fprintf (stderr, "named = %d, align = %d, depth = %d\n", + named, align_words - start_words, depth); + } + } +} + +void +rs6000_function_arg_advance (cumulative_args_t cum, + const function_arg_info &arg) +{ + rs6000_function_arg_advance_1 (get_cumulative_args (cum), + arg.mode, arg.type, arg.named, 0); +} + +/* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the + structure between cum->intoffset and bitpos to integer registers. */ + +static void +rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum, + HOST_WIDE_INT bitpos, rtx rvec[], int *k) +{ + machine_mode mode; + unsigned int regno; + unsigned int startbit, endbit; + int this_regno, intregs, intoffset; + rtx reg; + + if (cum->intoffset == -1) + return; + + intoffset = cum->intoffset; + cum->intoffset = -1; + + /* If this is the trailing part of a word, try to only load that + much into the register. Otherwise load the whole register. Note + that in the latter case we may pick up unwanted bits. It's not a + problem at the moment but may wish to revisit. */ + + if (intoffset % BITS_PER_WORD != 0) + { + unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD; + if (!int_mode_for_size (bits, 0).exists (&mode)) + { + /* We couldn't find an appropriate mode, which happens, + e.g., in packed structs when there are 3 bytes to load. + Back intoffset back to the beginning of the word in this + case. */ + intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD); + mode = word_mode; + } + } + else + mode = word_mode; + + startbit = ROUND_DOWN (intoffset, BITS_PER_WORD); + endbit = ROUND_UP (bitpos, BITS_PER_WORD); + intregs = (endbit - startbit) / BITS_PER_WORD; + this_regno = cum->words + intoffset / BITS_PER_WORD; + + if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno) + cum->use_stack = 1; + + intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno); + if (intregs <= 0) + return; + + intoffset /= BITS_PER_UNIT; + do + { + regno = GP_ARG_MIN_REG + this_regno; + reg = gen_rtx_REG (mode, regno); + rvec[(*k)++] = + gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset)); + + this_regno += 1; + intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1; + mode = word_mode; + intregs -= 1; + } + while (intregs > 0); +} + +/* Recursive workhorse for the following. */ + +static void +rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type, + HOST_WIDE_INT startbitpos, rtx rvec[], + int *k) +{ + tree f; + + for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f)) + if (TREE_CODE (f) == FIELD_DECL) + { + HOST_WIDE_INT bitpos = startbitpos; + tree ftype = TREE_TYPE (f); + machine_mode mode; + if (ftype == error_mark_node) + continue; + mode = TYPE_MODE (ftype); + + if (DECL_SIZE (f) != 0 + && tree_fits_uhwi_p (bit_position (f))) + bitpos += int_bit_position (f); + + /* ??? FIXME: else assume zero offset. 
*/ + + if (TREE_CODE (ftype) == RECORD_TYPE) + rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k); + else if (cum->named && USE_FP_FOR_ARG_P (cum, mode)) + { + unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3; +#if 0 + switch (mode) + { + case E_SCmode: mode = SFmode; break; + case E_DCmode: mode = DFmode; break; + case E_TCmode: mode = TFmode; break; + default: break; + } +#endif + rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k); + if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1) + { + gcc_assert (cum->fregno == FP_ARG_MAX_REG + && (mode == TFmode || mode == TDmode)); + /* Long double or _Decimal128 split over regs and memory. */ + mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode; + cum->use_stack=1; + } + rvec[(*k)++] + = gen_rtx_EXPR_LIST (VOIDmode, + gen_rtx_REG (mode, cum->fregno++), + GEN_INT (bitpos / BITS_PER_UNIT)); + if (FLOAT128_2REG_P (mode)) + cum->fregno++; + } + else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1)) + { + rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k); + rvec[(*k)++] + = gen_rtx_EXPR_LIST (VOIDmode, + gen_rtx_REG (mode, cum->vregno++), + GEN_INT (bitpos / BITS_PER_UNIT)); + } + else if (cum->intoffset == -1) + cum->intoffset = bitpos; + } +} + +/* For the darwin64 ABI, we want to construct a PARALLEL consisting of + the register(s) to be used for each field and subfield of a struct + being passed by value, along with the offset of where the + register's value may be found in the block. FP fields go in FP + registers, vector fields go in vector registers, and everything + else goes in int registers, packed as in memory. + + This code is also used for function return values. RETVAL indicates + whether this is the case. + + Much of this is taken from the SPARC V9 port, which has a similar + calling convention. */ + +rtx +rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type, + bool named, bool retval) +{ + rtx rvec[FIRST_PSEUDO_REGISTER]; + int k = 1, kbase = 1; + HOST_WIDE_INT typesize = int_size_in_bytes (type); + /* This is a copy; modifications are not visible to our caller. */ + CUMULATIVE_ARGS copy_cum = *orig_cum; + CUMULATIVE_ARGS *cum = &copy_cum; + + /* Pad to 16 byte boundary if needed. */ + if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD + && (cum->words % 2) != 0) + cum->words++; + + cum->intoffset = 0; + cum->use_stack = 0; + cum->named = named; + + /* Put entries into rvec[] for individual FP and vector fields, and + for the chunks of memory that go in int regs. Note we start at + element 1; 0 is reserved for an indication of using memory, and + may or may not be filled in below. */ + rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k); + rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k); + + /* If any part of the struct went on the stack put all of it there. + This hack is because the generic code for + FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register + parts of the struct are not at the beginning. */ + if (cum->use_stack) + { + if (retval) + return NULL_RTX; /* doesn't go in registers at all */ + kbase = 0; + rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx); + } + if (k > 1 || cum->use_stack) + return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase])); + else + return NULL_RTX; +} + +/* Determine where to place an argument in 64-bit mode with 32-bit ABI. 
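Here "mixed" means TARGET_32BIT && TARGET_POWERPC64: ABI words are 32 + bits but the registers are 64 bits wide, so, for example, a 16-byte + argument that starts at r9 is returned as a PARALLEL of SImode pieces + for r9 and r10 plus the NULL_RTX marker saying the rest is in memory. 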
*/ + +static rtx +rs6000_mixed_function_arg (machine_mode mode, const_tree type, + int align_words) +{ + int n_units; + int i, k; + rtx rvec[GP_ARG_NUM_REG + 1]; + + if (align_words >= GP_ARG_NUM_REG) + return NULL_RTX; + + n_units = rs6000_arg_size (mode, type); + + /* Optimize the simple case where the arg fits in one gpr, except in + the case of BLKmode due to assign_parms assuming that registers are + BITS_PER_WORD wide. */ + if (n_units == 0 + || (n_units == 1 && mode != BLKmode)) + return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words); + + k = 0; + if (align_words + n_units > GP_ARG_NUM_REG) + /* Not all of the arg fits in gprs. Say that it goes in memory too, + using a magic NULL_RTX component. + This is not strictly correct. Only some of the arg belongs in + memory, not all of it. However, the normal scheme using + function_arg_partial_nregs can result in unusual subregs, eg. + (subreg:SI (reg:DF) 4), which are not handled well. The code to + store the whole arg to memory is often more efficient than code + to store pieces, and we know that space is available in the right + place for the whole arg. */ + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx); + + i = 0; + do + { + rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words); + rtx off = GEN_INT (i++ * 4); + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off); + } + while (++align_words < GP_ARG_NUM_REG && --n_units != 0); + + return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec)); +} + +/* We have an argument of MODE and TYPE that goes into FPRs or VRs, + but must also be copied into the parameter save area starting at + offset ALIGN_WORDS. Fill in RVEC with the elements corresponding + to the GPRs and/or memory. Return the number of elements used. */ + +static int +rs6000_psave_function_arg (machine_mode mode, const_tree type, + int align_words, rtx *rvec) +{ + int k = 0; + + if (align_words < GP_ARG_NUM_REG) + { + int n_words = rs6000_arg_size (mode, type); + + if (align_words + n_words > GP_ARG_NUM_REG + || mode == BLKmode + || (TARGET_32BIT && TARGET_POWERPC64)) + { + /* If this is partially on the stack, then we only + include the portion actually in registers here. */ + machine_mode rmode = TARGET_32BIT ? SImode : DImode; + int i = 0; + + if (align_words + n_words > GP_ARG_NUM_REG) + { + /* Not all of the arg fits in gprs. Say that it goes in memory + too, using a magic NULL_RTX component. Also see comment in + rs6000_mixed_function_arg for why the normal + function_arg_partial_nregs scheme doesn't work in this case. */ + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx); + } + + do + { + rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words); + rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode)); + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off); + } + while (++align_words < GP_ARG_NUM_REG && --n_words != 0); + } + else + { + /* The whole arg fits in gprs. */ + rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words); + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx); + } + } + else + { + /* It's entirely in memory. */ + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx); + } + + return k; +} + +/* RVEC is a vector of K components of an argument of mode MODE. + Construct the final function_arg return value from it. */ + +static rtx +rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k) +{ + gcc_assert (k >= 1); + + /* Avoid returning a PARALLEL in the trivial cases. 
*/ + if (k == 1) + { + if (XEXP (rvec[0], 0) == NULL_RTX) + return NULL_RTX; + + if (GET_MODE (XEXP (rvec[0], 0)) == mode) + return XEXP (rvec[0], 0); + } + + return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec)); +} + +/* Determine where to put an argument to a function. + Value is zero to push the argument on the stack, + or a hard register in which to store the argument. + + CUM is a variable of type CUMULATIVE_ARGS which gives info about + the preceding args and about the function being called. It is + not modified in this routine. + ARG is a description of the argument. + + On RS/6000 the first eight words of non-FP are normally in registers + and the rest are pushed. Under AIX, the first 13 FP args are in registers. + Under V.4, the first 8 FP args are in registers. + + If this is floating-point and no prototype is specified, we use + both an FP and integer register (or possibly FP reg and stack). Library + functions (when CALL_LIBCALL is set) always have the proper types for args, + so we can pass the FP value just in one register. emit_library_function + doesn't support PARALLEL anyway. + + Note that for args passed by reference, function_arg will be called + with ARG describing the pointer to the arg, not the arg itself. */ + +rtx +rs6000_function_arg (cumulative_args_t cum_v, const function_arg_info &arg) +{ + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + tree type = arg.type; + machine_mode mode = arg.mode; + bool named = arg.named; + enum rs6000_abi abi = DEFAULT_ABI; + machine_mode elt_mode; + int n_elts; + + /* We do not allow MMA types being used as function arguments. */ + if (mode == OOmode || mode == XOmode) + { + if (TYPE_CANONICAL (type) != NULL_TREE) + type = TYPE_CANONICAL (type); + error ("invalid use of MMA operand of type %qs as a function parameter", + IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)))); + return NULL_RTX; + } + + /* Return a marker to indicate whether CR1 needs to set or clear the + bit that V.4 uses to say fp args were passed in registers. + Assume that we don't need the marker for software floating point, + or compiler generated library calls. */ + if (arg.end_marker_p ()) + { + if (abi == ABI_V4 + && (cum->call_cookie & CALL_LIBCALL) == 0 + && (cum->stdarg + || (cum->nargs_prototype < 0 + && (cum->prototype || TARGET_NO_PROTOTYPE))) + && TARGET_HARD_FLOAT) + return GEN_INT (cum->call_cookie + | ((cum->fregno == FP_ARG_MIN_REG) + ? CALL_V4_SET_FP_ARGS + : CALL_V4_CLEAR_FP_ARGS)); + + return GEN_INT (cum->call_cookie & ~CALL_LIBCALL); + } + + rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts); + + if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type)) + { + rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false); + if (rslt != NULL_RTX) + return rslt; + /* Else fall through to usual handling. */ + } + + if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named)) + { + rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1]; + rtx r, off; + int i, k = 0; + + /* Do we also need to pass this argument in the parameter save area? + Library support functions for IEEE 128-bit are assumed to not need the + value passed both in GPRs and in vector registers. */ + if (TARGET_64BIT && !cum->prototype + && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode))) + { + int align_words = ROUND_UP (cum->words, 2); + k = rs6000_psave_function_arg (mode, type, align_words, rvec); + } + + /* Describe where this argument goes in the vector registers. 
*/ + for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++) + { + r = gen_rtx_REG (elt_mode, cum->vregno + i); + off = GEN_INT (i * GET_MODE_SIZE (elt_mode)); + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off); + } + + return rs6000_finish_function_arg (mode, rvec, k); + } + else if (TARGET_ALTIVEC_ABI + && (ALTIVEC_OR_VSX_VECTOR_MODE (mode) + || (type && TREE_CODE (type) == VECTOR_TYPE + && int_size_in_bytes (type) == 16))) + { + if (named || abi == ABI_V4) + return NULL_RTX; + else + { + /* Vector parameters to varargs functions under AIX or Darwin + get passed in memory and possibly also in GPRs. */ + int align, align_words, n_words; + machine_mode part_mode; + + /* Vector parameters must be 16-byte aligned. In 32-bit + mode this means we need to take into account the offset + to the parameter save area. In 64-bit mode, they just + have to start on an even word, since the parameter save + area is 16-byte aligned. */ + if (TARGET_32BIT) + align = -(rs6000_parm_offset () + cum->words) & 3; + else + align = cum->words & 1; + align_words = cum->words + align; + + /* Out of registers? Memory, then. */ + if (align_words >= GP_ARG_NUM_REG) + return NULL_RTX; + + if (TARGET_32BIT && TARGET_POWERPC64) + return rs6000_mixed_function_arg (mode, type, align_words); + + /* The vector value goes in GPRs. Only the part of the + value in GPRs is reported here. */ + part_mode = mode; + n_words = rs6000_arg_size (mode, type); + if (align_words + n_words > GP_ARG_NUM_REG) + /* Fortunately, there are only two possibilities, the value + is either wholly in GPRs or half in GPRs and half not. */ + part_mode = DImode; + + return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words); + } + } + + else if (abi == ABI_V4) + { + if (abi_v4_pass_in_fpr (mode, named)) + { + /* _Decimal128 must use an even/odd register pair. This assumes + that the register number is odd when fregno is odd. */ + if (mode == TDmode && (cum->fregno % 2) == 1) + cum->fregno++; + + if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0) + <= FP_ARG_V4_MAX_REG) + return gen_rtx_REG (mode, cum->fregno); + else + return NULL_RTX; + } + else + { + int n_words = rs6000_arg_size (mode, type); + int gregno = cum->sysv_gregno; + + /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10). + As does any other 2 word item such as complex int due to a + historical mistake. */ + if (n_words == 2) + gregno += (1 - gregno) & 1; + + /* Multi-reg args are not split between registers and stack. */ + if (gregno + n_words - 1 > GP_ARG_MAX_REG) + return NULL_RTX; + + if (TARGET_32BIT && TARGET_POWERPC64) + return rs6000_mixed_function_arg (mode, type, + gregno - GP_ARG_MIN_REG); + return gen_rtx_REG (mode, gregno); + } + } + else + { + int align_words = rs6000_parm_start (mode, type, cum->words); + + /* _Decimal128 must be passed in an even/odd float register pair. + This assumes that the register number is odd when fregno is odd. */ + if (elt_mode == TDmode && (cum->fregno % 2) == 1) + cum->fregno++; + + if (USE_FP_FOR_ARG_P (cum, elt_mode) + && !(TARGET_AIX && !TARGET_ELF + && type != NULL && AGGREGATE_TYPE_P (type))) + { + rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1]; + rtx r, off; + int i, k = 0; + unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3; + int fpr_words; + + /* Do we also need to pass this argument in the parameter + save area? 
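(For instance, in a hypothetical 64-bit call through an unprototyped + function pointer, a vector argument is also placed in GPRs or memory, + since the callee may look for it there rather than in VRs.) 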
*/ + if (type && (cum->nargs_prototype <= 0 + || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) + && TARGET_XL_COMPAT + && align_words >= GP_ARG_NUM_REG))) + k = rs6000_psave_function_arg (mode, type, align_words, rvec); + + /* Describe where this argument goes in the fprs. */ + for (i = 0; i < n_elts + && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++) + { + /* Check if the argument is split over registers and memory. + This can only ever happen for long double or _Decimal128; + complex types are handled via split_complex_arg. */ + machine_mode fmode = elt_mode; + if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1) + { + gcc_assert (FLOAT128_2REG_P (fmode)); + fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode; + } + + r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg); + off = GEN_INT (i * GET_MODE_SIZE (elt_mode)); + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off); + } + + /* If there were not enough FPRs to hold the argument, the rest + usually goes into memory. However, if the current position + is still within the register parameter area, a portion may + actually have to go into GPRs. + + Note that it may happen that the portion of the argument + passed in the first "half" of the first GPR was already + passed in the last FPR as well. + + For unnamed arguments, we already set up GPRs to cover the + whole argument in rs6000_psave_function_arg, so there is + nothing further to do at this point. */ + fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8); + if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG + && cum->nargs_prototype > 0) + { + machine_mode rmode = TARGET_32BIT ? SImode : DImode; + int n_words = rs6000_arg_size (mode, type); + + align_words += fpr_words; + n_words -= fpr_words; + + do + { + r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words); + off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode)); + rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off); + } + while (++align_words < GP_ARG_NUM_REG && --n_words != 0); + } + + return rs6000_finish_function_arg (mode, rvec, k); + } + else if (align_words < GP_ARG_NUM_REG) + { + if (TARGET_32BIT && TARGET_POWERPC64) + return rs6000_mixed_function_arg (mode, type, align_words); + + return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words); + } + else + return NULL_RTX; + } +} + +/* For an arg passed partly in registers and partly in memory, this is + the number of bytes passed in registers. For args passed entirely in + registers or entirely in memory, zero. When an arg is described by a + PARALLEL, perhaps using more than one register type, this function + returns the number of bytes used by the first element of the PARALLEL. */ + +int +rs6000_arg_partial_bytes (cumulative_args_t cum_v, + const function_arg_info &arg) +{ + CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); + bool passed_in_gprs = true; + int ret = 0; + int align_words; + machine_mode elt_mode; + int n_elts; + + rs6000_discover_homogeneous_aggregate (arg.mode, arg.type, + &elt_mode, &n_elts); + + if (DEFAULT_ABI == ABI_V4) + return 0; + + if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, arg.named)) + { + /* If we are passing this arg in the fixed parameter save area (gprs or + memory) as well as VRs, we do not use the partial bytes mechanism; + instead, rs6000_function_arg will return a PARALLEL including a memory + element as necessary. Library support functions for IEEE 128-bit are + assumed to not need the value passed both in GPRs and in vector + registers. 
*/ + if (TARGET_64BIT && !cum->prototype + && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode))) + return 0; + + /* Otherwise, we pass in VRs only. Check for partial copies. */ + passed_in_gprs = false; + if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1) + ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16; + } + + /* In this complicated case we just disable the partial_nregs code. */ + if (TARGET_MACHO && rs6000_darwin64_struct_check_p (arg.mode, arg.type)) + return 0; + + align_words = rs6000_parm_start (arg.mode, arg.type, cum->words); + + if (USE_FP_FOR_ARG_P (cum, elt_mode) + && !(TARGET_AIX && !TARGET_ELF && arg.aggregate_type_p ())) + { + unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3; + + /* If we are passing this arg in the fixed parameter save area + (gprs or memory) as well as FPRs, we do not use the partial + bytes mechanism; instead, rs6000_function_arg will return a + PARALLEL including a memory element as necessary. */ + if (arg.type + && (cum->nargs_prototype <= 0 + || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) + && TARGET_XL_COMPAT + && align_words >= GP_ARG_NUM_REG))) + return 0; + + /* Otherwise, we pass in FPRs only. Check for partial copies. */ + passed_in_gprs = false; + if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1) + { + /* Compute number of bytes / words passed in FPRs. If there + is still space available in the register parameter area + *after* that amount, a part of the argument will be passed + in GPRs. In that case, the total amount passed in any + registers is equal to the amount that would have been passed + in GPRs if everything were passed there, so we fall back to + the GPR code below to compute the appropriate value. */ + int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno) + * MIN (8, GET_MODE_SIZE (elt_mode))); + int fpr_words = fpr / (TARGET_32BIT ? 4 : 8); + + if (align_words + fpr_words < GP_ARG_NUM_REG) + passed_in_gprs = true; + else + ret = fpr; + } + } + + if (passed_in_gprs + && align_words < GP_ARG_NUM_REG + && GP_ARG_NUM_REG < align_words + rs6000_arg_size (arg.mode, arg.type)) + ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8); + + if (ret != 0 && TARGET_DEBUG_ARG) + fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret); + + return ret; +} + +/* A C expression that indicates when an argument must be passed by + reference. If nonzero for an argument, a copy of that argument is + made in memory and a pointer to the argument is passed instead of + the argument itself. The pointer is passed in whatever way is + appropriate for passing a pointer to that type. + + Under V.4, aggregates and long double are passed by reference. + + As an extension to all 32-bit ABIs, AltiVec vectors are passed by + reference unless the AltiVec vector extension ABI is in force. + + As an extension to all ABIs, variable sized types are passed by + reference. 
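For instance, with hypothetical declarations (not from this file) + + struct big { int a[8]; }; + void g (struct big b); + + a V.4 caller builds a temporary copy of B and passes its address in a + GPR, as if the parameter had type struct big *. 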
*/ + +bool +rs6000_pass_by_reference (cumulative_args_t, const function_arg_info &arg) +{ + if (!arg.type) + return 0; + + if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD + && FLOAT128_IEEE_P (TYPE_MODE (arg.type))) + { + if (TARGET_DEBUG_ARG) + fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n"); + return 1; + } + + if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (arg.type)) + { + if (TARGET_DEBUG_ARG) + fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n"); + return 1; + } + + if (int_size_in_bytes (arg.type) < 0) + { + if (TARGET_DEBUG_ARG) + fprintf (stderr, "function_arg_pass_by_reference: variable size\n"); + return 1; + } + + /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector + modes only exist for GCC vector types if -maltivec. */ + if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (arg.mode)) + { + if (TARGET_DEBUG_ARG) + fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n"); + return 1; + } + + /* Pass synthetic vectors in memory. */ + if (TREE_CODE (arg.type) == VECTOR_TYPE + && int_size_in_bytes (arg.type) > (TARGET_ALTIVEC_ABI ? 16 : 8)) + { + static bool warned_for_pass_big_vectors = false; + if (TARGET_DEBUG_ARG) + fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n"); + if (!warned_for_pass_big_vectors) + { + warning (OPT_Wpsabi, "GCC vector passed by reference: " + "non-standard ABI extension with no compatibility " + "guarantee"); + warned_for_pass_big_vectors = true; + } + return 1; + } + + return 0; +} + +/* Process parameter of type TYPE after ARGS_SO_FAR parameters were + already processed. Return true if the parameter must be passed + (fully or partially) on the stack. */ + +static bool +rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type) +{ + int unsignedp; + rtx entry_parm; + + /* Catch errors. */ + if (type == NULL || type == error_mark_node) + return true; + + /* Handle types with no storage requirement. */ + if (TYPE_MODE (type) == VOIDmode) + return false; + + /* Handle complex types. */ + if (TREE_CODE (type) == COMPLEX_TYPE) + return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)) + || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))); + + /* Handle transparent aggregates. */ + if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE) + && TYPE_TRANSPARENT_AGGR (type)) + type = TREE_TYPE (first_field (type)); + + /* See if this arg was passed by invisible reference. */ + function_arg_info arg (type, /*named=*/true); + apply_pass_by_reference_rules (get_cumulative_args (args_so_far), arg); + + /* Find mode as it is passed by the ABI. */ + unsignedp = TYPE_UNSIGNED (type); + arg.mode = promote_mode (arg.type, arg.mode, &unsignedp); + + /* If we must pass in stack, we need a stack. */ + if (rs6000_must_pass_in_stack (arg)) + return true; + + /* If there is no incoming register, we need a stack. */ + entry_parm = rs6000_function_arg (args_so_far, arg); + if (entry_parm == NULL) + return true; + + /* Likewise if we need to pass both in registers and on the stack. */ + if (GET_CODE (entry_parm) == PARALLEL + && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX) + return true; + + /* Also true if we're partially in registers and partially not. */ + if (rs6000_arg_partial_bytes (args_so_far, arg) != 0) + return true; + + /* Update info on where next arg arrives in registers. 
*/ + rs6000_function_arg_advance (args_so_far, arg); + return false; +} + +/* Return true if FUN has no prototype, has a variable argument + list, or passes any parameter in memory. */ + +static bool +rs6000_function_parms_need_stack (tree fun, bool incoming) +{ + tree fntype, result; + CUMULATIVE_ARGS args_so_far_v; + cumulative_args_t args_so_far; + + if (!fun) + /* Must be a libcall, all of which only use reg parms. */ + return false; + + fntype = fun; + if (!TYPE_P (fun)) + fntype = TREE_TYPE (fun); + + /* Varargs functions need the parameter save area. */ + if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype)) + return true; + + INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX); + args_so_far = pack_cumulative_args (&args_so_far_v); + + /* When incoming, we will have been passed the function decl. + It is necessary to use the decl to handle K&R style functions, + where TYPE_ARG_TYPES may not be available. */ + if (incoming) + { + gcc_assert (DECL_P (fun)); + result = DECL_RESULT (fun); + } + else + result = TREE_TYPE (fntype); + + if (result && aggregate_value_p (result, fntype)) + { + if (!TYPE_P (result)) + result = TREE_TYPE (result); + result = build_pointer_type (result); + rs6000_parm_needs_stack (args_so_far, result); + } + + if (incoming) + { + tree parm; + + for (parm = DECL_ARGUMENTS (fun); + parm && parm != void_list_node; + parm = TREE_CHAIN (parm)) + if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm))) + return true; + } + else + { + function_args_iterator args_iter; + tree arg_type; + + FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter) + if (rs6000_parm_needs_stack (args_so_far, arg_type)) + return true; + } + + return false; +} + +/* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is + usually a constant depending on the ABI. However, in the ELFv2 ABI + the register parameter area is optional when calling a function that + has a prototype in scope, has no variable argument list, and passes + all parameters in registers. */ + +int +rs6000_reg_parm_stack_space (tree fun, bool incoming) +{ + int reg_parm_stack_space; + + switch (DEFAULT_ABI) + { + default: + reg_parm_stack_space = 0; + break; + + case ABI_AIX: + case ABI_DARWIN: + reg_parm_stack_space = TARGET_64BIT ? 64 : 32; + break; + + case ABI_ELFv2: + /* ??? Recomputing this every time is a bit expensive. Is there + a place to cache this information? */ + if (rs6000_function_parms_need_stack (fun, incoming)) + reg_parm_stack_space = TARGET_64BIT ? 64 : 32; + else + reg_parm_stack_space = 0; + break; + } + + return reg_parm_stack_space; +} + +static void +rs6000_move_block_from_reg (int regno, rtx x, int nregs) +{ + int i; + machine_mode reg_mode = TARGET_32BIT ? SImode : DImode; + + if (nregs == 0) + return; + + for (i = 0; i < nregs; i++) + { + rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode)); + if (reload_completed) + { + if (! strict_memory_address_p (reg_mode, XEXP (tem, 0))) + tem = NULL_RTX; + else + tem = simplify_gen_subreg (reg_mode, x, BLKmode, + i * GET_MODE_SIZE (reg_mode)); + } + else + tem = replace_equiv_address (tem, XEXP (tem, 0)); + + gcc_assert (tem); + + emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i)); + } +} + +/* Perform any actions needed for a function that is receiving a + variable number of arguments. + + CUM is as above. + + ARG is the last named argument. + + PRETEND_SIZE is a variable that should be set to the amount of stack + that must be pushed by the prolog to pretend that our caller pushed + it. 
+ + Normally, this macro will push all remaining incoming registers on the + stack and set PRETEND_SIZE to the length of the registers pushed. */ + +void +setup_incoming_varargs (cumulative_args_t cum, + const function_arg_info &arg, + int *pretend_size ATTRIBUTE_UNUSED, int no_rtl) +{ + CUMULATIVE_ARGS next_cum; + int reg_size = TARGET_32BIT ? 4 : 8; + rtx save_area = NULL_RTX, mem; + int first_reg_offset; + alias_set_type set; + + /* Skip the last named argument. */ + next_cum = *get_cumulative_args (cum); + rs6000_function_arg_advance_1 (&next_cum, arg.mode, arg.type, arg.named, 0); + + if (DEFAULT_ABI == ABI_V4) + { + first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG; + + if (! no_rtl) + { + int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0; + HOST_WIDE_INT offset = 0; + + /* Try to optimize the size of the varargs save area. + The ABI requires that ap.reg_save_area is doubleword + aligned, but we don't need to allocate space for all + the bytes, only those to which we actually will save + anything. */ + if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG) + gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset; + if (TARGET_HARD_FLOAT + && next_cum.fregno <= FP_ARG_V4_MAX_REG + && cfun->va_list_fpr_size) + { + if (gpr_reg_num) + fpr_size = (next_cum.fregno - FP_ARG_MIN_REG) + * UNITS_PER_FP_WORD; + if (cfun->va_list_fpr_size + < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno) + fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD; + else + fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno) + * UNITS_PER_FP_WORD; + } + if (gpr_reg_num) + { + offset = -((first_reg_offset * reg_size) & ~7); + if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size) + { + gpr_reg_num = cfun->va_list_gpr_size; + if (reg_size == 4 && (first_reg_offset & 1)) + gpr_reg_num++; + } + gpr_size = (gpr_reg_num * reg_size + 7) & ~7; + } + else if (fpr_size) + offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG) + * UNITS_PER_FP_WORD + - (int) (GP_ARG_NUM_REG * reg_size); + + if (gpr_size + fpr_size) + { + rtx reg_save_area + = assign_stack_local (BLKmode, gpr_size + fpr_size, 64); + gcc_assert (MEM_P (reg_save_area)); + reg_save_area = XEXP (reg_save_area, 0); + if (GET_CODE (reg_save_area) == PLUS) + { + gcc_assert (XEXP (reg_save_area, 0) + == virtual_stack_vars_rtx); + gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1))); + offset += INTVAL (XEXP (reg_save_area, 1)); + } + else + gcc_assert (reg_save_area == virtual_stack_vars_rtx); + } + + cfun->machine->varargs_save_offset = offset; + save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset); + } + } + else + { + first_reg_offset = next_cum.words; + save_area = crtl->args.internal_arg_pointer; + + if (targetm.calls.must_pass_in_stack (arg)) + first_reg_offset += rs6000_arg_size (TYPE_MODE (arg.type), arg.type); + } + + set = get_varargs_alias_set (); + if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG + && cfun->va_list_gpr_size) + { + int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset; + + if (va_list_gpr_counter_field) + /* V4 va_list_gpr_size counts number of registers needed. */ + n_gpr = cfun->va_list_gpr_size; + else + /* char * va_list instead counts number of bytes needed. 
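+ For example, with 64-bit registers (reg_size == 8) a
+ va_list_gpr_size of 20 bytes rounds up to
+ n_gpr == (20 + 7) / 8 == 3 registers.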
*/ + n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size; + + if (nregs > n_gpr) + nregs = n_gpr; + + mem = gen_rtx_MEM (BLKmode, + plus_constant (Pmode, save_area, + first_reg_offset * reg_size)); + MEM_NOTRAP_P (mem) = 1; + set_mem_alias_set (mem, set); + set_mem_align (mem, BITS_PER_WORD); + + rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem, + nregs); + } + + /* Save FP registers if needed. */ + if (DEFAULT_ABI == ABI_V4 + && TARGET_HARD_FLOAT + && ! no_rtl + && next_cum.fregno <= FP_ARG_V4_MAX_REG + && cfun->va_list_fpr_size) + { + int fregno = next_cum.fregno, nregs; + rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO); + rtx lab = gen_label_rtx (); + int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG) + * UNITS_PER_FP_WORD); + + emit_jump_insn + (gen_rtx_SET (pc_rtx, + gen_rtx_IF_THEN_ELSE (VOIDmode, + gen_rtx_NE (VOIDmode, cr1, + const0_rtx), + gen_rtx_LABEL_REF (VOIDmode, lab), + pc_rtx))); + + for (nregs = 0; + fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size; + fregno++, off += UNITS_PER_FP_WORD, nregs++) + { + mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode, + plus_constant (Pmode, save_area, off)); + MEM_NOTRAP_P (mem) = 1; + set_mem_alias_set (mem, set); + set_mem_align (mem, GET_MODE_ALIGNMENT ( + TARGET_HARD_FLOAT ? DFmode : SFmode)); + emit_move_insn (mem, gen_rtx_REG ( + TARGET_HARD_FLOAT ? DFmode : SFmode, fregno)); + } + + emit_label (lab); + } +} + +/* Create the va_list data type. */ + +tree +rs6000_build_builtin_va_list (void) +{ + tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl; + + /* For AIX, prefer 'char *' because that's what the system + header files like. */ + if (DEFAULT_ABI != ABI_V4) + return build_pointer_type (char_type_node); + + record = (*lang_hooks.types.make_type) (RECORD_TYPE); + type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL, + get_identifier ("__va_list_tag"), record); + + f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"), + unsigned_char_type_node); + f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"), + unsigned_char_type_node); + /* Give the two bytes of padding a name, so that -Wpadded won't warn on + every user file. */ + f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL, + get_identifier ("reserved"), short_unsigned_type_node); + f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL, + get_identifier ("overflow_arg_area"), + ptr_type_node); + f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL, + get_identifier ("reg_save_area"), + ptr_type_node); + + va_list_gpr_counter_field = f_gpr; + va_list_fpr_counter_field = f_fpr; + + DECL_FIELD_CONTEXT (f_gpr) = record; + DECL_FIELD_CONTEXT (f_fpr) = record; + DECL_FIELD_CONTEXT (f_res) = record; + DECL_FIELD_CONTEXT (f_ovf) = record; + DECL_FIELD_CONTEXT (f_sav) = record; + + TYPE_STUB_DECL (record) = type_decl; + TYPE_NAME (record) = type_decl; + TYPE_FIELDS (record) = f_gpr; + DECL_CHAIN (f_gpr) = f_fpr; + DECL_CHAIN (f_fpr) = f_res; + DECL_CHAIN (f_res) = f_ovf; + DECL_CHAIN (f_ovf) = f_sav; + + layout_type (record); + + /* The correct type is an array type of one element. */ + return build_array_type (record, build_index_type (size_zero_node)); +} + +/* Implement va_start. */ + +void +rs6000_va_start (tree valist, rtx nextarg) +{ + HOST_WIDE_INT words, n_gpr, n_fpr; + tree f_gpr, f_fpr, f_res, f_ovf, f_sav; + tree gpr, fpr, ovf, sav, t; + + /* Only SVR4 needs something special. 
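+ As an illustrative sketch (this exact declaration does not appear
+ in the sources), the record built by rs6000_build_builtin_va_list
+ above is equivalent to:
+
+ typedef struct __va_list_tag
+ {
+ unsigned char gpr; // gp argument registers consumed
+ unsigned char fpr; // fp argument registers consumed
+ unsigned short reserved; // padding, named for -Wpadded
+ void *overflow_arg_area; // stack args past the register area
+ void *reg_save_area; // the GPR/FPR varargs save block
+ } __builtin_va_list[1];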
*/ + if (DEFAULT_ABI != ABI_V4) + { + std_expand_builtin_va_start (valist, nextarg); + return; + } + + f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node)); + f_fpr = DECL_CHAIN (f_gpr); + f_res = DECL_CHAIN (f_fpr); + f_ovf = DECL_CHAIN (f_res); + f_sav = DECL_CHAIN (f_ovf); + + valist = build_simple_mem_ref (valist); + gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE); + fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist), + f_fpr, NULL_TREE); + ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist), + f_ovf, NULL_TREE); + sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist), + f_sav, NULL_TREE); + + /* Count number of gp and fp argument registers used. */ + words = crtl->args.info.words; + n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG, + GP_ARG_NUM_REG); + n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG, + FP_ARG_NUM_REG); + + if (TARGET_DEBUG_ARG) + fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = " + HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n", + words, n_gpr, n_fpr); + + if (cfun->va_list_gpr_size) + { + t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, + build_int_cst (NULL_TREE, n_gpr)); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + } + + if (cfun->va_list_fpr_size) + { + t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, + build_int_cst (NULL_TREE, n_fpr)); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + +#ifdef HAVE_AS_GNU_ATTRIBUTE + if (call_ABI_of_interest (cfun->decl)) + rs6000_passes_float = true; +#endif + } + + /* Find the overflow area. */ + t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer); + if (words != 0) + t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD); + t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); + + /* If there were no va_arg invocations, don't set up the register + save area. */ + if (!cfun->va_list_gpr_size + && !cfun->va_list_fpr_size + && n_gpr < GP_ARG_NUM_REG + && n_fpr < FP_ARG_V4_MAX_REG) + return; + + /* Find the register save area. */ + t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx); + if (cfun->machine->varargs_save_offset) + t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset); + t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t); + TREE_SIDE_EFFECTS (t) = 1; + expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); +} + +/* Implement va_arg. */ + +tree +rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p, + gimple_seq *post_p) +{ + tree f_gpr, f_fpr, f_res, f_ovf, f_sav; + tree gpr, fpr, ovf, sav, reg, t, u; + int size, rsize, n_reg, sav_ofs, sav_scale; + tree lab_false, lab_over, addr; + int align; + tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true); + int regalign = 0; + gimple *stmt; + + if (pass_va_arg_by_reference (type)) + { + t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p); + return build_va_arg_indirect_ref (t); + } + + /* We need to deal with the fact that the darwin ppc64 ABI is defined by an + earlier version of gcc, with the property that it always applied alignment + adjustments to the va-args (even for zero-sized types). The cheapest way + to deal with this is to replicate the effect of the part of + std_gimplify_va_arg_expr that carries out the align adjust, for the case + of relevance. 
+ We don't need to check for pass-by-reference because of the test above.
+ We can return a simplified answer, since we know there's no offset to add. */
+
+ if (((TARGET_MACHO
+ && rs6000_darwin64_abi)
+ || DEFAULT_ABI == ABI_ELFv2
+ || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
+ && integer_zerop (TYPE_SIZE (type)))
+ {
+ unsigned HOST_WIDE_INT align, boundary;
+ tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
+ align = PARM_BOUNDARY / BITS_PER_UNIT;
+ boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
+ if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
+ boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
+ boundary /= BITS_PER_UNIT;
+ if (boundary > align)
+ {
+ tree t;
+ /* This updates arg ptr by the amount that would be necessary
+ to align the zero-sized (but not zero-alignment) item. */
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
+ fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
+ gimplify_and_add (t, pre_p);
+
+ t = fold_convert (sizetype, valist_tmp);
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
+ fold_convert (TREE_TYPE (valist),
+ fold_build2 (BIT_AND_EXPR, sizetype, t,
+ size_int (-boundary))));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
+ gimplify_and_add (t, pre_p);
+ }
+ /* Since it is zero-sized there's no increment for the item itself. */
+ valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
+ return build_va_arg_indirect_ref (valist_tmp);
+ }
+
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ tree elem_type = TREE_TYPE (type);
+ machine_mode elem_mode = TYPE_MODE (elem_type);
+ int elem_size = GET_MODE_SIZE (elem_mode);
+
+ if (elem_size < UNITS_PER_WORD)
+ {
+ tree real_part, imag_part;
+ gimple_seq post = NULL;
+
+ real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ &post);
+ /* Copy the value into a temporary, lest the formal temporary
+ be reused out from under us. */
+ real_part = get_initialized_tmp_var (real_part, pre_p, &post);
+ gimple_seq_add_seq (pre_p, post);
+
+ imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ post_p);
+
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
+ }
+ }
+
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+ }
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = DECL_CHAIN (f_gpr);
+ f_res = DECL_CHAIN (f_fpr);
+ f_ovf = DECL_CHAIN (f_res);
+ f_sav = DECL_CHAIN (f_ovf);
+
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
+ f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
+ f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
+ f_sav, NULL_TREE);
+
+ size = int_size_in_bytes (type);
+ rsize = (size + 3) / 4;
+ int pad = 4 * rsize - size;
+ align = 1;
+
+ machine_mode mode = TYPE_MODE (type);
+ if (abi_v4_pass_in_fpr (mode, false))
+ {
+ /* FP args go in FP registers, if present. */
+ reg = fpr;
+ n_reg = (size + 7) / 8;
+ sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
+ sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
+ if (mode != SFmode && mode != SDmode)
+ align = 8;
+ }
+ else
+ {
+ /* Otherwise into GP registers. */
+ reg = gpr;
+ n_reg = rsize;
+ sav_ofs = 0;
+ sav_scale = 4;
+ if (n_reg == 2)
+ align = 8;
+ }
+
+ /* Pull the value out of the saved registers....
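+ For example, with hard float a 'double' takes the FPR path with
+ n_reg == 1, sav_ofs == 32 (the eight 4-byte GPR slots precede the
+ FPRs in the save area) and sav_scale == 8, so it is fetched from
+ reg_save_area + 32 + fpr * 8; a 'long long' takes the GPR path
+ with n_reg == 2, sav_scale == 4 and align == 8.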
*/
+
+ lab_over = NULL;
+ addr = create_tmp_var (ptr_type_node, "addr");
+
+ /* AltiVec vectors never go in registers when -mabi=altivec. */
+ if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ align = 16;
+ else
+ {
+ lab_false = create_artificial_label (input_location);
+ lab_over = create_artificial_label (input_location);
+
+ /* Long long is aligned in the registers. As are any other 2 gpr
+ item such as complex int due to a historical mistake. */
+ u = reg;
+ if (n_reg == 2 && reg == gpr)
+ {
+ regalign = 1;
+ u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
+ build_int_cst (TREE_TYPE (reg), n_reg - 1));
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
+ unshare_expr (reg), u);
+ }
+ /* _Decimal128 is passed in even/odd fpr pairs; the stored
+ reg number is 0 for f1, so we want to make it odd. */
+ else if (reg == fpr && mode == TDmode)
+ {
+ t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
+ build_int_cst (TREE_TYPE (reg), 1));
+ u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
+ }
+
+ t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
+ t = build2 (GE_EXPR, boolean_type_node, u, t);
+ u = build1 (GOTO_EXPR, void_type_node, lab_false);
+ t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
+ gimplify_and_add (t, pre_p);
+
+ t = sav;
+ if (sav_ofs)
+ t = fold_build_pointer_plus_hwi (sav, sav_ofs);
+
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
+ build_int_cst (TREE_TYPE (reg), n_reg));
+ u = fold_convert (sizetype, u);
+ u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
+ t = fold_build_pointer_plus (t, u);
+
+ /* _Decimal32 varargs are located in the second word of the 64-bit
+ FP register for 32-bit binaries. */
+ if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
+ t = fold_build_pointer_plus_hwi (t, size);
+
+ /* Args are passed right-aligned. */
+ if (BYTES_BIG_ENDIAN)
+ t = fold_build_pointer_plus_hwi (t, pad);
+
+ gimplify_assign (addr, t, pre_p);
+
+ gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
+
+ stmt = gimple_build_label (lab_false);
+ gimple_seq_add_stmt (pre_p, stmt);
+
+ if ((n_reg == 2 && !regalign) || n_reg > 2)
+ {
+ /* Ensure that we don't find any more args in regs.
+ Alignment has been taken care of for the special cases. */
+ gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
+ }
+ }
+
+ /* ... otherwise out of the overflow area. */
+
+ /* Care for on-stack alignment if needed. */
+ t = ovf;
+ if (align != 1)
+ {
+ t = fold_build_pointer_plus_hwi (t, align - 1);
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ build_int_cst (TREE_TYPE (t), -align));
+ }
+
+ /* Args are passed right-aligned. */
+ if (BYTES_BIG_ENDIAN)
+ t = fold_build_pointer_plus_hwi (t, pad);
+
+ gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
+
+ gimplify_assign (unshare_expr (addr), t, pre_p);
+
+ t = fold_build_pointer_plus_hwi (t, size);
+ gimplify_assign (unshare_expr (ovf), t, pre_p);
+
+ if (lab_over)
+ {
+ stmt = gimple_build_label (lab_over);
+ gimple_seq_add_stmt (pre_p, stmt);
+ }
+
+ if (STRICT_ALIGNMENT
+ && (TYPE_ALIGN (type)
+ > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
+ {
+ /* The value (of type complex double, for example) may not be
+ aligned in memory in the saved registers, so copy via a
+ temporary. (This is the same code as used for SPARC.)
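+ The code below memcpy's rsize * 4 bytes (the size rounded up to
+ a word multiple earlier) from ADDR into a stack temporary with
+ the type's natural alignment, and the temporary's address is
+ used instead.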
*/ + tree tmp = create_tmp_var (type, "va_arg_tmp"); + tree dest_addr = build_fold_addr_expr (tmp); + + tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY), + 3, dest_addr, addr, size_int (rsize * 4)); + TREE_ADDRESSABLE (tmp) = 1; + + gimplify_and_add (copy, pre_p); + addr = dest_addr; + } + + addr = fold_convert (ptrtype, addr); + return build_va_arg_indirect_ref (addr); +} + +/* Debug utility to translate a type node to a single textual token. */ +static +const char *rs6000_type_string (tree type_node) +{ + if (type_node == void_type_node) + return "void"; + else if (type_node == long_integer_type_node) + return "long"; + else if (type_node == long_unsigned_type_node) + return "ulong"; + else if (type_node == long_long_integer_type_node) + return "longlong"; + else if (type_node == long_long_unsigned_type_node) + return "ulonglong"; + else if (type_node == bool_V2DI_type_node) + return "vbll"; + else if (type_node == bool_V4SI_type_node) + return "vbi"; + else if (type_node == bool_V8HI_type_node) + return "vbs"; + else if (type_node == bool_V16QI_type_node) + return "vbc"; + else if (type_node == bool_int_type_node) + return "bool"; + else if (type_node == dfloat64_type_node) + return "_Decimal64"; + else if (type_node == double_type_node) + return "double"; + else if (type_node == intDI_type_node) + return "sll"; + else if (type_node == intHI_type_node) + return "ss"; + else if (type_node == ibm128_float_type_node) + return "__ibm128"; + else if (type_node == opaque_V4SI_type_node) + return "opaque"; + else if (POINTER_TYPE_P (type_node)) + return "void*"; + else if (type_node == intQI_type_node || type_node == char_type_node) + return "sc"; + else if (type_node == dfloat32_type_node) + return "_Decimal32"; + else if (type_node == float_type_node) + return "float"; + else if (type_node == intSI_type_node || type_node == integer_type_node) + return "si"; + else if (type_node == dfloat128_type_node) + return "_Decimal128"; + else if (type_node == long_double_type_node) + return "longdouble"; + else if (type_node == intTI_type_node) + return "sq"; + else if (type_node == unsigned_intDI_type_node) + return "ull"; + else if (type_node == unsigned_intHI_type_node) + return "us"; + else if (type_node == unsigned_intQI_type_node) + return "uc"; + else if (type_node == unsigned_intSI_type_node) + return "ui"; + else if (type_node == unsigned_intTI_type_node) + return "uq"; + else if (type_node == unsigned_V1TI_type_node) + return "vuq"; + else if (type_node == unsigned_V2DI_type_node) + return "vull"; + else if (type_node == unsigned_V4SI_type_node) + return "vui"; + else if (type_node == unsigned_V8HI_type_node) + return "vus"; + else if (type_node == unsigned_V16QI_type_node) + return "vuc"; + else if (type_node == V16QI_type_node) + return "vsc"; + else if (type_node == V1TI_type_node) + return "vsq"; + else if (type_node == V2DF_type_node) + return "vd"; + else if (type_node == V2DI_type_node) + return "vsll"; + else if (type_node == V4SF_type_node) + return "vf"; + else if (type_node == V4SI_type_node) + return "vsi"; + else if (type_node == V8HI_type_node) + return "vss"; + else if (type_node == pixel_V8HI_type_node) + return "vp"; + else if (type_node == pcvoid_type_node) + return "voidc*"; + else if (type_node == float128_type_node) + return "_Float128"; + else if (type_node == vector_pair_type_node) + return "__vector_pair"; + else if (type_node == vector_quad_type_node) + return "__vector_quad"; + + return "unknown"; +} + +static void +def_builtin (const char *name, 
tree type, enum rs6000_builtins code) +{ + tree t; + unsigned classify = rs6000_builtin_info[(int)code].attr; + const char *attr_string = ""; + + /* Don't define the builtin if it doesn't have a type. See PR92661. */ + if (type == NULL_TREE) + return; + + gcc_assert (name != NULL); + gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT)); + + if (rs6000_builtin_decls[(int)code]) + fatal_error (input_location, + "internal error: builtin function %qs already processed", + name); + + rs6000_builtin_decls[(int)code] = t = + add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE); + + /* Set any special attributes. */ + if ((classify & RS6000_BTC_CONST) != 0) + { + /* const function, function only depends on the inputs. */ + TREE_READONLY (t) = 1; + TREE_NOTHROW (t) = 1; + attr_string = "= const"; + } + else if ((classify & RS6000_BTC_PURE) != 0) + { + /* pure function, function can read global memory, but does not set any + external state. */ + DECL_PURE_P (t) = 1; + TREE_NOTHROW (t) = 1; + attr_string = "= pure"; + } + else if ((classify & RS6000_BTC_FP) != 0) + { + /* Function is a math function. If rounding mode is on, then treat the + function as not reading global memory, but it can have arbitrary side + effects. If it is off, then assume the function is a const function. + This mimics the ATTR_MATHFN_FPROUNDING attribute in + builtin-attribute.def that is used for the math functions. */ + TREE_NOTHROW (t) = 1; + if (flag_rounding_math) + { + DECL_PURE_P (t) = 1; + DECL_IS_NOVOPS (t) = 1; + attr_string = "= fp, pure"; + } + else + { + TREE_READONLY (t) = 1; + attr_string = "= fp, const"; + } + } + else if ((classify & (RS6000_BTC_QUAD | RS6000_BTC_PAIR)) != 0) + /* The function uses a register quad and/or pair. Nothing to do. */ + ; + else if ((classify & RS6000_BTC_ATTR_MASK) != 0) + gcc_unreachable (); + + if (TARGET_DEBUG_BUILTIN) + { + tree t = TREE_TYPE (type); + fprintf (stderr, "%s %s (", rs6000_type_string (t), name); + t = TYPE_ARG_TYPES (type); + gcc_assert (t); + + while (TREE_VALUE (t) != void_type_node) + { + fprintf (stderr, "%s", rs6000_type_string (TREE_VALUE (t))); + t = TREE_CHAIN (t); + gcc_assert (t); + if (TREE_VALUE (t) != void_type_node) + fprintf (stderr, ", "); + } + fprintf (stderr, "); %s [%4d]\n", attr_string, (int) code); + } +} + +static const struct builtin_compatibility bdesc_compat[] = +{ +#define RS6000_BUILTIN_COMPAT +#include "rs6000-builtin.def" +}; +#undef RS6000_BUILTIN_COMPAT + +/* Simple ternary operations: VECd = foo (VECa, VECb, VECc). 
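+ The descriptor tables that follow use the X-macro idiom: before
+ each table every RS6000_BUILTIN_* macro is #undef'd and redefined
+ as empty except the one class being collected, so each #include of
+ rs6000-builtin.def expands only that class into
+ { MASK, ICODE, NAME, ENUM } initializers.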
*/ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_3arg[] = +{ +#include "rs6000-builtin.def" +}; + +/* Simple quaternary operations: VECd = foo (VECa, VECb, VECc, VECd). */ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_4arg[] = +{ +#include "rs6000-builtin.def" +}; + +/* DST operations: void foo (void *, const int, const char). */ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_dst[] = +{ +#include "rs6000-builtin.def" +}; + +/* Simple binary operations: VECc = foo (VECa, VECb). 
*/ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_2arg[] = +{ +#include "rs6000-builtin.def" +}; + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +/* AltiVec predicates. */ + +static const struct builtin_description bdesc_altivec_preds[] = +{ +#include "rs6000-builtin.def" +}; + +/* ABS* operations. */ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_abs[] = +{ +#include "rs6000-builtin.def" +}; + +/* Simple unary operations: VECb = foo (unsigned literal) or VECb = + foo (VECa). 
*/ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_1arg[] = +{ +#include "rs6000-builtin.def" +}; + +/* Simple no-argument operations: result = __builtin_darn_32 () */ + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_0arg[] = +{ +#include "rs6000-builtin.def" +}; + +/* HTM builtins. */ +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_htm[] = +{ +#include "rs6000-builtin.def" +}; + +/* MMA builtins. 
*/ +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_4(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_M(ENUM, NAME, MASK, ATTR, ICODE) \ + { MASK, ICODE, NAME, ENUM }, + +#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) +#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) + +static const struct builtin_description bdesc_mma[] = +{ +#include "rs6000-builtin.def" +}; + +#undef RS6000_BUILTIN_0 +#undef RS6000_BUILTIN_1 +#undef RS6000_BUILTIN_2 +#undef RS6000_BUILTIN_3 +#undef RS6000_BUILTIN_4 +#undef RS6000_BUILTIN_A +#undef RS6000_BUILTIN_D +#undef RS6000_BUILTIN_H +#undef RS6000_BUILTIN_M +#undef RS6000_BUILTIN_P +#undef RS6000_BUILTIN_X + +/* Return true if a builtin function is overloaded. */ +bool +rs6000_overloaded_builtin_p (enum rs6000_builtins fncode) +{ + return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0; +} + +const char * +rs6000_overloaded_builtin_name (enum rs6000_builtins fncode) +{ + return rs6000_builtin_info[(int)fncode].name; +} + +/* Expand an expression EXP that calls a builtin without arguments. */ +static rtx +rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target) +{ + rtx pat; + machine_mode tmode = insn_data[icode].operand[0].mode; + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return 0; + + if (icode == CODE_FOR_rs6000_mffsl + && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT) + { + error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>"); + return const0_rtx; + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + pat = GEN_FCN (icode) (target); + if (! pat) + return 0; + emit_insn (pat); + + return target; +} + + +static rtx +rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp) +{ + rtx pat; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + machine_mode mode0 = insn_data[icode].operand[0].mode; + machine_mode mode1 = insn_data[icode].operand[1].mode; + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return 0; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node || arg1 == error_mark_node) + return const0_rtx; + + if (!CONST_INT_P (op0) + || INTVAL (op0) > 255 + || INTVAL (op0) < 0) + { + error ("argument 1 must be an 8-bit field value"); + return const0_rtx; + } + + if (! (*insn_data[icode].operand[0].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + + if (! 
(*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (!pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ return NULL_RTX;
+}
+
+static rtx
+rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
+ {
+ error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
+ "%<-msoft-float%>");
+ return const0_rtx;
+ }
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ /* Only allow bit numbers 0 to 31. */
+ if (!u5bit_cint_operand (op0, VOIDmode))
+ {
+ error ("argument must be a constant between 0 and 31");
+ return const0_rtx;
+ }
+
+ pat = GEN_FCN (icode) (op0);
+ if (!pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ return NULL_RTX;
+}
+
+static rtx
+rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ machine_mode mode0 = insn_data[icode].operand[0].mode;
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
+ {
+ error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
+ return const0_rtx;
+ }
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ /* If the argument is a constant, check the range. Argument can only be a
+ 2-bit value. Unfortunately, can't check the range of the value at
+ compile time if the argument is a variable. The least significant two
+ bits of the argument, regardless of type, are used to set the rounding
+ mode. All other bits are ignored. */
+ if (CONST_INT_P (op0) && !const_0_to_3_operand (op0, VOIDmode))
+ {
+ error ("argument must be a value between 0 and 3");
+ return const0_rtx;
+ }
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (!pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ return NULL_RTX;
+}
+
+static rtx
+rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ machine_mode mode0 = insn_data[icode].operand[0].mode;
+
+ if (TARGET_32BIT)
+ /* Builtin not supported in 32-bit mode. */
+ fatal_error (input_location,
+ "%<__builtin_set_fpscr_drn%> is not supported "
+ "in 32-bit mode");
+
+ if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
+ {
+ error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
+ return const0_rtx;
+ }
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ /* If the argument is a constant, check the range. Argument can only be a
+ 3-bit value. Unfortunately, can't check the range of the value at
+ compile time if the argument is a variable. The least significant three
+ bits of the argument, regardless of type, are used to set the decimal
+ rounding mode. All other bits are ignored.
*/
+ if (CONST_INT_P (op0) && !const_0_to_7_operand (op0, VOIDmode))
+ {
+ error ("argument must be a value between 0 and 7");
+ return const0_rtx;
+ }
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (! pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ return NULL_RTX;
+}
+
+static rtx
+rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ machine_mode tmode = insn_data[icode].operand[0].mode;
+ machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vspltisb
+ || icode == CODE_FOR_altivec_vspltish
+ || icode == CODE_FOR_altivec_vspltisw)
+ {
+ /* Only allow 5-bit *signed* literals. */
+ if (!CONST_INT_P (op0)
+ || INTVAL (op0) > 15
+ || INTVAL (op0) < -16)
+ {
+ error ("argument 1 must be a 5-bit signed literal");
+ return CONST0_RTX (tmode);
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, scratch1, scratch2;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ machine_mode tmode = insn_data[icode].operand[0].mode;
+ machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ scratch1 = gen_reg_rtx (mode0);
+ scratch2 = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ machine_mode tmode = insn_data[icode].operand[0].mode;
+ machine_mode mode0 = insn_data[icode].operand[1].mode;
+ machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_unpackv1ti
+ || icode == CODE_FOR_unpackkf
+ || icode == CODE_FOR_unpacktf
+ || icode == CODE_FOR_unpackif
+ || icode == CODE_FOR_unpacktd
+ || icode == CODE_FOR_vec_cntmb_v16qi
+ || icode == CODE_FOR_vec_cntmb_v8hi
+ || icode == CODE_FOR_vec_cntmb_v4si
+ || icode == CODE_FOR_vec_cntmb_v2di)
+ {
+ /* Only allow 1-bit unsigned literals.
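+ (The STRIP_NOPS calls below remove implicit conversion wrappers
+ so that a constant argument is seen as a bare INTEGER_CST and can
+ be range-checked at compile time; the same pattern recurs for
+ each of the literal-operand checks that follow.)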
*/ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1)) + { + error ("argument 2 must be a 1-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_altivec_vspltw) + { + /* Only allow 2-bit unsigned literals. */ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || TREE_INT_CST_LOW (arg1) & ~3) + { + error ("argument 2 must be a 2-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_vgnb) + { + /* Only allow unsigned literals in range 2..7. */ + /* Note that arg1 is second operand. */ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || (TREE_INT_CST_LOW (arg1) & ~7) + || !IN_RANGE (TREE_INT_CST_LOW (arg1), 2, 7)) + { + error ("argument 2 must be unsigned literal between " + "2 and 7 inclusive"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_altivec_vsplth) + { + /* Only allow 3-bit unsigned literals. */ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || TREE_INT_CST_LOW (arg1) & ~7) + { + error ("argument 2 must be a 3-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_altivec_vspltb) + { + /* Only allow 4-bit unsigned literals. */ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || TREE_INT_CST_LOW (arg1) & ~15) + { + error ("argument 2 must be a 4-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_altivec_vcfux + || icode == CODE_FOR_altivec_vcfsx + || icode == CODE_FOR_altivec_vctsxs + || icode == CODE_FOR_altivec_vctuxs + || icode == CODE_FOR_vsx_xvcvuxddp_scale + || icode == CODE_FOR_vsx_xvcvsxddp_scale) + { + /* Only allow 5-bit unsigned literals. */ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || TREE_INT_CST_LOW (arg1) & ~0x1f) + { + error ("argument 2 must be a 5-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_dfptstsfi_eq_dd + || icode == CODE_FOR_dfptstsfi_lt_dd + || icode == CODE_FOR_dfptstsfi_gt_dd + || icode == CODE_FOR_dfptstsfi_unordered_dd + || icode == CODE_FOR_dfptstsfi_eq_td + || icode == CODE_FOR_dfptstsfi_lt_td + || icode == CODE_FOR_dfptstsfi_gt_td + || icode == CODE_FOR_dfptstsfi_unordered_td) + { + /* Only allow 6-bit unsigned literals. */ + STRIP_NOPS (arg0); + if (TREE_CODE (arg0) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63)) + { + error ("argument 1 must be a 6-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + else if (icode == CODE_FOR_xststdcqp_kf + || icode == CODE_FOR_xststdcqp_tf + || icode == CODE_FOR_xststdcdp + || icode == CODE_FOR_xststdcsp + || icode == CODE_FOR_xvtstdcdp + || icode == CODE_FOR_xvtstdcsp) + { + /* Only allow 7-bit unsigned literals. */ + STRIP_NOPS (arg1); + if (TREE_CODE (arg1) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127)) + { + error ("argument 2 must be a 7-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + + pat = GEN_FCN (icode) (target, op0, op1); + if (! 
pat) + return 0; + emit_insn (pat); + + return target; +} + +static rtx +altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) +{ + rtx pat, scratch; + tree cr6_form = CALL_EXPR_ARG (exp, 0); + tree arg0 = CALL_EXPR_ARG (exp, 1); + tree arg1 = CALL_EXPR_ARG (exp, 2); + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + machine_mode tmode = SImode; + machine_mode mode0 = insn_data[icode].operand[1].mode; + machine_mode mode1 = insn_data[icode].operand[2].mode; + int cr6_form_int; + + if (TREE_CODE (cr6_form) != INTEGER_CST) + { + error ("argument 1 of %qs must be a constant", + "__builtin_altivec_predicate"); + return const0_rtx; + } + else + cr6_form_int = TREE_INT_CST_LOW (cr6_form); + + gcc_assert (mode0 == mode1); + + /* If we have invalid arguments, bail out before generating bad rtl. */ + if (arg0 == error_mark_node || arg1 == error_mark_node) + return const0_rtx; + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + + /* Note that for many of the relevant operations (e.g. cmpne or + cmpeq) with float or double operands, it makes more sense for the + mode of the allocated scratch register to select a vector of + integer. But the choice to copy the mode of operand 0 was made + long ago and there are no plans to change it. */ + scratch = gen_reg_rtx (mode0); + + pat = GEN_FCN (icode) (scratch, op0, op1); + if (! pat) + return 0; + emit_insn (pat); + + /* The vec_any* and vec_all* predicates use the same opcodes for two + different operations, but the bits in CR6 will be different + depending on what information we want. So we have to play tricks + with CR6 to get the right bits out. + + If you think this is disgusting, look at the specs for the + AltiVec predicates. */ + + switch (cr6_form_int) + { + case 0: + emit_insn (gen_cr6_test_for_zero (target)); + break; + case 1: + emit_insn (gen_cr6_test_for_zero_reverse (target)); + break; + case 2: + emit_insn (gen_cr6_test_for_lt (target)); + break; + case 3: + emit_insn (gen_cr6_test_for_lt_reverse (target)); + break; + default: + error ("argument 1 of %qs is out of range", + "__builtin_altivec_predicate"); + break; + } + + return target; +} + +rtx +swap_endian_selector_for_mode (machine_mode mode) +{ + unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}; + unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8}; + unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12}; + unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14}; + + unsigned int *swaparray, i; + rtx perm[16]; + + switch (mode) + { + case E_V1TImode: + swaparray = swap1; + break; + case E_V2DFmode: + case E_V2DImode: + swaparray = swap2; + break; + case E_V4SFmode: + case E_V4SImode: + swaparray = swap4; + break; + case E_V8HImode: + swaparray = swap8; + break; + default: + gcc_unreachable (); + } + + for (i = 0; i < 16; ++i) + perm[i] = GEN_INT (swaparray[i]); + + return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, + gen_rtvec_v (16, perm))); +} + +/* For the load and sign extend rightmost elements; load and zero extend + rightmost element builtins. 
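+ For the sign-extending forms the lxvr*x result is first loaded
+ into a TImode scratch, widened to V2DI via the
+ vsx_sign_extend_*_v2di patterns, and then extended from V2DI to
+ TImode; the zero-extending forms use the instruction's result
+ directly.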
*/ +static rtx +altivec_expand_lxvr_builtin (enum insn_code icode, tree exp, rtx target, bool blk, bool sign_extend) +{ + rtx pat, addr; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + machine_mode tmode = insn_data[icode].operand[0].mode; + machine_mode smode = insn_data[icode].operand[1].mode; + machine_mode mode0 = Pmode; + machine_mode mode1 = Pmode; + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return 0; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node || arg1 == error_mark_node) + return const0_rtx; + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + op1 = copy_to_mode_reg (mode1, op1); + + if (op0 == const0_rtx) + addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1); + else + { + op0 = copy_to_mode_reg (mode0, op0); + addr = gen_rtx_MEM (blk ? BLKmode : smode, + gen_rtx_PLUS (Pmode, op1, op0)); + } + + if (sign_extend) + { + rtx discratch = gen_reg_rtx (V2DImode); + rtx tiscratch = gen_reg_rtx (TImode); + + /* Emit the lxvr*x insn. */ + pat = GEN_FCN (icode) (tiscratch, addr); + if (!pat) + return 0; + emit_insn (pat); + + /* Emit a sign extension from V16QI,V8HI,V4SI to V2DI. */ + rtx temp1, temp2; + if (icode == CODE_FOR_vsx_lxvrbx) + { + temp1 = simplify_gen_subreg (V16QImode, tiscratch, TImode, 0); + emit_insn (gen_vsx_sign_extend_qi_v2di (discratch, temp1)); + } + else if (icode == CODE_FOR_vsx_lxvrhx) + { + temp1 = simplify_gen_subreg (V8HImode, tiscratch, TImode, 0); + emit_insn (gen_vsx_sign_extend_hi_v2di (discratch, temp1)); + } + else if (icode == CODE_FOR_vsx_lxvrwx) + { + temp1 = simplify_gen_subreg (V4SImode, tiscratch, TImode, 0); + emit_insn (gen_vsx_sign_extend_si_v2di (discratch, temp1)); + } + else if (icode == CODE_FOR_vsx_lxvrdx) + discratch = simplify_gen_subreg (V2DImode, tiscratch, TImode, 0); + else + gcc_unreachable (); + + /* Emit the sign extension from V2DI (double) to TI (quad). */ + temp2 = simplify_gen_subreg (TImode, discratch, V2DImode, 0); + emit_insn (gen_extendditi2_vector (target, temp2)); + + return target; + } + else + { + /* Zero extend. */ + pat = GEN_FCN (icode) (target, addr); + if (!pat) + return 0; + emit_insn (pat); + return target; + } + return 0; +} + +static rtx +altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk) +{ + rtx pat, addr; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + machine_mode tmode = insn_data[icode].operand[0].mode; + machine_mode mode0 = Pmode; + machine_mode mode1 = Pmode; + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return 0; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node || arg1 == error_mark_node) + return const0_rtx; + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + op1 = copy_to_mode_reg (mode1, op1); + + /* For LVX, express the RTL accurately by ANDing the address with -16. + LVXL and LVE*X expand to use UNSPECs to hide their special behavior, + so the raw address is fine. 
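+ So an LVX with a nonzero offset is emitted as
+ (set target (mem (and (plus op1 op0) -16))), which makes the
+ hardware's 16-byte truncation of the effective address explicit
+ in the RTL.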
*/ + if (icode == CODE_FOR_altivec_lvx_v1ti + || icode == CODE_FOR_altivec_lvx_v2df + || icode == CODE_FOR_altivec_lvx_v2di + || icode == CODE_FOR_altivec_lvx_v4sf + || icode == CODE_FOR_altivec_lvx_v4si + || icode == CODE_FOR_altivec_lvx_v8hi + || icode == CODE_FOR_altivec_lvx_v16qi) + { + rtx rawaddr; + if (op0 == const0_rtx) + rawaddr = op1; + else + { + op0 = copy_to_mode_reg (mode0, op0); + rawaddr = gen_rtx_PLUS (Pmode, op1, op0); + } + addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16)); + addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr); + + emit_insn (gen_rtx_SET (target, addr)); + } + else + { + if (op0 == const0_rtx) + addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1); + else + { + op0 = copy_to_mode_reg (mode0, op0); + addr = gen_rtx_MEM (blk ? BLKmode : tmode, + gen_rtx_PLUS (Pmode, op1, op0)); + } + + pat = GEN_FCN (icode) (target, addr); + if (! pat) + return 0; + emit_insn (pat); + } + + return target; +} + +static rtx +altivec_expand_stxvl_builtin (enum insn_code icode, tree exp) +{ + rtx pat; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + tree arg2 = CALL_EXPR_ARG (exp, 2); + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + rtx op2 = expand_normal (arg2); + machine_mode mode0 = insn_data[icode].operand[0].mode; + machine_mode mode1 = insn_data[icode].operand[1].mode; + machine_mode mode2 = insn_data[icode].operand[2].mode; + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return NULL_RTX; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node + || arg1 == error_mark_node + || arg2 == error_mark_node) + return NULL_RTX; + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + if (! (*insn_data[icode].operand[3].predicate) (op2, mode2)) + op2 = copy_to_mode_reg (mode2, op2); + + pat = GEN_FCN (icode) (op0, op1, op2); + if (pat) + emit_insn (pat); + + return NULL_RTX; +} + +static rtx +altivec_expand_stv_builtin (enum insn_code icode, tree exp) +{ + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + tree arg2 = CALL_EXPR_ARG (exp, 2); + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + rtx op2 = expand_normal (arg2); + rtx pat, addr, rawaddr, truncrtx; + machine_mode tmode = insn_data[icode].operand[0].mode; + machine_mode smode = insn_data[icode].operand[1].mode; + machine_mode mode1 = Pmode; + machine_mode mode2 = Pmode; + + /* Invalid arguments. Bail before doing anything stoopid! */ + if (arg0 == error_mark_node + || arg1 == error_mark_node + || arg2 == error_mark_node) + return const0_rtx; + + op2 = copy_to_mode_reg (mode2, op2); + + /* For STVX, express the RTL accurately by ANDing the address with -16. + STVXL and STVE*X expand to use UNSPECs to hide their special behavior, + so the raw address is fine. 
*/ + if (icode == CODE_FOR_altivec_stvx_v2df + || icode == CODE_FOR_altivec_stvx_v2di + || icode == CODE_FOR_altivec_stvx_v4sf + || icode == CODE_FOR_altivec_stvx_v4si + || icode == CODE_FOR_altivec_stvx_v8hi + || icode == CODE_FOR_altivec_stvx_v16qi) + { + if (op1 == const0_rtx) + rawaddr = op2; + else + { + op1 = copy_to_mode_reg (mode1, op1); + rawaddr = gen_rtx_PLUS (Pmode, op2, op1); + } + + addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16)); + addr = gen_rtx_MEM (tmode, addr); + + op0 = copy_to_mode_reg (tmode, op0); + + emit_insn (gen_rtx_SET (addr, op0)); + } + else if (icode == CODE_FOR_vsx_stxvrbx + || icode == CODE_FOR_vsx_stxvrhx + || icode == CODE_FOR_vsx_stxvrwx + || icode == CODE_FOR_vsx_stxvrdx) + { + truncrtx = gen_rtx_TRUNCATE (tmode, op0); + op0 = copy_to_mode_reg (E_TImode, truncrtx); + + if (op1 == const0_rtx) + addr = gen_rtx_MEM (Pmode, op2); + else + { + op1 = copy_to_mode_reg (mode1, op1); + addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1)); + } + pat = GEN_FCN (icode) (addr, op0); + if (pat) + emit_insn (pat); + } + else + { + if (! (*insn_data[icode].operand[1].predicate) (op0, smode)) + op0 = copy_to_mode_reg (smode, op0); + + if (op1 == const0_rtx) + addr = gen_rtx_MEM (tmode, op2); + else + { + op1 = copy_to_mode_reg (mode1, op1); + addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1)); + } + + pat = GEN_FCN (icode) (addr, op0); + if (pat) + emit_insn (pat); + } + + return NULL_RTX; +} + +/* Expand the MMA built-in in EXP. + Store true in *EXPANDEDP if we found a built-in to expand. */ + +static rtx +mma_expand_builtin (tree exp, rtx target, bool *expandedp) +{ + unsigned i; + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + enum rs6000_builtins fcode + = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl); + const struct builtin_description *d = bdesc_mma; + + /* Expand the MMA built-in. */ + for (i = 0; i < ARRAY_SIZE (bdesc_mma); i++, d++) + if (d->code == fcode) + break; + + if (i >= ARRAY_SIZE (bdesc_mma)) + { + *expandedp = false; + return NULL_RTX; + } + + *expandedp = true; + + tree arg; + call_expr_arg_iterator iter; + enum insn_code icode = d->icode; + const struct insn_operand_data *insn_op; + rtx op[MAX_MMA_OPERANDS]; + unsigned nopnds = 0; + unsigned attr = rs6000_builtin_info[fcode].attr; + bool void_func = (attr & RS6000_BTC_VOID); + machine_mode tmode = VOIDmode; + + if (TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node) + { + tmode = insn_data[icode].operand[0].mode; + if (!target + || GET_MODE (target) != tmode + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + op[nopnds++] = target; + } + else + target = const0_rtx; + + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) + { + if (arg == error_mark_node) + return const0_rtx; + + rtx opnd; + insn_op = &insn_data[icode].operand[nopnds]; + if (TREE_CODE (arg) == ADDR_EXPR + && MEM_P (DECL_RTL (TREE_OPERAND (arg, 0)))) + opnd = DECL_RTL (TREE_OPERAND (arg, 0)); + else + opnd = expand_normal (arg); + + if (!(*insn_op->predicate) (opnd, insn_op->mode)) + { + if (!strcmp (insn_op->constraint, "n")) + { + if (!CONST_INT_P (opnd)) + error ("argument %d must be an unsigned literal", nopnds); + else + error ("argument %d is an unsigned literal that is " + "out of range", nopnds); + return const0_rtx; + } + opnd = copy_to_mode_reg (insn_op->mode, opnd); + } + + /* Some MMA instructions have INOUT accumulator operands, so force + their target register to be the same as their input register. 
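+ (Such operands carry a "0" matching constraint, which is what the
+ strcmp against "0" below detects: the accumulator being read must
+ be the same register that is written.)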
*/ + if (!void_func + && nopnds == 1 + && !strcmp (insn_op->constraint, "0") + && insn_op->mode == tmode + && REG_P (opnd) + && (*insn_data[icode].operand[0].predicate) (opnd, tmode)) + target = op[0] = opnd; + + op[nopnds++] = opnd; + } + + unsigned attr_args = attr & RS6000_BTC_OPND_MASK; + if (attr & RS6000_BTC_QUAD + || fcode == VSX_BUILTIN_DISASSEMBLE_PAIR_INTERNAL) + attr_args++; + + gcc_assert (nopnds == attr_args); + + rtx pat; + switch (nopnds) + { + case 1: + pat = GEN_FCN (icode) (op[0]); + break; + case 2: + pat = GEN_FCN (icode) (op[0], op[1]); + break; + case 3: + /* The ASSEMBLE builtin source operands are reversed in little-endian + mode, so reorder them. */ + if (fcode == VSX_BUILTIN_ASSEMBLE_PAIR_INTERNAL && !WORDS_BIG_ENDIAN) + std::swap (op[1], op[2]); + pat = GEN_FCN (icode) (op[0], op[1], op[2]); + break; + case 4: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]); + break; + case 5: + /* The ASSEMBLE builtin source operands are reversed in little-endian + mode, so reorder them. */ + if (fcode == MMA_BUILTIN_ASSEMBLE_ACC_INTERNAL && !WORDS_BIG_ENDIAN) + { + std::swap (op[1], op[4]); + std::swap (op[2], op[3]); + } + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]); + break; + case 6: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]); + break; + case 7: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5], op[6]); + break; + default: + gcc_unreachable (); + } + if (!pat) + return NULL_RTX; + emit_insn (pat); + + return target; +} + +/* Return the appropriate SPR number associated with the given builtin. */ +static inline HOST_WIDE_INT +htm_spr_num (enum rs6000_builtins code) +{ + if (code == HTM_BUILTIN_GET_TFHAR + || code == HTM_BUILTIN_SET_TFHAR) + return TFHAR_SPR; + else if (code == HTM_BUILTIN_GET_TFIAR + || code == HTM_BUILTIN_SET_TFIAR) + return TFIAR_SPR; + else if (code == HTM_BUILTIN_GET_TEXASR + || code == HTM_BUILTIN_SET_TEXASR) + return TEXASR_SPR; + gcc_assert (code == HTM_BUILTIN_GET_TEXASRU + || code == HTM_BUILTIN_SET_TEXASRU); + return TEXASRU_SPR; +} + +/* Return the correct ICODE value depending on whether we are + setting or reading the HTM SPRs. */ +static inline enum insn_code +rs6000_htm_spr_icode (bool nonvoid) +{ + if (nonvoid) + return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si; + else + return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si; +} + +/* Expand the HTM builtin in EXP and store the result in TARGET. + Store true in *EXPANDEDP if we found a builtin to expand. */ +static rtx +htm_expand_builtin (tree exp, rtx target, bool * expandedp) +{ + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node; + enum rs6000_builtins fcode + = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl); + const struct builtin_description *d; + size_t i; + + *expandedp = true; + + if (!TARGET_POWERPC64 + && (fcode == HTM_BUILTIN_TABORTDC + || fcode == HTM_BUILTIN_TABORTDCI)) + { + size_t uns_fcode = (size_t)fcode; + const char *name = rs6000_builtin_info[uns_fcode].name; + error ("builtin %qs is only valid in 64-bit mode", name); + return const0_rtx; + } + + /* Expand the HTM builtins. 
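+     A hedged usage sketch (illustrative only) of the user-level builtins
+     being matched against bdesc_htm below:
+
+       if (__builtin_tbegin (0))    // returns nonzero on success
+         {
+           ...                      // transactional code
+           __builtin_tend (0);
+         }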
+*/
+  d = bdesc_htm;
+  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
+    if (d->code == fcode)
+      {
+        rtx op[MAX_HTM_OPERANDS], pat;
+        int nopnds = 0;
+        tree arg;
+        call_expr_arg_iterator iter;
+        unsigned attr = rs6000_builtin_info[fcode].attr;
+        enum insn_code icode = d->icode;
+        const struct insn_operand_data *insn_op;
+        bool uses_spr = (attr & RS6000_BTC_SPR);
+        rtx cr = NULL_RTX;
+
+        if (uses_spr)
+          icode = rs6000_htm_spr_icode (nonvoid);
+        insn_op = &insn_data[icode].operand[0];
+
+        if (nonvoid)
+          {
+            machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
+            if (!target
+                || GET_MODE (target) != tmode
+                || (uses_spr && !(*insn_op->predicate) (target, tmode)))
+              target = gen_reg_rtx (tmode);
+            if (uses_spr)
+              op[nopnds++] = target;
+          }
+
+        FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
+          {
+            if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
+              return const0_rtx;
+
+            insn_op = &insn_data[icode].operand[nopnds];
+
+            op[nopnds] = expand_normal (arg);
+
+            if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
+              {
+                if (!strcmp (insn_op->constraint, "n"))
+                  {
+                    int arg_num = (nonvoid) ? nopnds : nopnds + 1;
+                    if (!CONST_INT_P (op[nopnds]))
+                      error ("argument %d must be an unsigned literal",
+                             arg_num);
+                    else
+                      error ("argument %d is an unsigned literal that is "
+                             "out of range", arg_num);
+                    return const0_rtx;
+                  }
+                op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
+              }
+
+            nopnds++;
+          }
+
+        /* Handle the builtins for extended mnemonics.  These accept
+           no arguments, but map to builtins that take arguments.  */
+        switch (fcode)
+          {
+          case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
+          case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
+            op[nopnds++] = GEN_INT (1);
+            if (flag_checking)
+              attr |= RS6000_BTC_UNARY;
+            break;
+          case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
+            op[nopnds++] = GEN_INT (0);
+            if (flag_checking)
+              attr |= RS6000_BTC_UNARY;
+            break;
+          default:
+            break;
+          }
+
+        /* If this builtin accesses SPRs, then pass in the appropriate
+           SPR number and SPR regno as the last two operands.  */
+        if (uses_spr)
+          {
+            machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
+            op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
+          }
+        /* If this builtin accesses a CR, then pass in a scratch
+           CR as the last operand.  */
+        else if (attr & RS6000_BTC_CR)
+          {
+            cr = gen_reg_rtx (CCmode);
+            op[nopnds++] = cr;
+          }
+
+        if (flag_checking)
+          {
+            int expected_nopnds = 0;
+            if ((attr & RS6000_BTC_OPND_MASK) == RS6000_BTC_UNARY)
+              expected_nopnds = 1;
+            else if ((attr & RS6000_BTC_OPND_MASK) == RS6000_BTC_BINARY)
+              expected_nopnds = 2;
+            else if ((attr & RS6000_BTC_OPND_MASK) == RS6000_BTC_TERNARY)
+              expected_nopnds = 3;
+            else if ((attr & RS6000_BTC_OPND_MASK) == RS6000_BTC_QUATERNARY)
+              expected_nopnds = 4;
+            if (!(attr & RS6000_BTC_VOID))
+              expected_nopnds += 1;
+            if (uses_spr)
+              expected_nopnds += 1;
+
+            gcc_assert (nopnds == expected_nopnds
+                        && nopnds <= MAX_HTM_OPERANDS);
+          }
+
+        switch (nopnds)
+          {
+          case 1:
+            pat = GEN_FCN (icode) (op[0]);
+            break;
+          case 2:
+            pat = GEN_FCN (icode) (op[0], op[1]);
+            break;
+          case 3:
+            pat = GEN_FCN (icode) (op[0], op[1], op[2]);
+            break;
+          case 4:
+            pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
+            break;
+          default:
+            gcc_unreachable ();
+          }
+        if (!pat)
+          return NULL_RTX;
+        emit_insn (pat);
+
+        if (attr & RS6000_BTC_CR)
+          {
+            if (fcode == HTM_BUILTIN_TBEGIN)
+              {
+                /* Emit code to set TARGET to true or false depending on
+                   whether the tbegin. instruction succeeded or failed
+                   to start a transaction.
We do this by placing the 1's + complement of CR's EQ bit into TARGET. */ + rtx scratch = gen_reg_rtx (SImode); + emit_insn (gen_rtx_SET (scratch, + gen_rtx_EQ (SImode, cr, + const0_rtx))); + emit_insn (gen_rtx_SET (target, + gen_rtx_XOR (SImode, scratch, + GEN_INT (1)))); + } + else + { + /* Emit code to copy the 4-bit condition register field + CR into the least significant end of register TARGET. */ + rtx scratch1 = gen_reg_rtx (SImode); + rtx scratch2 = gen_reg_rtx (SImode); + rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0); + emit_insn (gen_movcc (subreg, cr)); + emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28))); + emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf))); + } + } + + if (nonvoid) + return target; + return const0_rtx; + } + + *expandedp = false; + return NULL_RTX; +} + +/* Expand the CPU builtin in FCODE and store the result in TARGET. */ + +static rtx +cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED, + rtx target) +{ + /* __builtin_cpu_init () is a nop, so expand to nothing. */ + if (fcode == RS6000_BUILTIN_CPU_INIT) + return const0_rtx; + + if (target == 0 || GET_MODE (target) != SImode) + target = gen_reg_rtx (SImode); + +#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB + tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0); + /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back + to a STRING_CST. */ + if (TREE_CODE (arg) == ARRAY_REF + && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST + && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST + && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0) + arg = TREE_OPERAND (arg, 0); + + if (TREE_CODE (arg) != STRING_CST) + { + error ("builtin %qs only accepts a string argument", + rs6000_builtin_info[(size_t) fcode].name); + return const0_rtx; + } + + if (fcode == RS6000_BUILTIN_CPU_IS) + { + const char *cpu = TREE_STRING_POINTER (arg); + rtx cpuid = NULL_RTX; + for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++) + if (strcmp (cpu, cpu_is_info[i].cpu) == 0) + { + /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */ + cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM); + break; + } + if (cpuid == NULL_RTX) + { + /* Invalid CPU argument. */ + error ("cpu %qs is an invalid argument to builtin %qs", + cpu, rs6000_builtin_info[(size_t) fcode].name); + return const0_rtx; + } + + rtx platform = gen_reg_rtx (SImode); + rtx tcbmem = gen_const_mem (SImode, + gen_rtx_PLUS (Pmode, + gen_rtx_REG (Pmode, TLS_REGNUM), + GEN_INT (TCB_PLATFORM_OFFSET))); + emit_move_insn (platform, tcbmem); + emit_insn (gen_eqsi3 (target, platform, cpuid)); + } + else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS) + { + const char *hwcap = TREE_STRING_POINTER (arg); + rtx mask = NULL_RTX; + int hwcap_offset; + for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++) + if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0) + { + mask = GEN_INT (cpu_supports_info[i].mask); + hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id); + break; + } + if (mask == NULL_RTX) + { + /* Invalid HWCAP argument. 
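+             For reference, a valid call looks like (hedged sketch)
+             __builtin_cpu_supports ("vsx"); only feature strings listed
+             in cpu_supports_info reach the code below.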
*/ + error ("%s %qs is an invalid argument to builtin %qs", + "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name); + return const0_rtx; + } + + rtx tcb_hwcap = gen_reg_rtx (SImode); + rtx tcbmem = gen_const_mem (SImode, + gen_rtx_PLUS (Pmode, + gen_rtx_REG (Pmode, TLS_REGNUM), + GEN_INT (hwcap_offset))); + emit_move_insn (tcb_hwcap, tcbmem); + rtx scratch1 = gen_reg_rtx (SImode); + emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask))); + rtx scratch2 = gen_reg_rtx (SImode); + emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx)); + emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx))); + } + else + gcc_unreachable (); + + /* Record that we have expanded a CPU builtin, so that we can later + emit a reference to the special symbol exported by LIBC to ensure we + do not link against an old LIBC that doesn't support this feature. */ + cpu_builtin_p = true; + +#else + warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware " + "capability bits", rs6000_builtin_info[(size_t) fcode].name); + + /* For old LIBCs, always return FALSE. */ + emit_move_insn (target, GEN_INT (0)); +#endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */ + + return target; +} + +static rtx +rs6000_expand_quaternop_builtin (enum insn_code icode, tree exp, rtx target) +{ + rtx pat; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + tree arg2 = CALL_EXPR_ARG (exp, 2); + tree arg3 = CALL_EXPR_ARG (exp, 3); + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + rtx op2 = expand_normal (arg2); + rtx op3 = expand_normal (arg3); + machine_mode tmode = insn_data[icode].operand[0].mode; + machine_mode mode0 = insn_data[icode].operand[1].mode; + machine_mode mode1 = insn_data[icode].operand[2].mode; + machine_mode mode2 = insn_data[icode].operand[3].mode; + machine_mode mode3 = insn_data[icode].operand[4].mode; + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return 0; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node + || arg1 == error_mark_node + || arg2 == error_mark_node + || arg3 == error_mark_node) + return const0_rtx; + + /* Check and prepare argument depending on the instruction code. + + Note that a switch statement instead of the sequence of tests + would be incorrect as many of the CODE_FOR values could be + CODE_FOR_nothing and that would yield multiple alternatives + with identical values. We'd never reach here at runtime in + this case. */ + if (icode == CODE_FOR_xxeval) + { + /* Only allow 8-bit unsigned literals. */ + STRIP_NOPS (arg3); + if (TREE_CODE (arg3) != INTEGER_CST + || TREE_INT_CST_LOW (arg3) & ~0xff) + { + error ("argument 4 must be an 8-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + + else if (icode == CODE_FOR_xxpermx) + { + /* Only allow 3-bit unsigned literals. */ + STRIP_NOPS (arg3); + if (TREE_CODE (arg3) != INTEGER_CST + || TREE_INT_CST_LOW (arg3) & ~0x7) + { + error ("argument 4 must be a 3-bit unsigned literal"); + return CONST0_RTX (tmode); + } + } + + else if (icode == CODE_FOR_vreplace_elt_v4si + || icode == CODE_FOR_vreplace_elt_v4sf) + { + /* Check whether the 3rd argument is an integer constant in the range + 0 to 3 inclusive. 
*/ + STRIP_NOPS (arg2); + if (TREE_CODE (arg2) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 3)) + { + error ("argument 3 must be in the range 0 to 3"); + return CONST0_RTX (tmode); + } + } + + else if (icode == CODE_FOR_vreplace_un_v4si + || icode == CODE_FOR_vreplace_un_v4sf) + { + /* Check whether the 3rd argument is an integer constant in the range + 0 to 12 inclusive. */ + STRIP_NOPS (arg2); + if (TREE_CODE (arg2) != INTEGER_CST + || !IN_RANGE(TREE_INT_CST_LOW (arg2), 0, 12)) + { + error ("argument 3 must be in the range 0 to 12"); + return CONST0_RTX (tmode); + } + } + + else if (icode == CODE_FOR_vsldb_v16qi + || icode == CODE_FOR_vsldb_v8hi + || icode == CODE_FOR_vsldb_v4si + || icode == CODE_FOR_vsldb_v2di + || icode == CODE_FOR_vsrdb_v16qi + || icode == CODE_FOR_vsrdb_v8hi + || icode == CODE_FOR_vsrdb_v4si + || icode == CODE_FOR_vsrdb_v2di) + { + /* Check whether the 3rd argument is an integer constant in the range + 0 to 7 inclusive. */ + STRIP_NOPS (arg2); + if (TREE_CODE (arg2) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 7)) + { + error ("argument 3 must be a constant in the range 0 to 7"); + return CONST0_RTX (tmode); + } + } + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + if (! (*insn_data[icode].operand[3].predicate) (op2, mode2)) + op2 = copy_to_mode_reg (mode2, op2); + if (! (*insn_data[icode].operand[4].predicate) (op3, mode3)) + op3 = copy_to_mode_reg (mode3, op3); + + pat = GEN_FCN (icode) (target, op0, op1, op2, op3); + if (! pat) + return 0; + emit_insn (pat); + + return target; +} + +static rtx +rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) +{ + rtx pat; + tree arg0 = CALL_EXPR_ARG (exp, 0); + tree arg1 = CALL_EXPR_ARG (exp, 1); + tree arg2 = CALL_EXPR_ARG (exp, 2); + rtx op0 = expand_normal (arg0); + rtx op1 = expand_normal (arg1); + rtx op2 = expand_normal (arg2); + machine_mode tmode = insn_data[icode].operand[0].mode; + machine_mode mode0 = insn_data[icode].operand[1].mode; + machine_mode mode1 = insn_data[icode].operand[2].mode; + machine_mode mode2 = insn_data[icode].operand[3].mode; + + if (icode == CODE_FOR_nothing) + /* Builtin not supported on this processor. */ + return 0; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node + || arg1 == error_mark_node + || arg2 == error_mark_node) + return const0_rtx; + + /* Check and prepare argument depending on the instruction code. + + Note that a switch statement instead of the sequence of tests + would be incorrect as many of the CODE_FOR values could be + CODE_FOR_nothing and that would yield multiple alternatives + with identical values. We'd never reach here at runtime in + this case. */ + if (icode == CODE_FOR_altivec_vsldoi_v4sf + || icode == CODE_FOR_altivec_vsldoi_v2df + || icode == CODE_FOR_altivec_vsldoi_v4si + || icode == CODE_FOR_altivec_vsldoi_v8hi + || icode == CODE_FOR_altivec_vsldoi_v16qi) + { + /* Only allow 4-bit unsigned literals. 
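+         The literal is the byte shift count of vsldoi; a hedged usage
+         sketch: r = vec_sld (a, b, 3); requires the 3 to be a
+         compile-time constant in [0, 15].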
+*/
+      STRIP_NOPS (arg2);
+      if (TREE_CODE (arg2) != INTEGER_CST
+          || TREE_INT_CST_LOW (arg2) & ~0xf)
+        {
+          error ("argument 3 must be a 4-bit unsigned literal");
+          return CONST0_RTX (tmode);
+        }
+    }
+  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
+           || icode == CODE_FOR_vsx_xxpermdi_v2di
+           || icode == CODE_FOR_vsx_xxpermdi_v2df_be
+           || icode == CODE_FOR_vsx_xxpermdi_v2di_be
+           || icode == CODE_FOR_vsx_xxpermdi_v1ti
+           || icode == CODE_FOR_vsx_xxpermdi_v4sf
+           || icode == CODE_FOR_vsx_xxpermdi_v4si
+           || icode == CODE_FOR_vsx_xxpermdi_v8hi
+           || icode == CODE_FOR_vsx_xxpermdi_v16qi
+           || icode == CODE_FOR_vsx_xxsldwi_v16qi
+           || icode == CODE_FOR_vsx_xxsldwi_v8hi
+           || icode == CODE_FOR_vsx_xxsldwi_v4si
+           || icode == CODE_FOR_vsx_xxsldwi_v4sf
+           || icode == CODE_FOR_vsx_xxsldwi_v2di
+           || icode == CODE_FOR_vsx_xxsldwi_v2df)
+    {
+      /* Only allow 2-bit unsigned literals.  */
+      STRIP_NOPS (arg2);
+      if (TREE_CODE (arg2) != INTEGER_CST
+          || TREE_INT_CST_LOW (arg2) & ~0x3)
+        {
+          error ("argument 3 must be a 2-bit unsigned literal");
+          return CONST0_RTX (tmode);
+        }
+    }
+  else if (icode == CODE_FOR_vsx_set_v2df
+           || icode == CODE_FOR_vsx_set_v2di
+           || icode == CODE_FOR_bcdadd_v16qi
+           || icode == CODE_FOR_bcdadd_v1ti
+           || icode == CODE_FOR_bcdadd_lt_v16qi
+           || icode == CODE_FOR_bcdadd_lt_v1ti
+           || icode == CODE_FOR_bcdadd_eq_v16qi
+           || icode == CODE_FOR_bcdadd_eq_v1ti
+           || icode == CODE_FOR_bcdadd_gt_v16qi
+           || icode == CODE_FOR_bcdadd_gt_v1ti
+           || icode == CODE_FOR_bcdsub_v16qi
+           || icode == CODE_FOR_bcdsub_v1ti
+           || icode == CODE_FOR_bcdsub_lt_v16qi
+           || icode == CODE_FOR_bcdsub_lt_v1ti
+           || icode == CODE_FOR_bcdsub_eq_v16qi
+           || icode == CODE_FOR_bcdsub_eq_v1ti
+           || icode == CODE_FOR_bcdsub_gt_v16qi
+           || icode == CODE_FOR_bcdsub_gt_v1ti)
+    {
+      /* Only allow 1-bit unsigned literals.  */
+      STRIP_NOPS (arg2);
+      if (TREE_CODE (arg2) != INTEGER_CST
+          || TREE_INT_CST_LOW (arg2) & ~0x1)
+        {
+          error ("argument 3 must be a 1-bit unsigned literal");
+          return CONST0_RTX (tmode);
+        }
+    }
+  else if (icode == CODE_FOR_dfp_ddedpd_dd
+           || icode == CODE_FOR_dfp_ddedpd_td)
+    {
+      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
+      STRIP_NOPS (arg0);
+      if (TREE_CODE (arg0) != INTEGER_CST
+          || TREE_INT_CST_LOW (arg0) & ~0x3)
+        {
+          error ("argument 1 must be 0 or 2");
+          return CONST0_RTX (tmode);
+        }
+    }
+  else if (icode == CODE_FOR_dfp_denbcd_dd
+           || icode == CODE_FOR_dfp_denbcd_td
+           || icode == CODE_FOR_dfp_denbcd_v16qi)
+    {
+      /* Only allow 1-bit unsigned literals.  */
+      STRIP_NOPS (arg0);
+      if (TREE_CODE (arg0) != INTEGER_CST
+          || TREE_INT_CST_LOW (arg0) & ~0x1)
+        {
+          error ("argument 1 must be a 1-bit unsigned literal");
+          return CONST0_RTX (tmode);
+        }
+    }
+  else if (icode == CODE_FOR_dfp_dscli_dd
+           || icode == CODE_FOR_dfp_dscli_td
+           || icode == CODE_FOR_dfp_dscri_dd
+           || icode == CODE_FOR_dfp_dscri_td)
+    {
+      /* Only allow 6-bit unsigned literals.  */
+      STRIP_NOPS (arg1);
+      if (TREE_CODE (arg1) != INTEGER_CST
+          || TREE_INT_CST_LOW (arg1) & ~0x3f)
+        {
+          error ("argument 2 must be a 6-bit unsigned literal");
+          return CONST0_RTX (tmode);
+        }
+    }
+  else if (icode == CODE_FOR_crypto_vshasigmaw
+           || icode == CODE_FOR_crypto_vshasigmad)
+    {
+      /* Check whether the 2nd and 3rd arguments are integer constants and in
+         range and prepare arguments.
+*/
+      STRIP_NOPS (arg1);
+      if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
+        {
+          error ("argument 2 must be 0 or 1");
+          return CONST0_RTX (tmode);
+        }
+
+      STRIP_NOPS (arg2);
+      if (TREE_CODE (arg2) != INTEGER_CST
+          || wi::geu_p (wi::to_wide (arg2), 16))
+        {
+          error ("argument 3 must be in the range [0, 15]");
+          return CONST0_RTX (tmode);
+        }
+    }
+
+  if (target == 0
+      || GET_MODE (target) != tmode
+      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+    target = gen_reg_rtx (tmode);
+
+  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+    op0 = copy_to_mode_reg (mode0, op0);
+  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+    op1 = copy_to_mode_reg (mode1, op1);
+  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+    op2 = copy_to_mode_reg (mode2, op2);
+
+  pat = GEN_FCN (icode) (target, op0, op1, op2);
+  if (! pat)
+    return 0;
+  emit_insn (pat);
+
+  return target;
+}
+
+
+/* Expand the dst builtins.  */
+static rtx
+altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+                            bool *expandedp)
+{
+  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+  enum rs6000_builtins fcode
+    = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl);
+  tree arg0, arg1, arg2;
+  machine_mode mode0, mode1;
+  rtx pat, op0, op1, op2;
+  const struct builtin_description *d;
+  size_t i;
+
+  *expandedp = false;
+
+  /* Handle DST variants.  */
+  d = bdesc_dst;
+  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
+    if (d->code == fcode)
+      {
+        arg0 = CALL_EXPR_ARG (exp, 0);
+        arg1 = CALL_EXPR_ARG (exp, 1);
+        arg2 = CALL_EXPR_ARG (exp, 2);
+        op0 = expand_normal (arg0);
+        op1 = expand_normal (arg1);
+        op2 = expand_normal (arg2);
+        mode0 = insn_data[d->icode].operand[0].mode;
+        mode1 = insn_data[d->icode].operand[1].mode;
+
+        /* Invalid arguments, bail out before generating bad rtl.  */
+        if (arg0 == error_mark_node
+            || arg1 == error_mark_node
+            || arg2 == error_mark_node)
+          return const0_rtx;
+
+        *expandedp = true;
+        STRIP_NOPS (arg2);
+        if (TREE_CODE (arg2) != INTEGER_CST
+            || TREE_INT_CST_LOW (arg2) & ~0x3)
+          {
+            error ("argument to %qs must be a 2-bit unsigned literal",
+                   d->name);
+            return const0_rtx;
+          }
+
+        if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
+          op0 = copy_to_mode_reg (Pmode, op0);
+        if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
+          op1 = copy_to_mode_reg (mode1, op1);
+
+        pat = GEN_FCN (d->icode) (op0, op1, op2);
+        if (pat != 0)
+          emit_insn (pat);
+
+        return NULL_RTX;
+      }
+
+  return NULL_RTX;
+}
+
+/* Expand vec_init builtin.  */
+static rtx
+altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
+{
+  machine_mode tmode = TYPE_MODE (type);
+  machine_mode inner_mode = GET_MODE_INNER (tmode);
+  int i, n_elt = GET_MODE_NUNITS (tmode);
+
+  gcc_assert (VECTOR_MODE_P (tmode));
+  gcc_assert (n_elt == call_expr_nargs (exp));
+
+  if (!target || !register_operand (target, tmode))
+    target = gen_reg_rtx (tmode);
+
+  /* If we have a vector comprised of a single element, such as V1TImode, do
+     the initialization directly.
+*/
+  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
+    {
+      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
+      emit_move_insn (target, gen_lowpart (tmode, x));
+    }
+  else
+    {
+      rtvec v = rtvec_alloc (n_elt);
+
+      for (i = 0; i < n_elt; ++i)
+        {
+          rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
+          RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+        }
+
+      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
+    }
+
+  return target;
+}
+
+/* Return the integer constant in ARG.  Constrain it to be in the range
+   of the subparts of VEC_TYPE; issue an error if not.  */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+  if (!tree_fits_uhwi_p (arg)
+      || (elt = tree_to_uhwi (arg), elt > max))
+    {
+      error ("selector must be an integer constant in the range [0, %wi]",
+             max);
+      return 0;
+    }
+
+  return elt;
+}
+
+/* Expand vec_set builtin.  */
+static rtx
+altivec_expand_vec_set_builtin (tree exp)
+{
+  machine_mode tmode, mode1;
+  tree arg0, arg1, arg2;
+  int elt;
+  rtx op0, op1;
+
+  arg0 = CALL_EXPR_ARG (exp, 0);
+  arg1 = CALL_EXPR_ARG (exp, 1);
+  arg2 = CALL_EXPR_ARG (exp, 2);
+
+  tmode = TYPE_MODE (TREE_TYPE (arg0));
+  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+  gcc_assert (VECTOR_MODE_P (tmode));
+
+  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
+  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
+  elt = get_element_number (TREE_TYPE (arg0), arg2);
+
+  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
+    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
+
+  op0 = force_reg (tmode, op0);
+  op1 = force_reg (mode1, op1);
+
+  rs6000_expand_vector_set (op0, op1, GEN_INT (elt));
+
+  return op0;
+}
+
+/* Expand vec_ext builtin.  */
+static rtx
+altivec_expand_vec_ext_builtin (tree exp, rtx target)
+{
+  machine_mode tmode, mode0;
+  tree arg0, arg1;
+  rtx op0;
+  rtx op1;
+
+  arg0 = CALL_EXPR_ARG (exp, 0);
+  arg1 = CALL_EXPR_ARG (exp, 1);
+
+  op0 = expand_normal (arg0);
+  op1 = expand_normal (arg1);
+
+  if (TREE_CODE (arg1) == INTEGER_CST)
+    {
+      unsigned HOST_WIDE_INT elt;
+      unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
+      unsigned int truncated_selector;
+      /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
+         returns the low-order bits of the INTEGER_CST for modulo
+         indexing.  */
+      elt = TREE_INT_CST_LOW (arg1);
+      truncated_selector = elt % size;
+      op1 = GEN_INT (truncated_selector);
+    }
+
+  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+  mode0 = TYPE_MODE (TREE_TYPE (arg0));
+  gcc_assert (VECTOR_MODE_P (mode0));
+
+  op0 = force_reg (mode0, op0);
+
+  if (optimize || !target || !register_operand (target, tmode))
+    target = gen_reg_rtx (tmode);
+
+  rs6000_expand_vector_extract (target, op0, op1);
+
+  return target;
+}
+
+/* Expand vec_sel builtin.
*/ +static rtx +altivec_expand_vec_sel_builtin (enum insn_code icode, tree exp, rtx target) +{ + rtx op0, op1, op2, pat; + tree arg0, arg1, arg2; + + arg0 = CALL_EXPR_ARG (exp, 0); + op0 = expand_normal (arg0); + arg1 = CALL_EXPR_ARG (exp, 1); + op1 = expand_normal (arg1); + arg2 = CALL_EXPR_ARG (exp, 2); + op2 = expand_normal (arg2); + + machine_mode tmode = insn_data[icode].operand[0].mode; + machine_mode mode0 = insn_data[icode].operand[1].mode; + machine_mode mode1 = insn_data[icode].operand[2].mode; + machine_mode mode2 = insn_data[icode].operand[3].mode; + + if (target == 0 || GET_MODE (target) != tmode + || !(*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + if (!(*insn_data[icode].operand[1].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + if (!(*insn_data[icode].operand[2].predicate) (op1, mode1)) + op1 = copy_to_mode_reg (mode1, op1); + if (!(*insn_data[icode].operand[3].predicate) (op2, mode2)) + op2 = copy_to_mode_reg (mode2, op2); + + pat = GEN_FCN (icode) (target, op0, op1, op2, op2); + if (pat) + emit_insn (pat); + else + return NULL_RTX; + + return target; +} + +/* Expand the builtin in EXP and store the result in TARGET. Store + true in *EXPANDEDP if we found a builtin to expand. */ +static rtx +altivec_expand_builtin (tree exp, rtx target, bool *expandedp) +{ + const struct builtin_description *d; + size_t i; + enum insn_code icode; + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + tree arg0, arg1, arg2; + rtx op0, pat; + machine_mode tmode, mode0; + enum rs6000_builtins fcode + = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl); + + if (rs6000_overloaded_builtin_p (fcode)) + { + *expandedp = true; + error ("unresolved overload for Altivec builtin %qF", fndecl); + + /* Given it is invalid, just generate a normal call. 
*/ + return expand_call (exp, target, false); + } + + target = altivec_expand_dst_builtin (exp, target, expandedp); + if (*expandedp) + return target; + + *expandedp = true; + + switch (fcode) + { + case ALTIVEC_BUILTIN_STVX_V2DF: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp); + case ALTIVEC_BUILTIN_STVX_V2DI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp); + case ALTIVEC_BUILTIN_STVX_V4SF: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp); + case ALTIVEC_BUILTIN_STVX: + case ALTIVEC_BUILTIN_STVX_V4SI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp); + case ALTIVEC_BUILTIN_STVX_V8HI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp); + case ALTIVEC_BUILTIN_STVX_V16QI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp); + case ALTIVEC_BUILTIN_STVEBX: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp); + case ALTIVEC_BUILTIN_STVEHX: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp); + case ALTIVEC_BUILTIN_STVEWX: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp); + + case P10_BUILTIN_TR_STXVRBX: + return altivec_expand_stv_builtin (CODE_FOR_vsx_stxvrbx, exp); + case P10_BUILTIN_TR_STXVRHX: + return altivec_expand_stv_builtin (CODE_FOR_vsx_stxvrhx, exp); + case P10_BUILTIN_TR_STXVRWX: + return altivec_expand_stv_builtin (CODE_FOR_vsx_stxvrwx, exp); + case P10_BUILTIN_TR_STXVRDX: + return altivec_expand_stv_builtin (CODE_FOR_vsx_stxvrdx, exp); + + case ALTIVEC_BUILTIN_STVXL_V2DF: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp); + case ALTIVEC_BUILTIN_STVXL_V2DI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp); + case ALTIVEC_BUILTIN_STVXL_V4SF: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp); + case ALTIVEC_BUILTIN_STVXL: + case ALTIVEC_BUILTIN_STVXL_V4SI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp); + case ALTIVEC_BUILTIN_STVXL_V8HI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp); + case ALTIVEC_BUILTIN_STVXL_V16QI: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp); + + case ALTIVEC_BUILTIN_STVLX: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp); + case ALTIVEC_BUILTIN_STVLXL: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp); + case ALTIVEC_BUILTIN_STVRX: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp); + case ALTIVEC_BUILTIN_STVRXL: + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp); + + case P9V_BUILTIN_STXVL: + return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp); + + case P9V_BUILTIN_XST_LEN_R: + return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp); + + case VSX_BUILTIN_STXVD2X_V1TI: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp); + case VSX_BUILTIN_STXVD2X_V2DF: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp); + case VSX_BUILTIN_STXVD2X_V2DI: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp); + case VSX_BUILTIN_STXVW4X_V4SF: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp); + case VSX_BUILTIN_STXVW4X_V4SI: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp); + case VSX_BUILTIN_STXVW4X_V8HI: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp); + case VSX_BUILTIN_STXVW4X_V16QI: + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp); + + /* For the following on big endian, it's 
ok to use any appropriate + unaligned-supporting store, so use a generic expander. For + little-endian, the exact element-reversing instruction must + be used. */ + case VSX_BUILTIN_ST_ELEMREV_V1TI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti + : CODE_FOR_vsx_st_elemrev_v1ti); + return altivec_expand_stv_builtin (code, exp); + } + case VSX_BUILTIN_ST_ELEMREV_V2DF: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df + : CODE_FOR_vsx_st_elemrev_v2df); + return altivec_expand_stv_builtin (code, exp); + } + case VSX_BUILTIN_ST_ELEMREV_V2DI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di + : CODE_FOR_vsx_st_elemrev_v2di); + return altivec_expand_stv_builtin (code, exp); + } + case VSX_BUILTIN_ST_ELEMREV_V4SF: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf + : CODE_FOR_vsx_st_elemrev_v4sf); + return altivec_expand_stv_builtin (code, exp); + } + case VSX_BUILTIN_ST_ELEMREV_V4SI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si + : CODE_FOR_vsx_st_elemrev_v4si); + return altivec_expand_stv_builtin (code, exp); + } + case VSX_BUILTIN_ST_ELEMREV_V8HI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi + : CODE_FOR_vsx_st_elemrev_v8hi); + return altivec_expand_stv_builtin (code, exp); + } + case VSX_BUILTIN_ST_ELEMREV_V16QI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi + : CODE_FOR_vsx_st_elemrev_v16qi); + return altivec_expand_stv_builtin (code, exp); + } + + case ALTIVEC_BUILTIN_MFVSCR: + icode = CODE_FOR_altivec_mfvscr; + tmode = insn_data[icode].operand[0].mode; + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + pat = GEN_FCN (icode) (target); + if (! pat) + return 0; + emit_insn (pat); + return target; + + case ALTIVEC_BUILTIN_MTVSCR: + icode = CODE_FOR_altivec_mtvscr; + arg0 = CALL_EXPR_ARG (exp, 0); + op0 = expand_normal (arg0); + mode0 = insn_data[icode].operand[0].mode; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node) + return const0_rtx; + + if (! 
(*insn_data[icode].operand[0].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + + pat = GEN_FCN (icode) (op0); + if (pat) + emit_insn (pat); + return NULL_RTX; + + case ALTIVEC_BUILTIN_VSEL_2DF: + return altivec_expand_vec_sel_builtin (CODE_FOR_altivec_vselv2df, exp, + target); + case ALTIVEC_BUILTIN_VSEL_2DI: + case ALTIVEC_BUILTIN_VSEL_2DI_UNS: + return altivec_expand_vec_sel_builtin (CODE_FOR_altivec_vselv2di, exp, + target); + case ALTIVEC_BUILTIN_VSEL_4SF: + return altivec_expand_vec_sel_builtin (CODE_FOR_altivec_vselv4sf, exp, + target); + case ALTIVEC_BUILTIN_VSEL_4SI: + case ALTIVEC_BUILTIN_VSEL_4SI_UNS: + return altivec_expand_vec_sel_builtin (CODE_FOR_altivec_vselv4si, exp, + target); + case ALTIVEC_BUILTIN_VSEL_8HI: + case ALTIVEC_BUILTIN_VSEL_8HI_UNS: + return altivec_expand_vec_sel_builtin (CODE_FOR_altivec_vselv8hi, exp, + target); + case ALTIVEC_BUILTIN_VSEL_16QI: + case ALTIVEC_BUILTIN_VSEL_16QI_UNS: + return altivec_expand_vec_sel_builtin (CODE_FOR_altivec_vselv16qi, exp, + target); + + case ALTIVEC_BUILTIN_DSSALL: + emit_insn (gen_altivec_dssall ()); + return NULL_RTX; + + case ALTIVEC_BUILTIN_DSS: + icode = CODE_FOR_altivec_dss; + arg0 = CALL_EXPR_ARG (exp, 0); + STRIP_NOPS (arg0); + op0 = expand_normal (arg0); + mode0 = insn_data[icode].operand[0].mode; + + /* If we got invalid arguments bail out before generating bad rtl. */ + if (arg0 == error_mark_node) + return const0_rtx; + + if (TREE_CODE (arg0) != INTEGER_CST + || TREE_INT_CST_LOW (arg0) & ~0x3) + { + error ("argument to %qs must be a 2-bit unsigned literal", "dss"); + return const0_rtx; + } + + if (! (*insn_data[icode].operand[0].predicate) (op0, mode0)) + op0 = copy_to_mode_reg (mode0, op0); + + emit_insn (gen_altivec_dss (op0)); + return NULL_RTX; + + case ALTIVEC_BUILTIN_VEC_INIT_V4SI: + case ALTIVEC_BUILTIN_VEC_INIT_V8HI: + case ALTIVEC_BUILTIN_VEC_INIT_V16QI: + case ALTIVEC_BUILTIN_VEC_INIT_V4SF: + case VSX_BUILTIN_VEC_INIT_V2DF: + case VSX_BUILTIN_VEC_INIT_V2DI: + case VSX_BUILTIN_VEC_INIT_V1TI: + return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target); + + case ALTIVEC_BUILTIN_VEC_SET_V4SI: + case ALTIVEC_BUILTIN_VEC_SET_V8HI: + case ALTIVEC_BUILTIN_VEC_SET_V16QI: + case ALTIVEC_BUILTIN_VEC_SET_V4SF: + case VSX_BUILTIN_VEC_SET_V2DF: + case VSX_BUILTIN_VEC_SET_V2DI: + case VSX_BUILTIN_VEC_SET_V1TI: + return altivec_expand_vec_set_builtin (exp); + + case ALTIVEC_BUILTIN_VEC_EXT_V4SI: + case ALTIVEC_BUILTIN_VEC_EXT_V8HI: + case ALTIVEC_BUILTIN_VEC_EXT_V16QI: + case ALTIVEC_BUILTIN_VEC_EXT_V4SF: + case VSX_BUILTIN_VEC_EXT_V2DF: + case VSX_BUILTIN_VEC_EXT_V2DI: + case VSX_BUILTIN_VEC_EXT_V1TI: + return altivec_expand_vec_ext_builtin (exp, target); + + case P9V_BUILTIN_VEC_EXTRACT4B: + arg1 = CALL_EXPR_ARG (exp, 1); + STRIP_NOPS (arg1); + + /* Generate a normal call if it is invalid. */ + if (arg1 == error_mark_node) + return expand_call (exp, target, false); + + if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12) + { + error ("second argument to %qs must be [0, 12]", "vec_vextract4b"); + return expand_call (exp, target, false); + } + break; + + case P9V_BUILTIN_VEC_INSERT4B: + arg2 = CALL_EXPR_ARG (exp, 2); + STRIP_NOPS (arg2); + + /* Generate a normal call if it is invalid. 
*/ + if (arg2 == error_mark_node) + return expand_call (exp, target, false); + + if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12) + { + error ("third argument to %qs must be [0, 12]", "vec_vinsert4b"); + return expand_call (exp, target, false); + } + break; + + case P10_BUILTIN_VEC_XXGENPCVM: + arg1 = CALL_EXPR_ARG (exp, 1); + STRIP_NOPS (arg1); + + /* Generate a normal call if it is invalid. */ + if (arg1 == error_mark_node) + return expand_call (exp, target, false); + + if (TREE_CODE (arg1) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 3)) + { + size_t uns_fcode = (size_t) fcode; + const char *name = rs6000_builtin_info[uns_fcode].name; + error ("Second argument of %qs must be in the range [0, 3].", name); + return expand_call (exp, target, false); + } + break; + + default: + break; + /* Fall through. */ + } + + /* Expand abs* operations. */ + d = bdesc_abs; + for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++) + if (d->code == fcode) + return altivec_expand_abs_builtin (d->icode, exp, target); + + /* Expand the AltiVec predicates. */ + d = bdesc_altivec_preds; + for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++) + if (d->code == fcode) + return altivec_expand_predicate_builtin (d->icode, exp, target); + + /* LV* are funky. We initialized them differently. */ + switch (fcode) + { + case ALTIVEC_BUILTIN_LVSL: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl, + exp, target, false); + case ALTIVEC_BUILTIN_LVSR: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr, + exp, target, false); + case ALTIVEC_BUILTIN_LVEBX: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx, + exp, target, false); + case ALTIVEC_BUILTIN_LVEHX: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx, + exp, target, false); + case ALTIVEC_BUILTIN_LVEWX: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx, + exp, target, false); + case P10_BUILTIN_SE_LXVRBX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrbx, + exp, target, false, true); + case P10_BUILTIN_SE_LXVRHX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrhx, + exp, target, false, true); + case P10_BUILTIN_SE_LXVRWX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrwx, + exp, target, false, true); + case P10_BUILTIN_SE_LXVRDX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrdx, + exp, target, false, true); + case P10_BUILTIN_ZE_LXVRBX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrbx, + exp, target, false, false); + case P10_BUILTIN_ZE_LXVRHX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrhx, + exp, target, false, false); + case P10_BUILTIN_ZE_LXVRWX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrwx, + exp, target, false, false); + case P10_BUILTIN_ZE_LXVRDX: + return altivec_expand_lxvr_builtin (CODE_FOR_vsx_lxvrdx, + exp, target, false, false); + case ALTIVEC_BUILTIN_LVXL_V2DF: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df, + exp, target, false); + case ALTIVEC_BUILTIN_LVXL_V2DI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di, + exp, target, false); + case ALTIVEC_BUILTIN_LVXL_V4SF: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf, + exp, target, false); + case ALTIVEC_BUILTIN_LVXL: + case ALTIVEC_BUILTIN_LVXL_V4SI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si, + exp, target, false); + case ALTIVEC_BUILTIN_LVXL_V8HI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi, + exp, target, false); + case ALTIVEC_BUILTIN_LVXL_V16QI: + return 
altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi, + exp, target, false); + case ALTIVEC_BUILTIN_LVX_V1TI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti, + exp, target, false); + case ALTIVEC_BUILTIN_LVX_V2DF: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df, + exp, target, false); + case ALTIVEC_BUILTIN_LVX_V2DI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di, + exp, target, false); + case ALTIVEC_BUILTIN_LVX_V4SF: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf, + exp, target, false); + case ALTIVEC_BUILTIN_LVX: + case ALTIVEC_BUILTIN_LVX_V4SI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si, + exp, target, false); + case ALTIVEC_BUILTIN_LVX_V8HI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi, + exp, target, false); + case ALTIVEC_BUILTIN_LVX_V16QI: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi, + exp, target, false); + case ALTIVEC_BUILTIN_LVLX: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx, + exp, target, true); + case ALTIVEC_BUILTIN_LVLXL: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl, + exp, target, true); + case ALTIVEC_BUILTIN_LVRX: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx, + exp, target, true); + case ALTIVEC_BUILTIN_LVRXL: + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl, + exp, target, true); + case VSX_BUILTIN_LXVD2X_V1TI: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti, + exp, target, false); + case VSX_BUILTIN_LXVD2X_V2DF: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df, + exp, target, false); + case VSX_BUILTIN_LXVD2X_V2DI: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di, + exp, target, false); + case VSX_BUILTIN_LXVW4X_V4SF: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf, + exp, target, false); + case VSX_BUILTIN_LXVW4X_V4SI: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si, + exp, target, false); + case VSX_BUILTIN_LXVW4X_V8HI: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi, + exp, target, false); + case VSX_BUILTIN_LXVW4X_V16QI: + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi, + exp, target, false); + /* For the following on big endian, it's ok to use any appropriate + unaligned-supporting load, so use a generic expander. For + little-endian, the exact element-reversing instruction must + be used. */ + case VSX_BUILTIN_LD_ELEMREV_V2DF: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df + : CODE_FOR_vsx_ld_elemrev_v2df); + return altivec_expand_lv_builtin (code, exp, target, false); + } + case VSX_BUILTIN_LD_ELEMREV_V1TI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti + : CODE_FOR_vsx_ld_elemrev_v1ti); + return altivec_expand_lv_builtin (code, exp, target, false); + } + case VSX_BUILTIN_LD_ELEMREV_V2DI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di + : CODE_FOR_vsx_ld_elemrev_v2di); + return altivec_expand_lv_builtin (code, exp, target, false); + } + case VSX_BUILTIN_LD_ELEMREV_V4SF: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf + : CODE_FOR_vsx_ld_elemrev_v4sf); + return altivec_expand_lv_builtin (code, exp, target, false); + } + case VSX_BUILTIN_LD_ELEMREV_V4SI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si + : CODE_FOR_vsx_ld_elemrev_v4si); + return altivec_expand_lv_builtin (code, exp, target, false); + } + case VSX_BUILTIN_LD_ELEMREV_V8HI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? 
CODE_FOR_vsx_load_v8hi + : CODE_FOR_vsx_ld_elemrev_v8hi); + return altivec_expand_lv_builtin (code, exp, target, false); + } + case VSX_BUILTIN_LD_ELEMREV_V16QI: + { + enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi + : CODE_FOR_vsx_ld_elemrev_v16qi); + return altivec_expand_lv_builtin (code, exp, target, false); + } + break; + default: + break; + /* Fall through. */ + } + + *expandedp = false; + return NULL_RTX; +} + +/* Check whether a builtin function is supported in this target + configuration. */ +bool +rs6000_builtin_is_supported_p (enum rs6000_builtins fncode) +{ + HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask; + if ((fnmask & rs6000_builtin_mask) != fnmask) + return false; + else + return true; +} + +/* Raise an error message for a builtin function that is called without the + appropriate target options being set. */ + +static void +rs6000_invalid_builtin (enum rs6000_builtins fncode) +{ + size_t uns_fncode = (size_t) fncode; + const char *name = rs6000_builtin_info[uns_fncode].name; + HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask; + + gcc_assert (name != NULL); + if ((fnmask & RS6000_BTM_CELL) != 0) + error ("%qs is only valid for the cell processor", name); + else if ((fnmask & RS6000_BTM_VSX) != 0) + error ("%qs requires the %qs option", name, "-mvsx"); + else if ((fnmask & RS6000_BTM_HTM) != 0) + error ("%qs requires the %qs option", name, "-mhtm"); + else if ((fnmask & RS6000_BTM_ALTIVEC) != 0) + error ("%qs requires the %qs option", name, "-maltivec"); + else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR)) + == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR)) + error ("%qs requires the %qs and %qs options", name, "-mhard-dfp", + "-mpower8-vector"); + else if ((fnmask & RS6000_BTM_DFP) != 0) + error ("%qs requires the %qs option", name, "-mhard-dfp"); + else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0) + error ("%qs requires the %qs option", name, "-mpower8-vector"); + else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT)) + == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT)) + error ("%qs requires the %qs and %qs options", name, "-mcpu=power9", + "-m64"); + else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0) + error ("%qs requires the %qs option", name, "-mcpu=power9"); + else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT)) + == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT)) + error ("%qs requires the %qs and %qs options", name, "-mcpu=power9", + "-m64"); + else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC) + error ("%qs requires the %qs option", name, "-mcpu=power9"); + else if ((fnmask & RS6000_BTM_P10) != 0) + error ("%qs requires the %qs option", name, "-mcpu=power10"); + else if ((fnmask & RS6000_BTM_MMA) != 0) + error ("%qs requires the %qs option", name, "-mmma"); + else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128) + { + if (!TARGET_HARD_FLOAT) + error ("%qs requires the %qs option", name, "-mhard-float"); + else + error ("%qs requires the %qs option", name, + TARGET_IEEEQUAD ? 
"-mabi=ibmlongdouble" : "-mlong-double-128"); + } + else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0) + error ("%qs requires the %qs option", name, "-mhard-float"); + else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0) + error ("%qs requires ISA 3.0 IEEE 128-bit floating point", name); + else if ((fnmask & RS6000_BTM_FLOAT128) != 0) + error ("%qs requires the %qs option", name, "%<-mfloat128%>"); + else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64)) + == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64)) + error ("%qs requires the %qs (or newer), and %qs or %qs options", + name, "-mcpu=power7", "-m64", "-mpowerpc64"); + else + error ("%qs is not supported with the current options", name); +} + +/* Raise an error message for a builtin function that is called without the + appropriate target options being set. */ + +static void +rs6000_invalid_new_builtin (enum rs6000_gen_builtins fncode) +{ + size_t j = (size_t) fncode; + const char *name = rs6000_builtin_info_x[j].bifname; + + switch (rs6000_builtin_info_x[j].enable) + { + case ENB_P5: + error ("%qs requires the %qs option", name, "-mcpu=power5"); + break; + case ENB_P6: + error ("%qs requires the %qs option", name, "-mcpu=power6"); + break; + case ENB_ALTIVEC: + error ("%qs requires the %qs option", name, "-maltivec"); + break; + case ENB_CELL: + error ("%qs requires the %qs option", name, "-mcpu=cell"); + break; + case ENB_VSX: + error ("%qs requires the %qs option", name, "-mvsx"); + break; + case ENB_P7: + error ("%qs requires the %qs option", name, "-mcpu=power7"); + break; + case ENB_P7_64: + error ("%qs requires the %qs option and either the %qs or %qs option", + name, "-mcpu=power7", "-m64", "-mpowerpc64"); + break; + case ENB_P8: + error ("%qs requires the %qs option", name, "-mcpu=power8"); + break; + case ENB_P8V: + error ("%qs requires the %qs option", name, "-mpower8-vector"); + break; + case ENB_P9: + error ("%qs requires the %qs option", name, "-mcpu=power9"); + break; + case ENB_P9_64: + error ("%qs requires the %qs option and either the %qs or %qs option", + name, "-mcpu=power9", "-m64", "-mpowerpc64"); + break; + case ENB_P9V: + error ("%qs requires the %qs option", name, "-mpower9-vector"); + break; + case ENB_IEEE128_HW: + error ("%qs requires ISA 3.0 IEEE 128-bit floating point", name); + break; + case ENB_DFP: + error ("%qs requires the %qs option", name, "-mhard-dfp"); + break; + case ENB_CRYPTO: + error ("%qs requires the %qs option", name, "-mcrypto"); + break; + case ENB_HTM: + error ("%qs requires the %qs option", name, "-mhtm"); + break; + case ENB_P10: + error ("%qs requires the %qs option", name, "-mcpu=power10"); + break; + case ENB_P10_64: + error ("%qs requires the %qs option and either the %qs or %qs option", + name, "-mcpu=power10", "-m64", "-mpowerpc64"); + break; + case ENB_MMA: + error ("%qs requires the %qs option", name, "-mmma"); + break; + default: + case ENB_ALWAYS: + gcc_unreachable (); + } +} + +/* Target hook for early folding of built-ins, shamelessly stolen + from ia64.c. */ + +tree +rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED, + int n_args ATTRIBUTE_UNUSED, + tree *args ATTRIBUTE_UNUSED, + bool ignore ATTRIBUTE_UNUSED) +{ +#ifdef SUBTARGET_FOLD_BUILTIN + return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore); +#else + return NULL_TREE; +#endif +} + +/* Helper function to sort out which built-ins may be valid without having + a LHS. */ +static bool +rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code) +{ + /* Check for built-ins explicitly marked as a void function. 
*/ + if (rs6000_builtin_info[fn_code].attr & RS6000_BTC_VOID) + return true; + + switch (fn_code) + { + case ALTIVEC_BUILTIN_STVX_V16QI: + case ALTIVEC_BUILTIN_STVX_V8HI: + case ALTIVEC_BUILTIN_STVX_V4SI: + case ALTIVEC_BUILTIN_STVX_V4SF: + case ALTIVEC_BUILTIN_STVX_V2DI: + case ALTIVEC_BUILTIN_STVX_V2DF: + case VSX_BUILTIN_STXVW4X_V16QI: + case VSX_BUILTIN_STXVW4X_V8HI: + case VSX_BUILTIN_STXVW4X_V4SF: + case VSX_BUILTIN_STXVW4X_V4SI: + case VSX_BUILTIN_STXVD2X_V2DF: + case VSX_BUILTIN_STXVD2X_V2DI: + return true; + default: + return false; + } +} + +/* Helper function to handle the gimple folding of a vector compare + operation. This sets up true/false vectors, and uses the + VEC_COND_EXPR operation. + CODE indicates which comparison is to be made. (EQ, GT, ...). + TYPE indicates the type of the result. + Code is inserted before GSI. */ +static tree +fold_build_vec_cmp (tree_code code, tree type, tree arg0, tree arg1, + gimple_stmt_iterator *gsi) +{ + tree cmp_type = truth_type_for (type); + tree zero_vec = build_zero_cst (type); + tree minus_one_vec = build_minus_one_cst (type); + tree temp = create_tmp_reg_or_ssa_name (cmp_type); + gimple *g = gimple_build_assign (temp, code, arg0, arg1); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + return fold_build3 (VEC_COND_EXPR, type, temp, minus_one_vec, zero_vec); +} + +/* Helper function to handle the in-between steps for the + vector compare built-ins. */ +static void +fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt) +{ + tree arg0 = gimple_call_arg (stmt, 0); + tree arg1 = gimple_call_arg (stmt, 1); + tree lhs = gimple_call_lhs (stmt); + tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1, gsi); + gimple *g = gimple_build_assign (lhs, cmp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); +} + +/* Helper function to map V2DF and V4SF types to their + integral equivalents (V2DI and V4SI). */ +tree map_to_integral_tree_type (tree input_tree_type) +{ + if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type))) + return input_tree_type; + else + { + if (types_compatible_p (TREE_TYPE (input_tree_type), + TREE_TYPE (V2DF_type_node))) + return V2DI_type_node; + else if (types_compatible_p (TREE_TYPE (input_tree_type), + TREE_TYPE (V4SF_type_node))) + return V4SI_type_node; + else + gcc_unreachable (); + } +} + +/* Helper function to handle the vector merge[hl] built-ins. The + implementation difference between h and l versions for this code are in + the values used when building of the permute vector for high word versus + low word merge. The variance is keyed off the use_high parameter. */ +static void +fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high) +{ + tree arg0 = gimple_call_arg (stmt, 0); + tree arg1 = gimple_call_arg (stmt, 1); + tree lhs = gimple_call_lhs (stmt); + tree lhs_type = TREE_TYPE (lhs); + int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type); + int midpoint = n_elts / 2; + int offset = 0; + + if (use_high == 1) + offset = midpoint; + + /* The permute_type will match the lhs for integral types. For double and + float types, the permute type needs to map to the V2 or V4 type that + matches size. 
*/ + tree permute_type; + permute_type = map_to_integral_tree_type (lhs_type); + tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1); + + for (int i = 0; i < midpoint; i++) + { + elts.safe_push (build_int_cst (TREE_TYPE (permute_type), + offset + i)); + elts.safe_push (build_int_cst (TREE_TYPE (permute_type), + offset + n_elts + i)); + } + + tree permute = elts.build (); + + gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); +} + +/* Helper function to handle the vector merge[eo] built-ins. */ +static void +fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd) +{ + tree arg0 = gimple_call_arg (stmt, 0); + tree arg1 = gimple_call_arg (stmt, 1); + tree lhs = gimple_call_lhs (stmt); + tree lhs_type = TREE_TYPE (lhs); + int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type); + + /* The permute_type will match the lhs for integral types. For double and + float types, the permute type needs to map to the V2 or V4 type that + matches size. */ + tree permute_type; + permute_type = map_to_integral_tree_type (lhs_type); + + tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1); + + /* Build the permute vector. */ + for (int i = 0; i < n_elts / 2; i++) + { + elts.safe_push (build_int_cst (TREE_TYPE (permute_type), + 2*i + use_odd)); + elts.safe_push (build_int_cst (TREE_TYPE (permute_type), + 2*i + use_odd + n_elts)); + } + + tree permute = elts.build (); + + gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); +} + +/* Expand the MMA built-ins early, so that we can convert the pass-by-reference + __vector_quad arguments into pass-by-value arguments, leading to more + efficient code generation. */ + +bool +rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi) +{ + gimple *stmt = gsi_stmt (*gsi); + tree fndecl = gimple_call_fndecl (stmt); + enum rs6000_builtins fncode + = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl); + unsigned attr = rs6000_builtin_info[fncode].attr; + + if ((attr & RS6000_BTC_GIMPLE) == 0) + return false; + + unsigned nopnds = (attr & RS6000_BTC_OPND_MASK); + gimple_seq new_seq = NULL; + gimple *new_call; + tree new_decl; + + if (fncode == MMA_BUILTIN_DISASSEMBLE_ACC + || fncode == VSX_BUILTIN_DISASSEMBLE_PAIR) + { + /* This is an MMA disassemble built-in function. */ + push_gimplify_context (true); + unsigned nvec = (fncode == MMA_BUILTIN_DISASSEMBLE_ACC) ? 4 : 2; + tree dst_ptr = gimple_call_arg (stmt, 0); + tree src_ptr = gimple_call_arg (stmt, 1); + tree src_type = TREE_TYPE (src_ptr); + tree src = create_tmp_reg_or_ssa_name (TREE_TYPE (src_type)); + gimplify_assign (src, build_simple_mem_ref (src_ptr), &new_seq); + + /* If we are not disassembling an accumulator/pair or our destination is + another accumulator/pair, then just copy the entire thing as is. 
*/ + if ((fncode == MMA_BUILTIN_DISASSEMBLE_ACC + && TREE_TYPE (TREE_TYPE (dst_ptr)) == vector_quad_type_node) + || (fncode == VSX_BUILTIN_DISASSEMBLE_PAIR + && TREE_TYPE (TREE_TYPE (dst_ptr)) == vector_pair_type_node)) + { + tree dst = build_simple_mem_ref (build1 (VIEW_CONVERT_EXPR, + src_type, dst_ptr)); + gimplify_assign (dst, src, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + + /* If we're disassembling an accumulator into a different type, we need + to emit a xxmfacc instruction now, since we cannot do it later. */ + if (fncode == MMA_BUILTIN_DISASSEMBLE_ACC) + { + new_decl = rs6000_builtin_decls[MMA_BUILTIN_XXMFACC_INTERNAL]; + new_call = gimple_build_call (new_decl, 1, src); + src = create_tmp_reg_or_ssa_name (vector_quad_type_node); + gimple_call_set_lhs (new_call, src); + gimple_seq_add_stmt (&new_seq, new_call); + } + + /* Copy the accumulator/pair vector by vector. */ + new_decl = rs6000_builtin_decls[fncode + 1]; + tree dst_type = build_pointer_type_for_mode (unsigned_V16QI_type_node, + ptr_mode, true); + tree dst_base = build1 (VIEW_CONVERT_EXPR, dst_type, dst_ptr); + for (unsigned i = 0; i < nvec; i++) + { + unsigned index = WORDS_BIG_ENDIAN ? i : nvec - 1 - i; + tree dst = build2 (MEM_REF, unsigned_V16QI_type_node, dst_base, + build_int_cst (dst_type, index * 16)); + tree dstssa = create_tmp_reg_or_ssa_name (unsigned_V16QI_type_node); + new_call = gimple_build_call (new_decl, 2, src, + build_int_cstu (uint16_type_node, i)); + gimple_call_set_lhs (new_call, dstssa); + gimple_seq_add_stmt (&new_seq, new_call); + gimplify_assign (dst, dstssa, &new_seq); + } + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + else if (fncode == VSX_BUILTIN_LXVP) + { + push_gimplify_context (true); + tree offset = gimple_call_arg (stmt, 0); + tree ptr = gimple_call_arg (stmt, 1); + tree lhs = gimple_call_lhs (stmt); + if (TREE_TYPE (TREE_TYPE (ptr)) != vector_pair_type_node) + ptr = build1 (VIEW_CONVERT_EXPR, + build_pointer_type (vector_pair_type_node), ptr); + tree mem = build_simple_mem_ref (build2 (POINTER_PLUS_EXPR, + TREE_TYPE (ptr), ptr, offset)); + gimplify_assign (lhs, mem, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + else if (fncode == VSX_BUILTIN_STXVP) + { + push_gimplify_context (true); + tree src = gimple_call_arg (stmt, 0); + tree offset = gimple_call_arg (stmt, 1); + tree ptr = gimple_call_arg (stmt, 2); + if (TREE_TYPE (TREE_TYPE (ptr)) != vector_pair_type_node) + ptr = build1 (VIEW_CONVERT_EXPR, + build_pointer_type (vector_pair_type_node), ptr); + tree mem = build_simple_mem_ref (build2 (POINTER_PLUS_EXPR, + TREE_TYPE (ptr), ptr, offset)); + gimplify_assign (mem, src, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + + /* Convert this built-in into an internal version that uses pass-by-value + arguments. The internal built-in follows immediately after this one. */ + new_decl = rs6000_builtin_decls[fncode + 1]; + tree lhs, op[MAX_MMA_OPERANDS]; + tree acc = gimple_call_arg (stmt, 0); + push_gimplify_context (true); + + if ((attr & RS6000_BTC_QUAD) != 0) + { + /* This built-in has a pass-by-reference accumulator input, so load it + into a temporary accumulator for use as a pass-by-value input. 
*/ + op[0] = create_tmp_reg_or_ssa_name (vector_quad_type_node); + for (unsigned i = 1; i < nopnds; i++) + op[i] = gimple_call_arg (stmt, i); + gimplify_assign (op[0], build_simple_mem_ref (acc), &new_seq); + } + else + { + /* This built-in does not use its pass-by-reference accumulator argument + as an input argument, so remove it from the input list. */ + nopnds--; + for (unsigned i = 0; i < nopnds; i++) + op[i] = gimple_call_arg (stmt, i + 1); + } + + switch (nopnds) + { + case 0: + new_call = gimple_build_call (new_decl, 0); + break; + case 1: + new_call = gimple_build_call (new_decl, 1, op[0]); + break; + case 2: + new_call = gimple_build_call (new_decl, 2, op[0], op[1]); + break; + case 3: + new_call = gimple_build_call (new_decl, 3, op[0], op[1], op[2]); + break; + case 4: + new_call = gimple_build_call (new_decl, 4, op[0], op[1], op[2], op[3]); + break; + case 5: + new_call = gimple_build_call (new_decl, 5, op[0], op[1], op[2], op[3], + op[4]); + break; + case 6: + new_call = gimple_build_call (new_decl, 6, op[0], op[1], op[2], op[3], + op[4], op[5]); + break; + case 7: + new_call = gimple_build_call (new_decl, 7, op[0], op[1], op[2], op[3], + op[4], op[5], op[6]); + break; + default: + gcc_unreachable (); + } + + if (fncode == VSX_BUILTIN_BUILD_PAIR || fncode == VSX_BUILTIN_ASSEMBLE_PAIR) + lhs = create_tmp_reg_or_ssa_name (vector_pair_type_node); + else + lhs = create_tmp_reg_or_ssa_name (vector_quad_type_node); + gimple_call_set_lhs (new_call, lhs); + gimple_seq_add_stmt (&new_seq, new_call); + gimplify_assign (build_simple_mem_ref (acc), lhs, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + + return true; +} + +/* Fold a machine-dependent built-in in GIMPLE. (For folding into + a constant, use rs6000_fold_builtin.) */ + +bool +rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi) +{ + if (new_builtins_are_live) + return rs6000_gimple_fold_new_builtin (gsi); + + gimple *stmt = gsi_stmt (*gsi); + tree fndecl = gimple_call_fndecl (stmt); + gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD); + enum rs6000_builtins fn_code + = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl); + tree arg0, arg1, lhs, temp; + enum tree_code bcode; + gimple *g; + + size_t uns_fncode = (size_t) fn_code; + enum insn_code icode = rs6000_builtin_info[uns_fncode].icode; + const char *fn_name1 = rs6000_builtin_info[uns_fncode].name; + const char *fn_name2 = (icode != CODE_FOR_nothing) + ? get_insn_name ((int) icode) + : "nothing"; + + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n", + fn_code, fn_name1, fn_name2); + + if (!rs6000_fold_gimple) + return false; + + /* Prevent gimple folding for code that does not have a LHS, unless it is + allowed per the rs6000_builtin_valid_without_lhs helper function. */ + if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code)) + return false; + + /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */ + if (!rs6000_builtin_is_supported_p (fn_code)) + return false; + + if (rs6000_gimple_fold_mma_builtin (gsi)) + return true; + + switch (fn_code) + { + /* Flavors of vec_add. We deliberately don't expand + P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to + TImode, resulting in much poorer code generation. 
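+     In the simple case, d = vec_add (a, b) folds to d = a + b; the code
+     below performs the addition in the corresponding unsigned vector type
+     when the signed element type does not wrap on overflow.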
*/ + case ALTIVEC_BUILTIN_VADDUBM: + case ALTIVEC_BUILTIN_VADDUHM: + case ALTIVEC_BUILTIN_VADDUWM: + case P8V_BUILTIN_VADDUDM: + case ALTIVEC_BUILTIN_VADDFP: + case VSX_BUILTIN_XVADDDP: + bcode = PLUS_EXPR; + do_binary: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs))) + && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs)))) + { + /* Ensure the binary operation is performed in a type + that wraps if it is integral type. */ + gimple_seq stmts = NULL; + tree type = unsigned_type_for (TREE_TYPE (lhs)); + tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + type, arg0); + tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + type, arg1); + tree res = gimple_build (&stmts, gimple_location (stmt), bcode, + type, uarg0, uarg1); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR, + build1 (VIEW_CONVERT_EXPR, + TREE_TYPE (lhs), res)); + gsi_replace (gsi, g, true); + return true; + } + g = gimple_build_assign (lhs, bcode, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_sub. We deliberately don't expand + P8V_BUILTIN_VSUBUQM. */ + case ALTIVEC_BUILTIN_VSUBUBM: + case ALTIVEC_BUILTIN_VSUBUHM: + case ALTIVEC_BUILTIN_VSUBUWM: + case P8V_BUILTIN_VSUBUDM: + case ALTIVEC_BUILTIN_VSUBFP: + case VSX_BUILTIN_XVSUBDP: + bcode = MINUS_EXPR; + goto do_binary; + case VSX_BUILTIN_XVMULSP: + case VSX_BUILTIN_XVMULDP: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Even element flavors of vec_mul (signed). */ + case ALTIVEC_BUILTIN_VMULESB: + case ALTIVEC_BUILTIN_VMULESH: + case P8V_BUILTIN_VMULESW: + /* Even element flavors of vec_mul (unsigned). */ + case ALTIVEC_BUILTIN_VMULEUB: + case ALTIVEC_BUILTIN_VMULEUH: + case P8V_BUILTIN_VMULEUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Odd element flavors of vec_mul (signed). */ + case ALTIVEC_BUILTIN_VMULOSB: + case ALTIVEC_BUILTIN_VMULOSH: + case P8V_BUILTIN_VMULOSW: + /* Odd element flavors of vec_mul (unsigned). */ + case ALTIVEC_BUILTIN_VMULOUB: + case ALTIVEC_BUILTIN_VMULOUH: + case P8V_BUILTIN_VMULOUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_div (Integer). */ + case VSX_BUILTIN_DIV_V2DI: + case VSX_BUILTIN_UDIV_V2DI: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_div (Float). 
*/ + case VSX_BUILTIN_XVDIVSP: + case VSX_BUILTIN_XVDIVDP: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_and. */ + case ALTIVEC_BUILTIN_VAND_V16QI_UNS: + case ALTIVEC_BUILTIN_VAND_V16QI: + case ALTIVEC_BUILTIN_VAND_V8HI_UNS: + case ALTIVEC_BUILTIN_VAND_V8HI: + case ALTIVEC_BUILTIN_VAND_V4SI_UNS: + case ALTIVEC_BUILTIN_VAND_V4SI: + case ALTIVEC_BUILTIN_VAND_V2DI_UNS: + case ALTIVEC_BUILTIN_VAND_V2DI: + case ALTIVEC_BUILTIN_VAND_V4SF: + case ALTIVEC_BUILTIN_VAND_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_andc. */ + case ALTIVEC_BUILTIN_VANDC_V16QI_UNS: + case ALTIVEC_BUILTIN_VANDC_V16QI: + case ALTIVEC_BUILTIN_VANDC_V8HI_UNS: + case ALTIVEC_BUILTIN_VANDC_V8HI: + case ALTIVEC_BUILTIN_VANDC_V4SI_UNS: + case ALTIVEC_BUILTIN_VANDC_V4SI: + case ALTIVEC_BUILTIN_VANDC_V2DI_UNS: + case ALTIVEC_BUILTIN_VANDC_V2DI: + case ALTIVEC_BUILTIN_VANDC_V4SF: + case ALTIVEC_BUILTIN_VANDC_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_nand. */ + case P8V_BUILTIN_VEC_NAND: + case P8V_BUILTIN_NAND_V16QI_UNS: + case P8V_BUILTIN_NAND_V16QI: + case P8V_BUILTIN_NAND_V8HI_UNS: + case P8V_BUILTIN_NAND_V8HI: + case P8V_BUILTIN_NAND_V4SI_UNS: + case P8V_BUILTIN_NAND_V4SI: + case P8V_BUILTIN_NAND_V2DI_UNS: + case P8V_BUILTIN_NAND_V2DI: + case P8V_BUILTIN_NAND_V4SF: + case P8V_BUILTIN_NAND_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_or. */ + case ALTIVEC_BUILTIN_VOR_V16QI_UNS: + case ALTIVEC_BUILTIN_VOR_V16QI: + case ALTIVEC_BUILTIN_VOR_V8HI_UNS: + case ALTIVEC_BUILTIN_VOR_V8HI: + case ALTIVEC_BUILTIN_VOR_V4SI_UNS: + case ALTIVEC_BUILTIN_VOR_V4SI: + case ALTIVEC_BUILTIN_VOR_V2DI_UNS: + case ALTIVEC_BUILTIN_VOR_V2DI: + case ALTIVEC_BUILTIN_VOR_V4SF: + case ALTIVEC_BUILTIN_VOR_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_orc. 
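+     vec_orc (a, b) folds to a | ~b.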
*/ + case P8V_BUILTIN_ORC_V16QI_UNS: + case P8V_BUILTIN_ORC_V16QI: + case P8V_BUILTIN_ORC_V8HI_UNS: + case P8V_BUILTIN_ORC_V8HI: + case P8V_BUILTIN_ORC_V4SI_UNS: + case P8V_BUILTIN_ORC_V4SI: + case P8V_BUILTIN_ORC_V2DI_UNS: + case P8V_BUILTIN_ORC_V2DI: + case P8V_BUILTIN_ORC_V4SF: + case P8V_BUILTIN_ORC_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_xor. */ + case ALTIVEC_BUILTIN_VXOR_V16QI_UNS: + case ALTIVEC_BUILTIN_VXOR_V16QI: + case ALTIVEC_BUILTIN_VXOR_V8HI_UNS: + case ALTIVEC_BUILTIN_VXOR_V8HI: + case ALTIVEC_BUILTIN_VXOR_V4SI_UNS: + case ALTIVEC_BUILTIN_VXOR_V4SI: + case ALTIVEC_BUILTIN_VXOR_V2DI_UNS: + case ALTIVEC_BUILTIN_VXOR_V2DI: + case ALTIVEC_BUILTIN_VXOR_V4SF: + case ALTIVEC_BUILTIN_VXOR_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_nor. */ + case ALTIVEC_BUILTIN_VNOR_V16QI_UNS: + case ALTIVEC_BUILTIN_VNOR_V16QI: + case ALTIVEC_BUILTIN_VNOR_V8HI_UNS: + case ALTIVEC_BUILTIN_VNOR_V8HI: + case ALTIVEC_BUILTIN_VNOR_V4SI_UNS: + case ALTIVEC_BUILTIN_VNOR_V4SI: + case ALTIVEC_BUILTIN_VNOR_V2DI_UNS: + case ALTIVEC_BUILTIN_VNOR_V2DI: + case ALTIVEC_BUILTIN_VNOR_V4SF: + case ALTIVEC_BUILTIN_VNOR_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_abs. */ + case ALTIVEC_BUILTIN_ABS_V16QI: + case ALTIVEC_BUILTIN_ABS_V8HI: + case ALTIVEC_BUILTIN_ABS_V4SI: + case ALTIVEC_BUILTIN_ABS_V4SF: + case P8V_BUILTIN_ABS_V2DI: + case VSX_BUILTIN_XVABSDP: + arg0 = gimple_call_arg (stmt, 0); + if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))) + && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0)))) + return false; + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, ABS_EXPR, arg0); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_min. 
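+     The float/double variants may only be folded to MIN_EXPR when NaNs
+     need not be honored, since MIN_EXPR leaves the result for NaN
+     operands unspecified; see the HONOR_NANS check below.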
*/ + case VSX_BUILTIN_XVMINDP: + case ALTIVEC_BUILTIN_VMINFP: + { + lhs = gimple_call_lhs (stmt); + tree type = TREE_TYPE (lhs); + if (HONOR_NANS (type)) + return false; + gcc_fallthrough (); + } + case P8V_BUILTIN_VMINSD: + case P8V_BUILTIN_VMINUD: + case ALTIVEC_BUILTIN_VMINSB: + case ALTIVEC_BUILTIN_VMINSH: + case ALTIVEC_BUILTIN_VMINSW: + case ALTIVEC_BUILTIN_VMINUB: + case ALTIVEC_BUILTIN_VMINUH: + case ALTIVEC_BUILTIN_VMINUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_max. */ + case VSX_BUILTIN_XVMAXDP: + case ALTIVEC_BUILTIN_VMAXFP: + { + lhs = gimple_call_lhs (stmt); + tree type = TREE_TYPE (lhs); + if (HONOR_NANS (type)) + return false; + gcc_fallthrough (); + } + case P8V_BUILTIN_VMAXSD: + case P8V_BUILTIN_VMAXUD: + case ALTIVEC_BUILTIN_VMAXSB: + case ALTIVEC_BUILTIN_VMAXSH: + case ALTIVEC_BUILTIN_VMAXSW: + case ALTIVEC_BUILTIN_VMAXUB: + case ALTIVEC_BUILTIN_VMAXUH: + case ALTIVEC_BUILTIN_VMAXUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_eqv. */ + case P8V_BUILTIN_EQV_V16QI: + case P8V_BUILTIN_EQV_V8HI: + case P8V_BUILTIN_EQV_V4SI: + case P8V_BUILTIN_EQV_V4SF: + case P8V_BUILTIN_EQV_V2DF: + case P8V_BUILTIN_EQV_V2DI: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_rotate_left. */ + case ALTIVEC_BUILTIN_VRLB: + case ALTIVEC_BUILTIN_VRLH: + case ALTIVEC_BUILTIN_VRLW: + case P8V_BUILTIN_VRLD: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vector shift right algebraic. + vec_sra{b,h,w} -> vsra{b,h,w}. */ + case ALTIVEC_BUILTIN_VSRAB: + case ALTIVEC_BUILTIN_VSRAH: + case ALTIVEC_BUILTIN_VSRAW: + case P8V_BUILTIN_VSRAD: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + tree arg1_type = TREE_TYPE (arg1); + tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1)); + tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type)); + location_t loc = gimple_location (stmt); + /* Force arg1 into the range valid matching the arg0 type. */ + /* Build a vector consisting of the max valid bit-size values. */ + int n_elts = VECTOR_CST_NELTS (arg1); + tree element_size = build_int_cst (unsigned_element_type, + 128 / n_elts); + tree_vector_builder elts (unsigned_arg1_type, n_elts, 1); + for (int i = 0; i < n_elts; i++) + elts.safe_push (element_size); + tree modulo_tree = elts.build (); + /* Modulo the provided shift value against that vector. 
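+        For vector int this reduces each shift count modulo 32 (128 bits
+        across 4 elements), matching the behavior of the vsraw instruction.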
*/ + gimple_seq stmts = NULL; + tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_arg1_type, arg1); + tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR, + unsigned_arg1_type, unsigned_arg1, + modulo_tree); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + /* And finally, do the shift. */ + g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + /* Flavors of vector shift left. + builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */ + case ALTIVEC_BUILTIN_VSLB: + case ALTIVEC_BUILTIN_VSLH: + case ALTIVEC_BUILTIN_VSLW: + case P8V_BUILTIN_VSLD: + { + location_t loc; + gimple_seq stmts = NULL; + arg0 = gimple_call_arg (stmt, 0); + tree arg0_type = TREE_TYPE (arg0); + if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type)) + && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type))) + return false; + arg1 = gimple_call_arg (stmt, 1); + tree arg1_type = TREE_TYPE (arg1); + tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1)); + tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type)); + loc = gimple_location (stmt); + lhs = gimple_call_lhs (stmt); + /* Force arg1 into the range valid matching the arg0 type. */ + /* Build a vector consisting of the max valid bit-size values. */ + int n_elts = VECTOR_CST_NELTS (arg1); + int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type)) + * BITS_PER_UNIT; + tree element_size = build_int_cst (unsigned_element_type, + tree_size_in_bits / n_elts); + tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1); + for (int i = 0; i < n_elts; i++) + elts.safe_push (element_size); + tree modulo_tree = elts.build (); + /* Modulo the provided shift value against that vector. */ + tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_arg1_type, arg1); + tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR, + unsigned_arg1_type, unsigned_arg1, + modulo_tree); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + /* And finally, do the shift. */ + g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + /* Flavors of vector shift right. */ + case ALTIVEC_BUILTIN_VSRB: + case ALTIVEC_BUILTIN_VSRH: + case ALTIVEC_BUILTIN_VSRW: + case P8V_BUILTIN_VSRD: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + tree arg1_type = TREE_TYPE (arg1); + tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1)); + tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type)); + location_t loc = gimple_location (stmt); + gimple_seq stmts = NULL; + /* Convert arg0 to unsigned. */ + tree arg0_unsigned + = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_type_for (TREE_TYPE (arg0)), arg0); + /* Force arg1 into the range valid matching the arg0 type. */ + /* Build a vector consisting of the max valid bit-size values. */ + int n_elts = VECTOR_CST_NELTS (arg1); + tree element_size = build_int_cst (unsigned_element_type, + 128 / n_elts); + tree_vector_builder elts (unsigned_arg1_type, n_elts, 1); + for (int i = 0; i < n_elts; i++) + elts.safe_push (element_size); + tree modulo_tree = elts.build (); + /* Modulo the provided shift value against that vector. 
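+        (The shift itself is then done on the unsigned copy of arg0, so the
+        RSHIFT_EXPR below is a logical, zero-filling shift.)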
*/ + tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_arg1_type, arg1); + tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR, + unsigned_arg1_type, unsigned_arg1, + modulo_tree); + /* Do the shift. */ + tree res + = gimple_build (&stmts, RSHIFT_EXPR, + TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1); + /* Convert result back to the lhs type. */ + res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + replace_call_with_value (gsi, res); + return true; + } + /* Vector loads. */ + case ALTIVEC_BUILTIN_LVX_V16QI: + case ALTIVEC_BUILTIN_LVX_V8HI: + case ALTIVEC_BUILTIN_LVX_V4SI: + case ALTIVEC_BUILTIN_LVX_V4SF: + case ALTIVEC_BUILTIN_LVX_V2DI: + case ALTIVEC_BUILTIN_LVX_V2DF: + case ALTIVEC_BUILTIN_LVX_V1TI: + { + arg0 = gimple_call_arg (stmt, 0); // offset + arg1 = gimple_call_arg (stmt, 1); // address + lhs = gimple_call_lhs (stmt); + location_t loc = gimple_location (stmt); + /* Since arg1 may be cast to a different type, just use ptr_type_node + here instead of trying to enforce TBAA on pointer types. */ + tree arg1_type = ptr_type_node; + tree lhs_type = TREE_TYPE (lhs); + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg0. The resulting type will match + the type of arg1. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg1_type, arg1, temp_offset); + /* Mask off any lower bits from the address. */ + tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR, + arg1_type, temp_addr, + build_int_cst (arg1_type, -16)); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (aligned_addr)) + { + tree t = make_ssa_name (TREE_TYPE (aligned_addr)); + gimple *g = gimple_build_assign (t, aligned_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + aligned_addr = t; + } + /* Use the build2 helper to set up the mem_ref. The MEM_REF could also + take an offset, but since we've already incorporated the offset + above, here we just pass in a zero. */ + gimple *g + = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr, + build_int_cst (arg1_type, 0))); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + /* Vector stores. */ + case ALTIVEC_BUILTIN_STVX_V16QI: + case ALTIVEC_BUILTIN_STVX_V8HI: + case ALTIVEC_BUILTIN_STVX_V4SI: + case ALTIVEC_BUILTIN_STVX_V4SF: + case ALTIVEC_BUILTIN_STVX_V2DI: + case ALTIVEC_BUILTIN_STVX_V2DF: + { + arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */ + arg1 = gimple_call_arg (stmt, 1); /* Offset. */ + tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */ + location_t loc = gimple_location (stmt); + tree arg0_type = TREE_TYPE (arg0); + /* Use ptr_type_node (no TBAA) for the arg2_type. + FIXME: (Richard) "A proper fix would be to transition this type as + seen from the frontend to GIMPLE, for example in a similar way we + do for MEM_REFs by piggy-backing that on an extra argument, a + constant zero pointer of the alias pointer type to use (which would + also serve as a type indicator of the store itself). I'd use a + target specific internal function for this (not sure if we can have + those target specific, but I guess if it's folded away then that's + fine) and get away with the overload set." */ + tree arg2_type = ptr_type_node; + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. 
Create + the tree using the value from arg0. The resulting type will match + the type of arg2. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg2_type, arg2, temp_offset); + /* Mask off any lower bits from the address. */ + tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR, + arg2_type, temp_addr, + build_int_cst (arg2_type, -16)); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (aligned_addr)) + { + tree t = make_ssa_name (TREE_TYPE (aligned_addr)); + gimple *g = gimple_build_assign (t, aligned_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + aligned_addr = t; + } + /* The desired gimple result should be similar to: + MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */ + gimple *g + = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr, + build_int_cst (arg2_type, 0)), arg0); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + /* unaligned Vector loads. */ + case VSX_BUILTIN_LXVW4X_V16QI: + case VSX_BUILTIN_LXVW4X_V8HI: + case VSX_BUILTIN_LXVW4X_V4SF: + case VSX_BUILTIN_LXVW4X_V4SI: + case VSX_BUILTIN_LXVD2X_V2DF: + case VSX_BUILTIN_LXVD2X_V2DI: + { + arg0 = gimple_call_arg (stmt, 0); // offset + arg1 = gimple_call_arg (stmt, 1); // address + lhs = gimple_call_lhs (stmt); + location_t loc = gimple_location (stmt); + /* Since arg1 may be cast to a different type, just use ptr_type_node + here instead of trying to enforce TBAA on pointer types. */ + tree arg1_type = ptr_type_node; + tree lhs_type = TREE_TYPE (lhs); + /* In GIMPLE the type of the MEM_REF specifies the alignment. The + required alignment (power) is 4 bytes regardless of data type. */ + tree align_ltype = build_aligned_type (lhs_type, 4); + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg0. The resulting type will match + the type of arg1. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg1_type, arg1, temp_offset); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (temp_addr)) + { + tree t = make_ssa_name (TREE_TYPE (temp_addr)); + gimple *g = gimple_build_assign (t, temp_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + temp_addr = t; + } + /* Use the build2 helper to set up the mem_ref. The MEM_REF could also + take an offset, but since we've already incorporated the offset + above, here we just pass in a zero. */ + gimple *g; + g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr, + build_int_cst (arg1_type, 0))); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + /* unaligned Vector stores. */ + case VSX_BUILTIN_STXVW4X_V16QI: + case VSX_BUILTIN_STXVW4X_V8HI: + case VSX_BUILTIN_STXVW4X_V4SF: + case VSX_BUILTIN_STXVW4X_V4SI: + case VSX_BUILTIN_STXVD2X_V2DF: + case VSX_BUILTIN_STXVD2X_V2DI: + { + arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */ + arg1 = gimple_call_arg (stmt, 1); /* Offset. */ + tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */ + location_t loc = gimple_location (stmt); + tree arg0_type = TREE_TYPE (arg0); + /* Use ptr_type_node (no TBAA) for the arg2_type. */ + tree arg2_type = ptr_type_node; + /* In GIMPLE the type of the MEM_REF specifies the alignment. 
The + required alignment (power) is 4 bytes regardless of data type. */ + tree align_stype = build_aligned_type (arg0_type, 4); + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg1. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg2_type, arg2, temp_offset); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (temp_addr)) + { + tree t = make_ssa_name (TREE_TYPE (temp_addr)); + gimple *g = gimple_build_assign (t, temp_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + temp_addr = t; + } + gimple *g; + g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr, + build_int_cst (arg2_type, 0)), arg0); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + /* Vector Fused multiply-add (fma). */ + case ALTIVEC_BUILTIN_VMADDFP: + case VSX_BUILTIN_XVMADDDP: + case ALTIVEC_BUILTIN_VMLADDUHM: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + tree arg2 = gimple_call_arg (stmt, 2); + lhs = gimple_call_lhs (stmt); + gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2); + gimple_call_set_lhs (g, lhs); + gimple_call_set_nothrow (g, true); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* Vector compares; EQ, NE, GE, GT, LE. */ + case ALTIVEC_BUILTIN_VCMPEQUB: + case ALTIVEC_BUILTIN_VCMPEQUH: + case ALTIVEC_BUILTIN_VCMPEQUW: + case P8V_BUILTIN_VCMPEQUD: + case P10V_BUILTIN_VCMPEQUT: + fold_compare_helper (gsi, EQ_EXPR, stmt); + return true; + + case P9V_BUILTIN_CMPNEB: + case P9V_BUILTIN_CMPNEH: + case P9V_BUILTIN_CMPNEW: + case P10V_BUILTIN_CMPNET: + fold_compare_helper (gsi, NE_EXPR, stmt); + return true; + + case VSX_BUILTIN_CMPGE_16QI: + case VSX_BUILTIN_CMPGE_U16QI: + case VSX_BUILTIN_CMPGE_8HI: + case VSX_BUILTIN_CMPGE_U8HI: + case VSX_BUILTIN_CMPGE_4SI: + case VSX_BUILTIN_CMPGE_U4SI: + case VSX_BUILTIN_CMPGE_2DI: + case VSX_BUILTIN_CMPGE_U2DI: + case P10V_BUILTIN_CMPGE_1TI: + case P10V_BUILTIN_CMPGE_U1TI: + fold_compare_helper (gsi, GE_EXPR, stmt); + return true; + + case ALTIVEC_BUILTIN_VCMPGTSB: + case ALTIVEC_BUILTIN_VCMPGTUB: + case ALTIVEC_BUILTIN_VCMPGTSH: + case ALTIVEC_BUILTIN_VCMPGTUH: + case ALTIVEC_BUILTIN_VCMPGTSW: + case ALTIVEC_BUILTIN_VCMPGTUW: + case P8V_BUILTIN_VCMPGTUD: + case P8V_BUILTIN_VCMPGTSD: + case P10V_BUILTIN_VCMPGTUT: + case P10V_BUILTIN_VCMPGTST: + fold_compare_helper (gsi, GT_EXPR, stmt); + return true; + + case VSX_BUILTIN_CMPLE_16QI: + case VSX_BUILTIN_CMPLE_U16QI: + case VSX_BUILTIN_CMPLE_8HI: + case VSX_BUILTIN_CMPLE_U8HI: + case VSX_BUILTIN_CMPLE_4SI: + case VSX_BUILTIN_CMPLE_U4SI: + case VSX_BUILTIN_CMPLE_2DI: + case VSX_BUILTIN_CMPLE_U2DI: + case P10V_BUILTIN_CMPLE_1TI: + case P10V_BUILTIN_CMPLE_U1TI: + fold_compare_helper (gsi, LE_EXPR, stmt); + return true; + + /* flavors of vec_splat_[us]{8,16,32}. */ + case ALTIVEC_BUILTIN_VSPLTISB: + case ALTIVEC_BUILTIN_VSPLTISH: + case ALTIVEC_BUILTIN_VSPLTISW: + { + arg0 = gimple_call_arg (stmt, 0); + lhs = gimple_call_lhs (stmt); + + /* Only fold the vec_splat_*() if the lower bits of arg 0 is a + 5-bit signed constant in range -16 to +15. 
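+     For example, vec_splat_s8 (5) becomes the constant vector
+     { 5, 5, ..., 5 }.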
*/ + if (TREE_CODE (arg0) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15)) + return false; + gimple_seq stmts = NULL; + location_t loc = gimple_location (stmt); + tree splat_value = gimple_convert (&stmts, loc, + TREE_TYPE (TREE_TYPE (lhs)), arg0); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value); + g = gimple_build_assign (lhs, splat_tree); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* Flavors of vec_splat. */ + /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */ + case ALTIVEC_BUILTIN_VSPLTB: + case ALTIVEC_BUILTIN_VSPLTH: + case ALTIVEC_BUILTIN_VSPLTW: + case VSX_BUILTIN_XXSPLTD_V2DI: + case VSX_BUILTIN_XXSPLTD_V2DF: + { + arg0 = gimple_call_arg (stmt, 0); /* input vector. */ + arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */ + /* Only fold the vec_splat_*() if arg1 is both a constant value and + is a valid index into the arg0 vector. */ + unsigned int n_elts = VECTOR_CST_NELTS (arg0); + if (TREE_CODE (arg1) != INTEGER_CST + || TREE_INT_CST_LOW (arg1) > (n_elts -1)) + return false; + lhs = gimple_call_lhs (stmt); + tree lhs_type = TREE_TYPE (lhs); + tree arg0_type = TREE_TYPE (arg0); + tree splat; + if (TREE_CODE (arg0) == VECTOR_CST) + splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1)); + else + { + /* Determine (in bits) the length and start location of the + splat value for a call to the tree_vec_extract helper. */ + int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type)) + * BITS_PER_UNIT / n_elts; + int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size; + tree len = build_int_cst (bitsizetype, splat_elem_size); + tree start = build_int_cst (bitsizetype, splat_start_bit); + splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0, + len, start); + } + /* And finally, build the new vector. */ + tree splat_tree = build_vector_from_val (lhs_type, splat); + g = gimple_build_assign (lhs, splat_tree); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* vec_mergel (integrals). */ + case ALTIVEC_BUILTIN_VMRGLH: + case ALTIVEC_BUILTIN_VMRGLW: + case VSX_BUILTIN_XXMRGLW_4SI: + case ALTIVEC_BUILTIN_VMRGLB: + case VSX_BUILTIN_VEC_MERGEL_V2DI: + case VSX_BUILTIN_XXMRGLW_4SF: + case VSX_BUILTIN_VEC_MERGEL_V2DF: + fold_mergehl_helper (gsi, stmt, 1); + return true; + /* vec_mergeh (integrals). */ + case ALTIVEC_BUILTIN_VMRGHH: + case ALTIVEC_BUILTIN_VMRGHW: + case VSX_BUILTIN_XXMRGHW_4SI: + case ALTIVEC_BUILTIN_VMRGHB: + case VSX_BUILTIN_VEC_MERGEH_V2DI: + case VSX_BUILTIN_XXMRGHW_4SF: + case VSX_BUILTIN_VEC_MERGEH_V2DF: + fold_mergehl_helper (gsi, stmt, 0); + return true; + + /* Flavors of vec_mergee. */ + case P8V_BUILTIN_VMRGEW_V4SI: + case P8V_BUILTIN_VMRGEW_V2DI: + case P8V_BUILTIN_VMRGEW_V4SF: + case P8V_BUILTIN_VMRGEW_V2DF: + fold_mergeeo_helper (gsi, stmt, 0); + return true; + /* Flavors of vec_mergeo. 
*/ + case P8V_BUILTIN_VMRGOW_V4SI: + case P8V_BUILTIN_VMRGOW_V2DI: + case P8V_BUILTIN_VMRGOW_V4SF: + case P8V_BUILTIN_VMRGOW_V2DF: + fold_mergeeo_helper (gsi, stmt, 1); + return true; + + /* d = vec_pack (a, b) */ + case P8V_BUILTIN_VPKUDUM: + case ALTIVEC_BUILTIN_VPKUHUM: + case ALTIVEC_BUILTIN_VPKUWUM: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* d = vec_unpackh (a) */ + /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call + in this code is sensitive to endian-ness, and needs to be inverted to + handle both LE and BE targets. */ + case ALTIVEC_BUILTIN_VUPKHSB: + case ALTIVEC_BUILTIN_VUPKHSH: + case P8V_BUILTIN_VUPKHSW: + { + arg0 = gimple_call_arg (stmt, 0); + lhs = gimple_call_lhs (stmt); + if (BYTES_BIG_ENDIAN) + g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0); + else + g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + /* d = vec_unpackl (a) */ + case ALTIVEC_BUILTIN_VUPKLSB: + case ALTIVEC_BUILTIN_VUPKLSH: + case P8V_BUILTIN_VUPKLSW: + { + arg0 = gimple_call_arg (stmt, 0); + lhs = gimple_call_lhs (stmt); + if (BYTES_BIG_ENDIAN) + g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0); + else + g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + /* There is no gimple type corresponding with pixel, so just return. */ + case ALTIVEC_BUILTIN_VUPKHPX: + case ALTIVEC_BUILTIN_VUPKLPX: + return false; + + /* vec_perm. */ + case ALTIVEC_BUILTIN_VPERM_16QI: + case ALTIVEC_BUILTIN_VPERM_8HI: + case ALTIVEC_BUILTIN_VPERM_4SI: + case ALTIVEC_BUILTIN_VPERM_2DI: + case ALTIVEC_BUILTIN_VPERM_4SF: + case ALTIVEC_BUILTIN_VPERM_2DF: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + tree permute = gimple_call_arg (stmt, 2); + lhs = gimple_call_lhs (stmt); + location_t loc = gimple_location (stmt); + gimple_seq stmts = NULL; + // convert arg0 and arg1 to match the type of the permute + // for the VEC_PERM_EXPR operation. + tree permute_type = (TREE_TYPE (permute)); + tree arg0_ptype = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR, + permute_type, arg0); + tree arg1_ptype = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR, + permute_type, arg1); + tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR, + permute_type, arg0_ptype, arg1_ptype, + permute); + // Convert the result back to the desired lhs type upon completion. + tree temp = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR, + TREE_TYPE (lhs), lhs_ptype); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + g = gimple_build_assign (lhs, temp); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + default: + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n", + fn_code, fn_name1, fn_name2); + break; + } + + return false; +} + +/* Helper function to sort out which built-ins may be valid without having + a LHS. 
*/ +static bool +rs6000_new_builtin_valid_without_lhs (enum rs6000_gen_builtins fn_code, + tree fndecl) +{ + if (TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node) + return true; + + switch (fn_code) + { + case RS6000_BIF_STVX_V16QI: + case RS6000_BIF_STVX_V8HI: + case RS6000_BIF_STVX_V4SI: + case RS6000_BIF_STVX_V4SF: + case RS6000_BIF_STVX_V2DI: + case RS6000_BIF_STVX_V2DF: + case RS6000_BIF_STXVW4X_V16QI: + case RS6000_BIF_STXVW4X_V8HI: + case RS6000_BIF_STXVW4X_V4SF: + case RS6000_BIF_STXVW4X_V4SI: + case RS6000_BIF_STXVD2X_V2DF: + case RS6000_BIF_STXVD2X_V2DI: + return true; + default: + return false; + } +} + +/* Check whether a builtin function is supported in this target + configuration. */ +bool +rs6000_new_builtin_is_supported (enum rs6000_gen_builtins fncode) +{ + switch (rs6000_builtin_info_x[(size_t) fncode].enable) + { + case ENB_ALWAYS: + return true; + case ENB_P5: + return TARGET_POPCNTB; + case ENB_P6: + return TARGET_CMPB; + case ENB_P7: + return TARGET_POPCNTD; + case ENB_P7_64: + return TARGET_POPCNTD && TARGET_POWERPC64; + case ENB_P8: + return TARGET_DIRECT_MOVE; + case ENB_P8V: + return TARGET_P8_VECTOR; + case ENB_P9: + return TARGET_MODULO; + case ENB_P9_64: + return TARGET_MODULO && TARGET_POWERPC64; + case ENB_P9V: + return TARGET_P9_VECTOR; + case ENB_P10: + return TARGET_POWER10; + case ENB_P10_64: + return TARGET_POWER10 && TARGET_POWERPC64; + case ENB_ALTIVEC: + return TARGET_ALTIVEC; + case ENB_VSX: + return TARGET_VSX; + case ENB_CELL: + return TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL; + case ENB_IEEE128_HW: + return TARGET_FLOAT128_HW; + case ENB_DFP: + return TARGET_DFP; + case ENB_CRYPTO: + return TARGET_CRYPTO; + case ENB_HTM: + return TARGET_HTM; + case ENB_MMA: + return TARGET_MMA; + default: + gcc_unreachable (); + } + gcc_unreachable (); +} + +/* Expand the MMA built-ins early, so that we can convert the pass-by-reference + __vector_quad arguments into pass-by-value arguments, leading to more + efficient code generation. */ +static bool +rs6000_gimple_fold_new_mma_builtin (gimple_stmt_iterator *gsi, + rs6000_gen_builtins fn_code) +{ + gimple *stmt = gsi_stmt (*gsi); + size_t fncode = (size_t) fn_code; + + if (!bif_is_mma (rs6000_builtin_info_x[fncode])) + return false; + + /* Each call that can be gimple-expanded has an associated built-in + function that it will expand into. If this one doesn't, we have + already expanded it! Exceptions: lxvp and stxvp. */ + if (rs6000_builtin_info_x[fncode].assoc_bif == RS6000_BIF_NONE + && fncode != RS6000_BIF_LXVP + && fncode != RS6000_BIF_STXVP) + return false; + + bifdata *bd = &rs6000_builtin_info_x[fncode]; + unsigned nopnds = bd->nargs; + gimple_seq new_seq = NULL; + gimple *new_call; + tree new_decl; + + /* Compatibility built-ins; we used to call these + __builtin_mma_{dis,}assemble_pair, but now we call them + __builtin_vsx_{dis,}assemble_pair. Handle the old versions. */ + if (fncode == RS6000_BIF_ASSEMBLE_PAIR) + fncode = RS6000_BIF_ASSEMBLE_PAIR_V; + else if (fncode == RS6000_BIF_DISASSEMBLE_PAIR) + fncode = RS6000_BIF_DISASSEMBLE_PAIR_V; + + if (fncode == RS6000_BIF_DISASSEMBLE_ACC + || fncode == RS6000_BIF_DISASSEMBLE_PAIR_V) + { + /* This is an MMA disassemble built-in function. */ + push_gimplify_context (true); + unsigned nvec = (fncode == RS6000_BIF_DISASSEMBLE_ACC) ? 
4 : 2; + tree dst_ptr = gimple_call_arg (stmt, 0); + tree src_ptr = gimple_call_arg (stmt, 1); + tree src_type = TREE_TYPE (src_ptr); + tree src = create_tmp_reg_or_ssa_name (TREE_TYPE (src_type)); + gimplify_assign (src, build_simple_mem_ref (src_ptr), &new_seq); + + /* If we are not disassembling an accumulator/pair or our destination is + another accumulator/pair, then just copy the entire thing as is. */ + if ((fncode == RS6000_BIF_DISASSEMBLE_ACC + && TREE_TYPE (TREE_TYPE (dst_ptr)) == vector_quad_type_node) + || (fncode == RS6000_BIF_DISASSEMBLE_PAIR_V + && TREE_TYPE (TREE_TYPE (dst_ptr)) == vector_pair_type_node)) + { + tree dst = build_simple_mem_ref (build1 (VIEW_CONVERT_EXPR, + src_type, dst_ptr)); + gimplify_assign (dst, src, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + + /* If we're disassembling an accumulator into a different type, we need + to emit a xxmfacc instruction now, since we cannot do it later. */ + if (fncode == RS6000_BIF_DISASSEMBLE_ACC) + { + new_decl = rs6000_builtin_decls_x[RS6000_BIF_XXMFACC_INTERNAL]; + new_call = gimple_build_call (new_decl, 1, src); + src = create_tmp_reg_or_ssa_name (vector_quad_type_node); + gimple_call_set_lhs (new_call, src); + gimple_seq_add_stmt (&new_seq, new_call); + } + + /* Copy the accumulator/pair vector by vector. */ + new_decl + = rs6000_builtin_decls_x[rs6000_builtin_info_x[fncode].assoc_bif]; + tree dst_type = build_pointer_type_for_mode (unsigned_V16QI_type_node, + ptr_mode, true); + tree dst_base = build1 (VIEW_CONVERT_EXPR, dst_type, dst_ptr); + for (unsigned i = 0; i < nvec; i++) + { + unsigned index = WORDS_BIG_ENDIAN ? i : nvec - 1 - i; + tree dst = build2 (MEM_REF, unsigned_V16QI_type_node, dst_base, + build_int_cst (dst_type, index * 16)); + tree dstssa = create_tmp_reg_or_ssa_name (unsigned_V16QI_type_node); + new_call = gimple_build_call (new_decl, 2, src, + build_int_cstu (uint16_type_node, i)); + gimple_call_set_lhs (new_call, dstssa); + gimple_seq_add_stmt (&new_seq, new_call); + gimplify_assign (dst, dstssa, &new_seq); + } + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + + /* TODO: Do some factoring on these two chunks. */ + if (fncode == RS6000_BIF_LXVP) + { + push_gimplify_context (true); + tree offset = gimple_call_arg (stmt, 0); + tree ptr = gimple_call_arg (stmt, 1); + tree lhs = gimple_call_lhs (stmt); + if (TREE_TYPE (TREE_TYPE (ptr)) != vector_pair_type_node) + ptr = build1 (VIEW_CONVERT_EXPR, + build_pointer_type (vector_pair_type_node), ptr); + tree mem = build_simple_mem_ref (build2 (POINTER_PLUS_EXPR, + TREE_TYPE (ptr), ptr, offset)); + gimplify_assign (lhs, mem, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + + if (fncode == RS6000_BIF_STXVP) + { + push_gimplify_context (true); + tree src = gimple_call_arg (stmt, 0); + tree offset = gimple_call_arg (stmt, 1); + tree ptr = gimple_call_arg (stmt, 2); + if (TREE_TYPE (TREE_TYPE (ptr)) != vector_pair_type_node) + ptr = build1 (VIEW_CONVERT_EXPR, + build_pointer_type (vector_pair_type_node), ptr); + tree mem = build_simple_mem_ref (build2 (POINTER_PLUS_EXPR, + TREE_TYPE (ptr), ptr, offset)); + gimplify_assign (mem, src, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + return true; + } + + /* Convert this built-in into an internal version that uses pass-by-value + arguments. The internal built-in is found in the assoc_bif field. 
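+     Unlike the older mechanism above, which assumed the internal built-in
+     immediately followed at fncode + 1, the replacement here is looked up
+     explicitly via the assoc_bif field.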
*/ + new_decl = rs6000_builtin_decls_x[rs6000_builtin_info_x[fncode].assoc_bif]; + tree lhs, op[MAX_MMA_OPERANDS]; + tree acc = gimple_call_arg (stmt, 0); + push_gimplify_context (true); + + if (bif_is_quad (*bd)) + { + /* This built-in has a pass-by-reference accumulator input, so load it + into a temporary accumulator for use as a pass-by-value input. */ + op[0] = create_tmp_reg_or_ssa_name (vector_quad_type_node); + for (unsigned i = 1; i < nopnds; i++) + op[i] = gimple_call_arg (stmt, i); + gimplify_assign (op[0], build_simple_mem_ref (acc), &new_seq); + } + else + { + /* This built-in does not use its pass-by-reference accumulator argument + as an input argument, so remove it from the input list. */ + nopnds--; + for (unsigned i = 0; i < nopnds; i++) + op[i] = gimple_call_arg (stmt, i + 1); + } + + switch (nopnds) + { + case 0: + new_call = gimple_build_call (new_decl, 0); + break; + case 1: + new_call = gimple_build_call (new_decl, 1, op[0]); + break; + case 2: + new_call = gimple_build_call (new_decl, 2, op[0], op[1]); + break; + case 3: + new_call = gimple_build_call (new_decl, 3, op[0], op[1], op[2]); + break; + case 4: + new_call = gimple_build_call (new_decl, 4, op[0], op[1], op[2], op[3]); + break; + case 5: + new_call = gimple_build_call (new_decl, 5, op[0], op[1], op[2], op[3], + op[4]); + break; + case 6: + new_call = gimple_build_call (new_decl, 6, op[0], op[1], op[2], op[3], + op[4], op[5]); + break; + case 7: + new_call = gimple_build_call (new_decl, 7, op[0], op[1], op[2], op[3], + op[4], op[5], op[6]); + break; + default: + gcc_unreachable (); + } + + if (fncode == RS6000_BIF_BUILD_PAIR || fncode == RS6000_BIF_ASSEMBLE_PAIR_V) + lhs = create_tmp_reg_or_ssa_name (vector_pair_type_node); + else + lhs = create_tmp_reg_or_ssa_name (vector_quad_type_node); + gimple_call_set_lhs (new_call, lhs); + gimple_seq_add_stmt (&new_seq, new_call); + gimplify_assign (build_simple_mem_ref (acc), lhs, &new_seq); + pop_gimplify_context (NULL); + gsi_replace_with_seq (gsi, new_seq, true); + + return true; +} + +/* Fold a machine-dependent built-in in GIMPLE. (For folding into + a constant, use rs6000_fold_builtin.) */ +static bool +rs6000_gimple_fold_new_builtin (gimple_stmt_iterator *gsi) +{ + gimple *stmt = gsi_stmt (*gsi); + tree fndecl = gimple_call_fndecl (stmt); + gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD); + enum rs6000_gen_builtins fn_code + = (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl); + tree arg0, arg1, lhs, temp; + enum tree_code bcode; + gimple *g; + + size_t uns_fncode = (size_t) fn_code; + enum insn_code icode = rs6000_builtin_info_x[uns_fncode].icode; + const char *fn_name1 = rs6000_builtin_info_x[uns_fncode].bifname; + const char *fn_name2 = (icode != CODE_FOR_nothing) + ? get_insn_name ((int) icode) + : "nothing"; + + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_gimple_fold_new_builtin %d %s %s\n", + fn_code, fn_name1, fn_name2); + + if (!rs6000_fold_gimple) + return false; + + /* Prevent gimple folding for code that does not have a LHS, unless it is + allowed per the rs6000_new_builtin_valid_without_lhs helper function. */ + if (!gimple_call_lhs (stmt) + && !rs6000_new_builtin_valid_without_lhs (fn_code, fndecl)) + return false; + + /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */ + if (!rs6000_new_builtin_is_supported (fn_code)) + return false; + + if (rs6000_gimple_fold_new_mma_builtin (gsi, fn_code)) + return true; + + switch (fn_code) + { + /* Flavors of vec_add. 
We deliberately don't expand + RS6000_BIF_VADDUQM as it gets lowered from V1TImode to + TImode, resulting in much poorer code generation. */ + case RS6000_BIF_VADDUBM: + case RS6000_BIF_VADDUHM: + case RS6000_BIF_VADDUWM: + case RS6000_BIF_VADDUDM: + case RS6000_BIF_VADDFP: + case RS6000_BIF_XVADDDP: + case RS6000_BIF_XVADDSP: + bcode = PLUS_EXPR; + do_binary: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs))) + && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs)))) + { + /* Ensure the binary operation is performed in a type + that wraps if it is integral type. */ + gimple_seq stmts = NULL; + tree type = unsigned_type_for (TREE_TYPE (lhs)); + tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + type, arg0); + tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + type, arg1); + tree res = gimple_build (&stmts, gimple_location (stmt), bcode, + type, uarg0, uarg1); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR, + build1 (VIEW_CONVERT_EXPR, + TREE_TYPE (lhs), res)); + gsi_replace (gsi, g, true); + return true; + } + g = gimple_build_assign (lhs, bcode, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_sub. We deliberately don't expand + RS6000_BIF_VSUBUQM. */ + case RS6000_BIF_VSUBUBM: + case RS6000_BIF_VSUBUHM: + case RS6000_BIF_VSUBUWM: + case RS6000_BIF_VSUBUDM: + case RS6000_BIF_VSUBFP: + case RS6000_BIF_XVSUBDP: + case RS6000_BIF_XVSUBSP: + bcode = MINUS_EXPR; + goto do_binary; + case RS6000_BIF_XVMULSP: + case RS6000_BIF_XVMULDP: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Even element flavors of vec_mul (signed). */ + case RS6000_BIF_VMULESB: + case RS6000_BIF_VMULESH: + case RS6000_BIF_VMULESW: + /* Even element flavors of vec_mul (unsigned). */ + case RS6000_BIF_VMULEUB: + case RS6000_BIF_VMULEUH: + case RS6000_BIF_VMULEUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Odd element flavors of vec_mul (signed). */ + case RS6000_BIF_VMULOSB: + case RS6000_BIF_VMULOSH: + case RS6000_BIF_VMULOSW: + /* Odd element flavors of vec_mul (unsigned). */ + case RS6000_BIF_VMULOUB: + case RS6000_BIF_VMULOUH: + case RS6000_BIF_VMULOUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_div (Integer). */ + case RS6000_BIF_DIV_V2DI: + case RS6000_BIF_UDIV_V2DI: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_div (Float). 
*/ + case RS6000_BIF_XVDIVSP: + case RS6000_BIF_XVDIVDP: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_and. */ + case RS6000_BIF_VAND_V16QI_UNS: + case RS6000_BIF_VAND_V16QI: + case RS6000_BIF_VAND_V8HI_UNS: + case RS6000_BIF_VAND_V8HI: + case RS6000_BIF_VAND_V4SI_UNS: + case RS6000_BIF_VAND_V4SI: + case RS6000_BIF_VAND_V2DI_UNS: + case RS6000_BIF_VAND_V2DI: + case RS6000_BIF_VAND_V4SF: + case RS6000_BIF_VAND_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_andc. */ + case RS6000_BIF_VANDC_V16QI_UNS: + case RS6000_BIF_VANDC_V16QI: + case RS6000_BIF_VANDC_V8HI_UNS: + case RS6000_BIF_VANDC_V8HI: + case RS6000_BIF_VANDC_V4SI_UNS: + case RS6000_BIF_VANDC_V4SI: + case RS6000_BIF_VANDC_V2DI_UNS: + case RS6000_BIF_VANDC_V2DI: + case RS6000_BIF_VANDC_V4SF: + case RS6000_BIF_VANDC_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_nand. */ + case RS6000_BIF_NAND_V16QI_UNS: + case RS6000_BIF_NAND_V16QI: + case RS6000_BIF_NAND_V8HI_UNS: + case RS6000_BIF_NAND_V8HI: + case RS6000_BIF_NAND_V4SI_UNS: + case RS6000_BIF_NAND_V4SI: + case RS6000_BIF_NAND_V2DI_UNS: + case RS6000_BIF_NAND_V2DI: + case RS6000_BIF_NAND_V4SF: + case RS6000_BIF_NAND_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_or. */ + case RS6000_BIF_VOR_V16QI_UNS: + case RS6000_BIF_VOR_V16QI: + case RS6000_BIF_VOR_V8HI_UNS: + case RS6000_BIF_VOR_V8HI: + case RS6000_BIF_VOR_V4SI_UNS: + case RS6000_BIF_VOR_V4SI: + case RS6000_BIF_VOR_V2DI_UNS: + case RS6000_BIF_VOR_V2DI: + case RS6000_BIF_VOR_V4SF: + case RS6000_BIF_VOR_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_orc. 
*/ + case RS6000_BIF_ORC_V16QI_UNS: + case RS6000_BIF_ORC_V16QI: + case RS6000_BIF_ORC_V8HI_UNS: + case RS6000_BIF_ORC_V8HI: + case RS6000_BIF_ORC_V4SI_UNS: + case RS6000_BIF_ORC_V4SI: + case RS6000_BIF_ORC_V2DI_UNS: + case RS6000_BIF_ORC_V2DI: + case RS6000_BIF_ORC_V4SF: + case RS6000_BIF_ORC_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_xor. */ + case RS6000_BIF_VXOR_V16QI_UNS: + case RS6000_BIF_VXOR_V16QI: + case RS6000_BIF_VXOR_V8HI_UNS: + case RS6000_BIF_VXOR_V8HI: + case RS6000_BIF_VXOR_V4SI_UNS: + case RS6000_BIF_VXOR_V4SI: + case RS6000_BIF_VXOR_V2DI_UNS: + case RS6000_BIF_VXOR_V2DI: + case RS6000_BIF_VXOR_V4SF: + case RS6000_BIF_VXOR_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_nor. */ + case RS6000_BIF_VNOR_V16QI_UNS: + case RS6000_BIF_VNOR_V16QI: + case RS6000_BIF_VNOR_V8HI_UNS: + case RS6000_BIF_VNOR_V8HI: + case RS6000_BIF_VNOR_V4SI_UNS: + case RS6000_BIF_VNOR_V4SI: + case RS6000_BIF_VNOR_V2DI_UNS: + case RS6000_BIF_VNOR_V2DI: + case RS6000_BIF_VNOR_V4SF: + case RS6000_BIF_VNOR_V2DF: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_abs. */ + case RS6000_BIF_ABS_V16QI: + case RS6000_BIF_ABS_V8HI: + case RS6000_BIF_ABS_V4SI: + case RS6000_BIF_ABS_V4SF: + case RS6000_BIF_ABS_V2DI: + case RS6000_BIF_XVABSDP: + case RS6000_BIF_XVABSSP: + arg0 = gimple_call_arg (stmt, 0); + if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))) + && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0)))) + return false; + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, ABS_EXPR, arg0); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_min. */ + case RS6000_BIF_XVMINDP: + case RS6000_BIF_XVMINSP: + case RS6000_BIF_VMINFP: + { + lhs = gimple_call_lhs (stmt); + tree type = TREE_TYPE (lhs); + if (HONOR_NANS (type)) + return false; + gcc_fallthrough (); + } + case RS6000_BIF_VMINSD: + case RS6000_BIF_VMINUD: + case RS6000_BIF_VMINSB: + case RS6000_BIF_VMINSH: + case RS6000_BIF_VMINSW: + case RS6000_BIF_VMINUB: + case RS6000_BIF_VMINUH: + case RS6000_BIF_VMINUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* flavors of vec_max. 
*/ + case RS6000_BIF_XVMAXDP: + case RS6000_BIF_XVMAXSP: + case RS6000_BIF_VMAXFP: + { + lhs = gimple_call_lhs (stmt); + tree type = TREE_TYPE (lhs); + if (HONOR_NANS (type)) + return false; + gcc_fallthrough (); + } + case RS6000_BIF_VMAXSD: + case RS6000_BIF_VMAXUD: + case RS6000_BIF_VMAXSB: + case RS6000_BIF_VMAXSH: + case RS6000_BIF_VMAXSW: + case RS6000_BIF_VMAXUB: + case RS6000_BIF_VMAXUH: + case RS6000_BIF_VMAXUW: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_eqv. */ + case RS6000_BIF_EQV_V16QI: + case RS6000_BIF_EQV_V8HI: + case RS6000_BIF_EQV_V4SI: + case RS6000_BIF_EQV_V4SF: + case RS6000_BIF_EQV_V2DF: + case RS6000_BIF_EQV_V2DI: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1)); + g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vec_rotate_left. */ + case RS6000_BIF_VRLB: + case RS6000_BIF_VRLH: + case RS6000_BIF_VRLW: + case RS6000_BIF_VRLD: + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + /* Flavors of vector shift right algebraic. + vec_sra{b,h,w} -> vsra{b,h,w}. */ + case RS6000_BIF_VSRAB: + case RS6000_BIF_VSRAH: + case RS6000_BIF_VSRAW: + case RS6000_BIF_VSRAD: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + tree arg1_type = TREE_TYPE (arg1); + tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1)); + tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type)); + location_t loc = gimple_location (stmt); + /* Force arg1 into the range valid matching the arg0 type. */ + /* Build a vector consisting of the max valid bit-size values. */ + int n_elts = VECTOR_CST_NELTS (arg1); + tree element_size = build_int_cst (unsigned_element_type, + 128 / n_elts); + tree_vector_builder elts (unsigned_arg1_type, n_elts, 1); + for (int i = 0; i < n_elts; i++) + elts.safe_push (element_size); + tree modulo_tree = elts.build (); + /* Modulo the provided shift value against that vector. */ + gimple_seq stmts = NULL; + tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_arg1_type, arg1); + tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR, + unsigned_arg1_type, unsigned_arg1, + modulo_tree); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + /* And finally, do the shift. */ + g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + /* Flavors of vector shift left. + builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. 
*/ + case RS6000_BIF_VSLB: + case RS6000_BIF_VSLH: + case RS6000_BIF_VSLW: + case RS6000_BIF_VSLD: + { + location_t loc; + gimple_seq stmts = NULL; + arg0 = gimple_call_arg (stmt, 0); + tree arg0_type = TREE_TYPE (arg0); + if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type)) + && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type))) + return false; + arg1 = gimple_call_arg (stmt, 1); + tree arg1_type = TREE_TYPE (arg1); + tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1)); + tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type)); + loc = gimple_location (stmt); + lhs = gimple_call_lhs (stmt); + /* Force arg1 into the range valid matching the arg0 type. */ + /* Build a vector consisting of the max valid bit-size values. */ + int n_elts = VECTOR_CST_NELTS (arg1); + int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type)) + * BITS_PER_UNIT; + tree element_size = build_int_cst (unsigned_element_type, + tree_size_in_bits / n_elts); + tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1); + for (int i = 0; i < n_elts; i++) + elts.safe_push (element_size); + tree modulo_tree = elts.build (); + /* Modulo the provided shift value against that vector. */ + tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_arg1_type, arg1); + tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR, + unsigned_arg1_type, unsigned_arg1, + modulo_tree); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + /* And finally, do the shift. */ + g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + /* Flavors of vector shift right. */ + case RS6000_BIF_VSRB: + case RS6000_BIF_VSRH: + case RS6000_BIF_VSRW: + case RS6000_BIF_VSRD: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + tree arg1_type = TREE_TYPE (arg1); + tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1)); + tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type)); + location_t loc = gimple_location (stmt); + gimple_seq stmts = NULL; + /* Convert arg0 to unsigned. */ + tree arg0_unsigned + = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_type_for (TREE_TYPE (arg0)), arg0); + /* Force arg1 into the range valid matching the arg0 type. */ + /* Build a vector consisting of the max valid bit-size values. */ + int n_elts = VECTOR_CST_NELTS (arg1); + tree element_size = build_int_cst (unsigned_element_type, + 128 / n_elts); + tree_vector_builder elts (unsigned_arg1_type, n_elts, 1); + for (int i = 0; i < n_elts; i++) + elts.safe_push (element_size); + tree modulo_tree = elts.build (); + /* Modulo the provided shift value against that vector. */ + tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR, + unsigned_arg1_type, arg1); + tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR, + unsigned_arg1_type, unsigned_arg1, + modulo_tree); + /* Do the shift. */ + tree res + = gimple_build (&stmts, RSHIFT_EXPR, + TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1); + /* Convert result back to the lhs type. */ + res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + replace_call_with_value (gsi, res); + return true; + } + /* Vector loads. 
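*/ + /* Note: lvx ignores the low four bits of its effective address, i.e. vec_ld (off, p) loads the 16-byte block at (p + off) & -16, which is why the fold below masks the computed address with -16 rather than assuming it is already aligned.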
*/ + case RS6000_BIF_LVX_V16QI: + case RS6000_BIF_LVX_V8HI: + case RS6000_BIF_LVX_V4SI: + case RS6000_BIF_LVX_V4SF: + case RS6000_BIF_LVX_V2DI: + case RS6000_BIF_LVX_V2DF: + case RS6000_BIF_LVX_V1TI: + { + arg0 = gimple_call_arg (stmt, 0); // offset + arg1 = gimple_call_arg (stmt, 1); // address + lhs = gimple_call_lhs (stmt); + location_t loc = gimple_location (stmt); + /* Since arg1 may be cast to a different type, just use ptr_type_node + here instead of trying to enforce TBAA on pointer types. */ + tree arg1_type = ptr_type_node; + tree lhs_type = TREE_TYPE (lhs); + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg0. The resulting type will match + the type of arg1. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg1_type, arg1, temp_offset); + /* Mask off any lower bits from the address. */ + tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR, + arg1_type, temp_addr, + build_int_cst (arg1_type, -16)); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (aligned_addr)) + { + tree t = make_ssa_name (TREE_TYPE (aligned_addr)); + gimple *g = gimple_build_assign (t, aligned_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + aligned_addr = t; + } + /* Use the build2 helper to set up the mem_ref. The MEM_REF could also + take an offset, but since we've already incorporated the offset + above, here we just pass in a zero. */ + gimple *g + = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr, + build_int_cst (arg1_type, 0))); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + /* Vector stores. */ + case RS6000_BIF_STVX_V16QI: + case RS6000_BIF_STVX_V8HI: + case RS6000_BIF_STVX_V4SI: + case RS6000_BIF_STVX_V4SF: + case RS6000_BIF_STVX_V2DI: + case RS6000_BIF_STVX_V2DF: + { + arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */ + arg1 = gimple_call_arg (stmt, 1); /* Offset. */ + tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */ + location_t loc = gimple_location (stmt); + tree arg0_type = TREE_TYPE (arg0); + /* Use ptr_type_node (no TBAA) for the arg2_type. + FIXME: (Richard) "A proper fix would be to transition this type as + seen from the frontend to GIMPLE, for example in a similar way we + do for MEM_REFs by piggy-backing that on an extra argument, a + constant zero pointer of the alias pointer type to use (which would + also serve as a type indicator of the store itself). I'd use a + target specific internal function for this (not sure if we can have + those target specific, but I guess if it's folded away then that's + fine) and get away with the overload set." */ + tree arg2_type = ptr_type_node; + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg0. The resulting type will match + the type of arg2. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg2_type, arg2, temp_offset); + /* Mask off any lower bits from the address. 
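*/ + /* As with lvx above, stvx ignores the low four bits of the effective address, so the store address built below is masked down to a 16-byte boundary in the same way.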
*/ + tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR, + arg2_type, temp_addr, + build_int_cst (arg2_type, -16)); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (aligned_addr)) + { + tree t = make_ssa_name (TREE_TYPE (aligned_addr)); + gimple *g = gimple_build_assign (t, aligned_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + aligned_addr = t; + } + /* The desired gimple result should be similar to: + MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */ + gimple *g + = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr, + build_int_cst (arg2_type, 0)), arg0); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + /* unaligned Vector loads. */ + case RS6000_BIF_LXVW4X_V16QI: + case RS6000_BIF_LXVW4X_V8HI: + case RS6000_BIF_LXVW4X_V4SF: + case RS6000_BIF_LXVW4X_V4SI: + case RS6000_BIF_LXVD2X_V2DF: + case RS6000_BIF_LXVD2X_V2DI: + { + arg0 = gimple_call_arg (stmt, 0); // offset + arg1 = gimple_call_arg (stmt, 1); // address + lhs = gimple_call_lhs (stmt); + location_t loc = gimple_location (stmt); + /* Since arg1 may be cast to a different type, just use ptr_type_node + here instead of trying to enforce TBAA on pointer types. */ + tree arg1_type = ptr_type_node; + tree lhs_type = TREE_TYPE (lhs); + /* In GIMPLE the type of the MEM_REF specifies the alignment. The + required alignment (power) is 4 bytes regardless of data type. */ + tree align_ltype = build_aligned_type (lhs_type, 4); + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg0. The resulting type will match + the type of arg1. */ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg1_type, arg1, temp_offset); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (temp_addr)) + { + tree t = make_ssa_name (TREE_TYPE (temp_addr)); + gimple *g = gimple_build_assign (t, temp_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + temp_addr = t; + } + /* Use the build2 helper to set up the mem_ref. The MEM_REF could also + take an offset, but since we've already incorporated the offset + above, here we just pass in a zero. */ + gimple *g; + g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr, + build_int_cst (arg1_type, 0))); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + /* unaligned Vector stores. */ + case RS6000_BIF_STXVW4X_V16QI: + case RS6000_BIF_STXVW4X_V8HI: + case RS6000_BIF_STXVW4X_V4SF: + case RS6000_BIF_STXVW4X_V4SI: + case RS6000_BIF_STXVD2X_V2DF: + case RS6000_BIF_STXVD2X_V2DI: + { + arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */ + arg1 = gimple_call_arg (stmt, 1); /* Offset. */ + tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */ + location_t loc = gimple_location (stmt); + tree arg0_type = TREE_TYPE (arg0); + /* Use ptr_type_node (no TBAA) for the arg2_type. */ + tree arg2_type = ptr_type_node; + /* In GIMPLE the type of the MEM_REF specifies the alignment. The + required alignment (power) is 4 bytes regardless of data type. */ + tree align_stype = build_aligned_type (arg0_type, 4); + /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create + the tree using the value from arg1. 
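*/ + /* Unlike the lvx/stvx folds above, no masking is applied here: lxvd2x/lxvw4x and their store counterparts accept arbitrary alignment, so the raw address is used and the reduced 4-byte alignment is conveyed solely through the type of the MEM_REF.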
*/ + gimple_seq stmts = NULL; + tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1); + tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR, + arg2_type, arg2, temp_offset); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + if (!is_gimple_mem_ref_addr (temp_addr)) + { + tree t = make_ssa_name (TREE_TYPE (temp_addr)); + gimple *g = gimple_build_assign (t, temp_addr); + gsi_insert_before (gsi, g, GSI_SAME_STMT); + temp_addr = t; + } + gimple *g; + g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr, + build_int_cst (arg2_type, 0)), arg0); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + /* Vector Fused multiply-add (fma). */ + case RS6000_BIF_VMADDFP: + case RS6000_BIF_XVMADDDP: + case RS6000_BIF_XVMADDSP: + case RS6000_BIF_VMLADDUHM: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + tree arg2 = gimple_call_arg (stmt, 2); + lhs = gimple_call_lhs (stmt); + gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2); + gimple_call_set_lhs (g, lhs); + gimple_call_set_nothrow (g, true); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* Vector compares; EQ, NE, GE, GT, LE. */ + case RS6000_BIF_VCMPEQUB: + case RS6000_BIF_VCMPEQUH: + case RS6000_BIF_VCMPEQUW: + case RS6000_BIF_VCMPEQUD: + /* We deliberately omit RS6000_BIF_VCMPEQUT for now, because gimple + folding produces worse code for 128-bit compares. */ + fold_compare_helper (gsi, EQ_EXPR, stmt); + return true; + + case RS6000_BIF_VCMPNEB: + case RS6000_BIF_VCMPNEH: + case RS6000_BIF_VCMPNEW: + /* We deliberately omit RS6000_BIF_VCMPNET for now, because gimple + folding produces worse code for 128-bit compares. */ + fold_compare_helper (gsi, NE_EXPR, stmt); + return true; + + case RS6000_BIF_CMPGE_16QI: + case RS6000_BIF_CMPGE_U16QI: + case RS6000_BIF_CMPGE_8HI: + case RS6000_BIF_CMPGE_U8HI: + case RS6000_BIF_CMPGE_4SI: + case RS6000_BIF_CMPGE_U4SI: + case RS6000_BIF_CMPGE_2DI: + case RS6000_BIF_CMPGE_U2DI: + /* We deliberately omit RS6000_BIF_CMPGE_1TI and RS6000_BIF_CMPGE_U1TI + for now, because gimple folding produces worse code for 128-bit + compares. */ + fold_compare_helper (gsi, GE_EXPR, stmt); + return true; + + case RS6000_BIF_VCMPGTSB: + case RS6000_BIF_VCMPGTUB: + case RS6000_BIF_VCMPGTSH: + case RS6000_BIF_VCMPGTUH: + case RS6000_BIF_VCMPGTSW: + case RS6000_BIF_VCMPGTUW: + case RS6000_BIF_VCMPGTUD: + case RS6000_BIF_VCMPGTSD: + /* We deliberately omit RS6000_BIF_VCMPGTUT and RS6000_BIF_VCMPGTST + for now, because gimple folding produces worse code for 128-bit + compares. */ + fold_compare_helper (gsi, GT_EXPR, stmt); + return true; + + case RS6000_BIF_CMPLE_16QI: + case RS6000_BIF_CMPLE_U16QI: + case RS6000_BIF_CMPLE_8HI: + case RS6000_BIF_CMPLE_U8HI: + case RS6000_BIF_CMPLE_4SI: + case RS6000_BIF_CMPLE_U4SI: + case RS6000_BIF_CMPLE_2DI: + case RS6000_BIF_CMPLE_U2DI: + /* We deliberately omit RS6000_BIF_CMPLE_1TI and RS6000_BIF_CMPLE_U1TI + for now, because gimple folding produces worse code for 128-bit + compares. */ + fold_compare_helper (gsi, LE_EXPR, stmt); + return true; + + /* flavors of vec_splat_[us]{8,16,32}. */ + case RS6000_BIF_VSPLTISB: + case RS6000_BIF_VSPLTISH: + case RS6000_BIF_VSPLTISW: + { + arg0 = gimple_call_arg (stmt, 0); + lhs = gimple_call_lhs (stmt); + + /* Only fold the vec_splat_*() if the lower bits of arg 0 is a + 5-bit signed constant in range -16 to +15. 
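*/ + /* For example, vec_splat_s8 (-5) passes the check below and becomes a vector constant of sixteen -5 elements via build_vector_from_val; a non-constant or out-of-range argument simply leaves the call for the normal expansion path.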
*/ + if (TREE_CODE (arg0) != INTEGER_CST + || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15)) + return false; + gimple_seq stmts = NULL; + location_t loc = gimple_location (stmt); + tree splat_value = gimple_convert (&stmts, loc, + TREE_TYPE (TREE_TYPE (lhs)), arg0); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value); + g = gimple_build_assign (lhs, splat_tree); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* Flavors of vec_splat. */ + /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... }; */ + case RS6000_BIF_VSPLTB: + case RS6000_BIF_VSPLTH: + case RS6000_BIF_VSPLTW: + case RS6000_BIF_XXSPLTD_V2DI: + case RS6000_BIF_XXSPLTD_V2DF: + { + arg0 = gimple_call_arg (stmt, 0); /* input vector. */ + arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */ + /* Only fold the vec_splat_*() if arg1 is both a constant value and + a valid index into the arg0 vector. */ + unsigned int n_elts = VECTOR_CST_NELTS (arg0); + if (TREE_CODE (arg1) != INTEGER_CST + || TREE_INT_CST_LOW (arg1) > (n_elts - 1)) + return false; + lhs = gimple_call_lhs (stmt); + tree lhs_type = TREE_TYPE (lhs); + tree arg0_type = TREE_TYPE (arg0); + tree splat; + if (TREE_CODE (arg0) == VECTOR_CST) + splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1)); + else + { + /* Determine (in bits) the length and start location of the + splat value for a call to the tree_vec_extract helper. */ + int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type)) + * BITS_PER_UNIT / n_elts; + int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size; + tree len = build_int_cst (bitsizetype, splat_elem_size); + tree start = build_int_cst (bitsizetype, splat_start_bit); + splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0, + len, start); + } + /* And finally, build the new vector. */ + tree splat_tree = build_vector_from_val (lhs_type, splat); + g = gimple_build_assign (lhs, splat_tree); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* vec_mergel (integral and float types). */ + case RS6000_BIF_VMRGLH: + case RS6000_BIF_VMRGLW: + case RS6000_BIF_XXMRGLW_4SI: + case RS6000_BIF_VMRGLB: + case RS6000_BIF_VEC_MERGEL_V2DI: + case RS6000_BIF_XXMRGLW_4SF: + case RS6000_BIF_VEC_MERGEL_V2DF: + fold_mergehl_helper (gsi, stmt, 1); + return true; + /* vec_mergeh (integral and float types). */ + case RS6000_BIF_VMRGHH: + case RS6000_BIF_VMRGHW: + case RS6000_BIF_XXMRGHW_4SI: + case RS6000_BIF_VMRGHB: + case RS6000_BIF_VEC_MERGEH_V2DI: + case RS6000_BIF_XXMRGHW_4SF: + case RS6000_BIF_VEC_MERGEH_V2DF: + fold_mergehl_helper (gsi, stmt, 0); + return true; + + /* Flavors of vec_mergee. */ + case RS6000_BIF_VMRGEW_V4SI: + case RS6000_BIF_VMRGEW_V2DI: + case RS6000_BIF_VMRGEW_V4SF: + case RS6000_BIF_VMRGEW_V2DF: + fold_mergeeo_helper (gsi, stmt, 0); + return true; + /* Flavors of vec_mergeo.
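*/ + /* The last argument of fold_mergeeo_helper selects the lanes: 0 above for vec_mergee (even elements), 1 below for vec_mergeo (odd elements), mirroring fold_mergehl_helper's 0 for vec_mergeh and 1 for vec_mergel.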
*/ + case RS6000_BIF_VMRGOW_V4SI: + case RS6000_BIF_VMRGOW_V2DI: + case RS6000_BIF_VMRGOW_V4SF: + case RS6000_BIF_VMRGOW_V2DF: + fold_mergeeo_helper (gsi, stmt, 1); + return true; + + /* d = vec_pack (a, b) */ + case RS6000_BIF_VPKUDUM: + case RS6000_BIF_VPKUHUM: + case RS6000_BIF_VPKUWUM: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + lhs = gimple_call_lhs (stmt); + gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + + /* d = vec_unpackh (a) */ + /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call + in this code is sensitive to endian-ness, and needs to be inverted to + handle both LE and BE targets. */ + case RS6000_BIF_VUPKHSB: + case RS6000_BIF_VUPKHSH: + case RS6000_BIF_VUPKHSW: + { + arg0 = gimple_call_arg (stmt, 0); + lhs = gimple_call_lhs (stmt); + if (BYTES_BIG_ENDIAN) + g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0); + else + g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + /* d = vec_unpackl (a) */ + case RS6000_BIF_VUPKLSB: + case RS6000_BIF_VUPKLSH: + case RS6000_BIF_VUPKLSW: + { + arg0 = gimple_call_arg (stmt, 0); + lhs = gimple_call_lhs (stmt); + if (BYTES_BIG_ENDIAN) + g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0); + else + g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0); + gimple_set_location (g, gimple_location (stmt)); + gsi_replace (gsi, g, true); + return true; + } + /* There is no gimple type corresponding with pixel, so just return. */ + case RS6000_BIF_VUPKHPX: + case RS6000_BIF_VUPKLPX: + return false; + + /* vec_perm. */ + case RS6000_BIF_VPERM_16QI: + case RS6000_BIF_VPERM_8HI: + case RS6000_BIF_VPERM_4SI: + case RS6000_BIF_VPERM_2DI: + case RS6000_BIF_VPERM_4SF: + case RS6000_BIF_VPERM_2DF: + case RS6000_BIF_VPERM_16QI_UNS: + case RS6000_BIF_VPERM_8HI_UNS: + case RS6000_BIF_VPERM_4SI_UNS: + case RS6000_BIF_VPERM_2DI_UNS: + { + arg0 = gimple_call_arg (stmt, 0); + arg1 = gimple_call_arg (stmt, 1); + tree permute = gimple_call_arg (stmt, 2); + lhs = gimple_call_lhs (stmt); + location_t loc = gimple_location (stmt); + gimple_seq stmts = NULL; + // convert arg0 and arg1 to match the type of the permute + // for the VEC_PERM_EXPR operation. + tree permute_type = (TREE_TYPE (permute)); + tree arg0_ptype = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR, + permute_type, arg0); + tree arg1_ptype = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR, + permute_type, arg1); + tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR, + permute_type, arg0_ptype, arg1_ptype, + permute); + // Convert the result back to the desired lhs type upon completion. + tree temp = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR, + TREE_TYPE (lhs), lhs_ptype); + gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT); + g = gimple_build_assign (lhs, temp); + gimple_set_location (g, loc); + gsi_replace (gsi, g, true); + return true; + } + + default: + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n", + fn_code, fn_name1, fn_name2); + break; + } + + return false; +} + +/* Expand an expression EXP that calls a built-in function, + with result going to TARGET if that's convenient + (and in mode MODE if that's convenient). + SUBTARGET may be used as the target for computing one of EXP's operands. + IGNORE is nonzero if the value is to be ignored. 
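*/ + /* Note: this is the legacy expansion entry point; once the reworked builtin infrastructure is live it defers immediately to rs6000_expand_new_builtin, and none of the legacy bdesc tables consulted later in this function are used.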
*/ + + rtx + rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, + machine_mode mode ATTRIBUTE_UNUSED, + int ignore ATTRIBUTE_UNUSED) + { + if (new_builtins_are_live) + return rs6000_expand_new_builtin (exp, target, subtarget, mode, ignore); + + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + enum rs6000_builtins fcode + = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl); + size_t uns_fcode = (size_t)fcode; + const struct builtin_description *d; + size_t i; + rtx ret; + bool success; + HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask; + bool func_valid_p = ((rs6000_builtin_mask & mask) == mask); + enum insn_code icode = rs6000_builtin_info[uns_fcode].icode; + + /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit + floating point type, depending on whether long double is the IBM extended + double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if + we only define one variant of the built-in function, and switch the code + when defining it, rather than defining two built-ins and using the + overload table in rs6000-c.c to switch between the two. If we don't have + the proper assembler, don't do this switch because CODE_FOR_*kf* and + CODE_FOR_*tf* will be CODE_FOR_nothing. */ + if (FLOAT128_IEEE_P (TFmode)) + switch (icode) + { + default: + break; + + case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break; + case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break; + case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break; + case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break; + case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break; + case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break; + case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break; + case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break; + case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break; + case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break; + case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break; + case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break; + case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break; + + case CODE_FOR_xscmpexpqp_eq_kf: + icode = CODE_FOR_xscmpexpqp_eq_tf; + break; + + case CODE_FOR_xscmpexpqp_lt_kf: + icode = CODE_FOR_xscmpexpqp_lt_tf; + break; + + case CODE_FOR_xscmpexpqp_gt_kf: + icode = CODE_FOR_xscmpexpqp_gt_tf; + break; + + case CODE_FOR_xscmpexpqp_unordered_kf: + icode = CODE_FOR_xscmpexpqp_unordered_tf; + break; + } + + if (TARGET_DEBUG_BUILTIN) + { + const char *name1 = rs6000_builtin_info[uns_fcode].name; + const char *name2 = (icode != CODE_FOR_nothing) + ? get_insn_name ((int) icode) + : "nothing"; + const char *name3; + + switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK) + { + default: name3 = "unknown"; break; + case RS6000_BTC_SPECIAL: name3 = "special"; break; + case RS6000_BTC_UNARY: name3 = "unary"; break; + case RS6000_BTC_BINARY: name3 = "binary"; break; + case RS6000_BTC_TERNARY: name3 = "ternary"; break; + case RS6000_BTC_QUATERNARY: name3 = "quaternary"; break; + case RS6000_BTC_PREDICATE: name3 = "predicate"; break; + case RS6000_BTC_ABS: name3 = "abs"; break; + case RS6000_BTC_DST: name3 = "dst"; break; + } + + fprintf (stderr, + "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n", + (name1) ? name1 : "---", fcode, + (name2) ? name2 : "---", (int) icode, + name3, + func_valid_p ?
"" : ", not valid"); + } + + if (!func_valid_p) + { + rs6000_invalid_builtin (fcode); + + /* Given it is invalid, just generate a normal call. */ + return expand_call (exp, target, ignore); + } + + switch (fcode) + { + case RS6000_BUILTIN_RECIP: + return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target); + + case RS6000_BUILTIN_RECIPF: + return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target); + + case RS6000_BUILTIN_RSQRTF: + return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target); + + case RS6000_BUILTIN_RSQRT: + return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target); + + case POWER7_BUILTIN_BPERMD: + return rs6000_expand_binop_builtin (((TARGET_64BIT) + ? CODE_FOR_bpermd_di + : CODE_FOR_bpermd_si), exp, target); + + case RS6000_BUILTIN_GET_TB: + return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase, + target); + + case RS6000_BUILTIN_MFTB: + return rs6000_expand_zeroop_builtin (((TARGET_64BIT) + ? CODE_FOR_rs6000_mftb_di + : CODE_FOR_rs6000_mftb_si), + target); + + case RS6000_BUILTIN_MFFS: + return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target); + + case RS6000_BUILTIN_MTFSB0: + return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp); + + case RS6000_BUILTIN_MTFSB1: + return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp); + + case RS6000_BUILTIN_SET_FPSCR_RN: + return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn, + exp); + + case RS6000_BUILTIN_SET_FPSCR_DRN: + return + rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn, + exp); + + case RS6000_BUILTIN_MFFSL: + return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target); + + case RS6000_BUILTIN_MTFSF: + return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp); + + case RS6000_BUILTIN_CPU_INIT: + case RS6000_BUILTIN_CPU_IS: + case RS6000_BUILTIN_CPU_SUPPORTS: + return cpu_expand_builtin (fcode, exp, target); + + case MISC_BUILTIN_SPEC_BARRIER: + { + emit_insn (gen_speculation_barrier ()); + return NULL_RTX; + } + + case ALTIVEC_BUILTIN_MASK_FOR_LOAD: + { + int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct + : (int) CODE_FOR_altivec_lvsl_direct); + machine_mode tmode = insn_data[icode2].operand[0].mode; + machine_mode mode = insn_data[icode2].operand[1].mode; + tree arg; + rtx op, addr, pat; + + gcc_assert (TARGET_ALTIVEC); + + arg = CALL_EXPR_ARG (exp, 0); + gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg))); + op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL); + addr = memory_address (mode, op); + /* We need to negate the address. */ + op = gen_reg_rtx (GET_MODE (addr)); + emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr))); + op = gen_rtx_MEM (mode, op); + + if (target == 0 + || GET_MODE (target) != tmode + || ! (*insn_data[icode2].operand[0].predicate) (target, tmode)) + target = gen_reg_rtx (tmode); + + pat = GEN_FCN (icode2) (target, op); + if (!pat) + return 0; + emit_insn (pat); + + return target; + } + + case ALTIVEC_BUILTIN_VCFUX: + case ALTIVEC_BUILTIN_VCFSX: + case ALTIVEC_BUILTIN_VCTUXS: + case ALTIVEC_BUILTIN_VCTSXS: + /* FIXME: There's got to be a nicer way to handle this case than + constructing a new CALL_EXPR. */ + if (call_expr_nargs (exp) == 1) + { + exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp), + 2, CALL_EXPR_ARG (exp, 0), integer_zero_node); + } + break; + + /* For the pack and unpack int128 routines, fix up the builtin so it + uses the correct IBM128 type. 
*/ + case MISC_BUILTIN_PACK_IF: + if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD) + { + icode = CODE_FOR_packtf; + fcode = MISC_BUILTIN_PACK_TF; + uns_fcode = (size_t)fcode; + } + break; + + case MISC_BUILTIN_UNPACK_IF: + if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD) + { + icode = CODE_FOR_unpacktf; + fcode = MISC_BUILTIN_UNPACK_TF; + uns_fcode = (size_t)fcode; + } + break; + + default: + break; + } + + if (TARGET_MMA) + { + ret = mma_expand_builtin (exp, target, &success); + + if (success) + return ret; + } + if (TARGET_ALTIVEC) + { + ret = altivec_expand_builtin (exp, target, &success); + + if (success) + return ret; + } + if (TARGET_HTM) + { + ret = htm_expand_builtin (exp, target, &success); + + if (success) + return ret; + } + + unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_OPND_MASK; + /* RS6000_BTC_SPECIAL represents no-operand operators. */ + gcc_assert (attr == RS6000_BTC_UNARY + || attr == RS6000_BTC_BINARY + || attr == RS6000_BTC_TERNARY + || attr == RS6000_BTC_QUATERNARY + || attr == RS6000_BTC_SPECIAL); + + /* Handle simple unary operations. */ + d = bdesc_1arg; + for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++) + if (d->code == fcode) + return rs6000_expand_unop_builtin (icode, exp, target); + + /* Handle simple binary operations. */ + d = bdesc_2arg; + for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++) + if (d->code == fcode) + return rs6000_expand_binop_builtin (icode, exp, target); + + /* Handle simple ternary operations. */ + d = bdesc_3arg; + for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++) + if (d->code == fcode) + return rs6000_expand_ternop_builtin (icode, exp, target); + + /* Handle simple quaternary operations. */ + d = bdesc_4arg; + for (i = 0; i < ARRAY_SIZE (bdesc_4arg); i++, d++) + if (d->code == fcode) + return rs6000_expand_quaternop_builtin (icode, exp, target); + + /* Handle simple no-argument operations. */ + d = bdesc_0arg; + for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++) + if (d->code == fcode) + return rs6000_expand_zeroop_builtin (icode, target); + + gcc_unreachable (); +} + +/* Expand ALTIVEC_BUILTIN_MASK_FOR_LOAD. */ +rtx +rs6000_expand_ldst_mask (rtx target, tree arg0) +{ + int icode2 = BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct + : (int) CODE_FOR_altivec_lvsl_direct; + machine_mode tmode = insn_data[icode2].operand[0].mode; + machine_mode mode = insn_data[icode2].operand[1].mode; + + gcc_assert (TARGET_ALTIVEC); + + gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg0))); + rtx op = expand_expr (arg0, NULL_RTX, Pmode, EXPAND_NORMAL); + rtx addr = memory_address (mode, op); + /* We need to negate the address. */ + op = gen_reg_rtx (GET_MODE (addr)); + emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr))); + op = gen_rtx_MEM (mode, op); + + if (target == 0 + || GET_MODE (target) != tmode + || !insn_data[icode2].operand[0].predicate (target, tmode)) + target = gen_reg_rtx (tmode); + + rtx pat = GEN_FCN (icode2) (target, op); + if (!pat) + return 0; + emit_insn (pat); + + return target; +} + +/* Expand the CPU builtin in FCODE and store the result in TARGET. */ +static rtx +new_cpu_expand_builtin (enum rs6000_gen_builtins fcode, + tree exp ATTRIBUTE_UNUSED, rtx target) +{ + /* __builtin_cpu_init () is a nop, so expand to nothing. */ + if (fcode == RS6000_BIF_CPU_INIT) + return const0_rtx; + + if (target == 0 || GET_MODE (target) != SImode) + target = gen_reg_rtx (SImode); + + /* TODO: Factor the #ifdef'd code into a separate function. 
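*/ + /* Rough sketch of the glibc-backed path below, taking "power9" as an example argument: __builtin_cpu_is loads the 32-bit platform word from the TCB at TCB_PLATFORM_OFFSET off the thread pointer and compares it for equality with the compile-time id for "power9" (biased by _DL_FIRST_PLATFORM), while __builtin_cpu_supports ANDs the selected hwcap word against the feature mask and normalizes the result to 0 or 1.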
*/ +#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB + tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0); + /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back + to a STRING_CST. */ + if (TREE_CODE (arg) == ARRAY_REF + && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST + && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST + && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0) + arg = TREE_OPERAND (arg, 0); + + if (TREE_CODE (arg) != STRING_CST) + { + error ("builtin %qs only accepts a string argument", + rs6000_builtin_info_x[(size_t) fcode].bifname); + return const0_rtx; + } + + if (fcode == RS6000_BIF_CPU_IS) + { + const char *cpu = TREE_STRING_POINTER (arg); + rtx cpuid = NULL_RTX; + for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++) + if (strcmp (cpu, cpu_is_info[i].cpu) == 0) + { + /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */ + cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM); + break; + } + if (cpuid == NULL_RTX) + { + /* Invalid CPU argument. */ + error ("cpu %qs is an invalid argument to builtin %qs", + cpu, rs6000_builtin_info_x[(size_t) fcode].bifname); + return const0_rtx; + } + + rtx platform = gen_reg_rtx (SImode); + rtx address = gen_rtx_PLUS (Pmode, + gen_rtx_REG (Pmode, TLS_REGNUM), + GEN_INT (TCB_PLATFORM_OFFSET)); + rtx tcbmem = gen_const_mem (SImode, address); + emit_move_insn (platform, tcbmem); + emit_insn (gen_eqsi3 (target, platform, cpuid)); + } + else if (fcode == RS6000_BIF_CPU_SUPPORTS) + { + const char *hwcap = TREE_STRING_POINTER (arg); + rtx mask = NULL_RTX; + int hwcap_offset; + for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++) + if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0) + { + mask = GEN_INT (cpu_supports_info[i].mask); + hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id); + break; + } + if (mask == NULL_RTX) + { + /* Invalid HWCAP argument. */ + error ("%s %qs is an invalid argument to builtin %qs", + "hwcap", hwcap, + rs6000_builtin_info_x[(size_t) fcode].bifname); + return const0_rtx; + } + + rtx tcb_hwcap = gen_reg_rtx (SImode); + rtx address = gen_rtx_PLUS (Pmode, + gen_rtx_REG (Pmode, TLS_REGNUM), + GEN_INT (hwcap_offset)); + rtx tcbmem = gen_const_mem (SImode, address); + emit_move_insn (tcb_hwcap, tcbmem); + rtx scratch1 = gen_reg_rtx (SImode); + emit_insn (gen_rtx_SET (scratch1, + gen_rtx_AND (SImode, tcb_hwcap, mask))); + rtx scratch2 = gen_reg_rtx (SImode); + emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx)); + emit_insn (gen_rtx_SET (target, + gen_rtx_XOR (SImode, scratch2, const1_rtx))); + } + else + gcc_unreachable (); + + /* Record that we have expanded a CPU builtin, so that we can later + emit a reference to the special symbol exported by LIBC to ensure we + do not link against an old LIBC that doesn't support this feature. */ + cpu_builtin_p = true; + +#else + warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware " + "capability bits", rs6000_builtin_info_x[(size_t) fcode].bifname); + + /* For old LIBCs, always return FALSE. */ + emit_move_insn (target, GEN_INT (0)); +#endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */ + + return target; +} + +/* For the element-reversing load/store built-ins, produce the correct + insn_code depending on the target endianness. */ +static insn_code +elemrev_icode (rs6000_gen_builtins fcode) +{ + switch (fcode) + { + case RS6000_BIF_ST_ELEMREV_V1TI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti + : CODE_FOR_vsx_st_elemrev_v1ti; + + case RS6000_BIF_ST_ELEMREV_V2DF: + return BYTES_BIG_ENDIAN ? 
CODE_FOR_vsx_store_v2df + : CODE_FOR_vsx_st_elemrev_v2df; + + case RS6000_BIF_ST_ELEMREV_V2DI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di + : CODE_FOR_vsx_st_elemrev_v2di; + + case RS6000_BIF_ST_ELEMREV_V4SF: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf + : CODE_FOR_vsx_st_elemrev_v4sf; + + case RS6000_BIF_ST_ELEMREV_V4SI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si + : CODE_FOR_vsx_st_elemrev_v4si; + + case RS6000_BIF_ST_ELEMREV_V8HI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi + : CODE_FOR_vsx_st_elemrev_v8hi; + + case RS6000_BIF_ST_ELEMREV_V16QI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi + : CODE_FOR_vsx_st_elemrev_v16qi; + + case RS6000_BIF_LD_ELEMREV_V2DF: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df + : CODE_FOR_vsx_ld_elemrev_v2df; + + case RS6000_BIF_LD_ELEMREV_V1TI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti + : CODE_FOR_vsx_ld_elemrev_v1ti; + + case RS6000_BIF_LD_ELEMREV_V2DI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di + : CODE_FOR_vsx_ld_elemrev_v2di; + + case RS6000_BIF_LD_ELEMREV_V4SF: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf + : CODE_FOR_vsx_ld_elemrev_v4sf; + + case RS6000_BIF_LD_ELEMREV_V4SI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si + : CODE_FOR_vsx_ld_elemrev_v4si; + + case RS6000_BIF_LD_ELEMREV_V8HI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi + : CODE_FOR_vsx_ld_elemrev_v8hi; + + case RS6000_BIF_LD_ELEMREV_V16QI: + return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi + : CODE_FOR_vsx_ld_elemrev_v16qi; + default: + ; + } + + gcc_unreachable (); +} + +/* Expand an AltiVec vector load builtin, and return the expanded rtx. */ +static rtx +ldv_expand_builtin (rtx target, insn_code icode, rtx *op, machine_mode tmode) +{ + if (target == 0 + || GET_MODE (target) != tmode + || !insn_data[icode].operand[0].predicate (target, tmode)) + target = gen_reg_rtx (tmode); + + op[1] = copy_to_mode_reg (Pmode, op[1]); + + /* These CELL built-ins use BLKmode instead of tmode for historical + (i.e., unknown) reasons. TODO: Is this necessary? */ + bool blk = (icode == CODE_FOR_altivec_lvlx + || icode == CODE_FOR_altivec_lvlxl + || icode == CODE_FOR_altivec_lvrx + || icode == CODE_FOR_altivec_lvrxl); + + /* For LVX, express the RTL accurately by ANDing the address with -16. + LVXL and LVE*X expand to use UNSPECs to hide their special behavior, + so the raw address is fine. */ + /* TODO: That statement seems wrong, as the UNSPECs don't surround the + memory expression, so a latent bug may lie here. The &-16 is likely + needed for all VMX-style loads. */ + if (icode == CODE_FOR_altivec_lvx_v1ti + || icode == CODE_FOR_altivec_lvx_v2df + || icode == CODE_FOR_altivec_lvx_v2di + || icode == CODE_FOR_altivec_lvx_v4sf + || icode == CODE_FOR_altivec_lvx_v4si + || icode == CODE_FOR_altivec_lvx_v8hi + || icode == CODE_FOR_altivec_lvx_v16qi) + { + rtx rawaddr; + if (op[0] == const0_rtx) + rawaddr = op[1]; + else + { + op[0] = copy_to_mode_reg (Pmode, op[0]); + rawaddr = gen_rtx_PLUS (Pmode, op[1], op[0]); + } + rtx addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16)); + addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr); + + emit_insn (gen_rtx_SET (target, addr)); + } + else + { + rtx addr; + if (op[0] == const0_rtx) + addr = gen_rtx_MEM (blk ? BLKmode : tmode, op[1]); + else + { + op[0] = copy_to_mode_reg (Pmode, op[0]); + addr = gen_rtx_MEM (blk ? 
BLKmode : tmode, + gen_rtx_PLUS (Pmode, op[1], op[0])); + } + + rtx pat = GEN_FCN (icode) (target, addr); + if (!pat) + return 0; + emit_insn (pat); + } + + return target; +} + +/* Expand a builtin function that loads a scalar into a vector register + with sign extension, and return the expanded rtx. */ +static rtx +lxvrse_expand_builtin (rtx target, insn_code icode, rtx *op, + machine_mode tmode, machine_mode smode) +{ + rtx pat, addr; + op[1] = copy_to_mode_reg (Pmode, op[1]); + + if (op[0] == const0_rtx) + addr = gen_rtx_MEM (tmode, op[1]); + else + { + op[0] = copy_to_mode_reg (Pmode, op[0]); + addr = gen_rtx_MEM (smode, + gen_rtx_PLUS (Pmode, op[1], op[0])); + } + + rtx discratch = gen_reg_rtx (V2DImode); + rtx tiscratch = gen_reg_rtx (TImode); + + /* Emit the lxvr*x insn. */ + pat = GEN_FCN (icode) (tiscratch, addr); + if (!pat) + return 0; + emit_insn (pat); + + /* Emit a sign extension from V16QI,V8HI,V4SI to V2DI. */ + rtx temp1; + if (icode == CODE_FOR_vsx_lxvrbx) + { + temp1 = simplify_gen_subreg (V16QImode, tiscratch, TImode, 0); + emit_insn (gen_vsx_sign_extend_qi_v2di (discratch, temp1)); + } + else if (icode == CODE_FOR_vsx_lxvrhx) + { + temp1 = simplify_gen_subreg (V8HImode, tiscratch, TImode, 0); + emit_insn (gen_vsx_sign_extend_hi_v2di (discratch, temp1)); + } + else if (icode == CODE_FOR_vsx_lxvrwx) + { + temp1 = simplify_gen_subreg (V4SImode, tiscratch, TImode, 0); + emit_insn (gen_vsx_sign_extend_si_v2di (discratch, temp1)); + } + else if (icode == CODE_FOR_vsx_lxvrdx) + discratch = simplify_gen_subreg (V2DImode, tiscratch, TImode, 0); + else + gcc_unreachable (); + + /* Emit the sign extension from V2DI (double) to TI (quad). */ + rtx temp2 = simplify_gen_subreg (TImode, discratch, V2DImode, 0); + emit_insn (gen_extendditi2_vector (target, temp2)); + + return target; +} + +/* Expand a builtin function that loads a scalar into a vector register + with zero extension, and return the expanded rtx. */ +static rtx +lxvrze_expand_builtin (rtx target, insn_code icode, rtx *op, + machine_mode tmode, machine_mode smode) +{ + rtx pat, addr; + op[1] = copy_to_mode_reg (Pmode, op[1]); + + if (op[0] == const0_rtx) + addr = gen_rtx_MEM (tmode, op[1]); + else + { + op[0] = copy_to_mode_reg (Pmode, op[0]); + addr = gen_rtx_MEM (smode, + gen_rtx_PLUS (Pmode, op[1], op[0])); + } + + pat = GEN_FCN (icode) (target, addr); + if (!pat) + return 0; + emit_insn (pat); + return target; +} + +/* Expand an AltiVec vector store builtin, and return the expanded rtx. */ +static rtx +stv_expand_builtin (insn_code icode, rtx *op, + machine_mode tmode, machine_mode smode) +{ + op[2] = copy_to_mode_reg (Pmode, op[2]); + + /* For STVX, express the RTL accurately by ANDing the address with -16. + STVXL and STVE*X expand to use UNSPECs to hide their special behavior, + so the raw address is fine. */ + /* TODO: That statement seems wrong, as the UNSPECs don't surround the + memory expression, so a latent bug may lie here. The &-16 is likely + needed for all VMX-style stores. 
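*/ + /* Three store shapes are handled below: the masked VMX stvx forms, the ISA 3.1 stxvr{b,h,w,d}x right-element stores (which first truncate the 128-bit source to the element width), and a generic predicate-checked fallback for everything else.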
*/ + if (icode == CODE_FOR_altivec_stvx_v2df + || icode == CODE_FOR_altivec_stvx_v2di + || icode == CODE_FOR_altivec_stvx_v4sf + || icode == CODE_FOR_altivec_stvx_v4si + || icode == CODE_FOR_altivec_stvx_v8hi + || icode == CODE_FOR_altivec_stvx_v16qi) + { + rtx rawaddr; + if (op[1] == const0_rtx) + rawaddr = op[2]; + else + { + op[1] = copy_to_mode_reg (Pmode, op[1]); + rawaddr = gen_rtx_PLUS (Pmode, op[2], op[1]); + } + + rtx addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16)); + addr = gen_rtx_MEM (tmode, addr); + op[0] = copy_to_mode_reg (tmode, op[0]); + emit_insn (gen_rtx_SET (addr, op[0])); + } + else if (icode == CODE_FOR_vsx_stxvrbx + || icode == CODE_FOR_vsx_stxvrhx + || icode == CODE_FOR_vsx_stxvrwx + || icode == CODE_FOR_vsx_stxvrdx) + { + rtx truncrtx = gen_rtx_TRUNCATE (tmode, op[0]); + op[0] = copy_to_mode_reg (E_TImode, truncrtx); + + rtx addr; + if (op[1] == const0_rtx) + addr = gen_rtx_MEM (Pmode, op[2]); + else + { + op[1] = copy_to_mode_reg (Pmode, op[1]); + addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op[2], op[1])); + } + rtx pat = GEN_FCN (icode) (addr, op[0]); + if (pat) + emit_insn (pat); + } + else + { + if (!insn_data[icode].operand[1].predicate (op[0], smode)) + op[0] = copy_to_mode_reg (smode, op[0]); + + rtx addr; + if (op[1] == const0_rtx) + addr = gen_rtx_MEM (tmode, op[2]); + else + { + op[1] = copy_to_mode_reg (Pmode, op[1]); + addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op[2], op[1])); + } + + rtx pat = GEN_FCN (icode) (addr, op[0]); + if (pat) + emit_insn (pat); + } + + return NULL_RTX; +} + +/* Expand the MMA built-in in EXP, and return it. */ +static rtx +new_mma_expand_builtin (tree exp, rtx target, insn_code icode, + rs6000_gen_builtins fcode) +{ + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + bool void_func = TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node; + machine_mode tmode = VOIDmode; + rtx op[MAX_MMA_OPERANDS]; + unsigned nopnds = 0; + + if (!void_func) + { + tmode = insn_data[icode].operand[0].mode; + if (!(target + && GET_MODE (target) == tmode + && insn_data[icode].operand[0].predicate (target, tmode))) + target = gen_reg_rtx (tmode); + op[nopnds++] = target; + } + else + target = const0_rtx; + + call_expr_arg_iterator iter; + tree arg; + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) + { + if (arg == error_mark_node) + return const0_rtx; + + rtx opnd; + const struct insn_operand_data *insn_op; + insn_op = &insn_data[icode].operand[nopnds]; + if (TREE_CODE (arg) == ADDR_EXPR + && MEM_P (DECL_RTL (TREE_OPERAND (arg, 0)))) + opnd = DECL_RTL (TREE_OPERAND (arg, 0)); + else + opnd = expand_normal (arg); + + if (!insn_op->predicate (opnd, insn_op->mode)) + { + /* TODO: This use of constraints needs explanation. */ + if (!strcmp (insn_op->constraint, "n")) + { + if (!CONST_INT_P (opnd)) + error ("argument %d must be an unsigned literal", nopnds); + else + error ("argument %d is an unsigned literal that is " + "out of range", nopnds); + return const0_rtx; + } + opnd = copy_to_mode_reg (insn_op->mode, opnd); + } + + /* Some MMA instructions have INOUT accumulator operands, so force + their target register to be the same as their input register. 
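*/ + /* The "0" matching constraint is what identifies such an operand: it requires input operand 1 to occupy the same register as output operand 0, so reusing OPND as the expansion target below satisfies the pattern without an extra copy.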
*/ + if (!void_func + && nopnds == 1 + && !strcmp (insn_op->constraint, "0") + && insn_op->mode == tmode + && REG_P (opnd) + && insn_data[icode].operand[0].predicate (opnd, tmode)) + target = op[0] = opnd; + + op[nopnds++] = opnd; + } + + rtx pat; + switch (nopnds) + { + case 1: + pat = GEN_FCN (icode) (op[0]); + break; + case 2: + pat = GEN_FCN (icode) (op[0], op[1]); + break; + case 3: + /* The ASSEMBLE builtin source operands are reversed in little-endian + mode, so reorder them. */ + if (fcode == RS6000_BIF_ASSEMBLE_PAIR_V_INTERNAL && !WORDS_BIG_ENDIAN) + std::swap (op[1], op[2]); + pat = GEN_FCN (icode) (op[0], op[1], op[2]); + break; + case 4: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]); + break; + case 5: + /* The ASSEMBLE builtin source operands are reversed in little-endian + mode, so reorder them. */ + if (fcode == RS6000_BIF_ASSEMBLE_ACC_INTERNAL && !WORDS_BIG_ENDIAN) + { + std::swap (op[1], op[4]); + std::swap (op[2], op[3]); + } + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]); + break; + case 6: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]); + break; + case 7: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5], op[6]); + break; + default: + gcc_unreachable (); + } + + if (!pat) + return NULL_RTX; + + emit_insn (pat); + return target; +} + +/* Return the appropriate SPR number associated with the given builtin. */ +static inline HOST_WIDE_INT +new_htm_spr_num (enum rs6000_gen_builtins code) +{ + if (code == RS6000_BIF_GET_TFHAR + || code == RS6000_BIF_SET_TFHAR) + return TFHAR_SPR; + else if (code == RS6000_BIF_GET_TFIAR + || code == RS6000_BIF_SET_TFIAR) + return TFIAR_SPR; + else if (code == RS6000_BIF_GET_TEXASR + || code == RS6000_BIF_SET_TEXASR) + return TEXASR_SPR; + gcc_assert (code == RS6000_BIF_GET_TEXASRU + || code == RS6000_BIF_SET_TEXASRU); + return TEXASRU_SPR; +} + +/* Expand the HTM builtin in EXP and store the result in TARGET. + Return the expanded rtx. */ +static rtx +new_htm_expand_builtin (bifdata *bifaddr, rs6000_gen_builtins fcode, + tree exp, rtx target) +{ + if (!TARGET_POWERPC64 + && (fcode == RS6000_BIF_TABORTDC + || fcode == RS6000_BIF_TABORTDCI)) + { + error ("builtin %qs is only valid in 64-bit mode", bifaddr->bifname); + return const0_rtx; + } + + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node; + bool uses_spr = bif_is_htmspr (*bifaddr); + insn_code icode = bifaddr->icode; + + if (uses_spr) + icode = rs6000_htm_spr_icode (nonvoid); + + rtx op[MAX_HTM_OPERANDS]; + int nopnds = 0; + const insn_operand_data *insn_op = &insn_data[icode].operand[0]; + + if (nonvoid) + { + machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode; + if (!target + || GET_MODE (target) != tmode + || (uses_spr && !insn_op->predicate (target, tmode))) + target = gen_reg_rtx (tmode); + if (uses_spr) + op[nopnds++] = target; + } + + tree arg; + call_expr_arg_iterator iter; + + FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) + { + if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS) + return const0_rtx; + + insn_op = &insn_data[icode].operand[nopnds]; + op[nopnds] = expand_normal (arg); + + if (!insn_op->predicate (op[nopnds], insn_op->mode)) + { + /* TODO: This use of constraints could use explanation. + This happens a couple of places, perhaps make that a + function to document what's happening. */ + if (!strcmp (insn_op->constraint, "n")) + { + int arg_num = nonvoid ? 
nopnds : nopnds + 1; + if (!CONST_INT_P (op[nopnds])) + error ("argument %d must be an unsigned literal", arg_num); + else + error ("argument %d is an unsigned literal that is " + "out of range", arg_num); + return const0_rtx; + } + op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]); + } + + nopnds++; + } + + /* Handle the builtins for extended mnemonics. These accept + no arguments, but map to builtins that take arguments. */ + switch (fcode) + { + case RS6000_BIF_TENDALL: /* Alias for: tend. 1 */ + case RS6000_BIF_TRESUME: /* Alias for: tsr. 1 */ + op[nopnds++] = GEN_INT (1); + break; + case RS6000_BIF_TSUSPEND: /* Alias for: tsr. 0 */ + op[nopnds++] = GEN_INT (0); + break; + default: + break; + } + + /* If this builtin accesses SPRs, then pass in the appropriate + SPR number and SPR regno as the last two operands. */ + rtx cr = NULL_RTX; + if (uses_spr) + { + machine_mode mode = TARGET_POWERPC64 ? DImode : SImode; + op[nopnds++] = gen_rtx_CONST_INT (mode, new_htm_spr_num (fcode)); + } + /* If this builtin accesses a CR field, then pass in a scratch + CR field as the last operand. */ + else if (bif_is_htmcr (*bifaddr)) + { + cr = gen_reg_rtx (CCmode); + op[nopnds++] = cr; + } + + rtx pat; + switch (nopnds) + { + case 1: + pat = GEN_FCN (icode) (op[0]); + break; + case 2: + pat = GEN_FCN (icode) (op[0], op[1]); + break; + case 3: + pat = GEN_FCN (icode) (op[0], op[1], op[2]); + break; + case 4: + pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]); + break; + default: + gcc_unreachable (); + } + if (!pat) + return NULL_RTX; + emit_insn (pat); + + if (bif_is_htmcr (*bifaddr)) + { + if (fcode == RS6000_BIF_TBEGIN) + { + /* Emit code to set TARGET to true or false depending on + whether the tbegin. instruction succeeded or failed + to start a transaction. We do this by placing the 1's + complement of CR's EQ bit into TARGET. */ + rtx scratch = gen_reg_rtx (SImode); + emit_insn (gen_rtx_SET (scratch, + gen_rtx_EQ (SImode, cr, + const0_rtx))); + emit_insn (gen_rtx_SET (target, + gen_rtx_XOR (SImode, scratch, + GEN_INT (1)))); + } + else + { + /* Emit code to copy the 4-bit condition register field + CR into the least significant end of register TARGET. */ + rtx scratch1 = gen_reg_rtx (SImode); + rtx scratch2 = gen_reg_rtx (SImode); + rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0); + emit_insn (gen_movcc (subreg, cr)); + emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28))); + emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf))); + } + } + + if (nonvoid) + return target; + return const0_rtx; +} + +/* Expand an expression EXP that calls a built-in function, + with result going to TARGET if that's convenient + (and in mode MODE if that's convenient). + SUBTARGET may be used as the target for computing one of EXP's operands. + IGNORE is nonzero if the value is to be ignored. + Use the new builtin infrastructure. */ +static rtx +rs6000_expand_new_builtin (tree exp, rtx target, + rtx /* subtarget */, + machine_mode /* mode */, + int ignore) +{ + tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); + enum rs6000_gen_builtins fcode + = (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl); + size_t uns_fcode = (size_t)fcode; + enum insn_code icode = rs6000_builtin_info_x[uns_fcode].icode; + + /* TODO: The following commentary and code is inherited from the original + builtin processing code. The commentary is a bit confusing, with the + intent being that KFmode is always IEEE-128, IFmode is always IBM + double-double, and TFmode is the current long double. 
The code is + confusing in that it converts from KFmode to TFmode pattern names, + when the other direction is more intuitive. Try to address this. */ + + /* We have two different modes (KFmode, TFmode) that are the IEEE + 128-bit floating point type, depending on whether long double is the + IBM extended double (KFmode) or long double is IEEE 128-bit (TFmode). + It is simpler if we only define one variant of the built-in function, + and switch the code when defining it, rather than defining two built- + ins and using the overload table in rs6000-c.c to switch between the + two. If we don't have the proper assembler, don't do this switch + because CODE_FOR_*kf* and CODE_FOR_*tf* will be CODE_FOR_nothing. */ + if (FLOAT128_IEEE_P (TFmode)) + switch (icode) + { + case CODE_FOR_sqrtkf2_odd: + icode = CODE_FOR_sqrttf2_odd; + break; + case CODE_FOR_trunckfdf2_odd: + icode = CODE_FOR_trunctfdf2_odd; + break; + case CODE_FOR_addkf3_odd: + icode = CODE_FOR_addtf3_odd; + break; + case CODE_FOR_subkf3_odd: + icode = CODE_FOR_subtf3_odd; + break; + case CODE_FOR_mulkf3_odd: + icode = CODE_FOR_multf3_odd; + break; + case CODE_FOR_divkf3_odd: + icode = CODE_FOR_divtf3_odd; + break; + case CODE_FOR_fmakf4_odd: + icode = CODE_FOR_fmatf4_odd; + break; + case CODE_FOR_xsxexpqp_kf: + icode = CODE_FOR_xsxexpqp_tf; + break; + case CODE_FOR_xsxsigqp_kf: + icode = CODE_FOR_xsxsigqp_tf; + break; + case CODE_FOR_xststdcnegqp_kf: + icode = CODE_FOR_xststdcnegqp_tf; + break; + case CODE_FOR_xsiexpqp_kf: + icode = CODE_FOR_xsiexpqp_tf; + break; + case CODE_FOR_xsiexpqpf_kf: + icode = CODE_FOR_xsiexpqpf_tf; + break; + case CODE_FOR_xststdcqp_kf: + icode = CODE_FOR_xststdcqp_tf; + break; + case CODE_FOR_xscmpexpqp_eq_kf: + icode = CODE_FOR_xscmpexpqp_eq_tf; + break; + case CODE_FOR_xscmpexpqp_lt_kf: + icode = CODE_FOR_xscmpexpqp_lt_tf; + break; + case CODE_FOR_xscmpexpqp_gt_kf: + icode = CODE_FOR_xscmpexpqp_gt_tf; + break; + case CODE_FOR_xscmpexpqp_unordered_kf: + icode = CODE_FOR_xscmpexpqp_unordered_tf; + break; + default: + break; + } + + /* In case of "#pragma target" changes, we initialize all builtins + but check for actual availability now, during expand time. For + invalid builtins, generate a normal call. 
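*/ + /* Each builtin records a single enablement condition (ENB_*), which the chain below maps to the matching TARGET_* predicate; if the test fails, rs6000_invalid_new_builtin emits the diagnostic and the builtin is expanded as an ordinary call so compilation can continue.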
*/ + bifdata *bifaddr = &rs6000_builtin_info_x[uns_fcode]; + bif_enable e = bifaddr->enable; + + if (!(e == ENB_ALWAYS + || (e == ENB_P5 && TARGET_POPCNTB) + || (e == ENB_P6 && TARGET_CMPB) + || (e == ENB_ALTIVEC && TARGET_ALTIVEC) + || (e == ENB_CELL && TARGET_ALTIVEC + && rs6000_cpu == PROCESSOR_CELL) + || (e == ENB_VSX && TARGET_VSX) + || (e == ENB_P7 && TARGET_POPCNTD) + || (e == ENB_P7_64 && TARGET_POPCNTD + && TARGET_POWERPC64) + || (e == ENB_P8 && TARGET_DIRECT_MOVE) + || (e == ENB_P8V && TARGET_P8_VECTOR) + || (e == ENB_P9 && TARGET_MODULO) + || (e == ENB_P9_64 && TARGET_MODULO + && TARGET_POWERPC64) + || (e == ENB_P9V && TARGET_P9_VECTOR) + || (e == ENB_IEEE128_HW && TARGET_FLOAT128_HW) + || (e == ENB_DFP && TARGET_DFP) + || (e == ENB_CRYPTO && TARGET_CRYPTO) + || (e == ENB_HTM && TARGET_HTM) + || (e == ENB_P10 && TARGET_POWER10) + || (e == ENB_P10_64 && TARGET_POWER10 + && TARGET_POWERPC64) + || (e == ENB_MMA && TARGET_MMA))) + { + rs6000_invalid_new_builtin (fcode); + return expand_call (exp, target, ignore); + } + + if (bif_is_nosoft (*bifaddr) + && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT) + { + error ("%<%s%> not supported with %<-msoft-float%>", + bifaddr->bifname); + return const0_rtx; + } + + if (bif_is_no32bit (*bifaddr) && TARGET_32BIT) + fatal_error (input_location, + "%<%s%> is not supported in 32-bit mode", + bifaddr->bifname); + + if (bif_is_cpu (*bifaddr)) + return new_cpu_expand_builtin (fcode, exp, target); + + if (bif_is_init (*bifaddr)) + return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target); + + if (bif_is_set (*bifaddr)) + return altivec_expand_vec_set_builtin (exp); + + if (bif_is_extract (*bifaddr)) + return altivec_expand_vec_ext_builtin (exp, target); + + if (bif_is_predicate (*bifaddr)) + return altivec_expand_predicate_builtin (icode, exp, target); + + if (bif_is_htm (*bifaddr)) + return new_htm_expand_builtin (bifaddr, fcode, exp, target); + + if (bif_is_32bit (*bifaddr) && TARGET_32BIT) + { + if (fcode == RS6000_BIF_MFTB) + icode = CODE_FOR_rs6000_mftb_si; + else + gcc_unreachable (); + } + + if (bif_is_endian (*bifaddr) && BYTES_BIG_ENDIAN) + { + if (fcode == RS6000_BIF_LD_ELEMREV_V1TI) + icode = CODE_FOR_vsx_load_v1ti; + else if (fcode == RS6000_BIF_LD_ELEMREV_V2DF) + icode = CODE_FOR_vsx_load_v2df; + else if (fcode == RS6000_BIF_LD_ELEMREV_V2DI) + icode = CODE_FOR_vsx_load_v2di; + else if (fcode == RS6000_BIF_LD_ELEMREV_V4SF) + icode = CODE_FOR_vsx_load_v4sf; + else if (fcode == RS6000_BIF_LD_ELEMREV_V4SI) + icode = CODE_FOR_vsx_load_v4si; + else if (fcode == RS6000_BIF_LD_ELEMREV_V8HI) + icode = CODE_FOR_vsx_load_v8hi; + else if (fcode == RS6000_BIF_LD_ELEMREV_V16QI) + icode = CODE_FOR_vsx_load_v16qi; + else if (fcode == RS6000_BIF_ST_ELEMREV_V1TI) + icode = CODE_FOR_vsx_store_v1ti; + else if (fcode == RS6000_BIF_ST_ELEMREV_V2DF) + icode = CODE_FOR_vsx_store_v2df; + else if (fcode == RS6000_BIF_ST_ELEMREV_V2DI) + icode = CODE_FOR_vsx_store_v2di; + else if (fcode == RS6000_BIF_ST_ELEMREV_V4SF) + icode = CODE_FOR_vsx_store_v4sf; + else if (fcode == RS6000_BIF_ST_ELEMREV_V4SI) + icode = CODE_FOR_vsx_store_v4si; + else if (fcode == RS6000_BIF_ST_ELEMREV_V8HI) + icode = CODE_FOR_vsx_store_v8hi; + else if (fcode == RS6000_BIF_ST_ELEMREV_V16QI) + icode = CODE_FOR_vsx_store_v16qi; + else + gcc_unreachable (); + } + + + /* TRUE iff the built-in function returns void. */ + bool void_func = TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node; + /* Position of first argument (0 for void-returning functions, else 1). 
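*/ + /* Concretely: for a value-returning pattern, operand 0 is the output, so call argument I maps to insn operand I + 1; for a void pattern the arguments start at operand 0. K below is that offset.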
*/ + int k; + /* Modes for the return value, if any, and arguments. */ + const int MAX_BUILTIN_ARGS = 6; + machine_mode mode[MAX_BUILTIN_ARGS + 1]; + + if (void_func) + k = 0; + else + { + k = 1; + mode[0] = insn_data[icode].operand[0].mode; + } + + /* Tree expressions for each argument. */ + tree arg[MAX_BUILTIN_ARGS]; + /* RTL expressions for each argument. */ + rtx op[MAX_BUILTIN_ARGS]; + + int nargs = bifaddr->nargs; + gcc_assert (nargs <= MAX_BUILTIN_ARGS); + + + for (int i = 0; i < nargs; i++) + { + arg[i] = CALL_EXPR_ARG (exp, i); + if (arg[i] == error_mark_node) + return const0_rtx; + STRIP_NOPS (arg[i]); + op[i] = expand_normal (arg[i]); + /* We have a couple of pesky patterns that don't specify the mode... */ + mode[i+k] = insn_data[icode].operand[i+k].mode; + if (!mode[i+k]) + mode[i+k] = Pmode; + } + + /* Check for restricted constant arguments. */ + for (int i = 0; i < 2; i++) + { + switch (bifaddr->restr[i]) + { + case RES_BITS: + { + size_t mask = 1; + mask <<= bifaddr->restr_val1[i]; + mask--; + tree restr_arg = arg[bifaddr->restr_opnd[i] - 1]; + STRIP_NOPS (restr_arg); + if (!(TREE_CODE (restr_arg) == INTEGER_CST + && (TREE_INT_CST_LOW (restr_arg) & ~mask) == 0)) + { + error ("argument %d must be a %d-bit unsigned literal", + bifaddr->restr_opnd[i], bifaddr->restr_val1[i]); + return CONST0_RTX (mode[0]); + } + break; + } + case RES_RANGE: + { + tree restr_arg = arg[bifaddr->restr_opnd[i] - 1]; + STRIP_NOPS (restr_arg); + if (!(TREE_CODE (restr_arg) == INTEGER_CST + && IN_RANGE (tree_to_shwi (restr_arg), + bifaddr->restr_val1[i], + bifaddr->restr_val2[i]))) + { + error ("argument %d must be a literal between %d and %d," + " inclusive", + bifaddr->restr_opnd[i], bifaddr->restr_val1[i], + bifaddr->restr_val2[i]); + return CONST0_RTX (mode[0]); + } + break; + } + case RES_VAR_RANGE: + { + tree restr_arg = arg[bifaddr->restr_opnd[i] - 1]; + STRIP_NOPS (restr_arg); + if (TREE_CODE (restr_arg) == INTEGER_CST + && !IN_RANGE (tree_to_shwi (restr_arg), + bifaddr->restr_val1[i], + bifaddr->restr_val2[i])) + { + error ("argument %d must be a variable or a literal " + "between %d and %d, inclusive", + bifaddr->restr_opnd[i], bifaddr->restr_val1[i], + bifaddr->restr_val2[i]); + return CONST0_RTX (mode[0]); + } + break; + } + case RES_VALUES: + { + tree restr_arg = arg[bifaddr->restr_opnd[i] - 1]; + STRIP_NOPS (restr_arg); + if (!(TREE_CODE (restr_arg) == INTEGER_CST + && (tree_to_shwi (restr_arg) == bifaddr->restr_val1[i] + || tree_to_shwi (restr_arg) == bifaddr->restr_val2[i]))) + { + error ("argument %d must be either a literal %d or a " + "literal %d", + bifaddr->restr_opnd[i], bifaddr->restr_val1[i], + bifaddr->restr_val2[i]); + return CONST0_RTX (mode[0]); + } + break; + } + default: + case RES_NONE: + break; + } + } + + if (bif_is_ldstmask (*bifaddr)) + return rs6000_expand_ldst_mask (target, arg[0]); + + if (bif_is_stvec (*bifaddr)) + { + if (bif_is_reve (*bifaddr)) + icode = elemrev_icode (fcode); + return stv_expand_builtin (icode, op, mode[0], mode[1]); + } + + if (bif_is_ldvec (*bifaddr)) + { + if (bif_is_reve (*bifaddr)) + icode = elemrev_icode (fcode); + return ldv_expand_builtin (target, icode, op, mode[0]); + } + + if (bif_is_lxvrse (*bifaddr)) + return lxvrse_expand_builtin (target, icode, op, mode[0], mode[1]); + + if (bif_is_lxvrze (*bifaddr)) + return lxvrze_expand_builtin (target, icode, op, mode[0], mode[1]); + + if (bif_is_mma (*bifaddr)) + return new_mma_expand_builtin (exp, target, icode, fcode); + + if (fcode == RS6000_BIF_PACK_IF + && TARGET_LONG_DOUBLE_128 + 
&& !TARGET_IEEEQUAD) + { + icode = CODE_FOR_packtf; + fcode = RS6000_BIF_PACK_TF; + uns_fcode = (size_t) fcode; + } + else if (fcode == RS6000_BIF_UNPACK_IF + && TARGET_LONG_DOUBLE_128 + && !TARGET_IEEEQUAD) + { + icode = CODE_FOR_unpacktf; + fcode = RS6000_BIF_UNPACK_TF; + uns_fcode = (size_t) fcode; + } + + if (TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node) + target = NULL_RTX; + else if (target == 0 + || GET_MODE (target) != mode[0] + || !insn_data[icode].operand[0].predicate (target, mode[0])) + target = gen_reg_rtx (mode[0]); + + for (int i = 0; i < nargs; i++) + if (!insn_data[icode].operand[i+k].predicate (op[i], mode[i+k])) + op[i] = copy_to_mode_reg (mode[i+k], op[i]); + + rtx pat; + + switch (nargs) + { + case 0: + pat = (void_func + ? GEN_FCN (icode) () + : GEN_FCN (icode) (target)); + break; + case 1: + pat = (void_func + ? GEN_FCN (icode) (op[0]) + : GEN_FCN (icode) (target, op[0])); + break; + case 2: + pat = (void_func + ? GEN_FCN (icode) (op[0], op[1]) + : GEN_FCN (icode) (target, op[0], op[1])); + break; + case 3: + pat = (void_func + ? GEN_FCN (icode) (op[0], op[1], op[2]) + : GEN_FCN (icode) (target, op[0], op[1], op[2])); + break; + case 4: + pat = (void_func + ? GEN_FCN (icode) (op[0], op[1], op[2], op[3]) + : GEN_FCN (icode) (target, op[0], op[1], op[2], op[3])); + break; + case 5: + pat = (void_func + ? GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]) + : GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4])); + break; + case 6: + pat = (void_func + ? GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]) + : GEN_FCN (icode) (target, op[0], op[1], + op[2], op[3], op[4], op[5])); + break; + default: + gcc_assert (MAX_BUILTIN_ARGS == 6); + gcc_unreachable (); + } + + if (!pat) + return 0; + + emit_insn (pat); + return target; +} + +/* Create a builtin vector type with a name. Taking care not to give + the canonical type a name. */ + +static tree +rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts) +{ + tree result = build_vector_type (elt_type, num_elts); + + /* Copy so we don't give the canonical type a name. */ + result = build_variant_type_copy (result); + + add_builtin_type (name, result); + + return result; +} + +void +rs6000_init_builtins (void) +{ + tree tdecl; + tree ftype; + tree t; + machine_mode mode; + const char *str; + + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_init_builtins%s%s\n", + (TARGET_ALTIVEC) ? ", altivec" : "", + (TARGET_VSX) ? ", vsx" : ""); + + if (new_builtins_are_live) + V2DI_type_node = rs6000_vector_type ("__vector long long", + long_long_integer_type_node, 2); + else + { + str = TARGET_POWERPC64 ? 
"__vector long" : "__vector long long"; + V2DI_type_node = rs6000_vector_type (str, + long_long_integer_type_node, + 2); + } + ptr_V2DI_type_node + = build_pointer_type (build_qualified_type (V2DI_type_node, + TYPE_QUAL_CONST)); + + V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2); + ptr_V2DF_type_node + = build_pointer_type (build_qualified_type (V2DF_type_node, + TYPE_QUAL_CONST)); + + V4SI_type_node = rs6000_vector_type ("__vector signed int", + intSI_type_node, 4); + ptr_V4SI_type_node + = build_pointer_type (build_qualified_type (V4SI_type_node, + TYPE_QUAL_CONST)); + + V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4); + ptr_V4SF_type_node + = build_pointer_type (build_qualified_type (V4SF_type_node, + TYPE_QUAL_CONST)); + + V8HI_type_node = rs6000_vector_type ("__vector signed short", + intHI_type_node, 8); + ptr_V8HI_type_node + = build_pointer_type (build_qualified_type (V8HI_type_node, + TYPE_QUAL_CONST)); + + V16QI_type_node = rs6000_vector_type ("__vector signed char", + intQI_type_node, 16); + ptr_V16QI_type_node + = build_pointer_type (build_qualified_type (V16QI_type_node, + TYPE_QUAL_CONST)); + + unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char", + unsigned_intQI_type_node, 16); + ptr_unsigned_V16QI_type_node + = build_pointer_type (build_qualified_type (unsigned_V16QI_type_node, + TYPE_QUAL_CONST)); + + unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short", + unsigned_intHI_type_node, 8); + ptr_unsigned_V8HI_type_node + = build_pointer_type (build_qualified_type (unsigned_V8HI_type_node, + TYPE_QUAL_CONST)); + + unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int", + unsigned_intSI_type_node, 4); + ptr_unsigned_V4SI_type_node + = build_pointer_type (build_qualified_type (unsigned_V4SI_type_node, + TYPE_QUAL_CONST)); + + if (new_builtins_are_live) + unsigned_V2DI_type_node + = rs6000_vector_type ("__vector unsigned long long", + long_long_unsigned_type_node, 2); + else + { + str = TARGET_POWERPC64 + ? "__vector unsigned long" + : "__vector unsigned long long"; + unsigned_V2DI_type_node + = rs6000_vector_type (str, long_long_unsigned_type_node, 2); + } + + ptr_unsigned_V2DI_type_node + = build_pointer_type (build_qualified_type (unsigned_V2DI_type_node, + TYPE_QUAL_CONST)); + + opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4); + + const_str_type_node + = build_pointer_type (build_qualified_type (char_type_node, + TYPE_QUAL_CONST)); + + /* We use V1TI mode as a special container to hold __int128_t items that + must live in VSX registers. */ + if (intTI_type_node) + { + V1TI_type_node = rs6000_vector_type ("__vector __int128", + intTI_type_node, 1); + ptr_V1TI_type_node + = build_pointer_type (build_qualified_type (V1TI_type_node, + TYPE_QUAL_CONST)); + unsigned_V1TI_type_node + = rs6000_vector_type ("__vector unsigned __int128", + unsigned_intTI_type_node, 1); + ptr_unsigned_V1TI_type_node + = build_pointer_type (build_qualified_type (unsigned_V1TI_type_node, + TYPE_QUAL_CONST)); + } + + /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...' + types, especially in C++ land. Similarly, 'vector pixel' is distinct from + 'vector unsigned short'. 
*/ + + bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node); + bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node); + bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node); + bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node); + pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node); + + long_integer_type_internal_node = long_integer_type_node; + long_unsigned_type_internal_node = long_unsigned_type_node; + long_long_integer_type_internal_node = long_long_integer_type_node; + long_long_unsigned_type_internal_node = long_long_unsigned_type_node; + intQI_type_internal_node = intQI_type_node; + uintQI_type_internal_node = unsigned_intQI_type_node; + intHI_type_internal_node = intHI_type_node; + uintHI_type_internal_node = unsigned_intHI_type_node; + intSI_type_internal_node = intSI_type_node; + uintSI_type_internal_node = unsigned_intSI_type_node; + intDI_type_internal_node = intDI_type_node; + uintDI_type_internal_node = unsigned_intDI_type_node; + intTI_type_internal_node = intTI_type_node; + uintTI_type_internal_node = unsigned_intTI_type_node; + float_type_internal_node = float_type_node; + double_type_internal_node = double_type_node; + long_double_type_internal_node = long_double_type_node; + dfloat64_type_internal_node = dfloat64_type_node; + dfloat128_type_internal_node = dfloat128_type_node; + void_type_internal_node = void_type_node; + + ptr_intQI_type_node + = build_pointer_type (build_qualified_type (intQI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_uintQI_type_node + = build_pointer_type (build_qualified_type (uintQI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_intHI_type_node + = build_pointer_type (build_qualified_type (intHI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_uintHI_type_node + = build_pointer_type (build_qualified_type (uintHI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_intSI_type_node + = build_pointer_type (build_qualified_type (intSI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_uintSI_type_node + = build_pointer_type (build_qualified_type (uintSI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_intDI_type_node + = build_pointer_type (build_qualified_type (intDI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_uintDI_type_node + = build_pointer_type (build_qualified_type (uintDI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_intTI_type_node + = build_pointer_type (build_qualified_type (intTI_type_internal_node, + TYPE_QUAL_CONST)); + ptr_uintTI_type_node + = build_pointer_type (build_qualified_type (uintTI_type_internal_node, + TYPE_QUAL_CONST)); + + t = build_qualified_type (long_integer_type_internal_node, TYPE_QUAL_CONST); + ptr_long_integer_type_node = build_pointer_type (t); + + t = build_qualified_type (long_unsigned_type_internal_node, TYPE_QUAL_CONST); + ptr_long_unsigned_type_node = build_pointer_type (t); + + ptr_float_type_node + = build_pointer_type (build_qualified_type (float_type_internal_node, + TYPE_QUAL_CONST)); + ptr_double_type_node + = build_pointer_type (build_qualified_type (double_type_internal_node, + TYPE_QUAL_CONST)); + ptr_long_double_type_node + = build_pointer_type (build_qualified_type (long_double_type_internal_node, + TYPE_QUAL_CONST)); + if (dfloat64_type_node) + { + t = build_qualified_type (dfloat64_type_internal_node, TYPE_QUAL_CONST); + ptr_dfloat64_type_node = build_pointer_type (t); + } + else + ptr_dfloat64_type_node = NULL; + + if (dfloat128_type_node) + { + t = build_qualified_type 
(dfloat128_type_internal_node, TYPE_QUAL_CONST);
+      ptr_dfloat128_type_node = build_pointer_type (t);
+    }
+  else
+    ptr_dfloat128_type_node = NULL;
+
+  t = build_qualified_type (long_long_integer_type_internal_node,
+			    TYPE_QUAL_CONST);
+  ptr_long_long_integer_type_node = build_pointer_type (t);
+
+  t = build_qualified_type (long_long_unsigned_type_internal_node,
+			    TYPE_QUAL_CONST);
+  ptr_long_long_unsigned_type_node = build_pointer_type (t);
+
+  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
+     IFmode is the IBM extended 128-bit format that is a pair of doubles.
+     TFmode will be either IEEE 128-bit floating point or the IBM
+     double-double format that uses a pair of doubles, depending on the
+     switches and defaults.
+
+     If we support neither 128-bit IBM double-double nor IEEE 128-bit
+     floating point, we still need to make sure the type is non-zero, or
+     else the self-test fails during bootstrap.
+
+     Always create __ibm128 as a separate type, even if the current long
+     double format is IBM extended double.
+
+     For IEEE 128-bit floating point, always create the type __ieee128.
+     If the user used -mfloat128, rs6000-c.c will create a define from
+     __float128 to __ieee128.  */
+  if (TARGET_FLOAT128_TYPE)
+    {
+      if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
+	ibm128_float_type_node = long_double_type_node;
+      else
+	{
+	  ibm128_float_type_node = make_node (REAL_TYPE);
+	  TYPE_PRECISION (ibm128_float_type_node) = 128;
+	  SET_TYPE_MODE (ibm128_float_type_node, IFmode);
+	  layout_type (ibm128_float_type_node);
+	}
+      t = build_qualified_type (ibm128_float_type_node, TYPE_QUAL_CONST);
+      ptr_ibm128_float_type_node = build_pointer_type (t);
+      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
+					      "__ibm128");
+
+      if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
+	ieee128_float_type_node = long_double_type_node;
+      else
+	ieee128_float_type_node = float128_type_node;
+      t = build_qualified_type (ieee128_float_type_node, TYPE_QUAL_CONST);
+      ptr_ieee128_float_type_node = build_pointer_type (t);
+      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
+					      "__ieee128");
+    }
+  else
+    ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
+
+  /* Vector pair and vector quad support.
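+
+     A __vector_pair is a 256-bit value carried in OOmode (an adjacent
+     pair of VSX registers) and a __vector_quad is a 512-bit MMA
+     accumulator carried in XOmode; both are OPAQUE_TYPE nodes, so the
+     middle end treats them as indivisible.  A minimal usage sketch
+     (Power10 only; the vector unsigned char operands va and vb are
+     illustrative):
+
+	__vector_quad acc;
+	__builtin_mma_xxsetaccz (&acc);
+	__builtin_mma_xvf32gerpp (&acc, va, vb);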
*/ + vector_pair_type_node = make_node (OPAQUE_TYPE); + SET_TYPE_MODE (vector_pair_type_node, OOmode); + TYPE_SIZE (vector_pair_type_node) = bitsize_int (GET_MODE_BITSIZE (OOmode)); + TYPE_PRECISION (vector_pair_type_node) = GET_MODE_BITSIZE (OOmode); + TYPE_SIZE_UNIT (vector_pair_type_node) = size_int (GET_MODE_SIZE (OOmode)); + SET_TYPE_ALIGN (vector_pair_type_node, 256); + TYPE_USER_ALIGN (vector_pair_type_node) = 0; + lang_hooks.types.register_builtin_type (vector_pair_type_node, + "__vector_pair"); + t = build_qualified_type (vector_pair_type_node, TYPE_QUAL_CONST); + ptr_vector_pair_type_node = build_pointer_type (t); + + vector_quad_type_node = make_node (OPAQUE_TYPE); + SET_TYPE_MODE (vector_quad_type_node, XOmode); + TYPE_SIZE (vector_quad_type_node) = bitsize_int (GET_MODE_BITSIZE (XOmode)); + TYPE_PRECISION (vector_quad_type_node) = GET_MODE_BITSIZE (XOmode); + TYPE_SIZE_UNIT (vector_quad_type_node) = size_int (GET_MODE_SIZE (XOmode)); + SET_TYPE_ALIGN (vector_quad_type_node, 512); + TYPE_USER_ALIGN (vector_quad_type_node) = 0; + lang_hooks.types.register_builtin_type (vector_quad_type_node, + "__vector_quad"); + t = build_qualified_type (vector_quad_type_node, TYPE_QUAL_CONST); + ptr_vector_quad_type_node = build_pointer_type (t); + + /* Initialize the modes for builtin_function_type, mapping a machine mode to + tree type node. */ + builtin_mode_to_type[QImode][0] = integer_type_node; + builtin_mode_to_type[QImode][1] = unsigned_intSI_type_node; + builtin_mode_to_type[HImode][0] = integer_type_node; + builtin_mode_to_type[HImode][1] = unsigned_intSI_type_node; + builtin_mode_to_type[SImode][0] = intSI_type_node; + builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node; + builtin_mode_to_type[DImode][0] = intDI_type_node; + builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node; + builtin_mode_to_type[TImode][0] = intTI_type_node; + builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node; + builtin_mode_to_type[SFmode][0] = float_type_node; + builtin_mode_to_type[DFmode][0] = double_type_node; + builtin_mode_to_type[IFmode][0] = ibm128_float_type_node; + builtin_mode_to_type[KFmode][0] = ieee128_float_type_node; + builtin_mode_to_type[TFmode][0] = long_double_type_node; + builtin_mode_to_type[DDmode][0] = dfloat64_type_node; + builtin_mode_to_type[TDmode][0] = dfloat128_type_node; + builtin_mode_to_type[V1TImode][0] = V1TI_type_node; + builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node; + builtin_mode_to_type[V2DImode][0] = V2DI_type_node; + builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node; + builtin_mode_to_type[V2DFmode][0] = V2DF_type_node; + builtin_mode_to_type[V4SImode][0] = V4SI_type_node; + builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node; + builtin_mode_to_type[V4SFmode][0] = V4SF_type_node; + builtin_mode_to_type[V8HImode][0] = V8HI_type_node; + builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node; + builtin_mode_to_type[V16QImode][0] = V16QI_type_node; + builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node; + builtin_mode_to_type[OOmode][1] = vector_pair_type_node; + builtin_mode_to_type[XOmode][1] = vector_quad_type_node; + + tdecl = add_builtin_type ("__bool char", bool_char_type_node); + TYPE_NAME (bool_char_type_node) = tdecl; + + tdecl = add_builtin_type ("__bool short", bool_short_type_node); + TYPE_NAME (bool_short_type_node) = tdecl; + + tdecl = add_builtin_type ("__bool int", bool_int_type_node); + TYPE_NAME (bool_int_type_node) = tdecl; + + tdecl = add_builtin_type ("__pixel", pixel_type_node); + 
TYPE_NAME (pixel_type_node) = tdecl; + + bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char", + bool_char_type_node, 16); + ptr_bool_V16QI_type_node + = build_pointer_type (build_qualified_type (bool_V16QI_type_node, + TYPE_QUAL_CONST)); + + bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short", + bool_short_type_node, 8); + ptr_bool_V8HI_type_node + = build_pointer_type (build_qualified_type (bool_V8HI_type_node, + TYPE_QUAL_CONST)); + + bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int", + bool_int_type_node, 4); + ptr_bool_V4SI_type_node + = build_pointer_type (build_qualified_type (bool_V4SI_type_node, + TYPE_QUAL_CONST)); + + bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 + ? "__vector __bool long" + : "__vector __bool long long", + bool_long_long_type_node, 2); + ptr_bool_V2DI_type_node + = build_pointer_type (build_qualified_type (bool_V2DI_type_node, + TYPE_QUAL_CONST)); + + bool_V1TI_type_node = rs6000_vector_type ("__vector __bool __int128", + intTI_type_node, 1); + ptr_bool_V1TI_type_node + = build_pointer_type (build_qualified_type (bool_V1TI_type_node, + TYPE_QUAL_CONST)); + + pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel", + pixel_type_node, 8); + ptr_pixel_V8HI_type_node + = build_pointer_type (build_qualified_type (pixel_V8HI_type_node, + TYPE_QUAL_CONST)); + pcvoid_type_node + = build_pointer_type (build_qualified_type (void_type_node, + TYPE_QUAL_CONST)); + + /* Execute the autogenerated initialization code for builtins. */ + rs6000_init_generated_builtins (); + + if (TARGET_DEBUG_BUILTIN) + { + fprintf (stderr, "\nAutogenerated built-in functions:\n\n"); + for (int i = 1; i < (int) RS6000_BIF_MAX; i++) + { + bif_enable e = rs6000_builtin_info_x[i].enable; + if (e == ENB_P5 && !TARGET_POPCNTB) + continue; + if (e == ENB_P6 && !TARGET_CMPB) + continue; + if (e == ENB_ALTIVEC && !TARGET_ALTIVEC) + continue; + if (e == ENB_VSX && !TARGET_VSX) + continue; + if (e == ENB_P7 && !TARGET_POPCNTD) + continue; + if (e == ENB_P7_64 && !(TARGET_POPCNTD && TARGET_POWERPC64)) + continue; + if (e == ENB_P8 && !TARGET_DIRECT_MOVE) + continue; + if (e == ENB_P8V && !TARGET_P8_VECTOR) + continue; + if (e == ENB_P9 && !TARGET_MODULO) + continue; + if (e == ENB_P9_64 && !(TARGET_MODULO && TARGET_POWERPC64)) + continue; + if (e == ENB_P9V && !TARGET_P9_VECTOR) + continue; + if (e == ENB_IEEE128_HW && !TARGET_FLOAT128_HW) + continue; + if (e == ENB_DFP && !TARGET_DFP) + continue; + if (e == ENB_CRYPTO && !TARGET_CRYPTO) + continue; + if (e == ENB_HTM && !TARGET_HTM) + continue; + if (e == ENB_P10 && !TARGET_POWER10) + continue; + if (e == ENB_P10_64 && !(TARGET_POWER10 && TARGET_POWERPC64)) + continue; + if (e == ENB_MMA && !TARGET_MMA) + continue; + tree fntype = rs6000_builtin_info_x[i].fntype; + tree t = TREE_TYPE (fntype); + fprintf (stderr, "%s %s (", rs6000_type_string (t), + rs6000_builtin_info_x[i].bifname); + t = TYPE_ARG_TYPES (fntype); + while (t && TREE_VALUE (t) != void_type_node) + { + fprintf (stderr, "%s", + rs6000_type_string (TREE_VALUE (t))); + t = TREE_CHAIN (t); + if (t && TREE_VALUE (t) != void_type_node) + fprintf (stderr, ", "); + } + fprintf (stderr, "); %s [%4d]\n", + rs6000_builtin_info_x[i].attr_string, (int) i); + } + fprintf (stderr, "\nEnd autogenerated built-in functions.\n\n\n"); + } + + if (new_builtins_are_live) + { + altivec_builtin_mask_for_load + = rs6000_builtin_decls_x[RS6000_BIF_MASK_FOR_LOAD]; + +#ifdef SUBTARGET_INIT_BUILTINS + SUBTARGET_INIT_BUILTINS; +#endif + return; + } + + /* 
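+     Only the legacy built-in mechanism reaches this point; the
+     new-builtins path has already returned above.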
Create Altivec, VSX and MMA builtins on machines with at least the + general purpose extensions (970 and newer) to allow the use of + the target attribute. */ + if (TARGET_EXTRA_BUILTINS) + { + altivec_init_builtins (); + mma_init_builtins (); + } + if (TARGET_HTM) + htm_init_builtins (); + + if (TARGET_EXTRA_BUILTINS) + rs6000_common_init_builtins (); + + ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode, + RS6000_BUILTIN_RECIP, "__builtin_recipdiv"); + def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP); + + ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode, + RS6000_BUILTIN_RECIPF, "__builtin_recipdivf"); + def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF); + + ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode, + RS6000_BUILTIN_RSQRT, "__builtin_rsqrt"); + def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT); + + ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode, + RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf"); + def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF); + + mode = (TARGET_64BIT) ? DImode : SImode; + ftype = builtin_function_type (mode, mode, mode, VOIDmode, + POWER7_BUILTIN_BPERMD, "__builtin_bpermd"); + def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD); + + ftype = build_function_type_list (unsigned_intDI_type_node, + NULL_TREE); + def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB); + + if (TARGET_64BIT) + ftype = build_function_type_list (unsigned_intDI_type_node, + NULL_TREE); + else + ftype = build_function_type_list (unsigned_intSI_type_node, + NULL_TREE); + def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB); + + ftype = build_function_type_list (double_type_node, NULL_TREE); + def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS); + + ftype = build_function_type_list (double_type_node, NULL_TREE); + def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL); + + ftype = build_function_type_list (void_type_node, + intSI_type_node, + NULL_TREE); + def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0); + + ftype = build_function_type_list (void_type_node, + intSI_type_node, + NULL_TREE); + def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1); + + ftype = build_function_type_list (void_type_node, + intDI_type_node, + NULL_TREE); + def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN); + + ftype = build_function_type_list (void_type_node, + intDI_type_node, + NULL_TREE); + def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN); + + ftype = build_function_type_list (void_type_node, + intSI_type_node, double_type_node, + NULL_TREE); + def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF); + + ftype = build_function_type_list (void_type_node, NULL_TREE); + def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT); + def_builtin ("__builtin_ppc_speculation_barrier", ftype, + MISC_BUILTIN_SPEC_BARRIER); + + ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node, + NULL_TREE); + def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS); + def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS); + + if (TARGET_XCOFF) + { + /* AIX libm provides clog as __clog. 
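+	 Redirect the builtin's assembler name so calls resolve to the
+	 symbol AIX libm actually exports.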
*/ + if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE) + set_user_assembler_name (tdecl, "__clog"); + + /* When long double is 64 bit, some long double builtins of libc + functions (like __builtin_frexpl) must call the double version + (frexp) not the long double version (frexpl) that expects a 128 bit + argument. */ + if (! TARGET_LONG_DOUBLE_128) + { + if ((tdecl = builtin_decl_explicit (BUILT_IN_FMODL)) != NULL_TREE) + set_user_assembler_name (tdecl, "fmod"); + if ((tdecl = builtin_decl_explicit (BUILT_IN_FREXPL)) != NULL_TREE) + set_user_assembler_name (tdecl, "frexp"); + if ((tdecl = builtin_decl_explicit (BUILT_IN_LDEXPL)) != NULL_TREE) + set_user_assembler_name (tdecl, "ldexp"); + if ((tdecl = builtin_decl_explicit (BUILT_IN_MODFL)) != NULL_TREE) + set_user_assembler_name (tdecl, "modf"); + } + } + +#ifdef SUBTARGET_INIT_BUILTINS + SUBTARGET_INIT_BUILTINS; +#endif + + /* Register the compatibility builtins after all of the normal + builtins have been defined. */ + const struct builtin_compatibility *d = bdesc_compat; + unsigned i; + for (i = 0; i < ARRAY_SIZE (bdesc_compat); i++, d++) + { + tree decl = rs6000_builtin_decls[(int)d->code]; + if (decl != NULL) + add_builtin_function (d->name, TREE_TYPE (decl), (int)d->code, + BUILT_IN_MD, NULL, NULL_TREE); + } +} + +static tree +rs6000_new_builtin_decl (unsigned code, bool /* initialize_p */) +{ + rs6000_gen_builtins fcode = (rs6000_gen_builtins) code; + + if (fcode >= RS6000_OVLD_MAX) + return error_mark_node; + + if (!rs6000_new_builtin_is_supported (fcode)) + { + rs6000_invalid_new_builtin (fcode); + return error_mark_node; + } + + return rs6000_builtin_decls_x[code]; +} + +/* Returns the rs6000 builtin decl for CODE. */ + +tree +rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED) +{ + if (new_builtins_are_live) + return rs6000_new_builtin_decl (code, initialize_p); + + HOST_WIDE_INT fnmask; + + if (code >= RS6000_BUILTIN_COUNT) + return error_mark_node; + + fnmask = rs6000_builtin_info[code].mask; + if ((fnmask & rs6000_builtin_mask) != fnmask) + { + rs6000_invalid_builtin ((enum rs6000_builtins)code); + return error_mark_node; + } + + return rs6000_builtin_decls[code]; +} + +static void +altivec_init_builtins (void) +{ + const struct builtin_description *d; + size_t i; + tree ftype; + tree decl; + + tree pvoid_type_node = build_pointer_type (void_type_node); + + tree int_ftype_opaque + = build_function_type_list (integer_type_node, + opaque_V4SI_type_node, NULL_TREE); + tree opaque_ftype_opaque + = build_function_type_list (integer_type_node, NULL_TREE); + tree opaque_ftype_opaque_int + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, integer_type_node, NULL_TREE); + tree opaque_ftype_opaque_opaque_int + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, opaque_V4SI_type_node, + integer_type_node, NULL_TREE); + tree opaque_ftype_opaque_opaque_opaque + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, opaque_V4SI_type_node, + opaque_V4SI_type_node, NULL_TREE); + tree opaque_ftype_opaque_opaque + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, opaque_V4SI_type_node, + NULL_TREE); + tree int_ftype_int_opaque_opaque + = build_function_type_list (integer_type_node, + integer_type_node, opaque_V4SI_type_node, + opaque_V4SI_type_node, NULL_TREE); + tree int_ftype_int_v4si_v4si + = build_function_type_list (integer_type_node, + integer_type_node, V4SI_type_node, + V4SI_type_node, NULL_TREE); + tree 
int_ftype_int_v2di_v2di + = build_function_type_list (integer_type_node, + integer_type_node, V2DI_type_node, + V2DI_type_node, NULL_TREE); + tree int_ftype_int_v1ti_v1ti + = build_function_type_list (integer_type_node, + integer_type_node, V1TI_type_node, + V1TI_type_node, NULL_TREE); + tree void_ftype_v4si + = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE); + tree v8hi_ftype_void + = build_function_type_list (V8HI_type_node, NULL_TREE); + tree void_ftype_void + = build_function_type_list (void_type_node, NULL_TREE); + tree void_ftype_int + = build_function_type_list (void_type_node, integer_type_node, NULL_TREE); + + tree opaque_ftype_long_pcvoid + = build_function_type_list (opaque_V4SI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v16qi_ftype_pcvoid + = build_function_type_list (V16QI_type_node, + pcvoid_type_node, + NULL_TREE); + tree v16qi_ftype_long_pcvoid + = build_function_type_list (V16QI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v8hi_ftype_long_pcvoid + = build_function_type_list (V8HI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v4si_ftype_long_pcvoid + = build_function_type_list (V4SI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v4sf_ftype_long_pcvoid + = build_function_type_list (V4SF_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v2df_ftype_long_pcvoid + = build_function_type_list (V2DF_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v2di_ftype_long_pcvoid + = build_function_type_list (V2DI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree v1ti_ftype_long_pcvoid + = build_function_type_list (V1TI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + + tree void_ftype_opaque_long_pvoid + = build_function_type_list (void_type_node, + opaque_V4SI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree void_ftype_v4si_long_pvoid + = build_function_type_list (void_type_node, + V4SI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree void_ftype_v16qi_long_pvoid + = build_function_type_list (void_type_node, + V16QI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + + tree void_ftype_v16qi_pvoid_long + = build_function_type_list (void_type_node, + V16QI_type_node, pvoid_type_node, + long_integer_type_node, NULL_TREE); + + tree void_ftype_v8hi_long_pvoid + = build_function_type_list (void_type_node, + V8HI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree void_ftype_v4sf_long_pvoid + = build_function_type_list (void_type_node, + V4SF_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree void_ftype_v2df_long_pvoid + = build_function_type_list (void_type_node, + V2DF_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree void_ftype_v1ti_long_pvoid + = build_function_type_list (void_type_node, + V1TI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree void_ftype_v2di_long_pvoid + = build_function_type_list (void_type_node, + V2DI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + tree int_ftype_int_v8hi_v8hi + = build_function_type_list (integer_type_node, + integer_type_node, V8HI_type_node, + V8HI_type_node, NULL_TREE); + tree int_ftype_int_v16qi_v16qi + = build_function_type_list (integer_type_node, + integer_type_node, V16QI_type_node, + V16QI_type_node, NULL_TREE); + tree 
int_ftype_int_v4sf_v4sf + = build_function_type_list (integer_type_node, + integer_type_node, V4SF_type_node, + V4SF_type_node, NULL_TREE); + tree int_ftype_int_v2df_v2df + = build_function_type_list (integer_type_node, + integer_type_node, V2DF_type_node, + V2DF_type_node, NULL_TREE); + tree v2di_ftype_v2di + = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE); + tree v4si_ftype_v4si + = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE); + tree v8hi_ftype_v8hi + = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE); + tree v16qi_ftype_v16qi + = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE); + tree v4sf_ftype_v4sf + = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE); + tree v2df_ftype_v2df + = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE); + tree void_ftype_pcvoid_int_int + = build_function_type_list (void_type_node, + pcvoid_type_node, integer_type_node, + integer_type_node, NULL_TREE); + + def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR); + def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR); + def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL); + def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS); + def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL); + def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR); + def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX); + def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX); + def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX); + def_builtin ("__builtin_altivec_se_lxvrbx", v16qi_ftype_long_pcvoid, P10_BUILTIN_SE_LXVRBX); + def_builtin ("__builtin_altivec_se_lxvrhx", v8hi_ftype_long_pcvoid, P10_BUILTIN_SE_LXVRHX); + def_builtin ("__builtin_altivec_se_lxvrwx", v4si_ftype_long_pcvoid, P10_BUILTIN_SE_LXVRWX); + def_builtin ("__builtin_altivec_se_lxvrdx", v2di_ftype_long_pcvoid, P10_BUILTIN_SE_LXVRDX); + def_builtin ("__builtin_altivec_ze_lxvrbx", v16qi_ftype_long_pcvoid, P10_BUILTIN_ZE_LXVRBX); + def_builtin ("__builtin_altivec_ze_lxvrhx", v8hi_ftype_long_pcvoid, P10_BUILTIN_ZE_LXVRHX); + def_builtin ("__builtin_altivec_ze_lxvrwx", v4si_ftype_long_pcvoid, P10_BUILTIN_ZE_LXVRWX); + def_builtin ("__builtin_altivec_ze_lxvrdx", v2di_ftype_long_pcvoid, P10_BUILTIN_ZE_LXVRDX); + def_builtin ("__builtin_altivec_tr_stxvrbx", void_ftype_v1ti_long_pvoid, P10_BUILTIN_TR_STXVRBX); + def_builtin ("__builtin_altivec_tr_stxvrhx", void_ftype_v1ti_long_pvoid, P10_BUILTIN_TR_STXVRHX); + def_builtin ("__builtin_altivec_tr_stxvrwx", void_ftype_v1ti_long_pvoid, P10_BUILTIN_TR_STXVRWX); + def_builtin ("__builtin_altivec_tr_stxvrdx", void_ftype_v1ti_long_pvoid, P10_BUILTIN_TR_STXVRDX); + def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL); + def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVXL_V2DF); + def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVXL_V2DI); + def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVXL_V4SF); + def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVXL_V4SI); + def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVXL_V8HI); + 
def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVXL_V16QI); + def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX); + def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V1TI); + def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V2DF); + def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V2DI); + def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V4SF); + def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V4SI); + def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V8HI); + def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid, + ALTIVEC_BUILTIN_LVX_V16QI); + def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX); + def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid, + ALTIVEC_BUILTIN_STVX_V2DF); + def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid, + ALTIVEC_BUILTIN_STVX_V2DI); + def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid, + ALTIVEC_BUILTIN_STVX_V4SF); + def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid, + ALTIVEC_BUILTIN_STVX_V4SI); + def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid, + ALTIVEC_BUILTIN_STVX_V8HI); + def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid, + ALTIVEC_BUILTIN_STVX_V16QI); + def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX); + def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL); + def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid, + ALTIVEC_BUILTIN_STVXL_V2DF); + def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid, + ALTIVEC_BUILTIN_STVXL_V2DI); + def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid, + ALTIVEC_BUILTIN_STVXL_V4SF); + def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid, + ALTIVEC_BUILTIN_STVXL_V4SI); + def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid, + ALTIVEC_BUILTIN_STVXL_V8HI); + def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid, + ALTIVEC_BUILTIN_STVXL_V16QI); + def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX); + def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX); + def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD); + def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE); + def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL); + def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL); + def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR); + def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX); + def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX); + def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX); + def_builtin ("__builtin_vec_se_lxvrx", v1ti_ftype_long_pcvoid, P10_BUILTIN_VEC_SE_LXVRX); + def_builtin ("__builtin_vec_ze_lxvrx", v1ti_ftype_long_pcvoid, P10_BUILTIN_VEC_ZE_LXVRX); + def_builtin ("__builtin_vec_tr_stxvrx", 
void_ftype_opaque_long_pvoid, P10_BUILTIN_VEC_TR_STXVRX); + def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST); + def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE); + def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL); + def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX); + def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX); + def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX); + + def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid, + VSX_BUILTIN_LXVD2X_V2DF); + def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid, + VSX_BUILTIN_LXVD2X_V2DI); + def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid, + VSX_BUILTIN_LXVW4X_V4SF); + def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid, + VSX_BUILTIN_LXVW4X_V4SI); + def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid, + VSX_BUILTIN_LXVW4X_V8HI); + def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid, + VSX_BUILTIN_LXVW4X_V16QI); + def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid, + VSX_BUILTIN_STXVD2X_V2DF); + def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid, + VSX_BUILTIN_STXVD2X_V2DI); + def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid, + VSX_BUILTIN_STXVW4X_V4SF); + def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid, + VSX_BUILTIN_STXVW4X_V4SI); + def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid, + VSX_BUILTIN_STXVW4X_V8HI); + def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid, + VSX_BUILTIN_STXVW4X_V16QI); + + def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V2DF); + def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V2DI); + def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V4SF); + def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V4SI); + def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V8HI); + def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V16QI); + def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V2DF); + def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V1TI); + def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V2DI); + def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V4SF); + def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V4SI); + def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V8HI); + def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid, + VSX_BUILTIN_ST_ELEMREV_V16QI); + + def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid, + VSX_BUILTIN_VEC_LD); + def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid, + VSX_BUILTIN_VEC_ST); + def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid, + VSX_BUILTIN_VEC_XL); + def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid, + 
VSX_BUILTIN_VEC_XL_BE); + def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid, + VSX_BUILTIN_VEC_XST); + def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid, + VSX_BUILTIN_VEC_XST_BE); + + def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP); + def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS); + def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE); + + def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD); + def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT); + def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT); + def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT); + def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW); + def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH); + def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB); + def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF); + def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX); + def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX); + def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS); + def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU); + + def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque, + ALTIVEC_BUILTIN_VEC_ADDE); + def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque, + ALTIVEC_BUILTIN_VEC_ADDEC); + def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque, + ALTIVEC_BUILTIN_VEC_CMPNE); + def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque, + ALTIVEC_BUILTIN_VEC_MUL); + def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque, + ALTIVEC_BUILTIN_VEC_SUBE); + def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque, + ALTIVEC_BUILTIN_VEC_SUBEC); + + /* Cell builtins. 
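+     The lvlx/lvrx loads and stvlx/stvrx stores fetch or deposit the left
+     and right pieces of an unaligned vector access; they are only valid
+     when targeting the Cell processor.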
*/ + def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX); + def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL); + def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX); + def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL); + + def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX); + def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL); + def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX); + def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL); + + def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX); + def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL); + def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX); + def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL); + + def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX); + def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL); + def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX); + def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL); + + if (TARGET_P9_VECTOR) + { + def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long, + P9V_BUILTIN_STXVL); + def_builtin ("__builtin_altivec_xst_len_r", void_ftype_v16qi_pvoid_long, + P9V_BUILTIN_XST_LEN_R); + } + + /* Add the DST variants. */ + d = bdesc_dst; + for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++) + { + /* It is expected that these dst built-in functions may have + d->icode equal to CODE_FOR_nothing. */ + def_builtin (d->name, void_ftype_pcvoid_int_int, d->code); + } + + /* Initialize the predicates. */ + d = bdesc_altivec_preds; + for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++) + { + machine_mode mode1; + tree type; + + if (rs6000_overloaded_builtin_p (d->code)) + mode1 = VOIDmode; + else + { + /* Cannot define builtin if the instruction is disabled. */ + gcc_assert (d->icode != CODE_FOR_nothing); + mode1 = insn_data[d->icode].operand[1].mode; + } + + switch (mode1) + { + case E_VOIDmode: + type = int_ftype_int_opaque_opaque; + break; + case E_V1TImode: + type = int_ftype_int_v1ti_v1ti; + break; + case E_V2DImode: + type = int_ftype_int_v2di_v2di; + break; + case E_V4SImode: + type = int_ftype_int_v4si_v4si; + break; + case E_V8HImode: + type = int_ftype_int_v8hi_v8hi; + break; + case E_V16QImode: + type = int_ftype_int_v16qi_v16qi; + break; + case E_V4SFmode: + type = int_ftype_int_v4sf_v4sf; + break; + case E_V2DFmode: + type = int_ftype_int_v2df_v2df; + break; + default: + gcc_unreachable (); + } + + def_builtin (d->name, type, d->code); + } + + /* Initialize the abs* operators. */ + d = bdesc_abs; + for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++) + { + machine_mode mode0; + tree type; + + /* Cannot define builtin if the instruction is disabled. 
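+	 Unlike the predicates above, the abs* entries are never
+	 overloaded, so a valid icode is required here.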
*/ + gcc_assert (d->icode != CODE_FOR_nothing); + mode0 = insn_data[d->icode].operand[0].mode; + + switch (mode0) + { + case E_V2DImode: + type = v2di_ftype_v2di; + break; + case E_V4SImode: + type = v4si_ftype_v4si; + break; + case E_V8HImode: + type = v8hi_ftype_v8hi; + break; + case E_V16QImode: + type = v16qi_ftype_v16qi; + break; + case E_V4SFmode: + type = v4sf_ftype_v4sf; + break; + case E_V2DFmode: + type = v2df_ftype_v2df; + break; + default: + gcc_unreachable (); + } + + def_builtin (d->name, type, d->code); + } + + /* Initialize target builtin that implements + targetm.vectorize.builtin_mask_for_load. */ + + decl = add_builtin_function ("__builtin_altivec_mask_for_load", + v16qi_ftype_pcvoid, + ALTIVEC_BUILTIN_MASK_FOR_LOAD, + BUILT_IN_MD, NULL, NULL_TREE); + TREE_READONLY (decl) = 1; + if (TARGET_DEBUG_BUILTIN) + { + tree arg_type = TREE_VALUE (TYPE_ARG_TYPES (v16qi_ftype_pcvoid)); + fprintf (stderr, "%s __builtin_altivec_mask_for_load (%s); [%4d]\n", + rs6000_type_string (TREE_TYPE (v16qi_ftype_pcvoid)), + rs6000_type_string (arg_type), + (int) ALTIVEC_BUILTIN_MASK_FOR_LOAD); + } + /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */ + altivec_builtin_mask_for_load = decl; + + /* Access to the vec_init patterns. */ + ftype = build_function_type_list (V4SI_type_node, integer_type_node, + integer_type_node, integer_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI); + + ftype = build_function_type_list (V8HI_type_node, short_integer_type_node, + short_integer_type_node, + short_integer_type_node, + short_integer_type_node, + short_integer_type_node, + short_integer_type_node, + short_integer_type_node, + short_integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI); + + ftype = build_function_type_list (V16QI_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, char_type_node, + char_type_node, NULL_TREE); + def_builtin ("__builtin_vec_init_v16qi", ftype, + ALTIVEC_BUILTIN_VEC_INIT_V16QI); + + ftype = build_function_type_list (V4SF_type_node, float_type_node, + float_type_node, float_type_node, + float_type_node, NULL_TREE); + def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF); + + /* VSX builtins. */ + ftype = build_function_type_list (V2DF_type_node, double_type_node, + double_type_node, NULL_TREE); + def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF); + + ftype = build_function_type_list (V2DI_type_node, intDI_type_node, + intDI_type_node, NULL_TREE); + def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI); + + /* Access to the vec_set patterns. 
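+     Each vec_set builtin takes the vector, the scalar element to insert
+     and the element index, and returns the updated vector.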
*/ + ftype = build_function_type_list (V4SI_type_node, V4SI_type_node, + intSI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI); + + ftype = build_function_type_list (V8HI_type_node, V8HI_type_node, + intHI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI); + + ftype = build_function_type_list (V16QI_type_node, V16QI_type_node, + intQI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI); + + ftype = build_function_type_list (V4SF_type_node, V4SF_type_node, + float_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF); + + ftype = build_function_type_list (V2DF_type_node, V2DF_type_node, + double_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF); + + ftype = build_function_type_list (V2DI_type_node, V2DI_type_node, + intDI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI); + + /* Access to the vec_extract patterns. */ + ftype = build_function_type_list (intSI_type_node, V4SI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI); + + ftype = build_function_type_list (intHI_type_node, V8HI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI); + + ftype = build_function_type_list (intQI_type_node, V16QI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI); + + ftype = build_function_type_list (float_type_node, V4SF_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF); + + ftype = build_function_type_list (double_type_node, V2DF_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF); + + ftype = build_function_type_list (intDI_type_node, V2DI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI); + + + if (V1TI_type_node) + { + tree v1ti_ftype_long_pcvoid + = build_function_type_list (V1TI_type_node, + long_integer_type_node, pcvoid_type_node, + NULL_TREE); + tree void_ftype_v1ti_long_pvoid + = build_function_type_list (void_type_node, + V1TI_type_node, long_integer_type_node, + pvoid_type_node, NULL_TREE); + def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid, + VSX_BUILTIN_LD_ELEMREV_V1TI); + def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid, + VSX_BUILTIN_LXVD2X_V1TI); + def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid, + VSX_BUILTIN_STXVD2X_V1TI); + ftype = build_function_type_list (V1TI_type_node, intTI_type_node, + NULL_TREE, NULL_TREE); + def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI); + ftype = build_function_type_list (V1TI_type_node, V1TI_type_node, + intTI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI); + ftype = build_function_type_list (intTI_type_node, V1TI_type_node, + integer_type_node, NULL_TREE); + def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI); + } + +} + +static void +mma_init_builtins (void) 
+{ + const struct builtin_description *d = bdesc_mma; + + for (unsigned i = 0; i < ARRAY_SIZE (bdesc_mma); i++, d++) + { + tree op[MAX_MMA_OPERANDS], type; + unsigned icode = (unsigned) d->icode; + unsigned attr = rs6000_builtin_info[d->code].attr; + int attr_args = (attr & RS6000_BTC_OPND_MASK); + bool gimple_func = (attr & RS6000_BTC_GIMPLE); + unsigned nopnds = 0; + + if (d->name == 0) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "mma_builtin, bdesc_mma[%ld] no name\n", + (long unsigned) i); + continue; + } + + if (gimple_func) + { + gcc_assert (icode == CODE_FOR_nothing); + /* Some MMA built-ins that are expanded into gimple are converted + into internal MMA built-ins that are expanded into rtl. + The internal built-in follows immediately after this built-in. */ + if (d->code != VSX_BUILTIN_LXVP + && d->code != VSX_BUILTIN_STXVP) + { + op[nopnds++] = void_type_node; + icode = d[1].icode; + } + } + else + { + if (!(d->code == MMA_BUILTIN_DISASSEMBLE_ACC_INTERNAL + || d->code == VSX_BUILTIN_DISASSEMBLE_PAIR_INTERNAL) + && (attr & RS6000_BTC_QUAD) == 0) + attr_args--; + + /* Ensure we have the correct number and type of operands. */ + gcc_assert (attr_args == insn_data[icode].n_operands - 1); + } + + /* This is a disassemble pair/acc function. */ + if (d->code == MMA_BUILTIN_DISASSEMBLE_ACC + || d->code == VSX_BUILTIN_DISASSEMBLE_PAIR) + { + op[nopnds++] = build_pointer_type (void_type_node); + if (d->code == MMA_BUILTIN_DISASSEMBLE_ACC) + op[nopnds++] = build_pointer_type (vector_quad_type_node); + else + op[nopnds++] = build_pointer_type (vector_pair_type_node); + } + else if (d->code == VSX_BUILTIN_LXVP) + { + op[nopnds++] = vector_pair_type_node; + op[nopnds++] = sizetype; + op[nopnds++] = build_pointer_type (vector_pair_type_node); + } + else if (d->code == VSX_BUILTIN_STXVP) + { + op[nopnds++] = void_type_node; + op[nopnds++] = vector_pair_type_node; + op[nopnds++] = sizetype; + op[nopnds++] = build_pointer_type (vector_pair_type_node); + } + else + { + /* This is a normal MMA built-in function. */ + unsigned j = 0; + if (attr & RS6000_BTC_QUAD + && d->code != MMA_BUILTIN_DISASSEMBLE_ACC_INTERNAL + && d->code != VSX_BUILTIN_DISASSEMBLE_PAIR_INTERNAL) + j = 1; + for (; j < (unsigned) insn_data[icode].n_operands; j++) + { + machine_mode mode = insn_data[icode].operand[j].mode; + if (gimple_func && mode == XOmode) + op[nopnds++] = build_pointer_type (vector_quad_type_node); + else if (gimple_func + && mode == OOmode + && (d->code == VSX_BUILTIN_BUILD_PAIR + || d->code == VSX_BUILTIN_ASSEMBLE_PAIR)) + op[nopnds++] = build_pointer_type (vector_pair_type_node); + else + /* MMA uses unsigned types. 
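+		 The operand type is therefore looked up in the unsigned
+		 column (index 1) of builtin_mode_to_type.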
*/
+	      op[nopnds++] = builtin_mode_to_type[mode][1];
+	    }
+	}
+
+      switch (nopnds)
+	{
+	case 1:
+	  type = build_function_type_list (op[0], NULL_TREE);
+	  break;
+	case 2:
+	  type = build_function_type_list (op[0], op[1], NULL_TREE);
+	  break;
+	case 3:
+	  type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
+	  break;
+	case 4:
+	  type = build_function_type_list (op[0], op[1], op[2], op[3],
+					   NULL_TREE);
+	  break;
+	case 5:
+	  type = build_function_type_list (op[0], op[1], op[2], op[3], op[4],
+					   NULL_TREE);
+	  break;
+	case 6:
+	  type = build_function_type_list (op[0], op[1], op[2], op[3], op[4],
+					   op[5], NULL_TREE);
+	  break;
+	case 7:
+	  type = build_function_type_list (op[0], op[1], op[2], op[3], op[4],
+					   op[5], op[6], NULL_TREE);
+	  break;
+	default:
+	  gcc_unreachable ();
+	}
+
+      def_builtin (d->name, type, d->code);
+    }
+}
+
+static void
+htm_init_builtins (void)
+{
+  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
+  const struct builtin_description *d;
+  size_t i;
+
+  d = bdesc_htm;
+  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
+    {
+      tree op[MAX_HTM_OPERANDS], type;
+      HOST_WIDE_INT mask = d->mask;
+      unsigned attr = rs6000_builtin_info[d->code].attr;
+      bool void_func = (attr & RS6000_BTC_VOID);
+      int attr_args = (attr & RS6000_BTC_OPND_MASK);
+      int nopnds = 0;
+      tree gpr_type_node;
+      tree rettype;
+      tree argtype;
+
+      /* It is expected that these htm built-in functions may have
+	 d->icode equal to CODE_FOR_nothing.  */
+
+      if (TARGET_32BIT && TARGET_POWERPC64)
+	gpr_type_node = long_long_unsigned_type_node;
+      else
+	gpr_type_node = long_unsigned_type_node;
+
+      if (attr & RS6000_BTC_SPR)
+	{
+	  rettype = gpr_type_node;
+	  argtype = gpr_type_node;
+	}
+      else if (d->code == HTM_BUILTIN_TABORTDC
+	       || d->code == HTM_BUILTIN_TABORTDCI)
+	{
+	  rettype = unsigned_type_node;
+	  argtype = gpr_type_node;
+	}
+      else
+	{
+	  rettype = unsigned_type_node;
+	  argtype = unsigned_type_node;
+	}
+
+      if ((mask & builtin_mask) != mask)
+	{
+	  if (TARGET_DEBUG_BUILTIN)
+	    fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
+	  continue;
+	}
+
+      if (d->name == 0)
+	{
+	  if (TARGET_DEBUG_BUILTIN)
+	    fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
+		     (long unsigned) i);
+	  continue;
+	}
+
+      op[nopnds++] = (void_func) ? void_type_node : rettype;
+
+      if (attr_args == RS6000_BTC_UNARY)
+	op[nopnds++] = argtype;
+      else if (attr_args == RS6000_BTC_BINARY)
+	{
+	  op[nopnds++] = argtype;
+	  op[nopnds++] = argtype;
+	}
+      else if (attr_args == RS6000_BTC_TERNARY)
+	{
+	  op[nopnds++] = argtype;
+	  op[nopnds++] = argtype;
+	  op[nopnds++] = argtype;
+	}
+
+      switch (nopnds)
+	{
+	case 1:
+	  type = build_function_type_list (op[0], NULL_TREE);
+	  break;
+	case 2:
+	  type = build_function_type_list (op[0], op[1], NULL_TREE);
+	  break;
+	case 3:
+	  type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
+	  break;
+	case 4:
+	  type = build_function_type_list (op[0], op[1], op[2], op[3],
+					   NULL_TREE);
+	  break;
+	default:
+	  gcc_unreachable ();
+	}
+
+      def_builtin (d->name, type, d->code);
+    }
+}
+
+/* Map types for builtin functions with an explicit return type and
+   exactly 4 arguments.  Functions with three or fewer arguments use
+   builtin_function_type.  The number of quaternary built-in
+   functions is very small.  Handle each case specially.
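+   For example, xxeval takes three unsigned V2DI operands plus an 8-bit
+   immediate selecting one of the 256 possible ternary logic functions,
+   which is exactly the xxeval_type signature built below.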
*/
+static tree
+builtin_quaternary_function_type (machine_mode mode_ret,
+				  machine_mode mode_arg0,
+				  machine_mode mode_arg1,
+				  machine_mode mode_arg2,
+				  machine_mode mode_arg3,
+				  enum rs6000_builtins builtin)
+{
+  tree function_type = NULL;
+
+  static tree v2udi_type = builtin_mode_to_type[V2DImode][1];
+  static tree v16uqi_type = builtin_mode_to_type[V16QImode][1];
+  static tree uchar_type = builtin_mode_to_type[QImode][1];
+
+  static tree xxeval_type =
+    build_function_type_list (v2udi_type, v2udi_type, v2udi_type,
+			      v2udi_type, uchar_type, NULL_TREE);
+
+  static tree xxpermx_type =
+    build_function_type_list (v2udi_type, v2udi_type, v2udi_type,
+			      v16uqi_type, uchar_type, NULL_TREE);
+
+  switch (builtin) {
+
+  case P10V_BUILTIN_XXEVAL:
+    gcc_assert ((mode_ret == V2DImode)
+		&& (mode_arg0 == V2DImode)
+		&& (mode_arg1 == V2DImode)
+		&& (mode_arg2 == V2DImode)
+		&& (mode_arg3 == QImode));
+    function_type = xxeval_type;
+    break;
+
+  case P10V_BUILTIN_VXXPERMX:
+    gcc_assert ((mode_ret == V2DImode)
+		&& (mode_arg0 == V2DImode)
+		&& (mode_arg1 == V2DImode)
+		&& (mode_arg2 == V16QImode)
+		&& (mode_arg3 == QImode));
+    function_type = xxpermx_type;
+    break;
+
+  default:
+    /* A case for each quaternary built-in must be provided above.  */
+    gcc_unreachable ();
+  }
+
+  return function_type;
+}
+
+/* Map types for builtin functions with an explicit return type and up to 3
+   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
+   of the argument.  */
+static tree
+builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
+		       machine_mode mode_arg1, machine_mode mode_arg2,
+		       enum rs6000_builtins builtin, const char *name)
+{
+  struct builtin_hash_struct h;
+  struct builtin_hash_struct *h2;
+  int num_args = 3;
+  int i;
+  tree ret_type = NULL_TREE;
+  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
+
+  /* Create builtin_hash_table.  */
+  if (builtin_hash_table == NULL)
+    builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
+
+  h.type = NULL_TREE;
+  h.mode[0] = mode_ret;
+  h.mode[1] = mode_arg0;
+  h.mode[2] = mode_arg1;
+  h.mode[3] = mode_arg2;
+  h.uns_p[0] = 0;
+  h.uns_p[1] = 0;
+  h.uns_p[2] = 0;
+  h.uns_p[3] = 0;
+
+  /* If the builtin is a type that produces unsigned results or takes unsigned
+     arguments, and it is returned as a decl for the vectorizer (such as
+     widening multiplies, permute), make sure the arguments and return value
+     are type correct.  */
+  switch (builtin)
+    {
+      /* unsigned 1 argument functions.  */
+    case CRYPTO_BUILTIN_VSBOX:
+    case CRYPTO_BUILTIN_VSBOX_BE:
+    case P8V_BUILTIN_VGBBD:
+    case MISC_BUILTIN_CDTBCD:
+    case MISC_BUILTIN_CBCDTD:
+    case P10V_BUILTIN_XVCVSPBF16:
+    case P10V_BUILTIN_XVCVBF16SPN:
+    case P10V_BUILTIN_MTVSRBM:
+    case P10V_BUILTIN_MTVSRHM:
+    case P10V_BUILTIN_MTVSRWM:
+    case P10V_BUILTIN_MTVSRDM:
+    case P10V_BUILTIN_MTVSRQM:
+    case P10V_BUILTIN_VCNTMBB:
+    case P10V_BUILTIN_VCNTMBH:
+    case P10V_BUILTIN_VCNTMBW:
+    case P10V_BUILTIN_VCNTMBD:
+    case P10V_BUILTIN_VEXPANDMB:
+    case P10V_BUILTIN_VEXPANDMH:
+    case P10V_BUILTIN_VEXPANDMW:
+    case P10V_BUILTIN_VEXPANDMD:
+    case P10V_BUILTIN_VEXPANDMQ:
+      h.uns_p[0] = 1;
+      h.uns_p[1] = 1;
+      break;
+
+      /* unsigned 2 argument functions.
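+	 For example, vec_mule on two vector unsigned char inputs
+	 (ALTIVEC_BUILTIN_VMULEUB below) consumes unsigned operands and
+	 produces an unsigned widened result, so the return slot and
+	 both argument slots get uns_p = 1.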
*/ + case ALTIVEC_BUILTIN_VMULEUB: + case ALTIVEC_BUILTIN_VMULEUH: + case P8V_BUILTIN_VMULEUW: + case ALTIVEC_BUILTIN_VMULOUB: + case ALTIVEC_BUILTIN_VMULOUH: + case P8V_BUILTIN_VMULOUW: + case CRYPTO_BUILTIN_VCIPHER: + case CRYPTO_BUILTIN_VCIPHER_BE: + case CRYPTO_BUILTIN_VCIPHERLAST: + case CRYPTO_BUILTIN_VCIPHERLAST_BE: + case CRYPTO_BUILTIN_VNCIPHER: + case CRYPTO_BUILTIN_VNCIPHER_BE: + case CRYPTO_BUILTIN_VNCIPHERLAST: + case CRYPTO_BUILTIN_VNCIPHERLAST_BE: + case CRYPTO_BUILTIN_VPMSUMB: + case CRYPTO_BUILTIN_VPMSUMH: + case CRYPTO_BUILTIN_VPMSUMW: + case CRYPTO_BUILTIN_VPMSUMD: + case CRYPTO_BUILTIN_VPMSUM: + case MISC_BUILTIN_ADDG6S: + case MISC_BUILTIN_DIVWEU: + case MISC_BUILTIN_DIVDEU: + case VSX_BUILTIN_UDIV_V2DI: + case ALTIVEC_BUILTIN_VMAXUB: + case ALTIVEC_BUILTIN_VMINUB: + case ALTIVEC_BUILTIN_VMAXUH: + case ALTIVEC_BUILTIN_VMINUH: + case ALTIVEC_BUILTIN_VMAXUW: + case ALTIVEC_BUILTIN_VMINUW: + case P8V_BUILTIN_VMAXUD: + case P8V_BUILTIN_VMINUD: + case ALTIVEC_BUILTIN_VAND_V16QI_UNS: + case ALTIVEC_BUILTIN_VAND_V8HI_UNS: + case ALTIVEC_BUILTIN_VAND_V4SI_UNS: + case ALTIVEC_BUILTIN_VAND_V2DI_UNS: + case ALTIVEC_BUILTIN_VANDC_V16QI_UNS: + case ALTIVEC_BUILTIN_VANDC_V8HI_UNS: + case ALTIVEC_BUILTIN_VANDC_V4SI_UNS: + case ALTIVEC_BUILTIN_VANDC_V2DI_UNS: + case ALTIVEC_BUILTIN_VNOR_V16QI_UNS: + case ALTIVEC_BUILTIN_VNOR_V8HI_UNS: + case ALTIVEC_BUILTIN_VNOR_V4SI_UNS: + case ALTIVEC_BUILTIN_VNOR_V2DI_UNS: + case ALTIVEC_BUILTIN_VOR_V16QI_UNS: + case ALTIVEC_BUILTIN_VOR_V8HI_UNS: + case ALTIVEC_BUILTIN_VOR_V4SI_UNS: + case ALTIVEC_BUILTIN_VOR_V2DI_UNS: + case ALTIVEC_BUILTIN_VXOR_V16QI_UNS: + case ALTIVEC_BUILTIN_VXOR_V8HI_UNS: + case ALTIVEC_BUILTIN_VXOR_V4SI_UNS: + case ALTIVEC_BUILTIN_VXOR_V2DI_UNS: + case P8V_BUILTIN_EQV_V16QI_UNS: + case P8V_BUILTIN_EQV_V8HI_UNS: + case P8V_BUILTIN_EQV_V4SI_UNS: + case P8V_BUILTIN_EQV_V2DI_UNS: + case P8V_BUILTIN_EQV_V1TI_UNS: + case P8V_BUILTIN_NAND_V16QI_UNS: + case P8V_BUILTIN_NAND_V8HI_UNS: + case P8V_BUILTIN_NAND_V4SI_UNS: + case P8V_BUILTIN_NAND_V2DI_UNS: + case P8V_BUILTIN_NAND_V1TI_UNS: + case P8V_BUILTIN_ORC_V16QI_UNS: + case P8V_BUILTIN_ORC_V8HI_UNS: + case P8V_BUILTIN_ORC_V4SI_UNS: + case P8V_BUILTIN_ORC_V2DI_UNS: + case P8V_BUILTIN_ORC_V1TI_UNS: + case P10_BUILTIN_CFUGED: + case P10_BUILTIN_CNTLZDM: + case P10_BUILTIN_CNTTZDM: + case P10_BUILTIN_PDEPD: + case P10_BUILTIN_PEXTD: + case P10V_BUILTIN_VCFUGED: + case P10V_BUILTIN_VCLZDM: + case P10V_BUILTIN_VCTZDM: + case P10V_BUILTIN_VGNB: + case P10V_BUILTIN_VPDEPD: + case P10V_BUILTIN_VPEXTD: + case P10V_BUILTIN_XXGENPCVM_V16QI: + case P10V_BUILTIN_XXGENPCVM_V8HI: + case P10V_BUILTIN_XXGENPCVM_V4SI: + case P10V_BUILTIN_XXGENPCVM_V2DI: + case P10V_BUILTIN_DIVEU_V4SI: + case P10V_BUILTIN_DIVEU_V2DI: + case P10V_BUILTIN_DIVEU_V1TI: + case P10V_BUILTIN_DIVU_V4SI: + case P10V_BUILTIN_DIVU_V2DI: + case P10V_BUILTIN_MODU_V1TI: + case P10V_BUILTIN_MODU_V2DI: + case P10V_BUILTIN_MODU_V4SI: + case P10V_BUILTIN_MULHU_V2DI: + case P10V_BUILTIN_MULHU_V4SI: + case P10V_BUILTIN_VMULEUD: + case P10V_BUILTIN_VMULOUD: + h.uns_p[0] = 1; + h.uns_p[1] = 1; + h.uns_p[2] = 1; + break; + + /* unsigned 3 argument functions. 
*/ + case ALTIVEC_BUILTIN_VPERM_16QI_UNS: + case ALTIVEC_BUILTIN_VPERM_8HI_UNS: + case ALTIVEC_BUILTIN_VPERM_4SI_UNS: + case ALTIVEC_BUILTIN_VPERM_2DI_UNS: + case ALTIVEC_BUILTIN_VSEL_16QI_UNS: + case ALTIVEC_BUILTIN_VSEL_8HI_UNS: + case ALTIVEC_BUILTIN_VSEL_4SI_UNS: + case ALTIVEC_BUILTIN_VSEL_2DI_UNS: + case VSX_BUILTIN_VPERM_16QI_UNS: + case VSX_BUILTIN_VPERM_8HI_UNS: + case VSX_BUILTIN_VPERM_4SI_UNS: + case VSX_BUILTIN_VPERM_2DI_UNS: + case VSX_BUILTIN_XXSEL_16QI_UNS: + case VSX_BUILTIN_XXSEL_8HI_UNS: + case VSX_BUILTIN_XXSEL_4SI_UNS: + case VSX_BUILTIN_XXSEL_2DI_UNS: + case CRYPTO_BUILTIN_VPERMXOR: + case CRYPTO_BUILTIN_VPERMXOR_V2DI: + case CRYPTO_BUILTIN_VPERMXOR_V4SI: + case CRYPTO_BUILTIN_VPERMXOR_V8HI: + case CRYPTO_BUILTIN_VPERMXOR_V16QI: + case CRYPTO_BUILTIN_VSHASIGMAW: + case CRYPTO_BUILTIN_VSHASIGMAD: + case CRYPTO_BUILTIN_VSHASIGMA: + case P10V_BUILTIN_VEXTRACTBL: + case P10V_BUILTIN_VEXTRACTHL: + case P10V_BUILTIN_VEXTRACTWL: + case P10V_BUILTIN_VEXTRACTDL: + case P10V_BUILTIN_VEXTRACTBR: + case P10V_BUILTIN_VEXTRACTHR: + case P10V_BUILTIN_VEXTRACTWR: + case P10V_BUILTIN_VEXTRACTDR: + case P10V_BUILTIN_VINSERTGPRBL: + case P10V_BUILTIN_VINSERTGPRHL: + case P10V_BUILTIN_VINSERTGPRWL: + case P10V_BUILTIN_VINSERTGPRDL: + case P10V_BUILTIN_VINSERTVPRBL: + case P10V_BUILTIN_VINSERTVPRHL: + case P10V_BUILTIN_VINSERTVPRWL: + case P10V_BUILTIN_VREPLACE_ELT_UV4SI: + case P10V_BUILTIN_VREPLACE_ELT_UV2DI: + case P10V_BUILTIN_VREPLACE_UN_UV4SI: + case P10V_BUILTIN_VREPLACE_UN_UV2DI: + case P10V_BUILTIN_VXXBLEND_V16QI: + case P10V_BUILTIN_VXXBLEND_V8HI: + case P10V_BUILTIN_VXXBLEND_V4SI: + case P10V_BUILTIN_VXXBLEND_V2DI: + h.uns_p[0] = 1; + h.uns_p[1] = 1; + h.uns_p[2] = 1; + h.uns_p[3] = 1; + break; + + /* signed permute functions with unsigned char mask. */ + case ALTIVEC_BUILTIN_VPERM_16QI: + case ALTIVEC_BUILTIN_VPERM_8HI: + case ALTIVEC_BUILTIN_VPERM_4SI: + case ALTIVEC_BUILTIN_VPERM_4SF: + case ALTIVEC_BUILTIN_VPERM_2DI: + case ALTIVEC_BUILTIN_VPERM_2DF: + case VSX_BUILTIN_VPERM_16QI: + case VSX_BUILTIN_VPERM_8HI: + case VSX_BUILTIN_VPERM_4SI: + case VSX_BUILTIN_VPERM_4SF: + case VSX_BUILTIN_VPERM_2DI: + case VSX_BUILTIN_VPERM_2DF: + h.uns_p[3] = 1; + break; + + /* unsigned args, signed return. */ + case VSX_BUILTIN_XVCVUXDSP: + case VSX_BUILTIN_XVCVUXDDP_UNS: + case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF: + h.uns_p[1] = 1; + break; + + /* signed args, unsigned return. */ + case VSX_BUILTIN_XVCVDPUXDS_UNS: + case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI: + case MISC_BUILTIN_UNPACK_TD: + case MISC_BUILTIN_UNPACK_V1TI: + h.uns_p[0] = 1; + break; + + /* unsigned arguments, bool return (compares). */ + case ALTIVEC_BUILTIN_VCMPEQUB: + case ALTIVEC_BUILTIN_VCMPEQUH: + case ALTIVEC_BUILTIN_VCMPEQUW: + case P8V_BUILTIN_VCMPEQUD: + case VSX_BUILTIN_CMPGE_U16QI: + case VSX_BUILTIN_CMPGE_U8HI: + case VSX_BUILTIN_CMPGE_U4SI: + case VSX_BUILTIN_CMPGE_U2DI: + case P10V_BUILTIN_CMPGE_U1TI: + case ALTIVEC_BUILTIN_VCMPGTUB: + case ALTIVEC_BUILTIN_VCMPGTUH: + case ALTIVEC_BUILTIN_VCMPGTUW: + case P8V_BUILTIN_VCMPGTUD: + case P10V_BUILTIN_VCMPGTUT: + case P10V_BUILTIN_VCMPEQUT: + h.uns_p[1] = 1; + h.uns_p[2] = 1; + break; + + /* unsigned arguments for 128-bit pack instructions. */ + case MISC_BUILTIN_PACK_TD: + case MISC_BUILTIN_PACK_V1TI: + h.uns_p[1] = 1; + h.uns_p[2] = 1; + break; + + /* unsigned second arguments (vector shift right). 
*/
+    case ALTIVEC_BUILTIN_VSRB:
+    case ALTIVEC_BUILTIN_VSRH:
+    case ALTIVEC_BUILTIN_VSRW:
+    case P8V_BUILTIN_VSRD:
+      /* Vector splat immediate insert.  */
+    case P10V_BUILTIN_VXXSPLTI32DX_V4SI:
+    case P10V_BUILTIN_VXXSPLTI32DX_V4SF:
+      h.uns_p[2] = 1;
+      break;
+
+    case VSX_BUILTIN_LXVP:
+      h.uns_p[0] = 1;
+      h.uns_p[2] = 1;
+      break;
+
+    case VSX_BUILTIN_STXVP:
+      h.uns_p[1] = 1;
+      h.uns_p[3] = 1;
+      break;
+
+    default:
+      break;
+    }
+
+  /* Figure out how many args are present.  */
+  while (num_args > 0 && h.mode[num_args] == VOIDmode)
+    num_args--;
+
+  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
+  if (!ret_type && h.uns_p[0])
+    ret_type = builtin_mode_to_type[h.mode[0]][0];
+
+  /* If the required decimal float type has been disabled,
+     then return NULL_TREE.  */
+  if (!ret_type && DECIMAL_FLOAT_MODE_P (h.mode[0]))
+    return NULL_TREE;
+
+  if (!ret_type)
+    fatal_error (input_location,
+		 "internal error: builtin function %qs had an unexpected "
+		 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
+
+  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
+    arg_type[i] = NULL_TREE;
+
+  for (i = 0; i < num_args; i++)
+    {
+      int m = (int) h.mode[i+1];
+      int uns_p = h.uns_p[i+1];
+
+      arg_type[i] = builtin_mode_to_type[m][uns_p];
+      if (!arg_type[i] && uns_p)
+	arg_type[i] = builtin_mode_to_type[m][0];
+
+      /* If the required decimal float type has been disabled,
+	 then return NULL_TREE.  */
+      if (!arg_type[i] && DECIMAL_FLOAT_MODE_P (m))
+	return NULL_TREE;
+
+      if (!arg_type[i])
+	fatal_error (input_location,
+		     "internal error: builtin function %qs, argument %d "
+		     "had unexpected argument type %qs", name, i,
+		     GET_MODE_NAME (m));
+    }
+
+  builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
+  if (*found == NULL)
+    {
+      h2 = ggc_alloc<builtin_hash_struct> ();
+      *h2 = h;
+      *found = h2;
+
+      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
+					   arg_type[2], NULL_TREE);
+    }
+
+  return (*found)->type;
+}
+
+static void
+rs6000_common_init_builtins (void)
+{
+  const struct builtin_description *d;
+  size_t i;
+
+  tree opaque_ftype_opaque = NULL_TREE;
+  tree opaque_ftype_opaque_opaque = NULL_TREE;
+  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
+  tree opaque_ftype_opaque_opaque_opaque_opaque = NULL_TREE;
+  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
+
+  /* Create Altivec and VSX builtins on machines with at least the
+     general purpose extensions (970 and newer) to allow the use of
+     the target attribute.  */
+
+  if (TARGET_EXTRA_BUILTINS)
+    builtin_mask |= RS6000_BTM_COMMON;
+
+  /* Add the quaternary operators.
*/ + d = bdesc_4arg; + for (i = 0; i < ARRAY_SIZE (bdesc_4arg); i++, d++) + { + tree type; + HOST_WIDE_INT mask = d->mask; + + if ((mask & builtin_mask) != mask) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip quaternary %s\n", d->name); + continue; + } + + if (rs6000_overloaded_builtin_p (d->code)) + { + type = opaque_ftype_opaque_opaque_opaque_opaque; + if (!type) + type = opaque_ftype_opaque_opaque_opaque_opaque + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, + opaque_V4SI_type_node, + opaque_V4SI_type_node, + opaque_V4SI_type_node, + NULL_TREE); + } + else + { + enum insn_code icode = d->icode; + if (d->name == 0) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, bdesc_4arg[%ld] no name\n", + (long) i); + continue; + } + + if (icode == CODE_FOR_nothing) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, + "rs6000_builtin, skip quaternary %s (no code)\n", + d->name); + continue; + } + + type = + builtin_quaternary_function_type (insn_data[icode].operand[0].mode, + insn_data[icode].operand[1].mode, + insn_data[icode].operand[2].mode, + insn_data[icode].operand[3].mode, + insn_data[icode].operand[4].mode, + d->code); + } + def_builtin (d->name, type, d->code); + } + + /* Add the ternary operators. */ + d = bdesc_3arg; + for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++) + { + tree type; + HOST_WIDE_INT mask = d->mask; + + if ((mask & builtin_mask) != mask) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name); + continue; + } + + if (rs6000_overloaded_builtin_p (d->code)) + { + if (! (type = opaque_ftype_opaque_opaque_opaque)) + type = opaque_ftype_opaque_opaque_opaque + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, + opaque_V4SI_type_node, + opaque_V4SI_type_node, + NULL_TREE); + } + else + { + enum insn_code icode = d->icode; + if (d->name == 0) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n", + (long unsigned)i); + + continue; + } + + if (icode == CODE_FOR_nothing) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n", + d->name); + + continue; + } + + type = builtin_function_type (insn_data[icode].operand[0].mode, + insn_data[icode].operand[1].mode, + insn_data[icode].operand[2].mode, + insn_data[icode].operand[3].mode, + d->code, d->name); + } + + def_builtin (d->name, type, d->code); + } + + /* Add the binary operators. */ + d = bdesc_2arg; + for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++) + { + machine_mode mode0, mode1, mode2; + tree type; + HOST_WIDE_INT mask = d->mask; + + if ((mask & builtin_mask) != mask) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name); + continue; + } + + if (rs6000_overloaded_builtin_p (d->code)) + { + const struct altivec_builtin_types *desc; + + /* Verify the builtin we are overloading has already been defined. */ + type = NULL_TREE; + for (desc = altivec_overloaded_builtins; + desc->code != RS6000_BUILTIN_NONE; desc++) + if (desc->code == d->code + && rs6000_builtin_decls[(int)desc->overloaded_code]) + { + if (! 
(type = opaque_ftype_opaque_opaque)) + type = opaque_ftype_opaque_opaque + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, + opaque_V4SI_type_node, + NULL_TREE); + break; + } + } + else + { + enum insn_code icode = d->icode; + if (d->name == 0) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n", + (long unsigned)i); + + continue; + } + + if (icode == CODE_FOR_nothing) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n", + d->name); + + continue; + } + + mode0 = insn_data[icode].operand[0].mode; + mode1 = insn_data[icode].operand[1].mode; + mode2 = insn_data[icode].operand[2].mode; + + type = builtin_function_type (mode0, mode1, mode2, VOIDmode, + d->code, d->name); + } + + def_builtin (d->name, type, d->code); + } + + /* Add the simple unary operators. */ + d = bdesc_1arg; + for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++) + { + machine_mode mode0, mode1; + tree type; + HOST_WIDE_INT mask = d->mask; + + if ((mask & builtin_mask) != mask) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name); + continue; + } + + if (rs6000_overloaded_builtin_p (d->code)) + { + if (! (type = opaque_ftype_opaque)) + type = opaque_ftype_opaque + = build_function_type_list (opaque_V4SI_type_node, + opaque_V4SI_type_node, + NULL_TREE); + } + else + { + enum insn_code icode = d->icode; + if (d->name == 0) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n", + (long unsigned)i); + + continue; + } + + if (icode == CODE_FOR_nothing) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n", + d->name); + + continue; + } + + mode0 = insn_data[icode].operand[0].mode; + mode1 = insn_data[icode].operand[1].mode; + + type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode, + d->code, d->name); + } + + def_builtin (d->name, type, d->code); + } + + /* Add the simple no-argument operators. */ + d = bdesc_0arg; + for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++) + { + machine_mode mode0; + tree type; + HOST_WIDE_INT mask = d->mask; + + if ((mask & builtin_mask) != mask) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name); + continue; + } + if (rs6000_overloaded_builtin_p (d->code)) + { + if (!opaque_ftype_opaque) + opaque_ftype_opaque + = build_function_type_list (opaque_V4SI_type_node, NULL_TREE); + type = opaque_ftype_opaque; + } + else + { + enum insn_code icode = d->icode; + if (d->name == 0) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n", + (long unsigned) i); + continue; + } + if (icode == CODE_FOR_nothing) + { + if (TARGET_DEBUG_BUILTIN) + fprintf (stderr, + "rs6000_builtin, skip no-argument %s (no code)\n", + d->name); + continue; + } + mode0 = insn_data[icode].operand[0].mode; + type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode, + d->code, d->name); + } + def_builtin (d->name, type, d->code); + } +} + +/* Return the internal arg pointer used for function incoming + arguments. When -fsplit-stack, the arg pointer is r12 so we need + to copy it to a pseudo in order for it to be preserved over calls + and suchlike. We'd really like to use a pseudo here for the + internal arg pointer but data-flow analysis is not prepared to + accept pseudos as live at the beginning of a function. 
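+
+   For example (an illustrative user-level sketch, not part of GCC):
+   when compiling with -fsplit-stack, a function declared as
+
+     int fast_path (int) __attribute__ ((no_split_stack));
+
+   skips the r12 copy below and keeps virtual_incoming_args_rtx, while
+   ordinary functions get the pseudo initialized from r12.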
*/ + +rtx +rs6000_internal_arg_pointer (void) +{ + if (flag_split_stack + && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl)) + == NULL)) + + { + if (cfun->machine->split_stack_arg_pointer == NULL_RTX) + { + rtx pat; + + cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode); + REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1; + + /* Put the pseudo initialization right after the note at the + beginning of the function. */ + pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer, + gen_rtx_REG (Pmode, 12)); + push_topmost_sequence (); + emit_insn_after (pat, get_insns ()); + pop_topmost_sequence (); + } + rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer, + FIRST_PARM_OFFSET (current_function_decl)); + return copy_to_reg (ret); + } + return virtual_incoming_args_rtx; +} + + +/* A C compound statement that outputs the assembler code for a thunk + function, used to implement C++ virtual function calls with + multiple inheritance. The thunk acts as a wrapper around a virtual + function, adjusting the implicit object parameter before handing + control off to the real function. + + First, emit code to add the integer DELTA to the location that + contains the incoming first argument. Assume that this argument + contains a pointer, and is the one used to pass the `this' pointer + in C++. This is the incoming argument *before* the function + prologue, e.g. `%o0' on a sparc. The addition must preserve the + values of all other incoming arguments. + + After the addition, emit code to jump to FUNCTION, which is a + `FUNCTION_DECL'. This is a direct pure jump, not a call, and does + not touch the return address. Hence returning from FUNCTION will + return to whoever called the current `thunk'. + + The effect must be as if FUNCTION had been called directly with the + adjusted first argument. This macro is responsible for emitting + all of the code for a thunk function; output_function_prologue() + and output_function_epilogue() are not invoked. + + The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already + been extracted from it.) It might possibly be useful on some + targets, but probably not. + + If you do not define this macro, the target-independent code in the + C++ frontend will generate a less efficient heavyweight thunk that + calls FUNCTION instead of jumping to it. The generic approach does + not support varargs. */ + +void +rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, + HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, + tree function) +{ + const char *fnname = get_fnname_from_decl (thunk_fndecl); + rtx this_rtx, funexp; + rtx_insn *insn; + + reload_completed = 1; + epilogue_completed = 1; + + /* Mark the end of the (empty) prologue. */ + emit_note (NOTE_INSN_PROLOGUE_END); + + /* Find the "this" pointer. If the function returns a structure, + the structure return pointer is in r3. */ + if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) + this_rtx = gen_rtx_REG (Pmode, 4); + else + this_rtx = gen_rtx_REG (Pmode, 3); + + /* Apply the constant offset, if required. */ + if (delta) + emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta))); + + /* Apply the offset from the vtable, if required. 
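+
+     The range check below tests whether VCALL_OFFSET fits in a signed
+     16-bit displacement (-0x8000 .. 0x7fff): adding 0x8000 maps that
+     range onto 0 .. 0xffff, so any biased value >= 0x10000 is out of
+     range.  For example, vcall_offset = 0x9000 biases to 0x11000,
+     which forces the separate add-then-load sequence.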
*/ + if (vcall_offset) + { + rtx vcall_offset_rtx = GEN_INT (vcall_offset); + rtx tmp = gen_rtx_REG (Pmode, 12); + + emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx)); + if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000) + { + emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx)); + emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp)); + } + else + { + rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx); + + emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc)); + } + emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp)); + } + + /* Generate a tail call to the target function. */ + if (!TREE_USED (function)) + { + assemble_external (function); + TREE_USED (function) = 1; + } + funexp = XEXP (DECL_RTL (function), 0); + funexp = gen_rtx_MEM (FUNCTION_MODE, funexp); + + insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, const0_rtx)); + SIBLING_CALL_P (insn) = 1; + emit_barrier (); + + /* Run just enough of rest_of_compilation to get the insns emitted. + There's not really enough bulk here to make other passes such as + instruction scheduling worth while. */ + insn = get_insns (); + shorten_branches (insn); + assemble_start_function (thunk_fndecl, fnname); + final_start_function (insn, file, 1); + final (insn, file, 1); + final_end_function (); + assemble_end_function (thunk_fndecl, fnname); + + reload_completed = 0; + epilogue_completed = 0; +} + +#include "gt-rs6000-call.h" diff --git a/software/gcc/gcc/config/rs6000/rs6000-cpus.def b/software/gcc/gcc/config/rs6000/rs6000-cpus.def new file mode 100644 index 0000000..1abd249 --- /dev/null +++ b/software/gcc/gcc/config/rs6000/rs6000-cpus.def @@ -0,0 +1,280 @@ +/* IBM RS/6000 CPU names.. + Copyright (C) 1991-2021 Free Software Foundation, Inc. + Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) + + This file is part of GCC. + + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +/* ISA masks. */ +#ifndef ISA_2_1_MASKS +#define ISA_2_1_MASKS OPTION_MASK_MFCRF +#define ISA_2_2_MASKS (ISA_2_1_MASKS | OPTION_MASK_POPCNTB) +#define ISA_2_4_MASKS (ISA_2_2_MASKS | OPTION_MASK_FPRND) + + /* For ISA 2.05, don't add ALTIVEC, since in general it isn't a win on + power6. In ISA 2.04, fsel, fre, fsqrt, etc. were no longer documented + as optional. Group masks by server and embedded. */ +#define ISA_2_5_MASKS_EMBEDDED (ISA_2_4_MASKS \ + | OPTION_MASK_CMPB \ + | OPTION_MASK_RECIP_PRECISION \ + | OPTION_MASK_PPC_GFXOPT \ + | OPTION_MASK_PPC_GPOPT) + +#define ISA_2_5_MASKS_SERVER (ISA_2_5_MASKS_EMBEDDED | OPTION_MASK_DFP) + + /* For ISA 2.06, don't add ISEL, since in general it isn't a win, but + altivec is a win so enable it. */ +#define ISA_2_6_MASKS_EMBEDDED (ISA_2_5_MASKS_EMBEDDED | OPTION_MASK_POPCNTD) +#define ISA_2_6_MASKS_SERVER (ISA_2_5_MASKS_SERVER \ + | OPTION_MASK_POPCNTD \ + | OPTION_MASK_ALTIVEC \ + | OPTION_MASK_VSX) + +/* For now, don't provide an embedded version of ISA 2.07. Do not set power8 + fusion here, instead set it in rs6000.c if we are tuning for a power8 + system. 
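+
+   Each ISA_*_MASKS_SERVER macro below is defined in terms of its
+   predecessor, e.g. (expansion sketch):
+
+     ISA_2_7_MASKS_SERVER = ISA_2_6_MASKS_SERVER
+			    | OPTION_MASK_P8_VECTOR
+			    | OPTION_MASK_CRYPTO
+			    | ...
+
+   so -mcpu=power8 generally implies everything -mcpu=power7 implies
+   (ISA_3_0_MASKS_SERVER is the exception: it masks the power8 fusion
+   bits back out).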
*/ +#define ISA_2_7_MASKS_SERVER (ISA_2_6_MASKS_SERVER \ + | OPTION_MASK_P8_VECTOR \ + | OPTION_MASK_CRYPTO \ + | OPTION_MASK_DIRECT_MOVE \ + | OPTION_MASK_EFFICIENT_UNALIGNED_VSX \ + | OPTION_MASK_QUAD_MEMORY \ + | OPTION_MASK_QUAD_MEMORY_ATOMIC) + +/* ISA masks setting fusion options. */ +#define OTHER_FUSION_MASKS (OPTION_MASK_P8_FUSION \ + | OPTION_MASK_P8_FUSION_SIGN) + +/* Add ISEL back into ISA 3.0, since it is supposed to be a win. Do not add + FLOAT128_HW here until we are ready to make -mfloat128 on by default. */ +#define ISA_3_0_MASKS_SERVER ((ISA_2_7_MASKS_SERVER \ + | OPTION_MASK_ISEL \ + | OPTION_MASK_MODULO \ + | OPTION_MASK_P9_MINMAX \ + | OPTION_MASK_P9_MISC \ + | OPTION_MASK_P9_VECTOR) \ + & ~OTHER_FUSION_MASKS) + +/* Support for the IEEE 128-bit floating point hardware requires a lot of the + VSX instructions that are part of ISA 3.0. */ +#define ISA_3_0_MASKS_IEEE (OPTION_MASK_VSX \ + | OPTION_MASK_P8_VECTOR \ + | OPTION_MASK_P9_VECTOR) + +/* Flags that need to be turned off if -mno-power10. */ +/* We comment out PCREL_OPT here to disable it by default because SPEC2017 + performance was degraded by it. */ +#define OTHER_POWER10_MASKS (OPTION_MASK_MMA \ + | OPTION_MASK_PCREL \ + /* | OPTION_MASK_PCREL_OPT */ \ + | OPTION_MASK_PREFIXED) + +#define ISA_3_1_MASKS_SERVER (ISA_3_0_MASKS_SERVER \ + | OPTION_MASK_POWER10 \ + | OTHER_POWER10_MASKS \ + | OPTION_MASK_P10_FUSION \ + | OPTION_MASK_P10_FUSION_LD_CMPI \ + | OPTION_MASK_P10_FUSION_2LOGICAL \ + | OPTION_MASK_P10_FUSION_LOGADD \ + | OPTION_MASK_P10_FUSION_ADDLOG \ + | OPTION_MASK_P10_FUSION_2ADD \ + | OPTION_MASK_P10_FUSION_2STORE) + +/* Flags that need to be turned off if -mno-power9-vector. */ +#define OTHER_P9_VECTOR_MASKS (OPTION_MASK_FLOAT128_HW \ + | OPTION_MASK_P9_MINMAX) + +/* Flags that need to be turned off if -mno-power8-vector. */ +#define OTHER_P8_VECTOR_MASKS (OTHER_P9_VECTOR_MASKS \ + | OPTION_MASK_P9_VECTOR \ + | OPTION_MASK_DIRECT_MOVE \ + | OPTION_MASK_CRYPTO) + +/* Flags that need to be turned off if -mno-vsx. */ +#define OTHER_VSX_VECTOR_MASKS (OTHER_P8_VECTOR_MASKS \ + | OPTION_MASK_EFFICIENT_UNALIGNED_VSX \ + | OPTION_MASK_FLOAT128_KEYWORD \ + | OPTION_MASK_P8_VECTOR) + +/* Flags that need to be turned off if -mno-altivec. */ +#define OTHER_ALTIVEC_MASKS (OTHER_VSX_VECTOR_MASKS \ + | OPTION_MASK_VSX) + +#define POWERPC_7400_MASK (OPTION_MASK_PPC_GFXOPT | OPTION_MASK_ALTIVEC) + +/* Deal with ports that do not have -mstrict-align. */ +#ifdef OPTION_MASK_STRICT_ALIGN +#define OPTION_MASK_STRICT_ALIGN_OPTIONAL OPTION_MASK_STRICT_ALIGN +#else +#define OPTION_MASK_STRICT_ALIGN 0 +#define OPTION_MASK_STRICT_ALIGN_OPTIONAL 0 +#ifndef MASK_STRICT_ALIGN +#define MASK_STRICT_ALIGN 0 +#endif +#endif + +/* Mask of all options to set the default isa flags based on -mcpu=. 
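+
+   (Informally: this is the set of bits the RS6000_CPU table below is
+   allowed to touch; option handling clears any of these bits the user
+   did not set explicitly, then ors in the flags of the selected
+   -mcpu= entry.  Treat the exact clearing rule as an assumption about
+   rs6000.c, not a guarantee made here.)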
*/ +#define POWERPC_MASKS (OPTION_MASK_ALTIVEC \ + | OPTION_MASK_CMPB \ + | OPTION_MASK_CRYPTO \ + | OPTION_MASK_DFP \ + | OPTION_MASK_DIRECT_MOVE \ + | OPTION_MASK_DLMZB \ + | OPTION_MASK_EFFICIENT_UNALIGNED_VSX \ + | OPTION_MASK_FLOAT128_HW \ + | OPTION_MASK_FLOAT128_KEYWORD \ + | OPTION_MASK_FPRND \ + | OPTION_MASK_POWER10 \ + | OPTION_MASK_P10_FUSION \ + | OPTION_MASK_P10_FUSION_LD_CMPI \ + | OPTION_MASK_P10_FUSION_2LOGICAL \ + | OPTION_MASK_P10_FUSION_LOGADD \ + | OPTION_MASK_P10_FUSION_ADDLOG \ + | OPTION_MASK_P10_FUSION_2ADD \ + | OPTION_MASK_P10_FUSION_2STORE \ + | OPTION_MASK_HTM \ + | OPTION_MASK_ISEL \ + | OPTION_MASK_MFCRF \ + | OPTION_MASK_MMA \ + | OPTION_MASK_MODULO \ + | OPTION_MASK_MULHW \ + | OPTION_MASK_NO_UPDATE \ + | OPTION_MASK_P8_FUSION \ + | OPTION_MASK_P8_VECTOR \ + | OPTION_MASK_P9_MINMAX \ + | OPTION_MASK_P9_MISC \ + | OPTION_MASK_P9_VECTOR \ + | OPTION_MASK_PCREL \ + | OPTION_MASK_PCREL_OPT \ + | OPTION_MASK_POPCNTB \ + | OPTION_MASK_POPCNTD \ + | OPTION_MASK_POWERPC64 \ + | OPTION_MASK_PPC_GFXOPT \ + | OPTION_MASK_PPC_GPOPT \ + | OPTION_MASK_PREFIXED \ + | OPTION_MASK_QUAD_MEMORY \ + | OPTION_MASK_QUAD_MEMORY_ATOMIC \ + | OPTION_MASK_RECIP_PRECISION \ + | OPTION_MASK_SOFT_FLOAT \ + | OPTION_MASK_STRICT_ALIGN_OPTIONAL \ + | OPTION_MASK_VSX) + +#endif + +/* This table occasionally claims that a processor does not support a + particular feature even though it does, but the feature is slower than the + alternative. Thus, it shouldn't be relied on as a complete description of + the processor's support. + + Please keep this list in order, and don't forget to update the documentation + in invoke.texi when adding a new processor or flag. + + Before including this file, define a macro: + + RS6000_CPU (NAME, CPU, FLAGS) + + where the arguments are the fields of struct rs6000_ptt. 
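+
+   For example, a consumer expands the table into an initializer with
+   the usual X-macro pattern (a sketch; rs6000.c builds
+   processor_target_table very much like this):
+
+     #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
+     static const struct rs6000_ptt processor_target_table[] =
+     {
+     #include "rs6000-cpus.def"
+     };
+     #undef RS6000_CPU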
*/ + +RS6000_CPU ("401", PROCESSOR_PPC403, MASK_SOFT_FLOAT) +RS6000_CPU ("403", PROCESSOR_PPC403, MASK_SOFT_FLOAT | MASK_STRICT_ALIGN) +RS6000_CPU ("405", PROCESSOR_PPC405, MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("405fp", PROCESSOR_PPC405, MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("440", PROCESSOR_PPC440, MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("440fp", PROCESSOR_PPC440, MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("464", PROCESSOR_PPC440, MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("464fp", PROCESSOR_PPC440, MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("476", PROCESSOR_PPC476, + MASK_SOFT_FLOAT | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB + | MASK_FPRND | MASK_CMPB | MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("476fp", PROCESSOR_PPC476, + MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND + | MASK_CMPB | MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("505", PROCESSOR_MPCCORE, 0) +RS6000_CPU ("601", PROCESSOR_PPC601, MASK_MULTIPLE) +RS6000_CPU ("602", PROCESSOR_PPC603, MASK_PPC_GFXOPT) +RS6000_CPU ("603", PROCESSOR_PPC603, MASK_PPC_GFXOPT) +RS6000_CPU ("603e", PROCESSOR_PPC603, MASK_PPC_GFXOPT) +RS6000_CPU ("604", PROCESSOR_PPC604, MASK_PPC_GFXOPT) +RS6000_CPU ("604e", PROCESSOR_PPC604e, MASK_PPC_GFXOPT) +RS6000_CPU ("620", PROCESSOR_PPC620, MASK_PPC_GFXOPT | MASK_POWERPC64) +RS6000_CPU ("630", PROCESSOR_PPC630, MASK_PPC_GFXOPT | MASK_POWERPC64) +RS6000_CPU ("740", PROCESSOR_PPC750, MASK_PPC_GFXOPT) +RS6000_CPU ("7400", PROCESSOR_PPC7400, POWERPC_7400_MASK) +RS6000_CPU ("7450", PROCESSOR_PPC7450, POWERPC_7400_MASK) +RS6000_CPU ("750", PROCESSOR_PPC750, MASK_PPC_GFXOPT) +RS6000_CPU ("801", PROCESSOR_MPCCORE, MASK_SOFT_FLOAT) +RS6000_CPU ("821", PROCESSOR_MPCCORE, MASK_SOFT_FLOAT) +RS6000_CPU ("823", PROCESSOR_MPCCORE, MASK_SOFT_FLOAT) +RS6000_CPU ("8540", PROCESSOR_PPC8540, MASK_STRICT_ALIGN | MASK_ISEL) +RS6000_CPU ("8548", PROCESSOR_PPC8548, MASK_STRICT_ALIGN | MASK_ISEL) +RS6000_CPU ("a2", PROCESSOR_PPCA2, + MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_POPCNTB | MASK_CMPB + | MASK_NO_UPDATE) +RS6000_CPU ("e300c2", PROCESSOR_PPCE300C2, MASK_SOFT_FLOAT) +RS6000_CPU ("e300c3", PROCESSOR_PPCE300C3, 0) +RS6000_CPU ("e500mc", PROCESSOR_PPCE500MC, MASK_PPC_GFXOPT | MASK_ISEL) +RS6000_CPU ("e500mc64", PROCESSOR_PPCE500MC64, + MASK_POWERPC64 | MASK_PPC_GFXOPT | MASK_ISEL) +RS6000_CPU ("e5500", PROCESSOR_PPCE5500, + MASK_POWERPC64 | MASK_PPC_GFXOPT | MASK_ISEL) +RS6000_CPU ("e6500", PROCESSOR_PPCE6500, POWERPC_7400_MASK | MASK_POWERPC64 + | MASK_MFCRF | MASK_ISEL) +RS6000_CPU ("860", PROCESSOR_MPCCORE, MASK_SOFT_FLOAT) +RS6000_CPU ("970", PROCESSOR_POWER4, + POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64) +RS6000_CPU ("cell", PROCESSOR_CELL, + POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64) +RS6000_CPU ("ec603e", PROCESSOR_PPC603, MASK_SOFT_FLOAT) +RS6000_CPU ("G3", PROCESSOR_PPC750, MASK_PPC_GFXOPT) +RS6000_CPU ("G4", PROCESSOR_PPC7450, POWERPC_7400_MASK) +RS6000_CPU ("G5", PROCESSOR_POWER4, + POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64) +RS6000_CPU ("titan", PROCESSOR_TITAN, MASK_MULHW | MASK_DLMZB) +RS6000_CPU ("power3", PROCESSOR_PPC630, MASK_PPC_GFXOPT | MASK_POWERPC64) +RS6000_CPU ("power4", PROCESSOR_POWER4, MASK_POWERPC64 | MASK_PPC_GPOPT + | MASK_PPC_GFXOPT | MASK_MFCRF) +RS6000_CPU ("power5", PROCESSOR_POWER5, MASK_POWERPC64 | MASK_PPC_GPOPT + | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB) +RS6000_CPU ("power5+", PROCESSOR_POWER5, MASK_POWERPC64 | MASK_PPC_GPOPT + | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB | 
MASK_FPRND)
+RS6000_CPU ("power6", PROCESSOR_POWER6, MASK_POWERPC64 | MASK_PPC_GPOPT
+	    | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND
+	    | MASK_CMPB | MASK_DFP | MASK_RECIP_PRECISION)
+RS6000_CPU ("power6x", PROCESSOR_POWER6, MASK_POWERPC64 | MASK_PPC_GPOPT
+	    | MASK_PPC_GFXOPT | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND
+	    | MASK_CMPB | MASK_DFP | MASK_RECIP_PRECISION)
+RS6000_CPU ("power7", PROCESSOR_POWER7, MASK_POWERPC64 | ISA_2_6_MASKS_SERVER)
+RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER
+	    | OPTION_MASK_HTM)
+RS6000_CPU ("power9", PROCESSOR_POWER9, MASK_POWERPC64 | ISA_3_0_MASKS_SERVER
+	    | OPTION_MASK_HTM)
+RS6000_CPU ("power10", PROCESSOR_POWER10, MASK_POWERPC64 | ISA_3_1_MASKS_SERVER)
+RS6000_CPU ("powerpc", PROCESSOR_POWERPC, 0)
+RS6000_CPU ("powerpc64", PROCESSOR_POWERPC64, MASK_PPC_GFXOPT | MASK_POWERPC64)
+RS6000_CPU ("powerpc64le", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER
+	    | OPTION_MASK_HTM)
+RS6000_CPU ("rs64", PROCESSOR_RS64A, MASK_PPC_GFXOPT | MASK_POWERPC64)
+/* TODO: decide whether other AT_HWCAP/AT_HWCAP2 features (true little-endian,
+   PPC_FEATURE2_HAS_TAR, etc.) should be reflected in this entry; it is not
+   yet clear where PPC_FEATURE2_HAS_TAR would be consumed.  */
+RS6000_CPU ("a2p", PROCESSOR_PPCA2P,
+	    OPTION_MASK_CMPB
+	    | OPTION_MASK_ISEL
+	    | OPTION_MASK_MFCRF
+	    | OPTION_MASK_MULHW
+	    | OPTION_MASK_PCREL
+	    | OPTION_MASK_POPCNTB
+	    | OPTION_MASK_POPCNTD
+	    | OPTION_MASK_SOFT_FLOAT)
diff --git a/software/gcc/gcc/config/rs6000/rs6000-opts.h b/software/gcc/gcc/config/rs6000/rs6000-opts.h
new file mode 100644
index 0000000..c47431f
--- /dev/null
+++ b/software/gcc/gcc/config/rs6000/rs6000-opts.h
@@ -0,0 +1,156 @@
+/* Definitions of target machine needed for option handling for GNU compiler,
+   for IBM RS/6000.
+   Copyright (C) 2010-2021 Free Software Foundation, Inc.
+   Contributed by Michael Meissner (meissner@linux.vnet.ibm.com)
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef RS6000_OPTS_H
+#define RS6000_OPTS_H
+
+/* Processor type.  Order must match cpu attribute in MD file.
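+
+   That is, the values of the "cpu" instruction attribute in rs6000.md
+   must list these processors in the same order; a new entry here (such
+   as PROCESSOR_PPCA2P below) is expected to have a matching attribute
+   value there.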
*/ +enum processor_type + { + PROCESSOR_PPC601, + PROCESSOR_PPC603, + PROCESSOR_PPC604, + PROCESSOR_PPC604e, + PROCESSOR_PPC620, + PROCESSOR_PPC630, + + PROCESSOR_PPC750, + PROCESSOR_PPC7400, + PROCESSOR_PPC7450, + + PROCESSOR_PPC403, + PROCESSOR_PPC405, + PROCESSOR_PPC440, + PROCESSOR_PPC476, + + PROCESSOR_PPC8540, + PROCESSOR_PPC8548, + PROCESSOR_PPCE300C2, + PROCESSOR_PPCE300C3, + PROCESSOR_PPCE500MC, + PROCESSOR_PPCE500MC64, + PROCESSOR_PPCE5500, + PROCESSOR_PPCE6500, + + PROCESSOR_POWER4, + PROCESSOR_POWER5, + PROCESSOR_POWER6, + PROCESSOR_POWER7, + PROCESSOR_POWER8, + PROCESSOR_POWER9, + PROCESSOR_POWER10, + + PROCESSOR_RS64A, + PROCESSOR_MPCCORE, + PROCESSOR_CELL, + PROCESSOR_PPCA2, + PROCESSOR_PPCA2P, + PROCESSOR_TITAN +}; + + +/* Types of costly dependences. */ +enum rs6000_dependence_cost +{ + max_dep_latency = 1000, + no_dep_costly, + all_deps_costly, + true_store_to_load_dep_costly, + store_to_load_dep_costly +}; + +/* Types of nop insertion schemes in sched target hook sched_finish. */ +enum rs6000_nop_insertion +{ + sched_finish_regroup_exact = 1000, + sched_finish_pad_groups, + sched_finish_none +}; + +/* Dispatch group termination caused by an insn. */ +enum group_termination +{ + current_group, + previous_group +}; + +/* Enumeration to give which calling sequence to use. */ +enum rs6000_abi { + ABI_NONE, + ABI_AIX, /* IBM's AIX, or Linux ELFv1 */ + ABI_ELFv2, /* Linux ELFv2 ABI */ + ABI_V4, /* System V.4/eabi */ + ABI_DARWIN /* Apple's Darwin (OS X kernel) */ +}; + +/* Small data support types. */ +enum rs6000_sdata_type { + SDATA_NONE, /* No small data support. */ + SDATA_DATA, /* Just put data in .sbss/.sdata, don't use relocs. */ + SDATA_SYSV, /* Use r13 to point to .sdata/.sbss. */ + SDATA_EABI /* Use r13 like above, r2 points to .sdata2/.sbss2. */ +}; + +/* Type of traceback to use. */ +enum rs6000_traceback_type { + traceback_default = 0, + traceback_none, + traceback_part, + traceback_full +}; + +/* Code model for 64-bit linux. + small: 16-bit toc offsets. + medium: 32-bit toc offsets, static data and code within 2G of TOC pointer. + large: 32-bit toc offsets, no limit on static data and code. */ +enum rs6000_cmodel { + CMODEL_SMALL, + CMODEL_MEDIUM, + CMODEL_LARGE +}; + +/* Describe which vector unit to use for a given machine mode. The + VECTOR_MEM_* and VECTOR_UNIT_* macros assume that Altivec, VSX, and + P8_VECTOR are contiguous. */ +enum rs6000_vector { + VECTOR_NONE, /* Type is not a vector or not supported */ + VECTOR_ALTIVEC, /* Use altivec for vector processing */ + VECTOR_VSX, /* Use VSX for vector processing */ + VECTOR_P8_VECTOR /* Use ISA 2.07 VSX for vector processing */ +}; + +/* Where to get the canary for the stack protector. */ +enum stack_protector_guard { + SSP_TLS, /* per-thread canary in TLS block */ + SSP_GLOBAL /* global canary */ +}; + +/* No enumeration is defined to index the -mcpu= values (entries in + processor_target_table), with the type int being used instead, but + we need to distinguish the special "native" value. */ +#define RS6000_CPU_OPTION_NATIVE -1 + +#endif diff --git a/software/gcc/gcc/config/rs6000/rs6000.c b/software/gcc/gcc/config/rs6000/rs6000.c new file mode 100644 index 0000000..b3a597b --- /dev/null +++ b/software/gcc/gcc/config/rs6000/rs6000.c @@ -0,0 +1,28617 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +/* Subroutines used for code generation on IBM RS/6000. + Copyright (C) 1991-2021 Free Software Foundation, Inc. + Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) + + This file is part of GCC. 
+ + GCC is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3, or (at your + option) any later version. + + GCC is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + License for more details. + + You should have received a copy of the GNU General Public License + along with GCC; see the file COPYING3. If not see + . */ + +#define IN_TARGET_CODE 1 + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "backend.h" +#include "rtl.h" +#include "tree.h" +#include "memmodel.h" +#include "gimple.h" +#include "cfghooks.h" +#include "cfgloop.h" +#include "df.h" +#include "tm_p.h" +#include "stringpool.h" +#include "expmed.h" +#include "optabs.h" +#include "regs.h" +#include "ira.h" +#include "recog.h" +#include "cgraph.h" +#include "diagnostic-core.h" +#include "insn-attr.h" +#include "flags.h" +#include "alias.h" +#include "fold-const.h" +#include "attribs.h" +#include "stor-layout.h" +#include "calls.h" +#include "print-tree.h" +#include "varasm.h" +#include "explow.h" +#include "expr.h" +#include "output.h" +#include "common/common-target.h" +#include "langhooks.h" +#include "reload.h" +#include "sched-int.h" +#include "gimplify.h" +#include "gimple-fold.h" +#include "gimple-iterator.h" +#include "gimple-walk.h" +#include "ssa.h" +#include "tree-vectorizer.h" +#include "tree-ssa-propagate.h" +#include "intl.h" +#include "tm-constrs.h" +#include "target-globals.h" +#include "builtins.h" +#include "tree-vector-builder.h" +#include "context.h" +#include "tree-pass.h" +#include "except.h" +#if TARGET_XCOFF +#include "xcoffout.h" /* get declarations of xcoff_*_section_name */ +#endif +#include "case-cfn-macros.h" +#include "ppc-auxv.h" +#include "rs6000-internal.h" +#include "rs6000-builtins.h" +#include "opts.h" + +/* This file should be included last. */ +#include "target-def.h" + + /* Set -mabi=ieeelongdouble on some old targets. In the future, power server + systems will also set long double to be IEEE 128-bit. AIX and Darwin + explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so + those systems will not pick up this default. This needs to be after all + of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are + properly defined. */ +#ifndef TARGET_IEEEQUAD_DEFAULT +#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD) +#define TARGET_IEEEQUAD_DEFAULT 1 +#else +#define TARGET_IEEEQUAD_DEFAULT 0 +#endif +#endif + +/* Don't enable PC-relative addressing if the target does not support it. */ +#ifndef PCREL_SUPPORTED_BY_OS +#define PCREL_SUPPORTED_BY_OS 0 +#endif + +/* Support targetm.vectorize.builtin_mask_for_load. */ +tree altivec_builtin_mask_for_load; + +#ifdef USING_ELFOS_H +/* Counter for labels which are to be placed in .fixup. */ +int fixuplabelno = 0; +#endif + +/* Whether to use variant of AIX ABI for PowerPC64 Linux. */ +int dot_symbols; + +/* Specify the machine mode that pointers have. After generation of rtl, the + compiler makes no further distinction between pointers and any other objects + of this machine mode. */ +scalar_int_mode rs6000_pmode; + +#if TARGET_ELF +/* Note whether IEEE 128-bit floating point was passed or returned, either as + the __float128/_Float128 explicit type, or when long double is IEEE 128-bit + floating point. 
We changed the default C++ mangling for these types and we
+   may want to generate a weak alias of the old mangling (U10__float128) to
+   the new mangling (u9__ieee128).  */
+bool rs6000_passes_ieee128 = false;
+#endif
+
+/* Track use of r13 in 64-bit AIX TLS.  */
+static bool xcoff_tls_exec_model_detected = false;
+
+/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
+   name used in current releases (i.e. u9__ieee128).  */
+static bool ieee128_mangling_gcc_8_1;
+
+/* Width in bits of a pointer.  */
+unsigned rs6000_pointer_size;
+
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
+# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
+# endif
+/* Flag whether floating point values have been passed/returned.
+   Note that this doesn't say whether fprs are used, since the
+   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
+   should be set for soft-float values passed in gprs and ieee128
+   values passed in vsx registers.  */
+bool rs6000_passes_float = false;
+bool rs6000_passes_long_double = false;
+/* Flag whether vector values have been passed/returned.  */
+bool rs6000_passes_vector = false;
+/* Flag whether small (<= 8 byte) structures have been returned.  */
+bool rs6000_returns_struct = false;
+#endif
+
+/* Value is TRUE if register/mode pair is acceptable.  */
+static bool rs6000_hard_regno_mode_ok_p
+  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Maximum number of registers needed for a given register class and mode.  */
+unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
+
+/* How many registers are needed for a given register and mode.  */
+unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Map register number to register class.  */
+enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];
+
+static int dbg_cost_ctrl;
+
+/* Built-in types.  */
+tree rs6000_builtin_types[RS6000_BTI_MAX];
+tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
+
+/* Flag to say the TOC is initialized.  */
+int toc_initialized, need_toc_init;
+char toc_label_name[10];
+
+/* Cached value of rs6000_variable_issue.  This is cached in
+   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
+static short cached_can_issue_more;
+
+static GTY(()) section *read_only_data_section;
+static GTY(()) section *private_data_section;
+static GTY(()) section *tls_data_section;
+static GTY(()) section *tls_private_data_section;
+static GTY(()) section *read_only_private_data_section;
+static GTY(()) section *sdata2_section;
+
+section *toc_section = 0;
+
+/* Describe the vector unit used for modes.  */
+enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
+enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
+
+/* Register classes for various constraints that are based on the target
+   switches.  */
+enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
+
+/* Describe the alignment of a vector.  */
+int rs6000_vector_align[NUM_MACHINE_MODES];
+
+/* Map selected modes to types for builtins.  */
+tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
+
+/* What modes to automatically generate reciprocal divide estimate (fre) and
+   reciprocal sqrt (frsqrte) for.  */
+unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
+
+/* Masks to determine which reciprocal estimate instructions to generate
+   automatically.  */
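+
+/* A worked example of how the combination masks below compose:
+   RECIP_ALL is the OR of the eight individual bits, i.e. 0x0ff, and
+   RECIP_LOW_PRECISION masks out the double-precision rsqrt bits:
+   0x0ff & ~(0x020 | 0x080) = 0x05f, leaving the SF/V4SF rsqrt
+   estimates and all four divide estimates enabled.  */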
+enum rs6000_recip_mask {
+  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
+  RECIP_DF_DIV		= 0x002,
+  RECIP_V4SF_DIV	= 0x004,
+  RECIP_V2DF_DIV	= 0x008,
+
+  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
+  RECIP_DF_RSQRT	= 0x020,
+  RECIP_V4SF_RSQRT	= 0x040,
+  RECIP_V2DF_RSQRT	= 0x080,
+
+  /* Various combinations of flags for -mrecip=xxx.  */
+  RECIP_NONE		= 0,
+  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
+			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
+			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),
+
+  RECIP_HIGH_PRECISION	= RECIP_ALL,
+
+  /* On low precision machines like the power5, don't enable double precision
+     reciprocal square root estimate, since it isn't accurate enough.  */
+  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
+};
+
+/* -mrecip options.  */
+static struct
+{
+  const char *string;		/* option name.  */
+  unsigned int mask;		/* mask bits to set.  */
+} recip_options[] = {
+  { "all",	 RECIP_ALL },
+  { "none",	 RECIP_NONE },
+  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
+		  | RECIP_V2DF_DIV) },
+  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
+  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
+  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
+		  | RECIP_V2DF_RSQRT) },
+  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
+  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
+};
+
+/* On PowerPC, we have a limited number of target clones that we care about
+   which means we can use an array to hold the options, rather than having more
+   elaborate data structures to identify each possible variation.  Order the
+   clones from the default to the highest ISA.  */
+enum {
+  CLONE_DEFAULT		= 0,		/* default clone.  */
+  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
+  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
+  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
+  CLONE_ISA_3_00,			/* ISA 3.0 (power9).  */
+  CLONE_ISA_3_1,			/* ISA 3.1 (power10).  */
+  CLONE_MAX
+};
+
+/* Map compiler ISA bits into HWCAP names.  */
+struct clone_map {
+  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask.  */
+  const char *name;		/* name to use in __builtin_cpu_supports.  */
+};
+
+static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
+  { 0, "" },				/* Default options.  */
+  { OPTION_MASK_CMPB, "arch_2_05" },	/* ISA 2.05 (power6).  */
+  { OPTION_MASK_POPCNTD, "arch_2_06" },	/* ISA 2.06 (power7).  */
+  { OPTION_MASK_P8_VECTOR, "arch_2_07" },	/* ISA 2.07 (power8).  */
+  { OPTION_MASK_P9_VECTOR, "arch_3_00" },	/* ISA 3.0 (power9).  */
+  { OPTION_MASK_POWER10, "arch_3_1" },	/* ISA 3.1 (power10).  */
+};
+
+
+/* Newer LIBCs explicitly export this symbol to declare that they provide
+   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
+   reference to this symbol whenever we expand a CPU builtin, so that
+   we never link against an old LIBC.  */
+const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";
+
+/* True if we have expanded a CPU builtin.  */
+bool cpu_builtin_p = false;
+
+/* Pointer to function (in rs6000-c.c) that can define or undefine target
+   macros that have changed.  Languages that don't support the preprocessor
+   don't link in rs6000-c.c, so we can't call it directly.  */
+void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
+
+/* Simplify register classes into simpler classifications.  We assume
+   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
+   check for standard register classes (gpr/floating/altivec/vsx) and
+   floating/vector classes (float/altivec/vsx).  */
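+
+/* An illustrative user-level sketch (not part of GCC itself) of what
+   the clone map above enables:
+
+     __attribute__ ((target_clones ("cpu=power9", "default")))
+     long mod_func (long a, long b) { return a % b; }
+
+   The runtime resolver picks the power9 clone when
+   __builtin_cpu_supports ("arch_3_00") is true, i.e. the same HWCAP2
+   name recorded in rs6000_clone_map.  */
+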
+enum rs6000_reg_type {
+  NO_REG_TYPE,
+  PSEUDO_REG_TYPE,
+  GPR_REG_TYPE,
+  VSX_REG_TYPE,
+  ALTIVEC_REG_TYPE,
+  FPR_REG_TYPE,
+  SPR_REG_TYPE,
+  CR_REG_TYPE
+};
+
+/* Map register class to register type.  */
+static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
+
+/* First/last register type for the 'normal' register types (i.e. general
+   purpose, floating point, altivec, and VSX registers).  */
+#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)
+
+#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
+
+
+/* Register classes we care about for secondary reload and for legitimate
+   address checking.  We only need to worry about GPR, FPR, and Altivec
+   registers here, along with an ANY field that is the OR of the 3 register
+   classes.  */
+
+enum rs6000_reload_reg_type {
+  RELOAD_REG_GPR,		/* General purpose registers.  */
+  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
+  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
+  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
+  N_RELOAD_REG
+};
+
+/* For setting up register classes, loop through the 3 register classes mapping
+   into real registers, and skip the ANY class, which is just an OR of the
+   bits.  */
+#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
+#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
+
+/* Map reload register type to a register in the register class.  */
+struct reload_reg_map_type {
+  const char *name;		/* Register class name.  */
+  int reg;			/* Register in the register class.  */
+};
+
+static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
+  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
+  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
+  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
+  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
+};
+
+/* Mask bits for each register class, indexed per mode.  Historically the
+   compiler has been more restrictive about which types can do PRE_MODIFY
+   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
+   two.  */
+typedef unsigned char addr_mask_type;
+
+#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
+#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
+#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
+#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
+#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
+#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
+#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
+#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */
+
+/* Reload insns and masks of valid addressing modes, per register type.  */
+struct rs6000_reg_addr {
+  enum insn_code reload_load;		/* INSN to reload for loading.  */
+  enum insn_code reload_store;		/* INSN to reload for storing.  */
+  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
+  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
+  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
+  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
+  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
+};
+
+static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
+
+/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
+static inline bool
+mode_supports_pre_incdec_p (machine_mode mode)
+{
+  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
+	  != 0);
+}
+
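+/* For instance (an informal mapping, not a per-mode guarantee):
+   RELOAD_REG_PRE_INCDEC corresponds to the D-form update instructions
+   such as lwzu/stwu, while RELOAD_REG_PRE_MODIFY also admits the
+   indexed update forms such as lwzux/stwux; which modes actually allow
+   them is exactly what the addr_mask bits above record.  */
+
+/* Helper function to say whether a mode supports PRE_MODIFY.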
*/ +static inline bool +mode_supports_pre_modify_p (machine_mode mode) +{ + return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY) + != 0); +} + +/* Return true if we have D-form addressing in altivec registers. */ +static inline bool +mode_supports_vmx_dform (machine_mode mode) +{ + return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0); +} + +/* Return true if we have D-form addressing in VSX registers. This addressing + is more limited than normal d-form addressing in that the offset must be + aligned on a 16-byte boundary. */ +static inline bool +mode_supports_dq_form (machine_mode mode) +{ + return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET) + != 0); +} + +/* Given that there exists at least one variable that is set (produced) + by OUT_INSN and read (consumed) by IN_INSN, return true iff + IN_INSN represents one or more memory store operations and none of + the variables set by OUT_INSN is used by IN_INSN as the address of a + store operation. If either IN_INSN or OUT_INSN does not represent + a "single" RTL SET expression (as loosely defined by the + implementation of the single_set function) or a PARALLEL with only + SETs, CLOBBERs, and USEs inside, this function returns false. + + This rs6000-specific version of store_data_bypass_p checks for + certain conditions that result in assertion failures (and internal + compiler errors) in the generic store_data_bypass_p function and + returns false rather than calling store_data_bypass_p if one of the + problematic conditions is detected. */ + +int +rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn) +{ + rtx out_set, in_set; + rtx out_pat, in_pat; + rtx out_exp, in_exp; + int i, j; + + in_set = single_set (in_insn); + if (in_set) + { + if (MEM_P (SET_DEST (in_set))) + { + out_set = single_set (out_insn); + if (!out_set) + { + out_pat = PATTERN (out_insn); + if (GET_CODE (out_pat) == PARALLEL) + { + for (i = 0; i < XVECLEN (out_pat, 0); i++) + { + out_exp = XVECEXP (out_pat, 0, i); + if ((GET_CODE (out_exp) == CLOBBER) + || (GET_CODE (out_exp) == USE)) + continue; + else if (GET_CODE (out_exp) != SET) + return false; + } + } + } + } + } + else + { + in_pat = PATTERN (in_insn); + if (GET_CODE (in_pat) != PARALLEL) + return false; + + for (i = 0; i < XVECLEN (in_pat, 0); i++) + { + in_exp = XVECEXP (in_pat, 0, i); + if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE)) + continue; + else if (GET_CODE (in_exp) != SET) + return false; + + if (MEM_P (SET_DEST (in_exp))) + { + out_set = single_set (out_insn); + if (!out_set) + { + out_pat = PATTERN (out_insn); + if (GET_CODE (out_pat) != PARALLEL) + return false; + for (j = 0; j < XVECLEN (out_pat, 0); j++) + { + out_exp = XVECEXP (out_pat, 0, j); + if ((GET_CODE (out_exp) == CLOBBER) + || (GET_CODE (out_exp) == USE)) + continue; + else if (GET_CODE (out_exp) != SET) + return false; + } + } + } + } + } + return store_data_bypass_p (out_insn, in_insn); +} + + +/* Processor costs (relative to an add) */ + +const struct processor_costs *rs6000_cost; + +/* Instruction size costs on 32bit processors. 
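+
+   (COSTS_N_INSNS (N) expands to N * 4, measuring cost in units of a
+   single add instruction; every entry in the size tables below is
+   COSTS_N_INSNS (1) because, when optimizing for size, only the
+   instruction count matters, not the latency.)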
*/ +static const +struct processor_costs size32_cost = { + COSTS_N_INSNS (1), /* mulsi */ + COSTS_N_INSNS (1), /* mulsi_const */ + COSTS_N_INSNS (1), /* mulsi_const9 */ + COSTS_N_INSNS (1), /* muldi */ + COSTS_N_INSNS (1), /* divsi */ + COSTS_N_INSNS (1), /* divdi */ + COSTS_N_INSNS (1), /* fp */ + COSTS_N_INSNS (1), /* dmul */ + COSTS_N_INSNS (1), /* sdiv */ + COSTS_N_INSNS (1), /* ddiv */ + 32, /* cache line size */ + 0, /* l1 cache */ + 0, /* l2 cache */ + 0, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction size costs on 64bit processors. */ +static const +struct processor_costs size64_cost = { + COSTS_N_INSNS (1), /* mulsi */ + COSTS_N_INSNS (1), /* mulsi_const */ + COSTS_N_INSNS (1), /* mulsi_const9 */ + COSTS_N_INSNS (1), /* muldi */ + COSTS_N_INSNS (1), /* divsi */ + COSTS_N_INSNS (1), /* divdi */ + COSTS_N_INSNS (1), /* fp */ + COSTS_N_INSNS (1), /* dmul */ + COSTS_N_INSNS (1), /* sdiv */ + COSTS_N_INSNS (1), /* ddiv */ + 128, /* cache line size */ + 0, /* l1 cache */ + 0, /* l2 cache */ + 0, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on RS64A processors. */ +static const +struct processor_costs rs64a_cost = { + COSTS_N_INSNS (20), /* mulsi */ + COSTS_N_INSNS (12), /* mulsi_const */ + COSTS_N_INSNS (8), /* mulsi_const9 */ + COSTS_N_INSNS (34), /* muldi */ + COSTS_N_INSNS (65), /* divsi */ + COSTS_N_INSNS (67), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (4), /* dmul */ + COSTS_N_INSNS (31), /* sdiv */ + COSTS_N_INSNS (31), /* ddiv */ + 128, /* cache line size */ + 128, /* l1 cache */ + 2048, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on MPCCORE processors. */ +static const +struct processor_costs mpccore_cost = { + COSTS_N_INSNS (2), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (2), /* muldi */ + COSTS_N_INSNS (6), /* divsi */ + COSTS_N_INSNS (6), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (10), /* sdiv */ + COSTS_N_INSNS (17), /* ddiv */ + 32, /* cache line size */ + 4, /* l1 cache */ + 16, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC403 processors. */ +static const +struct processor_costs ppc403_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (33), /* divsi */ + COSTS_N_INSNS (33), /* divdi */ + COSTS_N_INSNS (11), /* fp */ + COSTS_N_INSNS (11), /* dmul */ + COSTS_N_INSNS (11), /* sdiv */ + COSTS_N_INSNS (11), /* ddiv */ + 32, /* cache line size */ + 4, /* l1 cache */ + 16, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC405 processors. */ +static const +struct processor_costs ppc405_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (3), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (35), /* divsi */ + COSTS_N_INSNS (35), /* divdi */ + COSTS_N_INSNS (11), /* fp */ + COSTS_N_INSNS (11), /* dmul */ + COSTS_N_INSNS (11), /* sdiv */ + COSTS_N_INSNS (11), /* ddiv */ + 32, /* cache line size */ + 16, /* l1 cache */ + 128, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC440 processors. 
*/ +static const +struct processor_costs ppc440_cost = { + COSTS_N_INSNS (3), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (3), /* muldi */ + COSTS_N_INSNS (34), /* divsi */ + COSTS_N_INSNS (34), /* divdi */ + COSTS_N_INSNS (5), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (19), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 256, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC476 processors. */ +static const +struct processor_costs ppc476_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (11), /* divsi */ + COSTS_N_INSNS (11), /* divdi */ + COSTS_N_INSNS (6), /* fp */ + COSTS_N_INSNS (6), /* dmul */ + COSTS_N_INSNS (19), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, /* l1 cache line size */ + 32, /* l1 cache */ + 512, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC601 processors. */ +static const +struct processor_costs ppc601_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (5), /* mulsi_const */ + COSTS_N_INSNS (5), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (36), /* divsi */ + COSTS_N_INSNS (36), /* divdi */ + COSTS_N_INSNS (4), /* fp */ + COSTS_N_INSNS (5), /* dmul */ + COSTS_N_INSNS (17), /* sdiv */ + COSTS_N_INSNS (31), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 256, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC603 processors. */ +static const +struct processor_costs ppc603_cost = { + COSTS_N_INSNS (5), /* mulsi */ + COSTS_N_INSNS (3), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (5), /* muldi */ + COSTS_N_INSNS (37), /* divsi */ + COSTS_N_INSNS (37), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (4), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (33), /* ddiv */ + 32, /* cache line size */ + 8, /* l1 cache */ + 64, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC604 processors. */ +static const +struct processor_costs ppc604_cost = { + COSTS_N_INSNS (4), /* mulsi */ + COSTS_N_INSNS (4), /* mulsi_const */ + COSTS_N_INSNS (4), /* mulsi_const9 */ + COSTS_N_INSNS (4), /* muldi */ + COSTS_N_INSNS (20), /* divsi */ + COSTS_N_INSNS (20), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (32), /* ddiv */ + 32, /* cache line size */ + 16, /* l1 cache */ + 512, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC604e processors. */ +static const +struct processor_costs ppc604e_cost = { + COSTS_N_INSNS (2), /* mulsi */ + COSTS_N_INSNS (2), /* mulsi_const */ + COSTS_N_INSNS (2), /* mulsi_const9 */ + COSTS_N_INSNS (2), /* muldi */ + COSTS_N_INSNS (20), /* divsi */ + COSTS_N_INSNS (20), /* divdi */ + COSTS_N_INSNS (3), /* fp */ + COSTS_N_INSNS (3), /* dmul */ + COSTS_N_INSNS (18), /* sdiv */ + COSTS_N_INSNS (32), /* ddiv */ + 32, /* cache line size */ + 32, /* l1 cache */ + 1024, /* l2 cache */ + 1, /* streams */ + 0, /* SF->DF convert */ +}; + +/* Instruction costs on PPC620 processors. 
*/
+static const
+struct processor_costs ppc620_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPC630 processors. */
+static const
+struct processor_costs ppc630_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (21), /* ddiv */
+ 128, /* cache line size */
+ 64, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on Cell processor. */
+/* COSTS_N_INSNS (1) ~ one add. */
+static const
+struct processor_costs ppccell_cost = {
+ COSTS_N_INSNS (9/2)+2, /* mulsi */
+ COSTS_N_INSNS (6/2), /* mulsi_const */
+ COSTS_N_INSNS (6/2), /* mulsi_const9 */
+ COSTS_N_INSNS (15/2)+2, /* muldi */
+ COSTS_N_INSNS (38/2), /* divsi */
+ COSTS_N_INSNS (70/2), /* divdi */
+ COSTS_N_INSNS (10/2), /* fp */
+ COSTS_N_INSNS (10/2), /* dmul */
+ COSTS_N_INSNS (74/2), /* sdiv */
+ COSTS_N_INSNS (74/2), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 6, /* streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPC750 and PPC7400 processors. */
+static const
+struct processor_costs ppc750_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (17), /* divsi */
+ COSTS_N_INSNS (17), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 1, /* streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPC7450 processors. */
+static const
+struct processor_costs ppc7450_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (23), /* divsi */
+ COSTS_N_INSNS (23), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (21), /* sdiv */
+ COSTS_N_INSNS (35), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPC8540 processors. */
+static const
+struct processor_costs ppc8540_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (29), /* sdiv */
+ COSTS_N_INSNS (29), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 256, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on E300C2 and E300C3 cores.
*/
+static const
+struct processor_costs ppce300c2c3_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+ 32, /* cache line size */
+ 16, /* l1 cache */
+ 16, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPCE500MC processors. */
+static const
+struct processor_costs ppce500mc_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (8), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPCE500MC64 processors. */
+static const
+struct processor_costs ppce500mc64_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPCE5500 processors. */
+static const
+struct processor_costs ppce5500_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (7), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on PPCE6500 processors. */
+static const
+struct processor_costs ppce6500_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (7), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on AppliedMicro Titan processors. */
+static const
+struct processor_costs titan_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (5), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (18), /* divsi */
+ COSTS_N_INSNS (18), /* divdi */
+ COSTS_N_INSNS (10), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (46), /* sdiv */
+ COSTS_N_INSNS (72), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 1, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on POWER4 and POWER5 processors.
*/
+static const
+struct processor_costs power4_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (18), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 8, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on POWER6 processors. */
+static const
+struct processor_costs power6_cost = {
+ COSTS_N_INSNS (8), /* mulsi */
+ COSTS_N_INSNS (8), /* mulsi_const */
+ COSTS_N_INSNS (8), /* mulsi_const9 */
+ COSTS_N_INSNS (8), /* muldi */
+ COSTS_N_INSNS (22), /* divsi */
+ COSTS_N_INSNS (28), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (13), /* sdiv */
+ COSTS_N_INSNS (16), /* ddiv */
+ 128, /* cache line size */
+ 64, /* l1 cache */
+ 2048, /* l2 cache */
+ 16, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on POWER7 processors. */
+static const
+struct processor_costs power7_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (18), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (13), /* sdiv */
+ COSTS_N_INSNS (16), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 256, /* l2 cache */
+ 12, /* prefetch streams */
+ COSTS_N_INSNS (3), /* SF->DF convert */
+};
+
+/* Instruction costs on POWER8 processors. */
+static const
+struct processor_costs power8_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (3), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (35), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (14), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 12, /* prefetch streams */
+ COSTS_N_INSNS (3), /* SF->DF convert */
+};
+
+/* Instruction costs on POWER9 processors. */
+static const
+struct processor_costs power9_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (3), /* muldi */
+ COSTS_N_INSNS (8), /* divsi */
+ COSTS_N_INSNS (12), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (13), /* sdiv */
+ COSTS_N_INSNS (18), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 8, /* prefetch streams */
+ COSTS_N_INSNS (3), /* SF->DF convert */
+};
+
+/* Instruction costs on POWER10 processors. */
+static const
+struct processor_costs power10_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (6), /* divsi */
+ COSTS_N_INSNS (6), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (13), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 16, /* prefetch streams */
+ COSTS_N_INSNS (2), /* SF->DF convert */
+};
+
+/* Instruction costs on POWER A2 processors.
*/
+static const
+struct processor_costs ppca2_cost = {
+ COSTS_N_INSNS (16), /* mulsi */
+ COSTS_N_INSNS (16), /* mulsi_const */
+ COSTS_N_INSNS (16), /* mulsi_const9 */
+ COSTS_N_INSNS (16), /* muldi */
+ COSTS_N_INSNS (22), /* divsi */
+ COSTS_N_INSNS (28), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (59), /* sdiv */
+ COSTS_N_INSNS (72), /* ddiv */
+ 64, /* cache line size */
+ 16, /* l1 cache */
+ 2048, /* l2 cache */
+ 16, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Instruction costs on POWER A2P processors.  These values are guesses,
+ starting from the A2 numbers above; the cost appears to count the stall
+ before a dependent operation can issue, and primarily serves as a hint
+ to help scheduling. */
+static const
+struct processor_costs ppca2p_cost = {
+ COSTS_N_INSNS (16), /* mulsi */
+ COSTS_N_INSNS (16), /* mulsi_const */
+ COSTS_N_INSNS (16), /* mulsi_const9 */
+ COSTS_N_INSNS (16), /* muldi */
+ COSTS_N_INSNS (22), /* divsi */
+ COSTS_N_INSNS (28), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (59), /* sdiv */
+ COSTS_N_INSNS (72), /* ddiv */
+ 32, /* cache line size */
+ 16, /* l1 cache */
+ 2048, /* l2 cache */
+ 0, /* prefetch streams */
+ 0, /* SF->DF convert */
+};
+
+/* Support for -mveclibabi= to control which vector library to use. */
+static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
+
+
+static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
+static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
+static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
+static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
+static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
+static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
+static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
+static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
+static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
+ bool);
+static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
+ unsigned int);
+static bool is_microcoded_insn (rtx_insn *);
+static bool is_nonpipeline_insn (rtx_insn *);
+static bool is_cracked_insn (rtx_insn *);
+static bool is_load_insn (rtx, rtx *);
+static bool is_store_insn (rtx, rtx *);
+static bool set_to_load_agen (rtx_insn *, rtx_insn *);
+static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
+static bool insn_must_be_first_in_group (rtx_insn *);
+static bool insn_must_be_last_in_group (rtx_insn *);
+bool easy_vector_constant (rtx, machine_mode);
+static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
+static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
+#if TARGET_MACHO
+static tree get_prev_label (tree);
+#endif
+static bool rs6000_mode_dependent_address (const_rtx);
+static bool rs6000_debug_mode_dependent_address (const_rtx);
+static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
+static enum reg_class rs6000_secondary_reload_class (enum reg_class,
+ machine_mode, rtx);
+static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
+ machine_mode,
+ rtx);
+static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
+static enum reg_class rs6000_debug_preferred_reload_class (rtx,
+ enum reg_class);
+static bool rs6000_debug_secondary_memory_needed (machine_mode,
+ reg_class_t,
+ reg_class_t);
+static bool rs6000_debug_can_change_mode_class (machine_mode,
+ machine_mode,
+ reg_class_t);
+
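+/* The rs6000_debug_* routines declared above are tracing variants of the
+ corresponding normal routines.  When -mdebug=addr or -mdebug=cost is in
+ effect, the option override code redirects the function pointers below
+ to the debug variants, so each query is also reported on stderr. */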
+static bool (*rs6000_mode_dependent_address_ptr) (const_rtx) + = rs6000_mode_dependent_address; + +enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class, + machine_mode, rtx) + = rs6000_secondary_reload_class; + +enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class) + = rs6000_preferred_reload_class; + +const int INSN_NOT_AVAILABLE = -1; + +static void rs6000_print_isa_options (FILE *, int, const char *, + HOST_WIDE_INT); +static void rs6000_print_builtin_options (FILE *, int, const char *, + HOST_WIDE_INT); +static HOST_WIDE_INT rs6000_disable_incompatible_switches (void); + +static enum rs6000_reg_type register_to_reg_type (rtx, bool *); +static bool rs6000_secondary_reload_move (enum rs6000_reg_type, + enum rs6000_reg_type, + machine_mode, + secondary_reload_info *, + bool); +rtl_opt_pass *make_pass_analyze_swaps (gcc::context*); + +/* Hash table stuff for keeping track of TOC entries. */ + +struct GTY((for_user)) toc_hash_struct +{ + /* `key' will satisfy CONSTANT_P; in fact, it will satisfy + ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */ + rtx key; + machine_mode key_mode; + int labelno; +}; + +struct toc_hasher : ggc_ptr_hash +{ + static hashval_t hash (toc_hash_struct *); + static bool equal (toc_hash_struct *, toc_hash_struct *); +}; + +static GTY (()) hash_table *toc_hash_table; + + + +/* Default register names. */ +char rs6000_reg_names[][8] = +{ + /* GPRs */ + "0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "10", "11", "12", "13", "14", "15", + "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + /* FPRs */ + "0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "10", "11", "12", "13", "14", "15", + "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + /* VRs */ + "0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "10", "11", "12", "13", "14", "15", + "16", "17", "18", "19", "20", "21", "22", "23", + "24", "25", "26", "27", "28", "29", "30", "31", + /* lr ctr ca ap */ + "lr", "ctr", "ca", "ap", + /* cr0..cr7 */ + "0", "1", "2", "3", "4", "5", "6", "7", + /* vrsave vscr sfp */ + "vrsave", "vscr", "sfp", +}; + +#ifdef TARGET_REGNAMES +static const char alt_reg_names[][8] = +{ + /* GPRs */ + "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23", + "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31", + /* FPRs */ + "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7", + "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15", + "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23", + "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31", + /* VRs */ + "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7", + "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15", + "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23", + "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31", + /* lr ctr ca ap */ + "lr", "ctr", "ca", "ap", + /* cr0..cr7 */ + "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7", + /* vrsave vscr sfp */ + "vrsave", "vscr", "sfp", +}; +#endif + +/* Table of valid machine attributes. 
*/ + +static const struct attribute_spec rs6000_attribute_table[] = +{ + /* { name, min_len, max_len, decl_req, type_req, fn_type_req, + affects_type_identity, handler, exclude } */ + { "altivec", 1, 1, false, true, false, false, + rs6000_handle_altivec_attribute, NULL }, + { "longcall", 0, 0, false, true, true, false, + rs6000_handle_longcall_attribute, NULL }, + { "shortcall", 0, 0, false, true, true, false, + rs6000_handle_longcall_attribute, NULL }, + { "ms_struct", 0, 0, false, false, false, false, + rs6000_handle_struct_attribute, NULL }, + { "gcc_struct", 0, 0, false, false, false, false, + rs6000_handle_struct_attribute, NULL }, +#ifdef SUBTARGET_ATTRIBUTE_TABLE + SUBTARGET_ATTRIBUTE_TABLE, +#endif + { NULL, 0, 0, false, false, false, false, NULL, NULL } +}; + +#ifndef TARGET_PROFILE_KERNEL +#define TARGET_PROFILE_KERNEL 0 +#endif + +/* Initialize the GCC target structure. */ +#undef TARGET_ATTRIBUTE_TABLE +#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table +#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES +#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes +#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P +#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p + +#undef TARGET_ASM_ALIGNED_DI_OP +#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP + +/* Default unaligned ops are only provided for ELF. Find the ops needed + for non-ELF systems. */ +#ifndef OBJECT_FORMAT_ELF +#if TARGET_XCOFF +/* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on + 64-bit targets. */ +#undef TARGET_ASM_UNALIGNED_HI_OP +#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2," +#undef TARGET_ASM_UNALIGNED_SI_OP +#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4," +#undef TARGET_ASM_UNALIGNED_DI_OP +#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8," +#else +/* For Darwin. */ +#undef TARGET_ASM_UNALIGNED_HI_OP +#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t" +#undef TARGET_ASM_UNALIGNED_SI_OP +#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t" +#undef TARGET_ASM_UNALIGNED_DI_OP +#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t" +#undef TARGET_ASM_ALIGNED_DI_OP +#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t" +#endif +#endif + +/* This hook deals with fixups for relocatable code and DI-mode objects + in 64-bit code. 
*/ +#undef TARGET_ASM_INTEGER +#define TARGET_ASM_INTEGER rs6000_assemble_integer + +#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO +#undef TARGET_ASM_ASSEMBLE_VISIBILITY +#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility +#endif + +#undef TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY +#define TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY \ + rs6000_print_patchable_function_entry + +#undef TARGET_SET_UP_BY_PROLOGUE +#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue + +#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS +#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components +#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB +#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb +#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS +#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components +#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS +#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components +#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS +#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components +#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS +#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components + +#undef TARGET_EXTRA_LIVE_ON_ENTRY +#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry + +#undef TARGET_INTERNAL_ARG_POINTER +#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer + +#undef TARGET_HAVE_TLS +#define TARGET_HAVE_TLS HAVE_AS_TLS + +#undef TARGET_CANNOT_FORCE_CONST_MEM +#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem + +#undef TARGET_DELEGITIMIZE_ADDRESS +#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address + +#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P +#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p + +#undef TARGET_LEGITIMATE_COMBINED_INSN +#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn + +#undef TARGET_ASM_FUNCTION_PROLOGUE +#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue +#undef TARGET_ASM_FUNCTION_EPILOGUE +#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue + +#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA +#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra + +#undef TARGET_ASM_GENERATE_PIC_ADDR_DIFF_VEC +#define TARGET_ASM_GENERATE_PIC_ADDR_DIFF_VEC rs6000_gen_pic_addr_diff_vec + +#undef TARGET_LEGITIMIZE_ADDRESS +#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address + +#undef TARGET_SCHED_VARIABLE_ISSUE +#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue + +#undef TARGET_SCHED_ISSUE_RATE +#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate +#undef TARGET_SCHED_ADJUST_COST +#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost +#undef TARGET_SCHED_ADJUST_PRIORITY +#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority +#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE +#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence +#undef TARGET_SCHED_INIT +#define TARGET_SCHED_INIT rs6000_sched_init +#undef TARGET_SCHED_FINISH +#define TARGET_SCHED_FINISH rs6000_sched_finish +#undef TARGET_SCHED_REORDER +#define TARGET_SCHED_REORDER rs6000_sched_reorder +#undef TARGET_SCHED_REORDER2 +#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2 + +#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD +#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead + +#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD +#define 
TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard + +#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT +#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context +#undef TARGET_SCHED_INIT_SCHED_CONTEXT +#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context +#undef TARGET_SCHED_SET_SCHED_CONTEXT +#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context +#undef TARGET_SCHED_FREE_SCHED_CONTEXT +#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context + +#undef TARGET_SCHED_CAN_SPECULATE_INSN +#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn + +#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD +#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load +#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT +#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \ + rs6000_builtin_support_vector_misalignment +#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE +#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable +#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST +#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ + rs6000_builtin_vectorization_cost +#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE +#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \ + rs6000_preferred_simd_mode +#undef TARGET_VECTORIZE_CREATE_COSTS +#define TARGET_VECTORIZE_CREATE_COSTS rs6000_vectorize_create_costs + +#undef TARGET_LOOP_UNROLL_ADJUST +#define TARGET_LOOP_UNROLL_ADJUST rs6000_loop_unroll_adjust + +#undef TARGET_INIT_BUILTINS +#define TARGET_INIT_BUILTINS rs6000_init_builtins +#undef TARGET_BUILTIN_DECL +#define TARGET_BUILTIN_DECL rs6000_builtin_decl + +#undef TARGET_FOLD_BUILTIN +#define TARGET_FOLD_BUILTIN rs6000_fold_builtin +#undef TARGET_GIMPLE_FOLD_BUILTIN +#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin + +#undef TARGET_EXPAND_BUILTIN +#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin + +#undef TARGET_MANGLE_TYPE +#define TARGET_MANGLE_TYPE rs6000_mangle_type + +#undef TARGET_INIT_LIBFUNCS +#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs + +#if TARGET_MACHO +#undef TARGET_BINDS_LOCAL_P +#define TARGET_BINDS_LOCAL_P darwin_binds_local_p +#endif + +#undef TARGET_MS_BITFIELD_LAYOUT_P +#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p + +#undef TARGET_ASM_OUTPUT_MI_THUNK +#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk + +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true + +#undef TARGET_FUNCTION_OK_FOR_SIBCALL +#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall + +#undef TARGET_REGISTER_MOVE_COST +#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost +#undef TARGET_MEMORY_MOVE_COST +#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost +#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS +#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \ + rs6000_ira_change_pseudo_allocno_class +#undef TARGET_CANNOT_COPY_INSN_P +#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p +#undef TARGET_RTX_COSTS +#define TARGET_RTX_COSTS rs6000_rtx_costs +#undef TARGET_ADDRESS_COST +#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0 +#undef TARGET_INSN_COST +#define TARGET_INSN_COST rs6000_insn_cost + +#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA +#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra + +#undef TARGET_PROMOTE_FUNCTION_MODE +#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode + +#undef 
TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE +#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE rs6000_override_options_after_change + +#undef TARGET_RETURN_IN_MEMORY +#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory + +#undef TARGET_RETURN_IN_MSB +#define TARGET_RETURN_IN_MSB rs6000_return_in_msb + +#undef TARGET_SETUP_INCOMING_VARARGS +#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs + +/* Always strict argument naming on rs6000. */ +#undef TARGET_STRICT_ARGUMENT_NAMING +#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true +#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED +#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true +#undef TARGET_SPLIT_COMPLEX_ARG +#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true +#undef TARGET_MUST_PASS_IN_STACK +#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack +#undef TARGET_PASS_BY_REFERENCE +#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference +#undef TARGET_ARG_PARTIAL_BYTES +#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes +#undef TARGET_FUNCTION_ARG_ADVANCE +#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance +#undef TARGET_FUNCTION_ARG +#define TARGET_FUNCTION_ARG rs6000_function_arg +#undef TARGET_FUNCTION_ARG_PADDING +#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding +#undef TARGET_FUNCTION_ARG_BOUNDARY +#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary + +#undef TARGET_BUILD_BUILTIN_VA_LIST +#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list + +#undef TARGET_EXPAND_BUILTIN_VA_START +#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start + +#undef TARGET_GIMPLIFY_VA_ARG_EXPR +#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg + +#undef TARGET_EH_RETURN_FILTER_MODE +#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode + +#undef TARGET_TRANSLATE_MODE_ATTRIBUTE +#define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute + +#undef TARGET_SCALAR_MODE_SUPPORTED_P +#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p + +#undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P +#define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P \ + rs6000_libgcc_floating_mode_supported_p + +#undef TARGET_VECTOR_MODE_SUPPORTED_P +#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p + +#undef TARGET_FLOATN_MODE +#define TARGET_FLOATN_MODE rs6000_floatn_mode + +#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN +#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn + +#undef TARGET_MD_ASM_ADJUST +#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust + +#undef TARGET_OPTION_OVERRIDE +#define TARGET_OPTION_OVERRIDE rs6000_option_override + +#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION +#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \ + rs6000_builtin_vectorized_function + +#undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION +#define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \ + rs6000_builtin_md_vectorized_function + +#undef TARGET_STACK_PROTECT_GUARD +#define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard + +#if !TARGET_MACHO +#undef TARGET_STACK_PROTECT_FAIL +#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail +#endif + +#ifdef HAVE_AS_TLS +#undef TARGET_ASM_OUTPUT_DWARF_DTPREL +#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel +#endif + +/* Use a 32-bit anchor range. 
This leads to sequences like: + + addis tmp,anchor,high + add dest,tmp,low + + where tmp itself acts as an anchor, and can be shared between + accesses to the same 64k page. */ +#undef TARGET_MIN_ANCHOR_OFFSET +#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1 +#undef TARGET_MAX_ANCHOR_OFFSET +#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff +#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P +#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p +#undef TARGET_USE_BLOCKS_FOR_DECL_P +#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p + +#undef TARGET_BUILTIN_RECIPROCAL +#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal + +#undef TARGET_SECONDARY_RELOAD +#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload +#undef TARGET_SECONDARY_MEMORY_NEEDED +#define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed +#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE +#define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode + +#undef TARGET_LEGITIMATE_ADDRESS_P +#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p + +#undef TARGET_MODE_DEPENDENT_ADDRESS_P +#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p + +#undef TARGET_COMPUTE_PRESSURE_CLASSES +#define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes + +#undef TARGET_CAN_ELIMINATE +#define TARGET_CAN_ELIMINATE rs6000_can_eliminate + +#undef TARGET_CONDITIONAL_REGISTER_USAGE +#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage + +#undef TARGET_SCHED_REASSOCIATION_WIDTH +#define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width + +#undef TARGET_TRAMPOLINE_INIT +#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init + +#undef TARGET_FUNCTION_VALUE +#define TARGET_FUNCTION_VALUE rs6000_function_value + +#undef TARGET_OPTION_VALID_ATTRIBUTE_P +#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p + +#undef TARGET_OPTION_SAVE +#define TARGET_OPTION_SAVE rs6000_function_specific_save + +#undef TARGET_OPTION_RESTORE +#define TARGET_OPTION_RESTORE rs6000_function_specific_restore + +#undef TARGET_OPTION_PRINT +#define TARGET_OPTION_PRINT rs6000_function_specific_print + +#undef TARGET_CAN_INLINE_P +#define TARGET_CAN_INLINE_P rs6000_can_inline_p + +#undef TARGET_SET_CURRENT_FUNCTION +#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function + +#undef TARGET_LEGITIMATE_CONSTANT_P +#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p + +#undef TARGET_VECTORIZE_VEC_PERM_CONST +#define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const + +#undef TARGET_CAN_USE_DOLOOP_P +#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost + +#undef TARGET_PREDICT_DOLOOP_P +#define TARGET_PREDICT_DOLOOP_P rs6000_predict_doloop_p + +#undef TARGET_HAVE_COUNT_REG_DECR_P +#define TARGET_HAVE_COUNT_REG_DECR_P true + +/* 1000000000 is infinite cost in IVOPTs. 
*/ +#undef TARGET_DOLOOP_COST_FOR_GENERIC +#define TARGET_DOLOOP_COST_FOR_GENERIC 1000000000 + +#undef TARGET_DOLOOP_COST_FOR_ADDRESS +#define TARGET_DOLOOP_COST_FOR_ADDRESS 1000000000 + +#undef TARGET_PREFERRED_DOLOOP_MODE +#define TARGET_PREFERRED_DOLOOP_MODE rs6000_preferred_doloop_mode + +#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV +#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv + +#undef TARGET_LIBGCC_CMP_RETURN_MODE +#define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode +#undef TARGET_LIBGCC_SHIFT_COUNT_MODE +#define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode +#undef TARGET_UNWIND_WORD_MODE +#define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode + +#undef TARGET_OFFLOAD_OPTIONS +#define TARGET_OFFLOAD_OPTIONS rs6000_offload_options + +#undef TARGET_C_MODE_FOR_SUFFIX +#define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix + +#undef TARGET_INVALID_BINARY_OP +#define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op + +#undef TARGET_OPTAB_SUPPORTED_P +#define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p + +#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS +#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1 + +#undef TARGET_COMPARE_VERSION_PRIORITY +#define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority + +#undef TARGET_GENERATE_VERSION_DISPATCHER_BODY +#define TARGET_GENERATE_VERSION_DISPATCHER_BODY \ + rs6000_generate_version_dispatcher_body + +#undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER +#define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \ + rs6000_get_function_versions_dispatcher + +#undef TARGET_OPTION_FUNCTION_VERSIONS +#define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions + +#undef TARGET_HARD_REGNO_NREGS +#define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook +#undef TARGET_HARD_REGNO_MODE_OK +#define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok + +#undef TARGET_MODES_TIEABLE_P +#define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p + +#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED +#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \ + rs6000_hard_regno_call_part_clobbered + +#undef TARGET_SLOW_UNALIGNED_ACCESS +#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access + +#undef TARGET_CAN_CHANGE_MODE_CLASS +#define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class + +#undef TARGET_CONSTANT_ALIGNMENT +#define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment + +#undef TARGET_STARTING_FRAME_OFFSET +#define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset + +#if TARGET_ELF && RS6000_WEAK +#undef TARGET_ASM_GLOBALIZE_DECL_NAME +#define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name +#endif + +#undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P +#define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true + +#undef TARGET_MANGLE_DECL_ASSEMBLER_NAME +#define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name + +#undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P +#define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P \ + rs6000_cannot_substitute_mem_equiv_p + +#undef TARGET_INVALID_CONVERSION +#define TARGET_INVALID_CONVERSION rs6000_invalid_conversion + + +/* Processor table. */ +struct rs6000_ptt +{ + const char *const name; /* Canonical processor name. */ + const enum processor_type processor; /* Processor type enum value. */ + const HOST_WIDE_INT target_enable; /* Target flags to enable. 
*/
+};
+
+static struct rs6000_ptt const processor_target_table[] =
+{
+#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
+#include "rs6000-cpus.def"
+#undef RS6000_CPU
+};
+
+/* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
+ name is invalid. */
+
+static int
+rs6000_cpu_name_lookup (const char *name)
+{
+ size_t i;
+
+ if (name != NULL)
+ {
+ for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
+ if (! strcmp (name, processor_target_table[i].name))
+ return (int)i;
+ }
+
+ return -1;
+}
+
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ POWER and PowerPC GPRs hold 32 bits worth;
+ PowerPC64 GPRs and FPRs hold 64 bits worth. */
+
+static int
+rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
+{
+ unsigned HOST_WIDE_INT reg_size;
+
+ /* 128-bit floating point usually takes 2 registers, unless it is IEEE
+ 128-bit floating point that can go in vector registers, which has VSX
+ memory addressing. */
+ if (FP_REGNO_P (regno))
+ reg_size = (VECTOR_MEM_VSX_P (mode) || VECTOR_ALIGNMENT_P (mode)
+ ? UNITS_PER_VSX_WORD
+ : UNITS_PER_FP_WORD);
+
+ else if (ALTIVEC_REGNO_P (regno))
+ reg_size = UNITS_PER_ALTIVEC_WORD;
+
+ else
+ reg_size = UNITS_PER_WORD;
+
+ return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
+}
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE. */
+static int
+rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
+{
+ int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
+
+ if (COMPLEX_MODE_P (mode))
+ mode = GET_MODE_INNER (mode);
+
+ /* Vector pair modes need even/odd VSX register pairs. Only allow vector
+ registers. */
+ if (mode == OOmode)
+ return (TARGET_MMA && VSX_REGNO_P (regno) && (regno & 1) == 0);
+
+ /* MMA accumulator modes need FPR registers divisible by 4. */
+ if (mode == XOmode)
+ return (TARGET_MMA && FP_REGNO_P (regno) && (regno & 3) == 0);
+
+ /* PTImode can only go in GPRs. Quad word memory operations require even/odd
+ register combinations, and we use PTImode where we need to deal with quad
+ word memory operations. Don't allow quad words in the argument or frame
+ pointer registers, just registers 0..31. */
+ if (mode == PTImode)
+ return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
+ && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
+ && ((regno & 1) == 0));
+
+ /* VSX registers that overlap the FPR registers are larger than for non-VSX
+ implementations. Don't allow an item to be split between a FP register
+ and an Altivec register. Allow TImode in all VSX registers if the user
+ asked for it. */
+ if (TARGET_VSX && VSX_REGNO_P (regno)
+ && (VECTOR_MEM_VSX_P (mode)
+ || VECTOR_ALIGNMENT_P (mode)
+ || reg_addr[mode].scalar_in_vmx_p
+ || mode == TImode
+ || (TARGET_VADDUQM && mode == V1TImode)))
+ {
+ if (FP_REGNO_P (regno))
+ return FP_REGNO_P (last_regno);
+
+ if (ALTIVEC_REGNO_P (regno))
+ {
+ if (GET_MODE_SIZE (mode) < 16 && !reg_addr[mode].scalar_in_vmx_p)
+ return 0;
+
+ return ALTIVEC_REGNO_P (last_regno);
+ }
+ }
+
+ /* The GPRs can hold any mode, but values bigger than one register
+ cannot go past R31. */
+ if (INT_REGNO_P (regno))
+ return INT_REGNO_P (last_regno);
+
+ /* The float registers (except for VSX vector modes) can only hold floating
+ modes and DImode.
*/
+ if (FP_REGNO_P (regno))
+ {
+ if (VECTOR_ALIGNMENT_P (mode))
+ return false;
+
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && (mode != TDmode || (regno % 2) == 0)
+ && FP_REGNO_P (last_regno))
+ return 1;
+
+ if (GET_MODE_CLASS (mode) == MODE_INT)
+ {
+ if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
+ return 1;
+
+ if (TARGET_P8_VECTOR && (mode == SImode))
+ return 1;
+
+ if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* The CR register can only hold CC modes. */
+ if (CR_REGNO_P (regno))
+ return GET_MODE_CLASS (mode) == MODE_CC;
+
+ if (CA_REGNO_P (regno))
+ return mode == Pmode || mode == SImode;
+
+ /* AltiVec only in AltiVec registers. */
+ if (ALTIVEC_REGNO_P (regno))
+ return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
+ || mode == V1TImode);
+
+ /* We cannot put non-VSX TImode or PTImode anywhere except general
+ registers, and the value must be able to fit within the register set. */
+
+ return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
+}
+
+/* Implement TARGET_HARD_REGNO_NREGS. */
+
+static unsigned int
+rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
+{
+ return rs6000_hard_regno_nregs[mode][regno];
+}
+
+/* Implement TARGET_HARD_REGNO_MODE_OK. */
+
+static bool
+rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
+{
+ return rs6000_hard_regno_mode_ok_p[mode][regno];
+}
+
+/* Implement TARGET_MODES_TIEABLE_P.
+
+ PTImode cannot tie with other modes because PTImode is restricted to even
+ GPR registers, and TImode can go in any GPR as well as VSX registers (PR
+ 57744).
+
+ Similarly, don't allow OOmode (vector pair, restricted to even VSX
+ registers) or XOmode (vector quad, restricted to FPR registers divisible
+ by 4) to tie with other modes.
+
+ Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
+ 128-bit floating point on VSX systems ties with other vectors. */
+
+static bool
+rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
+{
+ if (mode1 == PTImode || mode1 == OOmode || mode1 == XOmode
+ || mode2 == PTImode || mode2 == OOmode || mode2 == XOmode)
+ return mode1 == mode2;
+
+ if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
+ return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
+ if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
+ return false;
+
+ if (SCALAR_FLOAT_MODE_P (mode1))
+ return SCALAR_FLOAT_MODE_P (mode2);
+ if (SCALAR_FLOAT_MODE_P (mode2))
+ return false;
+
+ if (GET_MODE_CLASS (mode1) == MODE_CC)
+ return GET_MODE_CLASS (mode2) == MODE_CC;
+ if (GET_MODE_CLASS (mode2) == MODE_CC)
+ return false;
+
+ return true;
+}
+
+/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
+
+static bool
+rs6000_hard_regno_call_part_clobbered (unsigned int, unsigned int regno,
+ machine_mode mode)
+{
+ if (TARGET_32BIT
+ && TARGET_POWERPC64
+ && GET_MODE_SIZE (mode) > 4
+ && INT_REGNO_P (regno))
+ return true;
+
+ if (TARGET_VSX
+ && FP_REGNO_P (regno)
+ && GET_MODE_SIZE (mode) > 8
+ && !FLOAT128_2REG_P (mode))
+ return true;
+
+ return false;
+}
+
+/* Print interesting facts about registers.
*/
+static void
+rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
+{
+ int r, m;
+
+ for (r = first_regno; r <= last_regno; ++r)
+ {
+ const char *comma = "";
+ int len;
+
+ if (first_regno == last_regno)
+ fprintf (stderr, "%s:\t", reg_name);
+ else
+ fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
+
+ len = 8;
+ for (m = 0; m < NUM_MACHINE_MODES; ++m)
+ if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
+ {
+ if (len > 70)
+ {
+ fprintf (stderr, ",\n\t");
+ len = 8;
+ comma = "";
+ }
+
+ if (rs6000_hard_regno_nregs[m][r] > 1)
+ len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
+ rs6000_hard_regno_nregs[m][r]);
+ else
+ len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
+
+ comma = ", ";
+ }
+
+ if (call_used_or_fixed_reg_p (r))
+ {
+ if (len > 70)
+ {
+ fprintf (stderr, ",\n\t");
+ len = 8;
+ comma = "";
+ }
+
+ len += fprintf (stderr, "%s%s", comma, "call-used");
+ comma = ", ";
+ }
+
+ if (fixed_regs[r])
+ {
+ if (len > 70)
+ {
+ fprintf (stderr, ",\n\t");
+ len = 8;
+ comma = "";
+ }
+
+ len += fprintf (stderr, "%s%s", comma, "fixed");
+ comma = ", ";
+ }
+
+ if (len > 70)
+ {
+ fprintf (stderr, ",\n\t");
+ comma = "";
+ }
+
+ len += fprintf (stderr, "%sreg-class = %s", comma,
+ reg_class_names[(int)rs6000_regno_regclass[r]]);
+ comma = ", ";
+
+ if (len > 70)
+ {
+ fprintf (stderr, ",\n\t");
+ comma = "";
+ }
+
+ fprintf (stderr, "%sregno = %d\n", comma, r);
+ }
+}
+
+static const char *
+rs6000_debug_vector_unit (enum rs6000_vector v)
+{
+ const char *ret;
+
+ switch (v)
+ {
+ case VECTOR_NONE: ret = "none"; break;
+ case VECTOR_ALTIVEC: ret = "altivec"; break;
+ case VECTOR_VSX: ret = "vsx"; break;
+ case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
+ default: ret = "unknown"; break;
+ }
+
+ return ret;
+}
+
+/* Inner function printing just the address mask for a particular reload
+ register class. */
+DEBUG_FUNCTION char *
+rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
+{
+ static char ret[8];
+ char *p = ret;
+
+ if ((mask & RELOAD_REG_VALID) != 0)
+ *p++ = 'v';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ if ((mask & RELOAD_REG_MULTIPLE) != 0)
+ *p++ = 'm';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ if ((mask & RELOAD_REG_INDEXED) != 0)
+ *p++ = 'i';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
+ *p++ = 'O';
+ else if ((mask & RELOAD_REG_OFFSET) != 0)
+ *p++ = 'o';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
+ *p++ = '+';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
+ *p++ = '+';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ if ((mask & RELOAD_REG_AND_M16) != 0)
+ *p++ = '&';
+ else if (keep_spaces)
+ *p++ = ' ';
+
+ *p = '\0';
+
+ return ret;
+}
+
+/* Print the address masks in a human readable fashion. */
+DEBUG_FUNCTION void
+rs6000_debug_print_mode (ssize_t m)
+{
+ ssize_t rc;
+ int spaces = 0;
+
+ fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
+ for (rc = 0; rc < N_RELOAD_REG; rc++)
+ fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
+ rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
+
+ if ((reg_addr[m].reload_store != CODE_FOR_nothing)
+ || (reg_addr[m].reload_load != CODE_FOR_nothing))
+ {
+ fprintf (stderr, "%*s Reload=%c%c", spaces, "",
+ (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
+ (reg_addr[m].reload_load != CODE_FOR_nothing) ?
'l' : '*'); + spaces = 0; + } + else + spaces += strlen (" Reload=sl"); + + if (reg_addr[m].scalar_in_vmx_p) + { + fprintf (stderr, "%*s Upper=y", spaces, ""); + spaces = 0; + } + else + spaces += strlen (" Upper=y"); + + if (rs6000_vector_unit[m] != VECTOR_NONE + || rs6000_vector_mem[m] != VECTOR_NONE) + { + fprintf (stderr, "%*s vector: arith=%-10s mem=%s", + spaces, "", + rs6000_debug_vector_unit (rs6000_vector_unit[m]), + rs6000_debug_vector_unit (rs6000_vector_mem[m])); + } + + fputs ("\n", stderr); +} + +#define DEBUG_FMT_ID "%-32s= " +#define DEBUG_FMT_D DEBUG_FMT_ID "%d\n" +#define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: " +#define DEBUG_FMT_S DEBUG_FMT_ID "%s\n" + +/* Print various interesting information with -mdebug=reg. */ +static void +rs6000_debug_reg_global (void) +{ + static const char *const tf[2] = { "false", "true" }; + const char *nl = (const char *)0; + int m; + size_t m1, m2, v; + char costly_num[20]; + char nop_num[20]; + char flags_buffer[40]; + const char *costly_str; + const char *nop_str; + const char *trace_str; + const char *abi_str; + const char *cmodel_str; + struct cl_target_option cl_opts; + + /* Modes we want tieable information on. */ + static const machine_mode print_tieable_modes[] = { + QImode, + HImode, + SImode, + DImode, + TImode, + PTImode, + SFmode, + DFmode, + TFmode, + IFmode, + KFmode, + SDmode, + DDmode, + TDmode, + V2SImode, + V2SFmode, + V16QImode, + V8HImode, + V4SImode, + V2DImode, + V1TImode, + V32QImode, + V16HImode, + V8SImode, + V4DImode, + V2TImode, + V4SFmode, + V2DFmode, + V8SFmode, + V4DFmode, + OOmode, + XOmode, + CCmode, + CCUNSmode, + CCEQmode, + CCFPmode, + }; + + /* Virtual regs we are interested in. */ + const static struct { + int regno; /* register number. */ + const char *name; /* register name. 
*/
+ } virtual_regs[] = {
+ { STACK_POINTER_REGNUM, "stack pointer:" },
+ { TOC_REGNUM, "toc: " },
+ { STATIC_CHAIN_REGNUM, "static chain: " },
+ { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
+ { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
+ { ARG_POINTER_REGNUM, "arg pointer: " },
+ { FRAME_POINTER_REGNUM, "frame pointer:" },
+ { FIRST_PSEUDO_REGISTER, "first pseudo: " },
+ { FIRST_VIRTUAL_REGISTER, "first virtual:" },
+ { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
+ { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
+ { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
+ { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
+ { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
+ { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
+ { LAST_VIRTUAL_REGISTER, "last virtual: " },
+ };
+
+ fputs ("\nHard register information:\n", stderr);
+ rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
+ rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
+ rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
+ LAST_ALTIVEC_REGNO,
+ "vs");
+ rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
+ rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
+ rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
+ rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
+ rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
+ rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
+
+ fputs ("\nVirtual/stack/frame registers:\n", stderr);
+ for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
+ fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
+
+ fprintf (stderr,
+ "\n"
+ "d reg_class = %s\n"
+ "f reg_class = %s\n"
+ "v reg_class = %s\n"
+ "wa reg_class = %s\n"
+ "we reg_class = %s\n"
+ "wr reg_class = %s\n"
+ "wx reg_class = %s\n"
+ "wA reg_class = %s\n"
+ "\n",
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
+ reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
+
+ nl = "\n";
+ for (m = 0; m < NUM_MACHINE_MODES; ++m)
+ rs6000_debug_print_mode (m);
+
+ fputs ("\n", stderr);
+
+ for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
+ {
+ machine_mode mode1 = print_tieable_modes[m1];
+ bool first_time = true;
+
+ nl = (const char *)0;
+ for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
+ {
+ machine_mode mode2 = print_tieable_modes[m2];
+ if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
+ {
+ if (first_time)
+ {
+ fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
+ nl = "\n";
+ first_time = false;
+ }
+
+ fprintf (stderr, " %s", GET_MODE_NAME (mode2));
+ }
+ }
+
+ if (!first_time)
+ fputs ("\n", stderr);
+ }
+
+ if (nl)
+ fputs (nl, stderr);
+
+ if (rs6000_recip_control)
+ {
+ fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
+
+ for (m = 0; m < NUM_MACHINE_MODES; ++m)
+ if (rs6000_recip_bits[m])
+ {
+ fprintf (stderr,
+ "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
+ GET_MODE_NAME (m),
+ (RS6000_RECIP_AUTO_RE_P (m)
+ ? "auto"
+ : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
+ (RS6000_RECIP_AUTO_RSQRTE_P (m)
+ ? "auto"
+ : (RS6000_RECIP_HAVE_RSQRTE_P (m) ?
"have" : "none"))); + } + + fputs ("\n", stderr); + } + + if (rs6000_cpu_index >= 0) + { + const char *name = processor_target_table[rs6000_cpu_index].name; + HOST_WIDE_INT flags + = processor_target_table[rs6000_cpu_index].target_enable; + + sprintf (flags_buffer, "-mcpu=%s flags", name); + rs6000_print_isa_options (stderr, 0, flags_buffer, flags); + } + else + fprintf (stderr, DEBUG_FMT_S, "cpu", ""); + + if (rs6000_tune_index >= 0) + { + const char *name = processor_target_table[rs6000_tune_index].name; + HOST_WIDE_INT flags + = processor_target_table[rs6000_tune_index].target_enable; + + sprintf (flags_buffer, "-mtune=%s flags", name); + rs6000_print_isa_options (stderr, 0, flags_buffer, flags); + } + else + fprintf (stderr, DEBUG_FMT_S, "tune", ""); + + cl_target_option_save (&cl_opts, &global_options, &global_options_set); + rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags", + rs6000_isa_flags); + + rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit", + rs6000_isa_flags_explicit); + + rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask", + rs6000_builtin_mask); + + rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT); + + fprintf (stderr, DEBUG_FMT_S, "--with-cpu default", + OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : ""); + + switch (rs6000_sched_costly_dep) + { + case max_dep_latency: + costly_str = "max_dep_latency"; + break; + + case no_dep_costly: + costly_str = "no_dep_costly"; + break; + + case all_deps_costly: + costly_str = "all_deps_costly"; + break; + + case true_store_to_load_dep_costly: + costly_str = "true_store_to_load_dep_costly"; + break; + + case store_to_load_dep_costly: + costly_str = "store_to_load_dep_costly"; + break; + + default: + costly_str = costly_num; + sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep); + break; + } + + fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str); + + switch (rs6000_sched_insert_nops) + { + case sched_finish_regroup_exact: + nop_str = "sched_finish_regroup_exact"; + break; + + case sched_finish_pad_groups: + nop_str = "sched_finish_pad_groups"; + break; + + case sched_finish_none: + nop_str = "sched_finish_none"; + break; + + default: + nop_str = nop_num; + sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops); + break; + } + + fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str); + + switch (rs6000_sdata) + { + default: + case SDATA_NONE: + break; + + case SDATA_DATA: + fprintf (stderr, DEBUG_FMT_S, "sdata", "data"); + break; + + case SDATA_SYSV: + fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv"); + break; + + case SDATA_EABI: + fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi"); + break; + + } + + switch (rs6000_traceback) + { + case traceback_default: trace_str = "default"; break; + case traceback_none: trace_str = "none"; break; + case traceback_part: trace_str = "part"; break; + case traceback_full: trace_str = "full"; break; + default: trace_str = "unknown"; break; + } + + fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str); + + switch (rs6000_current_cmodel) + { + case CMODEL_SMALL: cmodel_str = "small"; break; + case CMODEL_MEDIUM: cmodel_str = "medium"; break; + case CMODEL_LARGE: cmodel_str = "large"; break; + default: cmodel_str = "unknown"; break; + } + + fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str); + + switch (rs6000_current_abi) + { + case ABI_NONE: abi_str = "none"; break; + case ABI_AIX: abi_str = "aix"; break; + case ABI_ELFv2: abi_str = "ELFv2"; break; + case ABI_V4: abi_str = "V4"; break; + case ABI_DARWIN: abi_str = 
"darwin"; break; + default: abi_str = "unknown"; break; + } + + fprintf (stderr, DEBUG_FMT_S, "abi", abi_str); + + if (rs6000_altivec_abi) + fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true"); + + if (rs6000_aix_extabi) + fprintf (stderr, DEBUG_FMT_S, "AIX vec-extabi", "true"); + + if (rs6000_darwin64_abi) + fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true"); + + fprintf (stderr, DEBUG_FMT_S, "soft_float", + (TARGET_SOFT_FLOAT ? "true" : "false")); + + if (TARGET_LINK_STACK) + fprintf (stderr, DEBUG_FMT_S, "link_stack", "true"); + + if (TARGET_P8_FUSION) + { + char options[80]; + + strcpy (options, "power8"); + if (TARGET_P8_FUSION_SIGN) + strcat (options, ", sign"); + + fprintf (stderr, DEBUG_FMT_S, "fusion", options); + } + + fprintf (stderr, DEBUG_FMT_S, "plt-format", + TARGET_SECURE_PLT ? "secure" : "bss"); + fprintf (stderr, DEBUG_FMT_S, "struct-return", + aix_struct_return ? "aix" : "sysv"); + fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]); + fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]); + fprintf (stderr, DEBUG_FMT_S, "align_branch", + tf[!!rs6000_align_branch_targets]); + fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size); + fprintf (stderr, DEBUG_FMT_D, "long_double_size", + rs6000_long_double_type_size); + if (rs6000_long_double_type_size > 64) + { + fprintf (stderr, DEBUG_FMT_S, "long double type", + TARGET_IEEEQUAD ? "IEEE" : "IBM"); + fprintf (stderr, DEBUG_FMT_S, "default long double type", + TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM"); + } + fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority", + (int)rs6000_sched_restricted_insns_priority); + fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins", + (int)END_BUILTINS); + fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins", + (int)RS6000_BUILTIN_COUNT); + + fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX", + (int)TARGET_FLOAT128_ENABLE_TYPE); + + if (TARGET_VSX) + fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element", + (int)VECTOR_ELEMENT_SCALAR_64BIT); + + if (TARGET_DIRECT_MOVE_128) + fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element", + (int)VECTOR_ELEMENT_MFVSRLD_64BIT); +} + + +/* Update the addr mask bits in reg_addr to help secondary reload and go if + legitimate address support to figure out the appropriate addressing to + use. */ + +static void +rs6000_setup_reg_addr_masks (void) +{ + ssize_t rc, reg, m, nregs; + addr_mask_type any_addr_mask, addr_mask; + + for (m = 0; m < NUM_MACHINE_MODES; ++m) + { + machine_mode m2 = (machine_mode) m; + bool complex_p = false; + bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode); + size_t msize; + + if (COMPLEX_MODE_P (m2)) + { + complex_p = true; + m2 = GET_MODE_INNER (m2); + } + + msize = GET_MODE_SIZE (m2); + + /* SDmode is special in that we want to access it only via REG+REG + addressing on power7 and above, since we want to use the LFIWZX and + STFIWZX instructions to load it. */ + bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK); + + any_addr_mask = 0; + for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++) + { + addr_mask = 0; + reg = reload_reg_map[rc].reg; + + /* Can mode values go in the GPR/FPR/Altivec registers? */ + if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg]) + { + bool small_int_vsx_p = (small_int_p + && (rc == RELOAD_REG_FPR + || rc == RELOAD_REG_VMX)); + + nregs = rs6000_hard_regno_nregs[m][reg]; + addr_mask |= RELOAD_REG_VALID; + + /* Indicate if the mode takes more than 1 physical register. 
If + it takes a single register, indicate it can do REG+REG + addressing. Small integers in VSX registers can only do + REG+REG addressing. */ + if (small_int_vsx_p) + addr_mask |= RELOAD_REG_INDEXED; + else if (nregs > 1 || m == BLKmode || complex_p) + addr_mask |= RELOAD_REG_MULTIPLE; + else + addr_mask |= RELOAD_REG_INDEXED; + + /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY + addressing. If we allow scalars into Altivec registers, + don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. + + For VSX systems, we don't allow update addressing for + DFmode/SFmode if those registers can go in both the + traditional floating point registers and Altivec registers. + The load/store instructions for the Altivec registers do not + have update forms. If we allowed update addressing, it seems + to break IV-OPT code using floating point if the index type is + int instead of long (PR target/81550 and target/84042). */ + + if (TARGET_UPDATE + && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR) + && msize <= 8 + && !VECTOR_MODE_P (m2) + && !VECTOR_ALIGNMENT_P (m2) + && !complex_p + && (m != E_DFmode || !TARGET_VSX) + && (m != E_SFmode || !TARGET_P8_VECTOR) + && !small_int_vsx_p) + { + addr_mask |= RELOAD_REG_PRE_INCDEC; + + /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that + we don't allow PRE_MODIFY for some multi-register + operations. */ + switch (m) + { + default: + addr_mask |= RELOAD_REG_PRE_MODIFY; + break; + + case E_DImode: + if (TARGET_POWERPC64) + addr_mask |= RELOAD_REG_PRE_MODIFY; + break; + + case E_DFmode: + case E_DDmode: + if (TARGET_HARD_FLOAT) + addr_mask |= RELOAD_REG_PRE_MODIFY; + break; + } + } + } + + /* GPR and FPR registers can do REG+OFFSET addressing, except + possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing + for 64-bit scalars and 32-bit SFmode to altivec registers. */ + if ((addr_mask != 0) && !indexed_only_p + && msize <= 8 + && (rc == RELOAD_REG_GPR + || ((msize == 8 || m2 == SFmode) + && (rc == RELOAD_REG_FPR + || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR))))) + addr_mask |= RELOAD_REG_OFFSET; + + /* VSX registers can do REG+OFFSET addressing if ISA 3.0 + instructions are enabled. The offset for 128-bit VSX registers is + only 12 bits. While GPRs can handle the full offset range, VSX + registers can only handle the restricted range. */ + else if ((addr_mask != 0) && !indexed_only_p + && msize == 16 && TARGET_P9_VECTOR + && (ALTIVEC_OR_VSX_VECTOR_MODE (m2) + || (m2 == TImode && TARGET_VSX))) + { + addr_mask |= RELOAD_REG_OFFSET; + if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX) + addr_mask |= RELOAD_REG_QUAD_OFFSET; + } + + /* Vector pairs can do both indexed and offset loads if the + instructions are enabled, otherwise they can only do offset loads + since the access will be split into two vector moves. Vector quads + can only do offset loads. */ + else if ((addr_mask != 0) && TARGET_MMA + && (m2 == OOmode || m2 == XOmode)) + { + addr_mask |= RELOAD_REG_OFFSET; + if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX) + { + addr_mask |= RELOAD_REG_QUAD_OFFSET; + if (m2 == OOmode) + addr_mask |= RELOAD_REG_INDEXED; + } + } + + /* VMX registers can do (REG & -16) and ((REG+REG) & -16) + addressing on 128-bit types. */ + if (rc == RELOAD_REG_VMX && msize == 16 + && (addr_mask & RELOAD_REG_VALID) != 0) + addr_mask |= RELOAD_REG_AND_M16; + + reg_addr[m].addr_mask[rc] = addr_mask; + any_addr_mask |= addr_mask; + } + + reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask; + } +}
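
/* A minimal sketch (not part of the patch) of how the masks computed
   above are meant to be consumed; the helper name here is illustrative,
   while the backend's real consumers are the mode_supports_*_p-style
   predicates and rs6000_secondary_reload.  */

static inline bool
mode_supports_offset_addressing_p (machine_mode mode, int reload_class)
{
  /* True if MODE allows REG+OFFSET addressing in RELOAD_CLASS.  */
  return (reg_addr[mode].addr_mask[reload_class] & RELOAD_REG_OFFSET) != 0;
}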
+ + +/* Initialize the various global tables that are based on register size. */ +static void +rs6000_init_hard_regno_mode_ok (bool global_init_p) +{ + ssize_t r, m, c; + int align64; + int align32; + + /* Precalculate REGNO_REG_CLASS. */ + rs6000_regno_regclass[0] = GENERAL_REGS; + for (r = 1; r < 32; ++r) + rs6000_regno_regclass[r] = BASE_REGS; + + for (r = 32; r < 64; ++r) + rs6000_regno_regclass[r] = FLOAT_REGS; + + for (r = 64; HARD_REGISTER_NUM_P (r); ++r) + rs6000_regno_regclass[r] = NO_REGS; + + for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r) + rs6000_regno_regclass[r] = ALTIVEC_REGS; + + rs6000_regno_regclass[CR0_REGNO] = CR0_REGS; + for (r = CR1_REGNO; r <= CR7_REGNO; ++r) + rs6000_regno_regclass[r] = CR_REGS; + + rs6000_regno_regclass[LR_REGNO] = LINK_REGS; + rs6000_regno_regclass[CTR_REGNO] = CTR_REGS; + rs6000_regno_regclass[CA_REGNO] = NO_REGS; + rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS; + rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS; + rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS; + rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS; + + /* Precalculate register class to simpler reload register class. We don't + need all of the register classes that are combinations of different + classes, just the simple ones that have constraint letters. */ + for (c = 0; c < N_REG_CLASSES; c++) + reg_class_to_reg_type[c] = NO_REG_TYPE; + + reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE; + reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE; + reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE; + reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE; + reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE; + reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE; + reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE; + reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE; + reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE; + reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE; + + if (TARGET_VSX) + { + reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE; + reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE; + } + else + { + reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE; + reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE; + } + + /* Precalculate the valid memory formats as well as the vector information; + this must be set up before the rs6000_hard_regno_nregs_internal calls + below. */ + gcc_assert ((int)VECTOR_NONE == 0); + memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit)); + memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem)); + + gcc_assert ((int)CODE_FOR_nothing == 0); + memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr)); + + gcc_assert ((int)NO_REGS == 0); + memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints)); + + /* The VSX hardware allows native alignment for vectors, but we control + whether the compiler believes it can use native alignment or must still + use 128-bit alignment. */ + if (TARGET_VSX && !TARGET_VSX_ALIGN_128) + { + align64 = 64; + align32 = 32; + } + else + { + align64 = 128; + align32 = 128; + } + + /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so + only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */ + if (TARGET_FLOAT128_TYPE) + { + rs6000_vector_mem[KFmode] = VECTOR_VSX; + rs6000_vector_align[KFmode] = 128; + + if (FLOAT128_IEEE_P (TFmode)) + { + rs6000_vector_mem[TFmode] = VECTOR_VSX; + rs6000_vector_align[TFmode] = 128; + } + }
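
  /* The rs6000_vector_unit/mem tables being filled in here are consumed
     through predicate macros; rs6000.h defines them essentially as
     (sketch; see rs6000.h for the exact definitions):

	#define VECTOR_UNIT_VSX_P(MODE)			\
	  (rs6000_vector_unit[(MODE)] == VECTOR_VSX)
	#define VECTOR_MEM_VSX_P(MODE)			\
	  (rs6000_vector_mem[(MODE)] == VECTOR_VSX)

     so each assignment below directly gates whether vector arithmetic
     and vector loads/stores are generated for that mode.  */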
+ + /* V2DF mode, VSX only. */ + if (TARGET_VSX) + { + rs6000_vector_unit[V2DFmode] = VECTOR_VSX; + rs6000_vector_mem[V2DFmode] = VECTOR_VSX; + rs6000_vector_align[V2DFmode] = align64; + } + + /* V4SF mode, either VSX or Altivec. */ + if (TARGET_VSX) + { + rs6000_vector_unit[V4SFmode] = VECTOR_VSX; + rs6000_vector_mem[V4SFmode] = VECTOR_VSX; + rs6000_vector_align[V4SFmode] = align32; + } + else if (TARGET_ALTIVEC) + { + rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC; + rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC; + rs6000_vector_align[V4SFmode] = align32; + } + + /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads + and stores. */ + if (TARGET_ALTIVEC) + { + rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC; + rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC; + rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC; + rs6000_vector_align[V4SImode] = align32; + rs6000_vector_align[V8HImode] = align32; + rs6000_vector_align[V16QImode] = align32; + + if (TARGET_VSX) + { + rs6000_vector_mem[V4SImode] = VECTOR_VSX; + rs6000_vector_mem[V8HImode] = VECTOR_VSX; + rs6000_vector_mem[V16QImode] = VECTOR_VSX; + } + else + { + rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC; + rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC; + rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC; + } + } + + /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to + do insert/splat/extract. Altivec doesn't have 64-bit integer support. */ + if (TARGET_VSX) + { + rs6000_vector_mem[V2DImode] = VECTOR_VSX; + rs6000_vector_unit[V2DImode] + = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE; + rs6000_vector_align[V2DImode] = align64; + + rs6000_vector_mem[V1TImode] = VECTOR_VSX; + rs6000_vector_unit[V1TImode] + = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE; + rs6000_vector_align[V1TImode] = 128; + } + + /* DFmode, see if we want to use the VSX unit. Memory is handled + differently, so don't set rs6000_vector_mem. */ + if (TARGET_VSX) + { + rs6000_vector_unit[DFmode] = VECTOR_VSX; + rs6000_vector_align[DFmode] = 64; + } + + /* SFmode, see if we want to use the VSX unit. */ + if (TARGET_P8_VECTOR) + { + rs6000_vector_unit[SFmode] = VECTOR_VSX; + rs6000_vector_align[SFmode] = 32; + } + + /* Allow TImode in VSX registers and set the VSX memory macros. */ + if (TARGET_VSX) + { + rs6000_vector_mem[TImode] = VECTOR_VSX; + rs6000_vector_align[TImode] = align64; + } + + /* Add support for vector pairs and vector quad registers. */ + if (TARGET_MMA) + { + rs6000_vector_unit[OOmode] = VECTOR_NONE; + rs6000_vector_mem[OOmode] = VECTOR_VSX; + rs6000_vector_align[OOmode] = 256; + + rs6000_vector_unit[XOmode] = VECTOR_NONE; + rs6000_vector_mem[XOmode] = VECTOR_VSX; + rs6000_vector_align[XOmode] = 512; + } + + /* Register class constraints for the constraints that depend on compile + switches. When the VSX code was added, different constraints were added + based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all + of the VSX registers are used. The register classes for scalar floating + point types are set, based on whether we allow that type into the upper + (Altivec) registers. GCC has register classes to target the Altivec + registers for load/store operations, to select using a VSX memory + operation instead of the traditional floating point operation. The + constraints are: + + d - Register class to use with traditional DFmode instructions. + f - Register class to use with traditional SFmode instructions. + v - Altivec register. + wa - Any VSX register. + wc - Reserved to represent individual CR bits (used in LLVM).
+ wn - always NO_REGS. + wr - GPR if 64-bit mode is permitted. + wx - Float register if we can do 32-bit int stores. */ + + if (TARGET_HARD_FLOAT) + { + rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */ + rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */ + } + + if (TARGET_VSX) + rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS; + + /* Add conditional constraints based on various options, to allow us to + collapse multiple insn patterns. */ + if (TARGET_ALTIVEC) + rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS; + + if (TARGET_POWERPC64) + { + rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS; + rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS; + } + + if (TARGET_STFIWX) + rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */ + + /* Support for new direct moves (ISA 3.0 + 64bit). */ + if (TARGET_DIRECT_MOVE_128) + rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS; + + /* Set up the reload helper and direct move functions. */ + if (TARGET_VSX || TARGET_ALTIVEC) + { + if (TARGET_64BIT) + { + reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store; + reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load; + reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store; + reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load; + reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store; + reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load; + reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store; + reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load; + reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store; + reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load; + reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store; + reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load; + reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store; + reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load; + reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store; + reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load; + reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store; + reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load; + reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store; + reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load; + + if (FLOAT128_VECTOR_P (KFmode)) + { + reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store; + reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load; + } + + if (FLOAT128_VECTOR_P (TFmode)) + { + reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store; + reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load; + } + + /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are + available. 
*/ + if (TARGET_NO_SDMODE_STACK) + { + reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store; + reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load; + } + + if (TARGET_VSX) + { + reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store; + reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load; + } + + if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128) + { + reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti; + reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti; + reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df; + reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di; + reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf; + reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si; + reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi; + reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi; + reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf; + + reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti; + reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti; + reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df; + reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di; + reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf; + reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si; + reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi; + reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi; + reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf; + + if (FLOAT128_VECTOR_P (KFmode)) + { + reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf; + reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf; + } + + if (FLOAT128_VECTOR_P (TFmode)) + { + reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf; + reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf; + } + + if (TARGET_MMA) + { + reg_addr[OOmode].reload_store = CODE_FOR_reload_oo_di_store; + reg_addr[OOmode].reload_load = CODE_FOR_reload_oo_di_load; + reg_addr[XOmode].reload_store = CODE_FOR_reload_xo_di_store; + reg_addr[XOmode].reload_load = CODE_FOR_reload_xo_di_load; + } + } + } + else + { + reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store; + reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load; + reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store; + reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load; + reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store; + reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load; + reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store; + reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load; + reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store; + reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load; + reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store; + reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load; + reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store; + reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load; + reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store; + reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load; + reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store; + reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load; + reg_addr[SFmode].reload_store = 
CODE_FOR_reload_sf_si_store; + reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load; + + if (FLOAT128_VECTOR_P (KFmode)) + { + reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store; + reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load; + } + + if (FLOAT128_IEEE_P (TFmode)) + { + reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store; + reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load; + } + + /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are + available. */ + if (TARGET_NO_SDMODE_STACK) + { + reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store; + reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load; + } + + if (TARGET_VSX) + { + reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store; + reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load; + } + + if (TARGET_DIRECT_MOVE) + { + reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi; + reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd; + reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf; + } + } + + reg_addr[DFmode].scalar_in_vmx_p = true; + reg_addr[DImode].scalar_in_vmx_p = true; + + if (TARGET_P8_VECTOR) + { + reg_addr[SFmode].scalar_in_vmx_p = true; + reg_addr[SImode].scalar_in_vmx_p = true; + + if (TARGET_P9_VECTOR) + { + reg_addr[HImode].scalar_in_vmx_p = true; + reg_addr[QImode].scalar_in_vmx_p = true; + } + } + } + + /* Precalculate HARD_REGNO_NREGS. */ + for (r = 0; HARD_REGISTER_NUM_P (r); ++r) + for (m = 0; m < NUM_MACHINE_MODES; ++m) + rs6000_hard_regno_nregs[m][r] + = rs6000_hard_regno_nregs_internal (r, (machine_mode) m); + + /* Precalculate TARGET_HARD_REGNO_MODE_OK. */ + for (r = 0; HARD_REGISTER_NUM_P (r); ++r) + for (m = 0; m < NUM_MACHINE_MODES; ++m) + rs6000_hard_regno_mode_ok_p[m][r] + = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m); + + /* Precalculate CLASS_MAX_NREGS sizes. */ + for (c = 0; c < LIM_REG_CLASSES; ++c) + { + int reg_size; + + if (TARGET_VSX && VSX_REG_CLASS_P (c)) + reg_size = UNITS_PER_VSX_WORD; + + else if (c == ALTIVEC_REGS) + reg_size = UNITS_PER_ALTIVEC_WORD; + + else if (c == FLOAT_REGS) + reg_size = UNITS_PER_FP_WORD; + + else + reg_size = UNITS_PER_WORD; + + for (m = 0; m < NUM_MACHINE_MODES; ++m) + { + machine_mode m2 = (machine_mode)m; + int reg_size2 = reg_size; + + /* TDmode & IBM 128-bit floating point always takes 2 registers, even + in VSX. */ + if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m)) + reg_size2 = UNITS_PER_FP_WORD; + + rs6000_class_max_nregs[m][c] + = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2; + } + }
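
  /* Worked example of the table just computed, assuming 64-bit FPRs and
     128-bit VSX registers: IBM long double (TFmode, 16 bytes,
     FLOAT128_2REG_P) in FLOAT_REGS gets (16 + 8 - 1) / 8 == 2 registers,
     while V2DFmode in a VSX register class gets (16 + 16 - 1) / 16 == 1.  */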
+ + /* Calculate the modes for which we automatically generate code to use the + reciprocal divide and square root instructions. In the future, possibly + automatically generate the instructions even if the user did not specify + -mrecip. The older machines' double-precision reciprocal sqrt estimate is + not accurate enough. */ + memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits)); + if (TARGET_FRES) + rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE; + if (TARGET_FRE) + rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE; + if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)) + rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE; + if (VECTOR_UNIT_VSX_P (V2DFmode)) + rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE; + + if (TARGET_FRSQRTES) + rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + if (TARGET_FRSQRTE) + rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)) + rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + if (VECTOR_UNIT_VSX_P (V2DFmode)) + rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE; + + if (rs6000_recip_control) + { + if (!flag_finite_math_only) + warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only", + "-ffast-math"); + if (flag_trapping_math) + warning (0, "%qs requires %qs or %qs", "-mrecip", + "-fno-trapping-math", "-ffast-math"); + if (!flag_reciprocal_math) + warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math", + "-ffast-math"); + if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math) + { + if (RS6000_RECIP_HAVE_RE_P (SFmode) + && (rs6000_recip_control & RECIP_SF_DIV) != 0) + rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RE_P (DFmode) + && (rs6000_recip_control & RECIP_DF_DIV) != 0) + rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RE_P (V4SFmode) + && (rs6000_recip_control & RECIP_V4SF_DIV) != 0) + rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RE_P (V2DFmode) + && (rs6000_recip_control & RECIP_V2DF_DIV) != 0) + rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode) + && (rs6000_recip_control & RECIP_SF_RSQRT) != 0) + rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode) + && (rs6000_recip_control & RECIP_DF_RSQRT) != 0) + rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode) + && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0) + rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + + if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode) + && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0) + rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE; + } + }
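
  /* A sketch of what the AUTO bits enable (cf. rs6000_emit_swdiv): a
     division n/d is expanded as a hardware estimate e0 = fre(d) refined
     by Newton-Raphson steps, conceptually

	e1 = e0 * (2.0 - d * e0)	(each step doubles the valid bits)
	n/d ~= n * e1

     which is only safe under the -ffast-math-style flags checked above;
     hence the warnings.  */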
+ + /* Update the addr mask bits in reg_addr to help secondary reload and the + legitimate address support figure out the appropriate addressing to + use. */ + rs6000_setup_reg_addr_masks (); + + if (global_init_p || TARGET_DEBUG_TARGET) + { + if (TARGET_DEBUG_REG) + rs6000_debug_reg_global (); + + if (TARGET_DEBUG_COST || TARGET_DEBUG_REG) + fprintf (stderr, + "SImode variable mult cost = %d\n" + "SImode constant mult cost = %d\n" + "SImode short constant mult cost = %d\n" + "DImode multiplication cost = %d\n" + "SImode division cost = %d\n" + "DImode division cost = %d\n" + "Simple fp operation cost = %d\n" + "DFmode multiplication cost = %d\n" + "SFmode division cost = %d\n" + "DFmode division cost = %d\n" + "cache line size = %d\n" + "l1 cache size = %d\n" + "l2 cache size = %d\n" + "simultaneous prefetches = %d\n" + "\n", + rs6000_cost->mulsi, + rs6000_cost->mulsi_const, + rs6000_cost->mulsi_const9, + rs6000_cost->muldi, + rs6000_cost->divsi, + rs6000_cost->divdi, + rs6000_cost->fp, + rs6000_cost->dmul, + rs6000_cost->sdiv, + rs6000_cost->ddiv, + rs6000_cost->cache_line_size, + rs6000_cost->l1_cache_size, + rs6000_cost->l2_cache_size, + rs6000_cost->simultaneous_prefetches); + } +} + +#if TARGET_MACHO +/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */ + +static void +darwin_rs6000_override_options (void) +{ + /* The Darwin ABI always includes AltiVec; it can't be (validly) turned + off. */ + rs6000_altivec_abi = 1; + TARGET_ALTIVEC_VRSAVE = 1; + rs6000_current_abi = ABI_DARWIN; + + if (DEFAULT_ABI == ABI_DARWIN + && TARGET_64BIT) + darwin_one_byte_bool = 1; + + if (TARGET_64BIT && ! TARGET_POWERPC64) + { + rs6000_isa_flags |= OPTION_MASK_POWERPC64; + warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64"); + } + + /* The linkers [ld64] that support 64-bit do not need the JBSR longcall + optimisation, and will not work with the most generic case (where the + symbol is undefined external, but there is no symbol stub). */ + if (TARGET_64BIT) + rs6000_default_long_calls = 0; + + /* ld_classic is (so far) still used for kernel (static) code, and supports + the JBSR longcall / branch islands. */ + if (flag_mkernel) + { + rs6000_default_long_calls = 1; + + /* Allow a kext author to do -mkernel -mhard-float. */ + if (! (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)) + rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT; + } + + /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes + Altivec. */ + if (!flag_mkernel && !flag_apple_kext + && TARGET_64BIT + && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)) + rs6000_isa_flags |= OPTION_MASK_ALTIVEC; + + /* Unless the user (not the configurer) has explicitly overridden + it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to + G4 unless targeting the kernel. */ + if (!flag_mkernel + && !flag_apple_kext + && strverscmp (darwin_macosx_version_min, "10.5") >= 0 + && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC) + && ! OPTION_SET_P (rs6000_cpu_index)) + { + rs6000_isa_flags |= OPTION_MASK_ALTIVEC; + } +} +#endif + +/* If not otherwise specified by a target, make 'long double' equivalent to + 'double'. */ + +#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE +#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64 +#endif + +/* Return the builtin mask of the various options used that could affect which + builtins were used. In the past we used target_flags, but we've run out of + bits, and some options are no longer in target_flags. */ + +HOST_WIDE_INT +rs6000_builtin_mask_calculate (void) +{ + return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0) + | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0) + | ((TARGET_VSX) ? RS6000_BTM_VSX : 0) + | ((TARGET_FRE) ? RS6000_BTM_FRE : 0) + | ((TARGET_FRES) ? RS6000_BTM_FRES : 0) + | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0) + | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0) + | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0) + | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0) + | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0) + | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0) + | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0) + | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0) + | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0) + | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0) + | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0) + | ((TARGET_HTM) ? RS6000_BTM_HTM : 0) + | ((TARGET_DFP) ? RS6000_BTM_DFP : 0) + | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0) + | ((TARGET_LONG_DOUBLE_128 + && TARGET_HARD_FLOAT + && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0) + | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0) + | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0) + | ((TARGET_MMA) ? RS6000_BTM_MMA : 0) + | ((TARGET_POWER10) ? RS6000_BTM_P10 : 0)); +}
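
/* Sketch of how the mask above is consumed when expanding built-ins: a
   built-in is available only when every feature bit it needs is present
   in rs6000_builtin_mask.  The helper name is illustrative, not the
   exact function used elsewhere in this file.  */

static inline bool
rs6000_builtin_mask_enabled_p (HOST_WIDE_INT needed)
{
  /* Every bit required by the built-in must be set in the mask.  */
  return (needed & ~rs6000_builtin_mask) == 0;
}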
+ +/* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered + to clobber the XER[CA] bit because clobbering that bit without telling + the compiler worked just fine with versions of GCC before GCC 5, and + breaking a lot of older code in ways that are hard to track down is + not such a great idea. */ + +static rtx_insn * +rs6000_md_asm_adjust (vec<rtx> & /*outputs*/, vec<rtx> & /*inputs*/, + vec<machine_mode> & /*input_modes*/, + vec<const char *> & /*constraints*/, vec<rtx> &clobbers, + HARD_REG_SET &clobbered_regs, location_t /*loc*/) +{ + clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO)); + SET_HARD_REG_BIT (clobbered_regs, CA_REGNO); + return NULL; +}
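
/* Illustrative user code of the kind the hook above protects: pre-GCC-5
   inline asm that sets and reads XER[CA] without declaring a clobber.
   (Example only, not part of the patch.)  */

static inline unsigned long
add_with_carry_out (unsigned long a, unsigned long b, unsigned long *cout)
{
  unsigned long sum, carry, zero = 0;
  __asm__ ("addc %0,%3,%4\n\t"	/* addc sets XER[CA]...  */
	   "addze %1,%2"	/* ...and addze consumes it.  */
	   : "=&r" (sum), "=r" (carry)
	   : "r" (zero), "r" (a), "r" (b));
  *cout = carry;		/* carry = 0 + CA, the carry-out of a + b.  */
  return sum;
}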
+ +/* This target function is similar to the hook TARGET_OPTION_OVERRIDE + but is called when the optimize level is changed via an attribute or + pragma or when it is reset at the end of the code affected by the + attribute or pragma. It is not called at the beginning of compilation + when TARGET_OPTION_OVERRIDE is called, so if you want to perform these + actions then, you should have TARGET_OPTION_OVERRIDE call + TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */ + +static void +rs6000_override_options_after_change (void) +{ + /* Explicit -funroll-loops turns -munroll-only-small-loops off, and + turns -frename-registers on. */ + if ((OPTION_SET_P (flag_unroll_loops) && flag_unroll_loops) + || (OPTION_SET_P (flag_unroll_all_loops) + && flag_unroll_all_loops)) + { + if (!OPTION_SET_P (unroll_only_small_loops)) + unroll_only_small_loops = 0; + if (!OPTION_SET_P (flag_rename_registers)) + flag_rename_registers = 1; + if (!OPTION_SET_P (flag_cunroll_grow_size)) + flag_cunroll_grow_size = 1; + } + else if (!OPTION_SET_P (flag_cunroll_grow_size)) + flag_cunroll_grow_size = flag_peel_loops || optimize >= 3; +} + +#ifdef TARGET_USES_LINUX64_OPT +static void +rs6000_linux64_override_options () +{ + if (!OPTION_SET_P (rs6000_alignment_flags)) + rs6000_alignment_flags = MASK_ALIGN_NATURAL; + if (rs6000_isa_flags & OPTION_MASK_64BIT) + { + if (DEFAULT_ABI != ABI_AIX) + { + rs6000_current_abi = ABI_AIX; + error (INVALID_64BIT, "call"); + } + dot_symbols = !strcmp (rs6000_abi_name, "aixdesc"); + if (ELFv2_ABI_CHECK) + { + rs6000_current_abi = ABI_ELFv2; + if (dot_symbols) + error ("%<-mcall-aixdesc%> incompatible with %<-mabi=elfv2%>"); + } + if (rs6000_isa_flags & OPTION_MASK_RELOCATABLE) + { + rs6000_isa_flags &= ~OPTION_MASK_RELOCATABLE; + error (INVALID_64BIT, "relocatable"); + } + if (rs6000_isa_flags & OPTION_MASK_EABI) + { + rs6000_isa_flags &= ~OPTION_MASK_EABI; + error (INVALID_64BIT, "eabi"); + } + if (TARGET_PROTOTYPE) + { + target_prototype = 0; + error (INVALID_64BIT, "prototype"); + } + if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) == 0) + { + rs6000_isa_flags |= OPTION_MASK_POWERPC64; + error ("%<-m64%> requires a PowerPC64 cpu"); + } + if (!OPTION_SET_P (rs6000_current_cmodel)) + SET_CMODEL (CMODEL_MEDIUM); + if ((rs6000_isa_flags_explicit & OPTION_MASK_MINIMAL_TOC) != 0) + { + if (OPTION_SET_P (rs6000_current_cmodel) + && rs6000_current_cmodel != CMODEL_SMALL) + error ("%<-mcmodel%> incompatible with other toc options"); + if (TARGET_MINIMAL_TOC) + SET_CMODEL (CMODEL_SMALL); + else if (TARGET_PCREL + || (PCREL_SUPPORTED_BY_OS + && (rs6000_isa_flags_explicit & OPTION_MASK_PCREL) == 0)) + /* Ignore -mno-minimal-toc. */ + ; + else + SET_CMODEL (CMODEL_SMALL); + } + if (rs6000_current_cmodel != CMODEL_SMALL) + { + if (!OPTION_SET_P (TARGET_NO_FP_IN_TOC)) + TARGET_NO_FP_IN_TOC = rs6000_current_cmodel == CMODEL_MEDIUM; + if (!OPTION_SET_P (TARGET_NO_SUM_IN_TOC)) + TARGET_NO_SUM_IN_TOC = 0; + } + if (TARGET_PLTSEQ && DEFAULT_ABI != ABI_ELFv2) + { + if (OPTION_SET_P (rs6000_pltseq)) + warning (0, "%qs unsupported for this ABI", + "-mpltseq"); + rs6000_pltseq = false; + } + } + else if (TARGET_64BIT) + error (INVALID_32BIT, "32"); + else + { + if (TARGET_PROFILE_KERNEL) + { + profile_kernel = 0; + error (INVALID_32BIT, "profile-kernel"); + } + if (OPTION_SET_P (rs6000_current_cmodel)) + { + SET_CMODEL (CMODEL_SMALL); + error (INVALID_32BIT, "cmodel"); + } + } +} +#endif + +/* Return true if we are using GLIBC, and it supports IEEE 128-bit long double. + This support is only in little endian GLIBC 2.32 or newer. */ +static bool +glibc_supports_ieee_128bit (void) +{ +#ifdef OPTION_GLIBC + if (OPTION_GLIBC && !BYTES_BIG_ENDIAN + && ((TARGET_GLIBC_MAJOR * 1000) + TARGET_GLIBC_MINOR) >= 2032) + return true; +#endif /* OPTION_GLIBC. */ + + return false; +} + +/* Override command line options. + + Combine build-specific configuration information with options + specified on the command line to set various state variables which + influence code generation, optimization, and expansion of built-in + functions.
Assure that command-line configuration preferences are + compatible with each other and with the build configuration; issue + warnings while adjusting configuration or error messages while + rejecting configuration. + + Upon entry to this function: + + This function is called once at the beginning of + compilation, and then again at the start and end of compiling + each section of code that has a different configuration, as + indicated, for example, by adding the + + __attribute__((__target__("cpu=power9"))) + + qualifier to a function definition or, for example, by bracketing + code between + + #pragma GCC target("altivec") + + and + + #pragma GCC reset_options + + directives. Parameter global_init_p is true for the initial + invocation, which initializes global variables, and false for all + subsequent invocations. + + + Various global state information is assumed to be valid. This + includes OPTION_TARGET_CPU_DEFAULT, representing the name of the + default CPU specified at build configure time, TARGET_DEFAULT, + representing the default set of option flags for the default + target, and OPTION_SET_P (rs6000_isa_flags), representing + which options were requested on the command line. + + Upon return from this function: + + rs6000_isa_flags_explicit has a non-zero bit for each flag that + was set by name on the command line. Additionally, if certain + attributes are automatically enabled or disabled by this function + in order to assure compatibility between options and + configuration, the flags associated with those attributes are + also set. By setting these "explicit bits", we avoid the risk + that other code might accidentally overwrite these particular + attributes with "default values". + + The various bits of rs6000_isa_flags are set to indicate the + target options that have been selected for the most current + compilation efforts. This has the effect of also turning on the + associated TARGET_XXX values since these are macros which are + generally defined to test the corresponding bit of the + rs6000_isa_flags variable. + + The variable rs6000_builtin_mask is set to represent the target + options for the most current compilation efforts, consistent with + the current contents of rs6000_isa_flags. This variable controls + expansion of built-in functions. + + Various other global variables and fields of global structures + (over 50 in all) are initialized to reflect the desired options + for the most current compilation efforts. */ + +static bool +rs6000_option_override_internal (bool global_init_p) +{ + bool ret = true; + + HOST_WIDE_INT set_masks; + HOST_WIDE_INT ignore_masks; + int cpu_index = -1; + int tune_index; + struct cl_target_option *main_target_opt + = ((global_init_p || target_option_default_node == NULL) + ? NULL : TREE_TARGET_OPTION (target_option_default_node)); + + /* Print defaults. */ + if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p) + rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT); + + /* Remember the explicit arguments. */ + if (global_init_p) + rs6000_isa_flags_explicit = OPTION_SET_P (rs6000_isa_flags); + + /* On 64-bit Darwin, power alignment is ABI-incompatible with some C + library functions, so warn about it. The flag may be useful for + performance studies from time to time though, so don't disable it + entirely. 
*/ + if (OPTION_SET_P (rs6000_alignment_flags) + && rs6000_alignment_flags == MASK_ALIGN_POWER + && DEFAULT_ABI == ABI_DARWIN + && TARGET_64BIT) + warning (0, "%qs is not supported for 64-bit Darwin;" + " it is incompatible with the installed C and C++ libraries", + "-malign-power"); + + /* Numerous experiments show that IRA based loop pressure + calculation works better for RTL loop invariant motion on targets + with enough (>= 32) registers. It is an expensive optimization. + So it is on only for peak performance. */ + if (optimize >= 3 && global_init_p + && !OPTION_SET_P (flag_ira_loop_pressure)) + flag_ira_loop_pressure = 1; + + /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order + for tracebacks to be complete, but not if any -fasynchronous-unwind-tables + options were already specified. */ + if (flag_sanitize & SANITIZE_USER_ADDRESS + && !OPTION_SET_P (flag_asynchronous_unwind_tables)) + flag_asynchronous_unwind_tables = 1; + + /* -fvariable-expansion-in-unroller is a win for POWER whenever the + loop unroller is active. It is only checked during unrolling, so + we can just set it on by default. */ + if (!OPTION_SET_P (flag_variable_expansion_in_unroller)) + flag_variable_expansion_in_unroller = 1; + + /* Set the pointer size. */ + if (TARGET_64BIT) + { + rs6000_pmode = DImode; + rs6000_pointer_size = 64; + } + else + { + rs6000_pmode = SImode; + rs6000_pointer_size = 32; + } + + /* Some OSs don't support saving the high part of 64-bit registers on context + switch. Other OSs don't support saving Altivec registers. On those OSs, + we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings; + if the user wants either, the user must explicitly specify them and we + won't interfere with the user's specification. */ + + set_masks = POWERPC_MASKS; +#ifdef OS_MISSING_POWERPC64 + if (OS_MISSING_POWERPC64) + set_masks &= ~OPTION_MASK_POWERPC64; +#endif +#ifdef OS_MISSING_ALTIVEC + if (OS_MISSING_ALTIVEC) + set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX + | OTHER_VSX_VECTOR_MASKS); +#endif + + /* Don't let the processor default override options given explicitly. */ + set_masks &= ~rs6000_isa_flags_explicit; + + /* Process the -mcpu= and -mtune= arguments. If the user changed + the cpu in a target attribute or pragma, but did not specify a tuning + option, use the cpu for the tuning option rather than the option specified + with -mtune on the command line. Process a '--with-cpu' configuration + request as an implicit --cpu. */ + if (rs6000_cpu_index >= 0) + cpu_index = rs6000_cpu_index; + else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0) + cpu_index = main_target_opt->x_rs6000_cpu_index; + else if (OPTION_TARGET_CPU_DEFAULT) + cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT); + + /* If we have a cpu, either through an explicit -mcpu= or if the + compiler was configured with --with-cpu=, replace all of the ISA bits + with those from the cpu, except for options that were explicitly set. If + we don't have a cpu, do not override the target bits set in + TARGET_DEFAULT. */ + if (cpu_index >= 0) + { + rs6000_cpu_index = cpu_index; + rs6000_isa_flags &= ~set_masks; + rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable + & set_masks); + } + else + { + /* If no -mcpu=, inherit any default options that were cleared via + POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize + target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched + to using rs6000_isa_flags, we need to do the initialization here. + + If there is a TARGET_DEFAULT, use that. Otherwise fall back to using + -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */ + HOST_WIDE_INT flags; + if (TARGET_DEFAULT) + flags = TARGET_DEFAULT; + else + { + /* PowerPC 64-bit LE requires at least ISA 2.07. */ + const char *default_cpu = (!TARGET_POWERPC64 + ? "powerpc" + : (BYTES_BIG_ENDIAN + ? "powerpc64" + : "powerpc64le")); + int default_cpu_index = rs6000_cpu_name_lookup (default_cpu); + flags = processor_target_table[default_cpu_index].target_enable; + } + rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit); + }
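
  /* For example (illustrative): with -mcpu=power8 on the command line and
     a function tagged __attribute__ ((__target__ ("cpu=power9"))), this
     code runs again for that function with cpu_index naming power9, so
     its rs6000_isa_flags pick up the power9 ISA bits while the rest of
     the translation unit keeps the power8 ones.  */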
+ + if (rs6000_tune_index >= 0) + tune_index = rs6000_tune_index; + else if (cpu_index >= 0) + rs6000_tune_index = tune_index = cpu_index; + else + { + size_t i; + enum processor_type tune_proc + = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT); + + tune_index = -1; + for (i = 0; i < ARRAY_SIZE (processor_target_table); i++) + if (processor_target_table[i].processor == tune_proc) + { + tune_index = i; + break; + } + } + + if (cpu_index >= 0) + rs6000_cpu = processor_target_table[cpu_index].processor; + else + rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT; + + gcc_assert (tune_index >= 0); + rs6000_tune = processor_target_table[tune_index].processor; + + if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3 + || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64 + || rs6000_cpu == PROCESSOR_PPCE5500) + { + if (TARGET_ALTIVEC) + error ("AltiVec not supported in this target"); + } + + /* If we are optimizing big endian systems for space, use the load/store + multiple instructions. */ + if (BYTES_BIG_ENDIAN && optimize_size) + rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE; + + /* Don't allow -mmultiple on little endian systems unless the cpu is a 750, + because the hardware doesn't support the instructions used in little + endian mode, and they cause an alignment trap. The 750 does not cause an + alignment trap (except when the target is unaligned). */ + + if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE) + { + rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE; + if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0) + warning (0, "%qs is not supported on little endian systems", + "-mmultiple"); + } + + /* If little-endian, default to -mstrict-align on older processors. + Testing for direct_move matches power8 and later. */ + if (!BYTES_BIG_ENDIAN + && !(processor_target_table[tune_index].target_enable + & OPTION_MASK_DIRECT_MOVE)) + rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN; + + if (!rs6000_fold_gimple) + fprintf (stderr, + "gimple folding of rs6000 builtins has been disabled.\n"); + + /* Add some warnings for VSX.
*/ + if (TARGET_VSX) + { + const char *msg = NULL; + if (!TARGET_HARD_FLOAT) + { + if (rs6000_isa_flags_explicit & OPTION_MASK_VSX) + msg = N_("%<-mvsx%> requires hardware floating point"); + else + { + rs6000_isa_flags &= ~ OPTION_MASK_VSX; + rs6000_isa_flags_explicit |= OPTION_MASK_VSX; + } + } + else if (TARGET_AVOID_XFORM > 0) + msg = N_("%<-mvsx%> needs indexed addressing"); + else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit + & OPTION_MASK_ALTIVEC)) + { + if (rs6000_isa_flags_explicit & OPTION_MASK_VSX) + msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible"); + else + msg = N_("%<-mno-altivec%> disables vsx"); + } + + if (msg) + { + warning (0, msg); + rs6000_isa_flags &= ~ OPTION_MASK_VSX; + rs6000_isa_flags_explicit |= OPTION_MASK_VSX; + } + } + + /* If hard-float/altivec/vsx were explicitly turned off then don't allow + the -mcpu setting to enable options that conflict. */ + if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX) + && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT + | OPTION_MASK_ALTIVEC + | OPTION_MASK_VSX)) != 0) + rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO + | OPTION_MASK_DIRECT_MOVE) + & ~rs6000_isa_flags_explicit); + + if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) + rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags); + +#ifdef XCOFF_DEBUGGING_INFO + /* For AIX default to 64-bit DWARF. */ + if (!OPTION_SET_P (dwarf_offset_size)) + dwarf_offset_size = POINTER_SIZE_UNITS; +#endif + + /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn + off all of the options that depend on those flags. */ + ignore_masks = rs6000_disable_incompatible_switches (); + + /* For the newer switches (vsx, dfp, etc.) set some of the older options, + unless the user explicitly used the -mno-