--- original-gcc/config.sub
+++ gcc-5.2.0/config.sub
@@ -340,6 +340,9 @@ case $basic_machine in
 ms1)
 basic_machine=mt-unknown
 ;;
+ riscv)
+ basic_machine=riscv-ucb
+ ;;

 strongarm | thumb | xscale)
 basic_machine=arm-unknown
--- original-gcc/gcc/config.gcc
+++ gcc-5.2.0/gcc/config.gcc
@@ -439,6 +439,10 @@ powerpc*-*-*)
 esac
 extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
 ;;
+riscv*)
+ cpu_type=riscv
+ need_64bit_hwint=yes
+ ;;
 rs6000*-*-*)
 extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
 ;;
@@ -1976,6 +1980,27 @@ microblaze*-*-elf)
 cxx_target_objs="${cxx_target_objs} microblaze-c.o"
 tmake_file="${tmake_file} microblaze/t-microblaze"
 ;;
+riscv32*-*-linux*) # Linux RISC-V
+ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h riscv/default-32.h ${tm_file} riscv/linux.h riscv/linux64.h"
+ tmake_file="${tmake_file} riscv/t-linux64"
+ gnu_ld=yes
+ gas=yes
+ gcc_cv_initfini_array=yes
+ ;;
+riscv*-*-linux*) # Linux RISC-V
+ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h riscv/linux64.h"
+ tmake_file="${tmake_file} riscv/t-linux64"
+ gnu_ld=yes
+ gas=yes
+ gcc_cv_initfini_array=yes
+ ;;
+riscv*-*-elf*) # RISC-V ELF (newlib)
+ tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
|
|
|
|
|
+ tmake_file="${tmake_file} riscv/t-elf"
|
|
|
|
|
+ gnu_ld=yes
|
|
|
|
|
+ gas=yes
|
|
|
|
|
+ gcc_cv_initfini_array=yes
|
|
|
|
|
+ ;;
|
|
|
|
|
mips*-*-netbsd*) # NetBSD/mips, either endian.
|
|
|
|
|
target_cpu_default="MASK_ABICALLS"
|
|
|
|
|
tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
|
|
|
|
|
@@ -3851,6 +3876,31 @@ case "${target}" in
|
|
|
|
|
done
|
|
|
|
|
;;
|
|
|
|
|
|
|
|
|
|
+ riscv*-*-*)
|
|
|
|
|
+ supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64"
|
|
|
|
|
+
|
|
|
|
|
+ case ${with_float} in
|
|
|
|
|
+ "" | soft | hard)
|
|
|
|
|
+ # OK
|
|
|
|
|
+ ;;
|
|
|
|
|
+ *)
|
|
|
|
|
+ echo "Unknown floating point type used in --with-float=$with_float" 1>&2
|
|
|
|
|
+ exit 1
|
|
|
|
|
+ ;;
|
|
|
|
|
+ esac
|
|
|
|
|
+
|
|
|
|
|
+ case ${with_abi} in
|
|
|
|
|
+ "" | 32 | 64)
|
|
|
|
|
+ # OK
|
|
|
|
|
+ ;;
|
|
|
|
|
+ *)
|
|
|
|
|
+ echo "Unknown ABI used in --with-abi=$with_abi" 1>&2
|
|
|
|
|
+ exit 1
|
|
|
|
|
+ ;;
|
|
|
|
|
+ esac
|
|
|
|
|
+
|
|
|
|
|
+ ;;
|
|
|
|
|
+
|
|
|
|
|
mips*-*-*)
|
|
|
|
|
supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci"
|
|
|
|
|
|
|
|
|
|
--- original-gcc/gcc/configure
+++ gcc-5.2.0/gcc/configure
@@ -23708,6 +23708,25 @@ x3: .space 4
 tls_first_minor=14
 tls_as_opt="-a32 --fatal-warnings"
 ;;
+ riscv*-*-*)
+ conftest_s='
+ .section .tdata,"awT",@progbits
+x:
+ .word 2
+ .text
+ la.tls.gd a0,x
+ la.tls.ie a1,x
+ lui a0,%tls_ie_pcrel_hi(x)
+ lw a0,%pcrel_lo(x)(a0)
+ add a0,a0,tp
+ lw a0,0(a0)
+ lui a0,%tprel_hi(x)
+ add a0,a0,tp,%tprel_add(x)
+ lw a0,%tprel_lo(x)(a0)'
+ tls_first_major=2
+ tls_first_minor=21
+ tls_as_opt='-m32 --fatal-warnings'
+ ;;
 s390-*-*)
 conftest_s='
 .section ".tdata","awT",@progbits
--- original-gcc/gcc/configure.ac
+++ gcc-5.2.0/gcc/configure.ac
@@ -3263,6 +3263,25 @@ x3: .space 4
 tls_first_minor=14
 tls_as_opt="-a32 --fatal-warnings"
 ;;
+ riscv*-*-*)
+ conftest_s='
+ .section .tdata,"awT",@progbits
+x:
+ .word 2
+ .text
+ la.tls.gd a0,x
+ la.tls.ie a1,x
+ lui a0,%tls_ie_pcrel_hi(x)
+ lw a0,%pcrel_lo(x)(a0)
+ add a0,a0,tp
+ lw a0,0(a0)
+ lui a0,%tprel_hi(x)
+ add a0,a0,tp,%tprel_add(x)
+ lw a0,%tprel_lo(x)(a0)'
+ tls_first_major=2
+ tls_first_minor=21
+ tls_as_opt='-m32 --fatal-warnings'
+ ;;
 s390-*-*)
 conftest_s='
 .section ".tdata","awT",@progbits
--- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+++ gcc-5.2.0/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
@@ -6,6 +6,9 @@
 #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
 /* On PPC division by zero does not trap. */
 # define DO_TEST 0
+#elif defined (__riscv__)
+ /* On RISC-V division by zero does not trap. */
+# define DO_TEST 0
 #elif defined (__SPU__)
 /* On SPU division by zero does not trap. */
 # define DO_TEST 0
--- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c
+++ gcc-5.2.0/gcc/testsuite/gcc.dg/20020312-2.c
@@ -66,6 +66,8 @@ extern void abort (void);
 # else
 # define PIC_REG "30"
 # endif
+#elif defined(__riscv__)
+/* No pic register. */
 #elif defined(__RX__)
 /* No pic register. */
 #elif defined(__s390__)
--- original-gcc/gcc/testsuite/gcc.dg/20040813-1.c
+++ gcc-5.2.0/gcc/testsuite/gcc.dg/20040813-1.c
@@ -2,7 +2,7 @@
 /* Contributed by Devang Patel <dpatel@apple.com> */

 /* { dg-do compile } */
-/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* nios2-*-* *-*-vxworks* nvptx-*-* } { "*" } { "" } } */
+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* riscv*-*-* tile*-*-* nios2-*-* *-*-vxworks* nvptx-*-* } { "*" } { "" } } */
 /* { dg-options "-gstabs" } */

 int
--- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
+++ gcc-5.2.0/gcc/testsuite/gcc.dg/stack-usage-1.c
@@ -61,6 +61,8 @@
 # else
 # define SIZE 240
 # endif
+#elif defined (__riscv__)
+# define SIZE 240
 #elif defined (__AVR__)
 # define SIZE 254
 #elif defined (__s390x__)
--- original-gcc/libatomic/configure.tgt
+++ gcc-5.2.0/libatomic/configure.tgt
@@ -33,6 +33,7 @@ case "${target_cpu}" in
 ARCH=alpha
 ;;
 rs6000 | powerpc*) ARCH=powerpc ;;
+ riscv*) ARCH=riscv ;;
 sh*) ARCH=sh ;;

 arm*)
--- original-gcc/libgcc/config.host
+++ gcc-5.2.0/libgcc/config.host
@@ -167,6 +167,9 @@ powerpc*-*-*)
 ;;
 rs6000*-*-*)
 ;;
+riscv*)
+ cpu_type=riscv
+ ;;
 sparc64*-*-*)
 cpu_type=sparc
 ;;
@@ -1057,6 +1060,18 @@ powerpcle-*-eabi*)
 tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
 extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
 ;;
+riscv32*-*-linux*)
+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-elf riscv/t-elf32"
+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
+ ;;
+riscv*-*-linux*)
+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-elf"
+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
+ ;;
+riscv*-*-*)
+ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-elf"
+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
+ ;;
 rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
 md_unwind_header=rs6000/aix-unwind.h
 tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
--- original-gcc/libsanitizer/asan/asan_linux.cc
+++ gcc-5.2.0/libsanitizer/asan/asan_linux.cc
@@ -213,6 +213,11 @@ void GetPcSpBp(void *context, uptr *pc,
 *pc = ucontext->uc_mcontext.gregs[31];
 *bp = ucontext->uc_mcontext.gregs[30];
 *sp = ucontext->uc_mcontext.gregs[29];
+# elif defined(__riscv__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_PC];
+ *bp = ucontext->uc_mcontext.gregs[REG_S0];
+ *sp = ucontext->uc_mcontext.gregs[REG_SP];
 #else
 # error "Unsupported arch"
 #endif
--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ gcc-5.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -61,7 +61,8 @@ namespace __sanitizer {
 } // namespace __sanitizer

 #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
- && !defined(__mips__) && !defined(__sparc__)
+ && !defined(__mips__) && !defined(__sparc__)\
+ && !defined(__riscv__)
 COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
 #endif

--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ gcc-5.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -72,6 +72,10 @@ namespace __sanitizer {
 const unsigned struct_kernel_stat_sz = 144;
 #endif
 const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__riscv__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 128;
+ const unsigned struct_kernel_stat64_sz = 128;
 #elif defined(__sparc__) && defined(__arch64__)
 const unsigned struct___old_kernel_stat_sz = 0;
 const unsigned struct_kernel_stat_sz = 104;
@@ -511,7 +515,7 @@ namespace __sanitizer {
 typedef long __sanitizer___kernel_off_t;
 #endif

-#if defined(__powerpc__) || defined(__mips__)
+#if defined(__powerpc__) || defined(__mips__) || defined(__riscv__)
 typedef unsigned int __sanitizer___kernel_old_uid_t;
 typedef unsigned int __sanitizer___kernel_old_gid_t;
 #else
diff -ru gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform.h gcc-5.1.0/libsanitizer/sanitizer_common/sanitizer_platform.h
--- gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform.h 2015-05-13 19:36:27.061421043 -0700
+++ gcc-5.2.0/libsanitizer/sanitizer_common/sanitizer_platform.h 2015-05-13 19:44:19.274355577 -0700
@@ -98,9 +98,9 @@

 // The AArch64 linux port uses the canonical syscall set as mandated by
 // the upstream linux community for all new ports. Other ports may still
-// use legacy syscalls.
+// use legacy syscalls. The RISC-V port also does this.
 #ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
-# if defined(__aarch64__) && SANITIZER_LINUX
+# if (defined(__aarch64__) || defined(__riscv__)) && SANITIZER_LINUX
 # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
 # else
 # define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
diff -ru gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc-5.1.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
--- gcc-5.1.0.orig/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2015-05-13 19:36:27.061421043 -0700
+++ gcc-5.2.0/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2015-05-13 19:39:13.515487834 -0700
@@ -73,7 +73,6 @@
 #endif
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__riscv__)
- const unsigned struct___old_kernel_stat_sz = 0;
 const unsigned struct_kernel_stat_sz = 128;
 const unsigned struct_kernel_stat64_sz = 128;
 #elif defined(__sparc__) && defined(__arch64__)
@@ -104,7 +103,7 @@

 #if SANITIZER_LINUX || SANITIZER_FREEBSD

-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined(__riscv__)
 const unsigned struct___old_kernel_stat_sz = 0;
 #elif !defined(__sparc__)
 const unsigned struct___old_kernel_stat_sz = 32;
diff -urN empty/gcc/common/config/riscv/riscv-common.c gcc-5.2.0/gcc/common/config/riscv/riscv-common.c
--- gcc-5.2.0/gcc/common/config/riscv/riscv-common.c 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/common/config/riscv/riscv-common.c 2015-07-17 22:36:52.315705931 +0200
@@ -0,0 +1,140 @@
+/* Common hooks for RISC-V.
+ Copyright (C) 1989-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "common/common-target.h"
+#include "common/common-target-def.h"
+#include "opts.h"
+#include "flags.h"
+#include "errors.h"
+
+/* Parse a RISC-V ISA string into an option mask. */
+
+static void
+riscv_parse_arch_string (const char *isa, int *flags)
+{
+ const char *p = isa;
+
+ if (strncmp (p, "RV32", 4) == 0)
+ *flags |= MASK_32BIT, p += 4;
+ else if (strncmp (p, "RV64", 4) == 0)
+ *flags &= ~MASK_32BIT, p += 4;
+
+ if (*p++ != 'I')
+ {
+ error ("-march=%s: ISA strings must begin with I, RV32I, or RV64I", isa);
+ return;
+ }
+
+ *flags &= ~MASK_MULDIV;
+ if (*p == 'M')
+ *flags |= MASK_MULDIV, p++;
+
+ *flags &= ~MASK_ATOMIC;
+ if (*p == 'A')
+ *flags |= MASK_ATOMIC, p++;
+
+ *flags |= MASK_SOFT_FLOAT_ABI;
+ if (*p == 'F')
+ *flags &= ~MASK_SOFT_FLOAT_ABI, p++;
+
+ if (*p == 'D')
+ {
+ p++;
+ if (!TARGET_HARD_FLOAT)
+ {
+ error ("-march=%s: the D extension requires the F extension", isa);
+ return;
+ }
+ }
+ else if (TARGET_HARD_FLOAT)
+ {
+ error ("-march=%s: single-precision-only is not yet supported", isa);
+ return;
+ }
+
+ *flags &= ~MASK_RVC;
+ if (*p == 'C')
+ *flags |= MASK_RVC, p++;
+
+ /* FIXME: For now we just stop parsing when faced with a
+ non-standard RISC-V ISA extension, partially because of a
+ problem with the naming scheme. */
+ if (*p == 'X')
+ return;
+
+ if (*p)
+ {
+ error ("-march=%s: unsupported ISA substring %s", isa, p);
+ return;
+ }
+}
+
+static int
+riscv_flags_from_arch_string (const char *isa)
+{
+ int flags = 0;
+ riscv_parse_arch_string (isa, &flags);
+ return flags;
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+riscv_handle_option (struct gcc_options *opts,
+ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
+ const struct cl_decoded_option *decoded,
+ location_t loc ATTRIBUTE_UNUSED)
+{
+ switch (decoded->opt_index)
+ {
+ case OPT_march_:
+ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
+ return true;
+
+ default:
+ return true;
+ }
+}
+
+/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
+static const struct default_options riscv_option_optimization_table[] =
+ {
+ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
+ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
+ { OPT_LEVELS_SIZE, OPT_msave_restore, NULL, 1 },
+ { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+
+#undef TARGET_OPTION_OPTIMIZATION_TABLE
+#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT \
+ | riscv_flags_from_arch_string (RISCV_ARCH_STRING_DEFAULT) \
+ | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION riscv_handle_option
+
+struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
diff -urN empty/gcc/config/riscv/constraints.md gcc-5.2.0/gcc/config/riscv/constraints.md
--- gcc-5.2.0/gcc/config/riscv/constraints.md 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/constraints.md 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,90 @@
+;; Constraint definitions for RISC-V target.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
+ "A floating-point register (if available).")
+
+(define_register_constraint "b" "ALL_REGS"
+ "@internal")
+
+(define_register_constraint "j" "T_REGS"
+ "@internal")
+
+;; Integer constraints
+
+(define_constraint "Z"
+ "@internal"
+ (and (match_code "const_int")
+ (match_test "1")))
+
+(define_constraint "I"
+ "An I-type 12-bit signed immediate."
+ (and (match_code "const_int")
+ (match_test "SMALL_OPERAND (ival)")))
+
+(define_constraint "J"
+ "Integer zero."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Floating-point zero."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; General constraints
+
+(define_constraint "Q"
+ "@internal"
+ (match_operand 0 "const_arith_operand"))
+
+(define_memory_constraint "A"
+ "An address that is held in a general-purpose register."
+ (and (match_code "mem")
+ (match_test "GET_CODE(XEXP(op,0)) == REG")))
+
+(define_constraint "S"
+ "@internal
+ A constant call address."
+ (and (match_operand 0 "call_insn_operand")
+ (match_test "CONSTANT_P (op)")))
+
+(define_constraint "T"
+ "@internal
+ A constant @code{move_operand}."
+ (and (match_operand 0 "move_operand")
+ (match_test "CONSTANT_P (op)")))
+
+(define_memory_constraint "W"
+ "@internal
+ A memory address based on a member of @code{BASE_REG_CLASS}."
+ (and (match_code "mem")
+ (match_operand 0 "memory_operand")))
+
+(define_constraint "YG"
+ "@internal
+ A vector zero."
+ (and (match_code "const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
diff -urN empty/gcc/config/riscv/default-32.h gcc-5.2.0/gcc/config/riscv/default-32.h
--- gcc-5.2.0/gcc/config/riscv/default-32.h 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/default-32.h 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,22 @@
+/* Definitions of target machine for GCC, for RISC-V,
+ defaulting to 32-bit code generation.
+
+ Copyright (C) 1999-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define TARGET_64BIT_DEFAULT 0
diff -urN empty/gcc/config/riscv/elf.h gcc-5.2.0/gcc/config/riscv/elf.h
--- gcc-5.2.0/gcc/config/riscv/elf.h 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/elf.h 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,31 @@
+/* Target macros for riscv*-elf targets.
+ Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Leave the linker script to choose the appropriate libraries. */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define NO_IMPLICIT_EXTERN_C 1
diff -urN empty/gcc/config/riscv/generic.md gcc-5.2.0/gcc/config/riscv/generic.md
--- gcc-5.2.0/gcc/config/riscv/generic.md 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/generic.md 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,98 @@
+;; Generic DFA-based pipeline description for RISC-V targets.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This file is derived from the old define_function_unit description.
+;; Each reservation can be overridden on a processor-by-processor basis.
+
+(define_insn_reservation "generic_alu" 1
+ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
+ "alu")
+
+(define_insn_reservation "generic_load" 3
+ (eq_attr "type" "load,fpload,fpidxload")
+ "alu")
+
+(define_insn_reservation "generic_store" 1
+ (eq_attr "type" "store,fpstore,fpidxstore")
+ "alu")
+
+(define_insn_reservation "generic_xfer" 2
+ (eq_attr "type" "mfc,mtc")
+ "alu")
+
+(define_insn_reservation "generic_branch" 1
+ (eq_attr "type" "branch,jump,call")
+ "alu")
+
+(define_insn_reservation "generic_imul" 17
+ (eq_attr "type" "imul")
+ "imuldiv*17")
+
+(define_insn_reservation "generic_idiv" 38
+ (eq_attr "type" "idiv")
+ "imuldiv*38")
+
+(define_insn_reservation "generic_fcvt" 1
+ (eq_attr "type" "fcvt")
+ "alu")
+
+(define_insn_reservation "generic_fmove" 2
+ (eq_attr "type" "fmove")
+ "alu")
+
+(define_insn_reservation "generic_fcmp" 3
+ (eq_attr "type" "fcmp")
+ "alu")
+
+(define_insn_reservation "generic_fadd" 4
+ (eq_attr "type" "fadd")
+ "alu")
+
+(define_insn_reservation "generic_fmul_single" 7
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fmul_double" 8
+ (and (eq_attr "type" "fmul,fmadd")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_fdiv_single" 23
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fdiv_double" 36
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "mode" "DF"))
+ "alu")
+
+(define_insn_reservation "generic_fsqrt_single" 54
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "SF"))
+ "alu")
+
+(define_insn_reservation "generic_fsqrt_double" 112
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "mode" "DF"))
+ "alu")
diff -urN empty/gcc/config/riscv/linux.h gcc-5.2.0/gcc/config/riscv/linux.h
--- gcc-5.2.0/gcc/config/riscv/linux.h 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/linux.h 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,60 @@
+/* Definitions for RISC-V GNU/Linux systems with ELF format.
+ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
+ 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do { \
+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
+ /* The GNU C++ standard library requires this. */ \
+ if (c_dialect_cxx ()) \
+ builtin_define ("_GNU_SOURCE"); \
+ } while (0)
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+
+/* Borrowed from sparc/linux.h */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
+ %{static:-static}}"
+
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{pthread:-lpthread} \
+%{shared:-lc} \
+%{!shared: \
+ %{profile:-lc_p} %{!profile:-lc}}"
+
+/* Similar to standard Linux, but adding -ffast-math support. */
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
diff -urN empty/gcc/config/riscv/linux64.h gcc-5.2.0/gcc/config/riscv/linux64.h
--- gcc-5.2.0/gcc/config/riscv/linux64.h 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/linux64.h 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,43 @@
+/* Definitions for 64-bit RISC-V GNU/Linux systems with ELF format.
+ Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Force the default ABI flags onto the command line
+ in order to make the other specs easier to write. */
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{pthread:-lpthread} \
+%{shared:-lc} \
+%{!shared: \
+ %{profile:-lc_p} %{!profile:-lc}}"
+
+#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
+#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
+
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
+ %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
+ %{static:-static}} \
+%{" OPT_ARCH64 ":-melf64lriscv} \
+%{" OPT_ARCH32 ":-melf32lriscv}"
diff -urN empty/gcc/config/riscv/opcode-riscv.h gcc-5.2.0/gcc/config/riscv/opcode-riscv.h
--- gcc-5.2.0/gcc/config/riscv/opcode-riscv.h 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/opcode-riscv.h 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,149 @@
+/* RISC-V ISA encoding.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+ Based on MIPS target for GNU compiler.
+
+This file is part of GDB, GAS, and the GNU binutils.
+
+GDB, GAS, and the GNU binutils are free software; you can redistribute
+them and/or modify them under the terms of the GNU General Public
+License as published by the Free Software Foundation; either version
+1, or (at your option) any later version.
+
+GDB, GAS, and the GNU binutils are distributed in the hope that they
+will be useful, but WITHOUT ANY WARRANTY; without even the implied
+warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this file; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _RISCV_H_
+#define _RISCV_H_
+
+#define RV_X(x, s, n) (((x) >> (s)) & ((1<<(n))-1))
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+
+#define EXTRACT_ITYPE_IMM(x) \
+ (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
+#define EXTRACT_STYPE_IMM(x) \
+ (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
+#define EXTRACT_SBTYPE_IMM(x) \
+ ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
+#define EXTRACT_UTYPE_IMM(x) \
+ ((RV_X(x, 12, 20) << 20) | (RV_IMM_SIGN(x) << 32))
+#define EXTRACT_UJTYPE_IMM(x) \
+ ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
+
+#define ENCODE_ITYPE_IMM(x) \
+ (RV_X(x, 0, 12) << 20)
+#define ENCODE_STYPE_IMM(x) \
+ ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
+#define ENCODE_SBTYPE_IMM(x) \
+ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
+#define ENCODE_UTYPE_IMM(x) \
+ (RV_X(x, 12, 20) << 12)
+#define ENCODE_UJTYPE_IMM(x) \
+ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
+
+#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
+#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
+#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
+#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
+#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
+
+#define RISCV_RTYPE(insn, rd, rs1, rs2) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
+#define RISCV_ITYPE(insn, rd, rs1, imm) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
+#define RISCV_STYPE(insn, rs1, rs2, imm) \
+ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
+#define RISCV_SBTYPE(insn, rs1, rs2, target) \
+ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
+#define RISCV_UTYPE(insn, rd, bigimm) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
+#define RISCV_UJTYPE(insn, rd, target) \
+ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
+
+#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
+
+#define RISCV_CONST_HIGH_PART(VALUE) \
+ (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
+#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
+
+/* RV fields */
+
+#define OP_MASK_OP 0x7f
+#define OP_SH_OP 0
+#define OP_MASK_RS2 0x1f
+#define OP_SH_RS2 20
+#define OP_MASK_RS1 0x1f
+#define OP_SH_RS1 15
+#define OP_MASK_RS3 0x1f
+#define OP_SH_RS3 27
+#define OP_MASK_RD 0x1f
+#define OP_SH_RD 7
+#define OP_MASK_SHAMT 0x3f
+#define OP_SH_SHAMT 20
+#define OP_MASK_SHAMTW 0x1f
+#define OP_SH_SHAMTW 20
+#define OP_MASK_RM 0x7
+#define OP_SH_RM 12
+#define OP_MASK_PRED 0xf
+#define OP_SH_PRED 24
+#define OP_MASK_SUCC 0xf
+#define OP_SH_SUCC 20
+#define OP_MASK_AQ 0x1
+#define OP_SH_AQ 26
+#define OP_MASK_RL 0x1
+#define OP_SH_RL 25
+
+#define OP_MASK_VRD 0x1f
+#define OP_SH_VRD 7
+#define OP_MASK_VRS 0x1f
+#define OP_SH_VRS 15
+#define OP_MASK_VRT 0x1f
+#define OP_SH_VRT 20
+#define OP_MASK_VRR 0x1f
+#define OP_SH_VRR 25
+
+#define OP_MASK_VFD 0x1f
+#define OP_SH_VFD 7
+#define OP_MASK_VFS 0x1f
+#define OP_SH_VFS 15
+#define OP_MASK_VFT 0x1f
+#define OP_SH_VFT 20
+#define OP_MASK_VFR 0x1f
+#define OP_SH_VFR 25
+
+#define OP_MASK_IMMNGPR 0x3f
+#define OP_SH_IMMNGPR 20
+#define OP_MASK_IMMNFPR 0x3f
+#define OP_SH_IMMNFPR 26
+#define OP_MASK_IMMSEGNELM 0x1f
+#define OP_SH_IMMSEGNELM 17
+#define OP_MASK_IMMSEGSTNELM 0x1f
+#define OP_SH_IMMSEGSTNELM 12
+#define OP_MASK_CUSTOM_IMM 0x7f
+#define OP_SH_CUSTOM_IMM 25
+
+#define LINK_REG 1
+
+#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
+#define RISCV_JUMP_ALIGN_BITS 1
+#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
+#define RISCV_JUMP_REACH ((1ULL<<RISCV_JUMP_BITS)*RISCV_JUMP_ALIGN)
+
+#define RISCV_IMM_BITS 12
+#define RISCV_BIGIMM_BITS (32-RISCV_IMM_BITS)
+#define RISCV_IMM_REACH (1LL<<RISCV_IMM_BITS)
+#define RISCV_BIGIMM_REACH (1LL<<RISCV_BIGIMM_BITS)
+#define RISCV_BRANCH_BITS RISCV_IMM_BITS
+#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS
+#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS)
+#define RISCV_BRANCH_REACH (RISCV_IMM_REACH*RISCV_BRANCH_ALIGN)
+
+#include "riscv-opc.h"
+
+#endif /* _RISCV_H_ */
diff -urN empty/gcc/config/riscv/peephole.md gcc-5.2.0/gcc/config/riscv/peephole.md
--- gcc-5.2.0/gcc/config/riscv/peephole.md 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/peephole.md 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,100 @@
+;;........................
+;; DI -> SI optimizations
+;;........................
+
+;; Simplify (int)(a + 1), etc.
+(define_peephole2
+ [(set (match_operand:DI 0 "register_operand")
+ (match_operator:DI 4 "modular_operator"
+ [(match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "arith_operand")]))
+ (set (match_operand:SI 3 "register_operand")
+ (truncate:SI (match_dup 0)))]
+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
+ && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
+ [(set (match_dup 3)
+ (truncate:SI
+ (match_op_dup:DI 4
+ [(match_operand:DI 1 "register_operand")
+ (match_operand:DI 2 "arith_operand")])))])
+
+;; Simplify (int)a + 1, etc.
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (truncate:SI (match_operand:DI 1 "register_operand")))
+ (set (match_operand:SI 3 "register_operand")
+ (match_operator:SI 4 "modular_operator"
+ [(match_dup 0)
+ (match_operand:SI 2 "arith_operand")]))]
+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 3)
+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
+
+;; Simplify -(int)a, etc.
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (truncate:SI (match_operand:DI 2 "register_operand")))
+ (set (match_operand:SI 3 "register_operand")
+ (match_operator:SI 4 "modular_operator"
+ [(match_operand:SI 1 "reg_or_0_operand")
+ (match_dup 0)]))]
+ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 3)
+ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
+
+;; Simplify PIC loads to static variables.
+;; These will go away once we figure out how to emit auipc discretely.
+(define_insn "*local_pic_load<mode>"
+ [(set (match_operand:ANYI 0 "register_operand" "=r")
+ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "<load>\t%0,%1"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_load<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "<load>\t%0,%1,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_load<mode>"
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
+ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && !TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "<load>\t%0,%1,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_loadu<mode>"
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
+ (zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
+ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
+ "<load>u\t%0,%1"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storedi<mode>"
+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "<store>\t%z1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storesi<mode>"
+ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "!TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "<store>\t%z1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storedi<mode>"
+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYF 1 "register_operand" "f"))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "<store>\t%1,%0,%2"
+ [(set (attr "length") (const_int 8))])
+(define_insn "*local_pic_storesi<mode>"
+ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
+ (match_operand:ANYF 1 "register_operand" "f"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_HARD_FLOAT && !TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
+ "<store>\t%1,%0,%2"
+ [(set (attr "length") (const_int 8))])
diff -urN empty/gcc/config/riscv/predicates.md gcc-5.2.0/gcc/config/riscv/predicates.md
|
2015-08-23 21:07:46 +02:00
|
|
|
|
--- gcc-5.2.0/gcc/config/riscv/predicates.md 1970-01-01 01:00:00.000000000 +0100
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+++ gcc-5.2.0/gcc/config/riscv/predicates.md 2015-07-17 22:36:52.319705931 +0200
|
|
|
|
|
@@ -0,0 +1,187 @@
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+;; Predicate description for RISC-V target.
|
|
|
|
|
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
|
|
|
|
|
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
|
|
|
|
|
+;; Based on MIPS target for GNU compiler.
|
|
|
|
|
+;;
|
|
|
|
|
+;; This file is part of GCC.
|
|
|
|
|
+;;
|
|
|
|
|
+;; GCC is free software; you can redistribute it and/or modify
|
|
|
|
|
+;; it under the terms of the GNU General Public License as published by
|
|
|
|
|
+;; the Free Software Foundation; either version 3, or (at your option)
|
|
|
|
|
+;; any later version.
|
|
|
|
|
+;;
|
|
|
|
|
+;; GCC is distributed in the hope that it will be useful,
|
|
|
|
|
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
+;; GNU General Public License for more details.
|
|
|
|
|
+;;
|
|
|
|
|
+;; You should have received a copy of the GNU General Public License
|
|
|
|
|
+;; along with GCC; see the file COPYING3. If not see
|
|
|
|
|
+;; <http://www.gnu.org/licenses/>.
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "const_arith_operand"
|
|
|
|
|
+ (and (match_code "const_int")
|
|
|
|
|
+ (match_test "SMALL_OPERAND (INTVAL (op))")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "arith_operand"
|
|
|
|
|
+ (ior (match_operand 0 "const_arith_operand")
|
|
|
|
|
+ (match_operand 0 "register_operand")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "sle_operand"
|
|
|
|
|
+ (and (match_code "const_int")
|
|
|
|
|
+ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "sleu_operand"
|
|
|
|
|
+ (and (match_operand 0 "sle_operand")
|
|
|
|
|
+ (match_test "INTVAL (op) + 1 != 0")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "const_0_operand"
|
|
|
|
|
+ (and (match_code "const_int,const_double,const_vector")
|
|
|
|
|
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "reg_or_0_operand"
|
|
|
|
|
+ (ior (match_operand 0 "const_0_operand")
|
|
|
|
|
+ (match_operand 0 "register_operand")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "const_1_operand"
|
|
|
|
|
+ (and (match_code "const_int,const_double,const_vector")
|
|
|
|
|
+ (match_test "op == CONST1_RTX (GET_MODE (op))")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "reg_or_1_operand"
|
|
|
|
|
+ (ior (match_operand 0 "const_1_operand")
|
|
|
|
|
+ (match_operand 0 "register_operand")))
|
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
|
|
|
|
|
+(define_predicate "branch_on_bit_operand"
|
|
|
|
|
+ (and (match_code "const_int")
|
|
|
|
|
+ (match_test "INTVAL (op) >= RISCV_IMM_BITS - 1")))
|
|
|
|
|
+
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+;; This is used for indexing into vectors, and hence only accepts const_int.
|
|
|
|
|
+(define_predicate "const_0_or_1_operand"
|
|
|
|
|
+ (and (match_code "const_int")
|
|
|
|
|
+ (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
|
|
|
|
|
+ (match_test "op == CONST1_RTX (GET_MODE (op))"))))
|
|
|
|
|
+
|
|
|
|
|
+(define_special_predicate "pc_or_label_operand"
|
|
|
|
|
+ (match_code "pc,label_ref"))
|
|
|
|
|
+
|
|
|
|
|
+;; A legitimate CONST_INT operand that takes more than one instruction
|
|
|
|
|
+;; to load.
|
|
|
|
|
+(define_predicate "splittable_const_int_operand"
|
|
|
|
|
+ (match_code "const_int")
|
|
|
|
|
+{
|
|
|
|
|
+ /* Don't handle multi-word moves this way; we don't want to introduce
|
|
|
|
|
+ the individual word-mode moves until after reload. */
|
|
|
|
|
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ /* Otherwise check whether the constant can be loaded in a single
|
|
|
|
|
+ instruction. */
|
|
|
|
|
+ return !LUI_INT (op) && !SMALL_INT (op);
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "move_operand"
|
|
|
|
|
+ (match_operand 0 "general_operand")
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type symbol_type;
|
|
|
|
|
+
|
|
|
|
|
+ /* The thinking here is as follows:
|
|
|
|
|
+
|
|
|
|
|
+ (1) The move expanders should split complex load sequences into
|
|
|
|
|
+ individual instructions. Those individual instructions can
|
|
|
|
|
+ then be optimized by all rtl passes.
|
|
|
|
|
+
|
|
|
|
|
+ (2) The target of pre-reload load sequences should not be used
|
|
|
|
|
+ to store temporary results. If the target register is only
|
|
|
|
|
+ assigned one value, reload can rematerialize that value
|
|
|
|
|
+ on demand, rather than spill it to the stack.
|
|
|
|
|
+
|
|
|
|
|
+ (3) If we allowed pre-reload passes like combine and cse to recreate
|
|
|
|
|
+ complex load sequences, we would want to be able to split the
|
|
|
|
|
+ sequences before reload as well, so that the pre-reload scheduler
|
|
|
|
|
+ can see the individual instructions. This falls foul of (2);
|
|
|
|
|
+ the splitter would be forced to reuse the target register for
|
|
|
|
|
+ intermediate results.
|
|
|
|
|
+
|
|
|
|
|
+ (4) We want to define complex load splitters for combine. These
|
|
|
|
|
+ splitters can request a temporary scratch register, which avoids
|
|
|
|
|
+ the problem in (2). They allow things like:
|
|
|
|
|
+
|
|
|
|
|
+ (set (reg T1) (high SYM))
|
|
|
|
|
+ (set (reg T2) (low (reg T1) SYM))
|
|
|
|
|
+ (set (reg X) (plus (reg T2) (const_int OFFSET)))
|
|
|
|
|
+
|
|
|
|
|
+ to be combined into:
|
|
|
|
|
+
|
|
|
|
|
+ (set (reg T3) (high SYM+OFFSET))
|
|
|
|
|
+ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
|
|
|
|
|
+
|
|
|
|
|
+ if T2 is only used this once. */
|
|
|
|
|
+ switch (GET_CODE (op))
|
|
|
|
|
+ {
|
|
|
|
|
+ case CONST_INT:
|
|
|
|
|
+ return !splittable_const_int_operand (op, mode);
|
|
|
|
|
+
|
|
|
|
|
+ case CONST:
|
|
|
|
|
+ case SYMBOL_REF:
|
|
|
|
|
+ case LABEL_REF:
|
|
|
|
|
+ return (riscv_symbolic_constant_p (op, &symbol_type)
|
|
|
|
|
+ && !riscv_hi_relocs[symbol_type]);
|
|
|
|
|
+
|
|
|
|
|
+ case HIGH:
|
|
|
|
|
+ op = XEXP (op, 0);
|
|
|
|
|
+ return riscv_symbolic_constant_p (op, &symbol_type);
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "consttable_operand"
|
|
|
|
|
+ (match_test "CONSTANT_P (op)"))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "symbolic_operand"
|
|
|
|
|
+ (match_code "const,symbol_ref,label_ref")
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type type;
|
|
|
|
|
+ return riscv_symbolic_constant_p (op, &type);
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "absolute_symbolic_operand"
|
|
|
|
|
+ (match_code "const,symbol_ref,label_ref")
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type type;
|
|
|
|
|
+ return (riscv_symbolic_constant_p (op, &type)
|
|
|
|
|
+ && type == SYMBOL_ABSOLUTE);
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "plt_symbolic_operand"
|
|
|
|
|
+ (match_code "const,symbol_ref,label_ref")
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type type;
|
|
|
|
|
+ return (riscv_symbolic_constant_p (op, &type)
|
|
|
|
|
+ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "call_insn_operand"
|
|
|
|
|
+ (ior (match_operand 0 "absolute_symbolic_operand")
|
|
|
|
|
+ (match_operand 0 "plt_symbolic_operand")
|
|
|
|
|
+ (match_operand 0 "register_operand")))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "symbol_ref_operand"
|
|
|
|
|
+ (match_code "symbol_ref"))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "modular_operator"
|
|
|
|
|
+ (match_code "plus,minus,mult,ashift"))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "equality_operator"
|
|
|
|
|
+ (match_code "eq,ne"))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "order_operator"
|
|
|
|
|
+ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "fp_order_operator"
|
|
|
|
|
+ (match_code "eq,lt,le,gt,ge"))
|
|
|
|
|
+
|
|
|
|
|
+(define_predicate "fp_unorder_operator"
|
|
|
|
|
+ (match_code "ordered,unordered"))
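As an illustration only (not part of this patch), these operator predicates are what conditional-branch patterns in the machine description match through match_operator.  A minimal sketch, in which the insn name, modes and output template are assumptions rather than the port's actual pattern:

(define_insn "*branch_order_sketch"
  [(set (pc)
        (if_then_else
          (match_operator 0 "order_operator"
            [(match_operand:DI 1 "register_operand" "r")
             (match_operand:DI 2 "register_operand" "r")])
          (label_ref (match_operand 3 "" ""))
          (pc)))]
  ""
  "b%C0\t%1,%2,%3")

The %C0 modifier stands for printing the comparison code as a branch mnemonic suffix; whether the port spells it that way is an assumption.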
|
2015-07-17 23:35:54 +02:00
|
|
|
|
diff -urN empty/gcc/config/riscv/riscv-ftypes.def gcc-5.2.0/gcc/config/riscv/riscv-ftypes.def
|
2015-08-23 21:07:46 +02:00
|
|
|
|
--- gcc-5.2.0/gcc/config/riscv/riscv-ftypes.def 1970-01-01 01:00:00.000000000 +0100
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+++ gcc-5.2.0/gcc/config/riscv/riscv-ftypes.def 2015-07-17 22:36:52.319705931 +0200
|
|
|
|
|
@@ -0,0 +1,39 @@
|
|
|
|
|
+/* Definitions of prototypes for RISC-V built-in functions.
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
|
|
|
|
|
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
|
|
|
|
|
+ Based on MIPS target for GNU compiler.
|
|
|
|
|
+
|
|
|
|
|
+This file is part of GCC.
|
|
|
|
|
+
|
|
|
|
|
+GCC is free software; you can redistribute it and/or modify
|
|
|
|
|
+it under the terms of the GNU General Public License as published by
|
|
|
|
|
+the Free Software Foundation; either version 3, or (at your option)
|
|
|
|
|
+any later version.
|
|
|
|
|
+
|
|
|
|
|
+GCC is distributed in the hope that it will be useful,
|
|
|
|
|
+but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
+GNU General Public License for more details.
|
|
|
|
|
+
|
|
|
|
|
+You should have received a copy of the GNU General Public License
|
|
|
|
|
+along with GCC; see the file COPYING3. If not see
|
|
|
|
|
+<http://www.gnu.org/licenses/>. */
|
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
|
|
|
|
|
+   RISC-V built-in functions, where:
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+ NARGS is the number of arguments.
|
|
|
|
|
+ LIST contains the return-type code followed by the codes for each
|
|
|
|
|
+ argument type.
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+ Argument- and return-type codes are either modes or one of the following:
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+ VOID for void_type_node
|
|
|
|
|
+ INT for integer_type_node
|
|
|
|
|
+ POINTER for ptr_type_node
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+   (we don't use PTR because that's an ANSI-compatibility macro).
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+ Please keep this list lexicographically sorted by the LIST argument. */
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DEF_RISCV_FTYPE (1, (VOID, VOID))
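For context, a .def file like this is normally consumed as an X-macro: the compiler defines DEF_RISCV_FTYPE, includes the file, and lets every entry expand into an enumerator.  The sketch below follows the MIPS scheme; the enum and helper-macro names are assumptions, since the consuming code is not part of this excerpt.

/* Hypothetical consumer of riscv-ftypes.def (names mirror the MIPS port
   and are assumptions).  DEF_RISCV_FTYPE (1, (VOID, VOID)) expands to
   the enumerator RISCV_VOID_FTYPE_VOID.  */
#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B

enum riscv_function_type {
#define DEF_RISCV_FTYPE(NARGS, ARGS) RISCV_FTYPE_NAME##NARGS ARGS,
#include "config/riscv/riscv-ftypes.def"
#undef DEF_RISCV_FTYPE
  RISCV_MAX_FTYPE_MAX
};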
|
|
|
|
|
diff -urN empty/gcc/config/riscv/riscv-modes.def gcc-5.2.0/gcc/config/riscv/riscv-modes.def
|
2015-08-23 21:07:46 +02:00
|
|
|
|
--- gcc-5.2.0/gcc/config/riscv/riscv-modes.def 1970-01-01 01:00:00.000000000 +0100
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+++ gcc-5.2.0/gcc/config/riscv/riscv-modes.def 2015-07-17 22:36:52.319705931 +0200
|
|
|
|
|
@@ -0,0 +1,26 @@
|
|
|
|
|
+/* Extra machine modes for RISC-V target.
|
|
|
|
|
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
|
|
|
|
|
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
|
|
|
|
|
+ Based on MIPS target for GNU compiler.
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+This file is part of GCC.
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+GCC is free software; you can redistribute it and/or modify
|
|
|
|
|
+it under the terms of the GNU General Public License as published by
|
|
|
|
|
+the Free Software Foundation; either version 3, or (at your option)
|
|
|
|
|
+any later version.
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+GCC is distributed in the hope that it will be useful,
|
|
|
|
|
+but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
+GNU General Public License for more details.
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+You should have received a copy of the GNU General Public License
|
|
|
|
|
+along with GCC; see the file COPYING3. If not see
|
|
|
|
|
+<http://www.gnu.org/licenses/>. */
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+FLOAT_MODE (TF, 16, ieee_quad_format);
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+/* Vector modes. */
|
|
|
|
|
+VECTOR_MODES (INT, 4);       /* V4QI V2HI */
|
|
|
|
|
+VECTOR_MODES (FLOAT, 4);     /* V2HF */
|
|
|
|
|
diff -urN empty/gcc/config/riscv/riscv-opc.h gcc-5.2.0/gcc/config/riscv/riscv-opc.h
|
2015-08-23 21:07:46 +02:00
|
|
|
|
--- gcc-5.2.0/gcc/config/riscv/riscv-opc.h 1970-01-01 01:00:00.000000000 +0100
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+++ gcc-5.2.0/gcc/config/riscv/riscv-opc.h 2015-07-17 22:36:52.319705931 +0200
|
|
|
|
|
@@ -0,0 +1,1348 @@
|
|
|
|
|
+/* Automatically generated by parse-opcodes */
|
|
|
|
|
+#ifndef RISCV_ENCODING_H
|
|
|
|
|
+#define RISCV_ENCODING_H
|
|
|
|
|
+#define MATCH_ADD 0x33
|
|
|
|
|
+#define MASK_ADD 0xfe00707f
|
|
|
|
|
+#define MATCH_ADDI 0x13
|
|
|
|
|
+#define MASK_ADDI 0x707f
|
|
|
|
|
+#define MATCH_ADDIW 0x1b
|
|
|
|
|
+#define MASK_ADDIW 0x707f
|
|
|
|
|
+#define MATCH_ADDW 0x3b
|
|
|
|
|
+#define MASK_ADDW 0xfe00707f
|
|
|
|
|
+#define MATCH_AMOADD_D 0x302f
|
|
|
|
|
+#define MASK_AMOADD_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOADD_W 0x202f
|
|
|
|
|
+#define MASK_AMOADD_W 0xf800707f
|
|
|
|
|
+#define MATCH_AMOAND_D 0x6000302f
|
|
|
|
|
+#define MASK_AMOAND_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOAND_W 0x6000202f
|
|
|
|
|
+#define MASK_AMOAND_W 0xf800707f
|
|
|
|
|
+#define MATCH_AMOMAX_D 0xa000302f
|
|
|
|
|
+#define MASK_AMOMAX_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOMAX_W 0xa000202f
|
|
|
|
|
+#define MASK_AMOMAX_W 0xf800707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_AMOMAXU_D 0xe000302f
|
|
|
|
|
+#define MASK_AMOMAXU_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOMAXU_W 0xe000202f
|
|
|
|
|
+#define MASK_AMOMAXU_W 0xf800707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_AMOMIN_D 0x8000302f
|
|
|
|
|
+#define MASK_AMOMIN_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOMIN_W 0x8000202f
|
|
|
|
|
+#define MASK_AMOMIN_W 0xf800707f
|
|
|
|
|
+#define MATCH_AMOMINU_D 0xc000302f
|
|
|
|
|
+#define MASK_AMOMINU_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOMINU_W 0xc000202f
|
|
|
|
|
+#define MASK_AMOMINU_W 0xf800707f
|
|
|
|
|
+#define MATCH_AMOOR_D 0x4000302f
|
|
|
|
|
+#define MASK_AMOOR_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOOR_W 0x4000202f
|
|
|
|
|
+#define MASK_AMOOR_W 0xf800707f
|
|
|
|
|
+#define MATCH_AMOSWAP_D 0x800302f
|
|
|
|
|
+#define MASK_AMOSWAP_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOSWAP_W 0x800202f
|
|
|
|
|
+#define MASK_AMOSWAP_W 0xf800707f
|
|
|
|
|
+#define MATCH_AMOXOR_D 0x2000302f
|
|
|
|
|
+#define MASK_AMOXOR_D 0xf800707f
|
|
|
|
|
+#define MATCH_AMOXOR_W 0x2000202f
|
|
|
|
|
+#define MASK_AMOXOR_W 0xf800707f
|
|
|
|
|
+#define MATCH_AND 0x7033
|
|
|
|
|
+#define MASK_AND 0xfe00707f
|
|
|
|
|
+#define MATCH_ANDI 0x7013
|
|
|
|
|
+#define MASK_ANDI 0x707f
|
|
|
|
|
+#define MATCH_AUIPC 0x17
|
|
|
|
|
+#define MASK_AUIPC 0x7f
|
|
|
|
|
+#define MATCH_BEQ 0x63
|
|
|
|
|
+#define MASK_BEQ 0x707f
|
|
|
|
|
+#define MATCH_BGE 0x5063
|
|
|
|
|
+#define MASK_BGE 0x707f
|
|
|
|
|
+#define MATCH_BGEU 0x7063
|
|
|
|
|
+#define MASK_BGEU 0x707f
|
|
|
|
|
+#define MATCH_BLT 0x4063
|
|
|
|
|
+#define MASK_BLT 0x707f
|
|
|
|
|
+#define MATCH_BLTU 0x6063
|
|
|
|
|
+#define MASK_BLTU 0x707f
|
|
|
|
|
+#define MATCH_BNE 0x1063
|
|
|
|
|
+#define MASK_BNE 0x707f
|
|
|
|
|
+#define MATCH_C_ADD 0x1000
|
|
|
|
|
+#define MASK_C_ADD 0xf003
|
|
|
|
|
+#define MATCH_C_ADD3 0xa000
|
|
|
|
|
+#define MASK_C_ADD3 0xe063
|
|
|
|
|
+#define MATCH_C_ADDI 0xc002
|
|
|
|
|
+#define MASK_C_ADDI 0xe003
|
|
|
|
|
+#define MATCH_C_ADDI16SP 0xc002
|
|
|
|
|
+#define MASK_C_ADDI16SP 0xef83
|
|
|
|
|
+#define MATCH_C_ADDI4SPN 0xa001
|
|
|
|
|
+#define MASK_C_ADDI4SPN 0xe003
|
|
|
|
|
+#define MATCH_C_ADDIN 0x8001
|
|
|
|
|
+#define MASK_C_ADDIN 0xe063
|
|
|
|
|
+#define MATCH_C_ADDIW 0xe002
|
|
|
|
|
+#define MASK_C_ADDIW 0xe003
|
|
|
|
|
+#define MATCH_C_ADDW 0x9000
|
|
|
|
|
+#define MASK_C_ADDW 0xf003
|
|
|
|
|
+#define MATCH_C_AND3 0xa060
|
|
|
|
|
+#define MASK_C_AND3 0xe063
|
|
|
|
|
+#define MATCH_C_ANDI 0xe002
|
|
|
|
|
+#define MASK_C_ANDI 0xe003
|
|
|
|
|
+#define MATCH_C_ANDIN 0x8061
|
|
|
|
|
+#define MASK_C_ANDIN 0xe063
|
|
|
|
|
+#define MATCH_C_BEQZ 0x4002
|
|
|
|
|
+#define MASK_C_BEQZ 0xe003
|
|
|
|
|
+#define MATCH_C_BGEZ 0xe001
|
|
|
|
|
+#define MASK_C_BGEZ 0xe003
|
|
|
|
|
+#define MATCH_C_BLTZ 0x6001
|
|
|
|
|
+#define MASK_C_BLTZ 0xe003
|
|
|
|
|
+#define MATCH_C_BNEZ 0x6002
|
|
|
|
|
+#define MASK_C_BNEZ 0xe003
|
|
|
|
|
+#define MATCH_C_EBREAK 0x1000
|
|
|
|
|
+#define MASK_C_EBREAK 0xffff
|
|
|
|
|
+#define MATCH_C_J 0x2
|
|
|
|
|
+#define MASK_C_J 0xe003
|
|
|
|
|
+#define MATCH_C_JAL 0x2002
|
|
|
|
|
+#define MASK_C_JAL 0xe003
|
|
|
|
|
+#define MATCH_C_JALR 0xa002
|
|
|
|
|
+#define MASK_C_JALR 0xf07f
|
|
|
|
|
+#define MATCH_C_JR 0x8002
|
|
|
|
|
+#define MASK_C_JR 0xf07f
|
|
|
|
|
+#define MATCH_C_LD 0xe000
|
|
|
|
|
+#define MASK_C_LD 0xe003
|
|
|
|
|
+#define MATCH_C_LDSP 0xe001
|
|
|
|
|
+#define MASK_C_LDSP 0xe003
|
|
|
|
|
+#define MATCH_C_LI 0x8002
|
|
|
|
|
+#define MASK_C_LI 0xe003
|
|
|
|
|
+#define MATCH_C_LUI 0xa002
|
|
|
|
|
+#define MASK_C_LUI 0xe003
|
|
|
|
|
+#define MATCH_C_LW 0xc000
|
|
|
|
|
+#define MASK_C_LW 0xe003
|
|
|
|
|
+#define MATCH_C_LWSP 0xc001
|
|
|
|
|
+#define MASK_C_LWSP 0xe003
|
|
|
|
|
+#define MATCH_C_MV 0x0
|
|
|
|
|
+#define MASK_C_MV 0xf003
|
|
|
|
|
+#define MATCH_C_OR3 0xa040
|
|
|
|
|
+#define MASK_C_OR3 0xe063
|
|
|
|
|
+#define MATCH_C_ORIN 0x8041
|
|
|
|
|
+#define MASK_C_ORIN 0xe063
|
|
|
|
|
+#define MATCH_C_SD 0x6000
|
|
|
|
|
+#define MASK_C_SD 0xe003
|
|
|
|
|
+#define MATCH_C_SDSP 0x6001
|
|
|
|
|
+#define MASK_C_SDSP 0xe003
|
|
|
|
|
+#define MATCH_C_SLL 0x6400
|
|
|
|
|
+#define MASK_C_SLL 0xfc63
|
|
|
|
|
+#define MATCH_C_SLLI 0x1
|
|
|
|
|
+#define MASK_C_SLLI 0xe003
|
|
|
|
|
+#define MATCH_C_SLLIW 0x8001
|
|
|
|
|
+#define MASK_C_SLLIW 0xe003
|
|
|
|
|
+#define MATCH_C_SLLR 0x6c00
|
|
|
|
|
+#define MASK_C_SLLR 0xfc63
|
|
|
|
|
+#define MATCH_C_SLT 0x6440
|
|
|
|
|
+#define MASK_C_SLT 0xfc63
|
|
|
|
|
+#define MATCH_C_SLTR 0x6c40
|
|
|
|
|
+#define MASK_C_SLTR 0xfc63
|
|
|
|
|
+#define MATCH_C_SLTU 0x6460
|
|
|
|
|
+#define MASK_C_SLTU 0xfc63
|
|
|
|
|
+#define MATCH_C_SLTUR 0x6c60
|
|
|
|
|
+#define MASK_C_SLTUR 0xfc63
|
|
|
|
|
+#define MATCH_C_SRA 0x6020
|
|
|
|
|
+#define MASK_C_SRA 0xfc63
|
|
|
|
|
+#define MATCH_C_SRAI 0x2000
|
|
|
|
|
+#define MASK_C_SRAI 0xe003
|
|
|
|
|
+#define MATCH_C_SRL 0x6420
|
|
|
|
|
+#define MASK_C_SRL 0xfc63
|
|
|
|
|
+#define MATCH_C_SRLI 0x2001
|
|
|
|
|
+#define MASK_C_SRLI 0xe003
|
|
|
|
|
+#define MATCH_C_SRLR 0x6c20
|
|
|
|
|
+#define MASK_C_SRLR 0xfc63
|
|
|
|
|
+#define MATCH_C_SUB 0x8000
|
|
|
|
|
+#define MASK_C_SUB 0xf003
|
|
|
|
|
+#define MATCH_C_SUB3 0xa020
|
|
|
|
|
+#define MASK_C_SUB3 0xe063
|
|
|
|
|
+#define MATCH_C_SW 0x4000
|
|
|
|
|
+#define MASK_C_SW 0xe003
|
|
|
|
|
+#define MATCH_C_SWSP 0x4001
|
|
|
|
|
+#define MASK_C_SWSP 0xe003
|
|
|
|
|
+#define MATCH_C_XOR 0x6000
|
|
|
|
|
+#define MASK_C_XOR 0xfc63
|
|
|
|
|
+#define MATCH_C_XORIN 0x8021
|
|
|
|
|
+#define MASK_C_XORIN 0xe063
|
|
|
|
|
+#define MATCH_CSRRC 0x3073
|
|
|
|
|
+#define MASK_CSRRC 0x707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_CSRRCI 0x7073
|
|
|
|
|
+#define MASK_CSRRCI 0x707f
|
|
|
|
|
+#define MATCH_CSRRS 0x2073
|
|
|
|
|
+#define MASK_CSRRS 0x707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_CSRRSI 0x6073
|
|
|
|
|
+#define MASK_CSRRSI 0x707f
|
|
|
|
|
+#define MATCH_CSRRW 0x1073
|
|
|
|
|
+#define MASK_CSRRW 0x707f
|
|
|
|
|
+#define MATCH_CSRRWI 0x5073
|
|
|
|
|
+#define MASK_CSRRWI 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM0 0xb
|
|
|
|
|
+#define MASK_CUSTOM0 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM0_RD 0x400b
|
|
|
|
|
+#define MASK_CUSTOM0_RD 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM0_RD_RS1 0x600b
|
|
|
|
|
+#define MASK_CUSTOM0_RD_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
|
|
|
|
|
+#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM0_RS1 0x200b
|
|
|
|
|
+#define MASK_CUSTOM0_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM0_RS1_RS2 0x300b
|
|
|
|
|
+#define MASK_CUSTOM0_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM1 0x2b
|
|
|
|
|
+#define MASK_CUSTOM1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM1_RD 0x402b
|
|
|
|
|
+#define MASK_CUSTOM1_RD 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM1_RD_RS1 0x602b
|
|
|
|
|
+#define MASK_CUSTOM1_RD_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
|
|
|
|
|
+#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM1_RS1 0x202b
|
|
|
|
|
+#define MASK_CUSTOM1_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM1_RS1_RS2 0x302b
|
|
|
|
|
+#define MASK_CUSTOM1_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM2 0x5b
|
|
|
|
|
+#define MASK_CUSTOM2 0x707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_CUSTOM2_RD 0x405b
|
|
|
|
|
+#define MASK_CUSTOM2_RD 0x707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_CUSTOM2_RD_RS1 0x605b
|
|
|
|
|
+#define MASK_CUSTOM2_RD_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
|
|
|
|
|
+#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM2_RS1 0x205b
|
|
|
|
|
+#define MASK_CUSTOM2_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM2_RS1_RS2 0x305b
|
|
|
|
|
+#define MASK_CUSTOM2_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM3 0x7b
|
|
|
|
|
+#define MASK_CUSTOM3 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM3_RD 0x407b
|
|
|
|
|
+#define MASK_CUSTOM3_RD 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM3_RD_RS1 0x607b
|
|
|
|
|
+#define MASK_CUSTOM3_RD_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
|
|
|
|
|
+#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM3_RS1 0x207b
|
|
|
|
|
+#define MASK_CUSTOM3_RS1 0x707f
|
|
|
|
|
+#define MATCH_CUSTOM3_RS1_RS2 0x307b
|
|
|
|
|
+#define MASK_CUSTOM3_RS1_RS2 0x707f
|
|
|
|
|
+#define MATCH_DIV 0x2004033
|
|
|
|
|
+#define MASK_DIV 0xfe00707f
|
|
|
|
|
+#define MATCH_DIVU 0x2005033
|
|
|
|
|
+#define MASK_DIVU 0xfe00707f
|
|
|
|
|
+#define MATCH_DIVUW 0x200503b
|
|
|
|
|
+#define MASK_DIVUW 0xfe00707f
|
|
|
|
|
+#define MATCH_DIVW 0x200403b
|
|
|
|
|
+#define MASK_DIVW 0xfe00707f
|
|
|
|
|
+#define MATCH_EBREAK 0x100073
|
|
|
|
|
+#define MASK_EBREAK 0xffffffff
|
|
|
|
|
+#define MATCH_ECALL 0x73
|
|
|
|
|
+#define MASK_ECALL 0xffffffff
|
|
|
|
|
+#define MATCH_ERET 0x10000073
|
|
|
|
|
+#define MASK_ERET 0xffffffff
|
|
|
|
|
+#define MATCH_FADD_D 0x2000053
|
|
|
|
|
+#define MASK_FADD_D 0xfe00007f
|
|
|
|
|
+#define MATCH_FADD_H 0x4000053
|
|
|
|
|
+#define MASK_FADD_H 0xfe00007f
|
|
|
|
|
+#define MATCH_FADD_S 0x53
|
|
|
|
|
+#define MASK_FADD_S 0xfe00007f
|
|
|
|
|
+#define MATCH_FCLASS_D 0xe2001053
|
|
|
|
|
+#define MASK_FCLASS_D 0xfff0707f
|
|
|
|
|
+#define MATCH_FCLASS_S 0xe0001053
|
|
|
|
|
+#define MASK_FCLASS_S 0xfff0707f
|
|
|
|
|
+#define MATCH_FCVT_D_H 0x8c000053
|
|
|
|
|
+#define MASK_FCVT_D_H 0xfff0007f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_FCVT_D_L 0xd2200053
|
|
|
|
|
+#define MASK_FCVT_D_L 0xfff0007f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_FCVT_D_LU 0xd2300053
|
|
|
|
|
+#define MASK_FCVT_D_LU 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_D_S 0x42000053
|
|
|
|
|
+#define MASK_FCVT_D_S 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_D_W 0xd2000053
|
|
|
|
|
+#define MASK_FCVT_D_W 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_D_WU 0xd2100053
|
|
|
|
|
+#define MASK_FCVT_D_WU 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_H_D 0x92000053
|
|
|
|
|
+#define MASK_FCVT_H_D 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_H_L 0x64000053
|
|
|
|
|
+#define MASK_FCVT_H_L 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_H_LU 0x6c000053
|
|
|
|
|
+#define MASK_FCVT_H_LU 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_H_S 0x90000053
|
|
|
|
|
+#define MASK_FCVT_H_S 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_H_W 0x74000053
|
|
|
|
|
+#define MASK_FCVT_H_W 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_H_WU 0x7c000053
|
|
|
|
|
+#define MASK_FCVT_H_WU 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_L_D 0xc2200053
|
|
|
|
|
+#define MASK_FCVT_L_D 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_L_H 0x44000053
|
|
|
|
|
+#define MASK_FCVT_L_H 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_L_S 0xc0200053
|
|
|
|
|
+#define MASK_FCVT_L_S 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_LU_D 0xc2300053
|
|
|
|
|
+#define MASK_FCVT_LU_D 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_LU_H 0x4c000053
|
|
|
|
|
+#define MASK_FCVT_LU_H 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_LU_S 0xc0300053
|
|
|
|
|
+#define MASK_FCVT_LU_S 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_S_D 0x40100053
|
|
|
|
|
+#define MASK_FCVT_S_D 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_S_H 0x84000053
|
|
|
|
|
+#define MASK_FCVT_S_H 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_S_L 0xd0200053
|
|
|
|
|
+#define MASK_FCVT_S_L 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_S_LU 0xd0300053
|
|
|
|
|
+#define MASK_FCVT_S_LU 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_S_W 0xd0000053
|
|
|
|
|
+#define MASK_FCVT_S_W 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_S_WU 0xd0100053
|
|
|
|
|
+#define MASK_FCVT_S_WU 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_W_D 0xc2000053
|
|
|
|
|
+#define MASK_FCVT_W_D 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_W_H 0x54000053
|
|
|
|
|
+#define MASK_FCVT_W_H 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_W_S 0xc0000053
|
|
|
|
|
+#define MASK_FCVT_W_S 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_WU_D 0xc2100053
|
|
|
|
|
+#define MASK_FCVT_WU_D 0xfff0007f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_FCVT_WU_H 0x5c000053
|
|
|
|
|
+#define MASK_FCVT_WU_H 0xfff0007f
|
|
|
|
|
+#define MATCH_FCVT_WU_S 0xc0100053
|
|
|
|
|
+#define MASK_FCVT_WU_S 0xfff0007f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_FDIV_D 0x1a000053
|
|
|
|
|
+#define MASK_FDIV_D 0xfe00007f
|
|
|
|
|
+#define MATCH_FDIV_H 0x1c000053
|
|
|
|
|
+#define MASK_FDIV_H 0xfe00007f
|
|
|
|
|
+#define MATCH_FDIV_S 0x18000053
|
|
|
|
|
+#define MASK_FDIV_S 0xfe00007f
|
|
|
|
|
+#define MATCH_FENCE 0xf
|
|
|
|
|
+#define MASK_FENCE 0x707f
|
|
|
|
|
+#define MATCH_FENCE_I 0x100f
|
|
|
|
|
+#define MASK_FENCE_I 0x707f
|
|
|
|
|
+#define MATCH_FEQ_D 0xa2002053
|
|
|
|
|
+#define MASK_FEQ_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FEQ_H 0xac000053
|
|
|
|
|
+#define MASK_FEQ_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FEQ_S 0xa0002053
|
|
|
|
|
+#define MASK_FEQ_S 0xfe00707f
|
|
|
|
|
+#define MATCH_FLD 0x3007
|
|
|
|
|
+#define MASK_FLD 0x707f
|
|
|
|
|
+#define MATCH_FLE_D 0xa2000053
|
|
|
|
|
+#define MASK_FLE_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FLE_H 0xbc000053
|
|
|
|
|
+#define MASK_FLE_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FLE_S 0xa0000053
|
|
|
|
|
+#define MASK_FLE_S 0xfe00707f
|
|
|
|
|
+#define MATCH_FLH 0x1007
|
|
|
|
|
+#define MASK_FLH 0x707f
|
|
|
|
|
+#define MATCH_FLT_D 0xa2001053
|
|
|
|
|
+#define MASK_FLT_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FLT_H 0xb4000053
|
|
|
|
|
+#define MASK_FLT_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FLT_S 0xa0001053
|
|
|
|
|
+#define MASK_FLT_S 0xfe00707f
|
|
|
|
|
+#define MATCH_FLW 0x2007
|
|
|
|
|
+#define MASK_FLW 0x707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_FMADD_D 0x2000043
|
|
|
|
|
+#define MASK_FMADD_D 0x600007f
|
|
|
|
|
+#define MATCH_FMADD_H 0x4000043
|
|
|
|
|
+#define MASK_FMADD_H 0x600007f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_FMADD_S 0x43
|
|
|
|
|
+#define MASK_FMADD_S 0x600007f
|
|
|
|
|
+#define MATCH_FMAX_D 0x2a001053
|
|
|
|
|
+#define MASK_FMAX_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FMAX_H 0xcc000053
|
|
|
|
|
+#define MASK_FMAX_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FMAX_S 0x28001053
|
|
|
|
|
+#define MASK_FMAX_S 0xfe00707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_FMIN_D 0x2a000053
|
|
|
|
|
+#define MASK_FMIN_D 0xfe00707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_FMIN_H 0xc4000053
|
|
|
|
|
+#define MASK_FMIN_H 0xfe00707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_FMIN_S 0x28000053
|
|
|
|
|
+#define MASK_FMIN_S 0xfe00707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_FMOVN 0x6007077
|
|
|
|
|
+#define MASK_FMOVN 0xfe00707f
|
|
|
|
|
+#define MATCH_FMOVZ 0x4007077
|
|
|
|
|
+#define MASK_FMOVZ 0xfe00707f
|
|
|
|
|
+#define MATCH_FMSUB_D 0x2000047
|
|
|
|
|
+#define MASK_FMSUB_D 0x600007f
|
|
|
|
|
+#define MATCH_FMSUB_H 0x4000047
|
|
|
|
|
+#define MASK_FMSUB_H 0x600007f
|
|
|
|
|
+#define MATCH_FMSUB_S 0x47
|
|
|
|
|
+#define MASK_FMSUB_S 0x600007f
|
|
|
|
|
+#define MATCH_FMUL_D 0x12000053
|
|
|
|
|
+#define MASK_FMUL_D 0xfe00007f
|
|
|
|
|
+#define MATCH_FMUL_H 0x14000053
|
|
|
|
|
+#define MASK_FMUL_H 0xfe00007f
|
|
|
|
|
+#define MATCH_FMUL_S 0x10000053
|
|
|
|
|
+#define MASK_FMUL_S 0xfe00007f
|
|
|
|
|
+#define MATCH_FMV_D_X 0xf2000053
|
|
|
|
|
+#define MASK_FMV_D_X 0xfff0707f
|
|
|
|
|
+#define MATCH_FMV_H_X 0xf4000053
|
|
|
|
|
+#define MASK_FMV_H_X 0xfff0707f
|
|
|
|
|
+#define MATCH_FMV_S_X 0xf0000053
|
|
|
|
|
+#define MASK_FMV_S_X 0xfff0707f
|
|
|
|
|
+#define MATCH_FMV_X_D 0xe2000053
|
|
|
|
|
+#define MASK_FMV_X_D 0xfff0707f
|
|
|
|
|
+#define MATCH_FMV_X_H 0xe4000053
|
|
|
|
|
+#define MASK_FMV_X_H 0xfff0707f
|
|
|
|
|
+#define MATCH_FMV_X_S 0xe0000053
|
|
|
|
|
+#define MASK_FMV_X_S 0xfff0707f
|
|
|
|
|
+#define MATCH_FNMADD_D 0x200004f
|
|
|
|
|
+#define MASK_FNMADD_D 0x600007f
|
|
|
|
|
+#define MATCH_FNMADD_H 0x400004f
|
|
|
|
|
+#define MASK_FNMADD_H 0x600007f
|
|
|
|
|
+#define MATCH_FNMADD_S 0x4f
|
|
|
|
|
+#define MASK_FNMADD_S 0x600007f
|
|
|
|
|
+#define MATCH_FNMSUB_D 0x200004b
|
|
|
|
|
+#define MASK_FNMSUB_D 0x600007f
|
|
|
|
|
+#define MATCH_FNMSUB_H 0x400004b
|
|
|
|
|
+#define MASK_FNMSUB_H 0x600007f
|
|
|
|
|
+#define MATCH_FNMSUB_S 0x4b
|
|
|
|
|
+#define MASK_FNMSUB_S 0x600007f
|
|
|
|
|
+#define MATCH_FRCSR 0x302073
|
|
|
|
|
+#define MASK_FRCSR 0xfffff07f
|
|
|
|
|
+#define MATCH_FRFLAGS 0x102073
|
|
|
|
|
+#define MASK_FRFLAGS 0xfffff07f
|
|
|
|
|
+#define MATCH_FRRM 0x202073
|
|
|
|
|
+#define MASK_FRRM 0xfffff07f
|
|
|
|
|
+#define MATCH_FSCSR 0x301073
|
|
|
|
|
+#define MASK_FSCSR 0xfff0707f
|
|
|
|
|
+#define MATCH_FSD 0x3027
|
|
|
|
|
+#define MASK_FSD 0x707f
|
|
|
|
|
+#define MATCH_FSFLAGS 0x101073
|
|
|
|
|
+#define MASK_FSFLAGS 0xfff0707f
|
|
|
|
|
+#define MATCH_FSFLAGSI 0x105073
|
|
|
|
|
+#define MASK_FSFLAGSI 0xfff0707f
|
|
|
|
|
+#define MATCH_FSGNJ_D 0x22000053
|
|
|
|
|
+#define MASK_FSGNJ_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJ_H 0x2c000053
|
|
|
|
|
+#define MASK_FSGNJ_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJ_S 0x20000053
|
|
|
|
|
+#define MASK_FSGNJ_S 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJN_D 0x22001053
|
|
|
|
|
+#define MASK_FSGNJN_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJN_H 0x34000053
|
|
|
|
|
+#define MASK_FSGNJN_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJN_S 0x20001053
|
|
|
|
|
+#define MASK_FSGNJN_S 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJX_D 0x22002053
|
|
|
|
|
+#define MASK_FSGNJX_D 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJX_H 0x3c000053
|
|
|
|
|
+#define MASK_FSGNJX_H 0xfe00707f
|
|
|
|
|
+#define MATCH_FSGNJX_S 0x20002053
|
|
|
|
|
+#define MASK_FSGNJX_S 0xfe00707f
|
|
|
|
|
+#define MATCH_FSH 0x1027
|
|
|
|
|
+#define MASK_FSH 0x707f
|
|
|
|
|
+#define MATCH_FSQRT_D 0x5a000053
|
|
|
|
|
+#define MASK_FSQRT_D 0xfff0007f
|
|
|
|
|
+#define MATCH_FSQRT_H 0x24000053
|
|
|
|
|
+#define MASK_FSQRT_H 0xfff0007f
|
|
|
|
|
+#define MATCH_FSQRT_S 0x58000053
|
|
|
|
|
+#define MASK_FSQRT_S 0xfff0007f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_FSRM 0x201073
|
|
|
|
|
+#define MASK_FSRM 0xfff0707f
|
|
|
|
|
+#define MATCH_FSRMI 0x205073
|
|
|
|
|
+#define MASK_FSRMI 0xfff0707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_FSUB_D 0xa000053
|
|
|
|
|
+#define MASK_FSUB_D 0xfe00007f
|
|
|
|
|
+#define MATCH_FSUB_H 0xc000053
|
|
|
|
|
+#define MASK_FSUB_H 0xfe00007f
|
|
|
|
|
+#define MATCH_FSUB_S 0x8000053
|
|
|
|
|
+#define MASK_FSUB_S 0xfe00007f
|
|
|
|
|
+#define MATCH_FSW 0x2027
|
|
|
|
|
+#define MASK_FSW 0x707f
|
|
|
|
|
+#define MATCH_HRTS 0x20500073
|
|
|
|
|
+#define MASK_HRTS 0xffffffff
|
|
|
|
|
+#define MATCH_JAL 0x6f
|
|
|
|
|
+#define MASK_JAL 0x7f
|
|
|
|
|
+#define MATCH_JALR 0x67
|
|
|
|
|
+#define MASK_JALR 0x707f
|
|
|
|
|
+#define MATCH_LB 0x3
|
|
|
|
|
+#define MASK_LB 0x707f
|
|
|
|
|
+#define MATCH_LBU 0x4003
|
|
|
|
|
+#define MASK_LBU 0x707f
|
|
|
|
|
+#define MATCH_LD 0x3003
|
|
|
|
|
+#define MASK_LD 0x707f
|
|
|
|
|
+#define MATCH_LH 0x1003
|
|
|
|
|
+#define MASK_LH 0x707f
|
|
|
|
|
+#define MATCH_LHU 0x5003
|
|
|
|
|
+#define MASK_LHU 0x707f
|
|
|
|
|
+#define MATCH_LR_D 0x1000302f
|
|
|
|
|
+#define MASK_LR_D 0xf9f0707f
|
|
|
|
|
+#define MATCH_LR_W 0x1000202f
|
|
|
|
|
+#define MASK_LR_W 0xf9f0707f
|
|
|
|
|
+#define MATCH_LUI 0x37
|
|
|
|
|
+#define MASK_LUI 0x7f
|
|
|
|
|
+#define MATCH_LW 0x2003
|
|
|
|
|
+#define MASK_LW 0x707f
|
|
|
|
|
+#define MATCH_LWU 0x6003
|
|
|
|
|
+#define MASK_LWU 0x707f
|
|
|
|
|
+#define MATCH_MOVN 0x2007077
|
|
|
|
|
+#define MASK_MOVN 0xfe00707f
|
|
|
|
|
+#define MATCH_MOVZ 0x7077
|
|
|
|
|
+#define MASK_MOVZ 0xfe00707f
|
|
|
|
|
+#define MATCH_MRTH 0x30600073
|
|
|
|
|
+#define MASK_MRTH 0xffffffff
|
|
|
|
|
+#define MATCH_MRTS 0x30500073
|
|
|
|
|
+#define MASK_MRTS 0xffffffff
|
|
|
|
|
+#define MATCH_MUL 0x2000033
|
|
|
|
|
+#define MASK_MUL 0xfe00707f
|
|
|
|
|
+#define MATCH_MULH 0x2001033
|
|
|
|
|
+#define MASK_MULH 0xfe00707f
|
|
|
|
|
+#define MATCH_MULHSU 0x2002033
|
|
|
|
|
+#define MASK_MULHSU 0xfe00707f
|
|
|
|
|
+#define MATCH_MULHU 0x2003033
|
|
|
|
|
+#define MASK_MULHU 0xfe00707f
|
|
|
|
|
+#define MATCH_MULW 0x200003b
|
|
|
|
|
+#define MASK_MULW 0xfe00707f
|
|
|
|
|
+#define MATCH_OR 0x6033
|
|
|
|
|
+#define MASK_OR 0xfe00707f
|
|
|
|
|
+#define MATCH_ORI 0x6013
|
|
|
|
|
+#define MASK_ORI 0x707f
|
|
|
|
|
+#define MATCH_RDCYCLE 0xc0002073
|
|
|
|
|
+#define MASK_RDCYCLE 0xfffff07f
|
|
|
|
|
+#define MATCH_RDCYCLEH 0xc8002073
|
|
|
|
|
+#define MASK_RDCYCLEH 0xfffff07f
|
|
|
|
|
+#define MATCH_RDINSTRET 0xc0202073
|
|
|
|
|
+#define MASK_RDINSTRET 0xfffff07f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_RDINSTRETH 0xc8202073
|
|
|
|
|
+#define MASK_RDINSTRETH 0xfffff07f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_RDTIME 0xc0102073
|
|
|
|
|
+#define MASK_RDTIME 0xfffff07f
|
|
|
|
|
+#define MATCH_RDTIMEH 0xc8102073
|
|
|
|
|
+#define MASK_RDTIMEH 0xfffff07f
|
|
|
|
|
+#define MATCH_REM 0x2006033
|
|
|
|
|
+#define MASK_REM 0xfe00707f
|
|
|
|
|
+#define MATCH_REMU 0x2007033
|
|
|
|
|
+#define MASK_REMU 0xfe00707f
|
|
|
|
|
+#define MATCH_REMUW 0x200703b
|
|
|
|
|
+#define MASK_REMUW 0xfe00707f
|
|
|
|
|
+#define MATCH_REMW 0x200603b
|
|
|
|
|
+#define MASK_REMW 0xfe00707f
|
|
|
|
|
+#define MATCH_SB 0x23
|
|
|
|
|
+#define MASK_SB 0x707f
|
|
|
|
|
+#define MATCH_SBREAK 0x100073
|
|
|
|
|
+#define MASK_SBREAK 0xffffffff
|
|
|
|
|
+#define MATCH_SC_D 0x1800302f
|
|
|
|
|
+#define MASK_SC_D 0xf800707f
|
|
|
|
|
+#define MATCH_SC_W 0x1800202f
|
|
|
|
|
+#define MASK_SC_W 0xf800707f
|
|
|
|
|
+#define MATCH_SCALL 0x73
|
|
|
|
|
+#define MASK_SCALL 0xffffffff
|
|
|
|
|
+#define MATCH_SD 0x3023
|
|
|
|
|
+#define MASK_SD 0x707f
|
|
|
|
|
+#define MATCH_SFENCE_VM 0x10100073
|
|
|
|
|
+#define MASK_SFENCE_VM 0xfff07fff
|
|
|
|
|
+#define MATCH_SH 0x1023
|
|
|
|
|
+#define MASK_SH 0x707f
|
|
|
|
|
+#define MATCH_SLL 0x1033
|
|
|
|
|
+#define MASK_SLL 0xfe00707f
|
|
|
|
|
+#define MATCH_SLLI 0x1013
|
|
|
|
|
+#define MASK_SLLI 0xfc00707f
|
|
|
|
|
+#define MATCH_SLLI_RV32 0x1013
|
|
|
|
|
+#define MASK_SLLI_RV32 0xfe00707f
|
|
|
|
|
+#define MATCH_SLLIW 0x101b
|
|
|
|
|
+#define MASK_SLLIW 0xfe00707f
|
|
|
|
|
+#define MATCH_SLLW 0x103b
|
|
|
|
|
+#define MASK_SLLW 0xfe00707f
|
|
|
|
|
+#define MATCH_SLT 0x2033
|
|
|
|
|
+#define MASK_SLT 0xfe00707f
|
|
|
|
|
+#define MATCH_SLTI 0x2013
|
|
|
|
|
+#define MASK_SLTI 0x707f
|
|
|
|
|
+#define MATCH_SLTIU 0x3013
|
|
|
|
|
+#define MASK_SLTIU 0x707f
|
|
|
|
|
+#define MATCH_SLTU 0x3033
|
|
|
|
|
+#define MASK_SLTU 0xfe00707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_SRA 0x40005033
|
|
|
|
|
+#define MASK_SRA 0xfe00707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_SRAI 0x40005013
|
|
|
|
|
+#define MASK_SRAI 0xfc00707f
|
|
|
|
|
+#define MATCH_SRAI_RV32 0x40005013
|
|
|
|
|
+#define MASK_SRAI_RV32 0xfe00707f
|
|
|
|
|
+#define MATCH_SRAIW 0x4000501b
|
|
|
|
|
+#define MASK_SRAIW 0xfe00707f
|
|
|
|
|
+#define MATCH_SRAW 0x4000503b
|
|
|
|
|
+#define MASK_SRAW 0xfe00707f
|
|
|
|
|
+#define MATCH_SRET 0x10000073
|
|
|
|
|
+#define MASK_SRET 0xffffffff
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_SRL 0x5033
|
|
|
|
|
+#define MASK_SRL 0xfe00707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_SRLI 0x5013
|
|
|
|
|
+#define MASK_SRLI 0xfc00707f
|
|
|
|
|
+#define MATCH_SRLI_RV32 0x5013
|
|
|
|
|
+#define MASK_SRLI_RV32 0xfe00707f
|
|
|
|
|
+#define MATCH_SRLIW 0x501b
|
|
|
|
|
+#define MASK_SRLIW 0xfe00707f
|
|
|
|
|
+#define MATCH_SRLW 0x503b
|
|
|
|
|
+#define MASK_SRLW 0xfe00707f
|
|
|
|
|
+#define MATCH_STOP 0x5077
|
|
|
|
|
+#define MASK_STOP 0xffffffff
|
|
|
|
|
+#define MATCH_SUB 0x40000033
|
|
|
|
|
+#define MASK_SUB 0xfe00707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_SUBW 0x4000003b
|
|
|
|
|
+#define MASK_SUBW 0xfe00707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_SW 0x2023
|
|
|
|
|
+#define MASK_SW 0x707f
|
|
|
|
|
+#define MATCH_UTIDX 0x6077
|
|
|
|
|
+#define MASK_UTIDX 0xfffff07f
|
|
|
|
|
+#define MATCH_VENQCMD 0xa00302b
|
|
|
|
|
+#define MASK_VENQCMD 0xfe007fff
|
|
|
|
|
+#define MATCH_VENQCNT 0x1000302b
|
|
|
|
|
+#define MASK_VENQCNT 0xfe007fff
|
|
|
|
|
+#define MATCH_VENQIMM1 0xc00302b
|
|
|
|
|
+#define MASK_VENQIMM1 0xfe007fff
|
|
|
|
|
+#define MATCH_VENQIMM2 0xe00302b
|
|
|
|
|
+#define MASK_VENQIMM2 0xfe007fff
|
|
|
|
|
+#define MATCH_VF 0x10202b
|
|
|
|
|
+#define MASK_VF 0x1f0707f
|
|
|
|
|
+#define MATCH_VFLD 0x1600205b
|
|
|
|
|
+#define MASK_VFLD 0xfff0707f
|
|
|
|
|
+#define MATCH_VFLSEGD 0x1600205b
|
|
|
|
|
+#define MASK_VFLSEGD 0x1ff0707f
|
|
|
|
|
+#define MATCH_VFLSEGSTD 0x1600305b
|
|
|
|
|
+#define MASK_VFLSEGSTD 0x1e00707f
|
|
|
|
|
+#define MATCH_VFLSEGSTW 0x1400305b
|
|
|
|
|
+#define MASK_VFLSEGSTW 0x1e00707f
|
|
|
|
|
+#define MATCH_VFLSEGW 0x1400205b
|
|
|
|
|
+#define MASK_VFLSEGW 0x1ff0707f
|
|
|
|
|
+#define MATCH_VFLSTD 0x1600305b
|
|
|
|
|
+#define MASK_VFLSTD 0xfe00707f
|
|
|
|
|
+#define MATCH_VFLSTW 0x1400305b
|
|
|
|
|
+#define MASK_VFLSTW 0xfe00707f
|
|
|
|
|
+#define MATCH_VFLW 0x1400205b
|
|
|
|
|
+#define MASK_VFLW 0xfff0707f
|
|
|
|
|
+#define MATCH_VFMSV_D 0x1200202b
|
|
|
|
|
+#define MASK_VFMSV_D 0xfff0707f
|
|
|
|
|
+#define MATCH_VFMSV_S 0x1000202b
|
|
|
|
|
+#define MASK_VFMSV_S 0xfff0707f
|
|
|
|
|
+#define MATCH_VFMVV 0x1000002b
|
|
|
|
|
+#define MASK_VFMVV 0xfff0707f
|
|
|
|
|
+#define MATCH_VFSD 0x1600207b
|
|
|
|
|
+#define MASK_VFSD 0xfff0707f
|
|
|
|
|
+#define MATCH_VFSSEGD 0x1600207b
|
|
|
|
|
+#define MASK_VFSSEGD 0x1ff0707f
|
|
|
|
|
+#define MATCH_VFSSEGSTD 0x1600307b
|
|
|
|
|
+#define MASK_VFSSEGSTD 0x1e00707f
|
|
|
|
|
+#define MATCH_VFSSEGSTW 0x1400307b
|
|
|
|
|
+#define MASK_VFSSEGSTW 0x1e00707f
|
|
|
|
|
+#define MATCH_VFSSEGW 0x1400207b
|
|
|
|
|
+#define MASK_VFSSEGW 0x1ff0707f
|
|
|
|
|
+#define MATCH_VFSSTD 0x1600307b
|
|
|
|
|
+#define MASK_VFSSTD 0xfe00707f
|
|
|
|
|
+#define MATCH_VFSSTW 0x1400307b
|
|
|
|
|
+#define MASK_VFSSTW 0xfe00707f
|
|
|
|
|
+#define MATCH_VFSW 0x1400207b
|
|
|
|
|
+#define MASK_VFSW 0xfff0707f
|
|
|
|
|
+#define MATCH_VGETCFG 0x400b
|
|
|
|
|
+#define MASK_VGETCFG 0xfffff07f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_VGETVL 0x200400b
|
|
|
|
|
+#define MASK_VGETVL 0xfffff07f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_VLB 0x205b
|
|
|
|
|
+#define MASK_VLB 0xfff0707f
|
|
|
|
|
+#define MATCH_VLBU 0x800205b
|
|
|
|
|
+#define MASK_VLBU 0xfff0707f
|
|
|
|
|
+#define MATCH_VLD 0x600205b
|
|
|
|
|
+#define MASK_VLD 0xfff0707f
|
|
|
|
|
+#define MATCH_VLH 0x200205b
|
|
|
|
|
+#define MASK_VLH 0xfff0707f
|
|
|
|
|
+#define MATCH_VLHU 0xa00205b
|
|
|
|
|
+#define MASK_VLHU 0xfff0707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_VLSEGB 0x205b
|
|
|
|
|
+#define MASK_VLSEGB 0x1ff0707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_VLSEGBU 0x800205b
|
|
|
|
|
+#define MASK_VLSEGBU 0x1ff0707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_VLSEGD 0x600205b
|
|
|
|
|
+#define MASK_VLSEGD 0x1ff0707f
|
|
|
|
|
+#define MATCH_VLSEGH 0x200205b
|
|
|
|
|
+#define MASK_VLSEGH 0x1ff0707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_VLSEGHU 0xa00205b
|
|
|
|
|
+#define MASK_VLSEGHU 0x1ff0707f
|
|
|
|
|
+#define MATCH_VLSEGSTB 0x305b
|
|
|
|
|
+#define MASK_VLSEGSTB 0x1e00707f
|
|
|
|
|
+#define MATCH_VLSEGSTBU 0x800305b
|
|
|
|
|
+#define MASK_VLSEGSTBU 0x1e00707f
|
|
|
|
|
+#define MATCH_VLSEGSTD 0x600305b
|
|
|
|
|
+#define MASK_VLSEGSTD 0x1e00707f
|
|
|
|
|
+#define MATCH_VLSEGSTH 0x200305b
|
|
|
|
|
+#define MASK_VLSEGSTH 0x1e00707f
|
|
|
|
|
+#define MATCH_VLSEGSTHU 0xa00305b
|
|
|
|
|
+#define MASK_VLSEGSTHU 0x1e00707f
|
|
|
|
|
+#define MATCH_VLSEGSTW 0x400305b
|
|
|
|
|
+#define MASK_VLSEGSTW 0x1e00707f
|
|
|
|
|
+#define MATCH_VLSEGSTWU 0xc00305b
|
|
|
|
|
+#define MASK_VLSEGSTWU 0x1e00707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define MATCH_VLSEGW 0x400205b
|
|
|
|
|
+#define MASK_VLSEGW 0x1ff0707f
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define MATCH_VLSEGWU 0xc00205b
|
|
|
|
|
+#define MASK_VLSEGWU 0x1ff0707f
|
|
|
|
|
+#define MATCH_VLSTB 0x305b
|
|
|
|
|
+#define MASK_VLSTB 0xfe00707f
|
|
|
|
|
+#define MATCH_VLSTBU 0x800305b
|
|
|
|
|
+#define MASK_VLSTBU 0xfe00707f
|
|
|
|
|
+#define MATCH_VLSTD 0x600305b
|
|
|
|
|
+#define MASK_VLSTD 0xfe00707f
|
|
|
|
|
+#define MATCH_VLSTH 0x200305b
|
|
|
|
|
+#define MASK_VLSTH 0xfe00707f
|
|
|
|
|
+#define MATCH_VLSTHU 0xa00305b
|
|
|
|
|
+#define MASK_VLSTHU 0xfe00707f
|
|
|
|
|
+#define MATCH_VLSTW 0x400305b
|
|
|
|
|
+#define MASK_VLSTW 0xfe00707f
|
|
|
|
|
+#define MATCH_VLSTWU 0xc00305b
|
|
|
|
|
+#define MASK_VLSTWU 0xfe00707f
|
|
|
|
|
+#define MATCH_VLW 0x400205b
|
|
|
|
|
+#define MASK_VLW 0xfff0707f
|
|
|
|
|
+#define MATCH_VLWU 0xc00205b
|
|
|
|
|
+#define MASK_VLWU 0xfff0707f
|
|
|
|
|
+#define MATCH_VMSV 0x200202b
|
|
|
|
|
+#define MASK_VMSV 0xfff0707f
|
|
|
|
|
+#define MATCH_VMVV 0x200002b
|
|
|
|
|
+#define MASK_VMVV 0xfff0707f
|
|
|
|
|
+#define MATCH_VSB 0x207b
|
|
|
|
|
+#define MASK_VSB 0xfff0707f
|
|
|
|
|
+#define MATCH_VSD 0x600207b
|
|
|
|
|
+#define MASK_VSD 0xfff0707f
|
|
|
|
|
+#define MATCH_VSETCFG 0x200b
|
|
|
|
|
+#define MASK_VSETCFG 0x7fff
|
|
|
|
|
+#define MATCH_VSETVL 0x600b
|
|
|
|
|
+#define MASK_VSETVL 0xfff0707f
|
|
|
|
|
+#define MATCH_VSH 0x200207b
|
|
|
|
|
+#define MASK_VSH 0xfff0707f
|
|
|
|
|
+#define MATCH_VSSEGB 0x207b
|
|
|
|
|
+#define MASK_VSSEGB 0x1ff0707f
|
|
|
|
|
+#define MATCH_VSSEGD 0x600207b
|
|
|
|
|
+#define MASK_VSSEGD 0x1ff0707f
|
|
|
|
|
+#define MATCH_VSSEGH 0x200207b
|
|
|
|
|
+#define MASK_VSSEGH 0x1ff0707f
|
|
|
|
|
+#define MATCH_VSSEGSTB 0x307b
|
|
|
|
|
+#define MASK_VSSEGSTB 0x1e00707f
|
|
|
|
|
+#define MATCH_VSSEGSTD 0x600307b
|
|
|
|
|
+#define MASK_VSSEGSTD 0x1e00707f
|
|
|
|
|
+#define MATCH_VSSEGSTH 0x200307b
|
|
|
|
|
+#define MASK_VSSEGSTH 0x1e00707f
|
|
|
|
|
+#define MATCH_VSSEGSTW 0x400307b
|
|
|
|
|
+#define MASK_VSSEGSTW 0x1e00707f
|
|
|
|
|
+#define MATCH_VSSEGW 0x400207b
|
|
|
|
|
+#define MASK_VSSEGW 0x1ff0707f
|
|
|
|
|
+#define MATCH_VSSTB 0x307b
|
|
|
|
|
+#define MASK_VSSTB 0xfe00707f
|
|
|
|
|
+#define MATCH_VSSTD 0x600307b
|
|
|
|
|
+#define MASK_VSSTD 0xfe00707f
|
|
|
|
|
+#define MATCH_VSSTH 0x200307b
|
|
|
|
|
+#define MASK_VSSTH 0xfe00707f
|
|
|
|
|
+#define MATCH_VSSTW 0x400307b
|
|
|
|
|
+#define MASK_VSSTW 0xfe00707f
|
|
|
|
|
+#define MATCH_VSW 0x400207b
|
|
|
|
|
+#define MASK_VSW 0xfff0707f
|
|
|
|
|
+#define MATCH_VXCPTAUX 0x200402b
|
|
|
|
|
+#define MASK_VXCPTAUX 0xfffff07f
|
|
|
|
|
+#define MATCH_VXCPTCAUSE 0x402b
|
|
|
|
|
+#define MASK_VXCPTCAUSE 0xfffff07f
|
|
|
|
|
+#define MATCH_VXCPTEVAC 0x600302b
|
|
|
|
|
+#define MASK_VXCPTEVAC 0xfff07fff
|
|
|
|
|
+#define MATCH_VXCPTHOLD 0x800302b
|
|
|
|
|
+#define MASK_VXCPTHOLD 0xfff07fff
|
|
|
|
|
+#define MATCH_VXCPTKILL 0x400302b
|
|
|
|
|
+#define MASK_VXCPTKILL 0xffffffff
|
|
|
|
|
+#define MATCH_VXCPTRESTORE 0x200302b
|
|
|
|
|
+#define MASK_VXCPTRESTORE 0xfff07fff
|
|
|
|
|
+#define MATCH_VXCPTSAVE 0x302b
|
|
|
|
|
+#define MASK_VXCPTSAVE 0xfff07fff
|
|
|
|
|
+#define MATCH_WFI 0x10200073
|
|
|
|
|
+#define MASK_WFI 0xffffffff
|
|
|
|
|
+#define MATCH_XOR 0x4033
|
|
|
|
|
+#define MASK_XOR 0xfe00707f
|
|
|
|
|
+#define MATCH_XORI 0x4013
|
|
|
|
|
+#define MASK_XORI 0x707f
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define CSR_FFLAGS 0x1
|
|
|
|
|
+#define CSR_FRM 0x2
|
|
|
|
|
+#define CSR_FCSR 0x3
|
|
|
|
|
+#define CSR_CYCLE 0xc00
|
|
|
|
|
+#define CSR_TIME 0xc01
|
|
|
|
|
+#define CSR_INSTRET 0xc02
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define CSR_STATS 0xc0
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define CSR_UARCH0 0xcc0
|
|
|
|
|
+#define CSR_UARCH1 0xcc1
|
|
|
|
|
+#define CSR_UARCH2 0xcc2
|
|
|
|
|
+#define CSR_UARCH3 0xcc3
|
|
|
|
|
+#define CSR_UARCH4 0xcc4
|
|
|
|
|
+#define CSR_UARCH5 0xcc5
|
|
|
|
|
+#define CSR_UARCH6 0xcc6
|
|
|
|
|
+#define CSR_UARCH7 0xcc7
|
|
|
|
|
+#define CSR_UARCH8 0xcc8
|
|
|
|
|
+#define CSR_UARCH9 0xcc9
|
|
|
|
|
+#define CSR_UARCH10 0xcca
|
|
|
|
|
+#define CSR_UARCH11 0xccb
|
|
|
|
|
+#define CSR_UARCH12 0xccc
|
|
|
|
|
+#define CSR_UARCH13 0xccd
|
|
|
|
|
+#define CSR_UARCH14 0xcce
|
|
|
|
|
+#define CSR_UARCH15 0xccf
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define CSR_SSTATUS 0x100
|
|
|
|
|
+#define CSR_STVEC 0x101
|
|
|
|
|
+#define CSR_SIE 0x104
|
|
|
|
|
+#define CSR_SSCRATCH 0x140
|
|
|
|
|
+#define CSR_SEPC 0x141
|
|
|
|
|
+#define CSR_SIP 0x144
|
|
|
|
|
+#define CSR_SPTBR 0x180
|
|
|
|
|
+#define CSR_SASID 0x181
|
|
|
|
|
+#define CSR_CYCLEW 0x900
|
|
|
|
|
+#define CSR_TIMEW 0x901
|
|
|
|
|
+#define CSR_INSTRETW 0x902
|
|
|
|
|
+#define CSR_STIME 0xd01
|
|
|
|
|
+#define CSR_SCAUSE 0xd42
|
|
|
|
|
+#define CSR_SBADADDR 0xd43
|
|
|
|
|
+#define CSR_STIMEW 0xa01
|
|
|
|
|
+#define CSR_MSTATUS 0x300
|
|
|
|
|
+#define CSR_MTVEC 0x301
|
|
|
|
|
+#define CSR_MTDELEG 0x302
|
|
|
|
|
+#define CSR_MIE 0x304
|
|
|
|
|
+#define CSR_MTIMECMP 0x321
|
|
|
|
|
+#define CSR_MSCRATCH 0x340
|
|
|
|
|
+#define CSR_MEPC 0x341
|
|
|
|
|
+#define CSR_MCAUSE 0x342
|
|
|
|
|
+#define CSR_MBADADDR 0x343
|
|
|
|
|
+#define CSR_MIP 0x344
|
|
|
|
|
+#define CSR_MTIME 0x701
|
|
|
|
|
+#define CSR_MCPUID 0xf00
|
|
|
|
|
+#define CSR_MIMPID 0xf01
|
|
|
|
|
+#define CSR_MHARTID 0xf10
|
|
|
|
|
+#define CSR_MTOHOST 0x780
|
|
|
|
|
+#define CSR_MFROMHOST 0x781
|
|
|
|
|
+#define CSR_MRESET 0x782
|
|
|
|
|
+#define CSR_SEND_IPI 0x783
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define CSR_CYCLEH 0xc80
|
|
|
|
|
+#define CSR_TIMEH 0xc81
|
|
|
|
|
+#define CSR_INSTRETH 0xc82
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define CSR_CYCLEHW 0x980
|
|
|
|
|
+#define CSR_TIMEHW 0x981
|
|
|
|
|
+#define CSR_INSTRETHW 0x982
|
|
|
|
|
+#define CSR_STIMEH 0xd81
|
|
|
|
|
+#define CSR_STIMEHW 0xa81
|
|
|
|
|
+#define CSR_MTIMECMPH 0x361
|
|
|
|
|
+#define CSR_MTIMEH 0x741
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+#define CAUSE_MISALIGNED_FETCH 0x0
|
|
|
|
|
+#define CAUSE_FAULT_FETCH 0x1
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+#define CAUSE_ILLEGAL_INSTRUCTION 0x2
|
|
|
|
|
+#define CAUSE_BREAKPOINT 0x3
|
|
|
|
|
+#define CAUSE_MISALIGNED_LOAD 0x4
|
|
|
|
|
+#define CAUSE_FAULT_LOAD 0x5
|
|
|
|
|
+#define CAUSE_MISALIGNED_STORE 0x6
|
|
|
|
|
+#define CAUSE_FAULT_STORE 0x7
|
|
|
|
|
+#define CAUSE_USER_ECALL 0x8
|
|
|
|
|
+#define CAUSE_SUPERVISOR_ECALL 0x9
|
|
|
|
|
+#define CAUSE_HYPERVISOR_ECALL 0xa
|
|
|
|
|
+#define CAUSE_MACHINE_ECALL 0xb
|
|
|
|
|
+#endif
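Although the patch itself only defines the constants, their intended use is straightforward: an instruction word matches an opcode when, after masking off the variable operand fields, the remaining fixed bits equal the MATCH constant; the DECLARE_INSN entries below expose the same (name, match, mask) triples to table-driven consumers.  A minimal sketch, assuming riscv-opc.h is on the include path:

#include <stdint.h>
#include "riscv-opc.h"   /* provides MATCH_ADD, MASK_ADD, ...  */

/* Return nonzero if INSN encodes an ADD: MASK_ADD keeps the opcode,
   funct3 and funct7 fields and discards rd, rs1 and rs2.  */
static int
insn_is_add (uint32_t insn)
{
  return (insn & MASK_ADD) == MATCH_ADD;
}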
|
|
|
|
|
+#ifdef DECLARE_INSN
|
|
|
|
|
+DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
|
|
|
|
|
+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
|
|
|
|
|
+DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
|
|
|
|
|
+DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
|
|
|
|
|
+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
|
|
|
|
|
+DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
|
|
|
|
|
+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
|
|
|
|
|
+DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
|
|
|
|
|
+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
|
|
|
|
|
+DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
|
|
|
|
|
+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
|
|
|
|
|
+DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
|
|
|
|
|
+DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
|
|
|
|
|
+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
|
|
|
|
|
+DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
|
|
|
|
|
+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
|
|
|
|
|
+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
|
|
|
|
|
+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
|
|
|
|
|
+DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
|
|
|
|
|
+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
|
|
|
|
|
+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
|
|
|
|
|
+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
|
|
|
|
|
+DECLARE_INSN(and, MATCH_AND, MASK_AND)
|
|
|
|
|
+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
|
|
|
|
|
+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
|
|
|
|
|
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
|
|
|
|
|
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
|
|
|
|
|
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
|
|
|
|
|
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
|
|
|
|
|
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
|
|
|
|
|
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
|
|
|
|
|
+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
|
|
|
|
|
+DECLARE_INSN(c_add3, MATCH_C_ADD3, MASK_C_ADD3)
|
|
|
|
|
+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
|
|
|
|
|
+DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
|
|
|
|
|
+DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
|
|
|
|
|
+DECLARE_INSN(c_addin, MATCH_C_ADDIN, MASK_C_ADDIN)
|
|
|
|
|
+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
|
|
|
|
|
+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
|
|
|
|
|
+DECLARE_INSN(c_and3, MATCH_C_AND3, MASK_C_AND3)
|
|
|
|
|
+DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
|
|
|
|
|
+DECLARE_INSN(c_andin, MATCH_C_ANDIN, MASK_C_ANDIN)
|
|
|
|
|
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
|
|
|
|
|
+DECLARE_INSN(c_bgez, MATCH_C_BGEZ, MASK_C_BGEZ)
|
|
|
|
|
+DECLARE_INSN(c_bltz, MATCH_C_BLTZ, MASK_C_BLTZ)
|
|
|
|
|
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
|
|
|
|
|
+DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
|
|
|
|
|
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
|
|
|
|
|
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
|
|
|
|
|
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
|
|
|
|
|
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
|
|
|
|
|
+DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
|
|
|
|
|
+DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
|
|
|
|
|
+DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
|
|
|
|
|
+DECLARE_INSN(c_or3, MATCH_C_OR3, MASK_C_OR3)
|
|
|
|
|
+DECLARE_INSN(c_orin, MATCH_C_ORIN, MASK_C_ORIN)
|
|
|
|
|
+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
|
|
|
|
|
+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
|
|
|
|
|
+DECLARE_INSN(c_sll, MATCH_C_SLL, MASK_C_SLL)
|
|
|
|
|
+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
|
|
|
|
|
+DECLARE_INSN(c_slliw, MATCH_C_SLLIW, MASK_C_SLLIW)
|
|
|
|
|
+DECLARE_INSN(c_sllr, MATCH_C_SLLR, MASK_C_SLLR)
|
|
|
|
|
+DECLARE_INSN(c_slt, MATCH_C_SLT, MASK_C_SLT)
|
|
|
|
|
+DECLARE_INSN(c_sltr, MATCH_C_SLTR, MASK_C_SLTR)
|
|
|
|
|
+DECLARE_INSN(c_sltu, MATCH_C_SLTU, MASK_C_SLTU)
|
|
|
|
|
+DECLARE_INSN(c_sltur, MATCH_C_SLTUR, MASK_C_SLTUR)
|
|
|
|
|
+DECLARE_INSN(c_sra, MATCH_C_SRA, MASK_C_SRA)
|
|
|
|
|
+DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
|
|
|
|
|
+DECLARE_INSN(c_srl, MATCH_C_SRL, MASK_C_SRL)
|
|
|
|
|
+DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
|
|
|
|
|
+DECLARE_INSN(c_srlr, MATCH_C_SRLR, MASK_C_SRLR)
|
|
|
|
|
+DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
|
|
|
|
|
+DECLARE_INSN(c_sub3, MATCH_C_SUB3, MASK_C_SUB3)
|
|
|
|
|
+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
|
|
|
|
|
+DECLARE_INSN(c_xorin, MATCH_C_XORIN, MASK_C_XORIN)
|
|
|
|
|
+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
|
|
|
|
|
+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
|
|
|
|
|
+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
|
|
|
|
|
+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
|
|
|
|
|
+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
|
|
|
|
|
+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
|
|
|
|
|
+DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
|
|
|
|
|
+DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
|
|
|
|
|
+DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
|
|
|
|
|
+DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
|
|
|
|
|
+DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
|
|
|
|
|
+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
|
|
|
|
|
+DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
|
|
|
|
|
+DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
|
|
|
|
|
+DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
|
|
|
|
|
+DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
|
|
|
|
|
+DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
|
|
|
|
|
+DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
|
|
|
|
|
+DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
|
|
|
|
|
+DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
|
|
|
|
|
+DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
|
|
|
|
|
+DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
|
|
|
|
|
+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
|
|
|
|
|
+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
|
|
|
|
|
+DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
|
|
|
|
|
+DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
|
|
|
|
|
+DECLARE_INSN(eret, MATCH_ERET, MASK_ERET)
|
|
|
|
|
+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
|
|
|
|
|
+DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
|
|
|
|
|
+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
|
|
|
|
|
+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
|
|
|
|
|
+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
|
|
|
|
|
+DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
|
|
|
|
|
+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
|
|
|
|
|
+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
|
|
|
|
|
+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
|
|
|
|
|
+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
|
|
|
|
|
+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
|
|
|
|
|
+DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
|
|
|
|
|
+DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
|
|
|
|
|
+DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
|
|
|
|
|
+DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
|
|
|
|
|
+DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
|
|
|
|
|
+DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
|
|
|
|
|
+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
|
|
|
|
|
+DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
|
|
|
|
|
+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
|
|
|
|
|
+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
|
|
|
|
|
+DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
|
|
|
|
|
+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
|
|
|
|
|
+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
|
|
|
|
|
+DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
|
|
|
|
|
+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
|
|
|
|
|
+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
|
|
|
|
|
+DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
|
|
|
|
|
+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
|
|
|
|
|
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
|
|
|
|
|
+DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
|
|
|
|
|
+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
|
|
|
|
|
+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
|
|
|
|
|
+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
|
|
|
|
|
+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
|
|
|
|
|
+DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
|
|
|
|
|
+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
|
|
|
|
|
+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
|
|
|
|
|
+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
|
|
|
|
|
+DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
|
|
|
|
|
+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
|
|
|
|
|
+DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
|
|
|
|
|
+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
|
|
|
|
|
+DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
|
|
|
|
|
+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
|
|
|
|
|
+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
|
|
|
|
|
+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
|
|
|
|
|
+DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
|
|
|
|
|
+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
|
|
|
|
|
+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
|
|
|
|
|
+DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
|
|
|
|
|
+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
|
|
|
|
|
+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
|
|
|
|
|
+DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
|
|
|
|
|
+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
|
|
|
|
|
+DECLARE_INSN(fmovn, MATCH_FMOVN, MASK_FMOVN)
|
|
|
|
|
+DECLARE_INSN(fmovz, MATCH_FMOVZ, MASK_FMOVZ)
|
|
|
|
|
+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
|
|
|
|
|
+DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
|
|
|
|
|
+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
|
|
|
|
|
+DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
|
|
|
|
|
+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
|
|
|
|
|
+DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
|
|
|
|
|
+DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
|
|
|
|
|
+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
|
|
|
|
|
+DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
|
|
|
|
|
+DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
|
|
|
|
|
+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
|
|
|
|
|
+DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
|
|
|
|
|
+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
|
|
|
|
|
+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
|
|
|
|
|
+DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
|
|
|
|
|
+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
|
|
|
|
|
+DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
|
|
|
|
|
+DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
+DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
+DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
+DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
+DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
+DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
+DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
+DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
+DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
+DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
+DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
+DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
+DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
+DECLARE_INSN(hrts, MATCH_HRTS, MASK_HRTS)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
+DECLARE_INSN(movn, MATCH_MOVN, MASK_MOVN)
+DECLARE_INSN(movz, MATCH_MOVZ, MASK_MOVZ)
+DECLARE_INSN(mrth, MATCH_MRTH, MASK_MRTH)
+DECLARE_INSN(mrts, MATCH_MRTS, MASK_MRTS)
+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
+DECLARE_INSN(or, MATCH_OR, MASK_OR)
+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
+DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
+DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
+DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
+DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
+DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
+DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
+DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
+DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM)
+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
+DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
+DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
+DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
+DECLARE_INSN(stop, MATCH_STOP, MASK_STOP)
+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
+DECLARE_INSN(utidx, MATCH_UTIDX, MASK_UTIDX)
+DECLARE_INSN(venqcmd, MATCH_VENQCMD, MASK_VENQCMD)
+DECLARE_INSN(venqcnt, MATCH_VENQCNT, MASK_VENQCNT)
+DECLARE_INSN(venqimm1, MATCH_VENQIMM1, MASK_VENQIMM1)
+DECLARE_INSN(venqimm2, MATCH_VENQIMM2, MASK_VENQIMM2)
+DECLARE_INSN(vf, MATCH_VF, MASK_VF)
+DECLARE_INSN(vfld, MATCH_VFLD, MASK_VFLD)
+DECLARE_INSN(vflsegd, MATCH_VFLSEGD, MASK_VFLSEGD)
+DECLARE_INSN(vflsegstd, MATCH_VFLSEGSTD, MASK_VFLSEGSTD)
+DECLARE_INSN(vflsegstw, MATCH_VFLSEGSTW, MASK_VFLSEGSTW)
+DECLARE_INSN(vflsegw, MATCH_VFLSEGW, MASK_VFLSEGW)
+DECLARE_INSN(vflstd, MATCH_VFLSTD, MASK_VFLSTD)
+DECLARE_INSN(vflstw, MATCH_VFLSTW, MASK_VFLSTW)
+DECLARE_INSN(vflw, MATCH_VFLW, MASK_VFLW)
+DECLARE_INSN(vfmsv_d, MATCH_VFMSV_D, MASK_VFMSV_D)
+DECLARE_INSN(vfmsv_s, MATCH_VFMSV_S, MASK_VFMSV_S)
+DECLARE_INSN(vfmvv, MATCH_VFMVV, MASK_VFMVV)
+DECLARE_INSN(vfsd, MATCH_VFSD, MASK_VFSD)
+DECLARE_INSN(vfssegd, MATCH_VFSSEGD, MASK_VFSSEGD)
+DECLARE_INSN(vfssegstd, MATCH_VFSSEGSTD, MASK_VFSSEGSTD)
+DECLARE_INSN(vfssegstw, MATCH_VFSSEGSTW, MASK_VFSSEGSTW)
+DECLARE_INSN(vfssegw, MATCH_VFSSEGW, MASK_VFSSEGW)
+DECLARE_INSN(vfsstd, MATCH_VFSSTD, MASK_VFSSTD)
+DECLARE_INSN(vfsstw, MATCH_VFSSTW, MASK_VFSSTW)
+DECLARE_INSN(vfsw, MATCH_VFSW, MASK_VFSW)
+DECLARE_INSN(vgetcfg, MATCH_VGETCFG, MASK_VGETCFG)
+DECLARE_INSN(vgetvl, MATCH_VGETVL, MASK_VGETVL)
+DECLARE_INSN(vlb, MATCH_VLB, MASK_VLB)
+DECLARE_INSN(vlbu, MATCH_VLBU, MASK_VLBU)
+DECLARE_INSN(vld, MATCH_VLD, MASK_VLD)
+DECLARE_INSN(vlh, MATCH_VLH, MASK_VLH)
+DECLARE_INSN(vlhu, MATCH_VLHU, MASK_VLHU)
+DECLARE_INSN(vlsegb, MATCH_VLSEGB, MASK_VLSEGB)
+DECLARE_INSN(vlsegbu, MATCH_VLSEGBU, MASK_VLSEGBU)
+DECLARE_INSN(vlsegd, MATCH_VLSEGD, MASK_VLSEGD)
+DECLARE_INSN(vlsegh, MATCH_VLSEGH, MASK_VLSEGH)
+DECLARE_INSN(vlseghu, MATCH_VLSEGHU, MASK_VLSEGHU)
+DECLARE_INSN(vlsegstb, MATCH_VLSEGSTB, MASK_VLSEGSTB)
+DECLARE_INSN(vlsegstbu, MATCH_VLSEGSTBU, MASK_VLSEGSTBU)
+DECLARE_INSN(vlsegstd, MATCH_VLSEGSTD, MASK_VLSEGSTD)
+DECLARE_INSN(vlsegsth, MATCH_VLSEGSTH, MASK_VLSEGSTH)
+DECLARE_INSN(vlsegsthu, MATCH_VLSEGSTHU, MASK_VLSEGSTHU)
+DECLARE_INSN(vlsegstw, MATCH_VLSEGSTW, MASK_VLSEGSTW)
+DECLARE_INSN(vlsegstwu, MATCH_VLSEGSTWU, MASK_VLSEGSTWU)
+DECLARE_INSN(vlsegw, MATCH_VLSEGW, MASK_VLSEGW)
+DECLARE_INSN(vlsegwu, MATCH_VLSEGWU, MASK_VLSEGWU)
+DECLARE_INSN(vlstb, MATCH_VLSTB, MASK_VLSTB)
+DECLARE_INSN(vlstbu, MATCH_VLSTBU, MASK_VLSTBU)
+DECLARE_INSN(vlstd, MATCH_VLSTD, MASK_VLSTD)
+DECLARE_INSN(vlsth, MATCH_VLSTH, MASK_VLSTH)
+DECLARE_INSN(vlsthu, MATCH_VLSTHU, MASK_VLSTHU)
+DECLARE_INSN(vlstw, MATCH_VLSTW, MASK_VLSTW)
+DECLARE_INSN(vlstwu, MATCH_VLSTWU, MASK_VLSTWU)
+DECLARE_INSN(vlw, MATCH_VLW, MASK_VLW)
+DECLARE_INSN(vlwu, MATCH_VLWU, MASK_VLWU)
+DECLARE_INSN(vmsv, MATCH_VMSV, MASK_VMSV)
+DECLARE_INSN(vmvv, MATCH_VMVV, MASK_VMVV)
+DECLARE_INSN(vsb, MATCH_VSB, MASK_VSB)
+DECLARE_INSN(vsd, MATCH_VSD, MASK_VSD)
+DECLARE_INSN(vsetcfg, MATCH_VSETCFG, MASK_VSETCFG)
+DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
+DECLARE_INSN(vsh, MATCH_VSH, MASK_VSH)
+DECLARE_INSN(vssegb, MATCH_VSSEGB, MASK_VSSEGB)
+DECLARE_INSN(vssegd, MATCH_VSSEGD, MASK_VSSEGD)
+DECLARE_INSN(vssegh, MATCH_VSSEGH, MASK_VSSEGH)
+DECLARE_INSN(vssegstb, MATCH_VSSEGSTB, MASK_VSSEGSTB)
+DECLARE_INSN(vssegstd, MATCH_VSSEGSTD, MASK_VSSEGSTD)
+DECLARE_INSN(vssegsth, MATCH_VSSEGSTH, MASK_VSSEGSTH)
+DECLARE_INSN(vssegstw, MATCH_VSSEGSTW, MASK_VSSEGSTW)
+DECLARE_INSN(vssegw, MATCH_VSSEGW, MASK_VSSEGW)
+DECLARE_INSN(vsstb, MATCH_VSSTB, MASK_VSSTB)
+DECLARE_INSN(vsstd, MATCH_VSSTD, MASK_VSSTD)
+DECLARE_INSN(vssth, MATCH_VSSTH, MASK_VSSTH)
+DECLARE_INSN(vsstw, MATCH_VSSTW, MASK_VSSTW)
+DECLARE_INSN(vsw, MATCH_VSW, MASK_VSW)
+DECLARE_INSN(vxcptaux, MATCH_VXCPTAUX, MASK_VXCPTAUX)
+DECLARE_INSN(vxcptcause, MATCH_VXCPTCAUSE, MASK_VXCPTCAUSE)
+DECLARE_INSN(vxcptevac, MATCH_VXCPTEVAC, MASK_VXCPTEVAC)
+DECLARE_INSN(vxcpthold, MATCH_VXCPTHOLD, MASK_VXCPTHOLD)
+DECLARE_INSN(vxcptkill, MATCH_VXCPTKILL, MASK_VXCPTKILL)
+DECLARE_INSN(vxcptrestore, MATCH_VXCPTRESTORE, MASK_VXCPTRESTORE)
+DECLARE_INSN(vxcptsave, MATCH_VXCPTSAVE, MASK_VXCPTSAVE)
+DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
+#endif
+#ifdef DECLARE_CSR
+DECLARE_CSR(fflags, CSR_FFLAGS)
+DECLARE_CSR(frm, CSR_FRM)
+DECLARE_CSR(fcsr, CSR_FCSR)
+DECLARE_CSR(cycle, CSR_CYCLE)
+DECLARE_CSR(time, CSR_TIME)
+DECLARE_CSR(instret, CSR_INSTRET)
+DECLARE_CSR(stats, CSR_STATS)
+DECLARE_CSR(uarch0, CSR_UARCH0)
+DECLARE_CSR(uarch1, CSR_UARCH1)
+DECLARE_CSR(uarch2, CSR_UARCH2)
+DECLARE_CSR(uarch3, CSR_UARCH3)
+DECLARE_CSR(uarch4, CSR_UARCH4)
+DECLARE_CSR(uarch5, CSR_UARCH5)
+DECLARE_CSR(uarch6, CSR_UARCH6)
+DECLARE_CSR(uarch7, CSR_UARCH7)
+DECLARE_CSR(uarch8, CSR_UARCH8)
+DECLARE_CSR(uarch9, CSR_UARCH9)
+DECLARE_CSR(uarch10, CSR_UARCH10)
+DECLARE_CSR(uarch11, CSR_UARCH11)
+DECLARE_CSR(uarch12, CSR_UARCH12)
+DECLARE_CSR(uarch13, CSR_UARCH13)
+DECLARE_CSR(uarch14, CSR_UARCH14)
+DECLARE_CSR(uarch15, CSR_UARCH15)
+DECLARE_CSR(sstatus, CSR_SSTATUS)
+DECLARE_CSR(stvec, CSR_STVEC)
+DECLARE_CSR(sie, CSR_SIE)
+DECLARE_CSR(sscratch, CSR_SSCRATCH)
+DECLARE_CSR(sepc, CSR_SEPC)
+DECLARE_CSR(sip, CSR_SIP)
+DECLARE_CSR(sptbr, CSR_SPTBR)
+DECLARE_CSR(sasid, CSR_SASID)
+DECLARE_CSR(cyclew, CSR_CYCLEW)
+DECLARE_CSR(timew, CSR_TIMEW)
+DECLARE_CSR(instretw, CSR_INSTRETW)
+DECLARE_CSR(stime, CSR_STIME)
+DECLARE_CSR(scause, CSR_SCAUSE)
+DECLARE_CSR(sbadaddr, CSR_SBADADDR)
+DECLARE_CSR(stimew, CSR_STIMEW)
+DECLARE_CSR(mstatus, CSR_MSTATUS)
+DECLARE_CSR(mtvec, CSR_MTVEC)
+DECLARE_CSR(mtdeleg, CSR_MTDELEG)
+DECLARE_CSR(mie, CSR_MIE)
+DECLARE_CSR(mtimecmp, CSR_MTIMECMP)
+DECLARE_CSR(mscratch, CSR_MSCRATCH)
+DECLARE_CSR(mepc, CSR_MEPC)
+DECLARE_CSR(mcause, CSR_MCAUSE)
+DECLARE_CSR(mbadaddr, CSR_MBADADDR)
+DECLARE_CSR(mip, CSR_MIP)
+DECLARE_CSR(mtime, CSR_MTIME)
+DECLARE_CSR(mcpuid, CSR_MCPUID)
+DECLARE_CSR(mimpid, CSR_MIMPID)
+DECLARE_CSR(mhartid, CSR_MHARTID)
+DECLARE_CSR(mtohost, CSR_MTOHOST)
+DECLARE_CSR(mfromhost, CSR_MFROMHOST)
+DECLARE_CSR(mreset, CSR_MRESET)
+DECLARE_CSR(send_ipi, CSR_SEND_IPI)
+DECLARE_CSR(cycleh, CSR_CYCLEH)
+DECLARE_CSR(timeh, CSR_TIMEH)
+DECLARE_CSR(instreth, CSR_INSTRETH)
+DECLARE_CSR(cyclehw, CSR_CYCLEHW)
+DECLARE_CSR(timehw, CSR_TIMEHW)
+DECLARE_CSR(instrethw, CSR_INSTRETHW)
+DECLARE_CSR(stimeh, CSR_STIMEH)
+DECLARE_CSR(stimehw, CSR_STIMEHW)
+DECLARE_CSR(mtimecmph, CSR_MTIMECMPH)
+DECLARE_CSR(mtimeh, CSR_MTIMEH)
+#endif
+#ifdef DECLARE_CAUSE
+DECLARE_CAUSE("fflags", CAUSE_FFLAGS)
+DECLARE_CAUSE("frm", CAUSE_FRM)
+DECLARE_CAUSE("fcsr", CAUSE_FCSR)
+DECLARE_CAUSE("cycle", CAUSE_CYCLE)
+DECLARE_CAUSE("time", CAUSE_TIME)
+DECLARE_CAUSE("instret", CAUSE_INSTRET)
+DECLARE_CAUSE("stats", CAUSE_STATS)
+DECLARE_CAUSE("uarch0", CAUSE_UARCH0)
+DECLARE_CAUSE("uarch1", CAUSE_UARCH1)
+DECLARE_CAUSE("uarch2", CAUSE_UARCH2)
+DECLARE_CAUSE("uarch3", CAUSE_UARCH3)
+DECLARE_CAUSE("uarch4", CAUSE_UARCH4)
+DECLARE_CAUSE("uarch5", CAUSE_UARCH5)
+DECLARE_CAUSE("uarch6", CAUSE_UARCH6)
+DECLARE_CAUSE("uarch7", CAUSE_UARCH7)
+DECLARE_CAUSE("uarch8", CAUSE_UARCH8)
+DECLARE_CAUSE("uarch9", CAUSE_UARCH9)
+DECLARE_CAUSE("uarch10", CAUSE_UARCH10)
+DECLARE_CAUSE("uarch11", CAUSE_UARCH11)
+DECLARE_CAUSE("uarch12", CAUSE_UARCH12)
+DECLARE_CAUSE("uarch13", CAUSE_UARCH13)
+DECLARE_CAUSE("uarch14", CAUSE_UARCH14)
+DECLARE_CAUSE("uarch15", CAUSE_UARCH15)
+DECLARE_CAUSE("sstatus", CAUSE_SSTATUS)
+DECLARE_CAUSE("stvec", CAUSE_STVEC)
+DECLARE_CAUSE("sie", CAUSE_SIE)
+DECLARE_CAUSE("sscratch", CAUSE_SSCRATCH)
+DECLARE_CAUSE("sepc", CAUSE_SEPC)
+DECLARE_CAUSE("sip", CAUSE_SIP)
+DECLARE_CAUSE("sptbr", CAUSE_SPTBR)
+DECLARE_CAUSE("sasid", CAUSE_SASID)
+DECLARE_CAUSE("cyclew", CAUSE_CYCLEW)
+DECLARE_CAUSE("timew", CAUSE_TIMEW)
+DECLARE_CAUSE("instretw", CAUSE_INSTRETW)
+DECLARE_CAUSE("stime", CAUSE_STIME)
+DECLARE_CAUSE("scause", CAUSE_SCAUSE)
+DECLARE_CAUSE("sbadaddr", CAUSE_SBADADDR)
+DECLARE_CAUSE("stimew", CAUSE_STIMEW)
+DECLARE_CAUSE("mstatus", CAUSE_MSTATUS)
+DECLARE_CAUSE("mtvec", CAUSE_MTVEC)
+DECLARE_CAUSE("mtdeleg", CAUSE_MTDELEG)
+DECLARE_CAUSE("mie", CAUSE_MIE)
+DECLARE_CAUSE("mtimecmp", CAUSE_MTIMECMP)
+DECLARE_CAUSE("mscratch", CAUSE_MSCRATCH)
+DECLARE_CAUSE("mepc", CAUSE_MEPC)
+DECLARE_CAUSE("mcause", CAUSE_MCAUSE)
+DECLARE_CAUSE("mbadaddr", CAUSE_MBADADDR)
+DECLARE_CAUSE("mip", CAUSE_MIP)
+DECLARE_CAUSE("mtime", CAUSE_MTIME)
+DECLARE_CAUSE("mcpuid", CAUSE_MCPUID)
+DECLARE_CAUSE("mimpid", CAUSE_MIMPID)
+DECLARE_CAUSE("mhartid", CAUSE_MHARTID)
+DECLARE_CAUSE("mtohost", CAUSE_MTOHOST)
+DECLARE_CAUSE("mfromhost", CAUSE_MFROMHOST)
+DECLARE_CAUSE("mreset", CAUSE_MRESET)
+DECLARE_CAUSE("send_ipi", CAUSE_SEND_IPI)
+DECLARE_CAUSE("cycleh", CAUSE_CYCLEH)
+DECLARE_CAUSE("timeh", CAUSE_TIMEH)
+DECLARE_CAUSE("instreth", CAUSE_INSTRETH)
+DECLARE_CAUSE("cyclehw", CAUSE_CYCLEHW)
+DECLARE_CAUSE("timehw", CAUSE_TIMEHW)
+DECLARE_CAUSE("instrethw", CAUSE_INSTRETHW)
+DECLARE_CAUSE("stimeh", CAUSE_STIMEH)
+DECLARE_CAUSE("stimehw", CAUSE_STIMEHW)
+DECLARE_CAUSE("mtimecmph", CAUSE_MTIMECMPH)
+DECLARE_CAUSE("mtimeh", CAUSE_MTIMEH)
+#endif
diff -urN empty/gcc/config/riscv/riscv-protos.h gcc-5.2.0/gcc/config/riscv/riscv-protos.h
--- gcc-5.2.0/gcc/config/riscv/riscv-protos.h	1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/riscv-protos.h	2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,96 @@
+/* Definition of RISC-V target for GNU compiler.
+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+   Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#ifndef GCC_RISCV_PROTOS_H
+#define GCC_RISCV_PROTOS_H
+
+enum riscv_symbol_type {
+  SYMBOL_ABSOLUTE,
+  SYMBOL_GOT_DISP,
+  SYMBOL_TLS,
+  SYMBOL_TLS_LE,
+  SYMBOL_TLS_IE,
+  SYMBOL_TLS_GD
+};
+#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
+
+enum riscv_code_model {
+  CM_MEDLOW,
+  CM_MEDANY,
+  CM_PIC
+};
+extern enum riscv_code_model riscv_cmodel;
+
+extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
+extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
+extern int riscv_address_insns (rtx, enum machine_mode, bool);
+extern int riscv_const_insns (rtx);
+extern int riscv_split_const_insns (rtx);
+extern int riscv_load_store_insns (rtx, rtx_insn *);
+extern rtx riscv_emit_move (rtx, rtx);
+extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
+extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
+extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
+extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
+extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
+
+extern rtx riscv_subword (rtx, bool);
+extern bool riscv_split_64bit_move_p (rtx, rtx);
+extern void riscv_split_doubleword_move (rtx, rtx);
+extern const char *riscv_output_move (rtx, rtx);
+extern const char *riscv_output_gpr_save (unsigned);
+#ifdef RTX_CODE
+extern void riscv_expand_scc (rtx *);
+extern void riscv_expand_conditional_branch (rtx *);
+#endif
+extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
+extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
+extern void riscv_set_return_address (rtx, rtx);
+extern bool riscv_expand_block_move (rtx, rtx, rtx);
+extern void riscv_expand_synci_loop (rtx, rtx);
+
+extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
+						HOST_WIDE_INT);
+extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
+						 HOST_WIDE_INT);
+extern void riscv_order_regs_for_local_alloc (void);
+
+extern rtx riscv_return_addr (int, rtx);
+extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
+extern void riscv_expand_prologue (void);
+extern void riscv_expand_epilogue (bool);
+extern bool riscv_can_use_return_insn (void);
+extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
+
+extern enum reg_class riscv_secondary_reload_class (enum reg_class,
+						    enum machine_mode,
+						    rtx, bool);
+extern int riscv_class_max_nregs (enum reg_class, enum machine_mode);
+
+extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
+
+extern void irix_asm_output_align (FILE *, unsigned);
+extern const char *current_section_name (void);
+extern unsigned int current_section_flags (void);
+
+extern void riscv_expand_vector_init (rtx, rtx);
+
+#endif /* ! GCC_RISCV_PROTOS_H */
diff -urN empty/gcc/config/riscv/riscv.c gcc-5.2.0/gcc/config/riscv/riscv.c
--- gcc-5.2.0/gcc/config/riscv/riscv.c	1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/riscv.c	2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,4439 @@
+/* Subroutines used for code generation for RISC-V.
+   Copyright (C) 2011-2014 Free Software Foundation, Inc.
+   Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+   Based on MIPS target for GNU compiler.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "recog.h"
+#include "output.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "vec.h"
+#include "double-int.h"
+#include "input.h"
+#include "alias.h"
+#include "symtab.h"
+#include "wide-int.h"
+#include "inchash.h"
+#include "tree.h"
+#include "fold-const.h"
+#include "varasm.h"
+#include "stringpool.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "function.h"
+#include "hashtab.h"
+#include "flags.h"
+#include "statistics.h"
+#include "real.h"
+#include "fixed-value.h"
+#include "expmed.h"
+#include "dojump.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "stmt.h"
+#include "expr.h"
+#include "insn-codes.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "ggc.h"
+#include "gstab.h"
+#include "hash-table.h"
+#include "debug.h"
+#include "target.h"
+#include "target-def.h"
+#include "common/common-target.h"
+#include "langhooks.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "predict.h"
+#include "basic-block.h"
+#include "sched-int.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "bitmap.h"
+#include "diagnostic.h"
+#include "target-globals.h"
+#include "opts.h"
+#include "tree-pass.h"
+#include "context.h"
+#include "hash-map.h"
+#include "plugin-api.h"
+#include "ipa-ref.h"
+#include "cgraph.h"
+#include "builtins.h"
+#include "rtl-iter.h"
+#include <stdint.h>
+
+/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
|
|
|
|
|
+#define UNSPEC_ADDRESS_P(X) \
|
|
|
|
|
+ (GET_CODE (X) == UNSPEC \
|
|
|
|
|
+ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
|
|
|
|
|
+ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
|
|
|
|
|
+
|
|
|
|
|
+/* Extract the symbol or label from UNSPEC wrapper X. */
|
|
|
|
|
+#define UNSPEC_ADDRESS(X) \
|
|
|
|
|
+ XVECEXP (X, 0, 0)
|
|
|
|
|
+
|
|
|
|
|
+/* Extract the symbol type from UNSPEC wrapper X. */
|
|
|
|
|
+#define UNSPEC_ADDRESS_TYPE(X) \
|
|
|
|
|
+ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
|
|
|
|
|
+
|
|
|
|
|
+/* The maximum distance between the top of the stack frame and the
|
|
|
|
|
+ value sp has when we save and restore registers. This is set by the
|
|
|
|
|
+ range of load/store offsets and must also preserve stack alignment. */
|
|
|
|
|
+#define RISCV_MAX_FIRST_STACK_STEP (RISCV_IMM_REACH/2 - 16)
|
|
|
|
|
+
|
|
|
|
|
+/* True if INSN is a riscv.md pattern or asm statement. */
|
|
|
|
|
+#define USEFUL_INSN_P(INSN) \
|
|
|
|
|
+ (NONDEBUG_INSN_P (INSN) \
|
|
|
|
|
+ && GET_CODE (PATTERN (INSN)) != USE \
|
|
|
|
|
+ && GET_CODE (PATTERN (INSN)) != CLOBBER \
|
|
|
|
|
+ && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
|
|
|
|
|
+ && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
|
|
|
|
|
+
|
|
|
|
|
+/* True if bit BIT is set in VALUE. */
|
|
|
|
|
+#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
|
|
|
|
|
+
|
|
|
|
|
+/* Classifies an address.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_REG
|
|
|
|
|
+ A natural register + offset address. The register satisfies
|
|
|
|
|
+ riscv_valid_base_register_p and the offset is a const_arith_operand.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_LO_SUM
|
|
|
|
|
+ A LO_SUM rtx. The first operand is a valid base register and
|
|
|
|
|
+ the second operand is a symbolic address.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_CONST_INT
|
|
|
|
|
+ A signed 16-bit constant address.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_SYMBOLIC:
|
|
|
|
|
+ A constant symbolic address. */
|
|
|
|
|
+enum riscv_address_type {
|
|
|
|
|
+ ADDRESS_REG,
|
|
|
|
|
+ ADDRESS_LO_SUM,
|
|
|
|
|
+ ADDRESS_CONST_INT,
|
|
|
|
|
+ ADDRESS_SYMBOLIC
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+enum riscv_code_model riscv_cmodel = TARGET_DEFAULT_CMODEL;
|
|
|
|
|
+
|
|
|
|
|
+/* Macros to create an enumeration identifier for a function prototype. */
|
|
|
|
|
+#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
|
|
|
|
|
+#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
|
|
|
|
|
+#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
|
|
|
|
|
+#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
|
|
|
|
|
+
|
|
|
|
|
+/* Classifies the prototype of a built-in function. */
|
|
|
|
|
+enum riscv_function_type {
|
|
|
|
|
+#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
|
|
|
|
|
+#include "config/riscv/riscv-ftypes.def"
|
|
|
|
|
+#undef DEF_RISCV_FTYPE
|
|
|
|
|
+ RISCV_MAX_FTYPE_MAX
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Specifies how a built-in function should be converted into rtl. */
|
|
|
|
|
+enum riscv_builtin_type {
|
|
|
|
|
+ /* The function corresponds directly to an .md pattern. The return
|
|
|
|
|
+ value is mapped to operand 0 and the arguments are mapped to
|
|
|
|
|
+ operands 1 and above. */
|
|
|
|
|
+ RISCV_BUILTIN_DIRECT,
|
|
|
|
|
+
|
|
|
|
|
+ /* The function corresponds directly to an .md pattern. There is no return
|
|
|
|
|
+ value and the arguments are mapped to operands 0 and above. */
|
|
|
|
|
+ RISCV_BUILTIN_DIRECT_NO_TARGET
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Information about a function's frame layout. */
|
|
|
|
|
+struct GTY(()) riscv_frame_info {
|
|
|
|
|
+ /* The size of the frame in bytes. */
|
|
|
|
|
+ HOST_WIDE_INT total_size;
|
|
|
|
|
+
|
|
|
|
|
+ /* Bit X is set if the function saves or restores GPR X. */
|
|
|
|
|
+ unsigned int mask;
|
|
|
|
|
+
|
|
|
|
|
+ /* Likewise FPR X. */
|
|
|
|
|
+ unsigned int fmask;
|
|
|
|
|
+
|
|
|
|
|
+ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */
|
|
|
|
|
+ unsigned save_libcall_adjustment;
|
|
|
|
|
+
|
|
|
|
|
+ /* Offsets of fixed-point and floating-point save areas from frame bottom */
|
|
|
|
|
+ HOST_WIDE_INT gp_sp_offset;
|
|
|
|
|
+ HOST_WIDE_INT fp_sp_offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* Offset of virtual frame pointer from stack pointer/frame bottom */
|
|
|
|
|
+ HOST_WIDE_INT frame_pointer_offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* Offset of hard frame pointer from stack pointer/frame bottom */
|
|
|
|
|
+ HOST_WIDE_INT hard_frame_pointer_offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* The offset of arg_pointer_rtx from the bottom of the frame. */
|
|
|
|
|
+ HOST_WIDE_INT arg_pointer_offset;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+struct GTY(()) machine_function {
|
|
|
|
|
+ /* The number of extra stack bytes taken up by register varargs.
|
|
|
|
|
+ This area is allocated by the callee at the very top of the frame. */
|
|
|
|
|
+ int varargs_size;
|
|
|
|
|
+
|
|
|
|
|
+ /* Cached return value of leaf_function_p. <0 if false, >0 if true. */
|
|
|
|
|
+ int is_leaf;
|
|
|
|
|
+
|
|
|
|
|
+ /* The current frame information, calculated by riscv_compute_frame_info. */
|
|
|
|
|
+ struct riscv_frame_info frame;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Information about a single argument. */
|
|
|
|
|
+struct riscv_arg_info {
|
|
|
|
|
+ /* True if the argument is passed in a floating-point register, or
|
|
|
|
|
+ would have been if we hadn't run out of registers. */
|
|
|
|
|
+ bool fpr_p;
|
|
|
|
|
+
|
|
|
|
|
+ /* The number of words passed in registers, rounded up. */
|
|
|
|
|
+ unsigned int reg_words;
|
|
|
|
|
+
|
|
|
|
|
+ /* For EABI, the offset of the first register from GP_ARG_FIRST or
|
|
|
|
|
+ FP_ARG_FIRST. For other ABIs, the offset of the first register from
|
|
|
|
|
+ the start of the ABI's argument structure (see the CUMULATIVE_ARGS
|
|
|
|
|
+ comment for details).
|
|
|
|
|
+
|
|
|
|
|
+ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
|
|
|
|
|
+ on the stack. */
|
|
|
|
|
+ unsigned int reg_offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* The number of words that must be passed on the stack, rounded up. */
|
|
|
|
|
+ unsigned int stack_words;
|
|
|
|
|
+
|
|
|
|
|
+ /* The offset from the start of the stack overflow area of the argument's
|
|
|
|
|
+ first stack word. Only meaningful when STACK_WORDS is nonzero. */
|
|
|
|
|
+ unsigned int stack_offset;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Information about an address described by riscv_address_type.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_CONST_INT
|
|
|
|
|
+ No fields are used.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_REG
|
|
|
|
|
+ REG is the base register and OFFSET is the constant offset.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_LO_SUM
|
|
|
|
|
+ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
|
|
|
|
|
+ is the type of symbol it references.
|
|
|
|
|
+
|
|
|
|
|
+ ADDRESS_SYMBOLIC
|
|
|
|
|
+ SYMBOL_TYPE is the type of symbol that the address references. */
|
|
|
|
|
+struct riscv_address_info {
|
|
|
|
|
+ enum riscv_address_type type;
|
|
|
|
|
+ rtx reg;
|
|
|
|
|
+ rtx offset;
|
|
|
|
|
+ enum riscv_symbol_type symbol_type;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* One stage in a constant building sequence. These sequences have
|
|
|
|
|
+ the form:
|
|
|
|
|
+
|
|
|
|
|
+ A = VALUE[0]
|
|
|
|
|
+ A = A CODE[1] VALUE[1]
|
|
|
|
|
+ A = A CODE[2] VALUE[2]
|
|
|
|
|
+ ...
|
|
|
|
|
+
|
|
|
|
|
+ where A is an accumulator, each CODE[i] is a binary rtl operation
|
|
|
|
|
+ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
|
|
|
|
|
+struct riscv_integer_op {
|
|
|
|
|
+ enum rtx_code code;
|
|
|
|
|
+ unsigned HOST_WIDE_INT value;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* The largest number of operations needed to load an integer constant.
|
|
|
|
|
+ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
|
|
|
|
|
+ but we may attempt and reject even worse sequences. */
|
|
|
|
|
+#define RISCV_MAX_INTEGER_OPS 32
|
|
|
|
|
+
|
|
|
|
|
+/* Costs of various operations on the different architectures. */
|
|
|
|
|
+
|
|
|
|
|
+struct riscv_tune_info
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned short fp_add[2];
|
|
|
|
|
+ unsigned short fp_mul[2];
|
|
|
|
|
+ unsigned short fp_div[2];
|
|
|
|
|
+ unsigned short int_mul[2];
|
|
|
|
|
+ unsigned short int_div[2];
|
|
|
|
|
+ unsigned short issue_rate;
|
|
|
|
|
+ unsigned short branch_cost;
|
|
|
|
|
+ unsigned short fp_to_int_cost;
|
|
|
|
|
+ unsigned short memory_cost;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Information about one CPU we know about. */
|
|
|
|
|
+struct riscv_cpu_info {
|
|
|
|
|
+ /* This CPU's canonical name. */
|
|
|
|
|
+ const char *name;
|
|
|
|
|
+
|
|
|
|
|
+ /* The RISC-V ISA and extensions supported by this CPU. */
|
|
|
|
|
+ const char *isa;
|
|
|
|
|
+
|
|
|
|
|
+ /* Tuning parameters for this CPU. */
|
|
|
|
|
+ const struct riscv_tune_info *tune_info;
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Global variables for machine-dependent things. */
|
|
|
|
|
+
|
|
|
|
|
+/* Which tuning parameters to use. */
|
|
|
|
|
+static const struct riscv_tune_info *tune_info;
|
|
|
|
|
+
|
|
|
|
|
+/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
|
|
|
|
|
+bool riscv_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
|
|
|
|
|
+
|
|
|
|
|
+/* riscv_lo_relocs[X] is the relocation to use when a symbol of type X
|
|
|
|
|
+ appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
|
|
|
|
|
+ if they are matched by a special .md file pattern. */
|
|
|
|
|
+const char *riscv_lo_relocs[NUM_SYMBOL_TYPES];
|
|
|
|
|
+
|
|
|
|
|
+/* Likewise for HIGHs. */
|
|
|
|
|
+const char *riscv_hi_relocs[NUM_SYMBOL_TYPES];
|
|
|
|
|
+
|
|
|
|
|
+/* Index R is the smallest register class that contains register R. */
|
|
|
|
|
+const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
|
|
|
|
|
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
|
|
|
|
|
+ GR_REGS, T_REGS, T_REGS, T_REGS,
|
|
|
|
|
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
|
|
|
|
|
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
|
|
|
|
|
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
|
|
|
|
|
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
|
|
|
|
|
+ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
|
|
|
|
|
+ T_REGS, T_REGS, T_REGS, T_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
|
|
|
|
|
+ FRAME_REGS, FRAME_REGS,
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Costs to use when tuning for the Rocket microarchitecture. */
|
|
|
|
|
+static const struct riscv_tune_info rocket_tune_info = {
|
|
|
|
|
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
|
|
|
|
|
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
|
|
|
|
|
+ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
|
|
|
|
|
+ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
|
|
|
|
|
+ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
|
|
|
|
|
+ 1, /* issue_rate */
|
|
|
|
|
+ 3, /* branch_cost */
|
|
|
|
|
+ COSTS_N_INSNS (2), /* fp_to_int_cost */
|
|
|
|
|
+ 5 /* memory_cost */
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Costs to use when optimizing for size. */
|
|
|
|
|
+static const struct riscv_tune_info optimize_size_tune_info = {
|
|
|
|
|
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
|
|
|
|
|
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
|
|
|
|
|
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
|
|
|
|
|
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
|
|
|
|
|
+ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
|
|
|
|
|
+ 1, /* issue_rate */
|
|
|
|
|
+ 1, /* branch_cost */
|
|
|
|
|
+ COSTS_N_INSNS (1), /* fp_to_int_cost */
|
|
|
|
|
+ 1 /* memory_cost */
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* A table describing all the processors GCC knows about. */
|
|
|
|
|
+static const struct riscv_cpu_info riscv_cpu_info_table[] = {
|
|
|
|
|
+ /* Entries for generic ISAs. */
|
|
|
|
|
+ { "rocket", "IMAFD", &rocket_tune_info },
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+/* Return the riscv_cpu_info entry for the given name string. */
|
|
|
|
|
+
|
|
|
|
|
+static const struct riscv_cpu_info *
|
|
|
|
|
+riscv_parse_cpu (const char *cpu_string)
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned int i;
|
|
|
|
|
+
|
|
|
|
|
+ for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
|
|
|
|
|
+ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
|
|
|
|
|
+ return riscv_cpu_info_table + i;
|
|
|
|
|
+
|
|
|
|
|
+ error ("unknown cpu `%s'", cpu_string);
|
|
|
|
|
+ return riscv_cpu_info_table;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Fill CODES with a sequence of rtl operations to load VALUE.
|
|
|
|
|
+ Return the number of operations needed. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
|
|
|
|
|
+ enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ HOST_WIDE_INT low_part = RISCV_CONST_LOW_PART (value);
|
|
|
|
|
+ int cost = INT_MAX, alt_cost;
|
|
|
|
|
+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
|
|
|
|
|
+
|
|
|
|
|
+ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Simply ADDI or LUI */
|
|
|
|
|
+ codes[0].code = UNKNOWN;
|
|
|
|
|
+ codes[0].value = value;
|
|
|
|
|
+ return 1;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* End with ADDI */
|
|
|
|
|
+ if (low_part != 0
|
|
|
|
|
+ && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
|
|
|
|
|
+ {
|
|
|
|
|
+ cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
|
|
|
|
|
+ codes[cost-1].code = PLUS;
|
|
|
|
|
+ codes[cost-1].value = low_part;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* End with XORI */
|
|
|
|
|
+ if (cost > 2 && (low_part < 0 || mode == HImode))
|
|
|
|
|
+ {
|
|
|
|
|
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
|
|
|
|
|
+ alt_codes[alt_cost-1].code = XOR;
|
|
|
|
|
+ alt_codes[alt_cost-1].value = low_part;
|
|
|
|
|
+ if (alt_cost < cost)
|
|
|
|
|
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Eliminate trailing zeros and end with SLLI */
|
|
|
|
|
+ if (cost > 2 && (value & 1) == 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ int shift = 0;
|
|
|
|
|
+ while ((value & 1) == 0)
|
|
|
|
|
+ shift++, value >>= 1;
|
|
|
|
|
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
|
|
|
|
|
+ alt_codes[alt_cost-1].code = ASHIFT;
|
|
|
|
|
+ alt_codes[alt_cost-1].value = shift;
|
|
|
|
|
+ if (alt_cost < cost)
|
|
|
|
|
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
|
|
|
|
|
+ return cost;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
|
|
|
|
|
+ enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ int cost = riscv_build_integer_1 (codes, value, mode);
|
|
|
|
|
+
|
|
|
|
|
+ /* Eliminate leading zeros and end with SRLI */
|
|
|
|
|
+ if (value > 0 && cost > 2)
|
|
|
|
|
+ {
|
|
|
|
|
+ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
|
|
|
|
|
+ int alt_cost, shift = 0;
|
|
|
|
|
+ HOST_WIDE_INT shifted_val;
|
|
|
|
|
+
|
|
|
|
|
+ /* Try filling trailing bits with 1s */
|
|
|
|
|
+ while ((value << shift) >= 0)
|
|
|
|
|
+ shift++;
|
|
|
|
|
+ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
|
|
|
|
|
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
|
|
|
|
|
+ alt_codes[alt_cost-1].code = LSHIFTRT;
|
|
|
|
|
+ alt_codes[alt_cost-1].value = shift;
|
|
|
|
|
+ if (alt_cost < cost)
|
|
|
|
|
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
|
|
|
|
|
+
|
|
|
|
|
+ /* Try filling trailing bits with 0s */
|
|
|
|
|
+ shifted_val = value << shift;
|
|
|
|
|
+ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
|
|
|
|
|
+ alt_codes[alt_cost-1].code = LSHIFTRT;
|
|
|
|
|
+ alt_codes[alt_cost-1].value = shift;
|
|
|
|
|
+ if (alt_cost < cost)
|
|
|
|
|
+ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return cost;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_split_integer_cost (HOST_WIDE_INT val)
|
|
|
|
|
+{
|
|
|
|
|
+ int cost;
|
|
|
|
|
+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
|
|
|
|
|
+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
|
|
|
|
|
+
|
|
|
|
|
+ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
|
|
|
|
|
+ if (loval != hival)
|
|
|
|
|
+ cost += riscv_build_integer (codes, hival, VOIDmode);
|
|
|
|
|
+
|
|
|
|
|
+ return cost;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_integer_cost (HOST_WIDE_INT val)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
|
|
|
|
|
+ return MIN (riscv_build_integer (codes, val, VOIDmode),
|
|
|
|
|
+ riscv_split_integer_cost (val));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Try to split a 64b integer into 32b parts, then reassemble. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
|
|
|
|
|
+ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
|
|
|
|
|
+
|
|
|
|
|
+ riscv_move_integer (hi, hi, hival);
|
|
|
|
|
+ riscv_move_integer (lo, lo, loval);
|
|
|
|
|
+
|
|
|
|
|
+ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
|
|
|
|
|
+ hi = force_reg (mode, hi);
|
|
|
|
|
+
|
|
|
|
|
+ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if X is a thread-local symbol. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_tls_symbol_p (const_rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_symbol_binds_local_p (const_rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ return (SYMBOL_REF_DECL (x)
|
|
|
|
|
+ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
|
|
|
|
|
+ : SYMBOL_REF_LOCAL_P (x));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the method that should be used to access SYMBOL_REF or
|
|
|
|
|
+ LABEL_REF X in context CONTEXT. */
|
|
|
|
|
+
|
|
|
|
|
+static enum riscv_symbol_type
|
|
|
|
|
+riscv_classify_symbol (const_rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_tls_symbol_p (x))
|
|
|
|
|
+ return SYMBOL_TLS;
|
|
|
|
|
+
|
|
|
|
|
+ if (GET_CODE (x) == LABEL_REF)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (LABEL_REF_NONLOCAL_P (x))
|
|
|
|
|
+ return SYMBOL_GOT_DISP;
|
|
|
|
|
+ return SYMBOL_ABSOLUTE;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
|
|
|
|
|
+
|
|
|
|
|
+ if (flag_pic && !riscv_symbol_binds_local_p (x))
|
|
|
|
|
+ return SYMBOL_GOT_DISP;
|
|
|
|
|
+
|
|
|
|
|
+ return SYMBOL_ABSOLUTE;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Classify the base of symbolic expression X, given that X appears in
|
|
|
|
|
+ context CONTEXT. */
|
|
|
|
|
+
|
|
|
|
|
+static enum riscv_symbol_type
|
|
|
|
|
+riscv_classify_symbolic_expression (rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx offset;
|
|
|
|
|
+
|
|
|
|
|
+ split_const (x, &x, &offset);
|
|
|
|
|
+ if (UNSPEC_ADDRESS_P (x))
|
|
|
|
|
+ return UNSPEC_ADDRESS_TYPE (x);
|
|
|
|
|
+
|
|
|
|
|
+ return riscv_classify_symbol (x);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if X is a symbolic constant that can be used in context
|
|
|
|
|
+ CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
|
|
|
|
|
+
|
|
|
|
|
+bool
|
|
|
|
|
+riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx offset;
|
|
|
|
|
+
|
|
|
|
|
+ split_const (x, &x, &offset);
|
|
|
|
|
+ if (UNSPEC_ADDRESS_P (x))
|
|
|
|
|
+ {
|
|
|
|
|
+ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
|
|
|
|
|
+ x = UNSPEC_ADDRESS (x);
|
|
|
|
|
+ }
|
|
|
|
|
+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
|
|
|
|
|
+ *symbol_type = riscv_classify_symbol (x);
|
|
|
|
|
+ else
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ if (offset == const0_rtx)
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ /* Check whether a nonzero offset is valid for the underlying
|
|
|
|
|
+ relocations. */
|
|
|
|
|
+ switch (*symbol_type)
|
|
|
|
|
+ {
|
|
|
|
|
+ case SYMBOL_ABSOLUTE:
|
|
|
|
|
+ case SYMBOL_TLS_LE:
|
|
|
|
|
+ return (int32_t) INTVAL (offset) == INTVAL (offset);
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ return false;
|
|
|
|
|
+ }
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Returns the number of instructions necessary to reference a symbol. */
|
|
|
|
|
+
|
|
|
|
|
+static int riscv_symbol_insns (enum riscv_symbol_type type)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (type)
|
|
|
|
|
+ {
|
|
|
|
|
+ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
|
|
|
|
|
+ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
|
|
|
|
|
+ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
|
|
|
|
|
+ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
|
|
|
|
|
+ default: gcc_unreachable();
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ return riscv_const_insns (x) > 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type type;
|
|
|
|
|
+ rtx base, offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* There is no assembler syntax for expressing an address-sized
|
|
|
|
|
+ high part. */
|
|
|
|
|
+ if (GET_CODE (x) == HIGH)
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ split_const (x, &base, &offset);
|
|
|
|
|
+ if (riscv_symbolic_constant_p (base, &type))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* As an optimization, don't spill symbolic constants that are as
|
|
|
|
|
+ cheap to rematerialize as to access in the constant pool. */
|
|
|
|
|
+ if (SMALL_INT (offset) && riscv_symbol_insns (type) > 0)
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ /* As an optimization, avoid needlessly generating dynamic relocations. */
|
|
|
|
|
+ if (flag_pic)
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* TLS symbols must be computed by riscv_legitimize_move. */
|
|
|
|
|
+ if (tls_referenced_p (x))
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ return false;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if register REGNO is a valid base register for mode MODE.
|
|
|
|
|
+ STRICT_P is true if REG_OK_STRICT is in effect. */
|
|
|
|
|
+
|
|
|
|
|
+int
|
|
|
|
|
+riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
|
|
|
|
|
+ bool strict_p)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!HARD_REGISTER_NUM_P (regno))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (!strict_p)
|
|
|
|
|
+ return true;
|
|
|
|
|
+ regno = reg_renumber[regno];
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* These fake registers will be eliminated to either the stack or
|
|
|
|
|
+ hard frame pointer, both of which are usually valid base registers.
|
|
|
|
|
+ Reload deals with the cases where the eliminated form isn't valid. */
|
|
|
|
|
+ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ return GP_REG_P (regno);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if X is a valid base register for mode MODE.
|
|
|
|
|
+ STRICT_P is true if REG_OK_STRICT is in effect. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!strict_p && GET_CODE (x) == SUBREG)
|
|
|
|
|
+ x = SUBREG_REG (x);
|
|
|
|
|
+
|
|
|
|
|
+ return (REG_P (x)
|
|
|
|
|
+ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
|
|
|
|
|
+ can address a value of mode MODE. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_valid_offset_p (rtx x, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ /* Check that X is a signed 12-bit number. */
|
|
|
|
|
+ if (!const_arith_operand (x, Pmode))
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ /* We may need to split multiword moves, so make sure that every word
|
|
|
|
|
+ is accessible. */
|
|
|
|
|
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
|
|
|
|
|
+ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ return true;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if a LO_SUM can address a value of mode MODE when the
|
|
|
|
|
+ LO_SUM symbol has type SYMBOL_TYPE. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ /* Check that symbols of type SYMBOL_TYPE can be used to access values
|
|
|
|
|
+ of mode MODE. */
|
|
|
|
|
+ if (riscv_symbol_insns (symbol_type) == 0)
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ /* Check that there is a known low-part relocation. */
|
|
|
|
|
+ if (riscv_lo_relocs[symbol_type] == NULL)
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ /* We may need to split multiword moves, so make sure that each word
|
|
|
|
|
+ can be accessed without inducing a carry. This is mainly needed
|
|
|
|
|
+ for o64, which has historically only guaranteed 64-bit alignment
|
|
|
|
|
+ for 128-bit types. */
|
|
|
|
|
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
|
|
|
|
|
+ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ return true;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if X is a valid address for machine mode MODE. If it is,
|
|
|
|
|
+ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
|
|
|
|
|
+ effect. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_classify_address (struct riscv_address_info *info, rtx x,
|
|
|
|
|
+ enum machine_mode mode, bool strict_p)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (GET_CODE (x))
|
|
|
|
|
+ {
|
|
|
|
|
+ case REG:
|
|
|
|
|
+ case SUBREG:
|
|
|
|
|
+ info->type = ADDRESS_REG;
|
|
|
|
|
+ info->reg = x;
|
|
|
|
|
+ info->offset = const0_rtx;
|
|
|
|
|
+ return riscv_valid_base_register_p (info->reg, mode, strict_p);
|
|
|
|
|
+
|
|
|
|
|
+ case PLUS:
|
|
|
|
|
+ info->type = ADDRESS_REG;
|
|
|
|
|
+ info->reg = XEXP (x, 0);
|
|
|
|
|
+ info->offset = XEXP (x, 1);
|
|
|
|
|
+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
|
|
|
|
|
+ && riscv_valid_offset_p (info->offset, mode));
|
|
|
|
|
+
|
|
|
|
|
+ case LO_SUM:
|
|
|
|
|
+ info->type = ADDRESS_LO_SUM;
|
|
|
|
|
+ info->reg = XEXP (x, 0);
|
|
|
|
|
+ info->offset = XEXP (x, 1);
|
|
|
|
|
+ /* We have to trust the creator of the LO_SUM to do something vaguely
|
|
|
|
|
+ sane. Target-independent code that creates a LO_SUM should also
|
|
|
|
|
+ create and verify the matching HIGH. Target-independent code that
|
|
|
|
|
+ adds an offset to a LO_SUM must prove that the offset will not
|
|
|
|
|
+ induce a carry. Failure to do either of these things would be
|
|
|
|
|
+ a bug, and we are not required to check for it here. The RISCV
|
|
|
|
|
+ backend itself should only create LO_SUMs for valid symbolic
|
|
|
|
|
+ constants, with the high part being either a HIGH or a copy
|
|
|
|
|
+ of _gp. */
|
|
|
|
|
+ info->symbol_type
|
|
|
|
|
+ = riscv_classify_symbolic_expression (info->offset);
|
|
|
|
|
+ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
|
|
|
|
|
+ && riscv_valid_lo_sum_p (info->symbol_type, mode));
|
|
|
|
|
+
|
|
|
|
|
+ case CONST_INT:
|
|
|
|
|
+ /* Small-integer addresses don't occur very often, but they
|
|
|
|
|
+ are legitimate if $0 is a valid base register. */
|
|
|
|
|
+ info->type = ADDRESS_CONST_INT;
|
|
|
|
|
+ return SMALL_INT (x);
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ return false;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_address_info addr;
|
|
|
|
|
+
|
|
|
|
|
+ return riscv_classify_address (&addr, x, mode, strict_p);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the number of instructions needed to load or store a value
|
|
|
|
|
+ of mode MODE at address X. Return 0 if X isn't valid for MODE.
|
|
|
|
|
+ Assume that multiword moves may need to be split into word moves
|
|
|
|
|
+ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
|
|
|
|
|
+ enough. */
|
|
|
|
|
+
|
|
|
|
|
+int
|
|
|
|
|
+riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_address_info addr;
|
|
|
|
|
+ int n = 1;
|
|
|
|
|
+
|
|
|
|
|
+ if (!riscv_classify_address (&addr, x, mode, false))
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ /* BLKmode is used for single unaligned loads and stores and should
|
|
|
|
|
+ not count as a multiword mode. */
|
|
|
|
|
+ if (mode != BLKmode && might_split_p)
|
|
|
|
|
+ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
|
|
|
|
|
+
|
|
|
|
|
+ if (addr.type == ADDRESS_LO_SUM)
|
|
|
|
|
+ n += riscv_symbol_insns (addr.symbol_type) - 1;
|
|
|
|
|
+
|
|
|
|
|
+ return n;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the number of instructions needed to load constant X.
|
|
|
|
|
+ Return 0 if X isn't a valid constant. */
|
|
|
|
|
+
|
|
|
|
|
+int
|
|
|
|
|
+riscv_const_insns (rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type symbol_type;
|
|
|
|
|
+ rtx offset;
|
|
|
|
|
+
|
|
|
|
|
+ switch (GET_CODE (x))
|
|
|
|
|
+ {
|
|
|
|
|
+ case HIGH:
|
|
|
|
|
+ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
|
|
|
|
|
+ || !riscv_hi_relocs[symbol_type])
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ /* This is simply an LUI. */
|
|
|
|
|
+ return 1;
|
|
|
|
|
+
|
|
|
|
|
+ case CONST_INT:
|
|
|
|
|
+ {
|
|
|
|
|
+ int cost = riscv_integer_cost (INTVAL (x));
|
|
|
|
|
+ /* Force complicated constants to memory. */
|
|
|
|
|
+ return cost < 4 ? cost : 0;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case CONST_DOUBLE:
|
|
|
|
|
+ case CONST_VECTOR:
|
|
|
|
|
+ /* Allow zeros for normal mode, where we can use x0. */
|
|
|
|
|
+ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
|
|
|
|
|
+
|
|
|
|
|
+ case CONST:
|
|
|
|
|
+ /* See if we can refer to X directly. */
|
|
|
|
|
+ if (riscv_symbolic_constant_p (x, &symbol_type))
|
|
|
|
|
+ return riscv_symbol_insns (symbol_type);
|
|
|
|
|
+
|
|
|
|
|
+ /* Otherwise try splitting the constant into a base and offset.
|
|
|
|
|
+ If the offset is a 16-bit value, we can load the base address
|
|
|
|
|
+ into a register and then use (D)ADDIU to add in the offset.
|
|
|
|
|
+ If the offset is larger, we can load the base and offset
|
|
|
|
|
+ into separate registers and add them together with (D)ADDU.
|
|
|
|
|
+ However, the latter is only possible before reload; during
|
|
|
|
|
+ and after reload, we must have the option of forcing the
|
|
|
|
|
+ constant into the pool instead. */
|
|
|
|
|
+ split_const (x, &x, &offset);
|
|
|
|
|
+ if (offset != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ int n = riscv_const_insns (x);
|
|
|
|
|
+ if (n != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (SMALL_INT (offset))
|
|
|
|
|
+ return n + 1;
|
|
|
|
|
+ else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
|
|
|
|
|
+ return n + 1 + riscv_integer_cost (INTVAL (offset));
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ case SYMBOL_REF:
|
|
|
|
|
+ case LABEL_REF:
|
|
|
|
|
+ return riscv_symbol_insns (riscv_classify_symbol (x));
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ return 0;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* X is a doubleword constant that can be handled by splitting it into
|
|
|
|
|
+ two words and loading each word separately. Return the number of
|
|
|
|
|
+ instructions required to do this. */
|
|
|
|
|
+
|
|
|
|
|
+int
|
|
|
|
|
+riscv_split_const_insns (rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned int low, high;
|
|
|
|
|
+
|
|
|
|
|
+ low = riscv_const_insns (riscv_subword (x, false));
|
|
|
|
|
+ high = riscv_const_insns (riscv_subword (x, true));
|
|
|
|
|
+ gcc_assert (low > 0 && high > 0);
|
|
|
|
|
+ return low + high;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the number of instructions needed to implement INSN,
|
|
|
|
|
+ given that it loads from or stores to MEM. */
|
|
|
|
|
+
|
|
|
|
|
+int
|
|
|
|
|
+riscv_load_store_insns (rtx mem, rtx_insn *insn)
|
|
|
|
|
+{
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+ bool might_split_p;
|
|
|
|
|
+ rtx set;
|
|
|
|
|
+
|
|
|
|
|
+ gcc_assert (MEM_P (mem));
|
|
|
|
|
+ mode = GET_MODE (mem);
|
|
|
|
|
+
|
|
|
|
|
+ /* Try to prove that INSN does not need to be split. */
|
|
|
|
|
+ might_split_p = true;
|
|
|
|
|
+ if (GET_MODE_BITSIZE (mode) == 64)
|
|
|
|
|
+ {
|
|
|
|
|
+ set = single_set (insn);
|
|
|
|
|
+ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
|
|
|
|
|
+ might_split_p = false;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Emit a move from SRC to DEST. Assume that the move expanders can
|
|
|
|
|
+ handle all moves if !can_create_pseudo_p (). The distinction is
|
|
|
|
|
+ important because, unlike emit_move_insn, the move expanders know
|
|
|
|
|
+ how to force Pmode objects into the constant pool even when the
|
|
|
|
|
+ constant pool address is not itself legitimate. */
|
|
|
|
|
+
|
|
|
|
|
+rtx
|
|
|
|
|
+riscv_emit_move (rtx dest, rtx src)
|
|
|
|
|
+{
|
|
|
|
|
+ return (can_create_pseudo_p ()
|
|
|
|
|
+ ? emit_move_insn (dest, src)
|
|
|
|
|
+ : emit_move_insn_1 (dest, src));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
|
|
|
|
|
+{
|
|
|
|
|
+ emit_insn (gen_rtx_SET (VOIDmode, target,
|
|
|
|
|
+ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Compute (CODE OP0 OP1) and store the result in a new register
|
|
|
|
|
+ of mode MODE. Return that new register. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx reg;
|
|
|
|
|
+
|
|
|
|
|
+ reg = gen_reg_rtx (mode);
|
|
|
|
|
+ riscv_emit_binary (code, reg, op0, op1);
|
|
|
|
|
+ return reg;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Copy VALUE to a register and return that register. If new pseudos
|
|
|
|
|
+ are allowed, copy it into a new register, otherwise use DEST. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_force_temporary (rtx dest, rtx value)
|
|
|
|
|
+{
|
|
|
|
|
+ if (can_create_pseudo_p ())
|
|
|
|
|
+ return force_reg (Pmode, value);
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (dest, value);
|
|
|
|
|
+ return dest;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
|
|
|
|
|
+ then add CONST_INT OFFSET to the result. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_unspec_address_offset (rtx base, rtx offset,
|
|
|
|
|
+ enum riscv_symbol_type symbol_type)
|
|
|
|
|
+{
|
|
|
|
|
+ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
|
|
|
|
|
+ UNSPEC_ADDRESS_FIRST + symbol_type);
|
|
|
|
|
+ if (offset != const0_rtx)
|
|
|
|
|
+ base = gen_rtx_PLUS (Pmode, base, offset);
|
|
|
|
|
+ return gen_rtx_CONST (Pmode, base);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return an UNSPEC address with underlying address ADDRESS and symbol
|
|
|
|
|
+ type SYMBOL_TYPE. */
|
|
|
|
|
+
|
|
|
|
|
+rtx
|
|
|
|
|
+riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx base, offset;
|
|
|
|
|
+
|
|
|
|
|
+ split_const (address, &base, &offset);
|
|
|
|
|
+ return riscv_unspec_address_offset (base, offset, symbol_type);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* If OP is an UNSPEC address, return the address to which it refers,
|
|
|
|
|
+ otherwise return OP itself. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_strip_unspec_address (rtx op)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx base, offset;
|
|
|
|
|
+
|
|
|
|
|
+ split_const (op, &base, &offset);
|
|
|
|
|
+ if (UNSPEC_ADDRESS_P (base))
|
|
|
|
|
+ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
|
|
|
|
|
+ return op;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Load the high part of riscv_unspec_address (ADDR, SYMBOL_TYPE) into a
|
|
|
|
|
+ register and return that register.
|
|
|
|
|
+ TEMP is as for riscv_force_temporary.
|
|
|
|
|
+
|
|
|
|
|
+ The returned expression can be used as the first operand to a LO_SUM. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
|
|
|
|
|
+{
|
|
|
|
|
+ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
|
|
|
|
|
+ return riscv_force_temporary (temp, addr);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Load an entry from the GOT. */
|
|
|
|
|
+static rtx riscv_got_load_tls_gd(rtx dest, rtx sym)
|
|
|
|
|
+{
|
|
|
|
|
+ return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static rtx riscv_got_load_tls_ie(rtx dest, rtx sym)
|
|
|
|
|
+{
|
|
|
|
|
+ return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static rtx riscv_tls_add_tp_le(rtx dest, rtx base, rtx sym)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
|
|
|
|
|
+ return (Pmode == DImode ? gen_tls_add_tp_ledi(dest, base, tp, sym) : gen_tls_add_tp_lesi(dest, base, tp, sym));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
|
|
|
|
|
+ it appears in a MEM of that mode. Return true if ADDR is a legitimate
|
|
|
|
|
+ constant in that context and can be split into high and low parts.
|
|
|
|
|
+ If so, and if LOW_OUT is nonnull, emit the high part and store the
|
|
|
|
|
+ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
|
|
|
|
|
+
|
|
|
|
|
+ TEMP is as for riscv_force_temporary and is used to load the high
|
|
|
|
|
+ part into a register.
|
|
|
|
|
+
|
|
|
|
|
+ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
|
|
|
|
|
+ a legitimate SET_SRC for an .md pattern, otherwise the low part
|
|
|
|
|
+ is guaranteed to be a legitimate address for mode MODE. */
|
|
|
|
|
+
|
|
|
|
|
+bool
|
|
|
|
|
+riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type symbol_type;
|
|
|
|
|
+ rtx high;
|
|
|
|
|
+
|
|
|
|
|
+ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
|
|
|
|
|
+ || !riscv_symbolic_constant_p (addr, &symbol_type)
|
|
|
|
|
+ || riscv_symbol_insns (symbol_type) == 0
|
|
|
|
|
+ || !riscv_hi_relocs[symbol_type])
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ if (low_out)
|
|
|
|
|
+ {
|
|
|
|
|
+ switch (symbol_type)
|
|
|
|
|
+ {
|
|
|
|
|
+ case SYMBOL_ABSOLUTE:
|
|
|
|
|
+ high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
|
|
|
|
|
+ high = riscv_force_temporary (temp, high);
|
|
|
|
|
+ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return true;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return a legitimate address for REG + OFFSET. TEMP is as for
|
|
|
|
|
+ riscv_force_temporary; it is only needed when OFFSET is not a
|
|
|
|
|
+ SMALL_OPERAND. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!SMALL_OPERAND (offset))
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx high;
|
|
|
|
|
+
|
|
|
|
|
+ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
|
|
|
|
|
+ The addition inside the macro CONST_HIGH_PART may cause an
|
|
|
|
|
+ overflow, so we need to force a sign-extension check. */
|
|
|
|
|
+ high = gen_int_mode (RISCV_CONST_HIGH_PART (offset), Pmode);
|
|
|
|
|
+ offset = RISCV_CONST_LOW_PART (offset);
|
|
|
|
|
+ high = riscv_force_temporary (temp, high);
|
|
|
|
|
+ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
|
|
|
|
|
+ }
|
|
|
|
|
+ return plus_constant (Pmode, reg, offset);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* The __tls_get_addr symbol. */
|
|
|
|
|
+static GTY(()) rtx riscv_tls_symbol;
|
|
|
|
|
+
|
|
|
|
|
+/* Return an instruction sequence that calls __tls_get_addr. SYM is
|
|
|
|
|
+ the TLS symbol we are referencing and TYPE is the symbol type to use
|
|
|
|
|
+ (either global dynamic or local dynamic). RESULT is an RTX for the
|
|
|
|
|
+ return value location. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_call_tls_get_addr (rtx sym, rtx result)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
|
|
|
|
|
+
|
|
|
|
|
+ if (!riscv_tls_symbol)
|
|
|
|
|
+ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
|
|
|
|
|
+
|
|
|
|
|
+ start_sequence ();
|
|
|
|
|
+
|
|
|
|
|
+ emit_insn (riscv_got_load_tls_gd (a0, sym));
|
|
|
|
|
+ insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
|
|
|
|
|
+ RTL_CONST_CALL_P (insn) = 1;
|
|
|
|
|
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
|
|
|
|
|
+ insn = get_insns ();
|
|
|
|
|
+
|
|
|
|
|
+ end_sequence ();
|
|
|
|
|
+
|
|
|
|
|
+ return insn;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
|
|
|
|
|
+ its address. The return value will be both a valid address and a valid
|
|
|
|
|
+ SET_SRC (either a REG or a LO_SUM). */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_legitimize_tls_address (rtx loc)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx dest, insn, tp, tmp1;
|
|
|
|
|
+ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
|
|
|
|
|
+
|
|
|
|
|
+ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
|
|
|
|
|
+ if (!flag_pic)
|
|
|
|
|
+ model = TLS_MODEL_LOCAL_EXEC;
|
|
|
|
|
+
|
|
|
|
|
+ switch (model)
|
|
|
|
|
+ {
|
|
|
|
|
+ case TLS_MODEL_LOCAL_DYNAMIC:
|
|
|
|
|
+ /* Rely on section anchors for the optimization that LDM TLS
|
|
|
|
|
+ provides. The anchor's address is loaded with GD TLS. */
|
|
|
|
|
+ case TLS_MODEL_GLOBAL_DYNAMIC:
|
|
|
|
|
+ tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
|
|
|
|
|
+ insn = riscv_call_tls_get_addr (loc, tmp1);
|
|
|
|
|
+ dest = gen_reg_rtx (Pmode);
|
|
|
|
|
+ emit_libcall_block (insn, dest, tmp1, loc);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case TLS_MODEL_INITIAL_EXEC:
|
|
|
|
|
+ /* la.tls.ie; tp-relative add */
|
|
|
|
|
+ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
|
|
|
|
|
+ tmp1 = gen_reg_rtx (Pmode);
|
|
|
|
|
+ emit_insn (riscv_got_load_tls_ie (tmp1, loc));
|
|
|
|
|
+ dest = gen_reg_rtx (Pmode);
|
|
|
|
|
+ emit_insn (gen_add3_insn (dest, tmp1, tp));
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case TLS_MODEL_LOCAL_EXEC:
|
|
|
|
|
+ tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
|
|
|
|
|
+ dest = gen_reg_rtx (Pmode);
|
|
|
|
|
+ emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
|
|
|
|
|
+ dest = gen_rtx_LO_SUM (Pmode, dest,
|
|
|
|
|
+ riscv_unspec_address (loc, SYMBOL_TLS_LE));
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+ return dest;
|
|
|
|
|
+}
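+
+/* Rough sketch of the sequences the three models above expand to (added for
+   illustration; register names are arbitrary and the exact pseudo-ops are
+   assumptions based on the patterns used above):
+
+     global/local dynamic:   la.tls.gd a0, x
+                             call      __tls_get_addr   # result in a0
+
+     initial exec:           la.tls.ie t0, x            # tp offset from GOT
+                             add       t0, t0, tp
+
+     local exec:             the high part of the tp-relative offset is
+                             added to tp (riscv_tls_add_tp_le) and the low
+                             part is folded into the LO_SUM returned above.  */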
|
|
|
|
|
+
|
|
|
|
|
+/* If X is not a valid address for mode MODE, force it into a register. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_force_address (rtx x, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!riscv_legitimate_address_p (mode, x, false))
|
|
|
|
|
+ x = force_reg (Pmode, x);
|
|
|
|
|
+ return x;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
|
|
|
|
|
+ be legitimized in a way that the generic machinery might not expect,
|
|
|
|
|
+ return a new address, otherwise return NULL. MODE is the mode of
|
|
|
|
|
+ the memory being accessed. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
|
|
|
|
|
+ enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx addr;
|
|
|
|
|
+
|
|
|
|
|
+ if (riscv_tls_symbol_p (x))
|
|
|
|
|
+ return riscv_legitimize_tls_address (x);
|
|
|
|
|
+
|
|
|
|
|
+ /* See if the address can be split into a high part and a LO_SUM. */
|
|
|
|
|
+ if (riscv_split_symbol (NULL, x, mode, &addr))
|
|
|
|
|
+ return riscv_force_address (addr, mode);
|
|
|
|
|
+
|
|
|
|
|
+ /* Handle BASE + OFFSET using riscv_add_offset. */
|
|
|
|
|
+ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
|
|
|
|
|
+ && INTVAL (XEXP (x, 1)) != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx base = XEXP (x, 0);
|
|
|
|
|
+ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
|
|
|
|
|
+
|
|
|
|
|
+ if (!riscv_valid_base_register_p (base, mode, false))
|
|
|
|
|
+ base = copy_to_mode_reg (Pmode, base);
|
|
|
|
|
+ addr = riscv_add_offset (NULL, base, offset);
|
|
|
|
|
+ return riscv_force_address (addr, mode);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return x;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+ int i, num_ops;
|
|
|
|
|
+ rtx x;
|
|
|
|
|
+
|
|
|
|
|
+ mode = GET_MODE (dest);
|
|
|
|
|
+ num_ops = riscv_build_integer (codes, value, mode);
|
|
|
|
|
+
|
|
|
|
|
+ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
|
|
|
|
|
+ && num_ops >= riscv_split_integer_cost (value))
|
|
|
|
|
+ x = riscv_split_integer (value, mode);
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Apply each binary operation to X. */
|
|
|
|
|
+ x = GEN_INT (codes[0].value);
|
|
|
|
|
+
|
|
|
|
|
+ for (i = 1; i < num_ops; i++)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (!can_create_pseudo_p ())
|
|
|
|
|
+ {
|
|
|
|
|
+ emit_insn (gen_rtx_SET (VOIDmode, temp, x));
|
|
|
|
|
+ x = temp;
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ x = force_reg (mode, x);
|
|
|
|
|
+
|
|
|
|
|
+ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Subroutine of riscv_legitimize_move. Move constant SRC into register
|
|
|
|
|
+ DEST given that SRC satisfies immediate_operand but doesn't satisfy
|
|
|
|
|
+ move_operand. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx base, offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* Split moves of big integers into smaller pieces. */
|
|
|
|
|
+ if (splittable_const_int_operand (src, mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_move_integer (dest, dest, INTVAL (src));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Split moves of symbolic constants into high/low pairs. */
|
|
|
|
|
+ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
|
|
|
|
|
+ {
|
|
|
|
|
+ emit_insn (gen_rtx_SET (VOIDmode, dest, src));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Generate the appropriate access sequences for TLS symbols. */
|
|
|
|
|
+ if (riscv_tls_symbol_p (src))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* If we have (const (plus symbol offset)), and that expression cannot
|
|
|
|
|
+ be forced into memory, load the symbol first and add in the offset. Also
|
|
|
|
|
+ prefer to do this even if the constant _can_ be forced into memory, as it
|
|
|
|
|
+ usually produces better code. */
|
|
|
|
|
+ split_const (src, &base, &offset);
|
|
|
|
|
+ if (offset != const0_rtx
|
|
|
|
|
+ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
|
|
|
|
|
+ {
|
|
|
|
|
+ base = riscv_force_temporary (dest, base);
|
|
|
|
|
+ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ src = force_const_mem (mode, src);
|
|
|
|
|
+
|
|
|
|
|
+ /* When using explicit relocs, constant pool references are sometimes
|
|
|
|
|
+ not legitimate addresses. */
|
|
|
|
|
+ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
|
|
|
|
|
+ riscv_emit_move (dest, src);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
|
|
|
|
|
+ sequence that is valid. */
|
|
|
|
|
+
|
|
|
|
|
+bool
|
|
|
|
|
+riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (dest, force_reg (mode, src));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* We need to deal with constants that would be legitimate
|
|
|
|
|
+ immediate_operands but aren't legitimate move_operands. */
|
|
|
|
|
+ if (CONSTANT_P (src) && !move_operand (src, mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_legitimize_const_move (mode, dest, src);
|
|
|
|
|
+ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ return false;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if there is an instruction that implements CODE and accepts
|
|
|
|
|
+ X as an immediate operand. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case ASHIFT:
|
|
|
|
|
+ case ASHIFTRT:
|
|
|
|
|
+ case LSHIFTRT:
|
|
|
|
|
+ /* All shift counts are truncated to a valid constant. */
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ case AND:
|
|
|
|
|
+ case IOR:
|
|
|
|
|
+ case XOR:
|
|
|
|
|
+ case PLUS:
|
|
|
|
|
+ case LT:
|
|
|
|
|
+ case LTU:
|
|
|
|
|
+ /* These instructions take 12-bit signed immediates. */
|
|
|
|
|
+ return SMALL_OPERAND (x);
|
|
|
|
|
+
|
|
|
|
|
+ case LE:
|
|
|
|
|
+ /* We add 1 to the immediate and use SLT. */
|
|
|
|
|
+ return SMALL_OPERAND (x + 1);
|
|
|
|
|
+
|
|
|
|
|
+ case LEU:
|
|
|
|
|
+ /* Likewise SLTU, but reject the always-true case. */
|
|
|
|
|
+ return SMALL_OPERAND (x + 1) && x + 1 != 0;
|
|
|
|
|
+
|
|
|
|
|
+ case GE:
|
|
|
|
|
+ case GEU:
|
|
|
|
|
+ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
|
|
|
|
|
+ return x == 1;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ /* By default assume that x0 can be used for 0. */
|
|
|
|
|
+ return x == 0;
|
|
|
|
|
+ }
|
|
|
|
|
+}
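+
+/* Worked examples for the checks above (illustrative only):
+     (plus x 2047)  -> accepted: 2047 fits the 12-bit signed immediate.
+     (le   x 2046)  -> accepted: rewritten as x < 2047 (SLTI form).
+     (le   x 2047)  -> rejected: 2048 no longer fits the immediate.
+     (ge   x 1)     -> accepted: implemented as x > 0 against x0.  */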
|
|
|
|
|
+
|
|
|
|
|
+/* Return the cost of binary operation X, given that the instruction
|
|
|
|
|
+ sequence for a word-sized or smaller operation takes SINGLE_INSNS
|
|
|
|
|
+ instructions and that the sequence of a double-word operation takes
|
|
|
|
|
+ DOUBLE_INSNS instructions. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_binary_cost (rtx x, int single_insns, int double_insns)
|
|
|
|
|
+{
|
|
|
|
|
+ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
|
|
|
|
|
+ return COSTS_N_INSNS (double_insns);
|
|
|
|
|
+ return COSTS_N_INSNS (single_insns);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the cost of sign-extending OP to mode MODE, not including the
|
|
|
|
|
+ cost of OP itself. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_sign_extend_cost (enum machine_mode mode, rtx op)
|
|
|
|
|
+{
|
|
|
|
|
+ if (MEM_P (op))
|
|
|
|
|
+ /* Extended loads are as cheap as unextended ones. */
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
|
|
|
|
|
+ /* A sign extension from SImode to DImode in 64-bit mode is free. */
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ /* We need to use a shift left and a shift right. */
|
|
|
|
|
+ return COSTS_N_INSNS (2);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the cost of zero-extending OP to mode MODE, not including the
|
|
|
|
|
+ cost of OP itself. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_zero_extend_cost (enum machine_mode mode, rtx op)
|
|
|
|
|
+{
|
|
|
|
|
+ if (MEM_P (op))
|
|
|
|
|
+ /* Extended loads are as cheap as unextended ones. */
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) ||
|
|
|
|
|
+ ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
|
|
|
|
|
+ /* We need a shift left by 32 bits and a shift right by 32 bits. */
|
|
|
|
|
+ return COSTS_N_INSNS (2);
|
|
|
|
|
+
|
|
|
|
|
+ /* We can use ANDI. */
|
|
|
|
|
+ return COSTS_N_INSNS (1);
|
|
|
|
|
+}
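+
+/* Cost examples for the cases above (illustrative only): zero-extending a
+   register from SImode to DImode on RV64, or from HImode, needs a
+   shift-left/shift-right pair (2 insns); extending from QImode is a single
+   ANDI with 0xff (1 insn); an extending load from memory is free.  */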
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_RTX_COSTS. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
|
|
|
|
|
+ int *total, bool speed)
|
|
|
|
|
+{
|
|
|
|
|
+ enum machine_mode mode = GET_MODE (x);
|
|
|
|
|
+ bool float_mode_p = FLOAT_MODE_P (mode);
|
|
|
|
|
+ int cost;
|
|
|
|
|
+
|
|
|
|
|
+ switch (code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case CONST_INT:
|
|
|
|
|
+ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
|
|
|
|
|
+ {
|
|
|
|
|
+ *total = 0;
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ /* Fall through. */
|
|
|
|
|
+
|
|
|
|
|
+ case SYMBOL_REF:
|
|
|
|
|
+ case LABEL_REF:
|
|
|
|
|
+ case CONST_DOUBLE:
|
|
|
|
|
+ case CONST:
|
|
|
|
|
+ if (speed)
|
|
|
|
|
+ *total = 1;
|
|
|
|
|
+ else if ((cost = riscv_const_insns (x)) > 0)
|
|
|
|
|
+ *total = COSTS_N_INSNS (cost);
|
|
|
|
|
+ else /* The instruction will be fetched from the constant pool. */
|
|
|
|
|
+ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ case MEM:
|
|
|
|
|
+ /* If the address is legitimate, return the number of
|
|
|
|
|
+ instructions it needs. */
|
|
|
|
|
+ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ /* Otherwise use the default handling. */
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case NOT:
|
|
|
|
|
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case AND:
|
|
|
|
|
+ case IOR:
|
|
|
|
|
+ case XOR:
|
|
|
|
|
+ /* Double-word operations use two single-word operations. */
|
|
|
|
|
+ *total = riscv_binary_cost (x, 1, 2);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case ASHIFT:
|
|
|
|
|
+ case ASHIFTRT:
|
|
|
|
|
+ case LSHIFTRT:
|
|
|
|
|
+ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case ABS:
|
|
|
|
|
+ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case LO_SUM:
|
|
|
|
|
+ *total = set_src_cost (XEXP (x, 0), speed);
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ case LT:
|
|
|
|
|
+ case LTU:
|
|
|
|
|
+ case LE:
|
|
|
|
|
+ case LEU:
|
|
|
|
|
+ case GT:
|
|
|
|
|
+ case GTU:
|
|
|
|
|
+ case GE:
|
|
|
|
|
+ case GEU:
|
|
|
|
|
+ case EQ:
|
|
|
|
|
+ case NE:
|
|
|
|
|
+ case UNORDERED:
|
|
|
|
|
+ case LTGT:
|
|
|
|
|
+ /* Branch comparisons have VOIDmode, so use the first operand's
|
|
|
|
|
+ mode instead. */
|
|
|
|
|
+ mode = GET_MODE (XEXP (x, 0));
|
|
|
|
|
+ if (float_mode_p)
|
|
|
|
|
+ *total = tune_info->fp_add[mode == DFmode];
|
|
|
|
|
+ else
|
|
|
|
|
+ *total = riscv_binary_cost (x, 1, 3);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case MINUS:
|
|
|
|
|
+ if (float_mode_p
|
|
|
|
|
+ && !HONOR_NANS (mode)
|
|
|
|
|
+ && !HONOR_SIGNED_ZEROS (mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* See if we can use FNMADD or FNMSUB. See riscv.md for the
|
|
|
|
|
+ associated patterns. */
|
|
|
|
|
+ rtx op0 = XEXP (x, 0);
|
|
|
|
|
+ rtx op1 = XEXP (x, 1);
|
|
|
|
|
+ if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
|
|
|
|
|
+ {
|
|
|
|
|
+ *total = (tune_info->fp_mul[mode == DFmode]
|
|
|
|
|
+ + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
|
|
|
|
|
+ + set_src_cost (XEXP (op0, 1), speed)
|
|
|
|
|
+ + set_src_cost (op1, speed));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ if (GET_CODE (op1) == MULT)
|
|
|
|
|
+ {
|
|
|
|
|
+ *total = (tune_info->fp_mul[mode == DFmode]
|
|
|
|
|
+ + set_src_cost (op0, speed)
|
|
|
|
|
+ + set_src_cost (XEXP (op1, 0), speed)
|
|
|
|
|
+ + set_src_cost (XEXP (op1, 1), speed));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+ /* Fall through. */
|
|
|
|
|
+
|
|
|
|
|
+ case PLUS:
|
|
|
|
|
+ if (float_mode_p)
|
|
|
|
|
+ *total = tune_info->fp_add[mode == DFmode];
|
|
|
|
|
+ else
|
|
|
|
|
+ *total = riscv_binary_cost (x, 1, 4);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case NEG:
|
|
|
|
|
+ if (float_mode_p
|
|
|
|
|
+ && !HONOR_NANS (mode)
|
|
|
|
|
+ && HONOR_SIGNED_ZEROS (mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* See if we can use FNMADD or FNMSUB. See riscv.md for the
|
|
|
|
|
+ associated patterns. */
|
|
|
|
|
+ rtx op = XEXP (x, 0);
|
|
|
|
|
+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
|
|
|
|
|
+ && GET_CODE (XEXP (op, 0)) == MULT)
|
|
|
|
|
+ {
|
|
|
|
|
+ *total = (tune_info->fp_mul[mode == DFmode]
|
|
|
|
|
+ + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
|
|
|
|
|
+ + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
|
|
|
|
|
+ + set_src_cost (XEXP (op, 1), speed));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (float_mode_p)
|
|
|
|
|
+ *total = tune_info->fp_add[mode == DFmode];
|
|
|
|
|
+ else
|
|
|
|
|
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case MULT:
|
|
|
|
|
+ if (float_mode_p)
|
|
|
|
|
+ *total = tune_info->fp_mul[mode == DFmode];
|
|
|
|
|
+ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
|
|
|
|
|
+ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
|
|
|
|
|
+ else if (!speed)
|
|
|
|
|
+ *total = COSTS_N_INSNS (1);
|
|
|
|
|
+ else
|
|
|
|
|
+ *total = tune_info->int_mul[mode == DImode];
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case DIV:
|
|
|
|
|
+ case SQRT:
|
|
|
|
|
+ case MOD:
|
|
|
|
|
+ if (float_mode_p)
|
|
|
|
|
+ {
|
|
|
|
|
+ *total = tune_info->fp_div[mode == DFmode];
|
|
|
|
|
+ return false;
|
|
|
|
|
+ }
|
|
|
|
|
+ /* Fall through. */
|
|
|
|
|
+
|
|
|
|
|
+ case UDIV:
|
|
|
|
|
+ case UMOD:
|
|
|
|
|
+ if (speed)
|
|
|
|
|
+ *total = tune_info->int_div[mode == DImode];
|
|
|
|
|
+ else
|
|
|
|
|
+ *total = COSTS_N_INSNS (1);
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case SIGN_EXTEND:
|
|
|
|
|
+ *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case ZERO_EXTEND:
|
|
|
|
|
+ *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ case FLOAT:
|
|
|
|
|
+ case UNSIGNED_FLOAT:
|
|
|
|
|
+ case FIX:
|
|
|
|
|
+ case FLOAT_EXTEND:
|
|
|
|
|
+ case FLOAT_TRUNCATE:
|
|
|
|
|
+ *total = tune_info->fp_add[mode == DFmode];
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ return false;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_ADDRESS_COST. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_address_cost (rtx addr, enum machine_mode mode,
|
|
|
|
|
+ addr_space_t as ATTRIBUTE_UNUSED,
|
|
|
|
|
+ bool speed ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ return riscv_address_insns (addr, mode, false);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return one word of double-word value OP. HIGH_P is true to select the
|
|
|
|
|
+ high part or false to select the low part. */
|
|
|
|
|
+
|
|
|
|
|
+rtx
|
|
|
|
|
+riscv_subword (rtx op, bool high_p)
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned int byte;
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+
|
|
|
|
|
+ mode = GET_MODE (op);
|
|
|
|
|
+ if (mode == VOIDmode)
|
|
|
|
|
+ mode = TARGET_64BIT ? TImode : DImode;
|
|
|
|
|
+
|
|
|
|
|
+ byte = high_p ? UNITS_PER_WORD : 0;
|
|
|
|
|
+
|
|
|
|
|
+ if (FP_REG_RTX_P (op))
|
|
|
|
|
+ return gen_rtx_REG (word_mode, REGNO (op) + high_p);
|
|
|
|
|
+
|
|
|
|
|
+ if (MEM_P (op))
|
|
|
|
|
+ return adjust_address (op, word_mode, byte);
|
|
|
|
|
+
|
|
|
|
|
+ return simplify_gen_subreg (word_mode, op, mode, byte);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if a 64-bit move from SRC to DEST should be split into two. */
|
|
|
|
|
+
|
|
|
|
|
+bool
|
|
|
|
|
+riscv_split_64bit_move_p (rtx dest, rtx src)
|
|
|
|
|
+{
|
|
|
|
|
+ /* All 64b moves are legal in 64b mode. All 64b FPR <-> FPR and
|
|
|
|
|
+ FPR <-> MEM moves are legal in 32b mode, too. Although
|
|
|
|
|
+ FPR <-> GPR moves are not available in general in 32b mode,
|
|
|
|
|
+ we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
|
|
|
|
|
+ return !(TARGET_64BIT
|
|
|
|
|
+ || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
|
|
|
|
|
+ || (FP_REG_RTX_P (dest) && MEM_P (src))
|
|
|
|
|
+ || (FP_REG_RTX_P (src) && MEM_P (dest))
|
|
|
|
|
+ || (FP_REG_RTX_P(dest) && src == CONST0_RTX(GET_MODE(src))));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Split a doubleword move from SRC to DEST. On 32-bit targets,
|
|
|
|
|
+ this function handles 64-bit moves for which riscv_split_64bit_move_p
|
|
|
|
|
+ holds. For 64-bit targets, this function handles 128-bit moves. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_split_doubleword_move (rtx dest, rtx src)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx low_dest;
|
|
|
|
|
+
|
|
|
|
|
+ /* The operation can be split into two normal moves. Decide in
|
|
|
|
|
+ which order to do them. */
|
|
|
|
|
+ low_dest = riscv_subword (dest, false);
|
|
|
|
|
+ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
|
|
|
|
|
+ riscv_emit_move (low_dest, riscv_subword (src, false));
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (low_dest, riscv_subword (src, false));
|
|
|
|
|
+ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
|
|
|
|
|
+ }
|
|
|
|
|
+}
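+
+/* Ordering example (illustrative; the register pairing is an assumption):
+   on RV32 a DImode move whose destination pair is (a2,a3) and whose source
+   pair is (a1,a2) must copy the high halves first (a3 <- a2), because
+   copying the low half first (a2 <- a1) would overwrite a2 before its old
+   value had been read.  When there is no such overlap, the low halves are
+   moved first.  */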
|
|
|
|
|
+
|
|
|
|
|
+/* Return the appropriate instructions to move SRC into DEST. Assume
|
|
|
|
|
+ that SRC is operand 1 and DEST is operand 0. */
|
|
|
|
|
+
|
|
|
|
|
+const char *
|
|
|
|
|
+riscv_output_move (rtx dest, rtx src)
|
|
|
|
|
+{
|
|
|
|
|
+ enum rtx_code dest_code, src_code;
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+ bool dbl_p;
|
|
|
|
|
+
|
|
|
|
|
+ dest_code = GET_CODE (dest);
|
|
|
|
|
+ src_code = GET_CODE (src);
|
|
|
|
|
+ mode = GET_MODE (dest);
|
|
|
|
|
+ dbl_p = (GET_MODE_SIZE (mode) == 8);
|
|
|
|
|
+
|
|
|
|
|
+ if (dbl_p && riscv_split_64bit_move_p (dest, src))
|
|
|
|
|
+ return "#";
|
|
|
|
|
+
|
|
|
|
|
+ if ((src_code == REG && GP_REG_P (REGNO (src)))
|
|
|
|
|
+ || (src == CONST0_RTX (mode)))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (dest_code == REG)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (GP_REG_P (REGNO (dest)))
|
|
|
|
|
+ return "mv\t%0,%z1";
|
|
|
|
|
+
|
|
|
|
|
+ if (FP_REG_P (REGNO (dest)))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (!dbl_p)
|
|
|
|
|
+ return "fmv.s.x\t%0,%z1";
|
|
|
|
|
+ if (TARGET_64BIT)
|
|
|
|
|
+ return "fmv.d.x\t%0,%z1";
|
|
|
|
|
+ /* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w. */
|
|
|
|
|
+ gcc_assert (src == CONST0_RTX (mode));
|
|
|
|
|
+ return "fcvt.d.w\t%0,x0";
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+ if (dest_code == MEM)
|
|
|
|
|
+ switch (GET_MODE_SIZE (mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ case 1: return "sb\t%z1,%0";
|
|
|
|
|
+ case 2: return "sh\t%z1,%0";
|
|
|
|
|
+ case 4: return "sw\t%z1,%0";
|
|
|
|
|
+ case 8: return "sd\t%z1,%0";
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+ if (dest_code == REG && GP_REG_P (REGNO (dest)))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (src_code == REG)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (FP_REG_P (REGNO (src)))
|
|
|
|
|
+ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (src_code == MEM)
|
|
|
|
|
+ switch (GET_MODE_SIZE (mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ case 1: return "lbu\t%0,%1";
|
|
|
|
|
+ case 2: return "lhu\t%0,%1";
|
|
|
|
|
+ case 4: return "lw\t%0,%1";
|
|
|
|
|
+ case 8: return "ld\t%0,%1";
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (src_code == CONST_INT)
|
|
|
|
|
+ return "li\t%0,%1";
|
|
|
|
|
+
|
|
|
|
|
+ if (src_code == HIGH)
|
|
|
|
|
+ return "lui\t%0,%h1";
|
|
|
|
|
+
|
|
|
|
|
+ if (symbolic_operand (src, VOIDmode))
|
|
|
|
|
+ switch (riscv_classify_symbolic_expression (src))
|
|
|
|
|
+ {
|
|
|
|
|
+ case SYMBOL_GOT_DISP: return "la\t%0,%1";
|
|
|
|
|
+ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
|
|
|
|
|
+ default: gcc_unreachable();
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+ if (src_code == REG && FP_REG_P (REGNO (src)))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
|
|
|
|
|
+ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
|
|
|
|
|
+
|
|
|
|
|
+ if (dest_code == MEM)
|
|
|
|
|
+ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
|
|
|
|
|
+ }
|
|
|
|
|
+ if (dest_code == REG && FP_REG_P (REGNO (dest)))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (src_code == MEM)
|
|
|
|
|
+ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
|
|
|
|
|
+ }
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if CMP1 is a suitable second operand for integer ordering
|
|
|
|
|
+ test CODE. See also the *sCC patterns in riscv.md. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case GT:
|
|
|
|
|
+ case GTU:
|
|
|
|
|
+ return reg_or_0_operand (cmp1, VOIDmode);
|
|
|
|
|
+
|
|
|
|
|
+ case GE:
|
|
|
|
|
+ case GEU:
|
|
|
|
|
+ return cmp1 == const1_rtx;
|
|
|
|
|
+
|
|
|
|
|
+ case LT:
|
|
|
|
|
+ case LTU:
|
|
|
|
|
+ return arith_operand (cmp1, VOIDmode);
|
|
|
|
|
+
|
|
|
|
|
+ case LE:
|
|
|
|
|
+ return sle_operand (cmp1, VOIDmode);
|
|
|
|
|
+
|
|
|
|
|
+ case LEU:
|
|
|
|
|
+ return sleu_operand (cmp1, VOIDmode);
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if *CMP1 (of mode MODE) is a valid second operand for
|
|
|
|
|
+ integer ordering test *CODE, or if an equivalent combination can
|
|
|
|
|
+ be formed by adjusting *CODE and *CMP1. When returning true, update
|
|
|
|
|
+ *CODE and *CMP1 with the chosen code and operand, otherwise leave
|
|
|
|
|
+ them alone. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
|
|
|
|
|
+ enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ HOST_WIDE_INT plus_one;
|
|
|
|
|
+
|
|
|
|
|
+ if (riscv_int_order_operand_ok_p (*code, *cmp1))
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ if (CONST_INT_P (*cmp1))
|
|
|
|
|
+ switch (*code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case LE:
|
|
|
|
|
+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
|
|
|
|
|
+ if (INTVAL (*cmp1) < plus_one)
|
|
|
|
|
+ {
|
|
|
|
|
+ *code = LT;
|
|
|
|
|
+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case LEU:
|
|
|
|
|
+ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
|
|
|
|
|
+ if (plus_one != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ *code = LTU;
|
|
|
|
|
+ *cmp1 = force_reg (mode, GEN_INT (plus_one));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ break;
|
|
|
|
|
+ }
|
|
|
|
|
+ return false;
|
|
|
|
|
+}
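+
+/* Example of the rewriting above (illustrative; assumes sle_operand only
+   accepts values whose successor fits the 12-bit immediate):  x <= 2047
+   is not usable directly, so it becomes  x < 2048  with 2048 forced into a
+   register;  x <=u 0xffff...ff  cannot be adjusted because adding one wraps
+   to zero, so the function returns false and the caller falls back to the
+   inverse test or to a register operand.  */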
|
|
|
|
|
+
|
|
|
|
|
+/* Compare CMP0 and CMP1 using ordering test CODE and store the result
|
|
|
|
|
+ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
|
|
|
|
|
+ is nonnull, it's OK to set TARGET to the inverse of the result and
|
|
|
|
|
+ flip *INVERT_PTR instead. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
|
|
|
|
|
+ rtx target, rtx cmp0, rtx cmp1)
|
|
|
|
|
+{
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+
|
|
|
|
|
+ /* First see if there is a RISCV instruction that can do this operation.
|
|
|
|
|
+ If not, try doing the same for the inverse operation. If that also
|
|
|
|
|
+ fails, force CMP1 into a register and try again. */
|
|
|
|
|
+ mode = GET_MODE (cmp0);
|
|
|
|
|
+ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
|
|
|
|
|
+ riscv_emit_binary (code, target, cmp0, cmp1);
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ enum rtx_code inv_code = reverse_condition (code);
|
|
|
|
|
+ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ cmp1 = force_reg (mode, cmp1);
|
|
|
|
|
+ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
|
|
|
|
|
+ }
|
|
|
|
|
+ else if (invert_ptr == 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx inv_target;
|
|
|
|
|
+
|
|
|
|
|
+ inv_target = riscv_force_binary (GET_MODE (target),
|
|
|
|
|
+ inv_code, cmp0, cmp1);
|
|
|
|
|
+ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ *invert_ptr = !*invert_ptr;
|
|
|
|
|
+ riscv_emit_binary (inv_code, target, cmp0, cmp1);
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return a register that is zero iff CMP0 and CMP1 are equal.
|
|
|
|
|
+ The register will have the same mode as CMP0. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_zero_if_equal (rtx cmp0, rtx cmp1)
|
|
|
|
|
+{
|
|
|
|
|
+ if (cmp1 == const0_rtx)
|
|
|
|
|
+ return cmp0;
|
|
|
|
|
+
|
|
|
|
|
+ return expand_binop (GET_MODE (cmp0), sub_optab,
|
|
|
|
|
+ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return false if we can easily emit code for the FP comparison specified
|
|
|
|
|
+ by *CODE. If not, set *CODE to its inverse and return true. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_reversed_fp_cond (enum rtx_code *code)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (*code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case EQ:
|
|
|
|
|
+ case LT:
|
|
|
|
|
+ case LE:
|
|
|
|
|
+ case GT:
|
|
|
|
|
+ case GE:
|
|
|
|
|
+ case LTGT:
|
|
|
|
|
+ case ORDERED:
|
|
|
|
|
+ /* We know how to emit code for these cases... */
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ /* ...but we must invert these and rely on the others. */
|
|
|
|
|
+ *code = reverse_condition_maybe_unordered (*code);
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Convert a comparison into something that can be used in a branch or
|
|
|
|
|
+ conditional move. On entry, *OP0 and *OP1 are the values being
|
|
|
|
|
+ compared and *CODE is the code used to compare them.
|
|
|
|
|
+
|
|
|
|
|
+ Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx cmp_op0 = *op0;
|
|
|
|
|
+ rtx cmp_op1 = *op1;
|
|
|
|
|
+
|
|
|
|
|
+ if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (splittable_const_int_operand (cmp_op1, VOIDmode))
|
|
|
|
|
+ {
|
|
|
|
|
+ HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
|
|
|
|
|
+ enum rtx_code new_code;
|
|
|
|
|
+
|
|
|
|
|
+ switch (*code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
|
|
|
|
|
+ case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
|
|
|
|
|
+ case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
|
|
|
|
|
+ case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
|
|
|
|
|
+ case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
|
|
|
|
|
+ case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
|
|
|
|
|
+ case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
|
|
|
|
|
+ case GE: new_rhs = rhs - 1; new_code = GT;
|
|
|
|
|
+ try_new_rhs:
|
|
|
|
|
+ /* Convert e.g. OP0 > 4095 into OP0 >= 4096. */
|
|
|
|
|
+ if ((rhs < 0) == (new_rhs < 0)
|
|
|
|
|
+ && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
|
|
|
|
|
+ {
|
|
|
|
|
+ *op1 = GEN_INT (new_rhs);
|
|
|
|
|
+ *code = new_code;
|
|
|
|
|
+ }
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case EQ:
|
|
|
|
|
+ case NE:
|
|
|
|
|
+ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
|
|
|
|
|
+ if (SMALL_OPERAND (-rhs))
|
|
|
|
|
+ {
|
|
|
|
|
+ *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
|
|
|
|
|
+ riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
|
|
|
|
|
+ *op1 = const0_rtx;
|
|
|
|
|
+ }
|
|
|
|
|
+ default:
|
|
|
|
|
+ break;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (*op1 != const0_rtx)
|
|
|
|
|
+ *op1 = force_reg (GET_MODE (cmp_op0), *op1);
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ /* For FP comparisons, set an integer register with the result of the
|
|
|
|
|
+ comparison, then branch on it. */
|
|
|
|
|
+ rtx tmp0, tmp1, final_op;
|
|
|
|
|
+ enum rtx_code fp_code = *code;
|
|
|
|
|
+ *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
|
|
|
|
|
+
|
|
|
|
|
+ switch (fp_code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case ORDERED:
|
|
|
|
|
+ /* a == a && b == b */
|
|
|
|
|
+ tmp0 = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
|
|
|
|
|
+ tmp1 = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
|
|
|
|
|
+ final_op = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (AND, final_op, tmp0, tmp1);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case LTGT:
|
|
|
|
|
+ /* a < b || a > b */
|
|
|
|
|
+ tmp0 = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
|
|
|
|
|
+ tmp1 = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
|
|
|
|
|
+ final_op = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (IOR, final_op, tmp0, tmp1);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case EQ:
|
|
|
|
|
+ case LE:
|
|
|
|
|
+ case LT:
|
|
|
|
|
+ case GE:
|
|
|
|
|
+ case GT:
|
|
|
|
|
+ /* We have instructions for these cases. */
|
|
|
|
|
+ final_op = gen_reg_rtx (SImode);
|
|
|
|
|
+ riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Compare the binary result against 0. */
|
|
|
|
|
+ *op0 = final_op;
|
|
|
|
|
+ *op1 = const0_rtx;
|
|
|
|
|
+ }
|
|
|
|
|
+}
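+
+/* Example of the FP path above (illustrative): for a branch on  a LTGT b
+   the code materialises (a < b) and (a > b) in two SImode temporaries,
+   ORs them, and rewrites the branch as  result != 0.  An unordered test
+   such as UNLT is first inverted by riscv_reversed_fp_cond (to GE), and
+   the branch then tests  result == 0  instead.  */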
|
|
|
|
|
+
|
|
|
|
|
+/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
|
|
|
|
|
+ and OPERANDS[3]. Store the result in OPERANDS[0].
|
|
|
|
|
+
|
|
|
|
|
+ On 64-bit targets, the mode of the comparison and target will always be
|
|
|
|
|
+ SImode, thus possibly narrower than that of the comparison's operands. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_expand_scc (rtx operands[])
|
|
|
|
|
+{
|
|
|
|
|
+ rtx target = operands[0];
|
|
|
|
|
+ enum rtx_code code = GET_CODE (operands[1]);
|
|
|
|
|
+ rtx op0 = operands[2];
|
|
|
|
|
+ rtx op1 = operands[3];
|
|
|
|
|
+
|
|
|
|
|
+ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
|
|
|
|
|
+
|
|
|
|
|
+ if (code == EQ || code == NE)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx zie = riscv_zero_if_equal (op0, op1);
|
|
|
|
|
+ riscv_emit_binary (code, target, zie, const0_rtx);
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ riscv_emit_int_order_test (code, 0, target, op0, op1);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
|
|
|
|
|
+ CODE and jump to OPERANDS[3] if the condition holds. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_expand_conditional_branch (rtx *operands)
|
|
|
|
|
+{
|
|
|
|
|
+ enum rtx_code code = GET_CODE (operands[0]);
|
|
|
|
|
+ rtx op0 = operands[1];
|
|
|
|
|
+ rtx op1 = operands[2];
|
|
|
|
|
+ rtx condition;
|
|
|
|
|
+
|
|
|
|
|
+ riscv_emit_compare (&code, &op0, &op1);
|
|
|
|
|
+ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
|
|
|
|
|
+ emit_jump_insn (gen_condjump (condition, operands[3]));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
|
|
|
|
|
+ least PARM_BOUNDARY bits of alignment, but will be given anything up
|
|
|
|
|
+ to STACK_BOUNDARY bits if the type requires it. */
|
|
|
|
|
+
|
|
|
|
|
+static unsigned int
|
|
|
|
|
+riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned int alignment;
|
|
|
|
|
+
|
|
|
|
|
+ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
|
|
|
|
|
+ if (alignment < PARM_BOUNDARY)
|
|
|
|
|
+ alignment = PARM_BOUNDARY;
|
|
|
|
|
+ if (alignment > STACK_BOUNDARY)
|
|
|
|
|
+ alignment = STACK_BOUNDARY;
|
|
|
|
|
+ return alignment;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Fill INFO with information about a single argument. CUM is the
|
|
|
|
|
+ cumulative state for earlier arguments. MODE is the mode of this
|
|
|
|
|
+ argument and TYPE is its type (if known). NAMED is true if this
|
|
|
|
|
+ is a named (fixed) argument rather than a variable one. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
|
|
|
|
|
+ enum machine_mode mode, const_tree type, bool named)
|
|
|
|
|
+{
|
|
|
|
|
+ bool doubleword_aligned_p;
|
|
|
|
|
+ unsigned int num_bytes, num_words, max_regs;
|
|
|
|
|
+
|
|
|
|
|
+ /* Work out the size of the argument. */
|
|
|
|
|
+ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
|
|
|
|
|
+ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
|
|
|
|
|
+
|
|
|
|
|
+ /* Scalar, complex and vector floating-point types are passed in
|
|
|
|
|
+ floating-point registers, as long as this is a named rather
|
|
|
|
|
+ than a variable argument. */
|
|
|
|
|
+ info->fpr_p = (named
|
|
|
|
|
+ && (type == 0 || FLOAT_TYPE_P (type))
|
|
|
|
|
+ && (GET_MODE_CLASS (mode) == MODE_FLOAT
|
|
|
|
|
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
|
|
|
|
|
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
|
|
|
|
|
+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
|
|
|
|
|
+
|
|
|
|
|
+ /* Complex floats should only go into FPRs if there are two FPRs free,
|
|
|
|
|
+ otherwise they should be passed in the same way as a struct
|
|
|
|
|
+ containing two floats. */
|
|
|
|
|
+ if (info->fpr_p
|
|
|
|
|
+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
|
|
|
|
|
+ && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
|
|
|
|
|
+ info->fpr_p = false;
|
|
|
|
|
+ else
|
|
|
|
|
+ num_words = 2;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* See whether the argument has doubleword alignment. */
|
|
|
|
|
+ doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
|
|
|
|
|
+ > BITS_PER_WORD);
|
|
|
|
|
+
|
|
|
|
|
+ /* Set REG_OFFSET to the register count we're interested in.
|
|
|
|
|
+ In this port, floating-point arguments are counted against the same
|
|
|
|
|
+ register count as integer arguments. */
|
|
|
|
|
+ info->reg_offset = cum->num_gprs;
|
|
|
|
|
+
|
|
|
|
|
+ /* Advance to an even register if the argument is doubleword-aligned. */
|
|
|
|
|
+ if (doubleword_aligned_p)
|
|
|
|
|
+ info->reg_offset += info->reg_offset & 1;
|
|
|
|
|
+
|
|
|
|
|
+ /* Work out the offset of a stack argument. */
|
|
|
|
|
+ info->stack_offset = cum->stack_words;
|
|
|
|
|
+ if (doubleword_aligned_p)
|
|
|
|
|
+ info->stack_offset += info->stack_offset & 1;
|
|
|
|
|
+
|
|
|
|
|
+ max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* Partition the argument between registers and stack. */
|
|
|
|
|
+ info->reg_words = MIN (num_words, max_regs);
|
|
|
|
|
+ info->stack_words = num_words - info->reg_words;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* INFO describes a register argument that has the normal format for the
|
|
|
|
|
+ argument's mode. Return the register it uses, assuming that FPRs are
|
|
|
|
|
+ available if HARD_FLOAT_P. */
|
|
|
|
|
+
|
|
|
|
|
+static unsigned int
|
|
|
|
|
+riscv_arg_regno (const struct riscv_arg_info *info, bool hard_float_p)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!info->fpr_p || !hard_float_p)
|
|
|
|
|
+ return GP_ARG_FIRST + info->reg_offset;
|
|
|
|
|
+ else
|
|
|
|
|
+ return FP_ARG_FIRST + info->reg_offset;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_FUNCTION_ARG. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
|
|
|
|
|
+ const_tree type, bool named)
|
|
|
|
|
+{
|
|
|
|
|
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
|
|
|
|
|
+ struct riscv_arg_info info;
|
|
|
|
|
+
|
|
|
|
|
+ if (mode == VOIDmode)
|
|
|
|
|
+ return NULL;
|
|
|
|
|
+
|
|
|
|
|
+ riscv_get_arg_info (&info, cum, mode, type, named);
|
|
|
|
|
+
|
|
|
|
|
+ /* Return straight away if the whole argument is passed on the stack. */
|
|
|
|
|
+ if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
|
|
|
|
|
+ return NULL;
|
|
|
|
|
+
|
|
|
|
|
+ /* With hard float, if any 64-bit chunk of the structure
|
|
|
|
|
+ contains a double in its entirety, then that 64-bit chunk is passed
|
|
|
|
|
+ in a floating-point register. */
|
|
|
|
|
+ if (TARGET_HARD_FLOAT
|
|
|
|
|
+ && named
|
|
|
|
|
+ && type != 0
|
|
|
|
|
+ && TREE_CODE (type) == RECORD_TYPE
|
|
|
|
|
+ && TYPE_SIZE_UNIT (type)
|
|
|
|
|
+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
|
|
|
|
|
+ {
|
|
|
|
|
+ tree field;
|
|
|
|
|
+
|
|
|
|
|
+ /* First check to see if there is any such field. */
|
|
|
|
|
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
|
|
|
|
|
+ if (TREE_CODE (field) == FIELD_DECL
|
|
|
|
|
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
|
|
|
|
|
+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
|
|
|
|
|
+ && tree_fits_shwi_p (bit_position (field))
|
|
|
|
|
+ && int_bit_position (field) % BITS_PER_WORD == 0)
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ if (field != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Now handle the special case by returning a PARALLEL
|
|
|
|
|
+ indicating where each 64-bit chunk goes. INFO.REG_WORDS
|
|
|
|
|
+ chunks are passed in registers. */
|
|
|
|
|
+ unsigned int i;
|
|
|
|
|
+ HOST_WIDE_INT bitpos;
|
|
|
|
|
+ rtx ret;
|
|
|
|
|
+
|
|
|
|
|
+ /* assign_parms checks the mode of ENTRY_PARM, so we must
|
|
|
|
|
+ use the actual mode here. */
|
|
|
|
|
+ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
|
|
|
|
|
+
|
|
|
|
|
+ bitpos = 0;
|
|
|
|
|
+ field = TYPE_FIELDS (type);
|
|
|
|
|
+ for (i = 0; i < info.reg_words; i++)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx reg;
|
|
|
|
|
+
|
|
|
|
|
+ for (; field; field = DECL_CHAIN (field))
|
|
|
|
|
+ if (TREE_CODE (field) == FIELD_DECL
|
|
|
|
|
+ && int_bit_position (field) >= bitpos)
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ if (field
|
|
|
|
|
+ && int_bit_position (field) == bitpos
|
|
|
|
|
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
|
|
|
|
|
+ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
|
|
|
|
|
+ reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
|
|
|
|
|
+ else
|
|
|
|
|
+ reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
|
|
|
|
|
+
|
|
|
|
|
+ XVECEXP (ret, 0, i)
|
|
|
|
|
+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
|
|
|
|
|
+ GEN_INT (bitpos / BITS_PER_UNIT));
|
|
|
|
|
+
|
|
|
|
|
+ bitpos += BITS_PER_WORD;
|
|
|
|
|
+ }
|
|
|
|
|
+ return ret;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Handle the convention for passing complex floating-point
|
|
|
|
|
+ arguments in FPR pairs. The real part goes in the lower register
|
|
|
|
|
+ and the imaginary part goes in the upper register. */
|
|
|
|
|
+ if (info.fpr_p
|
|
|
|
|
+ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx real, imag;
|
|
|
|
|
+ enum machine_mode inner;
|
|
|
|
|
+ unsigned int regno;
|
|
|
|
|
+
|
|
|
|
|
+ inner = GET_MODE_INNER (mode);
|
|
|
|
|
+ regno = FP_ARG_FIRST + info.reg_offset;
|
|
|
|
|
+ if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Real part in registers, imaginary part on stack. */
|
|
|
|
|
+ gcc_assert (info.stack_words == info.reg_words);
|
|
|
|
|
+ return gen_rtx_REG (inner, regno);
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ gcc_assert (info.stack_words == 0);
|
|
|
|
|
+ real = gen_rtx_EXPR_LIST (VOIDmode,
|
|
|
|
|
+ gen_rtx_REG (inner, regno),
|
|
|
|
|
+ const0_rtx);
|
|
|
|
|
+ imag = gen_rtx_EXPR_LIST (VOIDmode,
|
|
|
|
|
+ gen_rtx_REG (inner,
|
|
|
|
|
+ regno + info.reg_words / 2),
|
|
|
|
|
+ GEN_INT (GET_MODE_SIZE (inner)));
|
|
|
|
|
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return gen_rtx_REG (mode, riscv_arg_regno (&info, TARGET_HARD_FLOAT));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
|
|
|
|
|
+ const_tree type, bool named)
|
|
|
|
|
+{
|
|
|
|
|
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
|
|
|
|
|
+ struct riscv_arg_info info;
|
|
|
|
|
+
|
|
|
|
|
+ riscv_get_arg_info (&info, cum, mode, type, named);
|
|
|
|
|
+
|
|
|
|
|
+ /* Advance the register count. This has the effect of setting
|
|
|
|
|
+ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
|
|
|
|
|
+ argument required us to skip the final GPR and pass the whole
|
|
|
|
|
+ argument on the stack. */
|
|
|
|
|
+ cum->num_gprs = info.reg_offset + info.reg_words;
|
|
|
|
|
+
|
|
|
|
|
+ /* Advance the stack word count. */
|
|
|
|
|
+ if (info.stack_words > 0)
|
|
|
|
|
+ cum->stack_words = info.stack_offset + info.stack_words;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_ARG_PARTIAL_BYTES. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_arg_partial_bytes (cumulative_args_t cum,
|
|
|
|
|
+ enum machine_mode mode, tree type, bool named)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_arg_info info;
|
|
|
|
|
+
|
|
|
|
|
+ riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
|
|
|
|
|
+ return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* See whether VALTYPE is a record whose fields should be returned in
|
|
|
|
|
+ floating-point registers. If so, return the number of fields and
|
|
|
|
|
+ list them in FIELDS (which should have two elements). Return 0
|
|
|
|
|
+ otherwise.
|
|
|
|
|
+
|
|
|
|
|
+ A structure with one or two fields is returned in
|
|
|
|
|
+ floating-point registers as long as every field has a floating-point
|
|
|
|
|
+ type. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_fpr_return_fields (const_tree valtype, tree *fields)
|
|
|
|
|
+{
|
|
|
|
|
+ tree field;
|
|
|
|
|
+ int i;
|
|
|
|
|
+
|
|
|
|
|
+ if (TREE_CODE (valtype) != RECORD_TYPE)
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ i = 0;
|
|
|
|
|
+ for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (TREE_CODE (field) != FIELD_DECL)
|
|
|
|
|
+ continue;
|
|
|
|
|
+
|
|
|
|
|
+ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ if (i == 2)
|
|
|
|
|
+ return 0;
|
|
|
|
|
+
|
|
|
|
|
+ fields[i++] = field;
|
|
|
|
|
+ }
|
|
|
|
|
+ return i;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if the function return value MODE will get returned in a
|
|
|
|
|
+ floating-point register. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_return_mode_in_fpr_p (enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
|
|
|
|
|
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
|
|
|
|
|
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
|
|
|
|
|
+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the representation of an FPR return register when the
|
|
|
|
|
+ value being returned in FP_RETURN has mode VALUE_MODE and the
|
|
|
|
|
+ return type itself has mode TYPE_MODE. In some cases,
|
|
|
|
|
+ the two modes may be different for structures like:
|
|
|
|
|
+
|
|
|
|
|
+ struct __attribute__((packed)) foo { float f; }
|
|
|
|
|
+
|
|
|
|
|
+ where we return the SFmode value of "f" in FP_RETURN, but where
|
|
|
|
|
+ the structure itself has mode BLKmode. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_return_fpr_single (enum machine_mode type_mode,
|
|
|
|
|
+ enum machine_mode value_mode)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx x;
|
|
|
|
|
+
|
|
|
|
|
+ x = gen_rtx_REG (value_mode, FP_RETURN);
|
|
|
|
|
+ if (type_mode != value_mode)
|
|
|
|
|
+ {
|
|
|
|
|
+ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
|
|
|
|
|
+ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
|
|
|
|
|
+ }
|
|
|
|
|
+ return x;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return a composite value in a pair of floating-point registers.
|
|
|
|
|
+ MODE1 and OFFSET1 are the mode and byte offset for the first value,
|
|
|
|
|
+ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
|
|
|
|
|
+ complete value.
|
|
|
|
|
+
|
|
|
|
|
+ The first value always goes in FP_RETURN and the second in
|
|
|
|
|
+ FP_RETURN + 1. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_return_fpr_pair (enum machine_mode mode,
|
|
|
|
|
+ enum machine_mode mode1, HOST_WIDE_INT offset1,
|
|
|
|
|
+ enum machine_mode mode2, HOST_WIDE_INT offset2)
|
|
|
|
|
+{
|
|
|
|
|
+ return gen_rtx_PARALLEL
|
|
|
|
|
+ (mode,
|
|
|
|
|
+ gen_rtvec (2,
|
|
|
|
|
+ gen_rtx_EXPR_LIST (VOIDmode,
|
|
|
|
|
+ gen_rtx_REG (mode1, FP_RETURN),
|
|
|
|
|
+ GEN_INT (offset1)),
|
|
|
|
|
+ gen_rtx_EXPR_LIST (VOIDmode,
|
|
|
|
|
+ gen_rtx_REG (mode2, FP_RETURN + 1),
|
|
|
|
|
+ GEN_INT (offset2))));
|
|
|
|
|
+
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
|
|
|
|
|
+ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
|
|
|
|
|
+ VALTYPE is null and MODE is the mode of the return value. */
|
|
|
|
|
+
|
|
|
|
|
+rtx
|
|
|
|
|
+riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ if (valtype)
|
|
|
|
|
+ {
|
|
|
|
|
+ tree fields[2];
|
|
|
|
|
+ int unsigned_p;
|
|
|
|
|
+
|
|
|
|
|
+ mode = TYPE_MODE (valtype);
|
|
|
|
|
+ unsigned_p = TYPE_UNSIGNED (valtype);
|
|
|
|
|
+
|
|
|
|
|
+ /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
|
|
|
|
|
+ return values, promote the mode here too. */
|
|
|
|
|
+ mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
|
|
|
|
|
+
|
|
|
|
|
+ /* Handle structures whose fields are returned in $f0/$f2. */
|
|
|
|
|
+ switch (riscv_fpr_return_fields (valtype, fields))
|
|
|
|
|
+ {
|
|
|
|
|
+ case 1:
|
|
|
|
|
+ return riscv_return_fpr_single (mode,
|
|
|
|
|
+ TYPE_MODE (TREE_TYPE (fields[0])));
|
|
|
|
|
+
|
|
|
|
|
+ case 2:
|
|
|
|
|
+ return riscv_return_fpr_pair (mode,
|
|
|
|
|
+ TYPE_MODE (TREE_TYPE (fields[0])),
|
|
|
|
|
+ int_byte_position (fields[0]),
|
|
|
|
|
+ TYPE_MODE (TREE_TYPE (fields[1])),
|
|
|
|
|
+ int_byte_position (fields[1]));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Only use FPRs for scalar, complex or vector types. */
|
|
|
|
|
+ if (!FLOAT_TYPE_P (valtype))
|
|
|
|
|
+ return gen_rtx_REG (mode, GP_RETURN);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+  /* Handle TFmode long doubles. */
|
|
|
|
|
+ if (mode == TFmode)
|
|
|
|
|
+ return riscv_return_fpr_pair (mode,
|
|
|
|
|
+ DImode, 0,
|
|
|
|
|
+ DImode, GET_MODE_SIZE (mode) / 2);
|
|
|
|
|
+
|
|
|
|
|
+ if (riscv_return_mode_in_fpr_p (mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
|
|
|
|
|
+ return riscv_return_fpr_pair (mode,
|
|
|
|
|
+ GET_MODE_INNER (mode), 0,
|
|
|
|
|
+ GET_MODE_INNER (mode),
|
|
|
|
|
+ GET_MODE_SIZE (mode) / 2);
|
|
|
|
|
+ else
|
|
|
|
|
+ return gen_rtx_REG (mode, FP_RETURN);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return gen_rtx_REG (mode, GP_RETURN);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_RETURN_IN_MEMORY. Scalars and small structures
|
|
|
|
|
+ that fit in two registers are returned in a0/a1. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
|
|
|
|
|
+}
|
|
|
|
|
+
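A hedged sketch of the boundary riscv_return_in_memory draws, assuming an RV64 target where UNITS_PER_WORD is 8; the struct names are invented:

/* Illustrative only.  int_size_in_bytes is 16 for the first struct and
   24 for the second, so the first fits within 2 * UNITS_PER_WORD and is
   returned in a0/a1, while the second is returned in memory through a
   hidden pointer supplied by the caller.  */
struct fits_in_regs { long a, b; };
struct needs_memory { long a, b, c; };

struct fits_in_regs make_small (void)
{
  struct fits_in_regs r = { 1, 2 };
  return r;
}

struct needs_memory make_big (void)
{
  struct needs_memory r = { 1, 2, 3 };
  return r;
}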
|
|
|
|
|
+/* Implement TARGET_PASS_BY_REFERENCE. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
|
|
|
|
|
+ enum machine_mode mode, const_tree type,
|
|
|
|
|
+ bool named ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ if (type && riscv_return_in_memory (type, NULL_TREE))
|
|
|
|
|
+ return true;
|
|
|
|
|
+ return targetm.calls.must_pass_in_stack (mode, type);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_SETUP_INCOMING_VARARGS. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
|
|
|
|
|
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
|
|
|
|
|
+ int no_rtl)
|
|
|
|
|
+{
|
|
|
|
|
+ CUMULATIVE_ARGS local_cum;
|
|
|
|
|
+ int gp_saved;
|
|
|
|
|
+
|
|
|
|
|
+ /* The caller has advanced CUM up to, but not beyond, the last named
|
|
|
|
|
+ argument. Advance a local copy of CUM past the last "real" named
|
|
|
|
|
+ argument, to find out how many registers are left over. */
|
|
|
|
|
+ local_cum = *get_cumulative_args (cum);
|
|
|
|
|
+ riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
|
|
|
|
|
+
|
|
|
|
|
+  /* Find out how many registers we need to save. */
|
|
|
|
|
+ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
|
|
|
|
|
+
|
|
|
|
|
+ if (!no_rtl && gp_saved > 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx ptr, mem;
|
|
|
|
|
+
|
|
|
|
|
+ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
|
|
|
|
|
+ REG_PARM_STACK_SPACE (cfun->decl)
|
|
|
|
|
+ - gp_saved * UNITS_PER_WORD);
|
|
|
|
|
+ mem = gen_frame_mem (BLKmode, ptr);
|
|
|
|
|
+ set_mem_alias_set (mem, get_varargs_alias_set ());
|
|
|
|
|
+
|
|
|
|
|
+ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
|
|
|
|
|
+ mem, gp_saved);
|
|
|
|
|
+ }
|
|
|
|
|
+ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
|
|
|
|
|
+ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_va_start (tree valist, rtx nextarg)
|
|
|
|
|
+{
|
|
|
|
|
+ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
|
|
|
|
|
+ std_expand_builtin_va_start (valist, nextarg);
|
|
|
|
|
+}
|
|
|
|
|
+
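To see the varargs handling above from the source-language side, a minimal sketch in plain ISO C (nothing beyond the standard library is assumed): with one named parameter, riscv_setup_incoming_varargs spills the remaining argument GPRs into the save area, and riscv_va_start rewinds the va_list start by varargs_size so va_arg can walk those registers in memory.

#include <stdarg.h>

/* Ordinary variadic C; only the spilling of the anonymous register
   arguments is affected by the backend code above.  */
int sum_ints (int count, ...)
{
  va_list ap;
  int i, total = 0;

  va_start (ap, count);
  for (i = 0; i < count; i++)
    total += va_arg (ap, int);
  va_end (ap);

  return total;
}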
|
|
|
|
|
+/* Expand a call of type TYPE. RESULT is where the result will go (null
|
|
|
|
|
+ for "call"s and "sibcall"s), ADDR is the address of the function,
|
|
|
|
|
+   and ARGS_SIZE is the size of the arguments.
|
|
|
|
|
+   Return the call itself. */
|
|
|
|
|
+
|
|
|
|
|
+rtx
|
|
|
|
|
+riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx pattern;
|
|
|
|
|
+
|
|
|
|
|
+ if (!call_insn_operand (addr, VOIDmode))
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx reg = RISCV_EPILOGUE_TEMP (Pmode);
|
|
|
|
|
+ riscv_emit_move (reg, addr);
|
|
|
|
|
+ addr = reg;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (result == 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx (*fn) (rtx, rtx);
|
|
|
|
|
+
|
|
|
|
|
+ if (sibcall_p)
|
|
|
|
|
+ fn = gen_sibcall_internal;
|
|
|
|
|
+ else
|
|
|
|
|
+ fn = gen_call_internal;
|
|
|
|
|
+
|
|
|
|
|
+ pattern = fn (addr, args_size);
|
|
|
|
|
+ }
|
|
|
|
|
+ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Handle return values created by riscv_return_fpr_pair. */
|
|
|
|
|
+ rtx (*fn) (rtx, rtx, rtx, rtx);
|
|
|
|
|
+ rtx reg1, reg2;
|
|
|
|
|
+
|
|
|
|
|
+ if (sibcall_p)
|
|
|
|
|
+ fn = gen_sibcall_value_multiple_internal;
|
|
|
|
|
+ else
|
|
|
|
|
+ fn = gen_call_value_multiple_internal;
|
|
|
|
|
+
|
|
|
|
|
+ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
|
|
|
|
|
+ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
|
|
|
|
|
+ pattern = fn (reg1, addr, args_size, reg2);
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx (*fn) (rtx, rtx, rtx);
|
|
|
|
|
+
|
|
|
|
|
+ if (sibcall_p)
|
|
|
|
|
+ fn = gen_sibcall_value_internal;
|
|
|
|
|
+ else
|
|
|
|
|
+ fn = gen_call_value_internal;
|
|
|
|
|
+
|
|
|
|
|
+ /* Handle return values created by riscv_return_fpr_single. */
|
|
|
|
|
+ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
|
|
|
|
|
+ result = XEXP (XVECEXP (result, 0, 0), 0);
|
|
|
|
|
+ pattern = fn (result, addr, args_size);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return emit_call_insn (pattern);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
|
|
|
|
|
+ Assume that the areas do not overlap. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
|
|
|
|
|
+{
|
|
|
|
|
+ HOST_WIDE_INT offset, delta;
|
|
|
|
|
+ unsigned HOST_WIDE_INT bits;
|
|
|
|
|
+ int i;
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+ rtx *regs;
|
|
|
|
|
+
|
|
|
|
|
+  bits = MAX (BITS_PER_UNIT,
|
|
|
|
|
+	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
|
|
|
|
|
+
|
|
|
|
|
+ mode = mode_for_size (bits, MODE_INT, 0);
|
|
|
|
|
+ delta = bits / BITS_PER_UNIT;
|
|
|
|
|
+
|
|
|
|
|
+ /* Allocate a buffer for the temporary registers. */
|
|
|
|
|
+ regs = XALLOCAVEC (rtx, length / delta);
|
|
|
|
|
+
|
|
|
|
|
+  /* Load as many BITS-sized chunks as possible.  The chunk size is
|
|
|
|
|
+     limited by the alignment of both the source and the destination. */
|
|
|
|
|
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
|
|
|
|
|
+ {
|
|
|
|
|
+ regs[i] = gen_reg_rtx (mode);
|
|
|
|
|
+ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Copy the chunks to the destination. */
|
|
|
|
|
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
|
|
|
|
|
+ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
|
|
|
|
|
+
|
|
|
|
|
+ /* Mop up any left-over bytes. */
|
|
|
|
|
+ if (offset < length)
|
|
|
|
|
+ {
|
|
|
|
|
+ src = adjust_address (src, BLKmode, offset);
|
|
|
|
|
+ dest = adjust_address (dest, BLKmode, offset);
|
|
|
|
|
+ move_by_pieces (dest, src, length - offset,
|
|
|
|
|
+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Helper function for doing a loop-based block operation on memory
|
|
|
|
|
+ reference MEM. Each iteration of the loop will operate on LENGTH
|
|
|
|
|
+ bytes of MEM.
|
|
|
|
|
+
|
|
|
|
|
+ Create a new base register for use within the loop and point it to
|
|
|
|
|
+ the start of MEM. Create a new memory reference that uses this
|
|
|
|
|
+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
|
|
|
|
|
+ rtx *loop_reg, rtx *loop_mem)
|
|
|
|
|
+{
|
|
|
|
|
+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
|
|
|
|
|
+
|
|
|
|
|
+ /* Although the new mem does not refer to a known location,
|
|
|
|
|
+ it does keep up to LENGTH bytes of alignment. */
|
|
|
|
|
+ *loop_mem = change_address (mem, BLKmode, *loop_reg);
|
|
|
|
|
+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
|
|
|
|
|
+ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
|
|
|
|
|
+ the memory regions do not overlap. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
|
|
|
|
|
+ HOST_WIDE_INT bytes_per_iter)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx label, src_reg, dest_reg, final_src, test;
|
|
|
|
|
+ HOST_WIDE_INT leftover;
|
|
|
|
|
+
|
|
|
|
|
+ leftover = length % bytes_per_iter;
|
|
|
|
|
+ length -= leftover;
|
|
|
|
|
+
|
|
|
|
|
+ /* Create registers and memory references for use within the loop. */
|
|
|
|
|
+ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
|
|
|
|
|
+ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
|
|
|
|
|
+
|
|
|
|
|
+ /* Calculate the value that SRC_REG should have after the last iteration
|
|
|
|
|
+ of the loop. */
|
|
|
|
|
+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
|
|
|
|
|
+ 0, 0, OPTAB_WIDEN);
|
|
|
|
|
+
|
|
|
|
|
+ /* Emit the start of the loop. */
|
|
|
|
|
+ label = gen_label_rtx ();
|
|
|
|
|
+ emit_label (label);
|
|
|
|
|
+
|
|
|
|
|
+ /* Emit the loop body. */
|
|
|
|
|
+ riscv_block_move_straight (dest, src, bytes_per_iter);
|
|
|
|
|
+
|
|
|
|
|
+ /* Move on to the next block. */
|
|
|
|
|
+ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
|
|
|
|
|
+ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
|
|
|
|
|
+
|
|
|
|
|
+ /* Emit the loop condition. */
|
|
|
|
|
+ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
|
|
|
|
|
+ if (Pmode == DImode)
|
|
|
|
|
+ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
|
|
|
|
|
+ else
|
|
|
|
|
+ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
|
|
|
|
|
+
|
|
|
|
|
+ /* Mop up any left-over bytes. */
|
|
|
|
|
+ if (leftover)
|
|
|
|
|
+ riscv_block_move_straight (dest, src, leftover);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Expand a movmemsi instruction, which copies LENGTH bytes from
|
|
|
|
|
+ memory reference SRC to memory reference DEST. */
|
|
|
|
|
+
|
|
|
|
|
+bool
|
|
|
|
|
+riscv_expand_block_move (rtx dest, rtx src, rtx length)
|
|
|
|
|
+{
|
|
|
|
|
+ if (CONST_INT_P (length))
|
|
|
|
|
+ {
|
|
|
|
|
+ HOST_WIDE_INT factor, align;
|
|
|
|
|
+
|
|
|
|
|
+ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
|
|
|
|
|
+ factor = BITS_PER_WORD / align;
|
|
|
|
|
+
|
|
|
|
|
+ if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_block_move_straight (dest, src, INTVAL (length));
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ else if (optimize && align >= BITS_PER_WORD)
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_block_move_loop (dest, src, INTVAL (length),
|
|
|
|
|
+ RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
|
|
|
|
|
+ return true;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+ return false;
|
|
|
|
|
+}
|
|
|
|
|
+
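For orientation, a hedged example of the copies riscv_expand_block_move is asked to expand; the exact thresholds (RISCV_MAX_MOVE_BYTES_STRAIGHT and RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER) are defined in riscv.h, outside this hunk:

#include <string.h>

struct header { char bytes[32]; };

/* Constant length and known alignment: a candidate for the straight-line
   expansion in riscv_block_move_straight.  */
void copy_header (struct header *dst, const struct header *src)
{
  memcpy (dst, src, sizeof *dst);
}

/* Variable length: riscv_expand_block_move returns false and a normal
   call to memcpy is emitted instead.  */
void copy_bytes (char *dst, const char *src, unsigned long n)
{
  memcpy (dst, src, n);
}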
|
|
|
|
|
+/* (Re-)Initialize riscv_lo_relocs and riscv_hi_relocs. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_init_relocs (void)
|
|
|
|
|
+{
|
|
|
|
|
+ memset (riscv_hi_relocs, '\0', sizeof (riscv_hi_relocs));
|
|
|
|
|
+ memset (riscv_lo_relocs, '\0', sizeof (riscv_lo_relocs));
|
|
|
|
|
+
|
|
|
|
|
+ if (!flag_pic && riscv_cmodel == CM_MEDLOW)
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
|
|
|
|
|
+ riscv_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (!flag_pic || flag_pie)
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_hi_relocs[SYMBOL_TLS_LE] = "%tprel_hi(";
|
|
|
|
|
+ riscv_lo_relocs[SYMBOL_TLS_LE] = "%tprel_lo(";
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
|
|
|
|
|
+ in context CONTEXT. RELOCS is the array of relocations to use. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_print_operand_reloc (FILE *file, rtx op, const char **relocs)
|
|
|
|
|
+{
|
|
|
|
|
+ enum riscv_symbol_type symbol_type;
|
|
|
|
|
+ const char *p;
|
|
|
|
|
+
|
|
|
|
|
+ symbol_type = riscv_classify_symbolic_expression (op);
|
|
|
|
|
+ gcc_assert (relocs[symbol_type]);
|
|
|
|
|
+
|
|
|
|
|
+ fputs (relocs[symbol_type], file);
|
|
|
|
|
+ output_addr_const (file, riscv_strip_unspec_address (op));
|
|
|
|
|
+ for (p = relocs[symbol_type]; *p != 0; p++)
|
|
|
|
|
+ if (*p == '(')
|
|
|
|
|
+ fputc (')', file);
|
|
|
|
|
+}
|
|
|
|
|
+
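+/* Return the memory-ordering suffix to print for memory model MODEL. */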
|
|
|
|
|
+static const char *
|
|
|
|
|
+riscv_memory_model_suffix (enum memmodel model)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (model)
|
|
|
|
|
+ {
|
|
|
|
|
+ case MEMMODEL_ACQ_REL:
|
|
|
|
|
+ case MEMMODEL_SEQ_CST:
|
|
|
|
|
+ return ".sc";
|
|
|
|
|
+ case MEMMODEL_ACQUIRE:
|
|
|
|
|
+ case MEMMODEL_CONSUME:
|
|
|
|
|
+ return ".aq";
|
|
|
|
|
+ case MEMMODEL_RELEASE:
|
|
|
|
|
+ return ".rl";
|
|
|
|
|
+ case MEMMODEL_RELAXED:
|
|
|
|
|
+ return "";
|
|
|
|
|
+    default: gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
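A hedged user-level sketch of where the '%A' operand code ends up: the memory-model argument of the __atomic builtins is what riscv_memory_model_suffix translates, and the mnemonics the suffix is appended to come from the atomic patterns in riscv.md, outside this hunk.

/* Illustrative only.  Each comment names the suffix string selected by
   riscv_memory_model_suffix for that memory model.  */
int fetch_add_seq_cst (int *p)
{
  return __atomic_fetch_add (p, 1, __ATOMIC_SEQ_CST);   /* ".sc" */
}

int exchange_acquire (int *p, int v)
{
  return __atomic_exchange_n (p, v, __ATOMIC_ACQUIRE);  /* ".aq" */
}

int exchange_release (int *p, int v)
{
  return __atomic_exchange_n (p, v, __ATOMIC_RELEASE);  /* ".rl" */
}

int fetch_or_relaxed (int *p, int mask)
{
  return __atomic_fetch_or (p, mask, __ATOMIC_RELAXED); /* no suffix */
}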
|
|
|
|
|
+/* Implement TARGET_PRINT_OPERAND. The RISCV-specific operand codes are:
|
|
|
|
|
+
|
|
|
|
|
+ 'h' Print the high-part relocation associated with OP, after stripping
|
|
|
|
|
+ any outermost HIGH.
|
|
|
|
|
+ 'R' Print the low-part relocation associated with OP.
|
|
|
|
|
+ 'C' Print the integer branch condition for comparison OP.
|
|
|
|
|
+ 'A' Print the atomic operation suffix for memory model OP.
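+   'y'	Print the register forming the address of memory operand OP.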
|
|
|
|
|
+   'z'	Print the zero register if OP is zero, otherwise print OP normally. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_print_operand (FILE *file, rtx op, int letter)
|
|
|
|
|
+{
|
|
|
|
|
+ enum rtx_code code;
|
|
|
|
|
+
|
|
|
|
|
+ gcc_assert (op);
|
|
|
|
|
+ code = GET_CODE (op);
|
|
|
|
|
+
|
|
|
|
|
+ switch (letter)
|
|
|
|
|
+ {
|
|
|
|
|
+ case 'h':
|
|
|
|
|
+ if (code == HIGH)
|
|
|
|
|
+ op = XEXP (op, 0);
|
|
|
|
|
+ riscv_print_operand_reloc (file, op, riscv_hi_relocs);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case 'R':
|
|
|
|
|
+ riscv_print_operand_reloc (file, op, riscv_lo_relocs);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case 'C':
|
|
|
|
|
+ /* The RTL names match the instruction names. */
|
|
|
|
|
+ fputs (GET_RTX_NAME (code), file);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case 'A':
|
|
|
|
|
+ fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ switch (code)
|
|
|
|
|
+ {
|
|
|
|
|
+ case REG:
|
|
|
|
|
+ if (letter && letter != 'z')
|
|
|
|
|
+ output_operand_lossage ("invalid use of '%%%c'", letter);
|
|
|
|
|
+ fprintf (file, "%s", reg_names[REGNO (op)]);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case MEM:
|
|
|
|
|
+ if (letter == 'y')
|
|
|
|
|
+ fprintf (file, "%s", reg_names[REGNO(XEXP(op, 0))]);
|
|
|
|
|
+ else if (letter && letter != 'z')
|
|
|
|
|
+ output_operand_lossage ("invalid use of '%%%c'", letter);
|
|
|
|
|
+ else
|
|
|
|
|
+ output_address (XEXP (op, 0));
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
|
|
|
|
|
+ fputs (reg_names[GP_REG_FIRST], file);
|
|
|
|
|
+ else if (letter && letter != 'z')
|
|
|
|
|
+ output_operand_lossage ("invalid use of '%%%c'", letter);
|
|
|
|
|
+ else
|
|
|
|
|
+ output_addr_const (file, riscv_strip_unspec_address (op));
|
|
|
|
|
+ break;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_print_operand_address (FILE *file, rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_address_info addr;
|
|
|
|
|
+
|
|
|
|
|
+ if (riscv_classify_address (&addr, x, word_mode, true))
|
|
|
|
|
+ switch (addr.type)
|
|
|
|
|
+ {
|
|
|
|
|
+ case ADDRESS_REG:
|
|
|
|
|
+ riscv_print_operand (file, addr.offset, 0);
|
|
|
|
|
+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
|
|
|
|
|
+ return;
|
|
|
|
|
+
|
|
|
|
|
+ case ADDRESS_LO_SUM:
|
|
|
|
|
+ riscv_print_operand_reloc (file, addr.offset, riscv_lo_relocs);
|
|
|
|
|
+ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
|
|
|
|
|
+ return;
|
|
|
|
|
+
|
|
|
|
|
+ case ADDRESS_CONST_INT:
|
|
|
|
|
+ output_addr_const (file, x);
|
|
|
|
|
+ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
|
|
|
|
|
+ return;
|
|
|
|
|
+
|
|
|
|
|
+ case ADDRESS_SYMBOLIC:
|
|
|
|
|
+ output_addr_const (file, riscv_strip_unspec_address (x));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_size_ok_for_small_data_p (int size)
|
|
|
|
|
+{
|
|
|
|
|
+ return g_switch_value && IN_RANGE (size, 1, g_switch_value);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if EXP should be placed in the small data section. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_in_small_data_p (const_tree x)
|
|
|
|
|
+{
|
|
|
|
|
+ if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
|
|
|
|
|
+ {
|
|
|
|
|
+ const char *sec = DECL_SECTION_NAME (x);
|
|
|
|
|
+ return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
|
|
|
|
|
+}
|
|
|
|
|
+
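A hedged sketch of the data riscv_in_small_data_p selects, assuming the small-data limit g_switch_value is nonzero (typically set with -G); the variable names are invented:

/* Illustrative only.  With a limit of 8 bytes, the 4-byte counter is
   eligible for .sdata/.sbss, the 4096-byte table is not, and an explicit
   section attribute is honoured first because DECL_SECTION_NAME is
   checked before the size test.  */
int counter;
int table[1024];
int pinned __attribute__ ((section (".sdata"))) = 1;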
|
|
|
|
|
+/* Return a section for X, handling small data. */
|
|
|
|
|
+
|
|
|
|
|
+static section *
|
|
|
|
|
+riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
|
|
|
|
|
+ unsigned HOST_WIDE_INT align)
|
|
|
|
|
+{
|
|
|
|
|
+ section *s = default_elf_select_rtx_section (mode, x, align);
|
|
|
|
|
+
|
|
|
|
|
+ if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Rename .rodata.cst* to .srodata.cst*. */
|
|
|
|
|
+ char name[32];
|
|
|
|
|
+ sprintf (name, ".s%s", s->named.name + 1);
|
|
|
|
|
+ return get_section (name, s->named.common.flags, NULL);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (s == data_section)
|
|
|
|
|
+ return sdata_section;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return s;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
|
|
|
|
|
+
|
|
|
|
|
+static void ATTRIBUTE_UNUSED
|
|
|
|
|
+riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
|
|
|
|
|
+{
|
|
|
|
|
+ switch (size)
|
|
|
|
|
+ {
|
|
|
|
|
+ case 4:
|
|
|
|
|
+ fputs ("\t.dtprelword\t", file);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case 8:
|
|
|
|
|
+ fputs ("\t.dtpreldword\t", file);
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+ output_addr_const (file, x);
|
|
|
|
|
+ fputs ("+0x800", file);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Make the last instruction frame-related and note that it performs
|
|
|
|
|
+ the operation described by FRAME_PATTERN. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_set_frame_expr (rtx frame_pattern)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx insn;
|
|
|
|
|
+
|
|
|
|
|
+ insn = get_last_insn ();
|
|
|
|
|
+ RTX_FRAME_RELATED_P (insn) = 1;
|
|
|
|
|
+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
|
|
|
|
|
+ frame_pattern,
|
|
|
|
|
+ REG_NOTES (insn));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return a frame-related rtx that stores REG at MEM.
|
|
|
|
|
+ REG must be a single register. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_frame_set (rtx mem, rtx reg)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx set;
|
|
|
|
|
+
|
|
|
|
|
+ set = gen_rtx_SET (VOIDmode, mem, reg);
|
|
|
|
|
+ RTX_FRAME_RELATED_P (set) = 1;
|
|
|
|
|
+
|
|
|
|
|
+ return set;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if the current function must save register REGNO. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_save_reg_p (unsigned int regno)
|
|
|
|
|
+{
|
|
|
|
|
+ bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
|
|
|
|
|
+ bool might_clobber = crtl->saves_all_registers
|
|
|
|
|
+ || df_regs_ever_live_p (regno)
|
|
|
|
|
+ || (regno == HARD_FRAME_POINTER_REGNUM
|
|
|
|
|
+ && frame_pointer_needed);
|
|
|
|
|
+
|
|
|
|
|
+ return (call_saved && might_clobber)
|
|
|
|
|
+ || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Determine whether to call GPR save/restore routines. */
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_use_save_libcall (const struct riscv_frame_info *frame)
|
|
|
|
|
+{
|
|
|
|
|
+ if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
|
|
|
|
|
+ return false;
|
|
|
|
|
+
|
|
|
|
|
+ return frame->save_libcall_adjustment != 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Determine which GPR save/restore routine to call. */
|
|
|
|
|
+
|
|
|
|
|
+static unsigned
|
|
|
|
|
+riscv_save_libcall_count (unsigned mask)
|
|
|
|
|
+{
|
|
|
|
|
+ for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
|
|
|
|
|
+ if (BITSET_P (mask, n))
|
|
|
|
|
+ return CALLEE_SAVED_REG_NUMBER (n) + 1;
|
|
|
|
|
+ abort ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Populate the current function's riscv_frame_info structure.
|
|
|
|
|
+
|
|
|
|
|
+   RISC-V stack frames grow downward.  High addresses are at the top.
|
|
|
|
|
+
|
|
|
|
|
+ +-------------------------------+
|
|
|
|
|
+ | |
|
|
|
|
|
+ | incoming stack arguments |
|
|
|
|
|
+ | |
|
|
|
|
|
+ +-------------------------------+ <-- incoming stack pointer
|
|
|
|
|
+ | |
|
|
|
|
|
+ | callee-allocated save area |
|
|
|
|
|
+ | for arguments that are |
|
|
|
|
|
+ | split between registers and |
|
|
|
|
|
+ | the stack |
|
|
|
|
|
+ | |
|
|
|
|
|
+ +-------------------------------+ <-- arg_pointer_rtx
|
|
|
|
|
+ | |
|
|
|
|
|
+ | callee-allocated save area |
|
|
|
|
|
+ | for register varargs |
|
|
|
|
|
+ | |
|
|
|
|
|
+ +-------------------------------+ <-- hard_frame_pointer_rtx;
|
|
|
|
|
+ | | stack_pointer_rtx + gp_sp_offset
|
|
|
|
|
+ | GPR save area | + UNITS_PER_WORD
|
|
|
|
|
+ | |
|
|
|
|
|
+ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
|
|
|
|
|
+       |                               |       + UNITS_PER_HWFPVALUE
|
|
|
|
|
+ | FPR save area |
|
|
|
|
|
+ | |
|
|
|
|
|
+ +-------------------------------+ <-- frame_pointer_rtx (virtual)
|
|
|
|
|
+ | |
|
|
|
|
|
+ | local variables |
|
|
|
|
|
+ | |
|
|
|
|
|
+ P +-------------------------------+
|
|
|
|
|
+ | |
|
|
|
|
|
+ | outgoing stack arguments |
|
|
|
|
|
+ | |
|
|
|
|
|
+ +-------------------------------+ <-- stack_pointer_rtx
|
|
|
|
|
+
|
|
|
|
|
+ Dynamic stack allocations such as alloca insert data at point P.
|
|
|
|
|
+ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
|
|
|
|
|
+ hard_frame_pointer_rtx unchanged. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_compute_frame_info (void)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_frame_info *frame;
|
|
|
|
|
+ HOST_WIDE_INT offset;
|
|
|
|
|
+ unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
|
|
|
|
|
+
|
|
|
|
|
+ frame = &cfun->machine->frame;
|
|
|
|
|
+ memset (frame, 0, sizeof (*frame));
|
|
|
|
|
+
|
|
|
|
|
+ /* Find out which GPRs we need to save. */
|
|
|
|
|
+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
|
|
|
|
|
+ if (riscv_save_reg_p (regno))
|
|
|
|
|
+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
|
|
|
|
|
+
|
|
|
|
|
+ /* If this function calls eh_return, we must also save and restore the
|
|
|
|
|
+ EH data registers. */
|
|
|
|
|
+ if (crtl->calls_eh_return)
|
|
|
|
|
+ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
|
|
|
|
|
+ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
|
|
|
|
|
+
|
|
|
|
|
+ /* Find out which FPRs we need to save. This loop must iterate over
|
|
|
|
|
+ the same space as its companion in riscv_for_each_saved_gpr_and_fpr. */
|
|
|
|
|
+ if (TARGET_HARD_FLOAT)
|
|
|
|
|
+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
|
|
|
|
|
+ if (riscv_save_reg_p (regno))
|
|
|
|
|
+ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
|
|
|
|
|
+
|
|
|
|
|
+ /* At the bottom of the frame are any outgoing stack arguments. */
|
|
|
|
|
+ offset = crtl->outgoing_args_size;
|
|
|
|
|
+ /* Next are local stack variables. */
|
|
|
|
|
+ offset += RISCV_STACK_ALIGN (get_frame_size ());
|
|
|
|
|
+ /* The virtual frame pointer points above the local variables. */
|
|
|
|
|
+ frame->frame_pointer_offset = offset;
|
|
|
|
|
+ /* Next are the callee-saved FPRs. */
|
|
|
|
|
+ if (frame->fmask)
|
|
|
|
|
+ {
|
|
|
|
|
+ offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FPREG);
|
|
|
|
|
+ frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
|
|
|
|
|
+ }
|
|
|
|
|
+ /* Next are the callee-saved GPRs. */
|
|
|
|
|
+ if (frame->mask)
|
|
|
|
|
+ {
|
|
|
|
|
+ unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
|
|
|
|
|
+ unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
|
|
|
|
|
+
|
|
|
|
|
+ /* Only use save/restore routines if they don't alter the stack size. */
|
|
|
|
|
+ if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
|
|
|
|
|
+ frame->save_libcall_adjustment = x_save_size;
|
|
|
|
|
+
|
|
|
|
|
+ offset += x_save_size;
|
|
|
|
|
+ frame->gp_sp_offset = offset - UNITS_PER_WORD;
|
|
|
|
|
+ }
|
|
|
|
|
+ /* The hard frame pointer points above the callee-saved GPRs. */
|
|
|
|
|
+ frame->hard_frame_pointer_offset = offset;
|
|
|
|
|
+  /* Above the hard frame pointer is the callee-allocated varargs save area. */
|
|
|
|
|
+ offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
|
|
|
|
|
+ frame->arg_pointer_offset = offset;
|
|
|
|
|
+ /* Next is the callee-allocated area for pretend stack arguments. */
|
|
|
|
|
+ offset += crtl->args.pretend_args_size;
|
|
|
|
|
+ frame->total_size = offset;
|
|
|
|
|
+  /* Next are the incoming stack pointer and any incoming arguments. */
|
|
|
|
|
+
|
|
|
|
|
+ /* Only use save/restore routines when the GPRs are atop the frame. */
|
|
|
|
|
+ if (frame->hard_frame_pointer_offset != frame->total_size)
|
|
|
|
|
+ frame->save_libcall_adjustment = 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Make sure that we're not trying to eliminate to the wrong hard frame
|
|
|
|
|
+ pointer. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
|
|
|
|
|
+{
|
|
|
|
|
+ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
|
|
|
|
|
+ or argument pointer. TO is either the stack pointer or hard frame
|
|
|
|
|
+ pointer. */
|
|
|
|
|
+
|
|
|
|
|
+HOST_WIDE_INT
|
|
|
|
|
+riscv_initial_elimination_offset (int from, int to)
|
|
|
|
|
+{
|
|
|
|
|
+ HOST_WIDE_INT src, dest;
|
|
|
|
|
+
|
|
|
|
|
+ riscv_compute_frame_info ();
|
|
|
|
|
+
|
|
|
|
|
+ if (to == HARD_FRAME_POINTER_REGNUM)
|
|
|
|
|
+ dest = cfun->machine->frame.hard_frame_pointer_offset;
|
|
|
|
|
+ else if (to == STACK_POINTER_REGNUM)
|
|
|
|
|
+    dest = 0; /* This is the base of all offsets. */
|
|
|
|
|
+ else
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+
|
|
|
|
|
+ if (from == FRAME_POINTER_REGNUM)
|
|
|
|
|
+ src = cfun->machine->frame.frame_pointer_offset;
|
|
|
|
|
+ else if (from == ARG_POINTER_REGNUM)
|
|
|
|
|
+ src = cfun->machine->frame.arg_pointer_offset;
|
|
|
|
|
+ else
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+
|
|
|
|
|
+ return src - dest;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement RETURN_ADDR_RTX. We do not support moving back to a
|
|
|
|
|
+ previous frame. */
|
|
|
|
|
+
|
|
|
|
|
+rtx
|
|
|
|
|
+riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ if (count != 0)
|
|
|
|
|
+ return const0_rtx;
|
|
|
|
|
+
|
|
|
|
|
+ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
|
|
|
|
|
+}
|
|
|
|
|
+
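A small sketch of the user-visible effect of riscv_return_addr, in plain C: only a count of 0 is supported, so asking for an outer frame yields a null pointer.

void *own_return_address (void)
{
  return __builtin_return_address (0);
}

void *callers_return_address (void)
{
  return __builtin_return_address (1);   /* folds to 0, per riscv_return_addr */
}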
|
|
|
|
|
+/* Emit code to change the current function's return address to
|
|
|
|
|
+ ADDRESS. SCRATCH is available as a scratch register, if needed.
|
|
|
|
|
+ ADDRESS and SCRATCH are both word-mode GPRs. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_set_return_address (rtx address, rtx scratch)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx slot_address;
|
|
|
|
|
+
|
|
|
|
|
+ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
|
|
|
|
|
+ slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
|
|
|
|
|
+ cfun->machine->frame.gp_sp_offset);
|
|
|
|
|
+ riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* A function to save or restore a register.  The first argument is the
|
|
|
|
|
+ register and the second is the stack slot. */
|
|
|
|
|
+typedef void (*riscv_save_restore_fn) (rtx, rtx);
|
|
|
|
|
+
|
|
|
|
|
+/* Use FN to save or restore register REGNO. MODE is the register's
|
|
|
|
|
+ mode and OFFSET is the offset of its save slot from the current
|
|
|
|
|
+ stack pointer. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_save_restore_reg (enum machine_mode mode, int regno,
|
|
|
|
|
+ HOST_WIDE_INT offset, riscv_save_restore_fn fn)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx mem;
|
|
|
|
|
+
|
|
|
|
|
+ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
|
|
|
|
|
+ fn (gen_rtx_REG (mode, regno), mem);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Call FN for each register that is saved by the current function.
|
|
|
|
|
+ SP_OFFSET is the offset of the current stack pointer from the start
|
|
|
|
|
+ of the frame. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
|
|
|
|
|
+ riscv_save_restore_fn fn)
|
|
|
|
|
+{
|
|
|
|
|
+ HOST_WIDE_INT offset;
|
|
|
|
|
+ int regno;
|
|
|
|
|
+
|
|
|
|
|
+ /* Save the link register and s-registers. */
|
|
|
|
|
+ offset = cfun->machine->frame.gp_sp_offset - sp_offset;
|
|
|
|
|
+ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
|
|
|
|
|
+ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_save_restore_reg (word_mode, regno, offset, fn);
|
|
|
|
|
+ offset -= UNITS_PER_WORD;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* This loop must iterate over the same space as its companion in
|
|
|
|
|
+ riscv_compute_frame_info. */
|
|
|
|
|
+ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
|
|
|
|
|
+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
|
|
|
|
|
+ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_save_restore_reg (DFmode, regno, offset, fn);
|
|
|
|
|
+ offset -= GET_MODE_SIZE (DFmode);
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Emit a move from SRC to DEST, given that one of them is a register
|
|
|
|
|
+ save slot and that the other is a register. TEMP is a temporary
|
|
|
|
|
+ GPR of the same mode that is available if need be. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_emit_save_slot_move (rtx dest, rtx src, rtx temp)
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned int regno;
|
|
|
|
|
+ rtx mem;
|
|
|
|
|
+ enum reg_class rclass;
|
|
|
|
|
+
|
|
|
|
|
+ if (REG_P (src))
|
|
|
|
|
+ {
|
|
|
|
|
+ regno = REGNO (src);
|
|
|
|
|
+ mem = dest;
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ regno = REGNO (dest);
|
|
|
|
|
+ mem = src;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ rclass = riscv_secondary_reload_class (REGNO_REG_CLASS (regno),
|
|
|
|
|
+ GET_MODE (mem), mem, mem == src);
|
|
|
|
|
+
|
|
|
|
|
+ if (rclass == NO_REGS)
|
|
|
|
|
+ riscv_emit_move (dest, src);
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ gcc_assert (!reg_overlap_mentioned_p (dest, temp));
|
|
|
|
|
+ riscv_emit_move (temp, src);
|
|
|
|
|
+ riscv_emit_move (dest, temp);
|
|
|
|
|
+ }
|
|
|
|
|
+ if (MEM_P (dest))
|
|
|
|
|
+ riscv_set_frame_expr (riscv_frame_set (dest, src));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Save register REG to MEM. Make the instruction frame-related. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_save_reg (rtx reg, rtx mem)
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_emit_save_slot_move (mem, reg, RISCV_PROLOGUE_TEMP (GET_MODE (reg)));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the code to invoke the GPR save routine. */
|
|
|
|
|
+
|
|
|
|
|
+const char *
|
|
|
|
|
+riscv_output_gpr_save (unsigned mask)
|
|
|
|
|
+{
|
|
|
|
|
+ static char buf[GP_REG_NUM * 32];
|
|
|
|
|
+ size_t len = 0;
|
|
|
|
|
+ unsigned n = riscv_save_libcall_count (mask), i;
|
|
|
|
|
+ unsigned frame_size = RISCV_STACK_ALIGN ((n + 1) * UNITS_PER_WORD);
|
|
|
|
|
+
|
|
|
|
|
+ len += sprintf (buf + len, "call\tt0,__riscv_save_%u", n);
|
|
|
|
|
+
|
|
|
|
|
+#ifdef DWARF2_UNWIND_INFO
|
|
|
|
|
+ /* Describe the effect of the call to __riscv_save_X. */
|
|
|
|
|
+ if (dwarf2out_do_cfi_asm ())
|
|
|
|
|
+ {
|
|
|
|
|
+ len += sprintf (buf + len, "\n\t.cfi_def_cfa_offset %u", frame_size);
|
|
|
|
|
+
|
|
|
|
|
+ for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
|
|
|
|
|
+ if (BITSET_P (cfun->machine->frame.mask, i))
|
|
|
|
|
+ len += sprintf (buf + len, "\n\t.cfi_offset %u,%d", i,
|
|
|
|
|
+ (CALLEE_SAVED_REG_NUMBER (i) + 2) * -UNITS_PER_WORD);
|
|
|
|
|
+ }
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+ return buf;
|
|
|
|
|
+}
|
|
|
|
|
+
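As a hedged illustration of when the save routine is used (the function below is invented, and the decision also depends on riscv_use_save_libcall and the frame layout computed above): a function that keeps several values live across calls needs callee-saved GPRs, and when TARGET_SAVE_RESTORE is enabled its prologue can become a single "call t0,__riscv_save_N" rather than a series of individual stores.

extern int transform (int);

/* Illustrative only: v, n, i and total stay live across the call, so the
   compiler allocates them to callee-saved registers that the prologue
   must preserve.  */
int sum_transformed (const int *v, int n)
{
  int i, total = 0;
  for (i = 0; i < n; i++)
    total += transform (v[i]);
  return total;
}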
|
|
|
|
|
+/* Expand the "prologue" pattern. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_expand_prologue (void)
|
|
|
|
|
+{
|
|
|
|
|
+ struct riscv_frame_info *frame = &cfun->machine->frame;
|
|
|
|
|
+ HOST_WIDE_INT size = frame->total_size;
|
|
|
|
|
+ unsigned mask = frame->mask;
|
|
|
|
|
+ rtx insn;
|
|
|
|
|
+
|
|
|
|
|
+ if (flag_stack_usage_info)
|
|
|
|
|
+ current_function_static_stack_size = size;
|
|
|
|
|
+
|
|
|
|
|
+  /* If the GPR save routine is in use, call it to save the registers. */
|
|
|
|
|
+ if (riscv_use_save_libcall (frame))
|
|
|
|
|
+ {
|
|
|
|
|
+ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
|
|
|
|
|
+ size -= frame->save_libcall_adjustment;
|
|
|
|
|
+ emit_insn (gen_gpr_save (GEN_INT (mask)));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Save the registers. Allocate up to RISCV_MAX_FIRST_STACK_STEP
|
|
|
|
|
+ bytes beforehand; this is enough to cover the register save area
|
|
|
|
|
+ without going out of range. */
|
|
|
|
|
+ if ((frame->mask | frame->fmask) != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ HOST_WIDE_INT step1;
|
|
|
|
|
+
|
|
|
|
|
+ step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
|
|
|
|
|
+ insn = gen_add3_insn (stack_pointer_rtx,
|
|
|
|
|
+ stack_pointer_rtx,
|
|
|
|
|
+ GEN_INT (-step1));
|
|
|
|
|
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
|
|
|
|
|
+ size -= step1;
|
|
|
|
|
+ riscv_for_each_saved_gpr_and_fpr (size, riscv_save_reg);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ frame->mask = mask; /* Undo the above fib. */
|
|
|
|
|
+
|
|
|
|
|
+ /* Set up the frame pointer, if we're using one. */
|
|
|
|
|
+ if (frame_pointer_needed)
|
|
|
|
|
+ {
|
|
|
|
|
+ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
|
|
|
|
|
+ GEN_INT (frame->hard_frame_pointer_offset - size));
|
|
|
|
|
+ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Allocate the rest of the frame. */
|
|
|
|
|
+ if (size > 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (SMALL_OPERAND (-size))
|
|
|
|
|
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
|
|
|
|
|
+ GEN_INT (-size)));
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
|
|
|
|
|
+ emit_insn (gen_add3_insn (stack_pointer_rtx,
|
|
|
|
|
+ stack_pointer_rtx,
|
|
|
|
|
+ RISCV_PROLOGUE_TEMP (Pmode)));
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (frame->total_size > 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Describe the effect of the instructions that adjusted sp. */
|
|
|
|
|
+ insn = plus_constant (Pmode, stack_pointer_rtx, -frame->total_size);
|
|
|
|
|
+ insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx, insn);
|
|
|
|
|
+ riscv_set_frame_expr (insn);
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Emit instructions to restore register REG from slot MEM. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_restore_reg (rtx reg, rtx mem)
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_emit_save_slot_move (reg, mem, RISCV_EPILOGUE_TEMP (GET_MODE (reg)));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
|
|
|
|
|
+ says which. */
|
|
|
|
|
+
|
|
|
|
|
+void
|
|
|
|
|
+riscv_expand_epilogue (bool sibcall_p)
|
|
|
|
|
+{
|
|
|
|
|
+ /* Split the frame into two. STEP1 is the amount of stack we should
|
|
|
|
|
+ deallocate before restoring the registers. STEP2 is the amount we
|
|
|
|
|
+ should deallocate afterwards.
|
|
|
|
|
+
|
|
|
|
|
+ Start off by assuming that no registers need to be restored. */
|
|
|
|
|
+ struct riscv_frame_info *frame = &cfun->machine->frame;
|
|
|
|
|
+ unsigned mask = frame->mask;
|
|
|
|
|
+ HOST_WIDE_INT step1 = frame->total_size;
|
|
|
|
|
+ HOST_WIDE_INT step2 = 0;
|
|
|
|
|
+ bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
|
|
|
|
|
+ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
|
|
|
|
|
+
|
|
|
|
|
+ if (!sibcall_p && riscv_can_use_return_insn ())
|
|
|
|
|
+ {
|
|
|
|
|
+ emit_jump_insn (gen_return ());
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Move past any dynamic stack allocations. */
|
|
|
|
|
+ if (cfun->calls_alloca)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
|
|
|
|
|
+ if (!SMALL_INT (adjust))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
|
|
|
|
|
+ adjust = RISCV_EPILOGUE_TEMP (Pmode);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, adjust));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* If we need to restore registers, deallocate as much stack as
|
|
|
|
|
+ possible in the second step without going out of range. */
|
|
|
|
|
+ if ((frame->mask | frame->fmask) != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
|
|
|
|
|
+ step1 -= step2;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+  /* Deallocate the initial part of the frame: set sp to sp + STEP1. */
|
|
|
|
|
+ if (step1 > 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ /* Get an rtx for STEP1 that we can add to BASE. */
|
|
|
|
|
+ rtx adjust = GEN_INT (step1);
|
|
|
|
|
+ if (!SMALL_OPERAND (step1))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
|
|
|
|
|
+ adjust = RISCV_EPILOGUE_TEMP (Pmode);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (use_restore_libcall)
|
|
|
|
|
+ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
|
|
|
|
|
+
|
|
|
|
|
+ /* Restore the registers. */
|
|
|
|
|
+ riscv_for_each_saved_gpr_and_fpr (frame->total_size - step2,
|
|
|
|
|
+ riscv_restore_reg);
|
|
|
|
|
+
|
|
|
|
|
+ if (use_restore_libcall)
|
|
|
|
|
+ {
|
|
|
|
|
+ frame->mask = mask; /* Undo the above fib. */
|
|
|
|
|
+ gcc_assert (step2 >= frame->save_libcall_adjustment);
|
|
|
|
|
+ step2 -= frame->save_libcall_adjustment;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Deallocate the final bit of the frame. */
|
|
|
|
|
+ if (step2 > 0)
|
|
|
|
|
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
|
|
|
|
|
+ GEN_INT (step2)));
|
|
|
|
|
+
|
|
|
|
|
+ if (use_restore_libcall)
|
|
|
|
|
+ {
|
|
|
|
|
+ emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
|
|
|
|
|
+ emit_jump_insn (gen_gpr_restore_return (ra));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Add in the __builtin_eh_return stack adjustment. */
|
|
|
|
|
+ if (crtl->calls_eh_return)
|
|
|
|
|
+ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
|
|
|
|
|
+ EH_RETURN_STACKADJ_RTX));
|
|
|
|
|
+
|
|
|
|
|
+ if (!sibcall_p)
|
|
|
|
|
+ emit_jump_insn (gen_simple_return_internal (ra));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return nonzero if this function is known to have a null epilogue.
|
|
|
|
|
+ This allows the optimizer to omit jumps to jumps if no stack
|
|
|
|
|
+ was created. */
|
|
|
|
|
+
|
|
|
|
|
+bool
|
|
|
|
|
+riscv_can_use_return_insn (void)
|
|
|
|
|
+{
|
|
|
|
|
+ return reload_completed && cfun->machine->frame.total_size == 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return true if register REGNO can store a value of mode MODE.
|
|
|
|
|
+ The result of this function is cached in riscv_hard_regno_mode_ok. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ unsigned int size = GET_MODE_SIZE (mode);
|
|
|
|
|
+ enum mode_class mclass = GET_MODE_CLASS (mode);
|
|
|
|
|
+
|
|
|
|
|
+ /* This is hella bogus but ira_build segfaults on RV32 without it. */
|
|
|
|
|
+ if (VECTOR_MODE_P (mode))
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ if (GP_REG_P (regno))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (size <= UNITS_PER_WORD)
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ /* Double-word values must be even-register-aligned. */
|
|
|
|
|
+ if (size <= 2 * UNITS_PER_WORD)
|
|
|
|
|
+ return regno % 2 == 0;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (FP_REG_P (regno))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (mclass == MODE_FLOAT
|
|
|
|
|
+ || mclass == MODE_COMPLEX_FLOAT
|
|
|
|
|
+ || mclass == MODE_VECTOR_FLOAT)
|
|
|
|
|
+ return size <= UNITS_PER_FPVALUE;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return false;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement HARD_REGNO_NREGS. */
|
|
|
|
|
+
|
|
|
|
|
+unsigned int
|
|
|
|
|
+riscv_hard_regno_nregs (int regno, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ if (FP_REG_P (regno))
|
|
|
|
|
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
|
|
|
|
|
+
|
|
|
|
|
+ /* All other registers are word-sized. */
|
|
|
|
|
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
|
|
|
|
|
+ in riscv_hard_regno_nregs. */
|
|
|
|
|
+
|
|
|
|
|
+int
|
|
|
|
|
+riscv_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ int size;
|
|
|
|
|
+ HARD_REG_SET left;
|
|
|
|
|
+
|
|
|
|
|
+ size = 0x8000;
|
|
|
|
|
+ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
|
|
|
|
|
+ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
|
|
|
|
|
+ {
|
|
|
|
|
+ size = MIN (size, UNITS_PER_FPREG);
|
|
|
|
|
+ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
|
|
|
|
|
+ }
|
|
|
|
|
+ if (!hard_reg_set_empty_p (left))
|
|
|
|
|
+ size = MIN (size, UNITS_PER_WORD);
|
|
|
|
|
+ return (GET_MODE_SIZE (mode) + size - 1) / size;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
|
|
|
|
|
+
|
|
|
|
|
+static reg_class_t
|
|
|
|
|
+riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
|
|
|
|
|
+{
|
|
|
|
|
+ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
|
|
|
|
|
+ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
|
|
|
|
|
+ rclass;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
|
|
|
|
|
+ Return a "canonical" class to represent it in later calculations. */
|
|
|
|
|
+
|
|
|
|
|
+static reg_class_t
|
|
|
|
|
+riscv_canonicalize_move_class (reg_class_t rclass)
|
|
|
|
|
+{
|
|
|
|
|
+ if (reg_class_subset_p (rclass, GENERAL_REGS))
|
|
|
|
|
+ rclass = GENERAL_REGS;
|
|
|
|
|
+
|
|
|
|
|
+ return rclass;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_REGISTER_MOVE_COST.  Return 0 for classes that are
|
|
|
|
|
+   unions of other classes; regclass will then work out the maximum of
|
|
|
|
|
+   the subclass costs for us. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
|
|
|
|
|
+ reg_class_t from, reg_class_t to)
|
|
|
|
|
+{
|
|
|
|
|
+ from = riscv_canonicalize_move_class (from);
|
|
|
|
|
+ to = riscv_canonicalize_move_class (to);
|
|
|
|
|
+
|
|
|
|
|
+ if ((from == GENERAL_REGS && to == GENERAL_REGS)
|
|
|
|
|
+ || (from == GENERAL_REGS && to == FP_REGS)
|
|
|
|
|
+ || (from == FP_REGS && to == FP_REGS))
|
|
|
|
|
+ return COSTS_N_INSNS (1);
|
|
|
|
|
+
|
|
|
|
|
+ if (from == FP_REGS && to == GENERAL_REGS)
|
|
|
|
|
+ return tune_info->fp_to_int_cost;
|
|
|
|
|
+
|
|
|
|
|
+ return 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_MEMORY_MOVE_COST. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
|
|
|
|
|
+{
|
|
|
|
|
+ return (tune_info->memory_cost
|
|
|
|
|
+ + memory_move_secondary_cost (mode, rclass, in));
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the register class required for a secondary register when
|
|
|
|
|
+ copying between one of the registers in RCLASS and value X, which
|
|
|
|
|
+ has mode MODE. X is the source of the move if IN_P, otherwise it
|
|
|
|
|
+ is the destination. Return NO_REGS if no secondary register is
|
|
|
|
|
+ needed. */
|
|
|
|
|
+
|
|
|
|
|
+enum reg_class
|
|
|
|
|
+riscv_secondary_reload_class (enum reg_class rclass,
|
|
|
|
|
+ enum machine_mode mode, rtx x,
|
|
|
|
|
+ bool in_p ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ int regno;
|
|
|
|
|
+
|
|
|
|
|
+ regno = true_regnum (x);
|
|
|
|
|
+
|
|
|
|
|
+ if (reg_class_subset_p (rclass, FP_REGS))
|
|
|
|
|
+ {
|
|
|
|
|
+ if (MEM_P (x) && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
|
|
|
|
|
+ /* We can use flw/fld/fsw/fsd. */
|
|
|
|
|
+ return NO_REGS;
|
|
|
|
|
+
|
|
|
|
|
+ if (GP_REG_P (regno) || x == CONST0_RTX (mode))
|
|
|
|
|
+ /* We can use fmv or go through memory when mode > Pmode. */
|
|
|
|
|
+ return NO_REGS;
|
|
|
|
|
+
|
|
|
|
|
+ if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
|
|
|
|
|
+ /* We can force the constant to memory and use flw/fld. */
|
|
|
|
|
+ return NO_REGS;
|
|
|
|
|
+
|
|
|
|
|
+ if (FP_REG_P (regno))
|
|
|
|
|
+ /* We can use fmv.fmt. */
|
|
|
|
|
+ return NO_REGS;
|
|
|
|
|
+
|
|
|
|
|
+ /* Otherwise, we need to reload through an integer register. */
|
|
|
|
|
+ return GR_REGS;
|
|
|
|
|
+ }
|
|
|
|
|
+ if (FP_REG_P (regno))
|
|
|
|
|
+ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
|
|
|
|
|
+
|
|
|
|
|
+ return NO_REGS;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_MODE_REP_EXTENDED. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
|
|
|
|
|
+{
|
|
|
|
|
+ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
|
|
|
|
|
+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
|
|
|
|
|
+ return SIGN_EXTEND;
|
|
|
|
|
+
|
|
|
|
|
+ return UNKNOWN;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_scalar_mode_supported_p (enum machine_mode mode)
|
|
|
|
|
+{
|
|
|
|
|
+ if (ALL_FIXED_POINT_MODE_P (mode)
|
|
|
|
|
+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
|
|
|
|
|
+ return true;
|
|
|
|
|
+
|
|
|
|
|
+ return default_scalar_mode_supported_p (mode);
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
|
|
|
|
|
+ dependencies have no cost. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_adjust_cost (rtx_insn *insn ATTRIBUTE_UNUSED, rtx link,
|
|
|
|
|
+ rtx_insn *dep ATTRIBUTE_UNUSED, int cost)
|
|
|
|
|
+{
|
|
|
|
|
+ if (REG_NOTE_KIND (link) != 0)
|
|
|
|
|
+ return 0;
|
|
|
|
|
+ return cost;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return the number of instructions that can be issued per cycle. */
|
|
|
|
|
+
|
|
|
|
|
+static int
|
|
|
|
|
+riscv_issue_rate (void)
|
|
|
|
|
+{
|
|
|
|
|
+ return tune_info->issue_rate;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* This structure describes a single built-in function. */
|
|
|
|
|
+struct riscv_builtin_description {
|
|
|
|
|
+ /* The code of the main .md file instruction. See riscv_builtin_type
|
|
|
|
|
+ for more information. */
|
|
|
|
|
+ enum insn_code icode;
|
|
|
|
|
+
|
|
|
|
|
+ /* The name of the built-in function. */
|
|
|
|
|
+ const char *name;
|
|
|
|
|
+
|
|
|
|
|
+ /* Specifies how the function should be expanded. */
|
|
|
|
|
+ enum riscv_builtin_type builtin_type;
|
|
|
|
|
+
|
|
|
|
|
+ /* The function's prototype. */
|
|
|
|
|
+ enum riscv_function_type function_type;
|
|
|
|
|
+
|
|
|
|
|
+ /* Whether the function is available. */
|
|
|
|
|
+ unsigned int (*avail) (void);
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+static unsigned int
|
|
|
|
|
+riscv_builtin_avail_riscv (void)
|
|
|
|
|
+{
|
|
|
|
|
+ return 1;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Construct a riscv_builtin_description from the given arguments.
|
|
|
|
|
+
|
|
|
|
|
+ INSN is the name of the associated instruction pattern, without the
|
|
|
|
|
+ leading CODE_FOR_riscv_.
|
|
|
|
|
+
|
|
|
|
|
+ CODE is the floating-point condition code associated with the
|
|
|
|
|
+ function. It can be 'f' if the field is not applicable.
|
|
|
|
|
+
|
|
|
|
|
+ NAME is the name of the function itself, without the leading
|
|
|
|
|
+ "__builtin_riscv_".
|
|
|
|
|
+
|
|
|
|
|
+ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
|
|
|
|
|
+
|
|
|
|
|
+ AVAIL is the name of the availability predicate, without the leading
|
|
|
|
|
+ riscv_builtin_avail_. */
|
|
|
|
|
+#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
|
|
|
|
|
+ { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME, \
|
|
|
|
|
+ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
|
|
|
|
|
+
|
|
|
|
|
+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
|
|
|
|
|
+ mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE and AVAIL
|
|
|
|
|
+ are as for RISCV_BUILTIN. */
|
|
|
|
|
+#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
|
|
|
|
|
+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
|
|
|
|
|
+
|
|
|
|
|
+/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
|
|
|
|
|
+ function mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE
|
|
|
|
|
+ and AVAIL are as for RISCV_BUILTIN. */
|
|
|
|
|
+#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
|
|
|
|
|
+ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
|
|
|
|
|
+ FUNCTION_TYPE, AVAIL)
|
|
|
|
|
+
|
|
|
|
|
+static const struct riscv_builtin_description riscv_builtins[] = {
|
|
|
|
|
+ DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
|
|
|
|
|
+};
|
|
|
|
|
+
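The table above declares a single machine-specific builtin; a minimal usage sketch, assuming nothing beyond the DIRECT_NO_TARGET_BUILTIN entry shown:

/* Expands directly to the nop pattern through
   riscv_expand_builtin_direct.  */
void spin_once (void)
{
  __builtin_riscv_nop ();
}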
|
|
|
|
|
+/* Index I is the function declaration for riscv_builtins[I], or null if the
|
|
|
|
|
+ function isn't defined on this target. */
|
|
|
|
|
+static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Source-level argument types. */
|
|
|
|
|
+#define RISCV_ATYPE_VOID void_type_node
|
|
|
|
|
+#define RISCV_ATYPE_INT integer_type_node
|
|
|
|
|
+#define RISCV_ATYPE_POINTER ptr_type_node
|
|
|
|
|
+#define RISCV_ATYPE_CPOINTER const_ptr_type_node
|
|
|
|
|
+
|
|
|
|
|
+/* Standard mode-based argument types. */
|
|
|
|
|
+#define RISCV_ATYPE_UQI unsigned_intQI_type_node
|
|
|
|
|
+#define RISCV_ATYPE_SI intSI_type_node
|
|
|
|
|
+#define RISCV_ATYPE_USI unsigned_intSI_type_node
|
|
|
|
|
+#define RISCV_ATYPE_DI intDI_type_node
|
|
|
|
|
+#define RISCV_ATYPE_UDI unsigned_intDI_type_node
|
|
|
|
|
+#define RISCV_ATYPE_SF float_type_node
|
|
|
|
|
+#define RISCV_ATYPE_DF double_type_node
|
|
|
|
|
+
|
|
|
|
|
+/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
|
|
|
|
|
+ their associated RISCV_ATYPEs. */
|
|
|
|
|
+#define RISCV_FTYPE_ATYPES1(A, B) \
|
|
|
|
|
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B
|
|
|
|
|
+
|
|
|
|
|
+#define RISCV_FTYPE_ATYPES2(A, B, C) \
|
|
|
|
|
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
|
|
|
|
|
+
|
|
|
|
|
+#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
|
|
|
|
|
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
|
|
|
|
|
+
|
|
|
|
|
+#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
|
|
|
|
|
+ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
|
|
|
|
|
+ RISCV_ATYPE_##E
|
|
|
|
|
+
|
|
|
|
|
+/* Return the function type associated with function prototype TYPE. */
|
|
|
|
|
+
|
|
|
|
|
+static tree
|
|
|
|
|
+riscv_build_function_type (enum riscv_function_type type)
|
|
|
|
|
+{
|
|
|
|
|
+ static tree types[(int) RISCV_MAX_FTYPE_MAX];
|
|
|
|
|
+
|
|
|
|
|
+ if (types[(int) type] == NULL_TREE)
|
|
|
|
|
+ switch (type)
|
|
|
|
|
+ {
|
|
|
|
|
+#define DEF_RISCV_FTYPE(NUM, ARGS) \
|
|
|
|
|
+ case RISCV_FTYPE_NAME##NUM ARGS: \
|
|
|
|
|
+ types[(int) type] \
|
|
|
|
|
+ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
|
|
|
|
|
+ NULL_TREE); \
|
|
|
|
|
+ break;
|
|
|
|
|
+#include "config/riscv/riscv-ftypes.def"
|
|
|
|
|
+#undef DEF_RISCV_FTYPE
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return types[(int) type];
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_INIT_BUILTINS. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_init_builtins (void)
|
|
|
|
|
+{
|
|
|
|
|
+ const struct riscv_builtin_description *d;
|
|
|
|
|
+ unsigned int i;
|
|
|
|
|
+
|
|
|
|
|
+  /* Iterate through riscv_builtins, initializing all of the
|
|
|
|
|
+ builtin functions. */
|
|
|
|
|
+ for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
|
|
|
|
|
+ {
|
|
|
|
|
+ d = &riscv_builtins[i];
|
|
|
|
|
+ if (d->avail ())
|
|
|
|
|
+ riscv_builtin_decls[i]
|
|
|
|
|
+ = add_builtin_function (d->name,
|
|
|
|
|
+ riscv_build_function_type (d->function_type),
|
|
|
|
|
+ i, BUILT_IN_MD, NULL, NULL);
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_BUILTIN_DECL. */
|
|
|
|
|
+
|
|
|
|
|
+static tree
|
|
|
|
|
+riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ if (code >= ARRAY_SIZE (riscv_builtins))
|
|
|
|
|
+ return error_mark_node;
|
|
|
|
|
+ return riscv_builtin_decls[code];
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Take argument ARGNO from EXP's argument list and convert it into a
|
|
|
|
|
+ form suitable for input operand OPNO of instruction ICODE. Return the
|
|
|
|
|
+ value. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_prepare_builtin_arg (enum insn_code icode,
|
|
|
|
|
+ unsigned int opno, tree exp, unsigned int argno)
|
|
|
|
|
+{
|
|
|
|
|
+ tree arg;
|
|
|
|
|
+ rtx value;
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+
|
|
|
|
|
+ arg = CALL_EXPR_ARG (exp, argno);
|
|
|
|
|
+ value = expand_normal (arg);
|
|
|
|
|
+ mode = insn_data[icode].operand[opno].mode;
|
|
|
|
|
+ if (!insn_data[icode].operand[opno].predicate (value, mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ /* We need to get the mode from ARG for two reasons:
|
|
|
|
|
+
|
|
|
|
|
+ - to cope with address operands, where MODE is the mode of the
|
|
|
|
|
+ memory, rather than of VALUE itself.
|
|
|
|
|
+
|
|
|
|
|
+ - to cope with special predicates like pmode_register_operand,
|
|
|
|
|
+ where MODE is VOIDmode. */
|
|
|
|
|
+ value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
|
|
|
|
|
+
|
|
|
|
|
+ /* Check the predicate again. */
|
|
|
|
|
+ if (!insn_data[icode].operand[opno].predicate (value, mode))
|
|
|
|
|
+ {
|
|
|
|
|
+ error ("invalid argument to built-in function");
|
|
|
|
|
+ return const0_rtx;
|
|
|
|
|
+ }
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return value;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Return an rtx suitable for output operand OP of instruction ICODE.
|
|
|
|
|
+ If TARGET is non-null, try to use it where possible. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
|
|
|
|
|
+{
|
|
|
|
|
+ enum machine_mode mode;
|
|
|
|
|
+
|
|
|
|
|
+ mode = insn_data[icode].operand[op].mode;
|
|
|
|
|
+ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
|
|
|
|
|
+ target = gen_reg_rtx (mode);
|
|
|
|
|
+
|
|
|
|
|
+ return target;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
|
|
|
|
|
+ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
|
|
|
|
|
+ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
|
|
|
|
|
+ suggests a good place to put the result. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
|
|
|
|
|
+ bool has_target_p)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx ops[MAX_RECOG_OPERANDS];
|
|
|
|
|
+ int opno, argno;
|
|
|
|
|
+
|
|
|
|
|
+ /* Map any target to operand 0. */
|
|
|
|
|
+ opno = 0;
|
|
|
|
|
+ if (has_target_p)
|
|
|
|
|
+ {
|
|
|
|
|
+ target = riscv_prepare_builtin_target (icode, opno, target);
|
|
|
|
|
+ ops[opno] = target;
|
|
|
|
|
+ opno++;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Map the arguments to the other operands. The n_operands value
|
|
|
|
|
+ for an expander includes match_dups and match_scratches as well as
|
|
|
|
|
+ match_operands, so n_operands is only an upper bound on the number
|
|
|
|
|
+ of arguments to the expander function. */
|
|
|
|
|
+ gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
|
|
|
|
|
+ for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
|
|
|
|
|
+ ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
|
|
|
|
|
+
|
|
|
|
|
+ switch (opno)
|
|
|
|
|
+ {
|
|
|
|
|
+ case 2:
|
|
|
|
|
+ emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case 3:
|
|
|
|
|
+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ case 4:
|
|
|
|
|
+ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
|
|
|
|
|
+ break;
|
|
|
|
|
+
|
|
|
|
|
+ default:
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+ }
|
|
|
|
|
+ return target;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_EXPAND_BUILTIN. */
|
|
|
|
|
+
|
|
|
|
|
+static rtx
|
|
|
|
|
+riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
|
|
|
|
|
+ enum machine_mode mode ATTRIBUTE_UNUSED,
|
|
|
|
|
+ int ignore ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ tree fndecl;
|
|
|
|
|
+ unsigned int fcode, avail;
|
|
|
|
|
+ const struct riscv_builtin_description *d;
|
|
|
|
|
+
|
|
|
|
|
+ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
|
|
|
|
|
+ fcode = DECL_FUNCTION_CODE (fndecl);
|
|
|
|
|
+ gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
|
|
|
|
|
+ d = &riscv_builtins[fcode];
|
|
|
|
|
+ avail = d->avail ();
|
|
|
|
|
+ gcc_assert (avail != 0);
|
|
|
|
|
+ switch (d->builtin_type)
|
|
|
|
|
+ {
|
|
|
|
|
+ case RISCV_BUILTIN_DIRECT:
|
|
|
|
|
+ return riscv_expand_builtin_direct (d->icode, target, exp, true);
|
|
|
|
|
+
|
|
|
|
|
+ case RISCV_BUILTIN_DIRECT_NO_TARGET:
|
|
|
|
|
+ return riscv_expand_builtin_direct (d->icode, target, exp, false);
|
|
|
|
|
+ }
|
|
|
|
|
+ gcc_unreachable ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
|
|
|
|
|
+ in order to avoid duplicating too much logic from elsewhere. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
|
|
|
|
|
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
|
|
|
|
|
+ tree function)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx this_rtx, temp1, temp2, fnaddr;
|
|
|
|
|
+ rtx_insn *insn;
|
|
|
|
|
+ bool use_sibcall_p;
|
|
|
|
|
+
|
|
|
|
|
+ /* Pretend to be a post-reload pass while generating rtl. */
|
|
|
|
|
+ reload_completed = 1;
|
|
|
|
|
+
|
|
|
|
|
+ /* Mark the end of the (empty) prologue. */
|
|
|
|
|
+ emit_note (NOTE_INSN_PROLOGUE_END);
|
|
|
|
|
+
|
|
|
|
|
+ /* Determine if we can use a sibcall to call FUNCTION directly. */
|
|
|
|
|
+ fnaddr = XEXP (DECL_RTL (function), 0);
|
|
|
|
|
+ use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
|
|
|
|
|
+
|
|
|
|
|
+ /* We need two temporary registers in some cases. */
|
|
|
|
|
+ temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
|
|
|
|
|
+ temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
|
|
|
|
|
+
|
|
|
|
|
+ /* Find out which register contains the "this" pointer. */
|
|
|
|
|
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
|
|
|
|
|
+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
|
|
|
|
|
+ else
|
|
|
|
|
+ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
|
|
|
|
|
+
|
|
|
|
|
+ /* Add DELTA to THIS_RTX. */
|
|
|
|
|
+ if (delta != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx offset = GEN_INT (delta);
|
|
|
|
|
+ if (!SMALL_OPERAND (delta))
|
|
|
|
|
+ {
|
|
|
|
|
+ riscv_emit_move (temp1, offset);
|
|
|
|
|
+ offset = temp1;
|
|
|
|
|
+ }
|
|
|
|
|
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
|
|
|
|
|
+ if (vcall_offset != 0)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx addr;
|
|
|
|
|
+
|
|
|
|
|
+ /* Set TEMP1 to *THIS_RTX. */
|
|
|
|
|
+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
|
|
|
|
|
+
|
|
|
|
|
+ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
|
|
|
|
|
+ addr = riscv_add_offset (temp2, temp1, vcall_offset);
|
|
|
|
|
+
|
|
|
|
|
+ /* Load the offset and add it to THIS_RTX. */
|
|
|
|
|
+ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
|
|
|
|
|
+ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Jump to the target function. Use a sibcall if direct jumps are
|
|
|
|
|
+ allowed, otherwise load the address into a register first. */
|
|
|
|
|
+ if (use_sibcall_p)
|
|
|
|
|
+ {
|
|
|
|
|
+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
|
|
|
|
|
+ SIBLING_CALL_P (insn) = 1;
|
|
|
|
|
+ }
|
|
|
|
|
+ else
|
|
|
|
|
+ {
|
|
|
|
|
+      riscv_emit_move (temp1, fnaddr);
|
|
|
|
|
+ emit_jump_insn (gen_indirect_jump (temp1));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Run just enough of rest_of_compilation. This sequence was
|
|
|
|
|
+ "borrowed" from alpha.c. */
|
|
|
|
|
+ insn = get_insns ();
|
|
|
|
|
+ split_all_insns_noflow ();
|
|
|
|
|
+ shorten_branches (insn);
|
|
|
|
|
+ final_start_function (insn, file, 1);
|
|
|
|
|
+ final (insn, file, 1);
|
|
|
|
|
+ final_end_function ();
|
|
|
|
|
+
|
|
|
|
|
+ /* Clean up the vars set above. Note that final_end_function resets
|
|
|
|
|
+ the global pointer for us. */
|
|
|
|
|
+ reload_completed = 0;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Allocate a chunk of memory for per-function machine-dependent data. */
|
|
|
|
|
+
|
|
|
|
|
+static struct machine_function *
|
|
|
|
|
+riscv_init_machine_status (void)
|
|
|
|
|
+{
|
|
|
|
|
+ return ggc_cleared_alloc<machine_function> ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_OPTION_OVERRIDE. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_option_override (void)
|
|
|
|
|
+{
|
|
|
|
|
+ int regno, mode;
|
|
|
|
|
+ const struct riscv_cpu_info *cpu;
|
|
|
|
|
+
|
|
|
|
|
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
|
|
|
|
|
+ SUBTARGET_OVERRIDE_OPTIONS;
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+ flag_pcc_struct_return = 0;
|
|
|
|
|
+
|
|
|
|
|
+ if (flag_pic)
|
|
|
|
|
+ g_switch_value = 0;
|
|
|
|
|
+
|
|
|
|
|
+ /* Prefer a call to memcpy over inline code when optimizing for size,
|
|
|
|
|
+ though see MOVE_RATIO in riscv.h. */
|
|
|
|
|
+ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
|
|
|
|
|
+ target_flags |= MASK_MEMCPY;
|
|
|
|
|
+
|
|
|
|
|
+ /* Handle -mtune. */
|
|
|
|
|
+ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
|
|
|
|
|
+ RISCV_TUNE_STRING_DEFAULT);
|
|
|
|
|
+ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
|
|
|
|
|
+
|
|
|
|
|
+ /* If the user hasn't specified a branch cost, use the processor's
|
|
|
|
|
+ default. */
|
|
|
|
|
+ if (riscv_branch_cost == 0)
|
|
|
|
|
+ riscv_branch_cost = tune_info->branch_cost;
|
|
|
|
|
+
|
|
|
|
|
+ /* Set up riscv_hard_regno_mode_ok. */
|
|
|
|
|
+ for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
|
|
|
|
|
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
|
|
|
|
|
+ riscv_hard_regno_mode_ok[mode][regno]
|
|
|
|
|
+ = riscv_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
|
|
|
|
|
+
|
|
|
|
|
+ /* Function to allocate machine-dependent function status. */
|
|
|
|
|
+ init_machine_status = &riscv_init_machine_status;
|
|
|
|
|
+
|
|
|
|
|
+ if (riscv_cmodel_string)
|
|
|
|
|
+ {
|
|
|
|
|
+ if (strcmp (riscv_cmodel_string, "medlow") == 0)
|
|
|
|
|
+ riscv_cmodel = CM_MEDLOW;
|
|
|
|
|
+ else if (strcmp (riscv_cmodel_string, "medany") == 0)
|
|
|
|
|
+ riscv_cmodel = CM_MEDANY;
|
|
|
|
|
+ else
|
|
|
|
|
+ error ("unsupported code model: %s", riscv_cmodel_string);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ if (flag_pic)
|
|
|
|
|
+ riscv_cmodel = CM_PIC;
|
|
|
|
|
+
|
|
|
|
|
+ riscv_init_relocs ();
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_conditional_register_usage (void)
|
|
|
|
|
+{
|
|
|
|
|
+ int regno;
|
|
|
|
|
+
|
|
|
|
|
+ if (!TARGET_HARD_FLOAT)
|
|
|
|
|
+ {
|
|
|
|
|
+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
|
|
|
|
|
+ fixed_regs[regno] = call_used_regs[regno] = 1;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_TRAMPOLINE_INIT. */
|
|
|
|
|
+
|
|
|
|
|
+static void
|
|
|
|
|
+riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
|
|
|
|
|
+{
|
|
|
|
|
+ rtx addr, end_addr, mem;
|
|
|
|
|
+ rtx trampoline[4];
|
|
|
|
|
+ unsigned int i;
|
|
|
|
|
+ HOST_WIDE_INT static_chain_offset, target_function_offset;
|
|
|
|
|
+
|
|
|
|
|
+ /* Work out the offsets of the pointers from the start of the
|
|
|
|
|
+ trampoline code. */
|
|
|
|
|
+ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
|
|
|
|
|
+ static_chain_offset = TRAMPOLINE_CODE_SIZE;
|
|
|
|
|
+ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
|
|
|
|
|
+
|
|
|
|
|
+ /* Get pointers to the beginning and end of the code block. */
|
|
|
|
|
+ addr = force_reg (Pmode, XEXP (m_tramp, 0));
|
|
|
|
|
+ end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
|
|
|
|
|
+
|
|
|
|
|
+#define OP(X) gen_int_mode (X, SImode)
|
|
|
|
|
+#define MATCH_LREG ((Pmode) == DImode ? MATCH_LD : MATCH_LW)
|
|
|
|
|
+
|
|
|
|
|
+ /* auipc t0, 0
|
|
|
|
|
+ l[wd] t1, target_function_offset(t0)
|
|
|
|
|
+ l[wd] $static_chain, static_chain_offset(t0)
|
|
|
|
|
+ jr t1
|
|
|
|
|
+ */
|
|
|
|
|
+
|
|
|
|
|
+ trampoline[0] = OP (RISCV_UTYPE (AUIPC, STATIC_CHAIN_REGNUM, 0));
|
|
|
|
|
+ trampoline[1] = OP (RISCV_ITYPE (LREG, RISCV_PROLOGUE_TEMP_REGNUM,
|
|
|
|
|
+ STATIC_CHAIN_REGNUM, target_function_offset));
|
|
|
|
|
+ trampoline[2] = OP (RISCV_ITYPE (LREG, STATIC_CHAIN_REGNUM,
|
|
|
|
|
+ STATIC_CHAIN_REGNUM, static_chain_offset));
|
|
|
|
|
+ trampoline[3] = OP (RISCV_ITYPE (JALR, 0, RISCV_PROLOGUE_TEMP_REGNUM, 0));
|
|
|
|
|
+
|
|
|
|
|
+#undef MATCH_LREG
|
|
|
|
|
+#undef OP
|
|
|
|
|
+
|
|
|
|
|
+ /* Copy the trampoline code. Leave any padding uninitialized. */
|
|
|
|
|
+ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
|
|
|
|
|
+ {
|
|
|
|
|
+ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
|
|
|
|
|
+ riscv_emit_move (mem, trampoline[i]);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ /* Set up the static chain pointer field. */
|
|
|
|
|
+ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
|
|
|
|
|
+ riscv_emit_move (mem, chain_value);
|
|
|
|
|
+
|
|
|
|
|
+ /* Set up the target function field. */
|
|
|
|
|
+ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
|
|
|
|
|
+ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
|
|
|
|
|
+
|
|
|
|
|
+ /* Flush the code part of the trampoline. */
|
|
|
|
|
+ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
|
|
|
|
|
+ emit_insn (gen_clear_cache (addr, end_addr));
|
|
|
|
|
+}
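
As a rough illustration of the trampoline that riscv_trampoline_init lays out above (16 bytes of code followed by the static-chain and target-function pointers, per TRAMPOLINE_CODE_SIZE and TRAMPOLINE_SIZE in riscv.h below), here is a hedged, standalone sketch of the resulting memory layout. It is not part of the patch and assumes the 64-bit ABI with 8-byte pointers:

    /* Illustrative sketch only: memory layout produced by
       riscv_trampoline_init, assuming TRAMPOLINE_CODE_SIZE == 16 and
       8-byte pointers (RV64); RV32 would use 4-byte pointer slots.  */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rv64_trampoline
    {
      uint32_t code[4];       /* auipc t0,0; ld t1,24(t0); ld t0,16(t0); jr t1 */
      uint64_t static_chain;  /* at static_chain_offset == TRAMPOLINE_CODE_SIZE */
      uint64_t target_fn;     /* at static_chain_offset + sizeof (void *) */
    };

    int main (void)
    {
      printf ("static chain at %zu, target at %zu\n",
              offsetof (struct rv64_trampoline, static_chain),   /* 16 */
              offsetof (struct rv64_trampoline, target_fn));     /* 24 */
      return 0;
    }
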
|
|
|
|
|
+
|
|
|
|
|
+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
|
|
|
|
|
+{
|
|
|
|
|
+ if (TARGET_SAVE_RESTORE)
|
|
|
|
|
+ {
|
|
|
|
|
+      /* When optimizing for size, don't use sibcalls in non-leaf routines.  */
|
|
|
|
|
+ if (cfun->machine->is_leaf == 0)
|
|
|
|
|
+ cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
|
|
|
|
|
+
|
|
|
|
|
+ return cfun->machine->is_leaf > 0;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ return true;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+static bool
|
|
|
|
|
+riscv_lra_p (void)
|
|
|
|
|
+{
|
|
|
|
|
+ return riscv_lra_flag;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Initialize the GCC target structure. */
|
|
|
|
|
+#undef TARGET_ASM_ALIGNED_HI_OP
|
|
|
|
|
+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
|
|
|
|
|
+#undef TARGET_ASM_ALIGNED_SI_OP
|
|
|
|
|
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
|
|
|
|
|
+#undef TARGET_ASM_ALIGNED_DI_OP
|
|
|
|
|
+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_OPTION_OVERRIDE
|
|
|
|
|
+#define TARGET_OPTION_OVERRIDE riscv_option_override
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_LEGITIMIZE_ADDRESS
|
|
|
|
|
+#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_SCHED_ADJUST_COST
|
|
|
|
|
+#define TARGET_SCHED_ADJUST_COST riscv_adjust_cost
|
|
|
|
|
+#undef TARGET_SCHED_ISSUE_RATE
|
|
|
|
|
+#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
|
|
|
|
|
+#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_REGISTER_MOVE_COST
|
|
|
|
|
+#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
|
|
|
|
|
+#undef TARGET_MEMORY_MOVE_COST
|
|
|
|
|
+#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
|
|
|
|
|
+#undef TARGET_RTX_COSTS
|
|
|
|
|
+#define TARGET_RTX_COSTS riscv_rtx_costs
|
|
|
|
|
+#undef TARGET_ADDRESS_COST
|
|
|
|
|
+#define TARGET_ADDRESS_COST riscv_address_cost
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_PREFERRED_RELOAD_CLASS
|
|
|
|
|
+#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
|
|
|
|
|
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_EXPAND_BUILTIN_VA_START
|
|
|
|
|
+#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_PROMOTE_FUNCTION_MODE
|
|
|
|
|
+#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_RETURN_IN_MEMORY
|
|
|
|
|
+#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_ASM_OUTPUT_MI_THUNK
|
|
|
|
|
+#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
|
|
|
|
|
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
|
|
|
|
|
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_PRINT_OPERAND
|
|
|
|
|
+#define TARGET_PRINT_OPERAND riscv_print_operand
|
|
|
|
|
+#undef TARGET_PRINT_OPERAND_ADDRESS
|
|
|
|
|
+#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_SETUP_INCOMING_VARARGS
|
|
|
|
|
+#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
|
|
|
|
|
+#undef TARGET_STRICT_ARGUMENT_NAMING
|
|
|
|
|
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
|
|
|
|
|
+#undef TARGET_MUST_PASS_IN_STACK
|
|
|
|
|
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
|
|
|
|
|
+#undef TARGET_PASS_BY_REFERENCE
|
|
|
|
|
+#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
|
|
|
|
|
+#undef TARGET_ARG_PARTIAL_BYTES
|
|
|
|
|
+#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
|
|
|
|
|
+#undef TARGET_FUNCTION_ARG
|
|
|
|
|
+#define TARGET_FUNCTION_ARG riscv_function_arg
|
|
|
|
|
+#undef TARGET_FUNCTION_ARG_ADVANCE
|
|
|
|
|
+#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
|
|
|
|
|
+#undef TARGET_FUNCTION_ARG_BOUNDARY
|
|
|
|
|
+#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_MODE_REP_EXTENDED
|
|
|
|
|
+#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
|
|
|
|
|
+#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_INIT_BUILTINS
|
|
|
|
|
+#define TARGET_INIT_BUILTINS riscv_init_builtins
|
|
|
|
|
+#undef TARGET_BUILTIN_DECL
|
|
|
|
|
+#define TARGET_BUILTIN_DECL riscv_builtin_decl
|
|
|
|
|
+#undef TARGET_EXPAND_BUILTIN
|
|
|
|
|
+#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_HAVE_TLS
|
|
|
|
|
+#define TARGET_HAVE_TLS HAVE_AS_TLS
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_CANNOT_FORCE_CONST_MEM
|
|
|
|
|
+#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_LEGITIMATE_CONSTANT_P
|
|
|
|
|
+#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
|
|
|
|
|
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
|
|
|
|
|
+
|
|
|
|
|
+#ifdef HAVE_AS_DTPRELWORD
|
|
|
|
|
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
|
|
|
|
|
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_LEGITIMATE_ADDRESS_P
|
|
|
|
|
+#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_CAN_ELIMINATE
|
|
|
|
|
+#define TARGET_CAN_ELIMINATE riscv_can_eliminate
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
|
|
|
|
|
+#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_TRAMPOLINE_INIT
|
|
|
|
|
+#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_IN_SMALL_DATA_P
|
|
|
|
|
+#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_ASM_SELECT_RTX_SECTION
|
|
|
|
|
+#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_MIN_ANCHOR_OFFSET
|
|
|
|
|
+#define TARGET_MIN_ANCHOR_OFFSET (-RISCV_IMM_REACH/2)
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_MAX_ANCHOR_OFFSET
|
|
|
|
|
+#define TARGET_MAX_ANCHOR_OFFSET (RISCV_IMM_REACH/2-1)
|
|
|
|
|
+
|
|
|
|
|
+#undef TARGET_LRA_P
|
|
|
|
|
+#define TARGET_LRA_P riscv_lra_p
|
|
|
|
|
+
|
|
|
|
|
+struct gcc_target targetm = TARGET_INITIALIZER;
|
|
|
|
|
+
|
|
|
|
|
+#include "gt-riscv.h"
|
|
|
|
|
diff -urN empty/gcc/config/riscv/riscv.h gcc-5.2.0/gcc/config/riscv/riscv.h
|
2015-08-23 21:07:46 +02:00
|
|
|
|
--- gcc-5.2.0/gcc/config/riscv/riscv.h 1970-01-01 01:00:00.000000000 +0100
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+++ gcc-5.2.0/gcc/config/riscv/riscv.h 2015-07-17 22:36:52.319705931 +0200
|
|
|
|
|
@@ -0,0 +1,1109 @@
|
|
|
|
|
+/* Definition of RISC-V target for GNU compiler.
|
|
|
|
|
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
|
|
|
|
|
+ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
|
|
|
|
|
+ Based on MIPS target for GNU compiler.
|
|
|
|
|
+
|
|
|
|
|
+This file is part of GCC.
|
|
|
|
|
+
|
|
|
|
|
+GCC is free software; you can redistribute it and/or modify
|
|
|
|
|
+it under the terms of the GNU General Public License as published by
|
|
|
|
|
+the Free Software Foundation; either version 3, or (at your option)
|
|
|
|
|
+any later version.
|
|
|
|
|
+
|
|
|
|
|
+GCC is distributed in the hope that it will be useful,
|
|
|
|
|
+but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
+GNU General Public License for more details.
|
|
|
|
|
+
|
|
|
|
|
+You should have received a copy of the GNU General Public License
|
|
|
|
|
+along with GCC; see the file COPYING3. If not see
|
|
|
|
|
+<http://www.gnu.org/licenses/>. */
|
|
|
|
|
+
|
|
|
|
|
+/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
|
|
|
|
|
+ directly accessible, while the command-line options select
|
|
|
|
|
+ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
|
|
|
|
|
+ in use. */
|
|
|
|
|
+#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI
|
|
|
|
|
+#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI
|
|
|
|
|
+
|
|
|
|
|
+/* Target CPU builtins. */
|
|
|
|
|
+#define TARGET_CPU_CPP_BUILTINS() \
|
|
|
|
|
+ do \
|
|
|
|
|
+ { \
|
|
|
|
|
+ builtin_assert ("machine=riscv"); \
|
|
|
|
|
+ \
|
|
|
|
|
+ builtin_assert ("cpu=riscv"); \
|
|
|
|
|
+ builtin_define ("__riscv__"); \
|
|
|
|
|
+ builtin_define ("__riscv"); \
|
|
|
|
|
+ builtin_define ("_riscv"); \
|
|
|
|
|
+ \
|
|
|
|
|
+ if (TARGET_64BIT) \
|
|
|
|
|
+ { \
|
|
|
|
|
+ builtin_define ("__riscv64"); \
|
|
|
|
|
+ builtin_define ("_RISCV_SIM=_ABI64"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ else \
|
|
|
|
|
+ builtin_define ("_RISCV_SIM=_ABI32"); \
|
|
|
|
|
+ \
|
|
|
|
|
+ builtin_define ("_ABI32=1"); \
|
|
|
|
|
+ builtin_define ("_ABI64=3"); \
|
|
|
|
|
+ \
|
|
|
|
|
+ \
|
|
|
|
|
+ builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE); \
|
|
|
|
|
+ builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE); \
|
|
|
|
|
+ builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE); \
|
|
|
|
|
+ builtin_define_with_int_value ("_RISCV_FPSET", 32); \
|
|
|
|
|
+ \
|
|
|
|
|
+ if (TARGET_ATOMIC) { \
|
|
|
|
|
+ builtin_define ("__riscv_atomic"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ \
|
|
|
|
|
+ /* These defines reflect the ABI in use, not whether the \
|
|
|
|
|
+ FPU is directly accessible. */ \
|
|
|
|
|
+ if (TARGET_HARD_FLOAT_ABI) { \
|
|
|
|
|
+ builtin_define ("__riscv_hard_float"); \
|
|
|
|
|
+ if (TARGET_FDIV) { \
|
|
|
|
|
+ builtin_define ("__riscv_fdiv"); \
|
|
|
|
|
+ builtin_define ("__riscv_fsqrt"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ } else \
|
|
|
|
|
+ builtin_define ("__riscv_soft_float"); \
|
|
|
|
|
+ \
|
|
|
|
|
+ /* The base RISC-V ISA is always little-endian. */ \
|
|
|
|
|
+ builtin_define_std ("RISCVEL"); \
|
|
|
|
|
+ builtin_define ("_RISCVEL"); \
|
|
|
|
|
+ \
|
|
|
|
|
+ /* Macros dependent on the C dialect. */ \
|
|
|
|
|
+ if (preprocessing_asm_p ()) \
|
|
|
|
|
+ { \
|
|
|
|
|
+ builtin_define_std ("LANGUAGE_ASSEMBLY"); \
|
|
|
|
|
+ builtin_define ("_LANGUAGE_ASSEMBLY"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ else if (c_dialect_cxx ()) \
|
|
|
|
|
+ { \
|
|
|
|
|
+ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); \
|
|
|
|
|
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
|
|
|
|
|
+ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ else \
|
|
|
|
|
+ { \
|
|
|
|
|
+ builtin_define_std ("LANGUAGE_C"); \
|
|
|
|
|
+ builtin_define ("_LANGUAGE_C"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ if (c_dialect_objc ()) \
|
|
|
|
|
+ { \
|
|
|
|
|
+ builtin_define ("_LANGUAGE_OBJECTIVE_C"); \
|
|
|
|
|
+ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
|
|
|
|
|
+ /* Bizarre, but needed at least for Irix. */ \
|
|
|
|
|
+ builtin_define_std ("LANGUAGE_C"); \
|
|
|
|
|
+ builtin_define ("_LANGUAGE_C"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ if (riscv_cmodel == CM_MEDANY) \
|
|
|
|
|
+ builtin_define ("_RISCV_CMODEL_MEDANY"); \
|
|
|
|
|
+ } \
|
|
|
|
|
+ while (0)
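
To make the effect of TARGET_CPU_CPP_BUILTINS above concrete, a user program can probe the predefines it installs. This is an illustrative sketch only, not part of the patch; exactly which macros end up defined depends on the -m options as described above:

    /* Illustrative only: probing the predefines installed by
       TARGET_CPU_CPP_BUILTINS above.  */
    #include <stdio.h>

    int main (void)
    {
    #if defined (__riscv)
      puts ("RISC-V target");
    # if defined (__riscv64)
      puts ("64-bit (_RISCV_SIM == _ABI64)");
    # endif
    # if defined (__riscv_hard_float)
      puts ("hard-float ABI");
    # else
      puts ("soft-float ABI");
    # endif
    #else
      puts ("not a RISC-V target");
    #endif
      return 0;
    }
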
|
|
|
|
|
+
|
|
|
|
|
+/* Default target_flags if no switches are specified */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef TARGET_DEFAULT
|
|
|
|
|
+#define TARGET_DEFAULT 0
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#ifndef RISCV_ARCH_STRING_DEFAULT
|
|
|
|
|
+#define RISCV_ARCH_STRING_DEFAULT "IMAFD"
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#ifndef RISCV_TUNE_STRING_DEFAULT
|
|
|
|
|
+#define RISCV_TUNE_STRING_DEFAULT "rocket"
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#ifndef TARGET_64BIT_DEFAULT
|
|
|
|
|
+#define TARGET_64BIT_DEFAULT 1
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#if TARGET_64BIT_DEFAULT
|
|
|
|
|
+# define MULTILIB_ARCH_DEFAULT "m64"
|
|
|
|
|
+# define OPT_ARCH64 "!m32"
|
|
|
|
|
+# define OPT_ARCH32 "m32"
|
|
|
|
|
+#else
|
|
|
|
|
+# define MULTILIB_ARCH_DEFAULT "m32"
|
|
|
|
|
+# define OPT_ARCH64 "m64"
|
|
|
|
|
+# define OPT_ARCH32 "!m64"
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#ifndef MULTILIB_DEFAULTS
|
|
|
|
|
+#define MULTILIB_DEFAULTS \
|
|
|
|
|
+ { MULTILIB_ARCH_DEFAULT }
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Support for a compile-time default CPU, et cetera. The rules are:
|
|
|
|
|
+ --with-arch is ignored if -march is specified.
|
|
|
|
|
+ --with-tune is ignored if -mtune is specified.
|
|
|
|
|
+ --with-float is ignored if -mhard-float or -msoft-float are specified. */
|
|
|
|
|
+#define OPTION_DEFAULT_SPECS \
|
|
|
|
|
+ {"arch", "%{!march=*:-march=%(VALUE)}"}, \
|
|
|
|
|
+ {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
|
|
|
|
|
+ {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
|
|
|
|
|
+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
|
|
|
|
|
+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
|
|
|
|
|
+
|
|
|
|
|
+#define DRIVER_SELF_SPECS ""
|
|
|
|
|
+
|
|
|
|
|
+#ifdef IN_LIBGCC2
|
|
|
|
|
+#undef TARGET_64BIT
|
|
|
|
|
+/* Make this a compile-time constant for libgcc2.  */
|
|
|
|
|
+#ifdef __riscv64
|
|
|
|
|
+#define TARGET_64BIT 1
|
|
|
|
|
+#else
|
|
|
|
|
+#define TARGET_64BIT 0
|
|
|
|
|
+#endif
|
|
|
|
|
+#endif /* IN_LIBGCC2 */
|
|
|
|
|
+
|
|
|
|
|
+/* Tell collect what flags to pass to nm. */
|
|
|
|
|
+#ifndef NM_FLAGS
|
|
|
|
|
+#define NM_FLAGS "-Bn"
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#undef ASM_SPEC
|
|
|
|
|
+#define ASM_SPEC "\
|
|
|
|
|
+%(subtarget_asm_debugging_spec) \
|
|
|
|
|
+%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
|
|
|
|
|
+%{mrvc} \
|
|
|
|
|
+%{fPIC|fpic|fPIE|fpie:-fpic} \
|
|
|
|
|
+%{march=*} \
|
|
|
|
|
+%(subtarget_asm_spec)"
|
|
|
|
|
+
|
|
|
|
|
+/* Extra switches sometimes passed to the linker. */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef LINK_SPEC
|
|
|
|
|
+#define LINK_SPEC "\
|
|
|
|
|
+%{!T:-dT riscv.ld} \
|
|
|
|
|
+%{m64:-melf64lriscv} \
|
|
|
|
|
+%{m32:-melf32lriscv} \
|
|
|
|
|
+%{shared}"
|
|
|
|
|
+#endif /* LINK_SPEC defined */
|
|
|
|
|
+
|
|
|
|
|
+/* This macro defines names of additional specifications to put in the specs
|
|
|
|
|
+ that can be used in various specifications like CC1_SPEC. Its definition
|
|
|
|
|
+ is an initializer with a subgrouping for each command option.
|
|
|
|
|
+
|
|
|
|
|
+ Each subgrouping contains a string constant, that defines the
|
|
|
|
|
+   specification name, and a string constant that is used by the GCC driver
|
|
|
|
|
+ program.
|
|
|
|
|
+
|
|
|
|
|
+ Do not define this macro if it does not need to do anything. */
|
|
|
|
|
+
|
|
|
|
|
+#define EXTRA_SPECS \
|
|
|
|
|
+ { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT }, \
|
|
|
|
|
+ SUBTARGET_EXTRA_SPECS
|
|
|
|
|
+
|
|
|
|
|
+#ifndef SUBTARGET_EXTRA_SPECS
|
|
|
|
|
+#define SUBTARGET_EXTRA_SPECS
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#define TARGET_DEFAULT_CMODEL CM_MEDLOW
|
|
|
|
|
+
|
|
|
|
|
+/* By default, turn on GDB extensions. */
|
|
|
|
|
+#define DEFAULT_GDB_EXTENSIONS 1
|
|
|
|
|
+
|
|
|
|
|
+#define LOCAL_LABEL_PREFIX "."
|
|
|
|
|
+#define USER_LABEL_PREFIX ""
|
|
|
|
|
+
|
|
|
|
|
+#define DWARF2_DEBUGGING_INFO 1
|
|
|
|
|
+#define DWARF2_ASM_LINE_DEBUG_INFO 0
|
|
|
|
|
+
|
|
|
|
|
+/* The mapping from gcc register number to DWARF 2 CFA column number. */
|
|
|
|
|
+#define DWARF_FRAME_REGNUM(REGNO) \
|
|
|
|
|
+ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
|
|
|
|
|
+
|
|
|
|
|
+/* The DWARF 2 CFA column which tracks the return address. */
|
|
|
|
|
+#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
|
|
|
|
|
+
|
|
|
|
|
+/* Don't emit .cfi_sections, as it does not work.  */
|
|
|
|
|
+#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
|
|
|
|
|
+#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
|
|
|
|
|
+
|
|
|
|
|
+/* Before the prologue, the return address lives in RETURN_ADDR_REGNUM (ra).  */
|
|
|
|
|
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
|
|
|
|
|
+
|
|
|
|
|
+/* Describe how we implement __builtin_eh_return. */
|
|
|
|
|
+#define EH_RETURN_DATA_REGNO(N) \
|
|
|
|
|
+ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
|
|
|
|
|
+
|
|
|
|
|
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
|
|
|
|
|
+
|
|
|
|
|
+/* Target machine storage layout */
|
|
|
|
|
+
|
|
|
|
|
+#define BITS_BIG_ENDIAN 0
|
|
|
|
|
+#define BYTES_BIG_ENDIAN 0
|
|
|
|
|
+#define WORDS_BIG_ENDIAN 0
|
|
|
|
|
+
|
|
|
|
|
+#define MAX_BITS_PER_WORD 64
|
|
|
|
|
+
|
|
|
|
|
+/* Width of a word, in units (bytes). */
|
|
|
|
|
+#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
|
|
|
|
|
+#ifndef IN_LIBGCC2
|
|
|
|
|
+#define MIN_UNITS_PER_WORD 4
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+/* We currently require both or neither of the `F' and `D' extensions. */
|
|
|
|
|
+#define UNITS_PER_FPREG 8
|
|
|
|
|
+
|
|
|
|
|
+/* If FP regs aren't wide enough for a given FP argument, it is passed in
|
|
|
|
|
+ integer registers. */
|
|
|
|
|
+#define MIN_FPRS_PER_FMT 1
|
|
|
|
|
+
|
|
|
|
|
+/* The largest size of value that can be held in floating-point
|
|
|
|
|
+ registers and moved with a single instruction. */
|
|
|
|
|
+#define UNITS_PER_HWFPVALUE \
|
|
|
|
|
+ (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG)
|
|
|
|
|
+
|
|
|
|
|
+/* The largest size of value that can be held in floating-point
|
|
|
|
|
+ registers. */
|
|
|
|
|
+#define UNITS_PER_FPVALUE \
|
|
|
|
|
+ (TARGET_SOFT_FLOAT_ABI ? 0 \
|
|
|
|
|
+ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
|
|
|
|
|
+
|
|
|
|
|
+/* The number of bytes in a double. */
|
|
|
|
|
+#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
|
|
|
|
|
+
|
|
|
|
|
+/* Set the sizes of the core types. */
|
|
|
|
|
+#define SHORT_TYPE_SIZE 16
|
|
|
|
|
+#define INT_TYPE_SIZE 32
|
|
|
|
|
+#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
|
|
|
|
|
+#define LONG_LONG_TYPE_SIZE 64
|
|
|
|
|
+
|
|
|
|
|
+#define FLOAT_TYPE_SIZE 32
|
|
|
|
|
+#define DOUBLE_TYPE_SIZE 64
|
|
|
|
|
+/* XXX The ABI says long doubles are IEEE-754-2008 float128s. */
|
|
|
|
|
+#define LONG_DOUBLE_TYPE_SIZE 64
|
|
|
|
|
+
|
|
|
|
|
+#ifdef IN_LIBGCC2
|
|
|
|
|
+# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
|
|
|
|
|
+#define PARM_BOUNDARY BITS_PER_WORD
|
|
|
|
|
+
|
|
|
|
|
+/* Allocation boundary (in *bits*) for the code of a function. */
|
|
|
|
|
+#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
|
|
|
|
|
+
|
|
|
|
|
+/* There is no point aligning anything to a rounder boundary than this. */
|
|
|
|
|
+#define BIGGEST_ALIGNMENT 128
|
|
|
|
|
+
|
|
|
|
|
+/* All accesses must be aligned. */
|
|
|
|
|
+#define STRICT_ALIGNMENT 1
|
|
|
|
|
+
|
|
|
|
|
+/* Define this if you wish to imitate the way many other C compilers
|
|
|
|
|
+ handle alignment of bitfields and the structures that contain
|
|
|
|
|
+ them.
|
|
|
|
|
+
|
|
|
|
|
+ The behavior is that the type written for a bit-field (`int',
|
|
|
|
|
+ `short', or other integer type) imposes an alignment for the
|
|
|
|
|
+ entire structure, as if the structure really did contain an
|
|
|
|
|
+ ordinary field of that type. In addition, the bit-field is placed
|
|
|
|
|
+ within the structure so that it would fit within such a field,
|
|
|
|
|
+ not crossing a boundary for it.
|
|
|
|
|
+
|
|
|
|
|
+ Thus, on most machines, a bit-field whose type is written as `int'
|
|
|
|
|
+ would not cross a four-byte boundary, and would force four-byte
|
|
|
|
|
+ alignment for the whole structure. (The alignment used may not
|
|
|
|
|
+ be four bytes; it is controlled by the other alignment
|
|
|
|
|
+ parameters.)
|
|
|
|
|
+
|
|
|
|
|
+ If the macro is defined, its definition should be a C expression;
|
|
|
|
|
+ a nonzero value for the expression enables this behavior. */
|
|
|
|
|
+
|
|
|
|
|
+#define PCC_BITFIELD_TYPE_MATTERS 1
|
|
|
|
|
+
|
|
|
|
|
+/* If defined, a C expression to compute the alignment given to a
|
|
|
|
|
+ constant that is being placed in memory. CONSTANT is the constant
|
|
|
|
|
+ and ALIGN is the alignment that the object would ordinarily have.
|
|
|
|
|
+ The value of this macro is used instead of that alignment to align
|
|
|
|
|
+ the object.
|
|
|
|
|
+
|
|
|
|
|
+ If this macro is not defined, then ALIGN is used.
|
|
|
|
|
+
|
|
|
|
|
+ The typical use of this macro is to increase alignment for string
|
|
|
|
|
+ constants to be word aligned so that `strcpy' calls that copy
|
|
|
|
|
+ constants can be done inline. */
|
|
|
|
|
+
|
|
|
|
|
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
|
|
|
|
|
+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
|
|
|
|
|
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
|
|
|
|
|
+
|
|
|
|
|
+/* If defined, a C expression to compute the alignment for a static
|
|
|
|
|
+ variable. TYPE is the data type, and ALIGN is the alignment that
|
|
|
|
|
+ the object would ordinarily have. The value of this macro is used
|
|
|
|
|
+ instead of that alignment to align the object.
|
|
|
|
|
+
|
|
|
|
|
+ If this macro is not defined, then ALIGN is used.
|
|
|
|
|
+
|
|
|
|
|
+ One use of this macro is to increase alignment of medium-size
|
|
|
|
|
+ data to make it all fit in fewer cache lines. Another is to
|
|
|
|
|
+ cause character arrays to be word-aligned so that `strcpy' calls
|
|
|
|
|
+ that copy constants to character arrays can be done inline. */
|
|
|
|
|
+
|
|
|
|
|
+#undef DATA_ALIGNMENT
|
|
|
|
|
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
|
|
|
|
|
+ ((((ALIGN) < BITS_PER_WORD) \
|
|
|
|
|
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
|
|
|
|
|
+ || TREE_CODE (TYPE) == UNION_TYPE \
|
|
|
|
|
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
|
|
|
|
|
+
|
|
|
|
|
+/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
|
|
|
|
|
+ character arrays to be word-aligned so that `strcpy' calls that copy
|
|
|
|
|
+ constants to character arrays can be done inline, and 'strcmp' can be
|
|
|
|
|
+ optimised to use word loads. */
|
|
|
|
|
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
|
|
|
|
|
+ DATA_ALIGNMENT (TYPE, ALIGN)
|
|
|
|
|
+
|
|
|
|
|
+/* Define if operations between registers always perform the operation
|
|
|
|
|
+ on the full register even if a narrower mode is specified. */
|
|
|
|
|
+#define WORD_REGISTER_OPERATIONS
|
|
|
|
|
+
|
|
|
|
|
+/* When in 64-bit mode, move insns will sign extend SImode and CCmode
|
|
|
|
|
+ moves. All other references are zero extended. */
|
|
|
|
|
+#define LOAD_EXTEND_OP(MODE) \
|
|
|
|
|
+ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
|
|
|
|
|
+ ? SIGN_EXTEND : ZERO_EXTEND)
|
|
|
|
|
+
|
|
|
|
|
+/* Define this macro if it is advisable to hold scalars in registers
|
|
|
|
|
+ in a wider mode than that declared by the program. In such cases,
|
|
|
|
|
+ the value is constrained to be within the bounds of the declared
|
|
|
|
|
+ type, but kept valid in the wider mode. The signedness of the
|
|
|
|
|
+ extension may differ from that of the type. */
|
|
|
|
|
+
|
|
|
|
|
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
|
|
|
|
|
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
|
|
|
|
|
+ && GET_MODE_SIZE (MODE) < 4) \
|
|
|
|
|
+ { \
|
|
|
|
|
+ (MODE) = Pmode; \
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
|
|
|
|
|
+ Extensions of pointers to word_mode must be signed. */
|
|
|
|
|
+#define POINTERS_EXTEND_UNSIGNED false
|
|
|
|
|
+
|
|
|
|
|
+/* RV32 double-precision FP <-> integer moves go through memory.  */
|
|
|
|
|
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
|
|
|
|
|
+ (!TARGET_64BIT && GET_MODE_SIZE (MODE) == 8 && \
|
|
|
|
|
+ (((CLASS1) == FP_REGS && (CLASS2) != FP_REGS) \
|
|
|
|
|
+ || ((CLASS2) == FP_REGS && (CLASS1) != FP_REGS)))
|
|
|
|
|
+
|
|
|
|
|
+/* Define if loading short immediate values into registers sign extends. */
|
|
|
|
|
+#define SHORT_IMMEDIATES_SIGN_EXTEND
|
|
|
|
|
+
|
|
|
|
|
+/* Standard register usage. */
|
|
|
|
|
+
|
|
|
|
|
+/* Number of hardware registers. We have:
|
|
|
|
|
+
|
|
|
|
|
+ - 32 integer registers
|
|
|
|
|
+ - 32 floating point registers
|
|
|
|
|
|
|
|
|
|
+ - 2 fake registers:
|
|
|
|
|
+ - ARG_POINTER_REGNUM
|
|
|
|
|
+ - FRAME_POINTER_REGNUM */
|
|
|
|
|
+
|
|
|
|
|
+#define FIRST_PSEUDO_REGISTER 66
|
|
|
|
|
+
|
|
|
|
|
+/* x0, sp, gp, and tp are fixed. */
|
|
|
|
|
+
|
|
|
|
|
+#define FIXED_REGISTERS \
|
|
|
|
|
+{ /* General registers. */ \
|
|
|
|
|
+ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
|
|
|
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
|
|
|
|
|
+ /* Floating-point registers. */ \
|
|
|
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
|
|
|
|
|
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
|
|
|
|
|
+ /* Others. */ \
|
|
|
|
|
+ 1, 1 \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
|
|
|
|
|
+ The call RTLs themselves clobber ra. */
|
|
|
|
|
+
|
|
|
|
|
+#define CALL_USED_REGISTERS \
|
|
|
|
|
+{ /* General registers. */ \
|
|
|
|
|
+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
|
|
|
|
|
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
|
|
|
|
|
+ /* Floating-point registers. */ \
|
|
|
|
|
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
|
|
|
|
|
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
|
|
|
|
|
+ /* Others. */ \
|
|
|
|
|
+ 1, 1 \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+#define CALL_REALLY_USED_REGISTERS \
|
|
|
|
|
+{ /* General registers. */ \
|
|
|
|
|
+ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
|
|
|
|
|
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
|
|
|
|
|
+ /* Floating-point registers. */ \
|
|
|
|
|
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
|
|
|
|
|
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
|
|
|
|
|
+ /* Others. */ \
|
|
|
|
|
+ 1, 1 \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Internal macros to classify an ISA register's type. */
|
|
|
|
|
+
|
|
|
|
|
+#define GP_REG_FIRST 0
|
|
|
|
|
+#define GP_REG_LAST 31
|
|
|
|
|
+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
|
|
|
|
|
+
|
|
|
|
|
+#define FP_REG_FIRST 32
|
|
|
|
|
+#define FP_REG_LAST 63
|
|
|
|
|
+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
|
|
|
|
|
+
|
|
|
|
|
+/* The DWARF 2 CFA column which tracks the return address from a
|
|
|
|
|
+ signal handler context. This means that to maintain backwards
|
|
|
|
|
+ compatibility, no hard register can be assigned this column if it
|
|
|
|
|
+ would need to be handled by the DWARF unwinder. */
|
|
|
|
|
+#define DWARF_ALT_FRAME_RETURN_COLUMN 64
|
|
|
|
|
+
|
|
|
|
|
+#define GP_REG_P(REGNO) \
|
|
|
|
|
+ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
|
|
|
|
|
+#define FP_REG_P(REGNO) \
|
|
|
|
|
+ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
|
|
|
|
|
+
|
|
|
|
|
+#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
|
|
|
|
|
+
|
|
|
|
|
+/* Return coprocessor number from register number. */
|
|
|
|
|
+
|
|
|
|
|
+#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
|
|
|
|
|
+ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
|
|
|
|
|
+ : COP3_REG_P (REGNO) ? '3' : '?')
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
|
|
|
|
|
+
|
|
|
|
|
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
|
|
|
|
|
+ riscv_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
|
|
|
|
|
+
|
|
|
|
|
+#define MODES_TIEABLE_P(MODE1, MODE2) \
|
|
|
|
|
+ ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT \
|
|
|
|
|
+ && GET_MODE_CLASS (MODE2) == MODE_INT))
|
|
|
|
|
+
|
|
|
|
|
+/* Use s0 as the frame pointer if it is so requested. */
|
|
|
|
|
+#define HARD_FRAME_POINTER_REGNUM 8
|
|
|
|
|
+#define STACK_POINTER_REGNUM 2
|
|
|
|
|
+#define THREAD_POINTER_REGNUM 4
|
|
|
|
|
+
|
|
|
|
|
+/* These two registers don't really exist: they get eliminated to either
|
|
|
|
|
+ the stack or hard frame pointer. */
|
|
|
|
|
+#define ARG_POINTER_REGNUM 64
|
|
|
|
|
+#define FRAME_POINTER_REGNUM 65
|
|
|
|
|
+
|
|
|
|
|
+#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
|
|
|
|
|
+#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
|
|
|
|
|
+
|
|
|
|
|
+/* Register in which static-chain is passed to a function. */
|
|
|
|
|
+#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
|
|
|
|
|
+
|
|
|
|
|
+/* Registers used as temporaries in prologue/epilogue code.
|
|
|
|
|
+
|
|
|
|
|
+ The prologue registers mustn't conflict with any
|
|
|
|
|
+ incoming arguments, the static chain pointer, or the frame pointer.
|
|
|
|
|
+ The epilogue temporary mustn't conflict with the return registers,
|
|
|
|
|
+ the frame pointer, the EH stack adjustment, or the EH data registers. */
|
|
|
|
|
+
|
|
|
|
|
+#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
|
|
|
|
|
+#define RISCV_EPILOGUE_TEMP_REGNUM RISCV_PROLOGUE_TEMP_REGNUM
|
|
|
|
|
+
|
|
|
|
|
+#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
|
|
|
|
|
+#define RISCV_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_EPILOGUE_TEMP_REGNUM)
|
|
|
|
|
+
|
|
|
|
|
+#define FUNCTION_PROFILER(STREAM, LABELNO) \
|
|
|
|
|
+{ \
|
|
|
|
|
+ sorry ("profiler support for RISC-V"); \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* Define this macro if it is as good or better to call a constant
|
|
|
|
|
+ function address than to call an address kept in a register. */
|
|
|
|
|
+#define NO_FUNCTION_CSE 1
|
|
|
|
|
+
|
|
|
|
|
+/* Define the classes of registers for register constraints in the
|
|
|
|
|
+ machine description. Also define ranges of constants.
|
|
|
|
|
+
|
|
|
|
|
+ One of the classes must always be named ALL_REGS and include all hard regs.
|
|
|
|
|
+ If there is more than one class, another class must be named NO_REGS
|
|
|
|
|
+ and contain no registers.
|
|
|
|
|
+
|
|
|
|
|
+ The name GENERAL_REGS must be the name of a class (or an alias for
|
|
|
|
|
+ another name such as ALL_REGS). This is the class of registers
|
|
|
|
|
+ that is allowed by "g" or "r" in a register constraint.
|
|
|
|
|
+ Also, registers outside this class are allocated only when
|
|
|
|
|
+ instructions express preferences for them.
|
|
|
|
|
+
|
|
|
|
|
+ The classes must be numbered in nondecreasing order; that is,
|
|
|
|
|
+ a larger-numbered class must never be contained completely
|
|
|
|
|
+ in a smaller-numbered class.
|
|
|
|
|
+
|
|
|
|
|
+ For any two classes, it is very desirable that there be another
|
|
|
|
|
+ class that represents their union. */
|
|
|
|
|
+
|
|
|
|
|
+enum reg_class
|
|
|
|
|
+{
|
|
|
|
|
+ NO_REGS, /* no registers in set */
|
|
|
|
|
+ T_REGS, /* registers used by indirect sibcalls */
|
|
|
|
|
+ GR_REGS, /* integer registers */
|
|
|
|
|
+ FP_REGS, /* floating point registers */
|
|
|
|
|
+ FRAME_REGS, /* $arg and $frame */
|
|
|
|
|
+ ALL_REGS, /* all registers */
|
|
|
|
|
+ LIM_REG_CLASSES /* max value + 1 */
|
|
|
|
|
+};
|
|
|
|
|
+
|
|
|
|
|
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
|
|
|
|
|
+
|
|
|
|
|
+#define GENERAL_REGS GR_REGS
|
|
|
|
|
+
|
|
|
|
|
+/* An initializer containing the names of the register classes as C
|
|
|
|
|
+ string constants. These names are used in writing some of the
|
|
|
|
|
+ debugging dumps. */
|
|
|
|
|
+
|
|
|
|
|
+#define REG_CLASS_NAMES \
|
|
|
|
|
+{ \
|
|
|
|
|
+ "NO_REGS", \
|
|
|
|
|
+ "T_REGS", \
|
|
|
|
|
+ "GR_REGS", \
|
|
|
|
|
+ "FP_REGS", \
|
|
|
|
|
+ "FRAME_REGS", \
|
|
|
|
|
+ "ALL_REGS" \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* An initializer containing the contents of the register classes,
|
|
|
|
|
+ as integers which are bit masks. The Nth integer specifies the
|
|
|
|
|
+ contents of class N. The way the integer MASK is interpreted is
|
|
|
|
|
+ that register R is in the class if `MASK & (1 << R)' is 1.
|
|
|
|
|
+
|
|
|
|
|
+ When the machine has more than 32 registers, an integer does not
|
|
|
|
|
+ suffice. Then the integers are replaced by sub-initializers,
|
|
|
|
|
+ braced groupings containing several integers. Each
|
|
|
|
|
+ sub-initializer must be suitable as an initializer for the type
|
|
|
|
|
+ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
|
|
|
|
|
+
|
|
|
|
|
+#define REG_CLASS_CONTENTS \
|
|
|
|
|
+{ \
|
|
|
|
|
+ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
|
|
|
|
|
+ { 0xf00000e0, 0x00000000, 0x00000000 }, /* T_REGS */ \
|
|
|
|
|
+ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
|
|
|
|
|
+ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
|
|
|
|
|
+ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
|
|
|
|
|
+ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* A C expression whose value is a register class containing hard
|
|
|
|
|
+   register REGNO.  In general there is more than one such class;
|
|
|
|
|
+ choose a class which is "minimal", meaning that no smaller class
|
|
|
|
|
+ also contains the register. */
|
|
|
|
|
+
|
|
|
|
|
+#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
|
|
|
|
|
+
|
|
|
|
|
+/* A macro whose definition is the name of the class to which a
|
|
|
|
|
+ valid base register must belong. A base register is one used in
|
|
|
|
|
+ an address which is the register value plus a displacement. */
|
|
|
|
|
+
|
|
|
|
|
+#define BASE_REG_CLASS GR_REGS
|
|
|
|
|
+
|
|
|
|
|
+/* A macro whose definition is the name of the class to which a
|
|
|
|
|
+ valid index register must belong. An index register is one used
|
|
|
|
|
+ in an address where its value is either multiplied by a scale
|
|
|
|
|
+ factor or added to another register (as well as added to a
|
|
|
|
|
+ displacement). */
|
|
|
|
|
+
|
|
|
|
|
+#define INDEX_REG_CLASS NO_REGS
|
|
|
|
|
+
|
|
|
|
|
+/* We generally want to put call-clobbered registers ahead of
|
|
|
|
|
+ call-saved ones. (IRA expects this.) */
|
|
|
|
|
+
|
|
|
|
|
+#define REG_ALLOC_ORDER \
|
|
|
|
|
+{ \
|
|
|
|
|
+ /* Call-clobbered GPRs. */ \
|
|
|
|
|
+ 15, 14, 13, 12, 11, 10, 16, 17, 5, 6, 7, 28, 29, 30, 31, 1, \
|
|
|
|
|
+ /* Call-saved GPRs. */ \
|
|
|
|
|
+ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
|
|
|
|
|
+ /* GPRs that can never be exposed to the register allocator. */ \
|
|
|
|
|
+ 0, 2, 3, 4, \
|
|
|
|
|
+ /* Call-clobbered FPRs. */ \
|
|
|
|
|
+ 32, 33, 34, 35, 36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49, \
|
|
|
|
|
+ 60, 61, 62, 63, \
|
|
|
|
|
+ /* Call-saved FPRs. */ \
|
|
|
|
|
+ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
|
|
|
|
|
+ /* None of the remaining classes have defined call-saved \
|
|
|
|
|
+ registers. */ \
|
|
|
|
|
+ 64, 65 \
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+/* True if VALUE fits in a signed 12-bit (I-type) immediate.  */
|
|
|
|
|
+
|
|
|
|
|
+#include "opcode-riscv.h"
|
|
|
|
|
+#define SMALL_OPERAND(VALUE) \
|
|
|
|
|
+ ((unsigned HOST_WIDE_INT) (VALUE) + RISCV_IMM_REACH/2 < RISCV_IMM_REACH)
|
|
|
|
|
+
|
|
|
|
|
+/* True if VALUE can be loaded into a register using LUI. */
|
|
|
|
|
+
|
|
|
|
|
+#define LUI_OPERAND(VALUE) \
|
|
|
|
|
+ (((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) == ((1UL<<31) - RISCV_IMM_REACH) \
|
|
|
|
|
+ || ((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) + RISCV_IMM_REACH == 0)
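
SMALL_OPERAND and LUI_OPERAND encode the reach of the I-type and U-type immediates. The following standalone sketch restates the same arithmetic outside GCC; it is illustrative only and assumes RISCV_IMM_REACH is 1 << 12 (the 12-bit signed immediate range, which comes from the included opcode-riscv.h and is not shown in this patch):

    /* Illustrative restatement of SMALL_OPERAND/LUI_OPERAND, assuming
       RISCV_IMM_REACH == 1 << 12 (12-bit signed immediates) and a
       64-bit host, as GCC's HOST_WIDE_INT would be here.  */
    #include <stdio.h>

    #define IMM_REACH (1UL << 12)

    static int small_operand (long v)
    {
      /* True for -2048 .. 2047, the addi/load-style immediate range.  */
      return (unsigned long) v + IMM_REACH / 2 < IMM_REACH;
    }

    static int lui_operand (long v)
    {
      /* True when the low 12 bits are zero and the value fits the
         sign-extended 20-bit LUI immediate.  */
      return (v | ((1UL << 31) - IMM_REACH)) == ((1UL << 31) - IMM_REACH)
             || (v | ((1UL << 31) - IMM_REACH)) + IMM_REACH == 0;
    }

    int main (void)
    {
      printf ("%d %d\n", small_operand (2047), small_operand (2048));     /* 1 0 */
      printf ("%d %d\n", lui_operand (0x12345000L), lui_operand (0x1234L)); /* 1 0 */
      return 0;
    }
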
|
|
|
|
|
+
|
|
|
|
|
+/* Versions of the above predicates that take a CONST_INT rtx
+   rather than a raw HOST_WIDE_INT value.  */
|
|
|
|
|
+
|
|
|
|
|
+#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X))
|
|
|
|
|
+#define LUI_INT(X) LUI_OPERAND (INTVAL (X))
|
|
|
|
|
+
|
|
|
|
|
+/* Some reloads need an intermediate register.  Defer to
+   riscv_secondary_reload_class to choose the class, if any, required
+   for a given input or output reload.  */
|
|
|
|
|
+
|
|
|
|
|
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
|
|
|
|
|
+ riscv_secondary_reload_class (CLASS, MODE, X, true)
|
|
|
|
|
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
|
|
|
|
|
+ riscv_secondary_reload_class (CLASS, MODE, X, false)
|
|
|
|
|
+
|
|
|
|
|
+/* Return the maximum number of consecutive registers
|
|
|
|
|
+ needed to represent mode MODE in a register of class CLASS. */
|
|
|
|
|
+
|
|
|
|
|
+#define CLASS_MAX_NREGS(CLASS, MODE) riscv_class_max_nregs (CLASS, MODE)
|
|
|
|
|
+
|
|
|
|
|
+/* It is undefined to interpret an FP register in a different format than
|
|
|
|
|
+ that which it was created to be. */
|
|
|
|
|
+
|
|
|
|
|
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
|
|
|
|
|
+ reg_classes_intersect_p (FP_REGS, CLASS)
|
|
|
|
|
+
|
|
|
|
|
+/* Stack layout; function entry, exit and calling. */
|
|
|
|
|
+
|
|
|
|
|
+#define STACK_GROWS_DOWNWARD
|
|
|
|
|
+
|
|
|
|
|
+#define FRAME_GROWS_DOWNWARD 1
|
|
|
|
|
+
|
|
|
|
|
+#define STARTING_FRAME_OFFSET 0
|
|
|
|
|
+
|
|
|
|
|
+#define RETURN_ADDR_RTX riscv_return_addr
|
|
|
|
|
+
|
|
|
|
|
+#define ELIMINABLE_REGS \
|
|
|
|
|
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
|
|
|
|
|
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
|
|
|
|
|
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
|
|
|
|
|
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
|
|
|
|
|
+
|
|
|
|
|
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
|
|
|
|
|
+ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
|
|
|
|
|
+
|
|
|
|
|
+/* Allocate stack space for arguments at the beginning of each function. */
|
|
|
|
|
+#define ACCUMULATE_OUTGOING_ARGS 1
|
|
|
|
|
+
|
|
|
|
|
+/* The argument pointer always points to the first argument. */
|
|
|
|
|
+#define FIRST_PARM_OFFSET(FNDECL) 0
|
|
|
|
|
+
|
|
|
|
|
+#define REG_PARM_STACK_SPACE(FNDECL) 0
|
|
|
|
|
+
|
|
|
|
|
+/* Define this if it is the responsibility of the caller to
|
|
|
|
|
+ allocate the area reserved for arguments passed in registers.
|
|
|
|
|
+ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
|
|
|
|
|
+ of this macro is to determine whether the space is included in
|
|
|
|
|
+ `crtl->outgoing_args_size'. */
|
|
|
|
|
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
|
|
|
|
|
+
|
|
|
|
|
+#define STACK_BOUNDARY 128
|
|
|
|
|
+
|
|
|
|
|
+/* Symbolic macros for the registers used to return integer and floating
|
|
|
|
|
+ point values. */
|
|
|
|
|
+
|
|
|
|
|
+#define GP_RETURN GP_ARG_FIRST
|
|
|
|
|
+#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : FP_ARG_FIRST)
|
|
|
|
|
+
|
|
|
|
|
+#define MAX_ARGS_IN_REGISTERS 8
|
|
|
|
|
+
|
|
|
|
|
+/* Symbolic macros for the first/last argument registers. */
|
|
|
|
|
+
|
|
|
|
|
+#define GP_ARG_FIRST (GP_REG_FIRST + 10)
|
|
|
|
|
+#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
|
|
|
|
|
+#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
|
|
|
|
|
+#define FP_ARG_FIRST (FP_REG_FIRST + 10)
|
|
|
|
|
+#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
|
|
|
|
|
+
|
|
|
|
|
+#define CALLEE_SAVED_REG_NUMBER(REGNO) \
|
|
|
|
|
+ ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 : \
|
|
|
|
|
+ (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
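
CALLEE_SAVED_REG_NUMBER maps the callee-saved GPRs onto a dense index: s0/s1 (x8/x9) become 0/1 and s2-s11 (x18-x27) become 2-11, with -1 for anything else. A small illustrative sketch of that mapping (not part of the patch):

    /* Illustrative only: the dense index CALLEE_SAVED_REG_NUMBER computes
       for the callee-saved registers s0-s11.  */
    #include <stdio.h>

    static int callee_saved_reg_number (int regno)
    {
      if (regno >= 8 && regno <= 9)
        return regno - 8;        /* s0, s1  -> 0, 1 */
      if (regno >= 18 && regno <= 27)
        return regno - 16;       /* s2..s11 -> 2..11 */
      return -1;                 /* not a callee-saved GPR */
    }

    int main (void)
    {
      printf ("x8 -> %d, x18 -> %d, x10 -> %d\n",
              callee_saved_reg_number (8),
              callee_saved_reg_number (18),
              callee_saved_reg_number (10));   /* 0, 2, -1 */
      return 0;
    }
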
|
|
|
|
|
+
|
|
|
|
|
+#define LIBCALL_VALUE(MODE) \
|
|
|
|
|
+ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
|
|
|
|
|
+
|
|
|
|
|
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
|
|
|
|
|
+ riscv_function_value (VALTYPE, FUNC, VOIDmode)
|
|
|
|
|
+
|
|
|
|
|
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
|
|
|
|
|
+
|
|
|
|
|
+/* 1 if N is a possible register number for function argument passing.
|
|
|
|
|
+   We have no FP argument registers when soft-float.  */
|
|
|
|
|
+
|
|
|
|
|
+/* Accept arguments in a0-a7 and/or fa0-fa7. */
|
|
|
|
|
+#define FUNCTION_ARG_REGNO_P(N) \
|
|
|
|
|
+ (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
|
|
|
|
|
+ || IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))
|
|
|
|
|
+
|
|
|
|
|
+/* The ABI views the arguments as a structure, of which the first 8
|
|
|
|
|
+   words go in registers and the rest go on the stack.  If I < 8, the Ith
|
|
|
|
|
+ word might go in the Ith integer argument register or the Ith
|
|
|
|
|
+ floating-point argument register. */
|
|
|
|
|
+
|
|
|
|
|
+typedef struct {
|
|
|
|
|
+ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
|
|
|
|
|
+ unsigned int num_gprs;
|
|
|
|
|
+
|
|
|
|
|
+ /* Number of words passed on the stack. */
|
|
|
|
|
+ unsigned int stack_words;
|
|
|
|
|
+} CUMULATIVE_ARGS;
|
|
|
|
|
+
|
|
|
|
|
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
|
|
|
|
|
+ for a call to a function whose data type is FNTYPE.
|
|
|
|
|
+ For a library call, FNTYPE is 0. */
|
|
|
|
|
+
|
|
|
|
|
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
|
|
|
|
|
+ memset (&(CUM), 0, sizeof (CUM))
|
|
|
|
|
+
|
|
|
|
|
+#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
|
|
|
|
|
+
|
|
|
|
|
+/* The ABI requires 16-byte stack alignment, even on RV32.  */
|
|
|
|
|
+#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
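
RISCV_STACK_ALIGN rounds a byte count up to the 16-byte stack alignment required by the ABI. A tiny illustrative check of the arithmetic (not part of the patch):

    /* Illustrative only: RISCV_STACK_ALIGN rounds sizes up to 16 bytes.  */
    #include <stdio.h>

    #define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)

    int main (void)
    {
      printf ("%d %d %d\n",
              RISCV_STACK_ALIGN (1),    /* 16 */
              RISCV_STACK_ALIGN (16),   /* 16 */
              RISCV_STACK_ALIGN (17));  /* 32 */
      return 0;
    }
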
|
|
|
|
|
+
|
|
|
|
|
+#define NO_PROFILE_COUNTERS 1
|
|
|
|
|
+
|
|
|
|
|
+/* Define this macro if the code for function profiling should come
|
|
|
|
|
+ before the function prologue. Normally, the profiling code comes
|
|
|
|
|
+ after. */
|
|
|
|
|
+
|
|
|
|
|
+/* #define PROFILE_BEFORE_PROLOGUE */
|
|
|
|
|
+
|
|
|
|
|
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
|
|
|
|
|
+ the stack pointer does not matter. The value is tested only in
|
|
|
|
|
+ functions that have frame pointers.
|
|
|
|
|
+ No definition is equivalent to always zero. */
|
|
|
|
|
+
|
|
|
|
|
+#define EXIT_IGNORE_STACK 1
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Trampolines are a block of code followed by two pointers. */
|
|
|
|
|
+
|
|
|
|
|
+#define TRAMPOLINE_CODE_SIZE 16
|
|
|
|
|
+#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
|
|
|
|
|
+#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
|
|
|
|
|
+
|
|
|
|
|
+/* Addressing modes, and classification of registers for them. */
|
|
|
|
|
+
|
|
|
|
|
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
|
|
|
|
|
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
|
|
|
|
|
+ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
|
|
|
|
|
+
|
|
|
|
|
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
|
|
|
|
|
+ and check its validity for a certain class.
|
|
|
|
|
+ We have two alternate definitions for each of them.
|
|
|
|
|
+ The usual definition accepts all pseudo regs; the other rejects them all.
|
|
|
|
|
+ The symbol REG_OK_STRICT causes the latter definition to be used.
|
|
|
|
|
+
|
|
|
|
|
+ Most source files want to accept pseudo regs in the hope that
|
|
|
|
|
+ they will get allocated to the class that the insn wants them to be in.
|
|
|
|
|
+ Some source files that are used after register allocation
|
|
|
|
|
+ need to be strict. */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef REG_OK_STRICT
|
|
|
|
|
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
|
|
|
|
|
+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
|
|
|
|
|
+#else
|
|
|
|
|
+#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
|
|
|
|
|
+ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#define REG_OK_FOR_INDEX_P(X) 0
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+/* Maximum number of registers that can appear in a valid memory address. */
|
|
|
|
|
+
|
|
|
|
|
+#define MAX_REGS_PER_ADDRESS 1
|
|
|
|
|
+
|
|
|
|
|
+#define CONSTANT_ADDRESS_P(X) \
|
|
|
|
|
+ (CONSTANT_P (X) && memory_address_p (SImode, X))
|
|
|
|
|
+
|
|
|
|
|
+/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
|
|
|
|
|
+ 'the start of the function that this code is output in'. */
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
|
|
|
|
|
+ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
|
|
|
|
|
+ asm_fprintf ((FILE), "%U%s", \
|
|
|
|
|
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
|
|
|
|
|
+ else \
|
|
|
|
|
+ asm_fprintf ((FILE), "%U%s", (NAME))
|
|
|
|
|
+
|
|
|
|
|
+/* This flag marks functions that cannot be lazily bound. */
|
|
|
|
|
+#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
|
|
|
|
|
+#define SYMBOL_REF_BIND_NOW_P(RTX) \
|
|
|
|
|
+ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
|
|
|
|
|
+
|
|
|
|
|
+#define JUMP_TABLES_IN_TEXT_SECTION 0
|
|
|
|
|
+#define CASE_VECTOR_MODE SImode
|
|
|
|
|
+#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
|
|
|
|
|
+
|
|
|
|
|
+/* Define this as 1 if `char' should by default be signed; else as 0. */
|
|
|
|
|
+#define DEFAULT_SIGNED_CHAR 0
|
|
|
|
|
+
|
|
|
|
|
+/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
|
|
|
|
|
+#define MOVE_MAX UNITS_PER_WORD
|
|
|
|
|
+#define MAX_MOVE_MAX 8
|
|
|
|
|
+
|
|
|
|
|
+#define SLOW_BYTE_ACCESS 0
|
|
|
|
|
+
|
|
|
|
|
+#define SHIFT_COUNT_TRUNCATED 1
|
|
|
|
|
+
|
|
|
|
|
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
|
|
|
|
|
+ is done just by pretending it is already truncated. */
|
|
|
|
|
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
|
|
|
|
|
+ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
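
Under this definition, on RV64 a truncation is a no-op unless it narrows a value wider than 32 bits down to exactly 32 bits (SImode values are kept sign-extended, matching LOAD_EXTEND_OP above). An illustrative restatement (not part of the patch):

    /* Illustrative only: which truncations the RV64 branch of the macro
       above treats as no-ops.  */
    #include <stdio.h>

    static int truly_noop_truncation_rv64 (int outprec, int inprec)
    {
      return inprec <= 32 || outprec < 32;
    }

    int main (void)
    {
      printf ("64->32: %d\n", truly_noop_truncation_rv64 (32, 64));  /* 0 */
      printf ("64->16: %d\n", truly_noop_truncation_rv64 (16, 64));  /* 1 */
      printf ("32->16: %d\n", truly_noop_truncation_rv64 (16, 32));  /* 1 */
      return 0;
    }
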
|
|
|
|
|
+
|
|
|
|
|
+/* Specify the machine mode that pointers have.
|
|
|
|
|
+ After generation of rtl, the compiler makes no further distinction
|
|
|
|
|
+ between pointers and any other objects of this machine mode. */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef Pmode
|
|
|
|
|
+#define Pmode (TARGET_64BIT ? DImode : SImode)
+#endif
+
|
|
|
|
|
+/* Give call MEMs SImode since it is the "most permissive" mode
|
|
|
|
|
+ for both 32-bit and 64-bit targets. */
|
|
|
|
|
+
|
|
|
|
|
+#define FUNCTION_MODE SImode
|
|
|
|
|
+
|
|
|
|
|
+/* A C expression for the cost of a branch instruction. A value of 2
|
|
|
|
|
+ seems to minimize code size. */
|
|
|
|
|
+
|
|
|
|
|
+#define BRANCH_COST(speed_p, predictable_p) \
|
|
|
|
|
+ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
|
|
|
|
|
+
|
|
|
|
|
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
|
|
|
|
|
+
|
|
|
|
|
+/* Control the assembler format that we output. */
|
|
|
|
|
+
|
|
|
|
|
+/* Output to assembler file text saying following lines
|
|
|
|
|
+ may contain character constants, extra white space, comments, etc. */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef ASM_APP_ON
|
|
|
|
|
+#define ASM_APP_ON " #APP\n"
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+/* Output to assembler file text saying following lines
|
|
|
|
|
+ no longer contain unusual constructs. */
|
|
|
|
|
+
|
|
|
|
|
+#ifndef ASM_APP_OFF
|
|
|
|
|
+#define ASM_APP_OFF " #NO_APP\n"
|
|
|
|
|
+#endif
|
|
|
|
|
+
|
|
|
|
|
+#define REGISTER_NAMES \
|
|
|
|
|
+{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
|
|
|
|
|
+ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
|
|
|
|
|
+ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
|
|
|
|
|
+ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
|
|
|
|
|
+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
|
|
|
|
|
+ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
|
|
|
|
|
+ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
|
|
|
|
|
+ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
|
|
|
|
|
+ "arg", "frame", }
|
|
|
|
|
+
|
|
|
|
|
+#define ADDITIONAL_REGISTER_NAMES \
|
|
|
|
|
+{ \
|
|
|
|
|
+ { "x0", 0 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x1", 1 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x2", 2 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x3", 3 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x4", 4 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x5", 5 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x6", 6 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x7", 7 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x8", 8 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x9", 9 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x10", 10 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x11", 11 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x12", 12 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x13", 13 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x14", 14 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x15", 15 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x16", 16 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x17", 17 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x18", 18 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x19", 19 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x20", 20 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x21", 21 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x22", 22 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x23", 23 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x24", 24 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x25", 25 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x26", 26 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x27", 27 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x28", 28 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x29", 29 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x30", 30 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "x31", 31 + GP_REG_FIRST }, \
|
|
|
|
|
+ { "f0", 0 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f1", 1 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f2", 2 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f3", 3 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f4", 4 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f5", 5 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f6", 6 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f7", 7 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f8", 8 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f9", 9 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f10", 10 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f11", 11 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f12", 12 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f13", 13 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f14", 14 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f15", 15 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f16", 16 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f17", 17 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f18", 18 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f19", 19 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f20", 20 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f21", 21 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f22", 22 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f23", 23 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f24", 24 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f25", 25 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f26", 26 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f27", 27 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f28", 28 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f29", 29 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f30", 30 + FP_REG_FIRST }, \
|
|
|
|
|
+ { "f31", 31 + FP_REG_FIRST }, \
|
|
|
|
|
+}
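Editorial example (hypothetical user code, not part of the patch): the entries
above let the numeric x-register aliases resolve to the same hard registers
that REGISTER_NAMES prints under their ABI names, so either spelling works in
explicit register variables and asm clobber lists.

/* "x10" maps to GP_REG_FIRST + 10, the register otherwise printed as "a0". */
void example (void)
{
  register long value asm ("x10") = 42;   /* equivalently: asm ("a0") */
  __asm__ volatile ("" : "+r" (value));   /* keep the register live */
}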
|
|
|
|
|
+
|
|
|
|
|
+/* Globalizing directive for a label. */
|
|
|
|
|
+#define GLOBAL_ASM_OP "\t.globl\t"
|
|
|
|
|
+
|
|
|
|
|
+/* This is how to store into the string LABEL
|
|
|
|
|
+ the symbol_ref name of an internal numbered label where
|
|
|
|
|
+ PREFIX is the class of label and NUM is the number within the class.
|
|
|
|
|
+ This is suitable for output with `assemble_name'. */
|
|
|
|
|
+
|
|
|
|
|
+#undef ASM_GENERATE_INTERNAL_LABEL
|
|
|
|
|
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
|
|
|
|
|
+ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
|
|
|
|
|
+
|
|
|
|
|
+/* This is how to output an element of a case-vector that is absolute. */
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
|
|
|
|
|
+ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
|
|
|
|
|
+
|
|
|
|
|
+/* This is how to output an element of a PIC case-vector. */
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
|
|
|
|
|
+ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
|
|
|
|
|
+ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
|
|
|
|
|
+
|
|
|
|
|
+/* This is how to output an assembler line
|
|
|
|
|
+ that says to advance the location counter
|
|
|
|
|
+ to a multiple of 2**LOG bytes. */
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
|
|
|
|
|
+ fprintf (STREAM, "\t.align\t%d\n", (LOG))
|
|
|
|
|
+
|
|
|
|
|
+/* Define the strings to put out for each section in the object file. */
|
|
|
|
|
+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
|
|
|
|
|
+#define DATA_SECTION_ASM_OP "\t.data" /* large data */
|
|
|
|
|
+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
|
|
|
|
|
+#define BSS_SECTION_ASM_OP "\t.bss"
|
|
|
|
|
+#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
|
|
|
|
|
+#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
|
|
|
|
|
+do \
|
|
|
|
|
+ { \
|
|
|
|
|
+ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
|
|
|
|
|
+ reg_names[STACK_POINTER_REGNUM], \
|
|
|
|
|
+ reg_names[STACK_POINTER_REGNUM], \
|
|
|
|
|
+ TARGET_64BIT ? "sd" : "sw", \
|
|
|
|
|
+ reg_names[REGNO], \
|
|
|
|
|
+ reg_names[STACK_POINTER_REGNUM]); \
|
|
|
|
|
+ } \
|
|
|
|
|
+while (0)
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
|
|
|
|
|
+do \
|
|
|
|
|
+ { \
|
|
|
|
|
+ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
|
|
|
|
|
+ TARGET_64BIT ? "ld" : "lw", \
|
|
|
|
|
+ reg_names[REGNO], \
|
|
|
|
|
+ reg_names[STACK_POINTER_REGNUM], \
|
|
|
|
|
+ reg_names[STACK_POINTER_REGNUM], \
|
|
|
|
|
+ reg_names[STACK_POINTER_REGNUM]); \
|
|
|
|
|
+ } \
|
|
|
|
|
+while (0)
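Editorial sketch of the output these two macros produce, e.g. for a0 on RV64
(on RV32 the store/load become sw/lw, but the 8-byte adjustment stays the same):

	addi	sp,sp,-8
	sd	a0,0(sp)
	...
	ld	a0,0(sp)
	addi	sp,sp,8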
|
|
|
|
|
+
|
|
|
|
|
+#define ASM_COMMENT_START "#"
|
|
|
|
|
+
|
|
|
|
|
+#undef SIZE_TYPE
|
|
|
|
|
+#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
|
|
|
|
|
+
|
|
|
|
|
+#undef PTRDIFF_TYPE
|
|
|
|
|
+#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
|
|
|
|
|
+
|
|
|
|
|
+/* The maximum number of bytes that can be copied by one iteration of
|
|
|
|
|
+ a movmemsi loop; see riscv_block_move_loop. */
|
|
|
|
|
+#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
|
|
|
|
|
+
|
|
|
|
|
+/* The maximum number of bytes that can be copied by a straight-line
|
|
|
|
|
+ implementation of movmemsi; see riscv_block_move_straight. We want
|
|
|
|
|
+ to make sure that any loop-based implementation will iterate at
|
|
|
|
|
+ least twice. */
|
|
|
|
|
+#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
|
|
|
|
|
+
|
|
|
|
|
+/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
|
|
|
|
|
+
|
|
|
|
|
+#define RISCV_CALL_RATIO 6
|
|
|
|
|
+
|
|
|
|
|
+/* Any loop-based implementation of movmemsi will have at least
|
|
|
|
|
+ RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
|
|
|
|
|
+ moves, so allow individual copies of fewer elements.
|
|
|
|
|
+
|
|
|
|
|
+ When movmemsi is not available, use a value approximating
|
|
|
|
|
+ the length of a memcpy call sequence, so that move_by_pieces
|
|
|
|
|
+ will generate inline code if it is shorter than a function call.
|
|
|
|
|
+ Since move_by_pieces_ninsns counts memory-to-memory moves, but
|
|
|
|
|
+ we'll have to generate a load/store pair for each, halve the
|
|
|
|
|
+ value of RISCV_CALL_RATIO to take that into account. */
|
|
|
|
|
+
|
|
|
|
|
+#define MOVE_RATIO(speed) \
|
|
|
|
|
+ (HAVE_movmemsi \
|
|
|
|
|
+ ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
|
|
|
|
|
+ : RISCV_CALL_RATIO / 2)
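Editorial worked numbers for the definition above: RISCV_MAX_MOVE_BYTES_STRAIGHT
is UNITS_PER_WORD * 8 and MOVE_MAX is UNITS_PER_WORD, so both cases reduce to
constants independent of the word size.

/* MOVE_RATIO (speed)
     == (UNITS_PER_WORD * 8) / UNITS_PER_WORD == 8   when HAVE_movmemsi
     == RISCV_CALL_RATIO / 2 == 3                    otherwise             */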
|
|
|
|
|
+
|
|
|
|
|
+/* For CLEAR_RATIO, when optimizing for size, give a better estimate
|
|
|
|
|
+ of the length of a memset call, but use the default otherwise. */
|
|
|
|
|
+
|
|
|
|
|
+#define CLEAR_RATIO(speed)\
|
|
|
|
|
+ ((speed) ? 15 : RISCV_CALL_RATIO)
|
|
|
|
|
+
|
|
|
|
|
+/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
|
|
|
|
|
+ optimizing for size adjust the ratio to account for the overhead of
|
|
|
|
|
+ loading the constant and replicating it across the word. */
|
|
|
|
|
+
|
|
|
|
|
+#define SET_RATIO(speed) \
|
|
|
|
|
+ ((speed) ? 15 : RISCV_CALL_RATIO - 2)
|
|
|
|
|
+
|
|
|
|
|
+#ifndef HAVE_AS_TLS
|
|
|
|
|
+#define HAVE_AS_TLS 0
+#endif
+
|
|
|
|
|
+#ifndef USED_FOR_TARGET
|
|
|
|
|
+
|
|
|
|
|
+extern const enum reg_class riscv_regno_to_class[];
|
|
|
|
|
+extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
|
|
|
|
|
+extern const char* riscv_hi_relocs[];
+#endif
+
|
|
|
|
|
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
|
|
|
|
|
+ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
|
|
|
|
|
diff -urN empty/gcc/config/riscv/riscv.md gcc-5.2.0/gcc/config/riscv/riscv.md
--- gcc-5.2.0/gcc/config/riscv/riscv.md 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/riscv.md 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,2448 @@
|
|
|
|
|
+;; Machine description for RISC-V for GNU compiler.
|
|
|
|
|
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
|
|
|
|
|
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
|
|
|
|
|
+;; Based on MIPS target for GNU compiler.
|
|
|
|
|
+
|
|
|
|
|
+;; This file is part of GCC.
|
|
|
|
|
+
|
|
|
|
|
+;; GCC is free software; you can redistribute it and/or modify
|
|
|
|
|
+;; it under the terms of the GNU General Public License as published by
|
|
|
|
|
+;; the Free Software Foundation; either version 3, or (at your option)
|
|
|
|
|
+;; any later version.
|
|
|
|
|
+
|
|
|
|
|
+;; GCC is distributed in the hope that it will be useful,
|
|
|
|
|
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
+;; GNU General Public License for more details.
|
|
|
|
|
+
|
|
|
|
|
+;; You should have received a copy of the GNU General Public License
|
|
|
|
|
+;; along with GCC; see the file COPYING3. If not see
|
|
|
|
|
+;; <http://www.gnu.org/licenses/>.
|
|
|
|
|
+
|
|
|
|
|
+(define_c_enum "unspec" [
|
|
|
|
|
+ ;; Floating-point moves.
|
|
|
|
|
+ UNSPEC_LOAD_LOW
|
|
|
|
|
+ UNSPEC_LOAD_HIGH
|
|
|
|
|
+ UNSPEC_STORE_WORD
|
|
|
|
|
+
|
|
|
|
|
+ ;; GP manipulation.
|
|
|
|
|
+ UNSPEC_EH_RETURN
|
|
|
|
|
+
|
|
|
|
|
+ ;; Symbolic accesses.
|
|
|
|
|
+ UNSPEC_ADDRESS_FIRST
|
|
|
|
|
+ UNSPEC_LOAD_GOT
|
|
|
|
|
+ UNSPEC_TLS
|
|
|
|
|
+ UNSPEC_TLS_LE
|
|
|
|
|
+ UNSPEC_TLS_IE
|
|
|
|
|
+ UNSPEC_TLS_GD
|
|
|
|
|
+
|
|
|
|
|
+ ;; Register save and restore.
|
|
|
|
|
+ UNSPEC_GPR_SAVE
|
|
|
|
|
+ UNSPEC_GPR_RESTORE
|
|
|
|
|
+
|
|
|
|
|
+ ;; Blockage and synchronisation.
|
|
|
|
|
+ UNSPEC_BLOCKAGE
|
|
|
|
|
+ UNSPEC_FENCE
|
|
|
|
|
+ UNSPEC_FENCE_I
|
|
|
|
|
+])
|
|
|
|
|
+
|
|
|
|
|
+(define_constants
|
|
|
|
|
+ [(RETURN_ADDR_REGNUM 1)
|
|
|
|
|
+ (T0_REGNUM 5)
|
|
|
|
|
+ (T1_REGNUM 6)
|
|
|
|
|
+])
|
|
|
|
|
+
|
|
|
|
|
+(include "predicates.md")
|
|
|
|
|
+(include "constraints.md")
|
|
|
|
|
+
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; Attributes
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_attr "got" "unset,xgot_high,load"
|
|
|
|
|
+ (const_string "unset"))
|
|
|
|
|
+
|
|
|
|
|
+;; For jal instructions, this attribute is DIRECT when the target address
|
|
|
|
|
+;; is symbolic and INDIRECT when it is a register.
|
|
|
|
|
+(define_attr "jal" "unset,direct,indirect"
|
|
|
|
|
+ (const_string "unset"))
|
|
|
|
|
+
|
|
|
|
|
+;; Classification of moves, extensions and truncations. Most values
|
|
|
|
|
+;; are as for "type" (see below) but there are also the following
|
|
|
|
|
+;; move-specific values:
|
|
|
|
|
+;;
|
|
|
|
|
+;; andi a single ANDI instruction
|
|
|
|
|
+;; shift_shift a shift left followed by a shift right
|
|
|
|
|
+;;
|
|
|
|
|
+;; This attribute is used to determine the instruction's length and
|
|
|
|
|
+;; scheduling type. For doubleword moves, the attribute always describes
|
|
|
|
|
+;; the split instructions; in some cases, it is more appropriate for the
|
|
|
|
|
+;; scheduling type to be "multi" instead.
|
|
|
|
|
+(define_attr "move_type"
|
|
|
|
|
+ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
|
|
|
|
|
+ const,logical,arith,andi,shift_shift"
|
|
|
|
|
+ (const_string "unknown"))
|
|
|
|
|
+
|
|
|
|
|
+(define_attr "alu_type" "unknown,add,sub,and,or,xor"
|
|
|
|
|
+ (const_string "unknown"))
|
|
|
|
|
+
|
|
|
|
|
+;; Main data type used by the insn
|
|
|
|
|
+(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
|
|
|
|
|
+ (const_string "unknown"))
|
|
|
|
|
+
|
|
|
|
|
+;; True if the main data type is twice the size of a word.
|
|
|
|
|
+(define_attr "dword_mode" "no,yes"
|
|
|
|
|
+ (cond [(and (eq_attr "mode" "DI,DF")
|
|
|
|
|
+ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
|
|
|
|
|
+ (const_string "yes")
|
|
|
|
|
+
|
|
|
|
|
+ (and (eq_attr "mode" "TI,TF")
|
|
|
|
|
+ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
|
|
|
|
|
+ (const_string "yes")]
|
|
|
|
|
+ (const_string "no")))
|
|
|
|
|
+
|
|
|
|
|
+;; Classification of each insn.
|
|
|
|
|
+;; branch conditional branch
|
|
|
|
|
+;; jump unconditional jump
|
|
|
|
|
+;; call unconditional call
|
|
|
|
|
+;; load load instruction(s)
|
|
|
|
|
+;; fpload floating point load
|
|
|
|
|
+;; fpidxload floating point indexed load
|
|
|
|
|
+;; store store instruction(s)
|
|
|
|
|
+;; fpstore floating point store
|
|
|
|
|
+;; fpidxstore floating point indexed store
|
|
|
|
|
+;; mtc transfer to coprocessor
|
|
|
|
|
+;; mfc transfer from coprocessor
|
|
|
|
|
+;; const load constant
|
|
|
|
|
+;; arith integer arithmetic instructions
|
|
|
|
|
+;; logical integer logical instructions
|
|
|
|
|
+;; shift integer shift instructions
|
|
|
|
|
+;; slt set less than instructions
|
|
|
|
|
+;; imul integer multiply
|
|
|
|
|
+;; idiv integer divide
|
|
|
|
|
+;; move integer register move (addi rd, rs1, 0)
|
|
|
|
|
+;; fmove floating point register move
|
|
|
|
|
+;; fadd floating point add/subtract
|
|
|
|
|
+;; fmul floating point multiply
|
|
|
|
|
+;; fmadd floating point multiply-add
|
|
|
|
|
+;; fdiv floating point divide
|
|
|
|
|
+;; fcmp floating point compare
|
|
|
|
|
+;; fcvt floating point convert
|
|
|
|
|
+;; fsqrt floating point square root
|
|
|
|
|
+;; multi multiword sequence (or user asm statements)
|
|
|
|
|
+;; nop no operation
|
|
|
|
|
+;; ghost an instruction that produces no real code
|
|
|
|
|
+(define_attr "type"
|
|
|
|
|
+ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
|
|
|
|
|
+ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
|
|
|
|
|
+ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
|
|
|
|
|
+ (cond [(eq_attr "jal" "!unset") (const_string "call")
|
|
|
|
|
+ (eq_attr "got" "load") (const_string "load")
|
|
|
|
|
+
|
|
|
|
|
+ (eq_attr "alu_type" "add,sub") (const_string "arith")
|
|
|
|
|
+
|
|
|
|
|
+ (eq_attr "alu_type" "and,or,xor") (const_string "logical")
|
|
|
|
|
+
|
|
|
|
|
+ ;; If a doubleword move uses these expensive instructions,
|
|
|
|
|
+ ;; it is usually better to schedule them in the same way
|
|
|
|
|
+ ;; as the singleword form, rather than as "multi".
|
|
|
|
|
+ (eq_attr "move_type" "load") (const_string "load")
|
|
|
|
|
+ (eq_attr "move_type" "fpload") (const_string "fpload")
|
|
|
|
|
+ (eq_attr "move_type" "store") (const_string "store")
|
|
|
|
|
+ (eq_attr "move_type" "fpstore") (const_string "fpstore")
|
|
|
|
|
+ (eq_attr "move_type" "mtc") (const_string "mtc")
|
|
|
|
|
+ (eq_attr "move_type" "mfc") (const_string "mfc")
|
|
|
|
|
+
|
|
|
|
|
+ ;; These types of move are always single insns.
|
|
|
|
|
+ (eq_attr "move_type" "fmove") (const_string "fmove")
|
|
|
|
|
+ (eq_attr "move_type" "arith") (const_string "arith")
|
|
|
|
|
+ (eq_attr "move_type" "logical") (const_string "logical")
|
|
|
|
|
+ (eq_attr "move_type" "andi") (const_string "logical")
|
|
|
|
|
+
|
|
|
|
|
+ ;; These types of move are always split.
|
|
|
|
|
+ (eq_attr "move_type" "shift_shift")
|
|
|
|
|
+ (const_string "multi")
|
|
|
|
|
+
|
|
|
|
|
+ ;; These types of move are split for doubleword modes only.
|
|
|
|
|
+ (and (eq_attr "move_type" "move,const")
|
|
|
|
|
+ (eq_attr "dword_mode" "yes"))
|
|
|
|
|
+ (const_string "multi")
|
|
|
|
|
+ (eq_attr "move_type" "move") (const_string "move")
|
|
|
|
|
+ (eq_attr "move_type" "const") (const_string "const")]
|
|
|
|
|
+ (const_string "unknown")))
|
|
|
|
|
+
|
|
|
|
|
+;; Mode for conversion types (fcvt)
|
|
|
|
|
+;; I2S integer to float single (SI/DI to SF)
|
|
|
|
|
+;; I2D integer to float double (SI/DI to DF)
|
|
|
|
|
+;; S2I float to integer (SF to SI/DI)
|
|
|
|
|
+;; D2I float to integer (DF to SI/DI)
|
|
|
|
|
+;; D2S double to float single
|
|
|
|
|
+;; S2D float single to double
|
|
|
|
|
+
|
|
|
|
|
+(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
|
|
|
|
|
+ (const_string "unknown"))
|
|
|
|
|
+
|
|
|
|
|
+;; Length of instruction in bytes.
|
|
|
|
|
+(define_attr "length" ""
|
|
|
|
|
+ (cond [
|
|
|
|
|
+ ;; Direct branch instructions have a range of [-0x1000,0xffc],
|
|
|
|
|
+ ;; relative to the address of the delay slot. If a branch is
|
|
|
|
|
+ ;; outside this range, convert a branch like:
|
|
|
|
|
+ ;;
|
|
|
|
|
+ ;; bne r1,r2,target
|
|
|
|
|
+ ;;
|
|
|
|
|
+ ;; to:
|
|
|
|
|
+ ;;
|
|
|
|
|
+ ;; beq r1,r2,1f
|
|
|
|
|
+ ;; j target
|
|
|
|
|
+ ;; 1:
|
|
|
|
|
+ ;;
|
|
|
|
|
+ (eq_attr "type" "branch")
|
|
|
|
|
+ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
|
|
|
|
|
+ (le (minus (pc) (match_dup 0)) (const_int 4092)))
|
|
|
|
|
+ (const_int 4)
|
|
|
|
|
+ (const_int 8))
|
|
|
|
|
+
|
|
|
|
|
+ ;; Conservatively assume calls take two instructions, as in:
|
|
|
|
|
+ ;; auipc t0, %pcrel_hi(target)
|
|
|
|
|
+ ;; jalr ra, t0, %lo(target)
|
|
|
|
|
+ ;; The linker will relax these into JAL when appropriate.
|
|
|
|
|
+ (eq_attr "type" "call")
|
|
|
|
|
+ (const_int 8)
|
|
|
|
|
+
|
|
|
|
|
+ ;; "Ghost" instructions occupy no space.
|
|
|
|
|
+ (eq_attr "type" "ghost")
|
|
|
|
|
+ (const_int 0)
|
|
|
|
|
+
|
|
|
|
|
+ (eq_attr "got" "load") (const_int 8)
|
|
|
|
|
+
|
|
|
|
|
+ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
|
|
|
|
|
+ (eq_attr "move_type" "shift_shift")
|
|
|
|
|
+ (const_int 8)
|
|
|
|
|
+
|
|
|
|
|
+ ;; Check for doubleword moves that are decomposed into two
|
|
|
|
|
+ ;; instructions.
|
|
|
|
|
+ (and (eq_attr "move_type" "mtc,mfc,move")
|
|
|
|
|
+ (eq_attr "dword_mode" "yes"))
|
|
|
|
|
+ (const_int 8)
|
|
|
|
|
+
|
|
|
|
|
+ ;; Doubleword CONST{,N} moves are split into two word
|
|
|
|
|
+ ;; CONST{,N} moves.
|
|
|
|
|
+ (and (eq_attr "move_type" "const")
|
|
|
|
|
+ (eq_attr "dword_mode" "yes"))
|
|
|
|
|
+ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
|
|
|
|
|
+
|
|
|
|
|
+ ;; Otherwise, constants, loads and stores are handled by external
|
|
|
|
|
+ ;; routines.
|
|
|
|
|
+ (eq_attr "move_type" "load,fpload")
|
|
|
|
|
+ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
|
|
|
|
|
+ (eq_attr "move_type" "store,fpstore")
|
|
|
|
|
+ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
|
|
|
|
|
+ ] (const_int 4)))
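Editorial worked example of the length computation above: a conditional branch
whose target is at most 4088 bytes ahead or 4092 bytes behind the branch is
counted as a single 4-byte instruction; anything farther away gets length 8 so
it can be emitted as the inverted branch plus jump described in the comment:

	beq	a0,a1,1f	# inverted condition, short forward branch
	j	far_target	# unconditional jump reaches the distant target
1: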
|
|
|
|
|
+
|
|
|
|
|
+;; Describe a user's asm statement.
|
|
|
|
|
+(define_asm_attributes
|
|
|
|
|
+ [(set_attr "type" "multi")])
|
|
|
|
|
+
|
|
|
|
|
+;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
|
|
|
|
|
+;; from the same template.
|
|
|
|
|
+(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
|
|
|
|
|
+(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
|
|
|
|
|
+
|
|
|
|
|
+;; A copy of GPR that can be used when a pattern has two independent
|
|
|
|
|
+;; modes.
|
|
|
|
|
+(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
|
|
|
|
|
+
|
|
|
|
|
+;; This mode iterator allows :P to be used for patterns that operate on
|
|
|
|
|
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
|
|
|
|
|
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
|
|
|
|
|
+
|
|
|
|
|
+;; 32-bit integer moves for which we provide move patterns.
|
|
|
|
|
+(define_mode_iterator IMOVE32 [SI])
|
|
|
|
|
+
|
|
|
|
|
+;; 64-bit modes for which we provide move patterns.
|
|
|
|
|
+(define_mode_iterator MOVE64 [DI DF])
|
|
|
|
|
+
|
|
|
|
|
+;; 128-bit modes for which we provide move patterns on 64-bit targets.
|
|
|
|
|
+(define_mode_iterator MOVE128 [TI TF])
|
|
|
|
|
+
|
|
|
|
|
+;; This mode iterator allows the QI and HI extension patterns to be
|
|
|
|
|
+;; defined from the same template.
|
|
|
|
|
+(define_mode_iterator SHORT [QI HI])
|
|
|
|
|
+
|
|
|
|
|
+;; Likewise the 64-bit truncate-and-shift patterns.
|
|
|
|
|
+(define_mode_iterator SUBDI [QI HI SI])
|
|
|
|
|
+(define_mode_iterator HISI [HI SI])
|
|
|
|
|
+(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
|
|
|
|
|
+
|
|
|
|
|
+;; This mode iterator allows :ANYF to be used wherever a scalar or vector
|
|
|
|
|
+;; floating-point mode is allowed.
|
|
|
|
|
+(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
|
|
|
|
|
+ (DF "TARGET_HARD_FLOAT")])
|
|
|
|
|
+(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
|
|
|
|
|
+ (SF "TARGET_HARD_FLOAT")
|
|
|
|
|
+ (DF "TARGET_HARD_FLOAT")])
|
|
|
|
|
+
|
|
|
|
|
+;; Like ANYF, but only applies to scalar modes.
|
|
|
|
|
+(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
|
|
|
|
|
+ (DF "TARGET_HARD_FLOAT")])
|
|
|
|
|
+
|
|
|
|
|
+;; A floating-point mode for which moves involving FPRs may need to be split.
|
|
|
|
|
+(define_mode_iterator SPLITF
|
|
|
|
|
+ [(DF "!TARGET_64BIT")
|
|
|
|
|
+ (DI "!TARGET_64BIT")
|
|
|
|
|
+ (TF "TARGET_64BIT")])
|
|
|
|
|
+
|
|
|
|
|
+;; This attribute gives the length suffix for a sign- or zero-extension
|
|
|
|
|
+;; instruction.
|
|
|
|
|
+(define_mode_attr size [(QI "b") (HI "h")])
|
|
|
|
|
+
|
|
|
|
|
+;; Mode attributes for loads.
|
|
|
|
|
+(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
|
|
|
|
|
+
|
|
|
|
|
+;; Instruction names for stores.
|
|
|
|
|
+(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
|
|
|
|
|
+
|
|
|
|
|
+;; This attribute gives the best constraint to use for registers of
|
|
|
|
|
+;; a given mode.
|
|
|
|
|
+(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
|
|
|
|
|
+
|
|
|
|
|
+;; This attribute gives the format suffix for floating-point operations.
|
|
|
|
|
+(define_mode_attr fmt [(SF "s") (DF "d")])
|
|
|
|
|
+
|
|
|
|
|
+;; This attribute gives the format suffix for atomic memory operations.
|
|
|
|
|
+(define_mode_attr amo [(SI "w") (DI "d")])
|
|
|
|
|
+
|
|
|
|
|
+;; This attribute gives the upper-case mode name for one unit of a
|
|
|
|
|
+;; floating-point mode.
|
|
|
|
|
+(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
|
|
|
|
|
+
|
|
|
|
|
+;; This attribute gives the integer mode that has half the size of
|
|
|
|
|
+;; the controlling mode.
|
|
|
|
|
+(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
|
|
|
|
|
+
|
|
|
|
|
+;; This code iterator allows signed and unsigned widening multiplications
|
|
|
|
|
+;; to use the same template.
|
|
|
|
|
+(define_code_iterator any_extend [sign_extend zero_extend])
|
|
|
|
|
+
|
|
|
|
|
+;; This code iterator allows the two right shift instructions to be
|
|
|
|
|
+;; generated from the same template.
|
|
|
|
|
+(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
|
|
|
|
|
+
|
|
|
|
|
+;; This code iterator allows the three shift instructions to be generated
|
|
|
|
|
+;; from the same template.
|
|
|
|
|
+(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
|
|
|
|
|
+
|
|
|
|
|
+;; This code iterator allows unsigned and signed division to be generated
|
|
|
|
|
+;; from the same template.
|
|
|
|
|
+(define_code_iterator any_div [div udiv])
|
|
|
|
|
+
|
|
|
|
|
+;; This code iterator allows unsigned and signed modulus to be generated
|
|
|
|
|
+;; from the same template.
|
|
|
|
|
+(define_code_iterator any_mod [mod umod])
|
|
|
|
|
+
|
|
|
|
|
+;; These code iterators allow the signed and unsigned scc operations to use
|
|
|
|
|
+;; the same template.
|
|
|
|
|
+(define_code_iterator any_gt [gt gtu])
|
|
|
|
|
+(define_code_iterator any_ge [ge geu])
|
|
|
|
|
+(define_code_iterator any_lt [lt ltu])
|
|
|
|
|
+(define_code_iterator any_le [le leu])
|
|
|
|
|
+
|
|
|
|
|
+;; <u> expands to an empty string when doing a signed operation and
|
|
|
|
|
+;; "u" when doing an unsigned operation.
|
|
|
|
|
+(define_code_attr u [(sign_extend "") (zero_extend "u")
|
|
|
|
|
+ (div "") (udiv "u")
|
|
|
|
|
+ (mod "") (umod "u")
|
|
|
|
|
+ (gt "") (gtu "u")
|
|
|
|
|
+ (ge "") (geu "u")
|
|
|
|
|
+ (lt "") (ltu "u")
|
|
|
|
|
+ (le "") (leu "u")])
|
|
|
|
|
+
|
|
|
|
|
+;; <su> is like <u>, but the signed form expands to "s" rather than "".
|
|
|
|
|
+(define_code_attr su [(sign_extend "s") (zero_extend "u")])
|
|
|
|
|
+
|
|
|
|
|
+;; <optab> expands to the name of the optab for a particular code.
|
|
|
|
|
+(define_code_attr optab [(ashift "ashl")
|
|
|
|
|
+ (ashiftrt "ashr")
|
|
|
|
|
+ (lshiftrt "lshr")
|
|
|
|
|
+ (ior "ior")
|
|
|
|
|
+ (xor "xor")
|
|
|
|
|
+ (and "and")
|
|
|
|
|
+ (plus "add")
|
|
|
|
|
+ (minus "sub")])
|
|
|
|
|
+
|
|
|
|
|
+;; <insn> expands to the name of the insn that implements a particular code.
|
|
|
|
|
+(define_code_attr insn [(ashift "sll")
|
|
|
|
|
+ (ashiftrt "sra")
|
|
|
|
|
+ (lshiftrt "srl")
|
|
|
|
|
+ (ior "or")
|
|
|
|
|
+ (xor "xor")
|
|
|
|
|
+ (and "and")
|
|
|
|
|
+ (plus "add")
|
|
|
|
|
+ (minus "sub")])
|
|
|
|
|
+
|
|
|
|
|
+;; Pipeline descriptions.
|
|
|
|
|
+;;
|
|
|
|
|
+;; generic.md provides a fallback for processors without a specific
|
|
|
|
|
+;; pipeline description. It is derived from the old define_function_unit
|
|
|
|
|
+;; version and uses the "alu" and "imuldiv" units declared below.
|
|
|
|
|
+;;
|
|
|
|
|
+;; Some of the processor-specific files are also derived from old
|
|
|
|
|
+;; define_function_unit descriptions and simply override the parts of
|
|
|
|
|
+;; generic.md that don't apply. The other processor-specific files
|
|
|
|
|
+;; are self-contained.
|
|
|
|
|
+(define_automaton "alu,imuldiv")
|
|
|
|
|
+
|
|
|
|
|
+(define_cpu_unit "alu" "alu")
|
|
|
|
|
+(define_cpu_unit "imuldiv" "imuldiv")
|
|
|
|
|
+
|
|
|
|
|
+;; Ghost instructions produce no real code and introduce no hazards.
|
|
|
|
|
+;; They exist purely to express an effect on dataflow.
|
|
|
|
|
+(define_insn_reservation "ghost" 0
|
|
|
|
|
+ (eq_attr "type" "ghost")
|
|
|
|
|
+ "nothing")
|
|
|
|
|
+
|
|
|
|
|
+(include "generic.md")
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; ADDITION
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "add<mode>3"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "fadd.<fmt>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "fadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "add<mode>3"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand")
|
|
|
|
|
+ (plus:GPR (match_operand:GPR 1 "register_operand")
|
|
|
|
|
+ (match_operand:GPR 2 "arith_operand")))]
|
|
|
|
|
+ "")
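Editorial note on the iterator mechanics: GPR covers SImode unconditionally and
DImode only under TARGET_64BIT, so the one template above stands for both the
"addsi3" and "adddi3" standard names, roughly as if it had been written twice
(sketch, not literal generated code):

;; (define_expand "addsi3" [(set (match_operand:SI 0 ...) (plus:SI ...))] "")
;; (define_expand "adddi3" [(set (match_operand:DI 0 ...) (plus:DI ...))] "")  ; gated on TARGET_64BIT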
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*addsi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
|
|
|
|
|
+ (match_operand:GPR2 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*adddi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
|
|
|
|
|
+ (match_operand:DI 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "add\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*addsi3_extended"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (sign_extend:DI
|
|
|
|
|
+ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
|
|
|
|
|
+ (match_operand:SI 2 "arith_operand" "r,Q"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "addw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*adddisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
|
|
|
|
|
+ (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "addw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*adddisisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
|
|
|
|
|
+ (match_operand:SI 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "addw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*adddi3_truncsi"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (truncate:SI
|
|
|
|
|
+ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
|
|
|
|
|
+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "addw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; SUBTRACTION
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "sub<mode>3"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "fsub.<fmt>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "fadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "sub<mode>3"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand")
|
|
|
|
|
+ (minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
|
|
|
|
|
+ (match_operand:GPR 2 "register_operand")))]
|
|
|
|
|
+ "")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subdi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "sub\t%0,%z1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subsi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
|
|
|
|
|
+ (match_operand:GPR2 2 "register_operand" "r")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subsi3_extended"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (sign_extend:DI
|
|
|
|
|
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "subw\t%0,%z1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subdisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
|
|
|
|
|
+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "subw\t%0,%z1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subdisisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "subw\t%0,%z1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subsidisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
|
|
|
|
|
+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "subw\t%0,%z1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*subdi3_truncsi"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (truncate:SI
|
|
|
|
|
+ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
|
|
|
|
|
+ (match_operand:DI 2 "arith_operand" "r,Q"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "subw\t%0,%z1,%2"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; MULTIPLICATION
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "mul<mode>3"
|
|
|
|
|
+ [(set (match_operand:SCALARF 0 "register_operand" "=f")
|
|
|
|
|
+ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:SCALARF 2 "register_operand" "f")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "fmul.<fmt>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "fmul")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "mul<mode>3"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand")
|
|
|
|
|
+ (mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
|
|
|
|
|
+ (match_operand:GPR 2 "register_operand")))]
|
|
|
|
|
+ "TARGET_MULDIV")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*mulsi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:SI (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:GPR2 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_MULDIV"
|
|
|
|
|
+ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*muldisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
|
|
|
|
|
+ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "mulw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*muldi3_truncsi"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:SI
|
|
|
|
|
+ (mult:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r"))))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "mulw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*muldi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "mul\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ........................
|
|
|
|
|
+;;
|
|
|
|
|
+;; MULTIPLICATION HIGH-PART
|
|
|
|
|
+;;
|
|
|
|
|
+;; ........................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+;; Using a clobber here is crude, but a cleaner formulation is not obvious.
|
|
|
|
|
+(define_insn_and_split "<u>mulditi3"
|
|
|
|
|
+ [(set (match_operand:TI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:TI (any_extend:TI
|
|
|
|
|
+ (match_operand:DI 1 "register_operand" "r"))
|
|
|
|
|
+ (any_extend:TI
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r"))))
|
|
|
|
|
+ (clobber (match_scratch:DI 3 "=r"))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "#"
|
|
|
|
|
+ "reload_completed"
|
|
|
|
|
+ [
|
|
|
|
|
+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
|
|
|
|
|
+ (set (match_dup 4) (truncate:DI
|
|
|
|
|
+ (lshiftrt:TI
|
|
|
|
|
+ (mult:TI (any_extend:TI (match_dup 1))
|
|
|
|
|
+ (any_extend:TI (match_dup 2)))
|
|
|
|
|
+ (const_int 64))))
|
|
|
|
|
+ (set (match_dup 5) (match_dup 3))
|
|
|
|
|
+ ]
|
|
|
|
|
+{
|
|
|
|
|
+ operands[4] = riscv_subword (operands[0], true);
|
|
|
|
|
+ operands[5] = riscv_subword (operands[0], false);
|
|
|
|
|
+}
|
|
|
|
|
+ )
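Editorial sketch of the sequence the split above produces for a signed
64x64->128-bit product (register choices are illustrative only): the low half
goes through the scratch register first, so the high-part multiply still reads
the original operands even if the destination overlaps an input.

	mul	t0,a0,a1	# low 64 bits into the clobbered scratch
	mulh	a3,a0,a1	# high 64 bits; mulhu in the unsigned variant
	mv	a2,t0		# register move (addi a2,t0,0) of the low half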
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<u>muldi3_highpart"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:DI
|
|
|
|
|
+ (lshiftrt:TI
|
|
|
|
|
+ (mult:TI (any_extend:TI
|
|
|
|
|
+ (match_operand:DI 1 "register_operand" "r"))
|
|
|
|
|
+ (any_extend:TI
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r")))
|
|
|
|
|
+ (const_int 64))))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "mulh<u>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "usmulditi3"
|
|
|
|
|
+ [(set (match_operand:TI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:TI (zero_extend:TI
|
|
|
|
|
+ (match_operand:DI 1 "register_operand" "r"))
|
|
|
|
|
+ (sign_extend:TI
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r"))))
|
|
|
|
|
+ (clobber (match_scratch:DI 3 "=r"))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "#"
|
|
|
|
|
+ "reload_completed"
|
|
|
|
|
+ [
|
|
|
|
|
+ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
|
|
|
|
|
+ (set (match_dup 4) (truncate:DI
|
|
|
|
|
+ (lshiftrt:TI
|
|
|
|
|
+ (mult:TI (zero_extend:TI (match_dup 1))
|
|
|
|
|
+ (sign_extend:TI (match_dup 2)))
|
|
|
|
|
+ (const_int 64))))
|
|
|
|
|
+ (set (match_dup 5) (match_dup 3))
|
|
|
|
|
+ ]
|
|
|
|
|
+{
|
|
|
|
|
+ operands[4] = riscv_subword (operands[0], true);
|
|
|
|
|
+ operands[5] = riscv_subword (operands[0], false);
|
|
|
|
|
+}
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "usmuldi3_highpart"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:DI
|
|
|
|
|
+ (lshiftrt:TI
|
|
|
|
|
+ (mult:TI (zero_extend:TI
|
|
|
|
|
+ (match_operand:DI 1 "register_operand" "r"))
|
|
|
|
|
+ (sign_extend:TI
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r")))
|
|
|
|
|
+ (const_int 64))))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "mulhsu\t%0,%2,%1"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "<u>mulsidi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:DI (any_extend:DI
|
|
|
|
|
+ (match_operand:SI 1 "register_operand" "r"))
|
|
|
|
|
+ (any_extend:DI
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r"))))
|
|
|
|
|
+ (clobber (match_scratch:SI 3 "=r"))]
|
|
|
|
|
+ "TARGET_MULDIV && !TARGET_64BIT"
|
|
|
|
|
+{
|
|
|
|
|
+ rtx temp = gen_reg_rtx (SImode);
|
|
|
|
|
+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
|
|
|
|
|
+ emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
|
|
|
|
|
+ operands[1], operands[2]));
|
|
|
|
|
+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
|
|
|
|
|
+ DONE;
|
|
|
|
|
+}
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<u>mulsi3_highpart"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:SI
|
|
|
|
|
+ (lshiftrt:DI
|
|
|
|
|
+ (mult:DI (any_extend:DI
|
|
|
|
|
+ (match_operand:SI 1 "register_operand" "r"))
|
|
|
|
|
+ (any_extend:DI
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r")))
|
|
|
|
|
+ (const_int 32))))]
|
|
|
|
|
+ "TARGET_MULDIV && !TARGET_64BIT"
|
|
|
|
|
+ "mulh<u>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "usmulsidi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (mult:DI (zero_extend:DI
|
|
|
|
|
+ (match_operand:SI 1 "register_operand" "r"))
|
|
|
|
|
+ (sign_extend:DI
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r"))))
|
|
|
|
|
+ (clobber (match_scratch:SI 3 "=r"))]
|
|
|
|
|
+ "TARGET_MULDIV && !TARGET_64BIT"
|
|
|
|
|
+{
|
|
|
|
|
+ rtx temp = gen_reg_rtx (SImode);
|
|
|
|
|
+ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
|
|
|
|
|
+ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
|
|
|
|
|
+ operands[1], operands[2]));
|
|
|
|
|
+ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
|
|
|
|
|
+ DONE;
|
|
|
|
|
+}
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "usmulsi3_highpart"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:SI
|
|
|
|
|
+ (lshiftrt:DI
|
|
|
|
|
+ (mult:DI (zero_extend:DI
|
|
|
|
|
+ (match_operand:SI 1 "register_operand" "r"))
|
|
|
|
|
+ (sign_extend:DI
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r")))
|
|
|
|
|
+ (const_int 32))))]
|
|
|
|
|
+ "TARGET_MULDIV && !TARGET_64BIT"
|
|
|
|
|
+ "mulhsu\t%0,%2,%1"
|
|
|
|
|
+ [(set_attr "type" "imul")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; DIVISION and REMAINDER
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<u>divsi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_div:SI (match_operand:SI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_MULDIV"
|
|
|
|
|
+ { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
|
|
|
|
|
+ [(set_attr "type" "idiv")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<u>divdi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_div:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "div<u>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "idiv")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<u>modsi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_mod:SI (match_operand:SI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:SI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_MULDIV"
|
|
|
|
|
+ { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
|
|
|
|
|
+ [(set_attr "type" "idiv")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<u>moddi3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_mod:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "register_operand" "r")))]
|
|
|
|
|
+ "TARGET_MULDIV && TARGET_64BIT"
|
|
|
|
|
+ "rem<u>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "idiv")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "div<mode>3"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_FDIV"
|
|
|
|
|
+ "fdiv.<fmt>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "fdiv")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; SQUARE ROOT
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "sqrt<mode>2"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_FDIV"
|
|
|
|
|
+{
|
|
|
|
|
+ return "fsqrt.<fmt>\t%0,%1";
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "type" "fsqrt")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; Floating point multiply accumulate instructions.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fma<mode>4"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (fma:ANYF
|
|
|
|
|
+ (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 3 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fmadd.<fmt>\t%0,%1,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fmadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fms<mode>4"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (fma:ANYF
|
|
|
|
|
+ (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")
|
|
|
|
|
+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fmsub.<fmt>\t%0,%1,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fmadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "nfma<mode>4"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (neg:ANYF
|
|
|
|
|
+ (fma:ANYF
|
|
|
|
|
+ (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 3 "register_operand" "f"))))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fnmadd.<fmt>\t%0,%1,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fmadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "nfms<mode>4"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (neg:ANYF
|
|
|
|
|
+ (fma:ANYF
|
|
|
|
|
+ (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")
|
|
|
|
|
+ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fnmsub.<fmt>\t%0,%1,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fmadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; modulo signed zeros, -(a*b+c) == -c-a*b
|
|
|
|
|
+(define_insn "*nfma<mode>4_fastmath"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (minus:ANYF
|
|
|
|
|
+ (match_operand:ANYF 3 "register_operand" "f")
|
|
|
|
|
+ (mult:ANYF
|
|
|
|
|
+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f"))))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
|
|
|
|
|
+ "fnmadd.<fmt>\t%0,%1,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fmadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; modulo signed zeros, -(a*b-c) == c-a*b
|
|
|
|
|
+(define_insn "*nfms<mode>4_fastmath"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (minus:ANYF
|
|
|
|
|
+ (match_operand:ANYF 3 "register_operand" "f")
|
|
|
|
|
+ (mult:ANYF
|
|
|
|
|
+ (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f"))))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
|
|
|
|
|
+ "fnmsub.<fmt>\t%0,%1,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fmadd")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; ABSOLUTE VALUE
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "abs<mode>2"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fabs.<fmt>\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "fmove")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; MIN/MAX
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "smin<mode>3"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fmin.<fmt>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "fmove")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "smax<mode>3"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
|
|
|
|
|
+ (match_operand:ANYF 2 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fmax.<fmt>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "fmove")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; NEGATION and ONE'S COMPLEMENT '
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "neg<mode>2"
|
|
|
|
|
+ [(set (match_operand:ANYF 0 "register_operand" "=f")
|
|
|
|
|
+ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fneg.<fmt>\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "fmove")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "one_cmpl<mode>2"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand" "=r")
|
|
|
|
|
+ (not:GPR (match_operand:GPR 1 "register_operand" "r")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "not\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "logical")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; LOGICAL
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "and<mode>3"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
|
|
|
|
|
+ (and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
|
|
|
|
|
+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "and\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "logical")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "ior<mode>3"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
|
|
|
|
|
+ (ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
|
|
|
|
|
+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "or\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "logical")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "xor<mode>3"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
|
|
|
|
|
+ (xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
|
|
|
|
|
+ (match_operand:GPR 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "xor\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "logical")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; TRUNCATION
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "truncdfsf2"
|
|
|
|
|
+ [(set (match_operand:SF 0 "register_operand" "=f")
|
|
|
|
|
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.s.d\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "cnv_mode" "D2S")
|
|
|
|
|
+ (set_attr "mode" "SF")])
|
|
|
|
|
+
|
|
|
|
|
+;; Integer truncation patterns. Truncating to HImode/QImode is a no-op.
|
|
|
|
|
+;; Truncating from DImode to SImode is not, because we always keep SImode
|
|
|
|
|
+;; values sign-extended in a register so we can safely use DImode branches
|
|
|
|
|
+;; and comparisons on SImode values.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "truncdisi2"
|
|
|
|
|
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
|
|
|
|
|
+ (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "@
|
|
|
|
|
+ sext.w\t%0,%1
|
|
|
|
|
+ sw\t%1,%0"
|
|
|
|
|
+ [(set_attr "move_type" "arith,store")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+;; Combiner patterns to optimize shift/truncate combinations.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*ashr_trunc<mode>"
|
|
|
|
|
+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:SUBDI
|
|
|
|
|
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "const_arith_operand" ""))))]
|
|
|
|
|
+ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
|
|
|
|
|
+ "sra\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*lshr32_trunc<mode>"
|
|
|
|
|
+ [(set (match_operand:SUBDI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:SUBDI
|
|
|
|
|
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (const_int 32))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "sra\t%0,%1,32"
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; ZERO EXTENSION
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Extension insns.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "zero_extendsidi2"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "@
|
|
|
|
|
+ #
|
|
|
|
|
+ lwu\t%0,%1"
|
|
|
|
|
+ "&& reload_completed && REG_P (operands[1])"
|
|
|
|
|
+ [(set (match_dup 0)
|
|
|
|
|
+ (ashift:DI (match_dup 1) (const_int 32)))
|
|
|
|
|
+ (set (match_dup 0)
|
|
|
|
|
+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
|
|
|
|
|
+ { operands[1] = gen_lowpart (DImode, operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "shift_shift,load")
|
|
|
|
|
+ (set_attr "mode" "DI")])
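Editorial sketch of the two alternatives above: the memory form is a single
lwu, while the register form is split after reload into a shift pair
(registers illustrative):

	slli	a0,a0,32	# move the low 32 bits to the top of the register
	srli	a0,a0,32	# shift back down, filling the upper 32 bits with zeros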
|
|
|
|
|
+
|
|
|
|
|
+;; Combine is not allowed to convert this insn into a zero_extendsidi2
|
|
|
|
|
+;; because of TRULY_NOOP_TRUNCATION.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "*clear_upper32"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
|
|
|
|
|
+ (const_int 4294967295)))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+{
|
|
|
|
|
+ if (which_alternative == 0)
|
|
|
|
|
+ return "#";
|
|
|
|
|
+
|
|
|
|
|
+ operands[1] = gen_lowpart (SImode, operands[1]);
|
|
|
|
|
+ return "lwu\t%0,%1";
|
|
|
|
|
+}
|
|
|
|
|
+ "&& reload_completed && REG_P (operands[1])"
|
|
|
|
|
+ [(set (match_dup 0)
|
|
|
|
|
+ (ashift:DI (match_dup 1) (const_int 32)))
|
|
|
|
|
+ (set (match_dup 0)
|
|
|
|
|
+ (lshiftrt:DI (match_dup 0) (const_int 32)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ [(set_attr "move_type" "shift_shift,load")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "zero_extendhi<GPR:mode>2"
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand" "=r,r")
|
|
|
|
|
+ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "@
|
|
|
|
|
+ #
|
|
|
|
|
+ lhu\t%0,%1"
|
|
|
|
|
+ "&& reload_completed && REG_P (operands[1])"
|
|
|
|
|
+ [(set (match_dup 0)
|
|
|
|
|
+ (ashift:GPR (match_dup 1) (match_dup 2)))
|
|
|
|
|
+ (set (match_dup 0)
|
|
|
|
|
+ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
|
|
|
|
|
+ {
|
|
|
|
|
+ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
|
|
|
|
|
+ operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
|
|
|
|
|
+ }
|
|
|
|
|
+ [(set_attr "move_type" "shift_shift,load")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "zero_extendqi<SUPERQI:mode>2"
|
|
|
|
|
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (zero_extend:SUPERQI
|
|
|
|
|
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "@
|
|
|
|
|
+ and\t%0,%1,0xff
|
|
|
|
|
+ lbu\t%0,%1"
|
|
|
|
|
+ [(set_attr "move_type" "andi,load")
|
|
|
|
|
+ (set_attr "mode" "<SUPERQI:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; SIGN EXTENSION
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Extension insns.
|
|
|
|
|
+;; Those for integer source operand are ordered widest source type first.
|
|
|
|
|
+
|
|
|
|
|
+;; When TARGET_64BIT, all SImode integer registers should already be in
|
|
|
|
|
+;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2). We can
|
|
|
|
|
+;; therefore get rid of register->register instructions if we constrain
|
|
|
|
|
+;; the source to be in the same register as the destination.
|
|
|
|
|
+;;
|
|
|
|
|
+;; The register alternative has type "arith" so that the pre-reload
|
|
|
|
|
+;; scheduler will treat it as a move. This reflects what happens if
|
|
|
|
|
+;; the register alternative needs a reload.
|
|
|
|
|
+(define_insn_and_split "extendsidi2"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "@
|
|
|
|
|
+ #
|
|
|
|
|
+ lw\t%0,%1"
|
|
|
|
|
+ "&& reload_completed && register_operand (operands[1], VOIDmode)"
|
|
|
|
|
+ [(set (match_dup 0) (match_dup 1))]
|
|
|
|
|
+{
|
|
|
|
|
+ if (REGNO (operands[0]) == REGNO (operands[1]))
|
|
|
|
|
+ {
|
|
|
|
|
+ emit_note (NOTE_INSN_DELETED);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+ }
|
|
|
|
|
+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "move_type" "move,load")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
|
|
|
|
|
+ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (sign_extend:SUPERQI
|
|
|
|
|
+ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "@
|
|
|
|
|
+ #
|
|
|
|
|
+ l<SHORT:size>\t%0,%1"
|
|
|
|
|
+ "&& reload_completed && REG_P (operands[1])"
|
|
|
|
|
+ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
|
|
|
|
|
+ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
|
|
|
|
|
+{
|
|
|
|
|
+ operands[0] = gen_lowpart (SImode, operands[0]);
|
|
|
|
|
+ operands[1] = gen_lowpart (SImode, operands[1]);
|
|
|
|
|
+ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
|
|
|
|
|
+ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "move_type" "shift_shift,load")
|
|
|
|
|
+ (set_attr "mode" "SI")])
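For illustration only (assumed behaviour, not taken from the patch), the shift-pair split above corresponds to C code such as:

    /* No dedicated byte/halfword sign-extend instruction in the base ISA,
       so the value is shifted left and then arithmetic-shifted right.  */
    int widen_short (short x)
    {
      return x;                /* expected: left shift by 16, then arithmetic right shift by 16 */
    }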
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "extendsfdf2"
|
|
|
|
|
+ [(set (match_operand:DF 0 "register_operand" "=f")
|
|
|
|
|
+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.d.s\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "cnv_mode" "S2D")
|
|
|
|
|
+ (set_attr "mode" "DF")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; CONVERSIONS
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fix_truncdfsi2"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.w.d %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "D2I")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fix_truncsfsi2"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.w.s %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "S2I")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fix_truncdfdi2"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.l.d %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "D2I")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fix_truncsfdi2"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.l.s %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "S2I")])
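A small C sketch (illustrative, not part of the patch) of why the fix patterns above request the rtz rounding mode explicitly:

    /* C float-to-integer conversion truncates toward zero, hence rtz.  */
    int cast_to_int (double d)
    {
      return (int) d;          /* roughly: fcvt.w.d dest,src,rtz */
    }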
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatsidf2"
|
|
|
|
|
+ [(set (match_operand:DF 0 "register_operand" "=f")
|
|
|
|
|
+ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.d.w\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2D")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatdidf2"
|
|
|
|
|
+ [(set (match_operand:DF 0 "register_operand" "=f")
|
|
|
|
|
+ (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.d.l\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2D")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatsisf2"
|
|
|
|
|
+ [(set (match_operand:SF 0 "register_operand" "=f")
|
|
|
|
|
+ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.s.w\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2S")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatdisf2"
|
|
|
|
|
+ [(set (match_operand:SF 0 "register_operand" "=f")
|
|
|
|
|
+ (float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.s.l\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2S")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatunssidf2"
|
|
|
|
|
+ [(set (match_operand:DF 0 "register_operand" "=f")
|
|
|
|
|
+ (unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.d.wu\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2D")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatunsdidf2"
|
|
|
|
|
+ [(set (match_operand:DF 0 "register_operand" "=f")
|
|
|
|
|
+ (unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.d.lu\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2D")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatunssisf2"
|
|
|
|
|
+ [(set (match_operand:SF 0 "register_operand" "=f")
|
|
|
|
|
+ (unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.s.wu\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2S")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "floatunsdisf2"
|
|
|
|
|
+ [(set (match_operand:SF 0 "register_operand" "=f")
|
|
|
|
|
+ (unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.s.lu\t%0,%z1"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "I2S")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fixuns_truncdfsi2"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.wu.d %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "D2I")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fixuns_truncsfsi2"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "fcvt.wu.s %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "S2I")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fixuns_truncdfdi2"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.lu.d %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "DF")
|
|
|
|
|
+ (set_attr "cnv_mode" "D2I")])
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fixuns_truncsfdi2"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT && TARGET_64BIT"
|
|
|
|
|
+ "fcvt.lu.s %0,%1,rtz"
|
|
|
|
|
+ [(set_attr "type" "fcvt")
|
|
|
|
|
+ (set_attr "mode" "SF")
|
|
|
|
|
+ (set_attr "cnv_mode" "S2I")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; DATA MOVEMENT
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Lower-level instructions for loading an address from the GOT.
|
|
|
|
|
+;; We could use MEMs, but an unspec gives more optimization
|
|
|
|
|
+;; opportunities.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "got_load<mode>"
|
|
|
|
|
+ [(set (match_operand:P 0 "register_operand" "=r")
|
|
|
|
|
+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
|
|
|
|
|
+ UNSPEC_LOAD_GOT))]
|
|
|
|
|
+ "flag_pic"
|
|
|
|
|
+ "la\t%0,%1"
|
|
|
|
|
+ [(set_attr "got" "load")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
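Hypothetical usage (not in the patch): with -fPIC, taking the address of a global goes through the GOT via the la pseudo emitted by got_load above.

    extern int counter;        /* assumed example symbol */
    int *counter_addr (void)
    {
      return &counter;         /* expected with -fPIC: la a0,counter (a GOT load) */
    }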
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "tls_add_tp_le<mode>"
|
|
|
|
|
+ [(set (match_operand:P 0 "register_operand" "=r")
|
|
|
|
|
+ (unspec:P [(match_operand:P 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:P 2 "register_operand" "r")
|
|
|
|
|
+ (match_operand:P 3 "symbolic_operand" "")]
|
|
|
|
|
+ UNSPEC_TLS_LE))]
|
|
|
|
|
+ "!flag_pic || flag_pie"
|
|
|
|
|
+ "add\t%0,%1,%2,%%tprel_add(%3)"
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "got_load_tls_gd<mode>"
|
|
|
|
|
+ [(set (match_operand:P 0 "register_operand" "=r")
|
|
|
|
|
+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
|
|
|
|
|
+ UNSPEC_TLS_GD))]
|
|
|
|
|
+ "flag_pic"
|
|
|
|
|
+ "la.tls.gd\t%0,%1"
|
|
|
|
|
+ [(set_attr "got" "load")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "got_load_tls_ie<mode>"
|
|
|
|
|
+ [(set (match_operand:P 0 "register_operand" "=r")
|
|
|
|
|
+ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
|
|
|
|
|
+ UNSPEC_TLS_IE))]
|
|
|
|
|
+ "flag_pic"
|
|
|
|
|
+ "la.tls.ie\t%0,%1"
|
|
|
|
|
+ [(set_attr "got" "load")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; Instructions for adding the low 12 bits of an address to a register.
|
|
|
|
|
+;; Operand 2 is the address: riscv_print_operand works out which relocation
|
|
|
|
|
+;; should be applied.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*low<mode>"
|
|
|
|
|
+ [(set (match_operand:P 0 "register_operand" "=r")
|
|
|
|
|
+ (lo_sum:P (match_operand:P 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:P 2 "immediate_operand" "")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "add\t%0,%1,%R2"
|
|
|
|
|
+ [(set_attr "alu_type" "add")
|
|
|
|
|
+ (set_attr "mode" "<MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; Allow combine to split complex const_int load sequences, using operand 2
|
|
|
|
|
+;; to store the intermediate results. See move_operand for details.
|
|
|
|
|
+(define_split
|
|
|
|
|
+ [(set (match_operand:GPR 0 "register_operand")
|
|
|
|
|
+ (match_operand:GPR 1 "splittable_const_int_operand"))
|
|
|
|
|
+ (clobber (match_operand:GPR 2 "register_operand"))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ [(const_int 0)]
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
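A rough example (an assumption, not from the patch) of the kind of constant riscv_move_integer builds in pieces, which the split above exposes to combine:

    /* A 32-bit constant that does not fit one instruction is synthesized
       as an upper-immediate load plus a 12-bit addition.  */
    long big_constant (void)
    {
      return 0x12345678;       /* roughly: lui (0x12345000) + addi (0x678) */
    }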
|
|
|
|
|
+
|
|
|
|
|
+;; Likewise, for symbolic operands.
|
|
|
|
|
+(define_split
|
|
|
|
|
+ [(set (match_operand:P 0 "register_operand")
|
|
|
|
|
+ (match_operand:P 1))
|
|
|
|
|
+ (clobber (match_operand:P 2 "register_operand"))]
|
|
|
|
|
+ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
|
|
|
|
|
+ [(set (match_dup 0) (match_dup 3))]
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_split_symbol (operands[2], operands[1],
|
|
|
|
|
+ MAX_MACHINE_MODE, &operands[3]);
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; 64-bit integer moves
|
|
|
|
|
+
|
|
|
|
|
+;; Unlike most other insns, the move insns can't be split with
|
|
|
|
|
+;; different predicates, because register spilling and other parts of
|
|
|
|
|
+;; the compiler, have memoized the insn number already.
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movdi"
|
|
|
|
|
+ [(set (match_operand:DI 0 "")
|
|
|
|
|
+ (match_operand:DI 1 ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movdi_32bit"
|
|
|
|
|
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
|
|
|
|
|
+ (match_operand:DI 1 "move_operand" "r,i,m,r,*J*r,*m,*f,*f"))]
|
|
|
|
|
+ "!TARGET_64BIT
|
|
|
|
|
+ && (register_operand (operands[0], DImode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], DImode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movdi_64bit"
|
|
|
|
|
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
|
|
|
|
|
+ (match_operand:DI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
|
|
|
|
|
+ "TARGET_64BIT
|
|
|
|
|
+ && (register_operand (operands[0], DImode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], DImode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+;; 32-bit Integer moves
|
|
|
|
|
+
|
|
|
|
|
+;; Unlike most other insns, the move insns can't be split with
|
|
|
|
|
+;; different predicates, because register spilling and other parts of
|
|
|
|
|
+;; the compiler, have memoized the insn number already.
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "mov<mode>"
|
|
|
|
|
+ [(set (match_operand:IMOVE32 0 "")
|
|
|
|
|
+ (match_operand:IMOVE32 1 ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*mov<mode>_internal"
|
|
|
|
|
+ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
|
|
|
|
|
+ (match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
|
|
|
|
|
+ "(register_operand (operands[0], <MODE>mode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], <MODE>mode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+;; 16-bit Integer moves
|
|
|
|
|
+
|
|
|
|
|
+;; Unlike most other insns, the move insns can't be split with
|
|
|
|
|
+;; different predicates, because register spilling and other parts of
|
|
|
|
|
+;; the compiler, have memoized the insn number already.
|
|
|
|
|
+;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movhi"
|
|
|
|
|
+ [(set (match_operand:HI 0 "")
|
|
|
|
|
+ (match_operand:HI 1 ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movhi_internal"
|
|
|
|
|
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
|
|
|
|
|
+ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
|
|
|
|
|
+ "(register_operand (operands[0], HImode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], HImode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
|
|
|
|
|
+ (set_attr "mode" "HI")])
|
|
|
|
|
+
|
|
|
|
|
+;; HImode constant generation; see riscv_move_integer for details.
|
|
|
|
|
+;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "add<mode>hi3"
|
|
|
|
|
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
|
|
|
|
|
+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
|
|
|
|
|
+ [(set_attr "type" "arith")
|
|
|
|
|
+ (set_attr "mode" "HI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "xor<mode>hi3"
|
|
|
|
|
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
|
|
|
|
|
+ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
|
|
|
|
|
+ (match_operand:HISI 2 "arith_operand" "r,Q")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "xor\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "logical")
|
|
|
|
|
+ (set_attr "mode" "HI")])
|
|
|
|
|
+
|
|
|
|
|
+;; 8-bit Integer moves
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movqi"
|
|
|
|
|
+ [(set (match_operand:QI 0 "")
|
|
|
|
|
+ (match_operand:QI 1 ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movqi_internal"
|
|
|
|
|
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
|
|
|
|
|
+ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
|
|
|
|
|
+ "(register_operand (operands[0], QImode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], QImode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
|
|
|
|
|
+ (set_attr "mode" "QI")])
|
|
|
|
|
+
|
|
|
|
|
+;; 32-bit floating point moves
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movsf"
|
|
|
|
|
+ [(set (match_operand:SF 0 "")
|
|
|
|
|
+ (match_operand:SF 1 ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movsf_hardfloat"
|
|
|
|
|
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
|
|
|
|
|
+ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT
|
|
|
|
|
+ && (register_operand (operands[0], SFmode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], SFmode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
|
|
|
|
|
+ (set_attr "mode" "SF")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movsf_softfloat"
|
|
|
|
|
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
|
|
|
|
|
+ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
|
|
|
|
|
+ "TARGET_SOFT_FLOAT
|
|
|
|
|
+ && (register_operand (operands[0], SFmode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], SFmode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,load,store")
|
|
|
|
|
+ (set_attr "mode" "SF")])
|
|
|
|
|
+
|
|
|
|
|
+;; 64-bit floating point moves
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movdf"
|
|
|
|
|
+ [(set (match_operand:DF 0 "")
|
|
|
|
|
+ (match_operand:DF 1 ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; In RV32, there is no direct FPR<->GPR move for 64-bit values.  Go through memory instead.
|
|
|
|
|
+;; (except for moving a constant 0 to an FPR; for that we use fcvt.d.w.)
|
|
|
|
|
+(define_insn "*movdf_hardfloat_rv32"
|
|
|
|
|
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
|
|
|
|
|
+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
|
|
|
|
|
+ "!TARGET_64BIT && TARGET_HARD_FLOAT
|
|
|
|
|
+ && (register_operand (operands[0], DFmode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], DFmode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
|
|
|
|
|
+ (set_attr "mode" "DF")])
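Illustrative only, under the assumption stated in the comment above: on RV32, moving 64-bit FP data between x- and f-registers has to bounce through the stack.

    /* Reinterpreting a 64-bit integer as a double: the two GPR halves are
       stored to memory and reloaded with a 64-bit FP load.  */
    double bits_to_double (unsigned long long bits)
    {
      union { unsigned long long u; double d; } v;
      v.u = bits;
      return v.d;              /* RV32: two word stores to the stack, then fld */
    }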
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movdf_hardfloat_rv64"
|
|
|
|
|
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
|
|
|
|
|
+ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
|
|
|
|
|
+ "TARGET_64BIT && TARGET_HARD_FLOAT
|
|
|
|
|
+ && (register_operand (operands[0], DFmode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], DFmode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
|
|
|
|
|
+ (set_attr "mode" "DF")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movdf_softfloat"
|
|
|
|
|
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
|
|
|
|
|
+ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
|
|
|
|
|
+ "TARGET_SOFT_FLOAT
|
|
|
|
|
+ && (register_operand (operands[0], DFmode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], DFmode))"
|
|
|
|
|
+ { return riscv_output_move (operands[0], operands[1]); }
|
|
|
|
|
+ [(set_attr "move_type" "move,load,store")
|
|
|
|
|
+ (set_attr "mode" "DF")])
|
|
|
|
|
+
|
|
|
|
|
+;; 128-bit integer moves
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movti"
|
|
|
|
|
+ [(set (match_operand:TI 0)
|
|
|
|
|
+ (match_operand:TI 1))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_legitimize_move (TImode, operands[0], operands[1]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*movti"
|
|
|
|
|
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
|
|
|
|
|
+ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
|
|
|
|
|
+ "TARGET_64BIT
|
|
|
|
|
+ && (register_operand (operands[0], TImode)
|
|
|
|
|
+ || reg_or_0_operand (operands[1], TImode))"
|
|
|
|
|
+ "#"
|
|
|
|
|
+ [(set_attr "move_type" "move,const,load,store")
|
|
|
|
|
+ (set_attr "mode" "TI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_split
|
|
|
|
|
+ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
|
|
|
|
|
+ (match_operand:MOVE64 1 "move_operand"))]
|
|
|
|
|
+ "reload_completed && !TARGET_64BIT
|
|
|
|
|
+ && riscv_split_64bit_move_p (operands[0], operands[1])"
|
|
|
|
|
+ [(const_int 0)]
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_split_doubleword_move (operands[0], operands[1]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_split
|
|
|
|
|
+ [(set (match_operand:MOVE128 0 "nonimmediate_operand")
|
|
|
|
|
+ (match_operand:MOVE128 1 "move_operand"))]
|
|
|
|
|
+ "TARGET_64BIT && reload_completed"
|
|
|
|
|
+ [(const_int 0)]
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_split_doubleword_move (operands[0], operands[1]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; 64-bit floating point moves split into 32-bit words
|
|
|
|
|
+
|
|
|
|
|
+;; Load the low word of operand 0 with operand 1.
|
|
|
|
|
+(define_insn "load_low<mode>"
|
|
|
|
|
+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
|
|
|
|
|
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
|
|
|
|
|
+ UNSPEC_LOAD_LOW))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+{
|
|
|
|
|
+ operands[0] = riscv_subword (operands[0], 0);
|
|
|
|
|
+ return riscv_output_move (operands[0], operands[1]);
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "move_type" "mtc,fpload")
|
|
|
|
|
+ (set_attr "mode" "<HALFMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; Load the high word of operand 0 from operand 1, preserving the value
|
|
|
|
|
+;; in the low word.
|
|
|
|
|
+(define_insn "load_high<mode>"
|
|
|
|
|
+ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
|
|
|
|
|
+ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
|
|
|
|
|
+ (match_operand:SPLITF 2 "register_operand" "0,0")]
|
|
|
|
|
+ UNSPEC_LOAD_HIGH))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+{
|
|
|
|
|
+ operands[0] = riscv_subword (operands[0], 1);
|
|
|
|
|
+ return riscv_output_move (operands[0], operands[1]);
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "move_type" "mtc,fpload")
|
|
|
|
|
+ (set_attr "mode" "<HALFMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
|
|
|
|
|
+;; high word and 0 to store the low word.
|
|
|
|
|
+(define_insn "store_word<mode>"
|
|
|
|
|
+ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
|
|
|
|
|
+ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
|
|
|
|
|
+ (match_operand 2 "const_int_operand")]
|
|
|
|
|
+ UNSPEC_STORE_WORD))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+{
|
|
|
|
|
+ operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
|
|
|
|
|
+ return riscv_output_move (operands[0], operands[1]);
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "move_type" "mfc,fpstore")
|
|
|
|
|
+ (set_attr "mode" "<HALFMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;; Expand in-line code to clear the instruction cache between operand[0] and
|
|
|
|
|
+;; operand[1].
|
|
|
|
|
+(define_expand "clear_cache"
|
|
|
|
|
+ [(match_operand 0 "pmode_register_operand")
|
|
|
|
|
+ (match_operand 1 "pmode_register_operand")]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "
|
|
|
|
|
+{
|
|
|
|
|
+ emit_insn(gen_fence_i());
|
|
|
|
|
+ DONE;
|
|
|
|
|
+}")
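A minimal sketch (not part of the patch) of the built-in this expander serves:

    void flush_code_range (char *begin, char *end)
    {
      /* The expander above ignores the range and emits a single fence.i.  */
      __builtin___clear_cache (begin, end);
    }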
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fence"
|
|
|
|
|
+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "%|fence%-")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "fence_i"
|
|
|
|
|
+ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "fence.i")
|
|
|
|
|
+
|
|
|
|
|
+;; Block moves, see riscv.c for more details.
|
|
|
|
|
+;; Argument 0 is the destination
|
|
|
|
|
+;; Argument 1 is the source
|
|
|
|
|
+;; Argument 2 is the length
|
|
|
|
|
+;; Argument 3 is the alignment
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "movmemsi"
|
|
|
|
|
+ [(parallel [(set (match_operand:BLK 0 "general_operand")
|
|
|
|
|
+ (match_operand:BLK 1 "general_operand"))
|
|
|
|
|
+ (use (match_operand:SI 2 ""))
|
|
|
|
|
+ (use (match_operand:SI 3 "const_int_operand"))])]
|
|
|
|
|
+ "!TARGET_MEMCPY"
|
|
|
|
|
+{
|
|
|
|
|
+ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
|
|
|
|
|
+ DONE;
|
|
|
|
|
+ else
|
|
|
|
|
+ FAIL;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; SHIFTS
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<optab>si3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:SI 2 "arith_operand" "rI")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (GET_CODE (operands[2]) == CONST_INT)
|
|
|
|
|
+ operands[2] = GEN_INT (INTVAL (operands[2])
|
|
|
|
|
+ & (GET_MODE_BITSIZE (SImode) - 1));
|
|
|
|
|
+
|
|
|
|
|
+ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "SI")])
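For illustration (assumed, not from the patch), the masking of constant shift counts above mirrors what the hardware does for SImode shifts:

    unsigned int shift_left (unsigned int x, int n)
    {
      return x << (n & 31);    /* RV64: sllw a0,a0,a1; RV32: sll a0,a0,a1 */
    }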
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*<optab>disi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
|
|
|
|
|
+ (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "<insn>w\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*ashldi3_truncsi"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (truncate:SI
|
|
|
|
|
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "const_arith_operand" "I"))))]
|
|
|
|
|
+ "TARGET_64BIT && INTVAL (operands[2]) < 32"
|
|
|
|
|
+ "sllw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*ashldisi3"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (ashift:SI (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:GPR2 2 "arith_operand" "rI")))]
|
|
|
|
|
+ "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
|
|
|
|
|
+ "sllw\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<optab>di3"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:DI 2 "arith_operand" "rI")))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+{
|
|
|
|
|
+ if (GET_CODE (operands[2]) == CONST_INT)
|
|
|
|
|
+ operands[2] = GEN_INT (INTVAL (operands[2])
|
|
|
|
|
+ & (GET_MODE_BITSIZE (DImode) - 1));
|
|
|
|
|
+
|
|
|
|
|
+ return "<insn>\t%0,%1,%2";
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "DI")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "<optab>si3_extend"
|
|
|
|
|
+ [(set (match_operand:DI 0 "register_operand" "=r")
|
|
|
|
|
+ (sign_extend:DI
|
|
|
|
|
+ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:SI 2 "arith_operand" "rI"))))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+{
|
|
|
|
|
+ if (GET_CODE (operands[2]) == CONST_INT)
|
|
|
|
|
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
|
|
|
|
|
+
|
|
|
|
|
+ return "<insn>w\t%0,%1,%2";
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "type" "shift")
|
|
|
|
|
+ (set_attr "mode" "SI")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; CONDITIONAL BRANCHES
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Conditional branches
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*branch_order<mode>"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (if_then_else
|
|
|
|
|
+ (match_operator 1 "order_operator"
|
|
|
|
|
+ [(match_operand:GPR 2 "register_operand" "r")
|
|
|
|
|
+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
|
|
|
|
|
+ (label_ref (match_operand 0 "" ""))
|
|
|
|
|
+ (pc)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (GET_CODE (operands[3]) == CONST_INT)
|
|
|
|
|
+ return "b%C1z\t%2,%0";
|
|
|
|
|
+ return "b%C1\t%2,%3,%0";
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "type" "branch")
|
|
|
|
|
+ (set_attr "mode" "none")])
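A hedged C illustration (not in the patch) of the fused compare-and-branch form above:

    extern void hit (void);    /* assumed helper */
    void branch_example (long a, long b)
    {
      if (a < b)               /* expected: blt a0,a1,<label>; no separate compare insn */
        hit ();
    }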
|
|
|
|
|
+
|
|
|
|
|
+;; Used to implement built-in functions.
|
|
|
|
|
+(define_expand "condjump"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (if_then_else (match_operand 0)
|
|
|
|
|
+ (label_ref (match_operand 1))
|
|
|
|
|
+ (pc)))])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "cbranch<mode>4"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (if_then_else (match_operator 0 "comparison_operator"
|
|
|
|
|
+ [(match_operand:GPR 1 "register_operand")
|
|
|
|
|
+ (match_operand:GPR 2 "nonmemory_operand")])
|
|
|
|
|
+ (label_ref (match_operand 3 ""))
|
|
|
|
|
+ (pc)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_conditional_branch (operands);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "cbranch<mode>4"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (if_then_else (match_operator 0 "comparison_operator"
|
|
|
|
|
+ [(match_operand:SCALARF 1 "register_operand")
|
|
|
|
|
+ (match_operand:SCALARF 2 "register_operand")])
|
|
|
|
|
+ (label_ref (match_operand 3 ""))
|
|
|
|
|
+ (pc)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_conditional_branch (operands);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "*branch_on_bit<GPR:mode>"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (if_then_else
|
|
|
|
|
+ (match_operator 0 "equality_operator"
|
|
|
|
|
+ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
|
|
|
|
|
+ (const_int 1)
|
|
|
|
|
+ (match_operand 3 "branch_on_bit_operand"))
|
|
|
|
|
+ (const_int 0)])
|
|
|
|
|
+ (label_ref (match_operand 1))
|
|
|
|
|
+ (pc)))
|
|
|
|
|
+ (clobber (match_scratch:GPR 4 "=&r"))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "#"
|
|
|
|
|
+ "reload_completed"
|
|
|
|
|
+ [(set (match_dup 4)
|
|
|
|
|
+ (ashift:GPR (match_dup 2) (match_dup 3)))
|
|
|
|
|
+ (set (pc)
|
|
|
|
|
+ (if_then_else
|
|
|
|
|
+ (match_op_dup 0 [(match_dup 4) (const_int 0)])
|
|
|
|
|
+ (label_ref (match_operand 1))
|
|
|
|
|
+ (pc)))]
|
|
|
|
|
+{
|
|
|
|
|
+ int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
|
|
|
|
|
+ operands[3] = GEN_INT (shift);
|
|
|
|
|
+
|
|
|
|
|
+ if (GET_CODE (operands[0]) == EQ)
|
|
|
|
|
+ operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
|
|
|
|
|
+ else
|
|
|
|
|
+ operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (if_then_else
|
|
|
|
|
+ (match_operator 0 "equality_operator"
|
|
|
|
|
+ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
|
|
|
|
|
+ (match_operand 3 "branch_on_bit_operand")
|
|
|
|
|
+ (const_int 0))
|
|
|
|
|
+ (const_int 0)])
|
|
|
|
|
+ (label_ref (match_operand 1))
|
|
|
|
|
+ (pc)))
|
|
|
|
|
+ (clobber (match_scratch:GPR 4 "=&r"))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "#"
|
|
|
|
|
+ "reload_completed"
|
|
|
|
|
+ [(set (match_dup 4)
|
|
|
|
|
+ (ashift:GPR (match_dup 2) (match_dup 3)))
|
|
|
|
|
+ (set (pc)
|
|
|
|
|
+ (if_then_else
|
|
|
|
|
+ (match_op_dup 0 [(match_dup 4) (const_int 0)])
|
|
|
|
|
+ (label_ref (match_operand 1))
|
|
|
|
|
+ (pc)))]
|
|
|
|
|
+{
|
|
|
|
|
+ operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; SETTING A REGISTER FROM A COMPARISON
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Destination is always set in SI mode.
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "cstore<mode>4"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand")
|
|
|
|
|
+ (match_operator:SI 1 "order_operator"
|
|
|
|
|
+ [(match_operand:GPR 2 "register_operand")
|
|
|
|
|
+ (match_operand:GPR 3 "nonmemory_operand")]))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_scc (operands);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "cstore<mode>4"
|
|
|
|
|
+ [(set (match_operand:SI 0 "register_operand" "=r")
|
|
|
|
|
+ (match_operator:SI 1 "fp_order_operator"
|
|
|
|
|
+ [(match_operand:SCALARF 2 "register_operand" "f")
|
|
|
|
|
+ (match_operand:SCALARF 3 "register_operand" "f")]))]
|
|
|
|
|
+ "TARGET_HARD_FLOAT"
|
|
|
|
|
+ "f%C1.<fmt>\t%0,%2,%3"
|
|
|
|
|
+ [(set_attr "type" "fcmp")
|
|
|
|
|
+ (set_attr "mode" "<UNITMODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
|
|
|
|
|
+ [(set (match_operand:GPR2 0 "register_operand" "=r")
|
|
|
|
|
+ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (const_int 0)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "seqz\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "slt")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
|
|
|
|
|
+ [(set (match_operand:GPR2 0 "register_operand" "=r")
|
|
|
|
|
+ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (const_int 0)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "snez\t%0,%1"
|
|
|
|
|
+ [(set_attr "type" "slt")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
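Sketch only (not part of the patch): equality tests against zero map directly onto the single-instruction patterns above.

    int is_zero (long x)     { return x == 0; }   /* seqz a0,a0 */
    int is_nonzero (long x)  { return x != 0; }   /* snez a0,a0 */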
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
|
|
|
|
|
+ [(set (match_operand:GPR2 0 "register_operand" "=r")
|
|
|
|
|
+ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "slt<u>\t%0,%z2,%1"
|
|
|
|
|
+ [(set_attr "type" "slt")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
|
|
|
|
|
+ [(set (match_operand:GPR2 0 "register_operand" "=r")
|
|
|
|
|
+ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (const_int 1)))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "slt<u>\t%0,zero,%1"
|
|
|
|
|
+ [(set_attr "type" "slt")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
|
|
|
|
|
+ [(set (match_operand:GPR2 0 "register_operand" "=r")
|
|
|
|
|
+ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:GPR 2 "arith_operand" "rI")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "slt<u>\t%0,%1,%2"
|
|
|
|
|
+ [(set_attr "type" "slt")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
|
|
|
|
|
+ [(set (match_operand:GPR2 0 "register_operand" "=r")
|
|
|
|
|
+ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
|
|
|
|
|
+ (match_operand:GPR 2 "sle_operand" "")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
|
|
|
|
|
+ return "slt<u>\t%0,%1,%2";
|
|
|
|
|
+}
|
|
|
|
|
+ [(set_attr "type" "slt")
|
|
|
|
|
+ (set_attr "mode" "<GPR:MODE>")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; UNCONDITIONAL BRANCHES
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Unconditional branches.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "jump"
|
|
|
|
|
+ [(set (pc)
|
|
|
|
|
+ (label_ref (match_operand 0 "" "")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "j\t%l0"
|
|
|
|
|
+ [(set_attr "type" "jump")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "indirect_jump"
|
|
|
|
|
+ [(set (pc) (match_operand 0 "register_operand"))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ operands[0] = force_reg (Pmode, operands[0]);
|
|
|
|
|
+ if (Pmode == SImode)
|
|
|
|
|
+ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
|
|
|
|
|
+ else
|
|
|
|
|
+ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "indirect_jump<mode>"
|
|
|
|
|
+ [(set (pc) (match_operand:P 0 "register_operand" "r"))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "jr\t%0"
|
|
|
|
|
+ [(set_attr "type" "jump")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "tablejump"
|
|
|
|
|
+ [(set (pc) (match_operand 0 "register_operand" ""))
|
|
|
|
|
+ (use (label_ref (match_operand 1 "" "")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (CASE_VECTOR_PC_RELATIVE)
|
|
|
|
|
+ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
|
|
|
|
|
+ gen_rtx_LABEL_REF (Pmode, operands[1]),
|
|
|
|
|
+ NULL_RTX, 0, OPTAB_DIRECT);
|
|
|
|
|
+
|
|
|
|
|
+ if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
|
|
|
|
|
+ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
|
|
|
|
|
+ else
|
|
|
|
|
+ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "tablejump<mode>"
|
|
|
|
|
+ [(set (pc) (match_operand:GPR 0 "register_operand" "r"))
|
|
|
|
|
+ (use (label_ref (match_operand 1 "" "")))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "jr\t%0"
|
|
|
|
|
+ [(set_attr "type" "jump")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; Function prologue/epilogue
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "prologue"
|
|
|
|
|
+ [(const_int 1)]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_prologue ();
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; Block any insns from being moved before this point, since the
|
|
|
|
|
+;; profiling call to mcount can use various registers that aren't
|
|
|
|
|
+;; saved or used to pass arguments.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "blockage"
|
|
|
|
|
+ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ ""
|
|
|
|
|
+ [(set_attr "type" "ghost")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "epilogue"
|
|
|
|
|
+ [(const_int 2)]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_epilogue (false);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "sibcall_epilogue"
|
|
|
|
|
+ [(const_int 2)]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_epilogue (true);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; Trivial return. Make it look like a normal return insn as that
|
|
|
|
|
+;; allows jump optimizations to work better.
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "return"
|
|
|
|
|
+ [(simple_return)]
|
|
|
|
|
+ "riscv_can_use_return_insn ()"
|
|
|
|
|
+ "")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "simple_return"
|
|
|
|
|
+ [(simple_return)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "ret"
|
|
|
|
|
+ [(set_attr "type" "jump")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+;; Normal return.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "simple_return_internal"
|
|
|
|
|
+ [(simple_return)
|
|
|
|
|
+ (use (match_operand 0 "pmode_register_operand" ""))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "jr\t%0"
|
|
|
|
|
+ [(set_attr "type" "jump")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+;; This is used in compiling the unwind routines.
|
|
|
|
|
+(define_expand "eh_return"
|
|
|
|
|
+ [(use (match_operand 0 "general_operand"))]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ if (GET_MODE (operands[0]) != word_mode)
|
|
|
|
|
+ operands[0] = convert_to_mode (word_mode, operands[0], 0);
|
|
|
|
|
+ if (TARGET_64BIT)
|
|
|
|
|
+ emit_insn (gen_eh_set_lr_di (operands[0]));
|
|
|
|
|
+ else
|
|
|
|
|
+ emit_insn (gen_eh_set_lr_si (operands[0]));
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; Clobber the return address on the stack. We can't expand this
|
|
|
|
|
+;; until we know where it will be put in the stack frame.
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "eh_set_lr_si"
|
|
|
|
|
+ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
|
|
|
|
|
+ (clobber (match_scratch:SI 1 "=&r"))]
|
|
|
|
|
+ "! TARGET_64BIT"
|
|
|
|
|
+ "#")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "eh_set_lr_di"
|
|
|
|
|
+ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
|
|
|
|
|
+ (clobber (match_scratch:DI 1 "=&r"))]
|
|
|
|
|
+ "TARGET_64BIT"
|
|
|
|
|
+ "#")
|
|
|
|
|
+
|
|
|
|
|
+(define_split
|
|
|
|
|
+ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
|
|
|
|
|
+ (clobber (match_scratch 1))]
|
|
|
|
|
+ "reload_completed"
|
|
|
|
|
+ [(const_int 0)]
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_set_return_address (operands[0], operands[1]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+;;
|
|
|
|
|
+;; FUNCTION CALLS
|
|
|
|
|
+;;
|
|
|
|
|
+;; ....................
|
|
|
|
|
+
|
|
|
|
|
+;; Sibling calls. All these patterns use jump instructions.
|
|
|
|
|
+
|
|
|
|
|
+;; call_insn_operand will only accept constant
|
|
|
|
|
+;; addresses if a direct jump is acceptable. Since the 'S' constraint
|
|
|
|
|
+;; is defined in terms of call_insn_operand, the same is true of the
|
|
|
|
|
+;; constraints.
|
|
|
|
|
+
|
|
|
|
|
+;; When we use an indirect jump, we need a register that will be
|
|
|
|
|
+;; preserved by the epilogue (constraint j).
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "sibcall"
|
|
|
|
|
+ [(parallel [(call (match_operand 0 "")
|
|
|
|
|
+ (match_operand 1 ""))
|
|
|
|
|
+ (use (match_operand 2 "")) ;; next_arg_reg
|
|
|
|
|
+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "sibcall_internal"
|
|
|
|
|
+ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
|
|
|
|
|
+ (match_operand 1 "" ""))]
|
|
|
|
|
+ "SIBLING_CALL_P (insn)"
|
|
|
|
|
+ { return REG_P (operands[0]) ? "jr\t%0"
|
|
|
|
|
+ : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
|
|
|
|
|
+ : "tail\t%0@"; }
|
|
|
|
|
+ [(set_attr "type" "call")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "sibcall_value"
|
|
|
|
|
+ [(parallel [(set (match_operand 0 "")
|
|
|
|
|
+ (call (match_operand 1 "")
|
|
|
|
|
+ (match_operand 2 "")))
|
|
|
|
|
+ (use (match_operand 3 ""))])] ;; next_arg_reg
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "sibcall_value_internal"
|
|
|
|
|
+ [(set (match_operand 0 "register_operand" "")
|
|
|
|
|
+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
|
|
|
|
|
+ (match_operand 2 "" "")))]
|
|
|
|
|
+ "SIBLING_CALL_P (insn)"
|
|
|
|
|
+ { return REG_P (operands[1]) ? "jr\t%1"
|
|
|
|
|
+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
|
|
|
|
|
+ : "tail\t%1@"; }
|
|
|
|
|
+ [(set_attr "type" "call")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "sibcall_value_multiple_internal"
|
|
|
|
|
+ [(set (match_operand 0 "register_operand" "")
|
|
|
|
|
+ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
|
|
|
|
|
+ (match_operand 2 "" "")))
|
|
|
|
|
+ (set (match_operand 3 "register_operand" "")
|
|
|
|
|
+ (call (mem:SI (match_dup 1))
|
|
|
|
|
+ (match_dup 2)))]
|
|
|
|
|
+ "SIBLING_CALL_P (insn)"
|
|
|
|
|
+ { return REG_P (operands[1]) ? "jr\t%1"
|
|
|
|
|
+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
|
|
|
|
|
+ : "tail\t%1@"; }
|
|
|
|
|
+ [(set_attr "type" "call")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "call"
|
|
|
|
|
+ [(parallel [(call (match_operand 0 "")
|
|
|
|
|
+ (match_operand 1 ""))
|
|
|
|
|
+ (use (match_operand 2 "")) ;; next_arg_reg
|
|
|
|
|
+ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "call_internal"
|
|
|
|
|
+ [(call (mem:SI (match_operand 0 "call_insn_operand" "r,S"))
|
|
|
|
|
+ (match_operand 1 "" ""))
|
|
|
|
|
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return REG_P (operands[0]) ? "jalr\t%0"
|
|
|
|
|
+ : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
|
|
|
|
|
+ : "call\t%0@"; }
|
|
|
|
|
+ [(set_attr "jal" "indirect,direct")])
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "call_value"
|
|
|
|
|
+ [(parallel [(set (match_operand 0 "")
|
|
|
|
|
+ (call (match_operand 1 "")
|
|
|
|
|
+ (match_operand 2 "")))
|
|
|
|
|
+ (use (match_operand 3 ""))])] ;; next_arg_reg
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+;; See comment for call_internal.
|
|
|
|
|
+(define_insn "call_value_internal"
|
|
|
|
|
+ [(set (match_operand 0 "register_operand" "")
|
|
|
|
|
+ (call (mem:SI (match_operand 1 "call_insn_operand" "r,S"))
|
|
|
|
|
+ (match_operand 2 "" "")))
|
|
|
|
|
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return REG_P (operands[1]) ? "jalr\t%1"
|
|
|
|
|
+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
|
|
|
|
|
+ : "call\t%1@"; }
|
|
|
|
|
+ [(set_attr "jal" "indirect,direct")])
|
|
|
|
|
+
|
|
|
|
|
+;; See comment for call_internal.
|
|
|
|
|
+(define_insn "call_value_multiple_internal"
|
|
|
|
|
+ [(set (match_operand 0 "register_operand" "")
|
|
|
|
|
+ (call (mem:SI (match_operand 1 "call_insn_operand" "r,S"))
|
|
|
|
|
+ (match_operand 2 "" "")))
|
|
|
|
|
+ (set (match_operand 3 "register_operand" "")
|
|
|
|
|
+ (call (mem:SI (match_dup 1))
|
|
|
|
|
+ (match_dup 2)))
|
|
|
|
|
+ (clobber (reg:SI RETURN_ADDR_REGNUM))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return REG_P (operands[1]) ? "jalr\t%1"
|
|
|
|
|
+ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
|
|
|
|
|
+ : "call\t%1@"; }
|
|
|
|
|
+ [(set_attr "jal" "indirect,direct")])
|
|
|
|
|
+
|
|
|
|
|
+;; Call subroutine returning any type.
|
|
|
|
|
+
|
|
|
|
|
+(define_expand "untyped_call"
|
|
|
|
|
+ [(parallel [(call (match_operand 0 "")
|
|
|
|
|
+ (const_int 0))
|
|
|
|
|
+ (match_operand 1 "")
|
|
|
|
|
+ (match_operand 2 "")])]
|
|
|
|
|
+ ""
|
|
|
|
|
+{
|
|
|
|
|
+ int i;
|
|
|
|
|
+
|
|
|
|
|
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
|
|
|
|
|
+
|
|
|
|
|
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
|
|
|
|
|
+ {
|
|
|
|
|
+ rtx set = XVECEXP (operands[2], 0, i);
|
|
|
|
|
+ riscv_emit_move (SET_DEST (set), SET_SRC (set));
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ emit_insn (gen_blockage ());
|
|
|
|
|
+ DONE;
|
|
|
|
|
+})
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "nop"
|
|
|
|
|
+ [(const_int 0)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "nop"
|
|
|
|
|
+ [(set_attr "type" "nop")
|
|
|
|
|
+ (set_attr "mode" "none")])
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "trap"
|
|
|
|
|
+ [(trap_if (const_int 1) (const_int 0))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "sbreak")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "gpr_save"
|
|
|
|
|
+ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_SAVE)
|
|
|
|
|
+ (clobber (reg:SI T0_REGNUM))
|
|
|
|
|
+ (clobber (reg:SI T1_REGNUM))]
|
|
|
|
|
+ ""
|
|
|
|
|
+ { return riscv_output_gpr_save (INTVAL (operands[0])); })
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "gpr_restore"
|
|
|
|
|
+ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPEC_GPR_RESTORE)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "tail\t__riscv_restore_%0")
|
|
|
|
|
+
|
|
|
|
|
+(define_insn "gpr_restore_return"
|
|
|
|
|
+ [(return)
|
|
|
|
|
+ (use (match_operand 0 "pmode_register_operand" ""))
|
|
|
|
|
+ (const_int 0)]
|
|
|
|
|
+ ""
|
|
|
|
|
+ "")
|
|
|
|
|
+
|
|
|
|
|
+(include "sync.md")
|
|
|
|
|
+(include "peephole.md")
|
|
|
|
|
diff -urN empty/gcc/config/riscv/riscv.opt gcc-5.2.0/gcc/config/riscv/riscv.opt
|
2015-08-23 21:07:46 +02:00
|
|
|
|
--- gcc-5.2.0/gcc/config/riscv/riscv.opt 1970-01-01 01:00:00.000000000 +0100
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+++ gcc-5.2.0/gcc/config/riscv/riscv.opt 2015-07-17 22:36:52.319705931 +0200
|
|
|
|
|
@@ -0,0 +1,87 @@
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+; Options for the RISC-V port of the compiler
|
|
|
|
|
+;
|
|
|
|
|
+; Copyright (C) 2005, 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
|
|
|
|
|
+;
|
|
|
|
|
+; This file is part of GCC.
|
|
|
|
|
+;
|
|
|
|
|
+; GCC is free software; you can redistribute it and/or modify it under
|
|
|
|
|
+; the terms of the GNU General Public License as published by the Free
|
|
|
|
|
+; Software Foundation; either version 3, or (at your option) any later
|
|
|
|
|
+; version.
|
|
|
|
|
+;
|
|
|
|
|
+; GCC is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
|
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
|
|
|
|
|
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
|
|
|
|
|
+; License for more details.
|
|
|
|
|
+;
|
|
|
|
|
+; You should have received a copy of the GNU General Public License
|
|
|
|
|
+; along with GCC; see the file COPYING3. If not see
|
|
|
|
|
+; <http://www.gnu.org/licenses/>.
|
|
|
|
|
+
|
|
|
|
|
+m32
|
|
|
|
|
+Target RejectNegative Mask(32BIT)
|
|
|
|
|
+Generate RV32 code
|
|
|
|
|
+
|
|
|
|
|
+m64
|
|
|
|
|
+Target RejectNegative InverseMask(32BIT, 64BIT)
|
|
|
|
|
+Generate RV64 code
|
|
|
|
|
+
|
|
|
|
|
+mbranch-cost=
|
|
|
|
|
+Target RejectNegative Joined UInteger Var(riscv_branch_cost)
|
|
|
|
|
+-mbranch-cost=COST Set the cost of branches to roughly COST instructions
|
|
|
|
|
+
|
|
|
|
|
+mhard-float
|
|
|
|
|
+Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
|
|
|
|
|
+Allow the use of hardware floating-point ABI and instructions
|
|
|
|
|
+
|
|
|
|
|
+mmemcpy
|
|
|
|
|
+Target Report Mask(MEMCPY)
|
|
|
|
|
+Don't optimize block moves
|
|
|
|
|
+
|
|
|
|
|
+mplt
|
|
|
|
|
+Target Report Var(TARGET_PLT) Init(1)
|
|
|
|
|
+When generating -fpic code, allow the use of PLTs. Ignored for -fno-pic.
|
|
|
|
|
+
|
|
|
|
|
+msoft-float
|
|
|
|
|
+Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
|
|
|
|
|
+Prevent the use of all hardware floating-point instructions
|
|
|
|
|
+
|
|
|
|
|
+mfdiv
|
|
|
|
|
+Target Report RejectNegative Mask(FDIV)
|
|
|
|
|
+Use hardware floating-point divide and square root instructions
|
|
|
|
|
+
|
|
|
|
|
+march=
|
|
|
|
|
+Target RejectNegative Joined Var(riscv_arch_string)
|
|
|
|
|
+-march= Generate code for given RISC-V ISA (e.g. RV64IM)
|
|
|
|
|
+
|
|
|
|
|
+mtune=
|
|
|
|
|
+Target RejectNegative Joined Var(riscv_tune_string)
|
|
|
|
|
+-mtune=PROCESSOR Optimize the output for PROCESSOR
|
|
|
|
|
+
|
|
|
|
|
+msmall-data-limit=
|
|
|
|
|
+Target Joined Separate UInteger Var(g_switch_value) Init(8)
|
|
|
|
|
+-msmall-data-limit=<number> Put global and static data smaller than <number> bytes into a special section (on some targets)
|
|
|
|
|
+
|
|
|
|
|
+matomic
|
|
|
|
|
+Target Report Mask(ATOMIC)
|
|
|
|
|
+Use hardware atomic memory instructions.
|
|
|
|
|
+
|
|
|
|
|
+mmuldiv
|
|
|
|
|
+Target Report Mask(MULDIV)
|
|
|
|
|
+Use hardware instructions for integer multiplication and division.
|
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+mrvc
|
|
|
|
|
+Target Report Mask(RVC)
|
|
|
|
|
+Use compressed instruction encoding
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+msave-restore
|
|
|
|
|
+Target Report Mask(SAVE_RESTORE)
|
|
|
|
|
+Use smaller but slower prologue and epilogue code
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+mlra
|
|
|
|
|
+Target Report Var(riscv_lra_flag) Init(0) Save
|
|
|
|
|
+Use LRA instead of reload
|
2015-03-07 10:57:25 +01:00
|
|
|
|
+
|
2015-07-17 23:35:54 +02:00
|
|
|
|
+mcmodel=
|
|
|
|
|
+Target RejectNegative Joined Var(riscv_cmodel_string)
|
|
|
|
|
+Use given RISC-V code model (medlow or medany)
|
|
|
|
|
diff -urN empty/gcc/config/riscv/sync.md gcc-5.2.0/gcc/config/riscv/sync.md
--- gcc-5.2.0/gcc/config/riscv/sync.md 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/sync.md 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,198 @@
+;; Machine description for RISC-V atomic operations.
+;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
+;; Based on MIPS target for GNU compiler.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+ UNSPEC_COMPARE_AND_SWAP
+ UNSPEC_SYNC_OLD_OP
+ UNSPEC_SYNC_EXCHANGE
+ UNSPEC_ATOMIC_STORE
+ UNSPEC_MEMORY_BARRIER
+])
+
+(define_code_iterator any_atomic [plus ior xor and])
+(define_code_attr atomic_optab
+ [(plus "add") (ior "or") (xor "xor") (and "and")])
+
+;; Memory barriers.
+
+(define_expand "mem_thread_fence"
+ [(match_operand:SI 0 "const_int_operand" "")] ;; model
+ ""
+{
+ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
+ {
+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (mem) = 1;
+ emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
+ }
+ DONE;
+})
+
+(define_insn "mem_thread_fence_1"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
+ (match_operand:SI 1 "const_int_operand" "")] ;; model
+ ""
+{
+ switch (INTVAL (operands[1]))
+ {
+ case MEMMODEL_SEQ_CST:
+ case MEMMODEL_ACQ_REL:
+ return "fence rw,rw";
+ case MEMMODEL_ACQUIRE:
+ case MEMMODEL_CONSUME:
+ return "fence r,rw";
+ case MEMMODEL_RELEASE:
+ return "fence rw,w";
+ default:
+ gcc_unreachable();
+ }
+})
+
+;; Atomic memory operations.
+
+;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
+(define_insn "atomic_store<mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "=A")
+ (unspec_volatile:GPR
+ [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPEC_ATOMIC_STORE))]
+ "TARGET_ATOMIC"
+ "amoswap.<amo>%A2 zero,%z1,%0")
+
+(define_insn "atomic_<atomic_optab><mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "+A")
+ (unspec_volatile:GPR
+ [(any_atomic:GPR (match_dup 0)
+ (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPEC_SYNC_OLD_OP))]
+ "TARGET_ATOMIC"
+ "amo<insn>.<amo>%A2 zero,%z1,%0")
+
+(define_insn "atomic_fetch_<atomic_optab><mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+A"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR
+ [(any_atomic:GPR (match_dup 1)
+ (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPEC_SYNC_OLD_OP))]
+ "TARGET_ATOMIC"
+ "amo<insn>.<amo>%A3 %0,%z2,%1")
+
+(define_insn "atomic_exchange<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&r")
+ (unspec_volatile:GPR
+ [(match_operand:GPR 1 "memory_operand" "+A")
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPEC_SYNC_EXCHANGE))
+ (set (match_dup 1)
+ (match_operand:GPR 2 "register_operand" "0"))]
+ "TARGET_ATOMIC"
+ "amoswap.<amo>%A3 %0,%z2,%1")
+
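+;; Strong compare-and-swap, expressed as an LR/SC retry loop; the four
+;; 4-byte instructions account for the length attribute of 16 bytes below.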
+(define_insn "atomic_cas_value_strong<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+A"))
+ (set (match_dup 1)
+ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
+ (match_operand:GPR 3 "reg_or_0_operand" "rJ")
+ (match_operand:SI 4 "const_int_operand") ;; mod_s
+ (match_operand:SI 5 "const_int_operand")] ;; mod_f
+ UNSPEC_COMPARE_AND_SWAP))
+ (clobber (match_scratch:GPR 6 "=&r"))]
+ "TARGET_ATOMIC"
+ "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
+ [(set (attr "length") (const_int 16))])
+
+(define_expand "atomic_compare_and_swap<mode>"
+ [(match_operand:SI 0 "register_operand" "") ;; bool output
+ (match_operand:GPR 1 "register_operand" "") ;; val output
+ (match_operand:GPR 2 "memory_operand" "") ;; memory
+ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value
+ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
+ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
+ (match_operand:SI 6 "const_int_operand" "") ;; mod_s
+ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
+ "TARGET_ATOMIC"
+{
+ emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
+ operands[3], operands[4],
+ operands[6], operands[7]));
+
+ rtx compare = operands[1];
+ if (operands[3] != const0_rtx)
+ {
+ rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
+ compare = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_rtx_SET (VOIDmode, compare, difference));
+ }
+
+ rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
+ rtx result = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_rtx_SET (VOIDmode, result, eq));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_lowpart (SImode, result)));
+ DONE;
+})
+
+(define_expand "atomic_test_and_set"
+ [(match_operand:QI 0 "register_operand" "") ;; bool output
+ (match_operand:QI 1 "memory_operand" "+A") ;; memory
+ (match_operand:SI 2 "const_int_operand" "")] ;; model
+ "TARGET_ATOMIC"
+{
+ /* We have no QImode atomics, so use the address LSBs to form a mask,
+ then use an aligned SImode atomic. */
+ rtx result = operands[0];
+ rtx mem = operands[1];
+ rtx model = operands[2];
+ rtx addr = force_reg (Pmode, XEXP (mem, 0));
+
+ rtx aligned_addr = gen_reg_rtx (Pmode);
+ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
+
+ rtx aligned_mem = change_address (mem, SImode, aligned_addr);
+ set_mem_alias_set (aligned_mem, 0);
+
+ rtx offset = gen_reg_rtx (SImode);
+ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
+ GEN_INT (3)));
+
+ rtx tmp = gen_reg_rtx (SImode);
+ emit_move_insn (tmp, GEN_INT (1));
+
+ rtx shmt = gen_reg_rtx (SImode);
+ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
+
+ rtx word = gen_reg_rtx (SImode);
+ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
+
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
+
+ emit_move_insn (gen_lowpart (SImode, result),
+ gen_rtx_LSHIFTRT (SImode, tmp,
+ gen_lowpart (SImode, shmt)));
+ DONE;
+})
diff -urN empty/gcc/config/riscv/t-elf gcc-5.2.0/gcc/config/riscv/t-elf
--- gcc-5.2.0/gcc/config/riscv/t-elf 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/t-elf 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,4 @@
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = msoft-float m64/m32 mno-atomic
+MULTILIB_DIRNAMES = soft-float 64 32 no-atomic
diff -urN empty/gcc/config/riscv/t-linux64 gcc-5.2.0/gcc/config/riscv/t-linux64
--- gcc-5.2.0/gcc/config/riscv/t-linux64 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/gcc/config/riscv/t-linux64 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,5 @@
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
+MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
+MULTILIB_OSDIRNAMES = ../lib ../lib32
diff -urN empty/libgcc/config/riscv/crti.S gcc-5.2.0/libgcc/config/riscv/crti.S
--- gcc-5.2.0/libgcc/config/riscv/crti.S 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/crti.S 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1 @@
+/* crti.S is empty because .init_array/.fini_array are used exclusively. */
diff -urN empty/libgcc/config/riscv/crtn.S gcc-5.2.0/libgcc/config/riscv/crtn.S
--- gcc-5.2.0/libgcc/config/riscv/crtn.S 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/crtn.S 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1 @@
+/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
diff -urN empty/libgcc/config/riscv/div.S gcc-5.2.0/libgcc/config/riscv/div.S
--- gcc-5.2.0/libgcc/config/riscv/div.S 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/div.S 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,121 @@
+ .text
+ .align 2
+
+#ifndef __riscv64
+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
+# define __udivdi3 __udivsi3
+# define __umoddi3 __umodsi3
+# define __divdi3 __divsi3
+# define __moddi3 __modsi3
+#else
+ .globl __udivsi3
+__udivsi3:
+ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
+ sll a0, a0, 32
+ sll a1, a1, 32
+ move t0, ra
+ jal __udivdi3
+ sext.w a0, a0
+ jr t0
+
+ .globl __umodsi3
+__umodsi3:
+ /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */
+ sll a0, a0, 32
+ sll a1, a1, 32
+ srl a0, a0, 32
+ srl a1, a1, 32
+ move t0, ra
+ jal __udivdi3
+ sext.w a0, a1
+ jr t0
+
+ .globl __modsi3
+ __modsi3 = __moddi3
+
+ .globl __divsi3
+__divsi3:
+ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
+ li t0, -1
+ beq a1, t0, .L20
+#endif
+
+ .globl __divdi3
+__divdi3:
+ bltz a0, .L10
+ bltz a1, .L11
+ /* Since the quotient is positive, fall into __udivdi3. */
+
+ .globl __udivdi3
+__udivdi3:
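+ /* Shift-and-subtract division: a1 holds the dividend/remainder, a2 the
+ shifted divisor, a3 the current quotient bit, and a0 accumulates the
+ quotient (all ones when the divisor is zero). */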
+ mv a2, a1
+ mv a1, a0
+ li a0, -1
+ beqz a2, .L5
+ li a3, 1
+ bgeu a2, a1, .L2
+.L1:
+ blez a2, .L2
+ slli a2, a2, 1
+ slli a3, a3, 1
+ bgtu a1, a2, .L1
+.L2:
+ li a0, 0
+.L3:
+ bltu a1, a2, .L4
+ sub a1, a1, a2
+ or a0, a0, a3
+.L4:
+ srli a3, a3, 1
+ srli a2, a2, 1
+ bnez a3, .L3
+.L5:
+ ret
+
+ .globl __umoddi3
+__umoddi3:
+ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
+ move t0, ra
+ jal __udivdi3
+ move a0, a1
+ jr t0
+
+ /* Handle negative arguments to __divdi3. */
+.L10:
+ neg a0, a0
+ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
+ neg a1, a1
+ j __divdi3 /* Compute __udivdi3(-a0, -a1). */
+.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
+ neg a1, a1
+.L12:
+ move t0, ra
+ jal __divdi3
+ neg a0, a0
+ jr t0
+
+ .globl __moddi3
+__moddi3:
+ move t0, ra
+ bltz a1, .L31
+ bltz a0, .L32
+.L30:
+ jal __udivdi3 /* The dividend is not negative. */
+ move a0, a1
+ jr t0
+.L31:
+ neg a1, a1
+ bgez a0, .L30
+.L32:
+ neg a0, a0
+ jal __udivdi3 /* The dividend is negative. */
+ neg a0, a1
+ jr t0
+
+#ifdef __riscv64
+ /* continuation of __divsi3 */
+.L20:
+ sll t0, t0, 31
+ bne a0, t0, __divdi3
+ ret
+#endif
diff -urN empty/libgcc/config/riscv/mul.S gcc-5.2.0/libgcc/config/riscv/mul.S
--- gcc-5.2.0/libgcc/config/riscv/mul.S 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/mul.S 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,21 @@
+ .text
+ .align 2
+
+#ifndef __riscv64
+/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
+# define __muldi3 __mulsi3
+#endif
+
+ .globl __muldi3
+__muldi3:
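+ /* Shift-and-add multiply: each iteration tests the low bit of a1 by
+ shifting it into the sign position, adds the multiplicand a2 when the
+ bit is set, then halves a1 and doubles a2. */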
+ mv a2, a0
+ li a0, 0
+.L1:
+ slli a3, a1, _RISCV_SZPTR-1
+ bgez a3, .L2
+ add a0, a0, a2
+.L2:
+ srli a1, a1, 1
+ slli a2, a2, 1
+ bnez a1, .L1
+ ret
diff -urN empty/libgcc/config/riscv/riscv-fp.c gcc-5.2.0/libgcc/config/riscv/riscv-fp.c
--- gcc-5.2.0/libgcc/config/riscv/riscv-fp.c 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/riscv-fp.c 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,178 @@
+/* Functions needed for soft-float on riscv-linux. Based on
+ rs6000/ppc64-fp.c with TF types removed.
+
+ Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2006, 2009 Free Software Foundation,
+ Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#if defined(__riscv64)
+#include "fp-bit.h"
+
+extern DItype __fixdfdi (DFtype);
+extern DItype __fixsfdi (SFtype);
+extern USItype __fixunsdfsi (DFtype);
+extern USItype __fixunssfsi (SFtype);
+extern DFtype __floatdidf (DItype);
+extern DFtype __floatundidf (UDItype);
+extern SFtype __floatdisf (DItype);
+extern SFtype __floatundisf (UDItype);
+
+static DItype local_fixunssfdi (SFtype);
+static DItype local_fixunsdfdi (DFtype);
+
+DItype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - local_fixunsdfdi (-a);
+ return local_fixunsdfdi (a);
+}
+
+DItype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - local_fixunssfdi (-a);
+ return local_fixunssfdi (a);
+}
+
+USItype
+__fixunsdfsi (DFtype a)
+{
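+ /* The repeated constant below is INT_MIN (-2^31) written without overflow;
+ values of 2^31 and above are biased into signed range, converted, and
+ the bias is then removed with integer arithmetic. */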
+ if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
+ return (SItype) a;
+}
+
+USItype
+__fixunssfsi (SFtype a)
+{
+ if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
+ return (SItype) a;
+}
+
+DFtype
+__floatdidf (DItype u)
+{
+ DFtype d;
+
+ d = (SItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
+DFtype
+__floatundidf (UDItype u)
+{
+ DFtype d;
+
+ d = (USItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
+SFtype
+__floatdisf (DItype u)
+{
+ DFtype f;
+
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (! (- ((DItype) 1 << 53) < u
+ && u < ((DItype) 1 << 53)))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (SItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
+SFtype
+__floatundisf (UDItype u)
+{
+ DFtype f;
+
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (u >= ((UDItype) 1 << 53))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (USItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
+/* This version is needed to prevent recursion; fixunsdfdi in libgcc
+ calls fixdfdi, which in turn calls fixunsdfdi. */
+
+static DItype
+local_fixunsdfdi (DFtype a)
+{
+ USItype hi, lo;
+
+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
+}
+
+/* This version is needed to prevent recursion; fixunssfdi in libgcc
+ calls fixsfdi, which in turn calls fixunssfdi. */
+
+static DItype
+local_fixunssfdi (SFtype original_a)
+{
+ DFtype a = original_a;
+ USItype hi, lo;
+
+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
+}
+
+#endif
diff -urN empty/libgcc/config/riscv/save-restore.S gcc-5.2.0/libgcc/config/riscv/save-restore.S
--- gcc-5.2.0/libgcc/config/riscv/save-restore.S 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/save-restore.S 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,220 @@
+ .text
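+/* Millicode prologue/epilogue helpers for -msave-restore: a function enters
+ __riscv_save_N with its return address in t0; the routine stores the
+ callee-saved registers, adjusts sp, and jumps back through t0. The
+ __riscv_restore_N entries reload the registers, pop the frame, and ret. */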
+
+ .globl __riscv_save_12
+ .globl __riscv_save_11
+ .globl __riscv_save_10
+ .globl __riscv_save_9
+ .globl __riscv_save_8
+ .globl __riscv_save_7
+ .globl __riscv_save_6
+ .globl __riscv_save_5
+ .globl __riscv_save_4
+ .globl __riscv_save_3
+ .globl __riscv_save_2
+ .globl __riscv_save_1
+ .globl __riscv_save_0
+
+ .globl __riscv_restore_12
+ .globl __riscv_restore_11
+ .globl __riscv_restore_10
+ .globl __riscv_restore_9
+ .globl __riscv_restore_8
+ .globl __riscv_restore_7
+ .globl __riscv_restore_6
+ .globl __riscv_restore_5
+ .globl __riscv_restore_4
+ .globl __riscv_restore_3
+ .globl __riscv_restore_2
+ .globl __riscv_restore_1
+ .globl __riscv_restore_0
+
+#ifdef __riscv64
+
+__riscv_save_12:
+ addi sp, sp, -112
+ li t1, 0
+ sd s11, 8(sp)
+ j .Ls10
+
+__riscv_save_11:
+__riscv_save_10:
+ addi sp, sp, -112
+ li t1, -16
+.Ls10:
+ sd s10, 16(sp)
+ sd s9, 24(sp)
+ j .Ls8
+
+__riscv_save_9:
+__riscv_save_8:
+ addi sp, sp, -112
+ li t1, -32
+.Ls8:
+ sd s8, 32(sp)
+ sd s7, 40(sp)
+ j .Ls6
+
+__riscv_save_7:
+__riscv_save_6:
+ addi sp, sp, -112
+ li t1, -48
+.Ls6:
+ sd s6, 48(sp)
+ sd s5, 56(sp)
+ j .Ls4
+
+__riscv_save_5:
+__riscv_save_4:
+ addi sp, sp, -112
+ li t1, -64
+.Ls4:
+ sd s4, 64(sp)
+ sd s3, 72(sp)
+ j .Ls2
+
+__riscv_save_3:
+__riscv_save_2:
+ addi sp, sp, -112
+ li t1, -80
+.Ls2:
+ sd s2, 80(sp)
+ sd s1, 88(sp)
+ sd s0, 96(sp)
+ sd ra, 104(sp)
+ sub sp, sp, t1
+ jr t0
+
+__riscv_save_1:
+__riscv_save_0:
+ addi sp, sp, -16
+ sd s0, 0(sp)
+ sd ra, 8(sp)
+ jr t0
+
+__riscv_restore_12:
+ ld s11, 8(sp)
+ addi sp, sp, 16
+
+__riscv_restore_11:
+__riscv_restore_10:
+ ld s10, 0(sp)
+ ld s9, 8(sp)
+ addi sp, sp, 16
+
+__riscv_restore_9:
+__riscv_restore_8:
+ ld s8, 0(sp)
+ ld s7, 8(sp)
+ addi sp, sp, 16
+
+__riscv_restore_7:
+__riscv_restore_6:
+ ld s6, 0(sp)
+ ld s5, 8(sp)
+ addi sp, sp, 16
+
+__riscv_restore_5:
+__riscv_restore_4:
+ ld s4, 0(sp)
+ ld s3, 8(sp)
+ addi sp, sp, 16
+
+__riscv_restore_3:
+__riscv_restore_2:
+ ld s2, 0(sp)
+ ld s1, 8(sp)
+ addi sp, sp, 16
+
+__riscv_restore_1:
+__riscv_restore_0:
+ ld s0, 0(sp)
+ ld ra, 8(sp)
+ addi sp, sp, 16
+ ret
+
+#else
+
+__riscv_save_12:
+ addi sp, sp, -64
+ li t1, 0
+ sw s11, 12(sp)
+ j .Ls10
+
+__riscv_save_11:
+__riscv_save_10:
+__riscv_save_9:
+__riscv_save_8:
+ addi sp, sp, -64
+ li t1, -16
+.Ls10:
+ sw s10, 16(sp)
+ sw s9, 20(sp)
+ sw s8, 24(sp)
+ sw s7, 28(sp)
+ j .Ls6
+
+__riscv_save_7:
+__riscv_save_6:
+__riscv_save_5:
+__riscv_save_4:
+ addi sp, sp, -64
+ li t1, -32
+.Ls6:
+ sw s6, 32(sp)
+ sw s5, 36(sp)
+ sw s4, 40(sp)
+ sw s3, 44(sp)
+ sw s2, 48(sp)
+ sw s1, 52(sp)
+ sw s0, 56(sp)
+ sw ra, 60(sp)
+ sub sp, sp, t1
+ jr t0
+
+__riscv_save_3:
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+ addi sp, sp, -16
+ sw s2, 0(sp)
+ sw s1, 4(sp)
+ sw s0, 8(sp)
+ sw ra, 12(sp)
+ jr t0
+
+__riscv_restore_12:
+ lw s11, 12(sp)
+ addi sp, sp, 16
+
+__riscv_restore_11:
+__riscv_restore_10:
+__riscv_restore_9:
+__riscv_restore_8:
+ lw s10, 0(sp)
+ lw s9, 4(sp)
+ lw s8, 8(sp)
+ lw s7, 12(sp)
+ addi sp, sp, 16
+
+__riscv_restore_7:
+__riscv_restore_6:
+__riscv_restore_5:
+__riscv_restore_4:
+ lw s6, 0(sp)
+ lw s5, 4(sp)
+ lw s4, 8(sp)
+ lw s3, 12(sp)
+ addi sp, sp, 16
+
+__riscv_restore_3:
+__riscv_restore_2:
+__riscv_restore_1:
+__riscv_restore_0:
+ lw s2, 0(sp)
+ lw s1, 4(sp)
+ lw s0, 8(sp)
+ lw ra, 12(sp)
+ addi sp, sp, 16
+ ret
+
+#endif
diff -urN empty/libgcc/config/riscv/t-dpbit gcc-5.2.0/libgcc/config/riscv/t-dpbit
--- gcc-5.2.0/libgcc/config/riscv/t-dpbit 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/t-dpbit 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,4 @@
+LIB2ADD += dp-bit.c
+
+dp-bit.c: $(srcdir)/fp-bit.c
+ cat $(srcdir)/fp-bit.c > dp-bit.c
diff -urN empty/libgcc/config/riscv/t-elf gcc-5.2.0/libgcc/config/riscv/t-elf
--- gcc-5.2.0/libgcc/config/riscv/t-elf 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/t-elf 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,4 @@
+LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
+ $(srcdir)/config/riscv/save-restore.S \
+ $(srcdir)/config/riscv/mul.S \
+ $(srcdir)/config/riscv/div.S
diff -urN empty/libgcc/config/riscv/t-elf32 gcc-5.2.0/libgcc/config/riscv/t-elf32
--- gcc-5.2.0/libgcc/config/riscv/t-elf32 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/t-elf32 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,2 @@
+HOST_LIBGCC2_CFLAGS += -m32
+CRTSTUFF_CFLAGS += -m32
diff -urN empty/libgcc/config/riscv/t-fpbit gcc-5.2.0/libgcc/config/riscv/t-fpbit
--- gcc-5.2.0/libgcc/config/riscv/t-fpbit 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/t-fpbit 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,5 @@
+LIB2ADD += fp-bit.c
+
+fp-bit.c: $(srcdir)/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/fp-bit.c >> fp-bit.c
diff -urN empty/libgcc/config/riscv/t-tpbit gcc-5.2.0/libgcc/config/riscv/t-tpbit
--- gcc-5.2.0/libgcc/config/riscv/t-tpbit 1970-01-01 01:00:00.000000000 +0100
+++ gcc-5.2.0/libgcc/config/riscv/t-tpbit 2015-07-17 22:36:52.319705931 +0200
@@ -0,0 +1,10 @@
+LIB2ADD += tp-bit.c
+
+tp-bit.c: $(srcdir)/fp-bit.c
+ echo '#ifdef _RISCVEL' > tp-bit.c
+ echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c
+ echo '#endif' >> tp-bit.c
+ echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c
+ echo '# define TFLOAT' >> tp-bit.c
+ cat $(srcdir)/fp-bit.c >> tp-bit.c
+ echo '#endif' >> tp-bit.c