public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [PATCH] Add C intrinsics for scalar crypto extension
@ 2023-11-27  8:34 Liao Shihua
  2023-11-29  1:31 ` Jeff Law
  2023-11-29 15:03 ` Christoph Müllner
  0 siblings, 2 replies; 6+ messages in thread
From: Liao Shihua @ 2023-11-27  8:34 UTC (permalink / raw)
  To: gcc-patches
  Cc: christoph.muellner, kito.cheng, shiyulong, jiawei, chenyixuan,
	craig.topper, palmer, jeffreyalaw, Liao Shihua

This patch adds C intrinsics for the scalar crypto extension.
Because riscv-c-api (https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44/files) includes the zbkb/zbkc/zbkx
intrinsics in the bit-manipulation extension, this patch only supports the zkn*/zks* intrinsics.

gcc/ChangeLog:

        * config.gcc: Add riscv_crypto.h
        * config/riscv/riscv_crypto.h: New file.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
        * gcc.target/riscv/zknd64.c: Likewise.
        * gcc.target/riscv/zkne32.c: Likewise.
        * gcc.target/riscv/zkne64.c: Likewise.
        * gcc.target/riscv/zknh-sha256-32.c: Likewise.
        * gcc.target/riscv/zknh-sha256-64.c: Likewise.
        * gcc.target/riscv/zknh-sha512-32.c: Likewise.
        * gcc.target/riscv/zknh-sha512-64.c: Likewise.
        * gcc.target/riscv/zksed32.c: Likewise.
        * gcc.target/riscv/zksed64.c: Likewise.
        * gcc.target/riscv/zksh32.c: Likewise.
        * gcc.target/riscv/zksh64.c: Likewise.

---
 gcc/config.gcc                                |   2 +-
 gcc/config/riscv/riscv_crypto.h               | 219 ++++++++++++++++++
 gcc/testsuite/gcc.target/riscv/zknd32.c       |   6 +-
 gcc/testsuite/gcc.target/riscv/zknd64.c       |  12 +-
 gcc/testsuite/gcc.target/riscv/zkne32.c       |   6 +-
 gcc/testsuite/gcc.target/riscv/zkne64.c       |  10 +-
 .../gcc.target/riscv/zknh-sha256-32.c         |  22 +-
 .../gcc.target/riscv/zknh-sha256-64.c         |  10 +-
 .../gcc.target/riscv/zknh-sha512-32.c         |  14 +-
 .../gcc.target/riscv/zknh-sha512-64.c         |  10 +-
 gcc/testsuite/gcc.target/riscv/zksed32.c      |   6 +-
 gcc/testsuite/gcc.target/riscv/zksed64.c      |   6 +-
 gcc/testsuite/gcc.target/riscv/zksh32.c       |   6 +-
 gcc/testsuite/gcc.target/riscv/zksh64.c       |   6 +-
 14 files changed, 288 insertions(+), 47 deletions(-)
 create mode 100644 gcc/config/riscv/riscv_crypto.h

diff --git a/gcc/config.gcc b/gcc/config.gcc
index b88591b6fd8..d67fe8b6a6f 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -548,7 +548,7 @@ riscv*)
 	extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
 	extra_objs="${extra_objs} thead.o riscv-target-attr.o"
 	d_target_objs="riscv-d.o"
-	extra_headers="riscv_vector.h"
+	extra_headers="riscv_vector.h riscv_crypto.h"
 	target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
 	target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
 	;;
diff --git a/gcc/config/riscv/riscv_crypto.h b/gcc/config/riscv/riscv_crypto.h
new file mode 100644
index 00000000000..149c1132e10
--- /dev/null
+++ b/gcc/config/riscv/riscv_crypto.h
@@ -0,0 +1,219 @@
+/* RISC-V 'K' Extension intrinsics include file.
+   Copyright (C) 2023 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef __RISCV_CRYPTO_H
+#define __RISCV_CRYPTO_H
+
+#include <stdint.h>
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__riscv_zknd)
+#if __riscv_xlen == 32
+#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
+#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64ds (uint64_t __x, uint64_t __y)
+{
+  return __builtin_riscv_aes64ds (__x, __y);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64dsm (uint64_t __x, uint64_t __y)
+{
+  return __builtin_riscv_aes64dsm (__x, __y);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64im (uint64_t __x)
+{
+  return __builtin_riscv_aes64im (__x);
+}
+#endif
+#endif // defined (__riscv_zknd)
+
+#if defined(__riscv_zkne)
+#if __riscv_xlen == 32
+#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
+#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64es (uint64_t __x, uint64_t __y)
+{
+  return __builtin_riscv_aes64es (__x, __y);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64esm (uint64_t __x, uint64_t __y)
+{
+  return __builtin_riscv_aes64esm (__x, __y);
+}
+#endif
+#endif // defined (__riscv_zkne)
+
+#if defined(__riscv_zknd) || defined(__riscv_zkne)
+#if __riscv_xlen == 64
+#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_aes64ks2 (uint64_t __x, uint64_t __y)
+{
+  return __builtin_riscv_aes64ks2 (__x, __y);
+}
+#endif
+#endif // defined (__riscv_zknd) || defined (__riscv_zkne)
+
+#if defined(__riscv_zknh)
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sig0 (uint32_t __x)
+{
+  return __builtin_riscv_sha256sig0 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sig1 (uint32_t __x)
+{
+  return __builtin_riscv_sha256sig1 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sum0 (uint32_t __x)
+{
+  return __builtin_riscv_sha256sum0 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha256sum1 (uint32_t __x)
+{
+  return __builtin_riscv_sha256sum1 (__x);
+}
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig0h (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sig0h (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig0l (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sig0l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig1h (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sig1h (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig1l (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sig1l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum0l (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sum0l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum0r (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sum0r (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum1l (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sum1l (__x, __y);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum1r (uint32_t __x, uint32_t __y)
+{
+  return __builtin_riscv_sha512sum1r (__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig0 (uint64_t __x)
+{
+  return __builtin_riscv_sha512sig0 (__x);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sig1 (uint64_t __x)
+{
+  return __builtin_riscv_sha512sig1 (__x);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum0 (uint64_t __x)
+{
+  return __builtin_riscv_sha512sum0 (__x);
+}
+
+static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sha512sum1 (uint64_t __x)
+{
+  return __builtin_riscv_sha512sum1 (__x);
+}
+#endif
+#endif // defined (__riscv_zknh)
+
+#if defined(__riscv_zksh)
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sm3p0 (uint32_t __x)
+{
+  return __builtin_riscv_sm3p0 (__x);
+}
+
+static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
+__riscv_sm3p1 (uint32_t __x)
+{
+  return __builtin_riscv_sm3p1 (__x);
+}
+#endif // defined (__riscv_zksh)
+
+#if defined(__riscv_zksed)
+#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs)
+#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs)
+#endif // defined (__riscv_zksed)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/riscv/zknd32.c b/gcc/testsuite/gcc.target/riscv/zknd32.c
index e60c027e091..62b730a700f 100644
--- a/gcc/testsuite/gcc.target/riscv/zknd32.c
+++ b/gcc/testsuite/gcc.target/riscv/zknd32.c
@@ -2,16 +2,16 @@
 /* { dg-options "-O2 -march=rv32gc_zknd -mabi=ilp32d" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1, uint32_t rs2, int bs)
 {
-    return __builtin_riscv_aes32dsi(rs1,rs2,bs);
+    return __riscv_aes32dsi(rs1,rs2,bs);
 }
 
 uint32_t foo2(uint32_t rs1, uint32_t rs2, int bs)
 {
-    return __builtin_riscv_aes32dsmi(rs1,rs2,bs);
+    return __riscv_aes32dsmi(rs1,rs2,bs);
 }
 
 /* { dg-final { scan-assembler-times "aes32dsi" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknd64.c b/gcc/testsuite/gcc.target/riscv/zknd64.c
index 707418cd51e..e5f2be72bae 100644
--- a/gcc/testsuite/gcc.target/riscv/zknd64.c
+++ b/gcc/testsuite/gcc.target/riscv/zknd64.c
@@ -2,31 +2,31 @@
 /* { dg-options "-O2 -march=rv64gc_zknd -mabi=lp64" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint64_t foo1(uint64_t rs1, uint64_t rs2)
 {
-    return __builtin_riscv_aes64ds(rs1,rs2);
+    return __riscv_aes64ds(rs1,rs2);
 }
 
 uint64_t foo2(uint64_t rs1, uint64_t rs2)
 {
-    return __builtin_riscv_aes64dsm(rs1,rs2);
+    return __riscv_aes64dsm(rs1,rs2);
 }
 
 uint64_t foo3(uint64_t rs1, unsigned rnum)
 {
-    return __builtin_riscv_aes64ks1i(rs1,rnum);
+    return __riscv_aes64ks1i(rs1,rnum);
 }
 
 uint64_t foo4(uint64_t rs1, uint64_t rs2)
 {
-    return __builtin_riscv_aes64ks2(rs1,rs2);
+    return __riscv_aes64ks2(rs1,rs2);
 }
 
 uint64_t foo5(uint64_t rs1)
 {
-    return __builtin_riscv_aes64im(rs1);
+    return __riscv_aes64im(rs1);
 }
 
 /* { dg-final { scan-assembler-times "aes64ds\t" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zkne32.c b/gcc/testsuite/gcc.target/riscv/zkne32.c
index 252e9ffa43b..c3a7205a48b 100644
--- a/gcc/testsuite/gcc.target/riscv/zkne32.c
+++ b/gcc/testsuite/gcc.target/riscv/zkne32.c
@@ -2,16 +2,16 @@
 /* { dg-options "-O2 -march=rv32gc_zkne -mabi=ilp32d" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
 {
-    return __builtin_riscv_aes32esi(rs1, rs2, bs);
+    return __riscv_aes32esi(rs1, rs2, bs);
 }
 
 uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
 {
-    return __builtin_riscv_aes32esmi(rs1, rs2, bs);
+    return __riscv_aes32esmi(rs1, rs2, bs);
 }
 
 /* { dg-final { scan-assembler-times "aes32esi" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zkne64.c b/gcc/testsuite/gcc.target/riscv/zkne64.c
index b25f6b5c29a..e99b21a46dd 100644
--- a/gcc/testsuite/gcc.target/riscv/zkne64.c
+++ b/gcc/testsuite/gcc.target/riscv/zkne64.c
@@ -2,26 +2,26 @@
 /* { dg-options "-O2 -march=rv64gc_zkne -mabi=lp64" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint64_t foo1(uint64_t rs1, uint64_t rs2)
 {
-    return __builtin_riscv_aes64es(rs1,rs2);
+    return __riscv_aes64es(rs1,rs2);
 }
 
 uint64_t foo2(uint64_t rs1, uint64_t rs2)
 {
-    return __builtin_riscv_aes64esm(rs1,rs2);
+    return __riscv_aes64esm(rs1,rs2);
 }
 
 uint64_t foo3(uint64_t rs1, unsigned rnum)
 {
-    return __builtin_riscv_aes64ks1i(rs1,rnum);
+    return __riscv_aes64ks1i(rs1,rnum);
 }
 
 uint64_t foo4(uint64_t rs1, uint64_t rs2)
 {
-    return __builtin_riscv_aes64ks2(rs1,rs2);
+    return __riscv_aes64ks2(rs1,rs2);
 }
 
 /* { dg-final { scan-assembler-times "aes64es\t" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
index c51b143a8a5..96e967fba96 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
@@ -2,7 +2,27 @@
 /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32d" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include "zknh-sha256-64.c"
+#include "riscv_crypto.h"
+
+unsigned int foo1(unsigned int rs1)
+{
+    return __riscv_sha256sig0(rs1);
+}
+
+unsigned int foo2(unsigned int rs1)
+{
+    return __riscv_sha256sig1(rs1);
+}
+
+unsigned int foo3(unsigned int rs1)
+{
+    return __riscv_sha256sum0(rs1);
+}
+
+unsigned int foo4(unsigned int rs1)
+{
+    return __riscv_sha256sum1(rs1);
+}
 
 /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
 /* { dg-final { scan-assembler-times "sha256sig1" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
index 2ef37601e6f..172b84421e2 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
@@ -2,24 +2,26 @@
 /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
+#include "riscv_crypto.h"
+
 unsigned int foo1(unsigned int rs1)
 {
-    return __builtin_riscv_sha256sig0(rs1);
+    return __riscv_sha256sig0(rs1);
 }
 
 unsigned int foo2(unsigned int rs1)
 {
-    return __builtin_riscv_sha256sig1(rs1);
+    return __riscv_sha256sig1(rs1);
 }
 
 unsigned int foo3(unsigned int rs1)
 {
-    return __builtin_riscv_sha256sum0(rs1);
+    return __riscv_sha256sum0(rs1);
 }
 
 unsigned int foo4(unsigned int rs1)
 {
-    return __builtin_riscv_sha256sum1(rs1);
+    return __riscv_sha256sum1(rs1);
 }
 
 /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
index f2bcae36a1f..e6fb298d6a7 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
@@ -2,36 +2,36 @@
 /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1, uint32_t rs2)
 {
-    return __builtin_riscv_sha512sig0h(rs1,rs2);
+    return __riscv_sha512sig0h(rs1,rs2);
 }
 
 uint32_t foo2(uint32_t rs1, uint32_t rs2)
 {
-    return __builtin_riscv_sha512sig0l(rs1,rs2);
+    return __riscv_sha512sig0l(rs1,rs2);
 }
 
 uint32_t foo3(uint32_t rs1, uint32_t rs2)
 {
-    return __builtin_riscv_sha512sig1h(rs1,rs2);
+    return __riscv_sha512sig1h(rs1,rs2);
 }
 
 uint32_t foo4(uint32_t rs1, uint32_t rs2)
 {
-    return __builtin_riscv_sha512sig1l(rs1,rs2);
+    return __riscv_sha512sig1l(rs1,rs2);
 }
 
 uint32_t foo5(uint32_t rs1, uint32_t rs2)
 {
-    return __builtin_riscv_sha512sum0r(rs1,rs2);
+    return __riscv_sha512sum0r(rs1,rs2);
 }
 
 uint32_t foo6(uint32_t rs1, uint32_t rs2)
 {
-    return __builtin_riscv_sha512sum1r(rs1,rs2);
+    return __riscv_sha512sum1r(rs1,rs2);
 }
 
 /* { dg-final { scan-assembler-times "sha512sig0h" 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
index 4f248575e66..c65c2043d08 100644
--- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
+++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
@@ -2,26 +2,26 @@
 /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint64_t foo1(uint64_t rs1)
 {
-    return __builtin_riscv_sha512sig0(rs1);
+    return __riscv_sha512sig0(rs1);
 }
 
 uint64_t foo2(uint64_t rs1)
 {
-    return __builtin_riscv_sha512sig1(rs1);
+    return __riscv_sha512sig1(rs1);
 }
 
 uint64_t foo3(uint64_t rs1)
 {
-    return __builtin_riscv_sha512sum0(rs1);
+    return __riscv_sha512sum0(rs1);
 }
 
 uint64_t foo4(uint64_t rs1)
 {
-    return __builtin_riscv_sha512sum1(rs1);
+    return __riscv_sha512sum1(rs1);
 }
 
 
diff --git a/gcc/testsuite/gcc.target/riscv/zksed32.c b/gcc/testsuite/gcc.target/riscv/zksed32.c
index 0e8f01cd548..d63e0775391 100644
--- a/gcc/testsuite/gcc.target/riscv/zksed32.c
+++ b/gcc/testsuite/gcc.target/riscv/zksed32.c
@@ -2,16 +2,16 @@
 /* { dg-options "-O2 -march=rv32gc_zksed -mabi=ilp32" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
 {
-    return __builtin_riscv_sm4ks(rs1,rs2,bs);
+    return __riscv_sm4ks(rs1,rs2,bs);
 }
 
 uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
 {
-    return __builtin_riscv_sm4ed(rs1,rs2,bs);
+    return __riscv_sm4ed(rs1,rs2,bs);
 }
 
 
diff --git a/gcc/testsuite/gcc.target/riscv/zksed64.c b/gcc/testsuite/gcc.target/riscv/zksed64.c
index 9e4d1961419..426122cf6eb 100644
--- a/gcc/testsuite/gcc.target/riscv/zksed64.c
+++ b/gcc/testsuite/gcc.target/riscv/zksed64.c
@@ -2,16 +2,16 @@
 /* { dg-options "-O2 -march=rv64gc_zksed -mabi=lp64" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
 {
-    return __builtin_riscv_sm4ks(rs1,rs2,bs);
+    return __riscv_sm4ks(rs1,rs2,bs);
 }
 
 uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
 {
-    return __builtin_riscv_sm4ed(rs1,rs2,bs);
+    return __riscv_sm4ed(rs1,rs2,bs);
 }
 
 
diff --git a/gcc/testsuite/gcc.target/riscv/zksh32.c b/gcc/testsuite/gcc.target/riscv/zksh32.c
index c182e557a85..3d0d154ad1d 100644
--- a/gcc/testsuite/gcc.target/riscv/zksh32.c
+++ b/gcc/testsuite/gcc.target/riscv/zksh32.c
@@ -2,16 +2,16 @@
 /* { dg-options "-O2 -march=rv32gc_zksh -mabi=ilp32" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1)
 {
-    return __builtin_riscv_sm3p0(rs1);
+    return __riscv_sm3p0(rs1);
 }
 
 uint32_t foo2(uint32_t rs1)
 {
-    return __builtin_riscv_sm3p1(rs1);
+    return __riscv_sm3p1(rs1);
 }
 
 
diff --git a/gcc/testsuite/gcc.target/riscv/zksh64.c b/gcc/testsuite/gcc.target/riscv/zksh64.c
index d794b39f77a..1398c1329f0 100644
--- a/gcc/testsuite/gcc.target/riscv/zksh64.c
+++ b/gcc/testsuite/gcc.target/riscv/zksh64.c
@@ -2,16 +2,16 @@
 /* { dg-options "-O2 -march=rv64gc_zksh -mabi=lp64" } */
 /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
 
-#include <stdint-gcc.h>
+#include "riscv_crypto.h"
 
 uint32_t foo1(uint32_t rs1)
 {
-    return __builtin_riscv_sm3p0(rs1);
+    return __riscv_sm3p0(rs1);
 }
 
 uint32_t foo2(uint32_t rs1)
 {
-    return __builtin_riscv_sm3p1(rs1);
+    return __riscv_sm3p1(rs1);
 }
 
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Add C intrinsics for scalar crypto extension
  2023-11-27  8:34 [PATCH] Add C intrinsics for scalar crypto extension Liao Shihua
@ 2023-11-29  1:31 ` Jeff Law
  2023-11-29 15:03 ` Christoph Müllner
  1 sibling, 0 replies; 6+ messages in thread
From: Jeff Law @ 2023-11-29  1:31 UTC (permalink / raw)
  To: Liao Shihua, gcc-patches
  Cc: christoph.muellner, kito.cheng, shiyulong, jiawei, chenyixuan,
	craig.topper, palmer



On 11/27/23 01:34, Liao Shihua wrote:
> This patch add C intrinsics for scalar crypto extension.
> Because of riscv-c-api (https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44/files) includes zbkb/zbkc/zbkx's
> intrinsics in bit manipulation extension, this patch only support zkn*/zks*'s intrinsics.
> 
> gcc/ChangeLog:
> 
>          * config.gcc: Add riscv_crypto.h
>          * config/riscv/riscv_crypto.h: New file.
> 
> gcc/testsuite/ChangeLog:
> 
>          * gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
>          * gcc.target/riscv/zknd64.c: Likewise.
>          * gcc.target/riscv/zkne32.c: Likewise.
>          * gcc.target/riscv/zkne64.c: Likewise.
>          * gcc.target/riscv/zknh-sha256-32.c: Likewise.
>          * gcc.target/riscv/zknh-sha256-64.c: Likewise.
>          * gcc.target/riscv/zknh-sha512-32.c: Likewise.
>          * gcc.target/riscv/zknh-sha512-64.c: Likewise.
>          * gcc.target/riscv/zksed32.c: Likewise.
>          * gcc.target/riscv/zksed64.c: Likewise.
>          * gcc.target/riscv/zksh32.c: Likewise.
>          * gcc.target/riscv/zksh64.c: Likewise.
Last cycle we let a ton of vector intrinsics through after stage1 
closed.  I'm not keen to repeat that, but this looks pretty small and 
appears to just provide a mapping from the RV intrinsics to the builtin 
names within GCC.


I won't object to this one if Kito or Palmer want to see it go forward. 
I might object if more of these things get submitted later in 
stage3/stage4 :-)


It would be useful if future patches included "RISC-V" in the subject 
line.  Our Tuesday patchwork meeting focuses on patches with that tag in 
the subject line.  By using it you ensure it gets on the weekly agenda.

Thanks,
Jeff

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Add C intrinsics for scalar crypto extension
  2023-11-27  8:34 [PATCH] Add C intrinsics for scalar crypto extension Liao Shihua
  2023-11-29  1:31 ` Jeff Law
@ 2023-11-29 15:03 ` Christoph Müllner
  2023-11-29 16:49   ` Liao Shihua
  1 sibling, 1 reply; 6+ messages in thread
From: Christoph Müllner @ 2023-11-29 15:03 UTC (permalink / raw)
  To: Liao Shihua
  Cc: gcc-patches, kito.cheng, shiyulong, jiawei, chenyixuan,
	craig.topper, palmer, jeffreyalaw

On Mon, Nov 27, 2023 at 9:36 AM Liao Shihua <shihua@iscas.ac.cn> wrote:
>
> This patch add C intrinsics for scalar crypto extension.
> Because of riscv-c-api (https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44/files) includes zbkb/zbkc/zbkx's
> intrinsics in bit manipulation extension, this patch only support zkn*/zks*'s intrinsics.

Thanks for working on this!
Looking forward to seeing the second patch (covering bitmanip) soon as well!
A couple of comments can be found below.

>
> gcc/ChangeLog:
>
>         * config.gcc: Add riscv_crypto.h
>         * config/riscv/riscv_crypto.h: New file.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
>         * gcc.target/riscv/zknd64.c: Likewise.
>         * gcc.target/riscv/zkne32.c: Likewise.
>         * gcc.target/riscv/zkne64.c: Likewise.
>         * gcc.target/riscv/zknh-sha256-32.c: Likewise.
>         * gcc.target/riscv/zknh-sha256-64.c: Likewise.
>         * gcc.target/riscv/zknh-sha512-32.c: Likewise.
>         * gcc.target/riscv/zknh-sha512-64.c: Likewise.
>         * gcc.target/riscv/zksed32.c: Likewise.
>         * gcc.target/riscv/zksed64.c: Likewise.
>         * gcc.target/riscv/zksh32.c: Likewise.
>         * gcc.target/riscv/zksh64.c: Likewise.
>
> ---
>  gcc/config.gcc                                |   2 +-
>  gcc/config/riscv/riscv_crypto.h               | 219 ++++++++++++++++++
>  gcc/testsuite/gcc.target/riscv/zknd32.c       |   6 +-
>  gcc/testsuite/gcc.target/riscv/zknd64.c       |  12 +-
>  gcc/testsuite/gcc.target/riscv/zkne32.c       |   6 +-
>  gcc/testsuite/gcc.target/riscv/zkne64.c       |  10 +-
>  .../gcc.target/riscv/zknh-sha256-32.c         |  22 +-
>  .../gcc.target/riscv/zknh-sha256-64.c         |  10 +-
>  .../gcc.target/riscv/zknh-sha512-32.c         |  14 +-
>  .../gcc.target/riscv/zknh-sha512-64.c         |  10 +-
>  gcc/testsuite/gcc.target/riscv/zksed32.c      |   6 +-
>  gcc/testsuite/gcc.target/riscv/zksed64.c      |   6 +-
>  gcc/testsuite/gcc.target/riscv/zksh32.c       |   6 +-
>  gcc/testsuite/gcc.target/riscv/zksh64.c       |   6 +-
>  14 files changed, 288 insertions(+), 47 deletions(-)
>  create mode 100644 gcc/config/riscv/riscv_crypto.h
>
> diff --git a/gcc/config.gcc b/gcc/config.gcc
> index b88591b6fd8..d67fe8b6a6f 100644
> --- a/gcc/config.gcc
> +++ b/gcc/config.gcc
> @@ -548,7 +548,7 @@ riscv*)
>         extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
>         extra_objs="${extra_objs} thead.o riscv-target-attr.o"
>         d_target_objs="riscv-d.o"
> -       extra_headers="riscv_vector.h"
> +       extra_headers="riscv_vector.h riscv_crypto.h"
>         target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
>         target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
>         ;;
> diff --git a/gcc/config/riscv/riscv_crypto.h b/gcc/config/riscv/riscv_crypto.h
> new file mode 100644
> index 00000000000..149c1132e10
> --- /dev/null
> +++ b/gcc/config/riscv/riscv_crypto.h
> @@ -0,0 +1,219 @@
> +/* RISC-V 'K' Extension intrinsics include file.
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +
> +   This file is part of GCC.
> +
> +   GCC is free software; you can redistribute it and/or modify it
> +   under the terms of the GNU General Public License as published
> +   by the Free Software Foundation; either version 3, or (at your
> +   option) any later version.
> +
> +   GCC is distributed in the hope that it will be useful, but WITHOUT
> +   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
> +   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
> +   License for more details.
> +
> +   Under Section 7 of GPL version 3, you are granted additional
> +   permissions described in the GCC Runtime Library Exception, version
> +   3.1, as published by the Free Software Foundation.
> +
> +   You should have received a copy of the GNU General Public License and
> +   a copy of the GCC Runtime Library Exception along with this program;
> +   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#ifndef __RISCV_CRYPTO_H
> +#define __RISCV_CRYPTO_H
> +
> +#include <stdint.h>
> +
> +#if defined (__cplusplus)
> +extern "C" {
> +#endif
> +
> +#if defined(__riscv_zknd)
> +#if __riscv_xlen == 32
> +#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
> +#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
> +#endif
> +
> +#if __riscv_xlen == 64
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64ds (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64ds (__x, __y);
> +}

I don't understand why some intrinsic functions are implemented as
macros to builtins
and some are implemented as static inline wrappers around butilins.
Is there a particular reason that this is mixed?

> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64dsm (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64dsm (__x, __y);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64im (uint64_t __x)
> +{
> +  return __builtin_riscv_aes64im (__x);
> +}
> +#endif
> +#endif // defined (__riscv_zknd)
> +
> +#if defined(__riscv_zkne)
> +#if __riscv_xlen == 32
> +#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
> +#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
> +#endif
> +
> +#if __riscv_xlen == 64
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64es (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64es (__x, __y);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64esm (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64esm (__x, __y);
> +}
> +#endif
> +#endif // defined (__riscv_zknd)

Copy and paste mistake in the comment (should be "__riscv_zkne")

> +
> +#if defined(__riscv_zknd) || defined(__riscv_zkne)
> +#if __riscv_xlen == 64
> +#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64ks2 (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64ks2 (__x, __y);
> +}
> +#endif
> +#endif // defined (__riscv_zknd) || defined (__riscv_zkne)
> +
> +#if defined(__riscv_zknh)
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sig0 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sig0 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sig1 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sig1 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sum0 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sum0 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sum1 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sum1 (__x);
> +}
> +
> +#if __riscv_xlen == 32
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig0h (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig0h (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig0l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig0l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig1h (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig1h (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig1l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig1l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum0l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum0l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum0r (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum0r (__x, __y);
> +}

Why sum0l and sum0r?
The specification says sum0h and sum0l.

> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum1l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum1l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum1r (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum1r (__x, __y);
> +}

Why sum1l and sum1r?
The specification says sum1h and sum1l.


> +#endif
> +
> +#if __riscv_xlen == 64
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig0 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sig0 (__x);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig1 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sig1 (__x);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum0 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sum0 (__x);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum1 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sum1 (__x);
> +}
> +#endif
> +#endif // defined (__riscv_zknh)
> +
> +#if defined(__riscv_zksh)
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sm3p0 (uint32_t __x)
> +{
> +  return __builtin_riscv_sm3p0 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sm3p1 (uint32_t __x)
> +{
> +  return __builtin_riscv_sm3p1 (__x);
> +}
> +#endif // defined (__riscv_zksh)
> +
> +#if defined(__riscv_zksed)
> +#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs);
> +#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs);
> +#endif // defined (__riscv_zksh)

Wrong comment (should be "__riscv_zksed").

> +
> +#if defined(__cplusplus)
> +}
> +#endif
> +
> +#endif
> \ No newline at end of file
> diff --git a/gcc/testsuite/gcc.target/riscv/zknd32.c b/gcc/testsuite/gcc.target/riscv/zknd32.c
> index e60c027e091..62b730a700f 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknd32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknd32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zknd -mabi=ilp32d" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, int bs)
>  {
> -    return __builtin_riscv_aes32dsi(rs1,rs2,bs);
> +    return __riscv_aes32dsi(rs1,rs2,bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, int bs)
>  {
> -    return __builtin_riscv_aes32dsmi(rs1,rs2,bs);
> +    return __riscv_aes32dsmi(rs1,rs2,bs);
>  }
>
>  /* { dg-final { scan-assembler-times "aes32dsi" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknd64.c b/gcc/testsuite/gcc.target/riscv/zknd64.c
> index 707418cd51e..e5f2be72bae 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknd64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknd64.c
> @@ -2,31 +2,31 @@
>  /* { dg-options "-O2 -march=rv64gc_zknd -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint64_t foo1(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64ds(rs1,rs2);
> +    return __riscv_aes64ds(rs1,rs2);
>  }
>
>  uint64_t foo2(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64dsm(rs1,rs2);
> +    return __riscv_aes64dsm(rs1,rs2);
>  }
>
>  uint64_t foo3(uint64_t rs1, unsigned rnum)
>  {
> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
> +    return __riscv_aes64ks1i(rs1,rnum);
>  }
>
>  uint64_t foo4(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64ks2(rs1,rs2);
> +    return __riscv_aes64ks2(rs1,rs2);
>  }
>
>  uint64_t foo5(uint64_t rs1)
>  {
> -    return __builtin_riscv_aes64im(rs1);
> +    return __riscv_aes64im(rs1);
>  }
>
>  /* { dg-final { scan-assembler-times "aes64ds\t" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zkne32.c b/gcc/testsuite/gcc.target/riscv/zkne32.c
> index 252e9ffa43b..c3a7205a48b 100644
> --- a/gcc/testsuite/gcc.target/riscv/zkne32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zkne32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zkne -mabi=ilp32d" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_aes32esi(rs1, rs2, bs);
> +    return __riscv_aes32esi(rs1, rs2, bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_aes32esmi(rs1, rs2, bs);
> +    return __riscv_aes32esmi(rs1, rs2, bs);
>  }
>
>  /* { dg-final { scan-assembler-times "aes32esi" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zkne64.c b/gcc/testsuite/gcc.target/riscv/zkne64.c
> index b25f6b5c29a..e99b21a46dd 100644
> --- a/gcc/testsuite/gcc.target/riscv/zkne64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zkne64.c
> @@ -2,26 +2,26 @@
>  /* { dg-options "-O2 -march=rv64gc_zkne -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint64_t foo1(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64es(rs1,rs2);
> +    return __riscv_aes64es(rs1,rs2);
>  }
>
>  uint64_t foo2(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64esm(rs1,rs2);
> +    return __riscv_aes64esm(rs1,rs2);
>  }
>
>  uint64_t foo3(uint64_t rs1, unsigned rnum)
>  {
> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
> +    return __riscv_aes64ks1i(rs1,rnum);
>  }
>
>  uint64_t foo4(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64ks2(rs1,rs2);
> +    return __riscv_aes64ks2(rs1,rs2);
>  }
>
>  /* { dg-final { scan-assembler-times "aes64es\t" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
> index c51b143a8a5..96e967fba96 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
> @@ -2,7 +2,27 @@
>  /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32d" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include "zknh-sha256-64.c"
> +#include "riscv_crypto.h"
> +
> +unsigned int foo1(unsigned int rs1)
> +{
> +    return __riscv_sha256sig0(rs1);
> +}
> +
> +unsigned int foo2(unsigned int rs1)
> +{
> +    return __riscv_sha256sig1(rs1);
> +}
> +
> +unsigned int foo3(unsigned int rs1)
> +{
> +    return __riscv_sha256sum0(rs1);
> +}
> +
> +unsigned int foo4(unsigned int rs1)
> +{
> +    return __riscv_sha256sum1(rs1);
> +}
>
>  /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
>  /* { dg-final { scan-assembler-times "sha256sig1" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
> index 2ef37601e6f..172b84421e2 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
> @@ -2,24 +2,26 @@
>  /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> +#include "riscv_crypto.h"
> +
>  unsigned int foo1(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sig0(rs1);
> +    return __riscv_sha256sig0(rs1);
>  }
>
>  unsigned int foo2(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sig1(rs1);
> +    return __riscv_sha256sig1(rs1);
>  }
>
>  unsigned int foo3(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sum0(rs1);
> +    return __riscv_sha256sum0(rs1);
>  }
>
>  unsigned int foo4(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sum1(rs1);
> +    return __riscv_sha256sum1(rs1);
>  }
>
>  /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
> index f2bcae36a1f..e6fb298d6a7 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
> @@ -2,36 +2,36 @@
>  /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig0h(rs1,rs2);
> +    return __riscv_sha512sig0h(rs1,rs2);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig0l(rs1,rs2);
> +    return __riscv_sha512sig0l(rs1,rs2);
>  }
>
>  uint32_t foo3(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig1h(rs1,rs2);
> +    return __riscv_sha512sig1h(rs1,rs2);
>  }
>
>  uint32_t foo4(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig1l(rs1,rs2);
> +    return __riscv_sha512sig1l(rs1,rs2);
>  }
>
>  uint32_t foo5(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sum0r(rs1,rs2);
> +    return __riscv_sha512sum0r(rs1,rs2);
>  }
>
>  uint32_t foo6(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sum1r(rs1,rs2);
> +    return __riscv_sha512sum1r(rs1,rs2);
>  }
>
>  /* { dg-final { scan-assembler-times "sha512sig0h" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
> index 4f248575e66..c65c2043d08 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
> @@ -2,26 +2,26 @@
>  /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint64_t foo1(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sig0(rs1);
> +    return __riscv_sha512sig0(rs1);
>  }
>
>  uint64_t foo2(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sig1(rs1);
> +    return __riscv_sha512sig1(rs1);
>  }
>
>  uint64_t foo3(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sum0(rs1);
> +    return __riscv_sha512sum0(rs1);
>  }
>
>  uint64_t foo4(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sum1(rs1);
> +    return __riscv_sha512sum1(rs1);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksed32.c b/gcc/testsuite/gcc.target/riscv/zksed32.c
> index 0e8f01cd548..d63e0775391 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksed32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksed32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zksed -mabi=ilp32" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
> +    return __riscv_sm4ks(rs1,rs2,bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
> +    return __riscv_sm4ed(rs1,rs2,bs);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksed64.c b/gcc/testsuite/gcc.target/riscv/zksed64.c
> index 9e4d1961419..426122cf6eb 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksed64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksed64.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv64gc_zksed -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
> +    return __riscv_sm4ks(rs1,rs2,bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
> +    return __riscv_sm4ed(rs1,rs2,bs);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksh32.c b/gcc/testsuite/gcc.target/riscv/zksh32.c
> index c182e557a85..3d0d154ad1d 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksh32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksh32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zksh -mabi=ilp32" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p0(rs1);
> +    return __riscv_sm3p0(rs1);
>  }
>
>  uint32_t foo2(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p1(rs1);
> +    return __riscv_sm3p1(rs1);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksh64.c b/gcc/testsuite/gcc.target/riscv/zksh64.c
> index d794b39f77a..1398c1329f0 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksh64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksh64.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv64gc_zksh -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p0(rs1);
> +    return __riscv_sm3p0(rs1);
>  }
>
>  uint32_t foo2(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p1(rs1);
> +    return __riscv_sm3p1(rs1);
>  }
>
>
> --
> 2.34.1
>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Add C intrinsics for scalar crypto extension
  2023-11-29 15:03 ` Christoph Müllner
@ 2023-11-29 16:49   ` Liao Shihua
  2023-11-29 17:58     ` Christoph Müllner
  0 siblings, 1 reply; 6+ messages in thread
From: Liao Shihua @ 2023-11-29 16:49 UTC (permalink / raw)
  To: Christoph Müllner
  Cc: gcc-patches, kito.cheng, shiyulong, jiawei, chenyixuan,
	craig.topper, palmer, jeffreyalaw

[-- Attachment #1: Type: text/plain, Size: 22922 bytes --]


在 2023/11/29 23:03, Christoph Müllner 写道:
> On Mon, Nov 27, 2023 at 9:36 AM Liao Shihua<shihua@iscas.ac.cn>  wrote:
>> This patch add C intrinsics for scalar crypto extension.
>> Because of riscv-c-api (https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44/files) includes zbkb/zbkc/zbkx's
>> intrinsics in bit manipulation extension, this patch only support zkn*/zks*'s intrinsics.
> Thanks for working on this!
> Looking forward to seeing the second patch (covering bitmanip) soon as well!
> A couple of comments can be found below.


Thanks for your comments, Christoph. Typos will be corrected in the next 
patch.

The intrinsic implementation follows the corresponding implementation in
LLVM. (It does look a little strange.)

I will unify the implementation method in the next patch.


>
>> gcc/ChangeLog:
>>
>>          * config.gcc: Add riscv_crypto.h
>>          * config/riscv/riscv_crypto.h: New file.
>>
>> gcc/testsuite/ChangeLog:
>>
>>          * gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
>>          * gcc.target/riscv/zknd64.c: Likewise.
>>          * gcc.target/riscv/zkne32.c: Likewise.
>>          * gcc.target/riscv/zkne64.c: Likewise.
>>          * gcc.target/riscv/zknh-sha256-32.c: Likewise.
>>          * gcc.target/riscv/zknh-sha256-64.c: Likewise.
>>          * gcc.target/riscv/zknh-sha512-32.c: Likewise.
>>          * gcc.target/riscv/zknh-sha512-64.c: Likewise.
>>          * gcc.target/riscv/zksed32.c: Likewise.
>>          * gcc.target/riscv/zksed64.c: Likewise.
>>          * gcc.target/riscv/zksh32.c: Likewise.
>>          * gcc.target/riscv/zksh64.c: Likewise.
>>
>> ---
>>   gcc/config.gcc                                |   2 +-
>>   gcc/config/riscv/riscv_crypto.h               | 219 ++++++++++++++++++
>>   gcc/testsuite/gcc.target/riscv/zknd32.c       |   6 +-
>>   gcc/testsuite/gcc.target/riscv/zknd64.c       |  12 +-
>>   gcc/testsuite/gcc.target/riscv/zkne32.c       |   6 +-
>>   gcc/testsuite/gcc.target/riscv/zkne64.c       |  10 +-
>>   .../gcc.target/riscv/zknh-sha256-32.c         |  22 +-
>>   .../gcc.target/riscv/zknh-sha256-64.c         |  10 +-
>>   .../gcc.target/riscv/zknh-sha512-32.c         |  14 +-
>>   .../gcc.target/riscv/zknh-sha512-64.c         |  10 +-
>>   gcc/testsuite/gcc.target/riscv/zksed32.c      |   6 +-
>>   gcc/testsuite/gcc.target/riscv/zksed64.c      |   6 +-
>>   gcc/testsuite/gcc.target/riscv/zksh32.c       |   6 +-
>>   gcc/testsuite/gcc.target/riscv/zksh64.c       |   6 +-
>>   14 files changed, 288 insertions(+), 47 deletions(-)
>>   create mode 100644 gcc/config/riscv/riscv_crypto.h
>>
>> diff --git a/gcc/config.gcc b/gcc/config.gcc
>> index b88591b6fd8..d67fe8b6a6f 100644
>> --- a/gcc/config.gcc
>> +++ b/gcc/config.gcc
>> @@ -548,7 +548,7 @@ riscv*)
>>          extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
>>          extra_objs="${extra_objs} thead.o riscv-target-attr.o"
>>          d_target_objs="riscv-d.o"
>> -       extra_headers="riscv_vector.h"
>> +       extra_headers="riscv_vector.h riscv_crypto.h"
>>          target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
>>          target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
>>          ;;
>> diff --git a/gcc/config/riscv/riscv_crypto.h b/gcc/config/riscv/riscv_crypto.h
>> new file mode 100644
>> index 00000000000..149c1132e10
>> --- /dev/null
>> +++ b/gcc/config/riscv/riscv_crypto.h
>> @@ -0,0 +1,219 @@
>> +/* RISC-V 'K' Extension intrinsics include file.
>> +   Copyright (C) 2023 Free Software Foundation, Inc.
>> +
>> +   This file is part of GCC.
>> +
>> +   GCC is free software; you can redistribute it and/or modify it
>> +   under the terms of the GNU General Public License as published
>> +   by the Free Software Foundation; either version 3, or (at your
>> +   option) any later version.
>> +
>> +   GCC is distributed in the hope that it will be useful, but WITHOUT
>> +   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
>> +   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
>> +   License for more details.
>> +
>> +   Under Section 7 of GPL version 3, you are granted additional
>> +   permissions described in the GCC Runtime Library Exception, version
>> +   3.1, as published by the Free Software Foundation.
>> +
>> +   You should have received a copy of the GNU General Public License and
>> +   a copy of the GCC Runtime Library Exception along with this program;
>> +   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
>> +<http://www.gnu.org/licenses/>.  */
>> +
>> +#ifndef __RISCV_CRYPTO_H
>> +#define __RISCV_CRYPTO_H
>> +
>> +#include <stdint.h>
>> +
>> +#if defined (__cplusplus)
>> +extern "C" {
>> +#endif
>> +
>> +#if defined(__riscv_zknd)
>> +#if __riscv_xlen == 32
>> +#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
>> +#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
>> +#endif
>> +
>> +#if __riscv_xlen == 64
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64ds (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64ds (__x, __y);
>> +}
> I don't understand why some intrinsic functions are implemented as
> macros to builtins
> and some are implemented as static inline wrappers around butilins.
> Is there a particular reason that this is mixed?
>
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64dsm (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64dsm (__x, __y);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64im (uint64_t __x)
>> +{
>> +  return __builtin_riscv_aes64im (__x);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknd)
>> +
>> +#if defined(__riscv_zkne)
>> +#if __riscv_xlen == 32
>> +#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
>> +#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
>> +#endif
>> +
>> +#if __riscv_xlen == 64
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64es (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64es (__x, __y);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64esm (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64esm (__x, __y);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknd)
> Copy and paste mistake in the comment (should be "__riscv_zkne")
>
>> +
>> +#if defined(__riscv_zknd) || defined(__riscv_zkne)
>> +#if __riscv_xlen == 64
>> +#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64ks2 (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64ks2 (__x, __y);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknd) || defined (__riscv_zkne)
>> +
>> +#if defined(__riscv_zknh)
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sig0 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sig0 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sig1 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sig1 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sum0 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sum0 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sum1 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sum1 (__x);
>> +}
>> +
>> +#if __riscv_xlen == 32
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig0h (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig0h (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig0l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig0l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig1h (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig1h (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig1l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig1l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum0l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum0l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum0r (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum0r (__x, __y);
>> +}
> Why sum0l and sum0r?
> The specification says sum0h and sum0l.
>
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum1l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum1l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum1r (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum1r (__x, __y);
>> +}
> Why sum1l and sum1r?
> The specification says sum1h and sum1l.
>
>
>> +#endif
>> +
>> +#if __riscv_xlen == 64
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig0 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sig0 (__x);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig1 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sig1 (__x);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum0 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sum0 (__x);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum1 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sum1 (__x);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknh)
>> +
>> +#if defined(__riscv_zksh)
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sm3p0 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sm3p0 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sm3p1 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sm3p1 (__x);
>> +}
>> +#endif // defined (__riscv_zksh)
>> +
>> +#if defined(__riscv_zksed)
>> +#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs);
>> +#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs);
>> +#endif // defined (__riscv_zksh)
> Wrong comment (should be "__riscv_zksed").
>
>> +
>> +#if defined(__cplusplus)
>> +}
>> +#endif
>> +
>> +#endif
>> \ No newline at end of file
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknd32.c b/gcc/testsuite/gcc.target/riscv/zknd32.c
>> index e60c027e091..62b730a700f 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknd32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknd32.c
>> @@ -2,16 +2,16 @@
>>   /* { dg-options "-O2 -march=rv32gc_zknd -mabi=ilp32d" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1, uint32_t rs2, int bs)
>>   {
>> -    return __builtin_riscv_aes32dsi(rs1,rs2,bs);
>> +    return __riscv_aes32dsi(rs1,rs2,bs);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1, uint32_t rs2, int bs)
>>   {
>> -    return __builtin_riscv_aes32dsmi(rs1,rs2,bs);
>> +    return __riscv_aes32dsmi(rs1,rs2,bs);
>>   }
>>
>>   /* { dg-final { scan-assembler-times "aes32dsi" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknd64.c b/gcc/testsuite/gcc.target/riscv/zknd64.c
>> index 707418cd51e..e5f2be72bae 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknd64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknd64.c
>> @@ -2,31 +2,31 @@
>>   /* { dg-options "-O2 -march=rv64gc_zknd -mabi=lp64" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint64_t foo1(uint64_t rs1, uint64_t rs2)
>>   {
>> -    return __builtin_riscv_aes64ds(rs1,rs2);
>> +    return __riscv_aes64ds(rs1,rs2);
>>   }
>>
>>   uint64_t foo2(uint64_t rs1, uint64_t rs2)
>>   {
>> -    return __builtin_riscv_aes64dsm(rs1,rs2);
>> +    return __riscv_aes64dsm(rs1,rs2);
>>   }
>>
>>   uint64_t foo3(uint64_t rs1, unsigned rnum)
>>   {
>> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
>> +    return __riscv_aes64ks1i(rs1,rnum);
>>   }
>>
>>   uint64_t foo4(uint64_t rs1, uint64_t rs2)
>>   {
>> -    return __builtin_riscv_aes64ks2(rs1,rs2);
>> +    return __riscv_aes64ks2(rs1,rs2);
>>   }
>>
>>   uint64_t foo5(uint64_t rs1)
>>   {
>> -    return __builtin_riscv_aes64im(rs1);
>> +    return __riscv_aes64im(rs1);
>>   }
>>
>>   /* { dg-final { scan-assembler-times "aes64ds\t" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zkne32.c b/gcc/testsuite/gcc.target/riscv/zkne32.c
>> index 252e9ffa43b..c3a7205a48b 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zkne32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zkne32.c
>> @@ -2,16 +2,16 @@
>>   /* { dg-options "-O2 -march=rv32gc_zkne -mabi=ilp32d" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>>   {
>> -    return __builtin_riscv_aes32esi(rs1, rs2, bs);
>> +    return __riscv_aes32esi(rs1, rs2, bs);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>>   {
>> -    return __builtin_riscv_aes32esmi(rs1, rs2, bs);
>> +    return __riscv_aes32esmi(rs1, rs2, bs);
>>   }
>>
>>   /* { dg-final { scan-assembler-times "aes32esi" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zkne64.c b/gcc/testsuite/gcc.target/riscv/zkne64.c
>> index b25f6b5c29a..e99b21a46dd 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zkne64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zkne64.c
>> @@ -2,26 +2,26 @@
>>   /* { dg-options "-O2 -march=rv64gc_zkne -mabi=lp64" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint64_t foo1(uint64_t rs1, uint64_t rs2)
>>   {
>> -    return __builtin_riscv_aes64es(rs1,rs2);
>> +    return __riscv_aes64es(rs1,rs2);
>>   }
>>
>>   uint64_t foo2(uint64_t rs1, uint64_t rs2)
>>   {
>> -    return __builtin_riscv_aes64esm(rs1,rs2);
>> +    return __riscv_aes64esm(rs1,rs2);
>>   }
>>
>>   uint64_t foo3(uint64_t rs1, unsigned rnum)
>>   {
>> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
>> +    return __riscv_aes64ks1i(rs1,rnum);
>>   }
>>
>>   uint64_t foo4(uint64_t rs1, uint64_t rs2)
>>   {
>> -    return __builtin_riscv_aes64ks2(rs1,rs2);
>> +    return __riscv_aes64ks2(rs1,rs2);
>>   }
>>
>>   /* { dg-final { scan-assembler-times "aes64es\t" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
>> index c51b143a8a5..96e967fba96 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
>> @@ -2,7 +2,27 @@
>>   /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32d" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include "zknh-sha256-64.c"
>> +#include "riscv_crypto.h"
>> +
>> +unsigned int foo1(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sig0(rs1);
>> +}
>> +
>> +unsigned int foo2(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sig1(rs1);
>> +}
>> +
>> +unsigned int foo3(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sum0(rs1);
>> +}
>> +
>> +unsigned int foo4(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sum1(rs1);
>> +}
>>
>>   /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
>>   /* { dg-final { scan-assembler-times "sha256sig1" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
>> index 2ef37601e6f..172b84421e2 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
>> @@ -2,24 +2,26 @@
>>   /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> +#include "riscv_crypto.h"
>> +
>>   unsigned int foo1(unsigned int rs1)
>>   {
>> -    return __builtin_riscv_sha256sig0(rs1);
>> +    return __riscv_sha256sig0(rs1);
>>   }
>>
>>   unsigned int foo2(unsigned int rs1)
>>   {
>> -    return __builtin_riscv_sha256sig1(rs1);
>> +    return __riscv_sha256sig1(rs1);
>>   }
>>
>>   unsigned int foo3(unsigned int rs1)
>>   {
>> -    return __builtin_riscv_sha256sum0(rs1);
>> +    return __riscv_sha256sum0(rs1);
>>   }
>>
>>   unsigned int foo4(unsigned int rs1)
>>   {
>> -    return __builtin_riscv_sha256sum1(rs1);
>> +    return __riscv_sha256sum1(rs1);
>>   }
>>
>>   /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
>> index f2bcae36a1f..e6fb298d6a7 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
>> @@ -2,36 +2,36 @@
>>   /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1, uint32_t rs2)
>>   {
>> -    return __builtin_riscv_sha512sig0h(rs1,rs2);
>> +    return __riscv_sha512sig0h(rs1,rs2);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1, uint32_t rs2)
>>   {
>> -    return __builtin_riscv_sha512sig0l(rs1,rs2);
>> +    return __riscv_sha512sig0l(rs1,rs2);
>>   }
>>
>>   uint32_t foo3(uint32_t rs1, uint32_t rs2)
>>   {
>> -    return __builtin_riscv_sha512sig1h(rs1,rs2);
>> +    return __riscv_sha512sig1h(rs1,rs2);
>>   }
>>
>>   uint32_t foo4(uint32_t rs1, uint32_t rs2)
>>   {
>> -    return __builtin_riscv_sha512sig1l(rs1,rs2);
>> +    return __riscv_sha512sig1l(rs1,rs2);
>>   }
>>
>>   uint32_t foo5(uint32_t rs1, uint32_t rs2)
>>   {
>> -    return __builtin_riscv_sha512sum0r(rs1,rs2);
>> +    return __riscv_sha512sum0r(rs1,rs2);
>>   }
>>
>>   uint32_t foo6(uint32_t rs1, uint32_t rs2)
>>   {
>> -    return __builtin_riscv_sha512sum1r(rs1,rs2);
>> +    return __riscv_sha512sum1r(rs1,rs2);
>>   }
>>
>>   /* { dg-final { scan-assembler-times "sha512sig0h" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
>> index 4f248575e66..c65c2043d08 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
>> @@ -2,26 +2,26 @@
>>   /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint64_t foo1(uint64_t rs1)
>>   {
>> -    return __builtin_riscv_sha512sig0(rs1);
>> +    return __riscv_sha512sig0(rs1);
>>   }
>>
>>   uint64_t foo2(uint64_t rs1)
>>   {
>> -    return __builtin_riscv_sha512sig1(rs1);
>> +    return __riscv_sha512sig1(rs1);
>>   }
>>
>>   uint64_t foo3(uint64_t rs1)
>>   {
>> -    return __builtin_riscv_sha512sum0(rs1);
>> +    return __riscv_sha512sum0(rs1);
>>   }
>>
>>   uint64_t foo4(uint64_t rs1)
>>   {
>> -    return __builtin_riscv_sha512sum1(rs1);
>> +    return __riscv_sha512sum1(rs1);
>>   }
>>
>>
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksed32.c b/gcc/testsuite/gcc.target/riscv/zksed32.c
>> index 0e8f01cd548..d63e0775391 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksed32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksed32.c
>> @@ -2,16 +2,16 @@
>>   /* { dg-options "-O2 -march=rv32gc_zksed -mabi=ilp32" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>>   {
>> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
>> +    return __riscv_sm4ks(rs1,rs2,bs);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>>   {
>> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
>> +    return __riscv_sm4ed(rs1,rs2,bs);
>>   }
>>
>>
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksed64.c b/gcc/testsuite/gcc.target/riscv/zksed64.c
>> index 9e4d1961419..426122cf6eb 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksed64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksed64.c
>> @@ -2,16 +2,16 @@
>>   /* { dg-options "-O2 -march=rv64gc_zksed -mabi=lp64" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>>   {
>> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
>> +    return __riscv_sm4ks(rs1,rs2,bs);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>>   {
>> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
>> +    return __riscv_sm4ed(rs1,rs2,bs);
>>   }
>>
>>
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksh32.c b/gcc/testsuite/gcc.target/riscv/zksh32.c
>> index c182e557a85..3d0d154ad1d 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksh32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksh32.c
>> @@ -2,16 +2,16 @@
>>   /* { dg-options "-O2 -march=rv32gc_zksh -mabi=ilp32" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1)
>>   {
>> -    return __builtin_riscv_sm3p0(rs1);
>> +    return __riscv_sm3p0(rs1);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1)
>>   {
>> -    return __builtin_riscv_sm3p1(rs1);
>> +    return __riscv_sm3p1(rs1);
>>   }
>>
>>
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksh64.c b/gcc/testsuite/gcc.target/riscv/zksh64.c
>> index d794b39f77a..1398c1329f0 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksh64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksh64.c
>> @@ -2,16 +2,16 @@
>>   /* { dg-options "-O2 -march=rv64gc_zksh -mabi=lp64" } */
>>   /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>>
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>>
>>   uint32_t foo1(uint32_t rs1)
>>   {
>> -    return __builtin_riscv_sm3p0(rs1);
>> +    return __riscv_sm3p0(rs1);
>>   }
>>
>>   uint32_t foo2(uint32_t rs1)
>>   {
>> -    return __builtin_riscv_sm3p1(rs1);
>> +    return __riscv_sm3p1(rs1);
>>   }
>>
>>
>> --
>> 2.34.1
>>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Add C intrinsics for scalar crypto extension
  2023-11-29 16:49   ` Liao Shihua
@ 2023-11-29 17:58     ` Christoph Müllner
  2023-11-29 20:38       ` Craig Topper
  0 siblings, 1 reply; 6+ messages in thread
From: Christoph Müllner @ 2023-11-29 17:58 UTC (permalink / raw)
  To: Liao Shihua
  Cc: gcc-patches, kito.cheng, shiyulong, jiawei, chenyixuan,
	craig.topper, palmer, jeffreyalaw

On Wed, Nov 29, 2023 at 5:49 PM Liao Shihua <shihua@iscas.ac.cn> wrote:
>
>
> 在 2023/11/29 23:03, Christoph Müllner 写道:
>
> On Mon, Nov 27, 2023 at 9:36 AM Liao Shihua <shihua@iscas.ac.cn> wrote:
>
> This patch adds C intrinsics for the scalar crypto extension.
> Because riscv-c-api (https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44/files) includes the zbkb/zbkc/zbkx
> intrinsics in the bit-manipulation extension, this patch only supports the zkn*/zks* intrinsics.
>
> Thanks for working on this!
> Looking forward to seeing the second patch (covering bitmanip) soon as well!
> A couple of comments can be found below.
>
>
> Thanks for your comments, Christoph. Typos will be corrected in the next patch.
>
> The implementation of the intrinsics follows the implementation in LLVM. (It does look a little strange.)
>
> I will unify the implementation method in the next patch.
>
>
>
> gcc/ChangeLog:
>
>         * config.gcc: Add riscv_crypto.h
>         * config/riscv/riscv_crypto.h: New file.
>
> gcc/testsuite/ChangeLog:
>
>         * gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
>         * gcc.target/riscv/zknd64.c: Likewise.
>         * gcc.target/riscv/zkne32.c: Likewise.
>         * gcc.target/riscv/zkne64.c: Likewise.
>         * gcc.target/riscv/zknh-sha256-32.c: Likewise.
>         * gcc.target/riscv/zknh-sha256-64.c: Likewise.
>         * gcc.target/riscv/zknh-sha512-32.c: Likewise.
>         * gcc.target/riscv/zknh-sha512-64.c: Likewise.
>         * gcc.target/riscv/zksed32.c: Likewise.
>         * gcc.target/riscv/zksed64.c: Likewise.
>         * gcc.target/riscv/zksh32.c: Likewise.
>         * gcc.target/riscv/zksh64.c: Likewise.
>
> ---
>  gcc/config.gcc                                |   2 +-
>  gcc/config/riscv/riscv_crypto.h               | 219 ++++++++++++++++++
>  gcc/testsuite/gcc.target/riscv/zknd32.c       |   6 +-
>  gcc/testsuite/gcc.target/riscv/zknd64.c       |  12 +-
>  gcc/testsuite/gcc.target/riscv/zkne32.c       |   6 +-
>  gcc/testsuite/gcc.target/riscv/zkne64.c       |  10 +-
>  .../gcc.target/riscv/zknh-sha256-32.c         |  22 +-
>  .../gcc.target/riscv/zknh-sha256-64.c         |  10 +-
>  .../gcc.target/riscv/zknh-sha512-32.c         |  14 +-
>  .../gcc.target/riscv/zknh-sha512-64.c         |  10 +-
>  gcc/testsuite/gcc.target/riscv/zksed32.c      |   6 +-
>  gcc/testsuite/gcc.target/riscv/zksed64.c      |   6 +-
>  gcc/testsuite/gcc.target/riscv/zksh32.c       |   6 +-
>  gcc/testsuite/gcc.target/riscv/zksh64.c       |   6 +-
>  14 files changed, 288 insertions(+), 47 deletions(-)
>  create mode 100644 gcc/config/riscv/riscv_crypto.h
>
> diff --git a/gcc/config.gcc b/gcc/config.gcc
> index b88591b6fd8..d67fe8b6a6f 100644
> --- a/gcc/config.gcc
> +++ b/gcc/config.gcc
> @@ -548,7 +548,7 @@ riscv*)
>         extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
>         extra_objs="${extra_objs} thead.o riscv-target-attr.o"
>         d_target_objs="riscv-d.o"
> -       extra_headers="riscv_vector.h"
> +       extra_headers="riscv_vector.h riscv_crypto.h"
>         target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
>         target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
>         ;;
> diff --git a/gcc/config/riscv/riscv_crypto.h b/gcc/config/riscv/riscv_crypto.h
> new file mode 100644
> index 00000000000..149c1132e10
> --- /dev/null
> +++ b/gcc/config/riscv/riscv_crypto.h
> @@ -0,0 +1,219 @@
> +/* RISC-V 'K' Extension intrinsics include file.
> +   Copyright (C) 2023 Free Software Foundation, Inc.
> +
> +   This file is part of GCC.
> +
> +   GCC is free software; you can redistribute it and/or modify it
> +   under the terms of the GNU General Public License as published
> +   by the Free Software Foundation; either version 3, or (at your
> +   option) any later version.
> +
> +   GCC is distributed in the hope that it will be useful, but WITHOUT
> +   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
> +   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
> +   License for more details.
> +
> +   Under Section 7 of GPL version 3, you are granted additional
> +   permissions described in the GCC Runtime Library Exception, version
> +   3.1, as published by the Free Software Foundation.
> +
> +   You should have received a copy of the GNU General Public License and
> +   a copy of the GCC Runtime Library Exception along with this program;
> +   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#ifndef __RISCV_CRYPTO_H
> +#define __RISCV_CRYPTO_H
> +
> +#include <stdint.h>
> +
> +#if defined (__cplusplus)
> +extern "C" {
> +#endif
> +
> +#if defined(__riscv_zknd)
> +#if __riscv_xlen == 32
> +#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
> +#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
> +#endif
> +
> +#if __riscv_xlen == 64
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64ds (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64ds (__x, __y);
> +}
>
> I don't understand why some intrinsic functions are implemented as
> macros to builtins
> and some are implemented as static inline wrappers around butilins.
> Is there a particular reason that this is mixed?
>
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64dsm (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64dsm (__x, __y);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64im (uint64_t __x)
> +{
> +  return __builtin_riscv_aes64im (__x);
> +}
> +#endif
> +#endif // defined (__riscv_zknd)
> +
> +#if defined(__riscv_zkne)
> +#if __riscv_xlen == 32
> +#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
> +#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
> +#endif
> +
> +#if __riscv_xlen == 64
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64es (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64es (__x, __y);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64esm (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64esm (__x, __y);
> +}
> +#endif
> +#endif // defined (__riscv_zknd)
>
> Copy and paste mistake in the comment (should be "__riscv_zkne")
>
> +
> +#if defined(__riscv_zknd) || defined(__riscv_zkne)
> +#if __riscv_xlen == 64
> +#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_aes64ks2 (uint64_t __x, uint64_t __y)
> +{
> +  return __builtin_riscv_aes64ks2 (__x, __y);
> +}
> +#endif
> +#endif // defined (__riscv_zknd) || defined (__riscv_zkne)
> +
> +#if defined(__riscv_zknh)
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sig0 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sig0 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sig1 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sig1 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sum0 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sum0 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha256sum1 (uint32_t __x)
> +{
> +  return __builtin_riscv_sha256sum1 (__x);
> +}
> +
> +#if __riscv_xlen == 32
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig0h (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig0h (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig0l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig0l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig1h (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig1h (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig1l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sig1l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum0l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum0l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum0r (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum0r (__x, __y);
> +}
>
> Why sum0l and sum0r?
> The specification says sum0h and sum0l.

Note, that this was just fixed in the intrinsic spec:
  https://github.com/riscv-non-isa/riscv-c-api-doc/pull/58

>
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum1l (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum1l (__x, __y);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum1r (uint32_t __x, uint32_t __y)
> +{
> +  return __builtin_riscv_sha512sum1r (__x, __y);
> +}
>
> Why sum1l and sum1r?
> The specification says sum1h and sum1l.

Note, that this was just fixed in the intrinsic spec:
  https://github.com/riscv-non-isa/riscv-c-api-doc/pull/58

>
>
> +#endif
> +
> +#if __riscv_xlen == 64
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig0 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sig0 (__x);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sig1 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sig1 (__x);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum0 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sum0 (__x);
> +}
> +
> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sha512sum1 (uint64_t __x)
> +{
> +  return __builtin_riscv_sha512sum1 (__x);
> +}
> +#endif
> +#endif // defined (__riscv_zknh)
> +
> +#if defined(__riscv_zksh)
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sm3p0 (uint32_t __x)
> +{
> +  return __builtin_riscv_sm3p0 (__x);
> +}
> +
> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
> +__riscv_sm3p1 (uint32_t __x)
> +{
> +  return __builtin_riscv_sm3p1 (__x);
> +}
> +#endif // defined (__riscv_zksh)
> +
> +#if defined(__riscv_zksed)
> +#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs);
> +#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs);
> +#endif // defined (__riscv_zksh)
>
> Wrong comment (should be "__riscv_zksed").
>
> +
> +#if defined(__cplusplus)
> +}
> +#endif
> +
> +#endif
> \ No newline at end of file
> diff --git a/gcc/testsuite/gcc.target/riscv/zknd32.c b/gcc/testsuite/gcc.target/riscv/zknd32.c
> index e60c027e091..62b730a700f 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknd32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknd32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zknd -mabi=ilp32d" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, int bs)
>  {
> -    return __builtin_riscv_aes32dsi(rs1,rs2,bs);
> +    return __riscv_aes32dsi(rs1,rs2,bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, int bs)
>  {
> -    return __builtin_riscv_aes32dsmi(rs1,rs2,bs);
> +    return __riscv_aes32dsmi(rs1,rs2,bs);
>  }
>
>  /* { dg-final { scan-assembler-times "aes32dsi" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknd64.c b/gcc/testsuite/gcc.target/riscv/zknd64.c
> index 707418cd51e..e5f2be72bae 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknd64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknd64.c
> @@ -2,31 +2,31 @@
>  /* { dg-options "-O2 -march=rv64gc_zknd -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint64_t foo1(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64ds(rs1,rs2);
> +    return __riscv_aes64ds(rs1,rs2);
>  }
>
>  uint64_t foo2(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64dsm(rs1,rs2);
> +    return __riscv_aes64dsm(rs1,rs2);
>  }
>
>  uint64_t foo3(uint64_t rs1, unsigned rnum)
>  {
> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
> +    return __riscv_aes64ks1i(rs1,rnum);
>  }
>
>  uint64_t foo4(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64ks2(rs1,rs2);
> +    return __riscv_aes64ks2(rs1,rs2);
>  }
>
>  uint64_t foo5(uint64_t rs1)
>  {
> -    return __builtin_riscv_aes64im(rs1);
> +    return __riscv_aes64im(rs1);
>  }
>
>  /* { dg-final { scan-assembler-times "aes64ds\t" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zkne32.c b/gcc/testsuite/gcc.target/riscv/zkne32.c
> index 252e9ffa43b..c3a7205a48b 100644
> --- a/gcc/testsuite/gcc.target/riscv/zkne32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zkne32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zkne -mabi=ilp32d" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_aes32esi(rs1, rs2, bs);
> +    return __riscv_aes32esi(rs1, rs2, bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_aes32esmi(rs1, rs2, bs);
> +    return __riscv_aes32esmi(rs1, rs2, bs);
>  }
>
>  /* { dg-final { scan-assembler-times "aes32esi" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zkne64.c b/gcc/testsuite/gcc.target/riscv/zkne64.c
> index b25f6b5c29a..e99b21a46dd 100644
> --- a/gcc/testsuite/gcc.target/riscv/zkne64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zkne64.c
> @@ -2,26 +2,26 @@
>  /* { dg-options "-O2 -march=rv64gc_zkne -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint64_t foo1(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64es(rs1,rs2);
> +    return __riscv_aes64es(rs1,rs2);
>  }
>
>  uint64_t foo2(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64esm(rs1,rs2);
> +    return __riscv_aes64esm(rs1,rs2);
>  }
>
>  uint64_t foo3(uint64_t rs1, unsigned rnum)
>  {
> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
> +    return __riscv_aes64ks1i(rs1,rnum);
>  }
>
>  uint64_t foo4(uint64_t rs1, uint64_t rs2)
>  {
> -    return __builtin_riscv_aes64ks2(rs1,rs2);
> +    return __riscv_aes64ks2(rs1,rs2);
>  }
>
>  /* { dg-final { scan-assembler-times "aes64es\t" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
> index c51b143a8a5..96e967fba96 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
> @@ -2,7 +2,27 @@
>  /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32d" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include "zknh-sha256-64.c"
> +#include "riscv_crypto.h"
> +
> +unsigned int foo1(unsigned int rs1)
> +{
> +    return __riscv_sha256sig0(rs1);
> +}
> +
> +unsigned int foo2(unsigned int rs1)
> +{
> +    return __riscv_sha256sig1(rs1);
> +}
> +
> +unsigned int foo3(unsigned int rs1)
> +{
> +    return __riscv_sha256sum0(rs1);
> +}
> +
> +unsigned int foo4(unsigned int rs1)
> +{
> +    return __riscv_sha256sum1(rs1);
> +}
>
>  /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
>  /* { dg-final { scan-assembler-times "sha256sig1" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
> index 2ef37601e6f..172b84421e2 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
> @@ -2,24 +2,26 @@
>  /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> +#include "riscv_crypto.h"
> +
>  unsigned int foo1(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sig0(rs1);
> +    return __riscv_sha256sig0(rs1);
>  }
>
>  unsigned int foo2(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sig1(rs1);
> +    return __riscv_sha256sig1(rs1);
>  }
>
>  unsigned int foo3(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sum0(rs1);
> +    return __riscv_sha256sum0(rs1);
>  }
>
>  unsigned int foo4(unsigned int rs1)
>  {
> -    return __builtin_riscv_sha256sum1(rs1);
> +    return __riscv_sha256sum1(rs1);
>  }
>
>  /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
> index f2bcae36a1f..e6fb298d6a7 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
> @@ -2,36 +2,36 @@
>  /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig0h(rs1,rs2);
> +    return __riscv_sha512sig0h(rs1,rs2);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig0l(rs1,rs2);
> +    return __riscv_sha512sig0l(rs1,rs2);
>  }
>
>  uint32_t foo3(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig1h(rs1,rs2);
> +    return __riscv_sha512sig1h(rs1,rs2);
>  }
>
>  uint32_t foo4(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sig1l(rs1,rs2);
> +    return __riscv_sha512sig1l(rs1,rs2);
>  }
>
>  uint32_t foo5(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sum0r(rs1,rs2);
> +    return __riscv_sha512sum0r(rs1,rs2);
>  }
>
>  uint32_t foo6(uint32_t rs1, uint32_t rs2)
>  {
> -    return __builtin_riscv_sha512sum1r(rs1,rs2);
> +    return __riscv_sha512sum1r(rs1,rs2);
>  }
>
>  /* { dg-final { scan-assembler-times "sha512sig0h" 1 } } */
> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
> index 4f248575e66..c65c2043d08 100644
> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
> @@ -2,26 +2,26 @@
>  /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint64_t foo1(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sig0(rs1);
> +    return __riscv_sha512sig0(rs1);
>  }
>
>  uint64_t foo2(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sig1(rs1);
> +    return __riscv_sha512sig1(rs1);
>  }
>
>  uint64_t foo3(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sum0(rs1);
> +    return __riscv_sha512sum0(rs1);
>  }
>
>  uint64_t foo4(uint64_t rs1)
>  {
> -    return __builtin_riscv_sha512sum1(rs1);
> +    return __riscv_sha512sum1(rs1);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksed32.c b/gcc/testsuite/gcc.target/riscv/zksed32.c
> index 0e8f01cd548..d63e0775391 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksed32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksed32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zksed -mabi=ilp32" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
> +    return __riscv_sm4ks(rs1,rs2,bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
> +    return __riscv_sm4ed(rs1,rs2,bs);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksed64.c b/gcc/testsuite/gcc.target/riscv/zksed64.c
> index 9e4d1961419..426122cf6eb 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksed64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksed64.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv64gc_zksed -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
> +    return __riscv_sm4ks(rs1,rs2,bs);
>  }
>
>  uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>  {
> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
> +    return __riscv_sm4ed(rs1,rs2,bs);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksh32.c b/gcc/testsuite/gcc.target/riscv/zksh32.c
> index c182e557a85..3d0d154ad1d 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksh32.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksh32.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv32gc_zksh -mabi=ilp32" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p0(rs1);
> +    return __riscv_sm3p0(rs1);
>  }
>
>  uint32_t foo2(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p1(rs1);
> +    return __riscv_sm3p1(rs1);
>  }
>
>
> diff --git a/gcc/testsuite/gcc.target/riscv/zksh64.c b/gcc/testsuite/gcc.target/riscv/zksh64.c
> index d794b39f77a..1398c1329f0 100644
> --- a/gcc/testsuite/gcc.target/riscv/zksh64.c
> +++ b/gcc/testsuite/gcc.target/riscv/zksh64.c
> @@ -2,16 +2,16 @@
>  /* { dg-options "-O2 -march=rv64gc_zksh -mabi=lp64" } */
>  /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>
> -#include <stdint-gcc.h>
> +#include "riscv_crypto.h"
>
>  uint32_t foo1(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p0(rs1);
> +    return __riscv_sm3p0(rs1);
>  }
>
>  uint32_t foo2(uint32_t rs1)
>  {
> -    return __builtin_riscv_sm3p1(rs1);
> +    return __riscv_sm3p1(rs1);
>  }
>
>
> --
> 2.34.1
>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] Add C intrinsics for scalar crypto extension
  2023-11-29 17:58     ` Christoph Müllner
@ 2023-11-29 20:38       ` Craig Topper
  0 siblings, 0 replies; 6+ messages in thread
From: Craig Topper @ 2023-11-29 20:38 UTC (permalink / raw)
  To: Christoph Müllner
  Cc: Liao Shihua, gcc-patches, kito.cheng, shiyulong, jiawei,
	chenyixuan, palmer, jeffreyalaw

[-- Attachment #1: Type: text/plain, Size: 24387 bytes --]

The intrinsics that use macros are the ones that require an integer constant expression for one of the arguments. Clang needs to be able to see the constant expression as an argument to the underlying builtin. Thus the macro.

Based on my previous x86 experience, gcc may only require a macro for -O0. There are many x86 intrinsics in gcc that have two versions based on whether __OPTIMIZE__ is defined. For example https://github.com/gcc-mirror/gcc/blob/master/gcc/config/i386/xmmintrin.h#L52

> On Nov 29, 2023, at 9:58 AM, Christoph Müllner <christoph.muellner@vrull.eu> wrote:
> 
> On Wed, Nov 29, 2023 at 5:49 PM Liao Shihua <shihua@iscas.ac.cn <mailto:shihua@iscas.ac.cn>> wrote:
>> 
>> 
>> 在 2023/11/29 23:03, Christoph Müllner 写道:
>> 
>> On Mon, Nov 27, 2023 at 9:36 AM Liao Shihua <shihua@iscas.ac.cn> wrote:
>> 
>> This patch adds C intrinsics for the scalar crypto extension.
>> Because the riscv-c-api (https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44/files) includes the zbkb/zbkc/zbkx
>> intrinsics in the bit-manipulation extension, this patch only supports the zkn*/zks* intrinsics.
>> 
>> Thanks for working on this!
>> Looking forward to seeing the second patch (covering bitmanip) soon as well!
>> A couple of comments can be found below.
>> 
>> 
>> Thanks for your comments, Christoph. Typos will be corrected in the next patch.
>> 
>> The implementation of the intrinsics follows the implementation in LLVM. (It does look a little strange.)
>> 
>> I will unify the implementation method in the next patch.
>> 
>> 
>> 
>> gcc/ChangeLog:
>> 
>>        * config.gcc: Add riscv_crypto.h
>>        * config/riscv/riscv_crypto.h: New file.
>> 
>> gcc/testsuite/ChangeLog:
>> 
>>        * gcc.target/riscv/zknd32.c: Use intrinsics instead of builtins.
>>        * gcc.target/riscv/zknd64.c: Likewise.
>>        * gcc.target/riscv/zkne32.c: Likewise.
>>        * gcc.target/riscv/zkne64.c: Likewise.
>>        * gcc.target/riscv/zknh-sha256-32.c: Likewise.
>>        * gcc.target/riscv/zknh-sha256-64.c: Likewise.
>>        * gcc.target/riscv/zknh-sha512-32.c: Likewise.
>>        * gcc.target/riscv/zknh-sha512-64.c: Likewise.
>>        * gcc.target/riscv/zksed32.c: Likewise.
>>        * gcc.target/riscv/zksed64.c: Likewise.
>>        * gcc.target/riscv/zksh32.c: Likewise.
>>        * gcc.target/riscv/zksh64.c: Likewise.
>> 
>> ---
>> gcc/config.gcc                                |   2 +-
>> gcc/config/riscv/riscv_crypto.h               | 219 ++++++++++++++++++
>> gcc/testsuite/gcc.target/riscv/zknd32.c       |   6 +-
>> gcc/testsuite/gcc.target/riscv/zknd64.c       |  12 +-
>> gcc/testsuite/gcc.target/riscv/zkne32.c       |   6 +-
>> gcc/testsuite/gcc.target/riscv/zkne64.c       |  10 +-
>> .../gcc.target/riscv/zknh-sha256-32.c         |  22 +-
>> .../gcc.target/riscv/zknh-sha256-64.c         |  10 +-
>> .../gcc.target/riscv/zknh-sha512-32.c         |  14 +-
>> .../gcc.target/riscv/zknh-sha512-64.c         |  10 +-
>> gcc/testsuite/gcc.target/riscv/zksed32.c      |   6 +-
>> gcc/testsuite/gcc.target/riscv/zksed64.c      |   6 +-
>> gcc/testsuite/gcc.target/riscv/zksh32.c       |   6 +-
>> gcc/testsuite/gcc.target/riscv/zksh64.c       |   6 +-
>> 14 files changed, 288 insertions(+), 47 deletions(-)
>> create mode 100644 gcc/config/riscv/riscv_crypto.h
>> 
>> diff --git a/gcc/config.gcc b/gcc/config.gcc
>> index b88591b6fd8..d67fe8b6a6f 100644
>> --- a/gcc/config.gcc
>> +++ b/gcc/config.gcc
>> @@ -548,7 +548,7 @@ riscv*)
>>        extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
>>        extra_objs="${extra_objs} thead.o riscv-target-attr.o"
>>        d_target_objs="riscv-d.o"
>> -       extra_headers="riscv_vector.h"
>> +       extra_headers="riscv_vector.h riscv_crypto.h"
>>        target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
>>        target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
>>        ;;
>> diff --git a/gcc/config/riscv/riscv_crypto.h b/gcc/config/riscv/riscv_crypto.h
>> new file mode 100644
>> index 00000000000..149c1132e10
>> --- /dev/null
>> +++ b/gcc/config/riscv/riscv_crypto.h
>> @@ -0,0 +1,219 @@
>> +/* RISC-V 'K' Extension intrinsics include file.
>> +   Copyright (C) 2023 Free Software Foundation, Inc.
>> +
>> +   This file is part of GCC.
>> +
>> +   GCC is free software; you can redistribute it and/or modify it
>> +   under the terms of the GNU General Public License as published
>> +   by the Free Software Foundation; either version 3, or (at your
>> +   option) any later version.
>> +
>> +   GCC is distributed in the hope that it will be useful, but WITHOUT
>> +   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
>> +   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
>> +   License for more details.
>> +
>> +   Under Section 7 of GPL version 3, you are granted additional
>> +   permissions described in the GCC Runtime Library Exception, version
>> +   3.1, as published by the Free Software Foundation.
>> +
>> +   You should have received a copy of the GNU General Public License and
>> +   a copy of the GCC Runtime Library Exception along with this program;
>> +   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
>> +   <http://www.gnu.org/licenses/>.  */
>> +
>> +#ifndef __RISCV_CRYPTO_H
>> +#define __RISCV_CRYPTO_H
>> +
>> +#include <stdint.h>
>> +
>> +#if defined (__cplusplus)
>> +extern "C" {
>> +#endif
>> +
>> +#if defined(__riscv_zknd)
>> +#if __riscv_xlen == 32
>> +#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
>> +#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
>> +#endif
>> +
>> +#if __riscv_xlen == 64
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64ds (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64ds (__x, __y);
>> +}
>> 
>> I don't understand why some intrinsic functions are implemented as
>> macros to builtins
>> and some are implemented as static inline wrappers around butilins.
>> Is there a particular reason that this is mixed?
>> 
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64dsm (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64dsm (__x, __y);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64im (uint64_t __x)
>> +{
>> +  return __builtin_riscv_aes64im (__x);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknd)
>> +
>> +#if defined(__riscv_zkne)
>> +#if __riscv_xlen == 32
>> +#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
>> +#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
>> +#endif
>> +
>> +#if __riscv_xlen == 64
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64es (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64es (__x, __y);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64esm (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64esm (__x, __y);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknd)
>> 
>> Copy and paste mistake in the comment (should be "__riscv_zkne")
>> 
>> +
>> +#if defined(__riscv_zknd) || defined(__riscv_zkne)
>> +#if __riscv_xlen == 64
>> +#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_aes64ks2 (uint64_t __x, uint64_t __y)
>> +{
>> +  return __builtin_riscv_aes64ks2 (__x, __y);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknd) || defined (__riscv_zkne)
>> +
>> +#if defined(__riscv_zknh)
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sig0 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sig0 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sig1 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sig1 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sum0 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sum0 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha256sum1 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sha256sum1 (__x);
>> +}
>> +
>> +#if __riscv_xlen == 32
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig0h (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig0h (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig0l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig0l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig1h (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig1h (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig1l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sig1l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum0l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum0l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum0r (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum0r (__x, __y);
>> +}
>> 
>> Why sum0l and sum0r?
>> The specification says sum0h and sum0l.
> 
> Note, that this was just fixed in the intrinsic spec:
>  https://github.com/riscv-non-isa/riscv-c-api-doc/pull/58
> 
>> 
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum1l (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum1l (__x, __y);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum1r (uint32_t __x, uint32_t __y)
>> +{
>> +  return __builtin_riscv_sha512sum1r (__x, __y);
>> +}
>> 
>> Why sum1l and sum1r?
>> The specification says sum1h and sum1l.
> 
> Note, that this was just fixed in the intrinsic spec:
>  https://github.com/riscv-non-isa/riscv-c-api-doc/pull/58
> 
>> 
>> 
>> +#endif
>> +
>> +#if __riscv_xlen == 64
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig0 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sig0 (__x);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sig1 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sig1 (__x);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum0 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sum0 (__x);
>> +}
>> +
>> +static __inline__ uint64_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sha512sum1 (uint64_t __x)
>> +{
>> +  return __builtin_riscv_sha512sum1 (__x);
>> +}
>> +#endif
>> +#endif // defined (__riscv_zknh)
>> +
>> +#if defined(__riscv_zksh)
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sm3p0 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sm3p0 (__x);
>> +}
>> +
>> +static __inline__ uint32_t __attribute__ ((__always_inline__, __nodebug__))
>> +__riscv_sm3p1 (uint32_t __x)
>> +{
>> +  return __builtin_riscv_sm3p1 (__x);
>> +}
>> +#endif // defined (__riscv_zksh)
>> +
>> +#if defined(__riscv_zksed)
>> +#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs);
>> +#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs);
>> +#endif // defined (__riscv_zksh)
>> 
>> Wrong comment (should be "__riscv_zksed").
>> 
>> +
>> +#if defined(__cplusplus)
>> +}
>> +#endif
>> +
>> +#endif
>> \ No newline at end of file
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknd32.c b/gcc/testsuite/gcc.target/riscv/zknd32.c
>> index e60c027e091..62b730a700f 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknd32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknd32.c
>> @@ -2,16 +2,16 @@
>> /* { dg-options "-O2 -march=rv32gc_zknd -mabi=ilp32d" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1, uint32_t rs2, int bs)
>> {
>> -    return __builtin_riscv_aes32dsi(rs1,rs2,bs);
>> +    return __riscv_aes32dsi(rs1,rs2,bs);
>> }
>> 
>> uint32_t foo2(uint32_t rs1, uint32_t rs2, int bs)
>> {
>> -    return __builtin_riscv_aes32dsmi(rs1,rs2,bs);
>> +    return __riscv_aes32dsmi(rs1,rs2,bs);
>> }
>> 
>> /* { dg-final { scan-assembler-times "aes32dsi" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknd64.c b/gcc/testsuite/gcc.target/riscv/zknd64.c
>> index 707418cd51e..e5f2be72bae 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknd64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknd64.c
>> @@ -2,31 +2,31 @@
>> /* { dg-options "-O2 -march=rv64gc_zknd -mabi=lp64" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint64_t foo1(uint64_t rs1, uint64_t rs2)
>> {
>> -    return __builtin_riscv_aes64ds(rs1,rs2);
>> +    return __riscv_aes64ds(rs1,rs2);
>> }
>> 
>> uint64_t foo2(uint64_t rs1, uint64_t rs2)
>> {
>> -    return __builtin_riscv_aes64dsm(rs1,rs2);
>> +    return __riscv_aes64dsm(rs1,rs2);
>> }
>> 
>> uint64_t foo3(uint64_t rs1, unsigned rnum)
>> {
>> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
>> +    return __riscv_aes64ks1i(rs1,rnum);
>> }
>> 
>> uint64_t foo4(uint64_t rs1, uint64_t rs2)
>> {
>> -    return __builtin_riscv_aes64ks2(rs1,rs2);
>> +    return __riscv_aes64ks2(rs1,rs2);
>> }
>> 
>> uint64_t foo5(uint64_t rs1)
>> {
>> -    return __builtin_riscv_aes64im(rs1);
>> +    return __riscv_aes64im(rs1);
>> }
>> 
>> /* { dg-final { scan-assembler-times "aes64ds\t" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zkne32.c b/gcc/testsuite/gcc.target/riscv/zkne32.c
>> index 252e9ffa43b..c3a7205a48b 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zkne32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zkne32.c
>> @@ -2,16 +2,16 @@
>> /* { dg-options "-O2 -march=rv32gc_zkne -mabi=ilp32d" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>> {
>> -    return __builtin_riscv_aes32esi(rs1, rs2, bs);
>> +    return __riscv_aes32esi(rs1, rs2, bs);
>> }
>> 
>> uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>> {
>> -    return __builtin_riscv_aes32esmi(rs1, rs2, bs);
>> +    return __riscv_aes32esmi(rs1, rs2, bs);
>> }
>> 
>> /* { dg-final { scan-assembler-times "aes32esi" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zkne64.c b/gcc/testsuite/gcc.target/riscv/zkne64.c
>> index b25f6b5c29a..e99b21a46dd 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zkne64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zkne64.c
>> @@ -2,26 +2,26 @@
>> /* { dg-options "-O2 -march=rv64gc_zkne -mabi=lp64" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint64_t foo1(uint64_t rs1, uint64_t rs2)
>> {
>> -    return __builtin_riscv_aes64es(rs1,rs2);
>> +    return __riscv_aes64es(rs1,rs2);
>> }
>> 
>> uint64_t foo2(uint64_t rs1, uint64_t rs2)
>> {
>> -    return __builtin_riscv_aes64esm(rs1,rs2);
>> +    return __riscv_aes64esm(rs1,rs2);
>> }
>> 
>> uint64_t foo3(uint64_t rs1, unsigned rnum)
>> {
>> -    return __builtin_riscv_aes64ks1i(rs1,rnum);
>> +    return __riscv_aes64ks1i(rs1,rnum);
>> }
>> 
>> uint64_t foo4(uint64_t rs1, uint64_t rs2)
>> {
>> -    return __builtin_riscv_aes64ks2(rs1,rs2);
>> +    return __riscv_aes64ks2(rs1,rs2);
>> }
>> 
>> /* { dg-final { scan-assembler-times "aes64es\t" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
>> index c51b143a8a5..96e967fba96 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-32.c
>> @@ -2,7 +2,27 @@
>> /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32d" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include "zknh-sha256-64.c"
>> +#include "riscv_crypto.h"
>> +
>> +unsigned int foo1(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sig0(rs1);
>> +}
>> +
>> +unsigned int foo2(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sig1(rs1);
>> +}
>> +
>> +unsigned int foo3(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sum0(rs1);
>> +}
>> +
>> +unsigned int foo4(unsigned int rs1)
>> +{
>> +    return __riscv_sha256sum1(rs1);
>> +}
>> 
>> /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
>> /* { dg-final { scan-assembler-times "sha256sig1" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
>> index 2ef37601e6f..172b84421e2 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha256-64.c
>> @@ -2,24 +2,26 @@
>> /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> +#include "riscv_crypto.h"
>> +
>> unsigned int foo1(unsigned int rs1)
>> {
>> -    return __builtin_riscv_sha256sig0(rs1);
>> +    return __riscv_sha256sig0(rs1);
>> }
>> 
>> unsigned int foo2(unsigned int rs1)
>> {
>> -    return __builtin_riscv_sha256sig1(rs1);
>> +    return __riscv_sha256sig1(rs1);
>> }
>> 
>> unsigned int foo3(unsigned int rs1)
>> {
>> -    return __builtin_riscv_sha256sum0(rs1);
>> +    return __riscv_sha256sum0(rs1);
>> }
>> 
>> unsigned int foo4(unsigned int rs1)
>> {
>> -    return __builtin_riscv_sha256sum1(rs1);
>> +    return __riscv_sha256sum1(rs1);
>> }
>> 
>> /* { dg-final { scan-assembler-times "sha256sig0" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
>> index f2bcae36a1f..e6fb298d6a7 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-32.c
>> @@ -2,36 +2,36 @@
>> /* { dg-options "-O2 -march=rv32gc_zknh -mabi=ilp32" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1, uint32_t rs2)
>> {
>> -    return __builtin_riscv_sha512sig0h(rs1,rs2);
>> +    return __riscv_sha512sig0h(rs1,rs2);
>> }
>> 
>> uint32_t foo2(uint32_t rs1, uint32_t rs2)
>> {
>> -    return __builtin_riscv_sha512sig0l(rs1,rs2);
>> +    return __riscv_sha512sig0l(rs1,rs2);
>> }
>> 
>> uint32_t foo3(uint32_t rs1, uint32_t rs2)
>> {
>> -    return __builtin_riscv_sha512sig1h(rs1,rs2);
>> +    return __riscv_sha512sig1h(rs1,rs2);
>> }
>> 
>> uint32_t foo4(uint32_t rs1, uint32_t rs2)
>> {
>> -    return __builtin_riscv_sha512sig1l(rs1,rs2);
>> +    return __riscv_sha512sig1l(rs1,rs2);
>> }
>> 
>> uint32_t foo5(uint32_t rs1, uint32_t rs2)
>> {
>> -    return __builtin_riscv_sha512sum0r(rs1,rs2);
>> +    return __riscv_sha512sum0r(rs1,rs2);
>> }
>> 
>> uint32_t foo6(uint32_t rs1, uint32_t rs2)
>> {
>> -    return __builtin_riscv_sha512sum1r(rs1,rs2);
>> +    return __riscv_sha512sum1r(rs1,rs2);
>> }
>> 
>> /* { dg-final { scan-assembler-times "sha512sig0h" 1 } } */
>> diff --git a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
>> index 4f248575e66..c65c2043d08 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zknh-sha512-64.c
>> @@ -2,26 +2,26 @@
>> /* { dg-options "-O2 -march=rv64gc_zknh -mabi=lp64" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint64_t foo1(uint64_t rs1)
>> {
>> -    return __builtin_riscv_sha512sig0(rs1);
>> +    return __riscv_sha512sig0(rs1);
>> }
>> 
>> uint64_t foo2(uint64_t rs1)
>> {
>> -    return __builtin_riscv_sha512sig1(rs1);
>> +    return __riscv_sha512sig1(rs1);
>> }
>> 
>> uint64_t foo3(uint64_t rs1)
>> {
>> -    return __builtin_riscv_sha512sum0(rs1);
>> +    return __riscv_sha512sum0(rs1);
>> }
>> 
>> uint64_t foo4(uint64_t rs1)
>> {
>> -    return __builtin_riscv_sha512sum1(rs1);
>> +    return __riscv_sha512sum1(rs1);
>> }
>> 
>> 
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksed32.c b/gcc/testsuite/gcc.target/riscv/zksed32.c
>> index 0e8f01cd548..d63e0775391 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksed32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksed32.c
>> @@ -2,16 +2,16 @@
>> /* { dg-options "-O2 -march=rv32gc_zksed -mabi=ilp32" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>> {
>> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
>> +    return __riscv_sm4ks(rs1,rs2,bs);
>> }
>> 
>> uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>> {
>> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
>> +    return __riscv_sm4ed(rs1,rs2,bs);
>> }
>> 
>> 
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksed64.c b/gcc/testsuite/gcc.target/riscv/zksed64.c
>> index 9e4d1961419..426122cf6eb 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksed64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksed64.c
>> @@ -2,16 +2,16 @@
>> /* { dg-options "-O2 -march=rv64gc_zksed -mabi=lp64" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1, uint32_t rs2, unsigned bs)
>> {
>> -    return __builtin_riscv_sm4ks(rs1,rs2,bs);
>> +    return __riscv_sm4ks(rs1,rs2,bs);
>> }
>> 
>> uint32_t foo2(uint32_t rs1, uint32_t rs2, unsigned bs)
>> {
>> -    return __builtin_riscv_sm4ed(rs1,rs2,bs);
>> +    return __riscv_sm4ed(rs1,rs2,bs);
>> }
>> 
>> 
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksh32.c b/gcc/testsuite/gcc.target/riscv/zksh32.c
>> index c182e557a85..3d0d154ad1d 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksh32.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksh32.c
>> @@ -2,16 +2,16 @@
>> /* { dg-options "-O2 -march=rv32gc_zksh -mabi=ilp32" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1)
>> {
>> -    return __builtin_riscv_sm3p0(rs1);
>> +    return __riscv_sm3p0(rs1);
>> }
>> 
>> uint32_t foo2(uint32_t rs1)
>> {
>> -    return __builtin_riscv_sm3p1(rs1);
>> +    return __riscv_sm3p1(rs1);
>> }
>> 
>> 
>> diff --git a/gcc/testsuite/gcc.target/riscv/zksh64.c b/gcc/testsuite/gcc.target/riscv/zksh64.c
>> index d794b39f77a..1398c1329f0 100644
>> --- a/gcc/testsuite/gcc.target/riscv/zksh64.c
>> +++ b/gcc/testsuite/gcc.target/riscv/zksh64.c
>> @@ -2,16 +2,16 @@
>> /* { dg-options "-O2 -march=rv64gc_zksh -mabi=lp64" } */
>> /* { dg-skip-if "" { *-*-* } { "-g" "-flto"} } */
>> 
>> -#include <stdint-gcc.h>
>> +#include "riscv_crypto.h"
>> 
>> uint32_t foo1(uint32_t rs1)
>> {
>> -    return __builtin_riscv_sm3p0(rs1);
>> +    return __riscv_sm3p0(rs1);
>> }
>> 
>> uint32_t foo2(uint32_t rs1)
>> {
>> -    return __builtin_riscv_sm3p1(rs1);
>> +    return __riscv_sm3p1(rs1);
>> }
>> 
>> 
>> --
>> 2.34.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2023-11-29 20:38 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-27  8:34 [PATCH] Add C intrinsics for scalar crypto extension Liao Shihua
2023-11-29  1:31 ` Jeff Law
2023-11-29 15:03 ` Christoph Müllner
2023-11-29 16:49   ` Liao Shihua
2023-11-29 17:58     ` Christoph Müllner
2023-11-29 20:38       ` Craig Topper

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).