commit 26ba347fbb0cf8abcb861aa558711f51455a0ec3
Author: Kazu Hirata <kazu@google.com>
Date:   Fri May 6 14:26:12 2022 -0700

[AArch64] Add llvm/test/CodeGen/AArch64/i256-math.ll

This patch adds test cases for i256 additions and subtractions. I'm
leaving out multiplications for now, which would result in very long
sequences.

Differential Revision: https://reviews.llvm.org/D125125
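For context, the multiplication case being deferred would follow the same shape as the add/sub tests in the diff below; a minimal IR sketch (the function name u256_mul is hypothetical and not part of this patch):

define i256 @u256_mul(i256 %x, i256 %y) {
  %1 = mul i256 %x, %y
  ret i256 %1
}

Legalizing an i256 mul on AArch64 expands into a long chain of mul/umulh partial products with adds/adcs carry propagation across the four 64-bit limbs, which is why the message leaves it out.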
diff --git a/llvm/test/CodeGen/AArch64/i256-math.ll b/llvm/test/CodeGen/AArch64/i256-math.ll
new file mode 100644
index 000000000000..6e5afb4dc311
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/i256-math.ll
@@ -0,0 +1,306 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+
+declare { i256, i1 } @llvm.uadd.with.overflow.i256(i256, i256)
+declare i256 @llvm.uadd.sat.i256(i256, i256)
+
+declare { i256, i1 } @llvm.usub.with.overflow.i256(i256, i256)
+declare i256 @llvm.usub.sat.i256(i256, i256)
+
+declare { i256, i1 } @llvm.sadd.with.overflow.i256(i256, i256)
+declare i256 @llvm.sadd.sat.i256(i256, i256)
+
+declare { i256, i1 } @llvm.ssub.with.overflow.i256(i256, i256)
+declare i256 @llvm.ssub.sat.i256(i256, i256)
+
+define i256 @u256_add(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x4
+; CHECK-NEXT: adcs x1, x1, x5
+; CHECK-NEXT: adcs x2, x2, x6
+; CHECK-NEXT: adcs x3, x3, x7
+; CHECK-NEXT: ret
+ %1 = add i256 %x, %y
+ ret i256 %1
+}
+
+define { i256, i8 } @u256_checked_add(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_checked_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x4
+; CHECK-NEXT: adcs x1, x1, x5
+; CHECK-NEXT: adcs x2, x2, x6
+; CHECK-NEXT: adcs x3, x3, x7
+; CHECK-NEXT: cset w8, hs
+; CHECK-NEXT: eor w4, w8, #0x1
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.uadd.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = xor i1 %3, true
+ %5 = zext i1 %4 to i8
+ %6 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %7 = insertvalue { i256, i8 } %6, i8 %5, 1
+ ret { i256, i8 } %7
+}
+
+define { i256, i8 } @u256_overflowing_add(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_overflowing_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x4
+; CHECK-NEXT: adcs x1, x1, x5
+; CHECK-NEXT: adcs x2, x2, x6
+; CHECK-NEXT: adcs x3, x3, x7
+; CHECK-NEXT: cset w4, hs
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.uadd.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = zext i1 %3 to i8
+ %5 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %6 = insertvalue { i256, i8 } %5, i8 %4, 1
+ ret { i256, i8 } %6
+}
+
+define i256 @u256_saturating_add(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_saturating_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x8, x0, x4
+; CHECK-NEXT: adcs x9, x1, x5
+; CHECK-NEXT: adcs x10, x2, x6
+; CHECK-NEXT: adcs x11, x3, x7
+; CHECK-NEXT: cset w12, hs
+; CHECK-NEXT: cmp w12, #0
+; CHECK-NEXT: csinv x0, x8, xzr, eq
+; CHECK-NEXT: csinv x1, x9, xzr, eq
+; CHECK-NEXT: csinv x2, x10, xzr, eq
+; CHECK-NEXT: csinv x3, x11, xzr, eq
+; CHECK-NEXT: ret
+ %1 = tail call i256 @llvm.uadd.sat.i256(i256 %x, i256 %y)
+ ret i256 %1
+}
+
+define i256 @u256_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x0, x0, x4
+; CHECK-NEXT: sbcs x1, x1, x5
+; CHECK-NEXT: sbcs x2, x2, x6
+; CHECK-NEXT: sbcs x3, x3, x7
+; CHECK-NEXT: ret
+ %1 = sub i256 %x, %y
+ ret i256 %1
+}
+
+define { i256, i8 } @u256_checked_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_checked_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x0, x0, x4
+; CHECK-NEXT: sbcs x1, x1, x5
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x2, x2, x6
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x3, x3, x7
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: eor w4, w8, #0x1
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.usub.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = xor i1 %3, true
+ %5 = zext i1 %4 to i8
+ %6 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %7 = insertvalue { i256, i8 } %6, i8 %5, 1
+ ret { i256, i8 } %7
+}
+
+define { i256, i8 } @u256_overflowing_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_overflowing_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x0, x0, x4
+; CHECK-NEXT: sbcs x1, x1, x5
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x2, x2, x6
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x3, x3, x7
+; CHECK-NEXT: cset w4, lo
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.usub.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = zext i1 %3 to i8
+ %5 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %6 = insertvalue { i256, i8 } %5, i8 %4, 1
+ ret { i256, i8 } %6
+}
+
+define i256 @u256_saturating_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: u256_saturating_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x8, x0, x4
+; CHECK-NEXT: sbcs x9, x1, x5
+; CHECK-NEXT: sbcs x10, x2, x6
+; CHECK-NEXT: sbcs x11, x3, x7
+; CHECK-NEXT: cset w12, lo
+; CHECK-NEXT: cmp w12, #0
+; CHECK-NEXT: csel x0, xzr, x8, ne
+; CHECK-NEXT: csel x1, xzr, x9, ne
+; CHECK-NEXT: csel x2, xzr, x10, ne
+; CHECK-NEXT: csel x3, xzr, x11, ne
+; CHECK-NEXT: ret
+ %1 = tail call i256 @llvm.usub.sat.i256(i256 %x, i256 %y)
+ ret i256 %1
+}
+
+define i256 @i256_add(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x4
+; CHECK-NEXT: adcs x1, x1, x5
+; CHECK-NEXT: adcs x2, x2, x6
+; CHECK-NEXT: adcs x3, x3, x7
+; CHECK-NEXT: ret
+ %1 = add i256 %x, %y
+ ret i256 %1
+}
+
+define { i256, i8 } @i256_checked_add(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_checked_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x4
+; CHECK-NEXT: adcs x1, x1, x5
+; CHECK-NEXT: adcs x2, x2, x6
+; CHECK-NEXT: adcs x3, x3, x7
+; CHECK-NEXT: cset w8, vs
+; CHECK-NEXT: eor w4, w8, #0x1
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.sadd.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = xor i1 %3, true
+ %5 = zext i1 %4 to i8
+ %6 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %7 = insertvalue { i256, i8 } %6, i8 %5, 1
+ ret { i256, i8 } %7
+}
+
+define { i256, i8 } @i256_overflowing_add(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_overflowing_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x0, x0, x4
+; CHECK-NEXT: adcs x1, x1, x5
+; CHECK-NEXT: adcs x2, x2, x6
+; CHECK-NEXT: adcs x3, x3, x7
+; CHECK-NEXT: cset w4, vs
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.sadd.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = zext i1 %3 to i8
+ %5 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %6 = insertvalue { i256, i8 } %5, i8 %4, 1
+ ret { i256, i8 } %6
+}
+
+define i256 @i256_saturating_add(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_saturating_add:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adds x8, x0, x4
+; CHECK-NEXT: adcs x9, x1, x5
+; CHECK-NEXT: adcs x10, x2, x6
+; CHECK-NEXT: adcs x11, x3, x7
+; CHECK-NEXT: cset w12, vs
+; CHECK-NEXT: asr x13, x11, #63
+; CHECK-NEXT: cmp w12, #0
+; CHECK-NEXT: csel x0, x13, x8, ne
+; CHECK-NEXT: eor x8, x13, #0x8000000000000000
+; CHECK-NEXT: csel x1, x13, x9, ne
+; CHECK-NEXT: csel x2, x13, x10, ne
+; CHECK-NEXT: csel x3, x8, x11, ne
+; CHECK-NEXT: ret
+ %1 = tail call i256 @llvm.sadd.sat.i256(i256 %x, i256 %y)
+ ret i256 %1
+}
+
+define i256 @i256_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x0, x0, x4
+; CHECK-NEXT: sbcs x1, x1, x5
+; CHECK-NEXT: sbcs x2, x2, x6
+; CHECK-NEXT: sbcs x3, x3, x7
+; CHECK-NEXT: ret
+ %1 = sub i256 %x, %y
+ ret i256 %1
+}
+
+define { i256, i8 } @i256_checked_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_checked_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x0, x0, x4
+; CHECK-NEXT: sbcs x1, x1, x5
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x2, x2, x6
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x3, x3, x7
+; CHECK-NEXT: cset w8, vs
+; CHECK-NEXT: eor w4, w8, #0x1
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.ssub.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = xor i1 %3, true
+ %5 = zext i1 %4 to i8
+ %6 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %7 = insertvalue { i256, i8 } %6, i8 %5, 1
+ ret { i256, i8 } %7
+}
+
+define { i256, i8 } @i256_overflowing_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_overflowing_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x0, x0, x4
+; CHECK-NEXT: sbcs x1, x1, x5
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x2, x2, x6
+; CHECK-NEXT: cset w8, lo
+; CHECK-NEXT: cmp wzr, w8
+; CHECK-NEXT: sbcs x3, x3, x7
+; CHECK-NEXT: cset w4, vs
+; CHECK-NEXT: ret
+ %1 = tail call { i256, i1 } @llvm.ssub.with.overflow.i256(i256 %x, i256 %y)
+ %2 = extractvalue { i256, i1 } %1, 0
+ %3 = extractvalue { i256, i1 } %1, 1
+ %4 = zext i1 %3 to i8
+ %5 = insertvalue { i256, i8 } undef, i256 %2, 0
+ %6 = insertvalue { i256, i8 } %5, i8 %4, 1
+ ret { i256, i8 } %6
+}
+
+define i256 @i256_saturating_sub(i256 %x, i256 %y) {
+; CHECK-LABEL: i256_saturating_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: subs x8, x0, x4
+; CHECK-NEXT: sbcs x9, x1, x5
+; CHECK-NEXT: sbcs x10, x2, x6
+; CHECK-NEXT: sbcs x11, x3, x7
+; CHECK-NEXT: cset w12, vs
+; CHECK-NEXT: asr x13, x11, #63
+; CHECK-NEXT: cmp w12, #0
+; CHECK-NEXT: csel x0, x13, x8, ne
+; CHECK-NEXT: eor x8, x13, #0x8000000000000000
+; CHECK-NEXT: csel x1, x13, x9, ne
+; CHECK-NEXT: csel x2, x13, x10, ne
+; CHECK-NEXT: csel x3, x8, x11, ne
+; CHECK-NEXT: ret
+ %1 = tail call i256 @llvm.ssub.sat.i256(i256 %x, i256 %y)
+ ret i256 %1
+}
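As the NOTE line at the top of the file indicates, the CHECK assertions are autogenerated: after any change to the IR, they can be regenerated by rerunning llvm/utils/update_llc_test_checks.py on this file (optionally pointing it at a freshly built llc with --llc-binary). The test itself executes under lit through its RUN line, piping llc output for the aarch64-- triple into FileCheck.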