Skip to content

Commit 588578e

Browse files
[riscv-tests] Updated tests for single lane
1 parent 0c18343 commit 588578e

File tree

178 files changed

+38409
-0
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

178 files changed

+38409
-0
lines changed
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
# Copyright 2021 ETH Zurich and University of Bologna.
# Solderpad Hardware License, Version 0.51, see LICENSE for details.
# SPDX-License-Identifier: SHL-0.51
#
# Author: Matheus Cavalcante <matheusd@iis.ee.ethz.ch>
#         Basile Bougenot <bbougenot@student.ethz.ch>

# Single-lane ("sc") RVV test list: one entry per vector instruction under test.
rv64uv_sc_tests = vaadd vaaddu vsadd vsaddu vsmul vssra vssrl vnclip vnclipu vadd \
	vsub vrsub vwaddu vwsubu vwadd vwsub vsext vzext vadc vmadc vsbc vmsbc vand vor \
	vxor vsll vsrl vsra vnsrl vnsra vmseq vmsne vmsltu vmslt vmsleu vmsle vmsgtu \
	vmsgt vminu vmin vmaxu vmax vmul vmulh vmulhu vmulhsu vdivu vdiv vremu \
	vrem vwmul vwmulu vwmulsu vmacc vnmsac vmadd vnmsub vwmaccu vwmacc \
	vwmaccsu vwmaccus vmerge vmv vmvxs vmvsx vfmvfs vfmvsf vmvnrr \
	vredsum vredmaxu vredmax vredminu vredmin vredand vredor \
	vredxor vwredsumu vwredsum vfadd vfsub vfrsub vfwadd \
	vfwsub vfmul vfdiv vfrdiv vfwmul vfmacc vfnmacc vfmsac \
	vfnmsac vfmadd vfnmadd vfmsub vfnmsub vfwmacc \
	vfwnmacc vfwmsac vfwnmsac vfsqrt vfmin vfmax vfredusum vfredosum vfredmin vfredmax \
	vfwredusum vfwredosum vfclass vfsgnj vfsgnjn vfsgnjx vfmerge \
	vfmv vmfeq vmfne vmflt vmfle vmfgt vmfge vfcvt vfwcvt vfncvt \
	vmand vmnand vmandnot vmor vmnor vmornot vmxor vmxnor vslideup vslidedown \
	vslide1up vfslide1up vslide1down vfslide1down vl \
	vl1r vle1 vls vluxei vs \
	vs1r vse1 vss vsuxei vsetivli vsetvli \
	vsetvl vmsbf vmsof vmsif viota vid vcpop vfirst vle8 \
	vse8 vle16 vse16 vle32 vse32 vle64 vse64

# Previous (pre-update) test list, kept for reference.
#rv64uv_sc_tests = vaadd vaaddu vadc vasub vasubu vcompress vfirst vid viota \
	vl vlff vl_nocheck vlx vmsbf vmsif vmsof vpopc_m vrgather vsadd vsaddu \
	vsetvl vsetivli vsetvli vsmul vssra vssrl vssub vssubu vsux vsx

# Expand each test name to its rv64uv-p-<test> target (riscv-tests naming).
rv64uv_p_tests = $(addprefix rv64uv-p-,$(rv64uv_sc_tests))

# Register the generated targets with the Spike C-test set.
spike_ctests += $(rv64uv_p_tests)
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
// Copyright 2021 ETH Zurich and University of Bologna.
2+
// Solderpad Hardware License, Version 0.51, see LICENSE for details.
3+
// SPDX-License-Identifier: SHL-0.51
4+
//
5+
// Author: Matheus Cavalcante <matheusd@iis.ee.ethz.ch>
6+
// Basile Bougenot <bbougenot@student.ethz.ch>
7+
8+
#include "vector_macros.h"
9+
10+
// vaadd.vv (signed averaging add, vector-vector, per mnemonic), unmasked,
// SEW=8 / LMUL=1, with vxrm = 0 (rnu: round-to-nearest-up).
void TEST_CASE1(void) {
  set_vxrm(0); // setting vxrm to rnu rounding mode
  VSET(4, e8, m1);
  VLOAD_8(v1, 1, -2, -3, 4);
  VLOAD_8(v2, 1, 2, -3, 3);
  __asm__ volatile("vaadd.vv v3, v1, v2" ::);
  // Expected: (1+1)/2=1, (-2+2)/2=0, (-3-3)/2=-3, (4+3)/2=3.5 -> rnu -> 4.
  VCMP_U8(1, v3, 1, 0, -3, 4);
}
18+
19+
// vaadd.vv, masked (v0.t), SEW=8 / LMUL=1, with vxrm = 1 (rne:
// round-to-nearest-even). Mask 0xA = 0b1010 -> only elements 1 and 3 are
// active; v3 is cleared first so the inactive elements compare against 0.
void TEST_CASE2(void) {
  set_vxrm(1); // setting vxrm to rne rounding mode
  VSET(4, e8, m1);
  VLOAD_8(v1, 1, -2, -3, 4);
  VLOAD_8(v2, 1, 9, -3, 5);
  VLOAD_8(v0, 0xA, 0x0, 0x0, 0x0);
  VCLEAR(v3);
  __asm__ volatile("vaadd.vv v3, v1, v2, v0.t" ::);
  // Active elements: (-2+9)/2=3.5 -> rne -> 4; (4+5)/2=4.5 -> rne -> 4.
  VCMP_U8(2, v3, 0, 4, 0, 4);
}
29+
30+
// vaadd.vx (vector-scalar), unmasked, SEW=32 / LMUL=1, with vxrm = 2
// (rdn: round-down / truncate).
void TEST_CASE3(void) {
  set_vxrm(2); // setting vxrm to rdn rounding mode
  VSET(4, e32, m1);
  VLOAD_32(v1, 1, -2, 3, -4);
  const uint32_t scalar = 5;
  __asm__ volatile("vaadd.vx v3, v1, %[A]" ::[A] "r"(scalar));
  // Expected: (1+5)/2=3, (-2+5)/2 -> rdn -> 1, (3+5)/2=4, (-4+5)/2 -> rdn -> 0.
  VCMP_U32(3, v3, 3, 1, 4, 0);
}
38+
39+
// Don't use VCLEAR here, it results in a glitch where all values are off by 1.
// NOTE(review): the body below still calls VCLEAR(v3), which contradicts the
// comment above -- confirm whether the glitch was fixed or the call should be
// removed.
// vaadd.vx, masked (v0.t), SEW=32 / LMUL=1, with vxrm = 3 (rod: round-to-odd).
// Mask 0xA -> only elements 1 and 3 are active.
void TEST_CASE4(void) {
  set_vxrm(3); // setting vxrm to rod rounding mode
  VSET(4, e32, m1);
  VLOAD_32(v1, 1, 2, 3, 4);
  const uint32_t scalar = 5;
  VLOAD_32(v0, 0xA, 0x0, 0x0, 0x0);
  VCLEAR(v3);
  __asm__ volatile("vaadd.vx v3, v1, %[A], v0.t" ::[A] "r"(scalar));
  // Active elements: (2+5)/2=3.5 -> rod -> 3; (4+5)/2=4.5 -> rod -> 5.
  VCMP_U32(4, v3, 0, 3, 0, 5);
}
50+
51+
// Test driver: initialize the check framework, enable the vector unit, run
// every vaadd test case, and report the aggregate result via EXIT_CHECK.
int main(void) {
  INIT_CHECK();
  enable_vec();
  TEST_CASE1();
  TEST_CASE2();
  TEST_CASE3();
  TEST_CASE4();
  EXIT_CHECK();
}
Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
// Copyright 2021 ETH Zurich and University of Bologna.
2+
// Solderpad Hardware License, Version 0.51, see LICENSE for details.
3+
// SPDX-License-Identifier: SHL-0.51
4+
//
5+
// Author: Matheus Cavalcante <matheusd@iis.ee.ethz.ch>
6+
// Basile Bougenot <bbougenot@student.ethz.ch>
7+
8+
#include "vector_macros.h"
9+
10+
// vaaddu.vv (unsigned averaging add, vector-vector, per mnemonic), unmasked,
// SEW=8 / LMUL=1, with vxrm = 0 (rnu: round-to-nearest-up).
void TEST_CASE1(void) {
  set_vxrm(0); // setting vxrm to rnu rounding mode
  VSET(4, e8, m1);
  VLOAD_8(v1, 1, 2, 3, 5);
  VLOAD_8(v2, 1, 3, 8, 4);
  __asm__ volatile("vaaddu.vv v3, v1, v2" ::);
  // Expected: 2/2=1, 5/2 -> rnu -> 3, 11/2 -> rnu -> 6, 9/2 -> rnu -> 5.
  VCMP_U8(1, v3, 1, 3, 6, 5);
}
18+
19+
// vaaddu.vv, masked (v0.t), SEW=8 / LMUL=1, with vxrm = 1 (rne:
// round-to-nearest-even). Mask 0x0A = 0b1010 -> only elements 1 and 3 are
// active; v3 is cleared first so the inactive elements compare against 0.
void TEST_CASE2(void) {
  set_vxrm(1); // setting vxrm to rne rounding mode
  VSET(4, e8, m1);
  VLOAD_8(v1, 5, 8, 3, 7);
  VLOAD_8(v2, 7, 5, 3, 5);
  VLOAD_8(v0, 0x0A, 0x00, 0x00, 0x00);
  VCLEAR(v3);
  __asm__ volatile("vaaddu.vv v3, v1, v2, v0.t" ::);
  // Active elements: 13/2=6.5 -> rne -> 6; 12/2 = 6.
  VCMP_U8(2, v3, 0, 6, 0, 6);
}
29+
30+
// vaaddu.vx (vector-scalar), unmasked, SEW=32 / LMUL=1, with vxrm = 2
// (rdn: round-down / truncate).
void TEST_CASE3(void) {
  set_vxrm(2); // setting vxrm to rdn rounding mode
  VSET(4, e32, m1);
  VLOAD_32(v1, 1, 2, 3, 4);
  const uint32_t scalar = 5;
  __asm__ volatile("vaaddu.vx v3, v1, %[A]" ::[A] "r"(scalar));
  // Expected: 6/2=3, 7/2 -> rdn -> 3, 8/2=4, 9/2 -> rdn -> 4.
  VCMP_U32(3, v3, 3, 3, 4, 4);
}
38+
39+
// Don't use VCLEAR here, it results in a glitch where all values are off by 1.
// NOTE(review): the body below still calls VCLEAR(v3), which contradicts the
// comment above -- confirm whether the glitch was fixed or the call should be
// removed.
// vaaddu.vx, masked (v0.t), SEW=32 / LMUL=1, with vxrm = 3 (rod: round-to-odd).
// Mask 0xA -> only elements 1 and 3 are active.
void TEST_CASE4(void) {
  set_vxrm(3); // setting vxrm to rod rounding mode
  VSET(4, e32, m1);
  VLOAD_32(v1, 1, 2, 3, 4);
  const uint32_t scalar = 5;
  VLOAD_32(v0, 0xA, 0x0, 0x0, 0x0);
  VCLEAR(v3);
  __asm__ volatile("vaaddu.vx v3, v1, %[A], v0.t" ::[A] "r"(scalar));
  // Active elements: 7/2=3.5 -> rod -> 3; 9/2=4.5 -> rod -> 5.
  VCMP_U32(4, v3, 0, 3, 0, 5);
}
50+
51+
// Test driver: initialize the check framework, enable the vector unit, run
// every vaaddu test case, and report the aggregate result via EXIT_CHECK.
int main(void) {
  INIT_CHECK();
  enable_vec();
  TEST_CASE1();
  TEST_CASE2();
  TEST_CASE3();
  TEST_CASE4();
  EXIT_CHECK();
}
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
// Copyright 2021 ETH Zurich and University of Bologna.
2+
// Solderpad Hardware License, Version 0.51, see LICENSE for details.
3+
// SPDX-License-Identifier: SHL-0.51
4+
//
5+
// Author: Matheus Cavalcante <matheusd@iis.ee.ethz.ch>
6+
// Basile Bougenot <bbougenot@student.ethz.ch>
7+
8+
#include "vector_macros.h"
9+
10+
// vadc.vvm (add-with-carry, vector-vector, carry-in taken from mask v0, per
// mnemonic), checked at all four SEW/LMUL combinations with the same operand
// pattern. Mask 0xAA = 0b10101010 -> carry-in of 1 on odd-indexed elements.
// Fix: removed the stray ';' after the closing brace (an empty file-scope
// declaration, invalid before C23).
void TEST_CASE1(void) {
  VSET(16, e8, m1);
  VLOAD_8(v1, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
  VLOAD_8(v2, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vvm v3, v1, v2, v0");
  // e.g. element 0: 1+8+0 = 9; element 1: 2+7+1 = 10.
  VCMP_U8(1, v3, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);

  VSET(16, e16, m2);
  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
  VLOAD_16(v4, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vvm v6, v2, v4, v0");
  VCMP_U16(2, v6, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);

  VSET(16, e32, m4);
  VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
  VLOAD_32(v8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vvm v12, v4, v8, v0");
  VCMP_U32(3, v12, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);

  VSET(16, e64, m8);
  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, -7, 7);
  VLOAD_64(v16, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, -8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vvm v24, v8, v16, v0");
  VCMP_U64(4, v24, 9, 10, 9, 10, 9, 10, 9, 10, 2, 5, 6, 9, 10, 13, 0, 0);
}
39+
40+
void TEST_CASE2(void) {
41+
const uint32_t scalar = 5;
42+
43+
VSET(16, e8, m1);
44+
VLOAD_8(v1, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
45+
VLOAD_8(v0, 0xAA, 0xAA);
46+
asm volatile("vadc.vxm v3, v1, %[A], v0" ::[A] "r"(scalar));
47+
VCMP_U8(5, v3, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
48+
49+
VSET(16, e16, m2);
50+
VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
51+
VLOAD_8(v0, 0xAA, 0xAA);
52+
asm volatile("vadc.vxm v4, v2, %[A], v0" ::[A] "r"(scalar));
53+
VCMP_U16(6, v4, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
54+
55+
VSET(16, e32, m4);
56+
VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
57+
VLOAD_8(v0, 0xAA, 0xAA);
58+
asm volatile("vadc.vxm v8, v4, %[A], v0" ::[A] "r"(scalar));
59+
VCMP_U32(7, v8, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
60+
61+
VSET(16, e64, m8);
62+
VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
63+
VLOAD_8(v0, 0xAA, 0xAA);
64+
asm volatile("vadc.vxm v16, v8, %[A], v0" ::[A] "r"(scalar));
65+
VCMP_U64(8, v16, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
66+
};
67+
68+
// vadc.vim (add-with-carry, vector-immediate 5, carry-in taken from mask v0,
// per mnemonic), checked at all four SEW/LMUL combinations. Same expected
// values as the vector-scalar case in TEST_CASE2 since the scalar there is
// also 5.
// Fix: removed the stray ';' after the closing brace (an empty file-scope
// declaration, invalid before C23).
void TEST_CASE3(void) {
  VSET(16, e8, m1);
  VLOAD_8(v1, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vim v3, v1, 5, v0");
  VCMP_U8(9, v3, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

  VSET(16, e16, m2);
  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vim v4, v2, 5, v0");
  VCMP_U16(10, v4, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

  VSET(16, e32, m4);
  VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vim v8, v4, 5, v0");
  VCMP_U32(11, v8, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);

  VSET(16, e64, m8);
  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
  VLOAD_8(v0, 0xAA, 0xAA);
  asm volatile("vadc.vim v16, v8, 5, v0");
  VCMP_U64(12, v16, 6, 8, 8, 10, 10, 12, 12, 14, 6, 8, 8, 10, 10, 12, 12, 14);
}
93+
94+
// Test driver: initialize the check framework, enable the vector unit, run
// every vadc test case, and report the aggregate result via EXIT_CHECK.
int main(void) {
  INIT_CHECK();
  enable_vec();

  TEST_CASE1();
  TEST_CASE2();
  TEST_CASE3();

  EXIT_CHECK();
}

0 commit comments

Comments
 (0)