
Commit b0be0c9

fix riscv convert float to fp16 warning (#6525)

1 parent bdac11b commit b0be0c9

23 files changed: +160 -160 lines changed
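
Every hunk in this commit applies the same pattern: wherever a float value (a 0.f or 1.f literal, a loaded float such as val, or the float result of a helper like activation_ss) is passed where an __fp16 is expected, an explicit (__fp16) cast is added so the narrowing conversion is stated rather than implicit. A minimal sketch of the warning being silenced, assuming a RISC-V C++ toolchain with the zfh half-precision extension; the file name and compiler flags below are illustrative, not taken from the commit:

    // fp16_narrowing_demo.cpp -- compile with something like:
    //   riscv64-unknown-linux-gnu-g++ -march=rv64gc_zfh -Wconversion -c fp16_narrowing_demo.cpp
    __fp16 narrow_implicit(float x)
    {
        return x; // implicit float -> __fp16 narrowing: the compiler warns
    }

    __fp16 narrow_explicit(float x)
    {
        return (__fp16)x; // same narrowing, made explicit: no warning
    }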

src/layer/riscv/bias_riscv_zfh.cpp
Lines changed: 1 addition & 1 deletion

@@ -25,7 +25,7 @@ int Bias_riscv::forward_inplace_fp16s(Mat& bottom_top_blob, const Option& opt) c
     for (int q = 0; q < channels; q++)
     {
         __fp16* ptr = bottom_top_blob.channel(q);
-        __fp16 bias = bias_data[q];
+        __fp16 bias = (__fp16)bias_data[q];

 #if __riscv_zvfh
         int n = size;

src/layer/riscv/celu_riscv_zfh.cpp
Lines changed: 4 additions & 4 deletions

@@ -32,12 +32,12 @@ int CELU_riscv::forward_inplace_fp16s(Mat& bottom_top_blob, const Option& opt) c
     size_t vl = __riscv_vsetvl_e16m8(n);

     vfloat16m8_t _p = __riscv_vle16_v_f16m8(ptr, vl);
-    vbool2_t _mask = __riscv_vmfgt_vf_f16m8_b2(_p, 0.f, vl);
+    vbool2_t _mask = __riscv_vmfgt_vf_f16m8_b2(_p, (__fp16)0.f, vl);

-    vfloat16m8_t _q = __riscv_vfdiv_vf_f16m8(_p, alpha, vl);
+    vfloat16m8_t _q = __riscv_vfdiv_vf_f16m8(_p, (__fp16)alpha, vl);
     _q = exp_ps(_q, vl);
-    _q = __riscv_vfsub_vf_f16m8(_q, 1.f, vl);
-    _q = __riscv_vfmul_vf_f16m8(_q, alpha, vl);
+    _q = __riscv_vfsub_vf_f16m8(_q, (__fp16)1.f, vl);
+    _q = __riscv_vfmul_vf_f16m8(_q, (__fp16)alpha, vl);

     vfloat16m8_t _res = __riscv_vmerge_vvm_f16m8(_q, _p, _mask, vl);
     __riscv_vse16_v_f16m8(ptr, _res, vl);

src/layer/riscv/convolution1d_riscv_zfh.cpp
Lines changed: 7 additions & 7 deletions

@@ -143,7 +143,7 @@ int Convolution1D_riscv::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, co
 {
     float val = (float)*slptr++;
     vfloat16m1_t _w0 = __riscv_vle16_v_f16m1(kptr, vl);
-    _sum = __riscv_vfwmacc_vf_f32m2(_sum, val, _w0, vl);
+    _sum = __riscv_vfwmacc_vf_f32m2(_sum, (__fp16)val, _w0, vl);

     kptr += packn;
 }
@@ -186,7 +186,7 @@ int Convolution1D_riscv::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, co
 {
     float val = (float)sptr[0];
     vfloat16m1_t _w = __riscv_vle16_v_f16m1(kptr, vl);
-    _sum = __riscv_vfwmacc_vf_f32m2(_sum, val, _w, vl);
+    _sum = __riscv_vfwmacc_vf_f32m2(_sum, (__fp16)val, _w, vl);

     sptr += dilation_w;
     kptr += packn;
@@ -353,7 +353,7 @@ int Convolution1D_riscv::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, c

 for (int j = 0; j < outw; j++)
 {
-    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

     if (bias_term)
     {
@@ -400,7 +400,7 @@ int Convolution1D_riscv::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, c

 for (int j = 0; j < outw; j++)
 {
-    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

     if (bias_term)
     {
@@ -443,14 +443,14 @@ int Convolution1D_riscv::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, c

 for (int j = 0; j < outw; j++)
 {
-    __fp16 sum = 0.f;
+    __fp16 sum = (__fp16)0.f;

     if (bias_term)
     {
         sum = ((const __fp16*)bias_data_fp16)[p];
     }

-    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

     const __fp16* kptr = weight_data_fp16.channel(p);

@@ -471,7 +471,7 @@ int Convolution1D_riscv::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, c

     sum = __riscv_vfmv_f_s_f16m1_f16(__riscv_vfredusum_vs_f16m1_f16m1(_sum, __riscv_vfmv_s_f_f16m1(sum, vl), vl));

-    sum = activation_ss(sum, activation_type, activation_params);
+    sum = (__fp16)activation_ss(sum, activation_type, activation_params);

     outptr[j] = sum;
 }
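
The (__fp16)activation_ss(...) casts above are the same fix in return-value position: activation_ss is ncnn's scalar activation helper and computes in float, so assigning its result back into an __fp16 sum narrows. A reduced sketch of that call-site shape (assuming activation_ss takes and returns float, as in ncnn's other scalar paths):

    __fp16 sum = /* fp16 horizontal reduction */;
    // the __fp16 argument widens to float implicitly (widening does not warn);
    // only the float result coming back needs the explicit narrowing cast
    sum = (__fp16)activation_ss(sum, activation_type, activation_params);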

src/layer/riscv/convolution_3x3_pack1ton_fp16s.h
Lines changed: 2 additions & 2 deletions

@@ -18,7 +18,7 @@ static void conv3x3s1_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob,
 {
     Mat out0 = top_blob.channel(p);

-    vfloat16m1_t _bias0 = bias ? __riscv_vle16_v_f16m1(bias + p * packn, vl) : __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _bias0 = bias ? __riscv_vle16_v_f16m1(bias + p * packn, vl) : __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);
     out0.fill(_bias0);

     const __fp16* k0 = kernel.channel(p);
@@ -296,7 +296,7 @@ static void conv3x3s2_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob,
 {
     Mat out0 = top_blob.channel(p);

-    vfloat16m1_t _bias0 = bias ? __riscv_vle16_v_f16m1(bias + p * packn, vl) : __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _bias0 = bias ? __riscv_vle16_v_f16m1(bias + p * packn, vl) : __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);
     out0.fill(_bias0);

     const __fp16* k0 = kernel.channel(p);

src/layer/riscv/convolution_7x7_pack1ton_fp16s.h
Lines changed: 1 addition & 1 deletion

@@ -22,7 +22,7 @@ static void conv7x7s2_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob,
 {
     Mat out0 = top_blob.channel(p);

-    vfloat16m1_t _bias0 = bias ? __riscv_vle16_v_f16m1(bias + p * packn, vl) : __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _bias0 = bias ? __riscv_vle16_v_f16m1(bias + p * packn, vl) : __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);
     out0.fill(_bias0);

     for (int q = 0; q < inch; q++)

src/layer/riscv/convolution_pack1ton_fp16s.h
Lines changed: 2 additions & 2 deletions

@@ -65,7 +65,7 @@ static void convolution_pack1ton_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob
 {
     float val = (float)sptr[space_ofs[k]];
     vfloat16m1_t _w = __riscv_vle16_v_f16m1(kptr, vl);
-    _sum = __riscv_vfwmacc_vf_f32m2(_sum, val, _w, vl);
+    _sum = __riscv_vfwmacc_vf_f32m2(_sum, (__fp16)val, _w, vl);

     kptr += packn;
 }
@@ -126,7 +126,7 @@ static void convolution_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blo
 {
     for (int j = 0; j < outw; j++)
     {
-        vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+        vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

         if (bias_data_ptr)
         {

src/layer/riscv/convolution_packn_fp16s.h
Lines changed: 2 additions & 2 deletions

@@ -71,7 +71,7 @@ static void convolution_packn_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, c
     vfloat16m1_t _w0 = __riscv_vle16_v_f16m1(kptr, vl);
     // _sum = __riscv_vfwmacc_vf_f32m2(_sum, val, _w0, vl);

-    vfloat32m2_t _qwq = __riscv_vfwmul_vf_f32m2(_w0, val, vl);
+    vfloat32m2_t _qwq = __riscv_vfwmul_vf_f32m2(_w0, (__fp16)val, vl);
     _sum = __riscv_vfadd_vv_f32m2(_sum, _qwq, vl);

     kptr += packn;
@@ -134,7 +134,7 @@ static void convolution_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob,
 {
     for (int j = 0; j < outw; j++)
     {
-        vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+        vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

         if (bias_data_ptr)
         {
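
The vector-scalar hunks above hinge on the intrinsic signatures: in the RVV intrinsics API, the _vf_ variants of f16 operations take their scalar operand as _Float16 (__fp16), and for the widening forms (vfwmul/vfwmacc, f16 x f16 -> f32) the scalar is still the narrow f16 type even though the result is f32. Passing a float such as val or alpha therefore narrows at the call, and the cast makes that explicit. Prototypes paraphrased from the RVV intrinsics spec, not from this repo:

    // splat an f16 scalar across a vector register group
    vfloat16m1_t __riscv_vfmv_v_f_f16m1(_Float16 src, size_t vl);
    // widening multiply, f16 vector x f16 scalar -> f32 vector
    vfloat32m2_t __riscv_vfwmul_vf_f32m2(vfloat16m1_t vs2, _Float16 rs1, size_t vl);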

src/layer/riscv/convolution_packnto1_fp16s.h
Lines changed: 4 additions & 4 deletions

@@ -85,7 +85,7 @@ static void convolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob
     sum = __riscv_vfmv_f_s_f32m1_f32(__riscv_vfredusum_vs_f32m2_f32m1(_sum, __riscv_vfmv_s_f_f32m1(sum, vl), vl));
 #endif

-    sum = activation_ss(sum, activation_type, activation_params);
+    sum = (__fp16)activation_ss(sum, activation_type, activation_params);

     outptr[j] = (__fp16)sum;
 }
@@ -140,14 +140,14 @@ static void convolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blo
 {
     for (int j = 0; j < outw; j++)
     {
-        __fp16 sum = 0.f;
+        __fp16 sum = (__fp16)0.f;

         if (bias_data_ptr)
        {
             sum = bias_data_ptr[p];
         }

-        vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+        vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

         const __fp16* kptr = weight_data_fp16.channel(p);

@@ -169,7 +169,7 @@ static void convolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blo

     sum = __riscv_vfmv_f_s_f16m1_f16(__riscv_vfredusum_vs_f16m1_f16m1(_sum, __riscv_vfmv_s_f_f16m1(sum, vl), vl));

-    sum = activation_ss(sum, activation_type, activation_params);
+    sum = (__fp16)activation_ss(sum, activation_type, activation_params);

     outptr[j] = sum;
 }

src/layer/riscv/convolution_sgemm_fp16s.h
Lines changed: 4 additions & 4 deletions

@@ -109,7 +109,7 @@ static void im2col_sgemm_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, con
     __fp16* outptr6 = top_blob.channel(p + 6);
     __fp16* outptr7 = top_blob.channel(p + 7);

-    const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
+    const __fp16 zeros[8] = {(__fp16)0.f, (__fp16)0.f, (__fp16)0.f, (__fp16)0.f, (__fp16)0.f, (__fp16)0.f, (__fp16)0.f, (__fp16)0.f};
     const __fp16* biasptr = bias ? bias + p : zeros;

     int i = 0;
@@ -224,7 +224,7 @@ static void im2col_sgemm_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, con
     __fp16* outptr2 = top_blob.channel(p + 2);
     __fp16* outptr3 = top_blob.channel(p + 3);

-    const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f};
+    const __fp16 zeros[4] = {(__fp16)0.f, (__fp16)0.f, (__fp16)0.f, (__fp16)0.f};
     const __fp16* biasptr = bias ? bias + p : zeros;

     int i = 0;
@@ -302,7 +302,7 @@ static void im2col_sgemm_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, con
 {
     __fp16* outptr0 = top_blob.channel(p);

-    const __fp16 bias0 = bias ? bias[p] : 0.f;
+    const __fp16 bias0 = bias ? (__fp16)bias[p] : (__fp16)0.f;

     int i = 0;
     for (; i + (packn - 1) < size; i += packn)
@@ -352,7 +352,7 @@ static void im2col_sgemm_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, con
 {
     __fp16* outptr0 = top_blob.channel(p);

-    const __fp16 bias0 = bias ? bias[p] : 0.f;
+    const __fp16 bias0 = bias ? (__fp16)bias[p] : (__fp16)0.f;

     for (int i = 0; i < size; i++)
     {

src/layer/riscv/convolution_sgemm_pack1ton_fp16s.h
Lines changed: 1 addition & 1 deletion

@@ -52,7 +52,7 @@ static void im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_

     int nn = inch * maxk; // inch always > 0

-    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1(0.f, vl);
+    vfloat16m1_t _sum = __riscv_vfmv_v_f_f16m1((__fp16)0.f, vl);

     if (bias)
     {
