diff --git a/drv/hisi_comp.c b/drv/hisi_comp.c
index cad21fa8..cf8d3151 100644
--- a/drv/hisi_comp.c
+++ b/drv/hisi_comp.c
@@ -372,6 +372,7 @@ static int check_enable_store_buf(struct wd_comp_msg *msg, __u32 out_size, int h
 static int get_sgl_from_pool(handle_t h_qp, struct comp_sgl *c_sgl, struct wd_mm_ops *mm_ops)
 {
 	handle_t h_sgl_pool;
+	int ret;
 
 	h_sgl_pool = hisi_qm_get_sglpool(h_qp, mm_ops);
 	if (unlikely(!h_sgl_pool)) {
@@ -380,21 +381,19 @@ static int get_sgl_from_pool(handle_t h_qp, struct comp_sgl *c_sgl, struct wd_mm
 	}
 
 	c_sgl->in = hisi_qm_get_hw_sgl(h_sgl_pool, c_sgl->list_src);
-	if (unlikely(!c_sgl->in)) {
-		WD_ERR("failed to get hw sgl in!\n");
-		return -WD_ENOMEM;
-	}
+	if (unlikely(WD_IS_ERR(c_sgl->in)))
+		return WD_PTR_ERR(c_sgl->in);
 
 	c_sgl->out = hisi_qm_get_hw_sgl(h_sgl_pool, c_sgl->list_dst);
-	if (unlikely(!c_sgl->out)) {
-		WD_ERR("failed to get hw sgl out!\n");
+	if (unlikely(WD_IS_ERR(c_sgl->out))) {
+		ret = WD_PTR_ERR(c_sgl->out);
 		goto err_free_sgl_in;
 	}
 
 	if (c_sgl->seq_start) {
 		c_sgl->out_seq = hisi_qm_get_hw_sgl(h_sgl_pool, c_sgl->seq_start);
-		if (unlikely(!c_sgl->out_seq)) {
-			WD_ERR("failed to get hw sgl out for sequences!\n");
+		if (unlikely(WD_IS_ERR(c_sgl->out_seq))) {
+			ret = WD_PTR_ERR(c_sgl->out_seq);
 			goto err_free_sgl_out;
 		}
 	}
@@ -405,7 +404,7 @@ static int get_sgl_from_pool(handle_t h_qp, struct comp_sgl *c_sgl, struct wd_mm
 	hisi_qm_put_hw_sgl(h_sgl_pool, c_sgl->out);
 err_free_sgl_in:
 	hisi_qm_put_hw_sgl(h_sgl_pool, c_sgl->in);
-	return -WD_ENOMEM;
+	return ret;
 }
 
 static void free_hw_sgl(handle_t h_qp, struct comp_sgl *c_sgl, struct wd_mm_ops *mm_ops)
@@ -859,9 +858,11 @@ static int lz77_zstd_buf_check(struct wd_comp_msg *msg)
 	}
 
 	if (unlikely(msg->stream_mode == WD_COMP_STATEFUL && msg->comp_lv == WD_COMP_L9 &&
-		     seq_avail_out <= PRICE_MIN_OUT_SIZE)) {
+		     seq_avail_out <= PRICE_MIN_OUT_SIZE + ZSTD_FREQ_DATA_SIZE +
+		     ZSTD_LIT_RESV_SIZE)) {
 		WD_ERR("invalid: out_len(%u) not enough, %u bytes are minimum in price mode!\n",
-		       out_size, PRICE_MIN_OUT_SIZE + lits_size);
+		       out_size, PRICE_MIN_OUT_SIZE + ZSTD_FREQ_DATA_SIZE +
+		       ZSTD_LIT_RESV_SIZE + lits_size);
 		return -WD_EINVAL;
 	}
 
@@ -885,8 +886,8 @@ static int lz77_only_buf_check(struct wd_comp_msg *msg)
 	__u32 lits_size = in_size + ZSTD_LIT_RESV_SIZE;
 	__u32 seq_avail_out = out_size - lits_size;
 
-	/* lits_size need to be less than 8M when use pbuffer */
-	if (unlikely(lits_size > HZ_MAX_SIZE)) {
+	/* in_size needs to be less than 8M minus the literal calculation reserved space */
+	if (unlikely(in_size > HZ_MAX_SIZE - ZSTD_LIT_RESV_SIZE)) {
 		WD_ERR("invalid: in_len(%u) of lz77_only is out of range!\n", in_size);
 		return -WD_EINVAL;
 	}
@@ -1030,9 +1031,11 @@ static int lz77_zstd_buf_check_sgl(struct wd_comp_msg *msg, __u32 lits_size)
 	}
 
 	if (unlikely(msg->stream_mode == WD_COMP_STATEFUL && msg->comp_lv == WD_COMP_L9 &&
-		     seq_avail_out <= PRICE_MIN_OUT_SIZE)) {
+		     seq_avail_out <= PRICE_MIN_OUT_SIZE + ZSTD_FREQ_DATA_SIZE +
+		     ZSTD_LIT_RESV_SIZE)) {
 		WD_ERR("invalid: out_len(%u) not enough, %u bytes are minimum in price mode!\n",
-		       out_size, PRICE_MIN_OUT_SIZE + lits_size);
+		       out_size, PRICE_MIN_OUT_SIZE + ZSTD_FREQ_DATA_SIZE +
+		       ZSTD_LIT_RESV_SIZE + lits_size);
 		return -WD_EINVAL;
 	}
 
@@ -1050,7 +1053,7 @@ static int lz77_only_buf_check_sgl(struct wd_comp_msg *msg, __u32 lits_size)
 	 * the dfx information. The literals and sequences data need to be written
 	 * to an independent sgl splited from list_dst.
 	 */
-	if (unlikely(lits_size < in_size + ZSTD_LIT_RESV_SIZE)) {
+	if (unlikely(lits_size < (__u64)in_size + ZSTD_LIT_RESV_SIZE)) {
 		WD_ERR("invalid: output is not enough for literals, at least %u bytes!\n",
 		       ZSTD_LIT_RESV_SIZE + lits_size);
 		return -WD_EINVAL;
@@ -1548,7 +1551,8 @@ static int hisi_zip_comp_send(struct wd_alg_driver *drv, handle_t ctx, void *com
 	hisi_set_msg_id(h_qp, &msg->tag);
 	ret = fill_zip_comp_sqe(qp, msg, &sqe);
 	if (unlikely(ret < 0)) {
-		WD_ERR("failed to fill zip sqe, ret = %d!\n", ret);
+		if (ret != -WD_EBUSY)
+			WD_ERR("failed to fill zip sqe, ret = %d!\n", ret);
 		return ret;
 	}
 	ret = hisi_qm_send(h_qp, &sqe, 1, &count);
@@ -1652,7 +1656,7 @@ static int parse_zip_sqe(struct hisi_qp *qp, struct hisi_zip_sqe *sqe,
 	recv_msg->req.status = 0;
 
 	if (unlikely(status != 0 && status != HZ_NEGACOMPRESS &&
-		     status != HZ_CRC_ERR && status != HZ_DECOMP_END)) {
+		     status != HZ_DECOMP_END)) {
 		if (status == ERR_DSTLEN_OUT)
 			WD_DEBUG("bad request(ctx_st=0x%x, status=0x%x, algorithm type=%u)!\n",
 				 ctx_st, status, type);
diff --git a/drv/hisi_dae.c b/drv/hisi_dae.c
index 62cc168c..d86b9f64 100644
--- a/drv/hisi_dae.c
+++ b/drv/hisi_dae.c
@@ -33,7 +33,6 @@
 
 /* hash table */
 #define HASH_TABLE_HEAD_TAIL_SIZE 8
-#define HASH_TABLE_EMPTY_SIZE 4
 
 /* hash agg operations col max num */
 #define DAE_AGG_COL_ALG_MAX_NUM 2
@@ -160,13 +159,18 @@ static void fill_hashagg_merge_output_order(struct dae_sqe *sqe, struct dae_ext_
 {
 	struct hashagg_ctx *agg_ctx = msg->priv;
 	struct hashagg_col_data *cols_data = &agg_ctx->cols_data;
+	__u32 out_cols_num = cols_data->output_num;
 	struct hashagg_output_src *output_src;
 	__u32 offset = 0;
 	__u32 i;
 
-	output_src = cols_data->rehash_output;
+	output_src = cols_data->normal_output;
+	if (cols_data->is_count_all) {
+		sqe->counta_vld = DAE_HASH_COUNT_ALL;
+		out_cols_num--;
+	}
 
-	for (i = 0; i < cols_data->output_num; i++) {
+	for (i = 0; i < out_cols_num; i++) {
 		ext_sqe->out_from_in_idx |= (__u64)output_src[i].out_from_in_idx << offset;
 		ext_sqe->out_optype |= (__u64)output_src[i].out_optype << offset;
 		offset += DAE_COL_BIT_NUM;
@@ -370,7 +374,7 @@ static void fill_hashagg_merge_input_data(struct dae_sqe *sqe, struct dae_ext_sq
 	struct hashagg_ctx *agg_ctx = msg->priv;
 	struct hashagg_col_data *cols_data = &agg_ctx->cols_data;
 
-	fill_hashagg_data_info(sqe, ext_sqe, cols_data->output_data, msg->agg_cols_num);
+	fill_hashagg_data_info(sqe, ext_sqe, cols_data->input_data, msg->agg_cols_num);
 }
 
 static void fill_hashagg_ext_addr(struct dae_sqe *sqe, struct dae_ext_sqe *ext_sqe,
diff --git a/drv/hisi_dae_join_gather.c b/drv/hisi_dae_join_gather.c
index 92fae1a9..8c45f57d 100644
--- a/drv/hisi_dae_join_gather.c
+++ b/drv/hisi_dae_join_gather.c
@@ -19,7 +19,7 @@
 #define PROBE_INDEX_ROW_SIZE 4
 
 /* align size */
-#define DAE_KEY_ALIGN_SIZE 8
+#define DAE_KEY_ALIGN_SIZE 4
 #define DAE_BREAKPOINT_SIZE 81920
 #define DAE_ADDR_INDEX_SHIFT 1
 
@@ -28,7 +28,6 @@
 #define HASH_TABLE_INDEX_NUM 1
 #define HASH_TABLE_MAX_INDEX_NUM 15
 #define HASH_TABLE_INDEX_SIZE 12
-#define HASH_TABLE_EMPTY_SIZE 4
 #define GATHER_ROW_BATCH_EMPTY_SIZE 4
 
 /* DAE hardware protocol data */
@@ -173,6 +172,9 @@ static void fill_join_table_data(struct dae_sqe *sqe, struct dae_addr_list *addr
 	}
 
 	sqe->table_row_size = ctx->hash_table_row_size;
+	/* Initialize these fields for the hardware check */
+	sqe->src_table_width = ctx->table_data.table_width;
+	sqe->dst_table_width = ctx->table_data.table_width;
 
 	if (table_data_src) {
 		sqe->src_table_width = table_data_src->table_width;
@@ -814,6 +816,7 @@ static int join_get_table_rowsize(struct join_gather_col_data *cols_data,
 	for (i = 0; i < key_num; i++)
 		row_count_size += get_data_type_size(key_data[i].hw_type, 0);
 
+	row_count_size += HASH_TABLE_EMPTY_SIZE;
 	row_count_size = ALIGN(row_count_size, DAE_KEY_ALIGN_SIZE);
 	row_count_size += HASH_TABLE_HEAD_TAIL_SIZE +
 			  cols_data->index_num * HASH_TABLE_INDEX_SIZE;
diff --git a/drv/hisi_qm_udrv.c b/drv/hisi_qm_udrv.c
index a47c5961..6da98875 100644
--- a/drv/hisi_qm_udrv.c
+++ b/drv/hisi_qm_udrv.c
@@ -844,7 +844,7 @@ static struct hisi_sgl *hisi_qm_sgl_pop(struct hisi_sgl_pool *pool)
 
 	if (pool->top == 0) {
 		pthread_spin_unlock(&pool->lock);
-		WD_ERR("invalid: the sgl pool is empty!\n");
+		WD_DEBUG("debug: the sgl pool is empty now!\n");
 		return NULL;
 	}
 
@@ -936,22 +936,23 @@ void *hisi_qm_get_hw_sgl(handle_t sgl_pool, struct wd_datalist *sgl)
 	struct wd_datalist *tmp = sgl;
 	struct hisi_sgl *head, *next, *cur;
 	struct wd_mm_ops *mm_ops;
+	void *ret = NULL;
 	__u32 i = 0;
 
 	if (!pool || !sgl) {
 		WD_ERR("invalid: hw sgl pool or sgl is NULL!\n");
-		return NULL;
+		return WD_ERR_PTR(-WD_EINVAL);
 	}
 
 	if (pool->mm_ops && !pool->mm_ops->iova_map) {
 		WD_ERR("invalid: mm_ops iova_map function is NULL!\n");
-		return NULL;
+		return WD_ERR_PTR(-WD_EINVAL);
 	}
 
 	mm_ops = pool->mm_ops;
 	head = hisi_qm_sgl_pop(pool);
 	if (!head)
-		return NULL;
+		return WD_ERR_PTR(-WD_EBUSY);
 
 	cur = head;
 	tmp = sgl;
@@ -964,6 +965,7 @@ void *hisi_qm_get_hw_sgl(handle_t sgl_pool, struct wd_datalist *sgl)
 
 		if (tmp->len > HISI_MAX_SIZE_IN_SGE) {
 			WD_ERR("invalid: the data len is %u!\n", tmp->len);
+			ret = WD_ERR_PTR(-WD_EINVAL);
 			goto err_out;
 		}
 
@@ -975,6 +977,7 @@ void *hisi_qm_get_hw_sgl(handle_t sgl_pool, struct wd_datalist *sgl)
 
 		if (!cur->sge_entries[i].buff) {
 			WD_ERR("invalid: the iova map addr of sge is NULL!\n");
+			ret = WD_ERR_PTR(-WD_EINVAL);
 			goto err_out;
 		}
 
@@ -993,7 +996,7 @@ void *hisi_qm_get_hw_sgl(handle_t sgl_pool, struct wd_datalist *sgl)
 		if (i == pool->sge_num && tmp->next) {
 			next = hisi_qm_sgl_pop(pool);
 			if (!next) {
-				WD_ERR("invalid: the sgl pool is not enough!\n");
+				ret = WD_ERR_PTR(-WD_EBUSY);
 				goto err_out;
 			}
 			if (mm_ops)
@@ -1012,15 +1015,17 @@ void *hisi_qm_get_hw_sgl(handle_t sgl_pool, struct wd_datalist *sgl)
 	}
 
 	/* There is no data, recycle the hardware sgl head to pool */
-	if (!head->entry_sum_in_chain)
+	if (!head->entry_sum_in_chain) {
+		ret = WD_ERR_PTR(-WD_EINVAL);
 		goto err_out;
+	}
 
 	hisi_qm_dump_sgl(head);
 	return head;
 
 err_out:
 	hisi_qm_put_hw_sgl(sgl_pool, head);
-	return NULL;
+	return ret;
 }
 
 handle_t hisi_qm_get_sglpool(handle_t h_qp, struct wd_mm_ops *mm_ops)
diff --git a/drv/hisi_sec.c b/drv/hisi_sec.c
index 53bf3340..c8b831cd 100644
--- a/drv/hisi_sec.c
+++ b/drv/hisi_sec.c
@@ -1305,20 +1305,17 @@ static int hisi_sec_fill_sgl(handle_t h_sgl_pool, __u8 **in, __u8 **out,
 	void *hw_sgl_out;
 
 	hw_sgl_in = hisi_qm_get_hw_sgl(h_sgl_pool, (struct wd_datalist *)(*in));
-	if (!hw_sgl_in) {
-		WD_ERR("failed to get sgl in for hw_v2!\n");
-		return -WD_EINVAL;
-	}
+	if (WD_IS_ERR(hw_sgl_in))
+		return WD_PTR_ERR(hw_sgl_in);
 
 	if (type == WD_DIGEST) {
 		hw_sgl_out = *out;
 	} else {
 		hw_sgl_out = hisi_qm_get_hw_sgl(h_sgl_pool, (struct wd_datalist *)(*out));
-		if (!hw_sgl_out) {
-			WD_ERR("failed to get hw sgl out for hw_v2!\n");
+		if (WD_IS_ERR(hw_sgl_out)) {
			hisi_qm_put_hw_sgl(h_sgl_pool, hw_sgl_in);
-			return -WD_EINVAL;
+			return WD_PTR_ERR(hw_sgl_out);
 		}
 
 		sqe->sdm_addr_type |= SEC_SGL_SDM_MASK;
@@ -1338,10 +1335,8 @@ static int hisi_sec_fill_sgl_v3(handle_t h_sgl_pool, __u8 **in, __u8 **out,
 	void *hw_sgl_out;
 
 	hw_sgl_in = hisi_qm_get_hw_sgl(h_sgl_pool, (struct wd_datalist *)(*in));
-	if (!hw_sgl_in) {
-		WD_ERR("failed to get sgl in for hw_v3!\n");
-		return -WD_EINVAL;
-	}
+	if (WD_IS_ERR(hw_sgl_in))
+		return WD_PTR_ERR(hw_sgl_in);
 
 	if (type == WD_DIGEST) {
 		hw_sgl_out = *out;
@@ -1349,10 +1344,9 @@ static int hisi_sec_fill_sgl_v3(handle_t h_sgl_pool, __u8 **in, __u8 **out,
 	} else {
 		hw_sgl_out = hisi_qm_get_hw_sgl(h_sgl_pool, (struct wd_datalist *)(*out));
-		if (!hw_sgl_out) {
-			WD_ERR("failed to get hw sgl out for hw_v3!\n");
+		if (WD_IS_ERR(hw_sgl_out)) {
 			hisi_qm_put_hw_sgl(h_sgl_pool, hw_sgl_in);
-			return -WD_EINVAL;
+			return WD_PTR_ERR(hw_sgl_out);
 		}
 
 		/*
diff --git a/include/wd.h b/include/wd.h
index abc745dc..7e92e41f 100644
--- a/include/wd.h
+++ b/include/wd.h
@@ -31,6 +31,7 @@ extern "C" {
 #define WD_CTX_CNT_NUM 1024
 #define WD_IPC_KEY 0x500011
 #define CRYPTO_MAX_ALG_NAME 128
+#define NUMA_NO_NODE (-1)
 
 typedef unsigned char __u8;
 typedef unsigned int __u32;
diff --git a/v1/drv/hisi_zip_udrv.c b/v1/drv/hisi_zip_udrv.c
index 44e15450..903df1c7 100644
--- a/v1/drv/hisi_zip_udrv.c
+++ b/v1/drv/hisi_zip_udrv.c
@@ -494,6 +494,11 @@ static int fill_zip_buffer_size_deflate(void *ssqe, struct wcrypto_comp_msg *msg
 		return -WD_EINVAL;
 	}
 
+	if (unlikely(!msg->avail_out)) {
+		WD_ERR("invalid: avail_out is zero (%u)!\n", msg->avail_out);
+		return -WD_EINVAL;
+	}
+
 	if (unlikely(msg->data_fmt != WD_SGL_BUF &&
 	    msg->avail_out > MAX_BUFFER_SIZE)) {
 		WD_ERR("warning: avail_out is out of range (%u), will set 8MB size max!\n",
diff --git a/v1/wd_comp.c b/v1/wd_comp.c
index 169f1b4a..bcdab09c 100644
--- a/v1/wd_comp.c
+++ b/v1/wd_comp.c
@@ -54,6 +54,10 @@ static void fill_comp_msg(struct wcrypto_comp_ctx *ctx,
 	msg->checksum = opdata->checksum;
 	msg->tag = ctx->ctx_id;
 	msg->status = 0;
+
+	if (msg->stream_mode == WCRYPTO_COMP_STATEFUL &&
+	    opdata->stream_pos == WCRYPTO_COMP_STREAM_NEW && ctx->ctx_buf)
+		memset(ctx->ctx_buf, 0, MAX_CTX_RSV_SIZE);
 }
 
 static int ctx_params_check(struct wd_queue *q, struct wcrypto_comp_ctx_setup *setup)
@@ -94,7 +98,7 @@ static int ctx_params_check(struct wd_queue *q, struct wcrypto_comp_ctx_setup *s
 		return -WD_EINVAL;
 	}
 
-	if (setup->stream_mode > WCRYPTO_FINISH) {
+	if (setup->stream_mode > WCRYPTO_COMP_STATEFUL) {
 		WD_ERR("err: stream_mode is invalid!\n");
 		return -WD_EINVAL;
 	}
diff --git a/wd.c b/wd.c
index bf83ca15..7f21dc02 100644
--- a/wd.c
+++ b/wd.c
@@ -828,19 +828,24 @@ struct uacce_dev *wd_get_accel_dev(const char *alg_name)
 {
 	struct uacce_dev_list *list, *head;
 	struct uacce_dev *dev = NULL, *target = NULL;
-	int cpu = sched_getcpu();
-	int node = numa_node_of_cpu(cpu);
+	unsigned int node;
 	int ctx_num, tmp;
 	int dis = 1024;
 	int max = 0;
 
+	/* Under default conditions in a VM, the node value is 0 */
+	if (getcpu(NULL, &node) || node == (unsigned int)NUMA_NO_NODE) {
+		WD_ERR("invalid: failed to get numa node id for uacce device!\n");
+		return NULL;
+	}
+
 	head = wd_get_accel_list(alg_name);
 	if (!head)
 		return NULL;
 
 	list = head;
 	while (list) {
-		tmp = numa_distance(node, list->dev->numa_id);
+		tmp = numa_distance((int)node, list->dev->numa_id);
 		ctx_num = wd_get_avail_ctx(list->dev);
 		if ((dis > tmp && ctx_num > 0) ||
 		    (dis == tmp && ctx_num > max)) {
diff --git a/wd_bmm.c b/wd_bmm.c
index 12c3bcf8..462a638e 100644
--- a/wd_bmm.c
+++ b/wd_bmm.c
@@ -100,9 +100,8 @@ handle_t wd_find_ctx(const char *alg_name)
 	struct mem_ctx_node *close_node = NULL;
 	struct mem_ctx_node *node;
 	int min_distance = 0xFFFF;
-	int cpu = sched_getcpu();
-	int nid = numa_node_of_cpu(cpu);
 	handle_t h_ctx = 0;
+	unsigned int nid;
 	int numa_dis;
 
 	if (!alg_name) {
@@ -110,17 +109,23 @@ handle_t wd_find_ctx(const char *alg_name)
 		return 0;
 	}
 
+	/* Under default conditions in a VM, the node value is 0 */
+	if (getcpu(NULL, &nid) || nid == (unsigned int)NUMA_NO_NODE) {
+		WD_ERR("invalid: failed to get numa node for memory pool!\n");
+		return 0;
+	}
+
 	pthread_mutex_lock(&g_mem_ctx_mutex);
 	TAILQ_FOREACH(node, &g_mem_ctx_list, list_node) {
 		if (node->used == false && strstr(node->alg_name, alg_name)) {
-			if (node->numa_id == nid) {
+			if (node->numa_id == (int)nid) {
 				h_ctx = node->h_ctx;
 				node->used = true;
 				break;
 			}
 
 			/* Query the queue with the shortest NUMA distance */
-			numa_dis = numa_distance(nid, node->numa_id);
+			numa_dis = numa_distance((int)nid, node->numa_id);
 			if (numa_dis < min_distance) {
 				min_distance = numa_dis;
 				close_node = node;
diff --git a/wd_sched.c b/wd_sched.c
index ec1e7b69..19936fd6 100644
--- a/wd_sched.c
+++ b/wd_sched.c
@@ -176,11 +176,14 @@ static handle_t session_sched_init(handle_t h_sched_ctx, void *sched_param)
 {
 	struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx;
 	struct sched_params *param = (struct sched_params *)sched_param;
-	int cpu = sched_getcpu();
-	int node = numa_node_of_cpu(cpu);
 	struct sched_key *skey;
+	unsigned int node;
 
-	if (node < 0) {
+	if (getcpu(NULL, &node)) {
+		WD_ERR("failed to get node, errno %d!\n", errno);
+		return (handle_t)(-errno);
+	}
+	if (node == (unsigned int)NUMA_NO_NODE) {
 		WD_ERR("invalid: failed to get numa node!\n");
 		return (handle_t)(-WD_EINVAL);
 	}
@@ -538,12 +541,15 @@ static handle_t session_dev_sched_init(handle_t h_sched_ctx, void *sched_param)
 {
 	struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx;
 	struct sched_params *param = (struct sched_params *)sched_param;
-	int cpu = sched_getcpu();
-	int node = numa_node_of_cpu(cpu);
 	struct sched_key *skey;
+	unsigned int node;
 
-	if (node < 0) {
-		WD_ERR("invalid: failed to get numa node!\n");
+	if (getcpu(NULL, &node)) {
+		WD_ERR("failed to get numa node, errno %d!\n", errno);
+		return (handle_t)(-errno);
+	}
+	if (node == (unsigned int)NUMA_NO_NODE) {
+		WD_ERR("invalid: failed to get numa node for dev sched init!\n");
 		return (handle_t)(-WD_EINVAL);
 	}
 
diff --git a/wd_util.c b/wd_util.c
index 375984f0..bf82fc14 100644
--- a/wd_util.c
+++ b/wd_util.c
@@ -2867,6 +2867,7 @@ static int wd_alg_ctx_init(struct wd_init_attrs *attrs)
 static int wd_alg_ce_ctx_init(struct wd_init_attrs *attrs)
 {
 	struct wd_ctx_config *ctx_config = attrs->ctx_config;
+	struct wd_ce_ctx *ctx;
 
 	ctx_config->ctx_num = 1;
 	ctx_config->ctxs = calloc(ctx_config->ctx_num, sizeof(struct wd_ctx));
@@ -2875,11 +2876,13 @@ static int wd_alg_ce_ctx_init(struct wd_init_attrs *attrs)
 		return -WD_ENOMEM;
 	}
 
-	ctx_config->ctxs[0].ctx = (handle_t)calloc(1, sizeof(struct wd_ce_ctx));
-	if (!ctx_config->ctxs[0].ctx) {
+	ctx = calloc(1, sizeof(struct wd_ce_ctx));
+	if (!ctx) {
 		free(ctx_config->ctxs);
 		return -WD_ENOMEM;
 	}
+	ctx->fd = -1;
+	ctx_config->ctxs[0].ctx = (handle_t)ctx;
 
 	return WD_SUCCESS;
 }
@@ -2925,6 +2928,7 @@ static int wd_alg_init_sve_ctx(struct wd_ctx_config *ctx_config)
 	if (!ctx_sync)
 		goto free_ctxs;
 
+	ctx_sync->fd = -1;
 	ctx_config->ctxs[WD_SOFT_SYNC_CTX].op_type = 0;
 	ctx_config->ctxs[WD_SOFT_SYNC_CTX].ctx_mode = CTX_MODE_SYNC;
 	ctx_config->ctxs[WD_SOFT_SYNC_CTX].ctx = (handle_t)ctx_sync;
@@ -2933,6 +2937,7 @@ static int wd_alg_init_sve_ctx(struct wd_ctx_config *ctx_config)
 	if (!ctx_async)
 		goto free_ctx_sync;
 
+	ctx_async->fd = -1;
 	ctx_config->ctxs[WD_SOFT_ASYNC_CTX].op_type = 0;
 	ctx_config->ctxs[WD_SOFT_ASYNC_CTX].ctx_mode = CTX_MODE_ASYNC;
 	ctx_config->ctxs[WD_SOFT_ASYNC_CTX].ctx = (handle_t)ctx_async;
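
Reviewer note (not part of the patch): the hisi_qm_udrv.c hunks change hisi_qm_get_hw_sgl() from returning NULL on failure to returning an encoded error pointer, which is why the hisi_comp.c and hisi_sec.c hunks replace their NULL checks with WD_IS_ERR()/WD_PTR_ERR(). The sketch below shows the caller pattern those hunks follow; the helper name and its signature are hypothetical, and the WD_ERR_PTR()/WD_IS_ERR()/WD_PTR_ERR() macros are assumed to implement the usual ERR_PTR-style encoding implied by this patch.

```c
/* Hypothetical helper illustrating how callers consume the new
 * error-pointer return of hisi_qm_get_hw_sgl() after this patch.
 */
static int fill_one_hw_sgl(handle_t h_sgl_pool, struct wd_datalist *list,
			   void **hw_sgl)
{
	void *sgl = hisi_qm_get_hw_sgl(h_sgl_pool, list);

	/* -WD_EBUSY means the sgl pool is temporarily empty and the request
	 * can be retried; other codes (e.g. -WD_EINVAL) are hard failures.
	 */
	if (WD_IS_ERR(sgl))
		return WD_PTR_ERR(sgl);

	*hw_sgl = sgl;
	return 0;
}
```

Propagating the decoded code instead of a flat -WD_ENOMEM is what allows hisi_zip_comp_send() to stay quiet on -WD_EBUSY while still logging real failures.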