PandA-2024.02
06_softmax_a.wrapper.c
#include "c_backend_api.h"

TVMValue param[3];
TVMArray a0[1];
TVMArray a1[1];
TVMArray a2[1];

int32_t fused_nn_softmax(void* args, void* arg_type_ids, int32_t num_args);

#ifdef BAMBU_PROFILING
extern void __builtin_bambu_time_start();
extern void __builtin_bambu_time_stop();
#endif

int32_t fused_nn_softmax_wrapper(float* placeholder, float* tensor)
{
  int32_t res;
  a0[0].data = placeholder;
  a1[0].data = tensor;
  param[0].v_handle = a0;
  param[1].v_handle = a1;
#ifdef BAMBU_PROFILING
  __builtin_bambu_time_start();
#endif

  res = fused_nn_softmax(param, 0, 0);

#ifdef BAMBU_PROFILING
  __builtin_bambu_time_stop();
#endif

  return res;
}
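
The wrapper only packs the two raw float pointers into the TVMArray/TVMValue records and forwards them to the TVM-generated kernel; it allocates nothing itself. A minimal test driver for simulation could therefore look like the sketch below, which is not part of the generated sources: the file name, the 1x10 tensor shape, and the input values are assumptions made purely to illustrate the calling convention.

/* Hypothetical test driver (not generated by TVM or Bambu).
   The 1x10 shape and the input values are assumptions for illustration. */
#include <stdint.h>
#include <stdio.h>

extern int32_t fused_nn_softmax_wrapper(float* placeholder, float* tensor);

int main()
{
  float in[10]  = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f};
  float out[10] = {0.f};

  /* The wrapper stores the input pointer in a0[0].data and the output
     pointer in a1[0].data, wires both arrays into param, and then calls
     fused_nn_softmax(param, 0, 0). */
  int32_t res = fused_nn_softmax_wrapper(in, out);

  int i;
  for (i = 0; i < 10; ++i)
    printf("out[%d] = %f\n", i, out[i]);

  return res;
}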

Generated on Mon Feb 12 2024 13:02:50 for PandA-2024.02 by doxygen 1.8.13