PandA-2024.02
02_vecmul_b.wrapper.c
#include "c_backend_api.h"

/* TVM argument records: TVMArray is the plain C tensor object from dlpack.h
   (it does not manage memory), and TVMValue is the union type of values
   passed through TVM API and function calls. */
TVMArray a0[1];
TVMArray a1[1];
TVMArray a2[1];
TVMValue param[3];

/* TVM-generated kernel, defined in 01_vecmul_a.cc. */
int32_t fused_multiply(void* args, void* arg_type_ids, int32_t num_args);
#ifdef BAMBU_PROFILING
extern void __builtin_bambu_time_start();
extern void __builtin_bambu_time_stop();
#endif

int32_t fused_multiply_wrapper(float* placeholder, float* placeholder1, float* T_multiply)
{
  int32_t res;
  /* Wrap the raw buffers in TVM tensor handles and collect them as call parameters. */
  a0[0].data = placeholder;
  a1[0].data = placeholder1;
  a2[0].data = T_multiply;
  param[0].v_handle = a0;
  param[1].v_handle = a1;
  param[2].v_handle = a2;
#ifdef BAMBU_PROFILING
  __builtin_bambu_time_start();
#endif

  res = fused_multiply(param, 0, 0);

#ifdef BAMBU_PROFILING
  __builtin_bambu_time_stop();
#endif

  return res;
}
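A minimal, hypothetical test harness showing how the wrapper might be exercised from plain C. The vector length (8) and the element-wise-multiply semantics are assumptions inferred from the file name and the T_multiply argument, not stated in this file; the generated kernel from 01_vecmul_a.cc and c_backend_api.h must be available at link time.

#include <stdint.h>
#include <stdio.h>

/* Wrapper under test (defined in 02_vecmul_b.wrapper.c). */
extern int32_t fused_multiply_wrapper(float* placeholder, float* placeholder1, float* T_multiply);

int main(void)
{
  /* Assumed vector length of 8 for illustration only. */
  float a[8], b[8], c[8];
  for (int i = 0; i < 8; ++i)
  {
    a[i] = (float)i;
    b[i] = 2.0f;
    c[i] = 0.0f;
  }

  /* Presumably computes c[i] = a[i] * b[i]; a non-zero return value
     indicates that the TVM packed-function call failed. */
  int32_t res = fused_multiply_wrapper(a, b, c);
  if (res != 0)
  {
    printf("fused_multiply_wrapper failed with code %d\n", (int)res);
    return 1;
  }

  for (int i = 0; i < 8; ++i)
    printf("c[%d] = %f\n", i, c[i]);
  return 0;
}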

Generated on Mon Feb 12 2024 13:02:50 for PandA-2024.02 by doxygen 1.8.13