/* Layout transform of the single-channel 8x8 input; with an inner channel
   factor of 1 this is effectively an identity copy of one 8-element row. */
void kernel1(int32_t ax0_ax1_fused_ax2_fused, float* placeholder,
             float* T_layout_trans) {
  int32_t ax3;
  for (ax3 = 0; ax3 < 8; ++ax3) {
    T_layout_trans[((ax0_ax1_fused_ax2_fused * 8) + ax3)] =
        placeholder[((ax0_ax1_fused_ax2_fused * 8) + ax3)];
  }
}
/* Layout transform NCHW2c -> NCHW: the two packed channels (innermost,
   stride 2 in the input) are unpacked into two separate 8x8 planes. */
void kernel2(int32_t ax0_ax1_fused, float* placeholder,
             float* T_layout_trans) {
  int32_t ax2, ax3;
  for (ax2 = 0; ax2 < 8; ++ax2) {
    for (ax3 = 0; ax3 < 8; ++ax3) {
      T_layout_trans[(((ax0_ax1_fused * 64) + (ax2 * 8)) + ax3)] =
          placeholder[(((ax2 * 16) + (ax3 * 2)) + ax0_ax1_fused)];
    }
  }
}
/* Zero-padding: writes one 10-element row of the 10x10 padded buffer,
   copying the 8x8 input into the interior and zeroing the 1-pixel border. */
void kernel3(int32_t i1_i2_fused, float* data_pad, float* placeholder) {
  int32_t i3;
  for (i3 = 0; i3 < 10; ++i3) {
    data_pad[((i1_i2_fused * 10) + i3)] =
        (((((1 <= i1_i2_fused) && (i1_i2_fused < 9)) && (1 <= i3)) && (i3 < 9))
             ? placeholder[(((i1_i2_fused * 8) + i3) - 9)]
             : 0.000000e+00f);
  }
}
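/* The guard in kernel3 zeroes the one-pixel border; on interior points the
   load index collapses to row-major coordinates into the 8x8 source, since
   (i1_i2_fused * 8) + i3 - 9 == ((i1_i2_fused - 1) * 8) + (i3 - 1).
   The exhaustive check below is our own illustration, not part of the
   generated file. */
#include <assert.h>
#include <stdint.h>

static void check_pad_indexing(void) {
  int32_t i1_i2_fused, i3;
  for (i1_i2_fused = 1; i1_i2_fused < 9; ++i1_i2_fused)
    for (i3 = 1; i3 < 9; ++i3)
      /* padded-space load index == row-major index into the 8x8 input */
      assert((((i1_i2_fused * 8) + i3) - 9) ==
             (((i1_i2_fused - 1) * 8) + (i3 - 1)));
}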
/* 3x3 convolution over one row of the 10x10 zero-padded input. Each of the
   eight float2 accumulators holds one output pixel for the two packed output
   channels; the weight tensor placeholder1 is laid out [kh][kw][oc_inner]
   (row stride 6 = 3 taps x 2 channels). */
void kernel4(int32_t n_oc_chunk_fused_oh_fused, float* data_pad,
             float* placeholder1, float* conv2d_NCHWc) {
  int32_t kh, kw, ow_inner;
  float2 conv2d_NCHWc_global[8];
  conv2d_NCHWc_global[0] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[1] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[2] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[3] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[4] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[5] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[6] = ((float2)(0.000000e+00f, 0.000000e+00f));
  conv2d_NCHWc_global[7] = ((float2)(0.000000e+00f, 0.000000e+00f));
  for (kh = 0; kh < 3; ++kh) {
    for (kw = 0; kw < 3; ++kw) {
      conv2d_NCHWc_global[0] = (conv2d_NCHWc_global[0]
          + (((float2)(data_pad[(((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw)],
                       data_pad[(((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[1] = (conv2d_NCHWc_global[1]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 1)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 1)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[2] = (conv2d_NCHWc_global[2]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 2)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 2)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[3] = (conv2d_NCHWc_global[3]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 3)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 3)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[4] = (conv2d_NCHWc_global[4]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 4)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 4)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[5] = (conv2d_NCHWc_global[5]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 5)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 5)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[6] = (conv2d_NCHWc_global[6]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 6)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 6)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
      conv2d_NCHWc_global[7] = (conv2d_NCHWc_global[7]
          + (((float2)(data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 7)],
                       data_pad[((((kh * 10) + (n_oc_chunk_fused_oh_fused * 10)) + kw) + 7)]))
             * ((float2*)(placeholder1 + ((kh * 6) + (kw * 2))))[0]));
    }
  }
  for (ow_inner = 0; ow_inner < 8; ++ow_inner) {
    ((float2*)(conv2d_NCHWc + ((n_oc_chunk_fused_oh_fused * 16) + (ow_inner * 2))))[0] =
        ((float2*)((float*)conv2d_NCHWc_global + (ow_inner * 2)))[0];
  }
}
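/* Scalar paraphrase of kernel4 (our illustration; the name, parameters and
   acc[][] layout are ours, not TVM's): lane c of conv2d_NCHWc_global[ow]
   corresponds to acc[ow][c], i.e. each vector accumulator holds one output
   pixel for the two packed output channels, and each vector multiply applies
   both channel weights of one (kh, kw) tap to the same padded input sample. */
static void kernel4_scalar(int32_t oh, const float* data_pad,
                           const float* weights, float* out) {
  float acc[8][2] = {{0.0f}};
  int32_t kh, kw, ow, c;
  for (kh = 0; kh < 3; ++kh)
    for (kw = 0; kw < 3; ++kw)
      for (ow = 0; ow < 8; ++ow)
        for (c = 0; c < 2; ++c)
          acc[ow][c] += data_pad[((oh + kh) * 10) + kw + ow]
                      * weights[(kh * 6) + (kw * 2) + c];
  /* Write one NCHW2c output row: 8 pixels x 2 interleaved channels. */
  for (ow = 0; ow < 8; ++ow)
    for (c = 0; c < 2; ++c)
      out[(oh * 16) + (ow * 2) + c] = acc[ow][c];
}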
/* Each outer fused loop is parallelized with OpenMP; the loop body has been
   outlined into the corresponding kernel above. */
void parallel1(float* placeholder, float* T_layout_trans) {
  int32_t ax0_ax1_fused_ax2_fused;
  #pragma omp parallel for
  for (ax0_ax1_fused_ax2_fused = 0; ax0_ax1_fused_ax2_fused < 8;
       ++ax0_ax1_fused_ax2_fused) {
    kernel1(ax0_ax1_fused_ax2_fused, placeholder, T_layout_trans);
  }
}

void parallel2(float* placeholder, float* T_layout_trans) {
  int32_t ax0_ax1_fused;
  #pragma omp parallel for
  for (ax0_ax1_fused = 0; ax0_ax1_fused < 2; ++ax0_ax1_fused) {
    kernel2(ax0_ax1_fused, placeholder, T_layout_trans);
  }
}

void parallel3(float* data_pad, float* placeholder) {
  int32_t i1_i2_fused;
  #pragma omp parallel for
  for (i1_i2_fused = 0; i1_i2_fused < 10; ++i1_i2_fused) {
    kernel3(i1_i2_fused, data_pad, placeholder);
  }
}

void parallel4(float* data_pad, float* placeholder1, float* conv2d_NCHWc) {
  int32_t n_oc_chunk_fused_oh_fused;
  #pragma omp parallel for
  for (n_oc_chunk_fused_oh_fused = 0; n_oc_chunk_fused_oh_fused < 8;
       ++n_oc_chunk_fused_oh_fused) {
    kernel4(n_oc_chunk_fused_oh_fused, data_pad, placeholder1, conv2d_NCHWc);
  }
}
/* TVM packed-function entry points: each unpacks its TVMValue argument
   array into raw data pointers and launches the corresponding parallel
   section. */
TVM_DLL int32_t fused_layout_transform_1(void* args, void* arg_type_ids,
                                         int32_t num_args) {
  void* arg0 = (((TVMValue*)args)[0].v_handle);
  void* arg1 = (((TVMValue*)args)[1].v_handle);
  float* placeholder = (float*)(((TVMArray*)arg0)[0].data);
  float* T_layout_trans = (float*)(((TVMArray*)arg1)[0].data);
  parallel2(placeholder, T_layout_trans);
  return 0;
}

TVM_DLL int32_t fused_layout_transform_2(void* args, void* arg_type_ids,
                                         int32_t num_args) {
  void* arg0 = (((TVMValue*)args)[0].v_handle);
  void* arg1 = (((TVMValue*)args)[1].v_handle);
  float* placeholder = (float*)(((TVMArray*)arg0)[0].data);
  float* T_layout_trans = (float*)(((TVMArray*)arg1)[0].data);
  parallel1(placeholder, T_layout_trans);
  return 0;
}

TVM_DLL int32_t fused_nn_contrib_conv2d_NCHWc(void* args, void* arg_type_ids,
                                              int32_t num_args) {
  void* arg0 = (((TVMValue*)args)[0].v_handle);
  void* arg1 = (((TVMValue*)args)[1].v_handle);
  void* arg2 = (((TVMValue*)args)[2].v_handle);
  float* placeholder = (float*)(((TVMArray*)arg0)[0].data);
  float* placeholder1 = (float*)(((TVMArray*)arg1)[0].data);
  float* conv2d_NCHWc = (float*)(((TVMArray*)arg2)[0].data);
  float data_pad[100]; /* 10x10 zero-padded input tile */
  parallel3(data_pad, placeholder);
  parallel4(data_pad, placeholder1, conv2d_NCHWc);
  return 0;
}
/* Test harness: conv() runs the three fused functions in sequence (hence
   res1..res3); fused_conv2d_wrapper() packs the raw pointers into the TVM
   calling convention and, under BAMBU_PROFILING, brackets the call with
   Bambu's timing intrinsics. Elisions in the original listing are marked
   with "...". */
int32_t conv(TVMValue* param0, TVMValue* param1, TVMValue* param2) {
  int32_t res1, res2, res3;
  /* ... */
}

int32_t fused_conv2d_wrapper(float* X, float* p0,
                             float* T_fused_layout_transform_1) {
  int32_t res;
  /* ... */
  float out_fused_layout_transform_2[64];
  /* ... */
  a1[0].data = out_fused_layout_transform_2;
  /* ... */
  b0[0].data = out_fused_layout_transform_2;
  /* ... */
  b2[0].data = out_conv;
  /* ... */
  c0[0].data = out_conv;
  c1[0].data = T_fused_layout_transform_1;
  /* ... */
#ifdef BAMBU_PROFILING
  __builtin_bambu_time_start();
#endif
  res = conv(param0, param1, param2);
#ifdef BAMBU_PROFILING
  __builtin_bambu_time_stop();
#endif
  return res;
}
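/* The wrappers above follow TVM's packed calling convention: each TVMValue
   carries a handle to a TVMArray, a plain C tensor object that does not
   manage its memory, whose data field points at the raw buffer. The
   stand-alone sketch below is our illustration of a direct call to
   fused_layout_transform_1, sized for the 2x8x8 tensors used here; the
   stub types stand in for the real TVM runtime definitions and only work
   because data and v_handle are the sole fields the wrapper touches. */
#include <stdint.h>

typedef struct { void* data; } TVMArrayStub;    /* stand-in for TVMArray */
typedef union { void* v_handle; } TVMValueStub; /* stand-in for TVMValue */

int32_t example_packed_call(void) {
  static float in[128];  /* conv output in NCHW2c: 8x8 pixels x 2 channels */
  static float out[128]; /* final NCHW result: 2 planes of 8x8 */
  TVMArrayStub a = { in }, b = { out };
  TVMValueStub args[2];
  args[0].v_handle = &a;
  args[1].v_handle = &b;
  return fused_layout_transform_1(args, 0, 2);
}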