31 #include <tensorflow/c/c_api.h>
52 unsigned char *graph_data = NULL;
54 long size, bytes_read;
69 if (bytes_read != size){
74 graph_buf = TF_NewBuffer();
75 graph_buf->data = (void *)graph_data;
76 graph_buf->length = size;
86 int64_t input_dims[] = {1, input->height, input->width, input->channels};
100 return TF_AllocateTensor(dt, input_dims, 4,
101     input_dims[1] * input_dims[2] * input_dims[3] * size);
107 TF_SessionOptions *sess_opts;
108 const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");
111 tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, input_name);
112 if (!tf_model->input.oper){
115 tf_model->input.index = 0;
133 for (int i = 0; i < nb_output; ++i) {
134 tf_model->outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
164 sess_opts = TF_NewSessionOptions();
166 TF_DeleteSessionOptions(sess_opts);
167 if (TF_GetCode(tf_model->status) != TF_OK)
178 if (TF_GetCode(tf_model->status) != TF_OK)
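Lines 164-178 show only the option bookkeeping and the status checks around session setup; the calls that actually create the session and run the graph's "init" operation fall on the elided lines. A hedged sketch of how that step is typically written with the C API, assuming a `session` member on the model struct and a DNN_ERROR-style error return (both are assumptions here):

/* presumably created while sess_opts is live, i.e. between lines 164 and 166 */
tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);

if (init_op) {
    /* run the "init" operation once: no feeds, no fetches, one target op */
    TF_SessionRun(tf_model->session, NULL,
                  NULL, NULL, 0,
                  NULL, NULL, 0,
                  &init_op, 1,
                  NULL, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK)
        return DNN_ERROR;   /* assumed error convention */
}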
189 TF_Buffer *graph_def;
190 TF_ImportGraphDefOptions *graph_opts;
196 tf_model->graph = TF_NewGraph();
197 tf_model->status = TF_NewStatus();
198 graph_opts = TF_NewImportGraphDefOptions();
199 TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
200 TF_DeleteImportGraphDefOptions(graph_opts);
201 TF_DeleteBuffer(graph_def);
202 if (TF_GetCode(tf_model->status) != TF_OK){
203 TF_DeleteGraph(tf_model->graph);
204 TF_DeleteStatus(tf_model->status);
211 #define NAME_BUFFER_SIZE 256
217 TF_OperationDescription *op_desc;
219 int64_t strides[] = {1, 1, 1, 1};
230 op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
231 TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
232 dims[0] = params->output_num;
233 dims[1] = params->kernel_size;
234 dims[2] = params->kernel_size;
235 dims[3] = params->input_num;
237 tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
238 memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
239 TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
240 if (TF_GetCode(tf_model->status) != TF_OK){
243 op = TF_FinishOperation(op_desc, tf_model->status);
244 if (TF_GetCode(tf_model->status) != TF_OK){
249 op_desc = TF_NewOperation(tf_model->graph, "Transpose", name_buffer);
251 TF_AddInput(op_desc, input);
252 input.oper = transpose_op;
253 TF_AddInput(op_desc, input);
254 TF_SetAttrType(op_desc, "T", TF_FLOAT);
255 TF_SetAttrType(op_desc, "Tperm", TF_INT32);
256 op = TF_FinishOperation(op_desc, tf_model->status);
257 if (TF_GetCode(tf_model->status) != TF_OK){
262 op_desc = TF_NewOperation(tf_model->graph, "Conv2D", name_buffer);
263 input.oper = *cur_op;
264 TF_AddInput(op_desc, input);
266 TF_AddInput(op_desc, input);
267 TF_SetAttrType(op_desc, "T", TF_FLOAT);
268 TF_SetAttrIntList(op_desc, "strides", strides, 4);
269 TF_SetAttrString(op_desc, "padding", "VALID", 5);
270 *cur_op = TF_FinishOperation(op_desc, tf_model->status);
271 if (TF_GetCode(tf_model->status) != TF_OK){
276 op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
277 TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
278 dims[0] = params->output_num;
280 tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
281 memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
282 TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
283 if (TF_GetCode(tf_model->status) != TF_OK){
286 op = TF_FinishOperation(op_desc, tf_model->status);
287 if (TF_GetCode(tf_model->status) != TF_OK){
292 op_desc = TF_NewOperation(tf_model->graph, "BiasAdd", name_buffer);
293 input.oper = *cur_op;
294 TF_AddInput(op_desc, input);
296 TF_AddInput(op_desc, input);
297 TF_SetAttrType(op_desc, "T", TF_FLOAT);
298 *cur_op = TF_FinishOperation(op_desc, tf_model->status);
299 if (TF_GetCode(tf_model->status) != TF_OK){
304 switch (params->activation){
306 op_desc = TF_NewOperation(tf_model->graph, "Relu", name_buffer);
309 op_desc = TF_NewOperation(tf_model->graph, "Tanh", name_buffer);
312 op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);
317 input.oper = *cur_op;
318 TF_AddInput(op_desc, input);
319 TF_SetAttrType(op_desc, "T", TF_FLOAT);
320 *cur_op = TF_FinishOperation(op_desc, tf_model->status);
321 if (TF_GetCode(tf_model->status) != TF_OK){
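Every node in this hand-built graph is created with the same four-step recipe: TF_NewOperation with an op type and a unique name, TF_AddInput/TF_SetAttr* calls to wire it up, TF_FinishOperation, then a status check. The activation step above, for example, could be factored into a small helper along these lines (the helper name and the 0/-1 return convention are illustrative, not the file's own API):

/* Append a single-input float op (e.g. "Relu", "Tanh", "Sigmoid") after *cur_op. */
static int add_unary_op(TF_Graph *graph, TF_Status *status,
                        TF_Operation **cur_op, const char *op_type, const char *name)
{
    TF_OperationDescription *op_desc = TF_NewOperation(graph, op_type, name);
    TF_Output input = { *cur_op, 0 };

    TF_AddInput(op_desc, input);
    TF_SetAttrType(op_desc, "T", TF_FLOAT);
    *cur_op = TF_FinishOperation(op_desc, status);
    return TF_GetCode(status) == TF_OK ? 0 : -1;
}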
331 TF_OperationDescription *op_desc;
336 op_desc = TF_NewOperation(tf_model->graph, "DepthToSpace", name_buffer);
337 input.oper = *cur_op;
339 TF_AddInput(op_desc, input);
340 TF_SetAttrType(op_desc, "T", TF_FLOAT);
341 TF_SetAttrInt(op_desc, "block_size", params->block_size);
342 *cur_op = TF_FinishOperation(op_desc, tf_model->status);
343 if (TF_GetCode(tf_model->status) != TF_OK){
356 for (layer = 0; layer < conv_network->layers_num; ++layer){
359 pad += params->kernel_size >> 1;
370 TF_OperationDescription *op_desc;
373 int64_t pads_shape[] = {4, 2};
377 op_desc = TF_NewOperation(tf_model->graph, "Const", "pads");
378 TF_SetAttrType(op_desc, "dtype", TF_INT32);
379 tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 * sizeof(int32_t));
380 pads = (int32_t *)TF_TensorData(tensor);
381 pads[0] = 0; pads[1] = 0;
382 pads[2] = pad; pads[3] = pad;
383 pads[4] = pad; pads[5] = pad;
384 pads[6] = 0; pads[7] = 0;
385 TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
386 if (TF_GetCode(tf_model->status) != TF_OK){
389 op = TF_FinishOperation(op_desc, tf_model->status);
390 if (TF_GetCode(tf_model->status) != TF_OK){
394 op_desc = TF_NewOperation(tf_model->graph, "MirrorPad", "mirror_pad");
395 input.oper = *cur_op;
396 TF_AddInput(op_desc, input);
398 TF_AddInput(op_desc, input);
399 TF_SetAttrType(op_desc, "T", TF_FLOAT);
400 TF_SetAttrType(op_desc, "Tpaddings", TF_INT32);
401 TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
402 *cur_op = TF_FinishOperation(op_desc, tf_model->status);
403 if (TF_GetCode(tf_model->status) != TF_OK){
413 TF_OperationDescription *op_desc;
415 TF_Operation *transpose_op;
419 int64_t transpose_perm_shape[] = {4};
420 int64_t input_shape[] = {1, -1, -1, -1};
433 tf_model->graph = TF_NewGraph();
434 tf_model->status = TF_NewStatus();
436 #define CLEANUP_ON_ERROR(tf_model) \
438 TF_DeleteGraph(tf_model->graph); \
439 TF_DeleteStatus(tf_model->status); \
443 op_desc = TF_NewOperation(tf_model->graph, "Placeholder", "x");
444 TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
445 TF_SetAttrShape(op_desc, "shape", input_shape, 4);
446 op = TF_FinishOperation(op_desc, tf_model->status);
447 if (TF_GetCode(tf_model->status) != TF_OK){
455 op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
456 TF_SetAttrType(op_desc, "dtype", TF_INT32);
457 tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
463 TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
464 if (TF_GetCode(tf_model->status) != TF_OK){
467 transpose_op = TF_FinishOperation(op_desc, tf_model->status);
469 for (layer = 0; layer < conv_network->layers_num; ++layer){
491 op_desc = TF_NewOperation(tf_model->graph, "Identity", "y");
493 TF_AddInput(op_desc, input);
494 TF_FinishOperation(op_desc, tf_model->status);
495 if (TF_GetCode(tf_model->status) != TF_OK){
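With the placeholder named "x" and the final Identity op named "y", inference later reduces to one TF_SessionRun call with a single feed and a single fetch. A sketch of that call, assuming a `session` member on the model and an already-filled input tensor here called `input_tensor` (both names are assumptions); the fetch handling is illustrative:

TF_Output run_input  = { TF_GraphOperationByName(tf_model->graph, "x"), 0 };
TF_Output run_output = { TF_GraphOperationByName(tf_model->graph, "y"), 0 };
TF_Tensor *output_tensor = NULL;

TF_SessionRun(tf_model->session, NULL,
              &run_input, &input_tensor, 1,     /* feed "x" with the frame tensor */
              &run_output, &output_tensor, 1,   /* fetch "y" */
              NULL, 0,
              NULL, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK)
    return DNN_ERROR;                           /* assumed error convention */
/* read results via TF_TensorData(output_tensor), then release with TF_DeleteTensor(output_tensor) */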
529 model->model = (void *)tf_model;
557 if (TF_GetCode(tf_model->status) != TF_OK){
561 for (uint32_t i = 0; i < nb; ++i) {
576 tf_model = (TFModel *)(*model)->model;
578 TF_DeleteGraph(tf_model->graph);
585 TF_DeleteStatus(tf_model->status);
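Between the graph deletion at line 578 and the status deletion at line 585 the session itself presumably gets torn down as well; the usual C-API order is close session, delete session, delete graph, delete status. A sketch of a complete free routine under that assumption:

if (tf_model->session) {
    TF_CloseSession(tf_model->session, tf_model->status);
    TF_DeleteSession(tf_model->session, tf_model->status);
}
TF_DeleteGraph(tf_model->graph);
TF_DeleteStatus(tf_model->status);
av_freep(&tf_model);   /* assuming FFmpeg's av_malloc/av_freep pairing for the model struct */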