refresh repository to delete huge files
iwatake2222 committed Jul 23, 2020
1 parent aa3c372 commit 9447cf3
Showing 40 changed files with 3,338 additions and 0 deletions.
5 changes: 5 additions & 0 deletions .gitignore
@@ -0,0 +1,5 @@
.vscode
build/
resource/
third_party/tensorflow_prebuilt/
third_party/edgetpu_prebuilt/
12 changes: 12 additions & 0 deletions .gitmodules
@@ -0,0 +1,12 @@
[submodule "third_party/tensorflow"]
path = third_party/tensorflow
url = https://github.com/tensorflow/tensorflow
[submodule "third_party/edgetpu"]
path = third_party/edgetpu
url = https://github.com/google-coral/edgetpu
[submodule "third_party/glog"]
path = third_party/glog
url = https://github.com/google/glog
[submodule "third_party/abseil-cpp"]
path = third_party/abseil-cpp
url = https://github.com/abseil/abseil-cpp
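These submodules pin the four external dependencies (TensorFlow, the Edge TPU runtime, glog, and Abseil). After cloning the repository, they can be fetched with the standard command git submodule update --init --recursive; note that the prebuilt tensorflow/edgetpu directories excluded by .gitignore above are separate from these source checkouts.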
46 changes: 46 additions & 0 deletions InferenceHelper/CMakeLists.txt
@@ -0,0 +1,46 @@
cmake_minimum_required(VERSION 3.0)

set(LibraryName "InferenceHelper")

set(TFLITE_DELEGATE_EDGETPU off CACHE BOOL "With Tflite Delegate EdgeTPU? [on/off]")
set(TFLITE_DELEGATE_GPU off CACHE BOOL "With Tflite Delegate GPU? [on/off]")
set(TFLITE_DELEGATE_XNNPACK off CACHE BOOL "With Tflite Delegate XNNPACK? [on/off]")

# Create library
add_library (${LibraryName}
	InferenceHelper.h
	InferenceHelper.cpp
	InferenceHelperTensorflowLite.h
	InferenceHelperTensorflowLite.cpp
)

# For Tensorflow Lite
include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/tflite.cmake)
target_include_directories(${LibraryName} PUBLIC ${TFLITE_INC})
target_link_libraries(${LibraryName} ${TFLITE_LIB})

# For Tensorflow Lite Delegate(Edge TPU)
if(TFLITE_DELEGATE_EDGETPU)
	include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/tflite_edgetpu.cmake)
	target_include_directories(${LibraryName} PUBLIC ${TFLITE_EDGETPU_INC})
	target_link_libraries(${LibraryName} ${TFLITE_EDGETPU_LIB})
	add_definitions(-DTFLITE_DELEGATE_EDGETPU)
endif()

# For Tensorflow Lite Delegate(GPU)
if(TFLITE_DELEGATE_GPU)
	find_package(OpenCL)
	if(OpenCL_FOUND)
		target_include_directories(${LibraryName} PUBLIC ${OpenCL_INCLUDE_DIRS})
		target_link_libraries(${LibraryName} ${OpenCL_LIBRARIES})
	endif()
	include(${CMAKE_CURRENT_LIST_DIR}/../third_party/cmakes/tflite_gpu.cmake)
	target_include_directories(${LibraryName} PUBLIC ${TFLITE_GPU_INC})
	target_link_libraries(${LibraryName} ${TFLITE_GPU_LIB} EGL GLESv2)
	add_definitions(-DTFLITE_DELEGATE_GPU)
endif()

# For Tensorflow Lite Delegate(XNNPACK)
if(TFLITE_DELEGATE_XNNPACK)
	add_definitions(-DTFLITE_DELEGATE_XNNPACK)
endif()
86 changes: 86 additions & 0 deletions InferenceHelper/InferenceHelper.cpp
@@ -0,0 +1,86 @@
/*** Include ***/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string>

#include "InferenceHelper.h"
#include "InferenceHelperTensorflowLite.h"

#if defined(ANDROID) || defined(__ANDROID__)
#include <android/log.h>
#define TAG "MyApp_NDK"
#define PRINT(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
#else
#define PRINT(...) printf(__VA_ARGS__)
#endif

InferenceHelper* InferenceHelper::create(const InferenceHelper::HELPER_TYPE type)
{
	InferenceHelper* p = NULL;
	switch (type) {
	case TENSORFLOW_LITE:
		PRINT("[InferenceHelper] Use TensorflowLite\n");
		p = new InferenceHelperTensorflowLite();
		break;
	case TENSORFLOW_LITE_EDGETPU:
		PRINT("[InferenceHelper] Use TensorflowLite EdgeTPU Delegate\n");
		p = new InferenceHelperTensorflowLite();
		break;
	case TENSORFLOW_LITE_GPU:
		PRINT("[InferenceHelper] Use TensorflowLite GPU Delegate\n");
		p = new InferenceHelperTensorflowLite();
		break;
	case TENSORFLOW_LITE_XNNPACK:
		PRINT("[InferenceHelper] Use TensorflowLite XNNPACK Delegate\n");
		p = new InferenceHelperTensorflowLite();
		break;
	default:
		PRINT("not supported yet\n");
		exit(1);
		break;
	}
	p->m_helperType = type;
	return p;
}


TensorInfo::TensorInfo()
{
	index = -1;
	type = TENSOR_TYPE_NONE;
	data = NULL;
	dims.clear();
	quant.scale = 0;
	quant.zeroPoint = 0;
	m_dataFp32 = NULL;
}

TensorInfo::~TensorInfo()
{
	if (m_dataFp32 != NULL) {
		delete[] m_dataFp32;
	}
}

// Returns the tensor data as float; a uint8 tensor is dequantized into an internally allocated buffer
float* TensorInfo::getDataAsFloat()
{
	if (type == TENSOR_TYPE_UINT8) {
		int dataNum = 1;
		for (int i = 0; i < (int)dims.size(); i++) dataNum *= dims[i];
		if (m_dataFp32 == NULL) {
			m_dataFp32 = new float[dataNum];
		}
		const uint8_t* valUint8 = (uint8_t*)data;
		for (int i = 0; i < dataNum; i++) {
			m_dataFp32[i] = (valUint8[i] - quant.zeroPoint) * quant.scale;
		}
		return m_dataFp32;
	} else if (type == TENSOR_TYPE_FP32) {
		return (float*)data;
	} else {
		PRINT("invalid call\n");
		return NULL;
	}
}
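The conversion above is the standard affine dequantization, float = (uint8 - zeroPoint) * scale: for example, with quant.scale = 0.5 and quant.zeroPoint = 128, a raw value of 130 becomes (130 - 128) * 0.5 = 1.0.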

68 changes: 68 additions & 0 deletions InferenceHelper/InferenceHelper.h
@@ -0,0 +1,68 @@

#ifndef INFERENCE_HELPER_
#define INFERENCE_HELPER_

#include <vector>

class TensorInfo {
public:
	typedef enum {
		TENSOR_TYPE_NONE,
		TENSOR_TYPE_UINT8,
		TENSOR_TYPE_FP32,
		TENSOR_TYPE_INT32,
		TENSOR_TYPE_INT64,
	} TENSOR_TYPE;

public:
	TensorInfo();
	~TensorInfo();
	float* getDataAsFloat();

public:
	int index;
	TENSOR_TYPE type;
	void *data;
	std::vector<int> dims;
	struct {
		float scale;
		int zeroPoint;
	} quant;

private:
	float *m_dataFp32;
};

class InferenceHelper {
public:
	typedef enum {
		TENSORFLOW_LITE,
		TENSORFLOW_LITE_EDGETPU,
		TENSORFLOW_LITE_GPU,
		TENSORFLOW_LITE_XNNPACK,
		NCNN,
		NCNN_VULKAN,
		MNN,
		OPEN_CV,
		OPEN_CV_OPENCL,
		TENSOR_RT,
	} HELPER_TYPE;

public:
	virtual ~InferenceHelper() {}
	virtual int initialize(const char *modelFilename, const int numThreads) = 0;
	virtual int finalize(void) = 0;
	virtual int inference(void) = 0;
	virtual int getTensorByName(const char *name, TensorInfo *tensorInfo) = 0;
	virtual int getTensorByIndex(const int index, TensorInfo *tensorInfo) = 0;
	virtual int setBufferToTensorByName(const char *name, const char *data, const unsigned int dataSize) = 0;
	virtual int setBufferToTensorByIndex(const int index, const char *data, const unsigned int dataSize) = 0;
	static InferenceHelper* create(const HELPER_TYPE typeFw);

protected:
	HELPER_TYPE m_helperType;
};

#endif
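For reference, a minimal caller of this interface might look as follows; the model path and the tensor names "input" and "output" are placeholders that depend on the actual model file:

#include <vector>
#include "InferenceHelper.h"

int main()
{
	// Select the plain TensorFlow Lite backend; 4 threads is an arbitrary example value
	InferenceHelper* helper = InferenceHelper::create(InferenceHelper::TENSORFLOW_LITE);
	if (helper->initialize("model.tflite", 4) != 0) return -1;

	// Feed a preprocessed input buffer to the tensor named "input" (model dependent)
	std::vector<char> input(224 * 224 * 3);
	helper->setBufferToTensorByName("input", input.data(), (unsigned int)input.size());

	helper->inference();

	// Read the tensor named "output"; getDataAsFloat() dequantizes uint8 tensors on the fly
	TensorInfo outputTensor;
	helper->getTensorByName("output", &outputTensor);
	const float* scores = outputTensor.getDataAsFloat();
	(void)scores;  // postprocess the results here

	helper->finalize();
	delete helper;
	return 0;
}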