fix lint issues
saurabhkale17 committed Aug 8, 2023
1 parent a99d2fb commit dc95a11
Showing 9 changed files with 78 additions and 85 deletions.
22 changes: 11 additions & 11 deletions onnxruntime/core/providers/openvino/backend_manager.cc
@@ -75,17 +75,17 @@ BackendManager::BackendManager(const onnxruntime::Node& fused_node,
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] Model has symbolic input dims";
if (GetGlobalContext().device_type.find("CPU") != std::string::npos ||
GetGlobalContext().device_type.find("GPU") != std::string::npos) {
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] Starting backend initialization. "
<< "Creating backend Dynamic Shapes";
try {
concrete_backend_ = BackendFactory::MakeBackend(*model_proto_,
GetGlobalContext(),
subgraph_context_);
} catch (std::string const& msg) {
throw msg;
}
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] "
<< "Backend created for graph " << subgraph_context_.subgraph_name;
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] Starting backend initialization. "
<< "Creating backend Dynamic Shapes";
try {
concrete_backend_ = BackendFactory::MakeBackend(*model_proto_,
GetGlobalContext(),
subgraph_context_);
} catch (std::string const& msg) {
throw msg;
}
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] "
<< "Backend created for graph " << subgraph_context_.subgraph_name;
}
} else {
LOGS_DEFAULT(INFO) << "[OpenVINO-EP] Model has concrete input dims. Initializing backend for graph " << subgraph_context_.subgraph_name;
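The backend_manager.cc hunk above is an indentation-only lint fix; the logic is unchanged, including the catch block that rethrows the caught std::string by value (`throw msg;`). As a side note, here is a minimal standalone sketch (not part of this commit) contrasting that with a bare `throw;`, which rethrows the active exception without copying:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-in for BackendFactory::MakeBackend() failing.
void make_backend() {
  throw std::string("backend creation failed");
}

int main() {
  try {
    try {
      make_backend();
    } catch (std::string const& msg) {
      // `throw msg;` (the pattern in the hunk) copies the caught object;
      // a bare `throw;` rethrows the original exception unchanged.
      throw;
    }
  } catch (std::string const& msg) {
    std::cout << msg << "\n";
  }
  return 0;
}
```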
73 changes: 34 additions & 39 deletions onnxruntime/core/providers/openvino/backends/basic_backend.cc
@@ -37,11 +37,9 @@ BasicBackend::BasicBackend(const ONNX_NAMESPACE::ModelProto& model_proto,
// Setting OpenCL queue throttling for GPU
EnableGPUThrottling(device_config);

-
  // Enable streams; default=1 unless overridden by user config
EnableStreams();

-
#ifndef NDEBUG
if (IsDebugEnabled()) {
std::string file_name = subgraph_context.subgraph_name + "_static.onnx";
@@ -68,7 +66,7 @@ BasicBackend::BasicBackend(const ONNX_NAMESPACE::ModelProto& model_proto,
}
#else
#if defined(OPENVINO_2023_0) || (OPENVINO_2023_1)
-  if (global_context_.enable_dynamic_shapes == false && dev_prec!="CPU_FP16") {
+  if (!subgraph_context_.has_dynamic_input_shape && dev_prec != "CPU_FP16") {
const std::string model = model_proto.SerializeAsString();
exe_network_ = global_context_.ie_core.LoadNetwork(model, hw_target, device_config, subgraph_context_.subgraph_name);
LOGS_DEFAULT(INFO) << log_tag << "Loaded model to the plugin";
@@ -104,25 +102,25 @@ BasicBackend::BasicBackend(const ONNX_NAMESPACE::ModelProto& model_proto,
inferRequestsQueue_ = std::unique_ptr<InferRequestsQueue>(new InferRequestsQueue(exe_network_, nireq));
}

-bool BasicBackend::ValidateSubgraph(std::map<std::string, std::shared_ptr<ov::Node>> & const_outputs_map) {
-  if (const_outputs_map.size() == subgraph_context_.output_names.size())
-    subgraph_context_.is_constant = true;
-  if (subgraph_context_.is_constant) {
-    LOGS_DEFAULT(INFO) << log_tag << "The subgraph is a const. Directly moving to Infer stage.";
-    return true;
-  }
-  return false;
+bool BasicBackend::ValidateSubgraph(std::map<std::string, std::shared_ptr<ov::Node>>& const_outputs_map) {
+  if (const_outputs_map.size() == subgraph_context_.output_names.size())
+    subgraph_context_.is_constant = true;
+  if (subgraph_context_.is_constant) {
+    LOGS_DEFAULT(INFO) << log_tag << "The subgraph is a const. Directly moving to Infer stage.";
+    return true;
+  }
+  return false;
}

-void BasicBackend::PopulateConfigValue(ov::AnyMap & device_config) {
-  device_config = {};
-  // Set inference precision based on device precision for OV backend
-  if (global_context_.precision_str.find("FP16")!= std::string::npos && global_context_.device_type == "GPU"){
-    device_config.emplace(ov::hint::inference_precision("f16"));
-  }
-  if (global_context_.precision_str.find("FP32")!= std::string::npos){
-    device_config.emplace(ov::hint::inference_precision("f32"));
-  }
+void BasicBackend::PopulateConfigValue(ov::AnyMap& device_config) {
+  device_config = {};
+  // Set inference precision based on device precision for OV backend
+  if (global_context_.precision_str.find("FP16") != std::string::npos && global_context_.device_type == "GPU") {
+    device_config.emplace(ov::hint::inference_precision("f16"));
+  }
+  if (global_context_.precision_str.find("FP32") != std::string::npos) {
+    device_config.emplace(ov::hint::inference_precision("f32"));
+  }
#ifndef NDEBUG
if (openvino_ep::backend_utils::IsDebugEnabled()) {
device_config.emplace(ov::enable_profiling(true));
@@ -167,24 +165,22 @@ void BasicBackend::EnableStreams() {
global_context_.ie_core.SetStreams(global_context_.device_type, global_context_.num_streams);
}

-
-// Starts an asynchronous inference request for data in slice indexed by batch_slice_idx on
-// an Infer Request indexed by infer_req_idx
-void BasicBackend::StartAsyncInference(Ort::KernelContext & context, OVInferRequestPtr infer_request) {
-  try {
-    auto graph_input_info = exe_network_.Get().inputs();
-    int input_idx = 0;
-    for (auto input_info_iter = graph_input_info.begin();
-         input_info_iter != graph_input_info.end(); ++input_info_iter) {
-      auto input_names = input_info_iter->get_names();
-      std::string onnx_input_name;
-      std::string input_name;
-      // use names retrieved from original ONNX model to assign the right onnx input name for the graph
-      for (auto it = subgraph_context_.input_names.begin(); it != subgraph_context_.input_names.end(); ++it) {
-        if (it->second == input_idx) {
-          onnx_input_name = it->first;
-          break;
-        }
+// Starts an asynchronous inference request for data in slice indexed by batch_slice_idx on
+// an Infer Request indexed by infer_req_idx
+void BasicBackend::StartAsyncInference(Ort::KernelContext& context, OVInferRequestPtr infer_request) {
+  try {
+    auto graph_input_info = exe_network_.Get().inputs();
+    int input_idx = 0;
+    for (auto input_info_iter = graph_input_info.begin();
+         input_info_iter != graph_input_info.end(); ++input_info_iter) {
+      auto input_names = input_info_iter->get_names();
+      std::string onnx_input_name;
+      std::string input_name;
+      // use names retrieved from original ONNX model to assign the right onnx input name for the graph
+      for (auto it = subgraph_context_.input_names.begin(); it != subgraph_context_.input_names.end(); ++it) {
+        if (it->second == input_idx) {
+          onnx_input_name = it->first;
+          break;
+        }
}
// using the input name retrieved from ONNX original to match with the input names returned by OV tensors
@@ -195,7 +191,6 @@ void BasicBackend::EnableStreams() {
}
size_t batch_slice_idx = 0;
if (subgraph_context_.has_dynamic_input_shape &&
-        global_context_.enable_dynamic_shapes == true &&
(global_context_.device_type.find("CPU") != std::string::npos ||
global_context_.device_type.find("GPU") != std::string::npos)) {
auto tensor = context.GetInput(subgraph_context_.input_names.at(input_name));
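The one behavioral change in basic_backend.cc: the up-front load path is now gated on the subgraph's own `has_dynamic_input_shape` flag rather than the user-facing `enable_dynamic_shapes` option, and the now-redundant `enable_dynamic_shapes == true` term is dropped from the condition in StartAsyncInference. A minimal sketch of the idea, assuming the OpenVINO 2.x API; the model path is a placeholder, not taken from the commit:

```cpp
#include <memory>
#include <openvino/openvino.hpp>

int main() {
  ov::Core core;
  // "model.onnx" is a placeholder path.
  std::shared_ptr<ov::Model> model = core.read_model("model.onnx");

  if (model->is_dynamic()) {
    // Dynamic input shapes: defer to the reshape-per-request path.
  } else {
    // Concrete shapes: compile once up front, as the gated branch does.
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    (void)compiled;
  }
  return 0;
}
```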
39 changes: 19 additions & 20 deletions onnxruntime/core/providers/openvino/openvino_provider_factory.cc
@@ -6,14 +6,13 @@
#include "core/providers/openvino/openvino_execution_provider.h"
#include "core/providers/openvino/openvino_provider_factory_creator.h"

-
namespace onnxruntime {
struct OpenVINOProviderFactory : IExecutionProviderFactory {
OpenVINOProviderFactory(const char* device_type, bool enable_vpu_fast_compile,
const char* device_id, size_t num_of_threads,
const char* cache_dir, int num_streams, void* context,
bool enable_opencl_throttling, bool enable_dynamic_shapes)
-      : enable_vpu_fast_compile_(enable_vpu_fast_compile), num_of_threads_(num_of_threads), num_streams_(num_streams),context_(context), enable_opencl_throttling_(enable_opencl_throttling), enable_dynamic_shapes_(enable_dynamic_shapes) {
+      : enable_vpu_fast_compile_(enable_vpu_fast_compile), num_of_threads_(num_of_threads), num_streams_(num_streams), context_(context), enable_opencl_throttling_(enable_opencl_throttling), enable_dynamic_shapes_(enable_dynamic_shapes) {
device_type_ = (device_type == nullptr) ? "" : device_type;
device_id_ = (device_id == nullptr) ? "" : device_id;
cache_dir_ = (cache_dir == nullptr) ? "" : cache_dir;
@@ -79,47 +78,47 @@ struct OpenVINO_Provider : Provider {
if (provider_options_map.find("device_type") != provider_options_map.end()) {
device_type = provider_options_map.at("device_type").c_str();
}
if (provider_options_map.find("device_id") != provider_options_map.end()){
if (provider_options_map.find("device_id") != provider_options_map.end()) {
device_id = provider_options_map.at("device_id").c_str();
}
if(provider_options_map.find("cache_dir") != provider_options_map.end()){
if (provider_options_map.find("cache_dir") != provider_options_map.end()) {
cache_dir = provider_options_map.at("cache_dir").c_str();
}
if(provider_options_map.find("context") != provider_options_map.end()){
context = (void*) provider_options_map.at("context").c_str();
if (provider_options_map.find("context") != provider_options_map.end()) {
context = (void*)provider_options_map.at("context").c_str();
}

if(provider_options_map.find("num_of_threads") != provider_options_map.end()){
if (provider_options_map.find("num_of_threads") != provider_options_map.end()) {
num_of_threads = std::stoi(provider_options_map.at("num_of_threads"));
}

if(provider_options_map.find("num_streams") != provider_options_map.end()){
if (provider_options_map.find("num_streams") != provider_options_map.end()) {
num_streams = std::stoi(provider_options_map.at("num_streams"));
}
std::string bool_flag="";
if(provider_options_map.find("enable_vpu_fast_compile") != provider_options_map.end()){
std::string bool_flag = "";
if (provider_options_map.find("enable_vpu_fast_compile") != provider_options_map.end()) {
bool_flag = provider_options_map.at("enable_vpu_fast_compile");
if(bool_flag=="true" || bool_flag=="True")
if (bool_flag == "true" || bool_flag == "True")
enable_vpu_fast_compile = true;
else if(bool_flag=="false" || bool_flag=="False")
else if (bool_flag == "false" || bool_flag == "False")
enable_vpu_fast_compile = false;
bool_flag="";
bool_flag = "";
}

if(provider_options_map.find("enable_opencl_throttling") != provider_options_map.end()){
if (provider_options_map.find("enable_opencl_throttling") != provider_options_map.end()) {
bool_flag = provider_options_map.at("enable_opencl_throttling");
if(bool_flag=="true" || bool_flag=="True")
if (bool_flag == "true" || bool_flag == "True")
enable_opencl_throttling = true;
else if(bool_flag=="false" || bool_flag=="False")
else if (bool_flag == "false" || bool_flag == "False")
enable_opencl_throttling = false;
bool_flag="";
bool_flag = "";
}

if(provider_options_map.find("enable_dynamic_shapes") != provider_options_map.end()){
if (provider_options_map.find("enable_dynamic_shapes") != provider_options_map.end()) {
bool_flag = provider_options_map.at("enable_dynamic_shapes");
if(bool_flag=="true" || bool_flag=="True")
if (bool_flag == "true" || bool_flag == "True")
enable_dynamic_shapes = true;
else if(bool_flag=="false" || bool_flag=="False")
else if (bool_flag == "false" || bool_flag == "False")
enable_dynamic_shapes = false;
}
return std::make_shared<OpenVINOProviderFactory>(device_type,
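The three boolean-flag blocks above share one pattern: look up the key, then accept "true"/"True" or "false"/"False" and ignore anything else. A hypothetical refactor sketch (not in this commit; `ParseBoolOption` is an invented name) that factors the pattern out:

```cpp
#include <map>
#include <string>

// Parses "true"/"True"/"false"/"False"; leaves `value` untouched for any
// other string, mirroring the blocks in openvino_provider_factory.cc.
void ParseBoolOption(const std::map<std::string, std::string>& options,
                     const std::string& key, bool& value) {
  auto it = options.find(key);
  if (it == options.end()) return;
  if (it->second == "true" || it->second == "True") value = true;
  else if (it->second == "false" || it->second == "False") value = false;
}

int main() {
  std::map<std::string, std::string> opts{{"enable_dynamic_shapes", "True"}};
  bool enable_dynamic_shapes = false;
  ParseBoolOption(opts, "enable_dynamic_shapes", enable_dynamic_shapes);
  return enable_dynamic_shapes ? 0 : 1;
}
```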
@@ -8,7 +8,6 @@
#include "core/providers/providers.h"
#include "core/framework/provider_options.h"

-
struct OrtOpenVINOProviderOptions;

namespace onnxruntime {
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/openvino/ov_versions/capability.cc
@@ -34,15 +34,15 @@ GetCapability::GetCapability(const GraphViewer& graph_viewer_param, std::string
} else if (version_param == "V_2023_0") {
data_ops_ = new DataOps(graph_viewer_, V_2023_0, device_type_);
} else if (version_param == "V_2023_1") {
-      data_ops_ = new DataOps(graph_viewer_, V_2023_1, device_type_);
+    data_ops_ = new DataOps(graph_viewer_, V_2023_1, device_type_);
} else {
data_ops_ = new DataOps(graph_viewer_, V_2023_1, device_type_);
}
}

std::vector<std::unique_ptr<ComputeCapability>> GetCapability::Execute() {
std::vector<std::unique_ptr<ComputeCapability>> result;

// Check if it is a subgraph
if (graph_viewer_.IsSubgraph() && graph_viewer_.Name() == "tf2onnx") {
return result;
8 changes: 4 additions & 4 deletions onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
@@ -1028,10 +1028,10 @@ bool DataOps::node_is_supported(const std::map<std::string, std::set<std::string
// Zero dimension check
for (const auto& dim : shape->dim()) {
if (utils::HasDimValue(dim) && dim.dim_value() == 0) {
if (((device_id_.find("CPU") != std::string::npos) || (device_id_.find("GPU") != std::string::npos) ) &&
((optype == "Expand") || (optype == "Equal") ||
(optype == "Slice") || (optype == "Concat") ||
(optype == "Shape"))) {
if (((device_id_.find("CPU") != std::string::npos) || (device_id_.find("GPU") != std::string::npos)) &&
((optype == "Expand") || (optype == "Equal") ||
(optype == "Slice") || (optype == "Concat") ||
(optype == "Shape"))) {
return;
}
has_unsupported_dimension = true;
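This data_ops.cc hunk only tightens whitespace; the logic it touches treats a zero-valued dimension as unsupported unless the device is CPU/GPU and the op type is in a small allowlist. A simplified standalone sketch of that rule (plain C++, with the ONNX TensorShapeProto replaced by a vector of dim values):

```cpp
#include <cstdint>
#include <set>
#include <string>
#include <vector>

// Returns true if a node whose input shape is `dims` can stay on the
// OpenVINO EP despite a zero dimension, per the allowlist in the hunk.
bool ZeroDimAllowed(const std::string& device_id, const std::string& optype,
                    const std::vector<int64_t>& dims) {
  static const std::set<std::string> kAllowed{
      "Expand", "Equal", "Slice", "Concat", "Shape"};
  for (int64_t d : dims) {
    if (d == 0) {
      const bool cpu_or_gpu = device_id.find("CPU") != std::string::npos ||
                              device_id.find("GPU") != std::string::npos;
      return cpu_or_gpu && kAllowed.count(optype) > 0;
    }
  }
  return true;  // no zero dimension: this check imposes no restriction
}

int main() {
  return ZeroDimAllowed("GPU.0", "Concat", {0, 3}) ? 0 : 1;
}
```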
4 changes: 2 additions & 2 deletions onnxruntime/python/onnxruntime_pybind_schema.cc
@@ -39,8 +39,8 @@ void addGlobalSchemaFunctions(pybind11::module& m) {
#endif
#ifdef USE_OPENVINO
[]() {
-        ProviderOptions provider_options_map;
-        return onnxruntime::OpenVINOProviderFactoryCreator::Create(&provider_options_map);
+          ProviderOptions provider_options_map;
+          return onnxruntime::OpenVINOProviderFactoryCreator::Create(&provider_options_map);
}(),
#endif
#ifdef USE_TENSORRT
8 changes: 4 additions & 4 deletions onnxruntime/python/onnxruntime_pybind_state.cc
@@ -789,19 +789,19 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
continue;
} else if (option.first == "enable_vpu_fast_compile") {
if (!(option.second == "True" || option.second == "true" ||
option.second == "False" || option.second == "false")) {
option.second == "False" || option.second == "false")) {
ORT_THROW("Invalid value passed for enable_vpu_fast_compile: ", option.second);
}
OV_provider_options_map[option.first] = option.second;
} else if (option.first == "enable_opencl_throttling") {
if (!(option.second == "True" || option.second == "true" ||
option.second == "False" || option.second == "false")) {
option.second == "False" || option.second == "false")) {
ORT_THROW("Invalid value passed for enable_opencl_throttling: ", option.second);
}
OV_provider_options_map[option.first] = option.second;
} else if (option.first == "enable_dynamic_shapes") {
if (!(option.second == "True" || option.second == "true" ||
option.second == "False" || option.second == "false")) {
option.second == "False" || option.second == "false")) {
ORT_THROW("Invalid value passed for enable_dynamic_shapes: ", option.second);
}
OV_provider_options_map[option.first] = option.second;
@@ -826,7 +826,7 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
}
}
if (std::shared_ptr<IExecutionProviderFactory> openvino_provider_factory = onnxruntime::OpenVINOProviderFactoryCreator::Create(
&OV_provider_options_map)) {
&OV_provider_options_map)) {
auto p = openvino_provider_factory->CreateProvider();
// Reset global variables config to avoid it being accidentally passed on to the next session
openvino_device_type.clear();
4 changes: 2 additions & 2 deletions onnxruntime/test/perftest/ort_test_session.cc
@@ -421,7 +421,7 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device
#else
ORT_THROW("TensorRT is not supported in this build\n");
#endif
-    } else if (provider_name == onnxruntime::kOpenVINOExecutionProvider) {
+  } else if (provider_name == onnxruntime::kOpenVINOExecutionProvider) {
#ifdef USE_OPENVINO
#ifdef _MSC_VER
std::string ov_string = ToUTF8String(performance_test_config.run_config.ep_runtime_config_string);
@@ -502,7 +502,7 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device
} else {
ov_options[key] = value;
}
-      }else {
+      } else {
ORT_THROW("[ERROR] [OpenVINO] wrong key type entered. Choose from the following runtime key options that are available for OpenVINO. ['device_type', 'device_id', 'enable_vpu_fast_compile', 'num_of_threads', 'cache_dir', 'num_streams', 'enable_opencl_throttling|true'] \n");
}
}
