Merge pull request #2620 from Rjasuja:dev/rjasuja/initial_openvino_push

LiteRT-PiperOrigin-RevId: 784626146
This commit is contained in:
Copybara-Service 2025-07-18 10:32:33 -07:00
commit 24b7d959b6
15 changed files with 1915 additions and 0 deletions

View File

@ -223,3 +223,10 @@ google_tensor()
load("//third_party/litert_gpu:workspace.bzl", "litert_gpu")
litert_gpu()
load("//third_party/intel_openvino:openvino.bzl", "openvino_configure")

# Sets up the @intel_openvino external repository; the BUILD file is supplied
# by the project since OpenVINO does not ship Bazel build rules.
openvino_configure(
    name = "intel_openvino",
    build_file = "//third_party/intel_openvino:openvino.bazel",
)

View File

@ -208,6 +208,7 @@ UBUNTU_EXCLUDED_TARGETS=(
"-//tflite/tools/benchmark/experimental/delegate_performance/android/..."
"-//tflite/tools/benchmark/experimental/firebase/android/..."
# Note: don't need to exclude ios as ios starts with BAZEL.apple
"-//litert/vendors/intel_openvino/..."
)
UBUNTU_EXCLUDED_EXPERIMENTAL_TARGETS=(

View File

@ -93,6 +93,8 @@ LITERT_EXCLUDED_TARGETS=(
"-//litert/tools:dump_test"
# Requires c++20.
"-//litert/tools:apply_plugin_test"
# TODO: Enable once OpenVINO support is ready.
"-//litert/vendors/intel_openvino/..."
)

18
litert/vendors/intel_openvino/BUILD vendored Normal file
View File

@ -0,0 +1,18 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Vendored package root for the Intel OpenVINO LiteRT integration; visibility
# is restricted so only LiteRT targets may depend on it.
package(
    default_visibility = ["//litert:__subpackages__"],
)

View File

@ -0,0 +1,165 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LiteRT build macros for vendor shared libraries, static libs, and tests.
load("//litert/build_common:litert_build_defs.bzl", "litert_dynamic_lib", "litert_lib", "litert_test")

package(
    # copybara:uncomment default_applicable_licenses = ["@org_tensorflow//tensorflow:license"],
    default_visibility = ["//litert:__subpackages__"],
)
# True when the build passes --define=OPENVINO_NATIVE_DIR_SET=true, i.e. a
# native OpenVINO installation is available. The targets below only link the
# @intel_openvino dependency under this setting.
config_setting(
    name = "openvino_native_dir_set",
    values = {"define": "OPENVINO_NATIVE_DIR_SET=true"},
)
# Shared library implementing the LiteRT compiler-plugin C ABI for Intel
# OpenVINO; loaded dynamically by the LiteRT runtime.
litert_dynamic_lib(
    name = "openvino_compiler_plugin",
    srcs = ["openvino_compiler_plugin.cc"],
    hdrs = [
        "//litert/vendors/c:litert_compiler_plugin.h",
    ],
    # Only the LiteRT plugin API symbols are exported from the .so.
    export_litert_only = True,
    shared_lib_name = "intel_openvino_compiler_plugin_so",
    so_name = "libLiteRtCompilerPlugin_IntelOpenvino.so",
    tags = [
        "nobuilder",
        "notap",
    ],
    ungrte = True,
    visibility = ["//litert:__subpackages__"],
    deps = [
        ":graph_iterator",
        "//litert/c:litert_common",
        "//litert/c:litert_op_code",
        "//litert/cc:litert_expected",
        "//litert/cc:litert_macros",
        "//litert/cc:litert_model",
    ] + select({
        # OpenVINO itself is linked only when a native install is configured.
        ":openvino_native_dir_set": ["@intel_openvino//:openvino"],
        "//conditions:default": [],
    }),
)
# Iterator that walks a LiteRT graph and presents it to the OpenVINO TFLite
# frontend, node by node, via :decoder.
litert_lib(
    name = "graph_iterator",
    srcs = ["graph_iterator.cc"],
    hdrs = ["graph_iterator.h"],
    tags = [
        "nobuilder",
        "notap",
    ],
    deps = [
        ":decoder",
        "//litert/c:litert_logging",
        "//litert/c:litert_model",
        "//litert/c:litert_op_code",
        "//litert/cc:litert_element_type",
        "//litert/cc:litert_model",
    ] + select({
        ":openvino_native_dir_set": ["@intel_openvino//:openvino"],
        "//conditions:default": [],
    }),
)
# Adapter exposing a single LiteRT op as an OpenVINO TFLite-frontend decoder
# node (see decoder.cc / decoder.h).
litert_lib(
    name = "decoder",
    srcs = ["decoder.cc"],
    hdrs = ["decoder.h"],
    tags = [
        "nobuilder",
        "notap",
    ],
    deps = [
        "//litert/c:litert_logging",
        "//litert/c:litert_options",
        "//litert/cc:litert_model",
        "//litert/tools:dump",
    ] + select({
        ":openvino_native_dir_set": ["@intel_openvino//:openvino"],
        "//conditions:default": [],
    }),
)
# Unit test for :decoder. The OpenVINO dependency is linked only when
# :openvino_native_dir_set is enabled.
litert_test(
    name = "decoder_test",
    srcs = [
        "decoder_test.cc",
    ],
    data = [
        "//litert/test:mlir_test_data",
        "//litert/test:tflite_test_data",
    ],
    tags = [
        "nobuilder",
        "notap",
    ],
    deps = [
        ":decoder",
        "//litert/test:common",
        "//litert/test:matchers_oss",
        "//litert/test:test_models",
        "//litert/vendors/cc:litert_compiler_plugin",
        "@com_google_absl//absl/log:absl_check",
        "@com_google_absl//absl/strings:string_view",
    ] + select({
        ":openvino_native_dir_set": ["@intel_openvino//:openvino"],
        "//conditions:default": [],
    }),
)
# End-to-end test for the compiler plugin shared library. Restricted to
# Android/Linux via target_compatible_with; other platforms are marked
# incompatible.
litert_test(
    name = "openvino_compiler_plugin_test",
    srcs = [
        "openvino_compiler_plugin_test.cc",
    ],
    data = [
        "//litert/test:mlir_test_data",
        "//litert/test:tflite_test_data",
    ],
    tags = [
        # Tests with ungrte deps do not currently work on forge.
        "no-remote-exec",
        "notap",
        "no_oss",
        "nobuilder",
    ],
    target_compatible_with = select({
        "@platforms//os:android": [],
        "@platforms//os:linux": [],
        "//conditions:default": ["@platforms//:incompatible"],
    }),
    ungrte = True,
    use_sys_malloc = True,
    deps = [
        ":openvino_compiler_plugin",  # buildcleaner: keep
        "//litert/c:litert_common",
        "//litert/c:litert_logging",
        "//litert/c:litert_op_code",
        "//litert/cc:litert_expected",
        "//litert/cc:litert_macros",
        "//litert/cc:litert_model",
        "//litert/cc:litert_model_predicates",
        "//litert/test:common",
        "//litert/test:matchers_oss",
        "//litert/test:test_models",
        "//litert/vendors/cc:litert_compiler_plugin",
        "@com_google_absl//absl/log:absl_check",
        "@com_google_absl//absl/strings:string_view",
    ] + select({
        ":openvino_native_dir_set": ["@intel_openvino//:openvino"],
        "//conditions:default": [],
    }),
)

View File

@ -0,0 +1,702 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "litert/vendors/intel_openvino/compiler/decoder.h"

#include <map>
#include <string>
#include <utility>

#include "litert/c/litert_op_options.h"
#include "litert/tools/dump.h"
namespace litert {
namespace openvino {
// This has been picked from the openvino build:
// build/src/frontends/tensorflow_lite/src/schema_generated.h
// Lookup table from LiteRT op codes to the TFLite schema op names the
// OpenVINO TFLite frontend expects. The names have been picked from the
// openvino build: build/src/frontends/tensorflow_lite/src/schema_generated.h
// Lookups are linear (see GetOvOpType); 159 entries.
constexpr std::array<std::pair<LiteRtOpCode, const char*>, 159> kLitertOvMap{
    {{kLiteRtOpCodeTflAdd, "ADD"},
     {kLiteRtOpCodeTflAveragePool2d, "AVERAGE_POOL_2D"},
     {kLiteRtOpCodeTflConcatenation, "CONCATENATION"},
     {kLiteRtOpCodeTflConv2d, "CONV_2D"},
     {kLiteRtOpCodeTflDepthwiseConv2d, "DEPTHWISE_CONV_2D"},
     {kLiteRtOpCodeTflDepthToSpace, "DEPTH_TO_SPACE"},
     {kLiteRtOpCodeTflDequantize, "DEQUANTIZE"},
     {kLiteRtOpCodeTflEmbeddingLookup, "EMBEDDING_LOOKUP"},
     {kLiteRtOpCodeTflFloor, "FLOOR"},
     {kLiteRtOpCodeTflFullyConnected, "FULLY_CONNECTED"},
     {kLiteRtOpCodeTflHashtableLookup, "HASHTABLE_LOOKUP"},
     {kLiteRtOpCodeTflL2Normalization, "L2_NORMALIZATION"},
     {kLiteRtOpCodeTflL2Pool2d, "L2_POOL_2D"},
     {kLiteRtOpCodeTflLocalResponseNormalization,
      "LOCAL_RESPONSE_NORMALIZATION"},
     {kLiteRtOpCodeTflLogistic, "LOGISTIC"},
     {kLiteRtOpCodeTflLshProjection, "LSH_PROJECTION"},
     {kLiteRtOpCodeTflLstm, "LSTM"},
     {kLiteRtOpCodeTflMaxPool2d, "MAX_POOL_2D"},
     {kLiteRtOpCodeTflMul, "MUL"},
     {kLiteRtOpCodeTflRelu, "RELU"},
     {kLiteRtOpCodeTflReluN1To1, "RELU_N1_TO_1"},
     {kLiteRtOpCodeTflRelu6, "RELU6"},
     {kLiteRtOpCodeTflReshape, "RESHAPE"},
     {kLiteRtOpCodeTflResizeBilinear, "RESIZE_BILINEAR"},
     {kLiteRtOpCodeTflRnn, "RNN"},
     {kLiteRtOpCodeTflSoftmax, "SOFTMAX"},
     {kLiteRtOpCodeTflSpaceToDepth, "SPACE_TO_DEPTH"},
     {kLiteRtOpCodeTflSvdf, "SVDF"},
     {kLiteRtOpCodeTflTanh, "TANH"},
     {kLiteRtOpCodeTflConcatEmbeddings, "CONCAT_EMBEDDINGS"},
     {kLiteRtOpCodeTflSkipGram, "SKIP_GRAM"},
     {kLiteRtOpCodeTflCall, "CALL"},
     {kLiteRtOpCodeTflCustom, "CUSTOM"},
     {kLiteRtOpCodeTflEmbeddingLookupSparse, "EMBEDDING_LOOKUP_SPARSE"},
     {kLiteRtOpCodeTflPad, "PAD"},
     {kLiteRtOpCodeTflUnidirectionalSequenceRnn, "UNIDIRECTIONAL_SEQUENCE_RNN"},
     {kLiteRtOpCodeTflGather, "GATHER"},
     {kLiteRtOpCodeTflBatchToSpaceNd, "BATCH_TO_SPACE_ND"},
     {kLiteRtOpCodeTflSpaceToBatchNd, "SPACE_TO_BATCH_ND"},
     {kLiteRtOpCodeTflTranspose, "TRANSPOSE"},
     {kLiteRtOpCodeTflMean, "MEAN"},
     {kLiteRtOpCodeTflSub, "SUB"},
     {kLiteRtOpCodeTflDiv, "DIV"},
     {kLiteRtOpCodeTflSqueeze, "SQUEEZE"},
     {kLiteRtOpCodeTflUnidirectionalSequenceLstm,
      "UNIDIRECTIONAL_SEQUENCE_LSTM"},
     {kLiteRtOpCodeTflStridedSlice, "STRIDED_SLICE"},
     {kLiteRtOpCodeTflBidirectionalSequenceRnn, "BIDIRECTIONAL_SEQUENCE_RNN"},
     {kLiteRtOpCodeTflExp, "EXP"},
     {kLiteRtOpCodeTflTopkV2, "TOPK_V2"},
     {kLiteRtOpCodeTflSplit, "SPLIT"},
     {kLiteRtOpCodeTflLogSoftmax, "LOG_SOFTMAX"},
     {kLiteRtOpCodeTflDelegate, "DELEGATE"},
     {kLiteRtOpCodeTflBidirectionalSequenceLstm, "BIDIRECTIONAL_SEQUENCE_LSTM"},
     {kLiteRtOpCodeTflCast, "CAST"},
     {kLiteRtOpCodeTflPrelu, "PRELU"},
     {kLiteRtOpCodeTflMaximum, "MAXIMUM"},
     {kLiteRtOpCodeTflArgMax, "ARG_MAX"},
     {kLiteRtOpCodeTflMinimum, "MINIMUM"},
     {kLiteRtOpCodeTflLess, "LESS"},
     {kLiteRtOpCodeTflNeg, "NEG"},
     {kLiteRtOpCodeTflPadv2, "PADV2"},
     {kLiteRtOpCodeTflGreater, "GREATER"},
     {kLiteRtOpCodeTflGreaterEqual, "GREATER_EQUAL"},
     {kLiteRtOpCodeTflLessEqual, "LESS_EQUAL"},
     {kLiteRtOpCodeTflSelect, "SELECT"},
     {kLiteRtOpCodeTflSlice, "SLICE"},
     {kLiteRtOpCodeTflSin, "SIN"},
     {kLiteRtOpCodeTflTransposeConv, "TRANSPOSE_CONV"},
     {kLiteRtOpCodeTflSparseToDense, "SPARSE_TO_DENSE"},
     {kLiteRtOpCodeTflTile, "TILE"},
     {kLiteRtOpCodeTflExpandDims, "EXPAND_DIMS"},
     {kLiteRtOpCodeTflEqual, "EQUAL"},
     {kLiteRtOpCodeTflNotEqual, "NOT_EQUAL"},
     {kLiteRtOpCodeTflLog, "LOG"},
     {kLiteRtOpCodeTflSum, "SUM"},
     {kLiteRtOpCodeTflSqrt, "SQRT"},
     {kLiteRtOpCodeTflRsqrt, "RSQRT"},
     {kLiteRtOpCodeTflShape, "SHAPE"},
     {kLiteRtOpCodeTflPow, "POW"},
     {kLiteRtOpCodeTflArgMin, "ARG_MIN"},
     {kLiteRtOpCodeTflFakeQuant, "FAKE_QUANT"},
     {kLiteRtOpCodeTflReduceProd, "REDUCE_PROD"},
     {kLiteRtOpCodeTflReduceMax, "REDUCE_MAX"},
     {kLiteRtOpCodeTflPack, "PACK"},
     {kLiteRtOpCodeTflLogicalOr, "LOGICAL_OR"},
     {kLiteRtOpCodeTflOneHot, "ONE_HOT"},
     {kLiteRtOpCodeTflLogicalAnd, "LOGICAL_AND"},
     {kLiteRtOpCodeTflLogicalNot, "LOGICAL_NOT"},
     {kLiteRtOpCodeTflUnpack, "UNPACK"},
     {kLiteRtOpCodeTflReduceMin, "REDUCE_MIN"},
     {kLiteRtOpCodeTflFloorDiv, "FLOOR_DIV"},
     {kLiteRtOpCodeTflReduceAny, "REDUCE_ANY"},
     {kLiteRtOpCodeTflSquare, "SQUARE"},
     {kLiteRtOpCodeTflZerosLike, "ZEROS_LIKE"},
     {kLiteRtOpCodeTflFill, "FILL"},
     {kLiteRtOpCodeTflFloorMod, "FLOOR_MOD"},
     {kLiteRtOpCodeTflRange, "RANGE"},
     {kLiteRtOpCodeTflResizeNearestNeighbor, "RESIZE_NEAREST_NEIGHBOR"},
     {kLiteRtOpCodeTflLeakyRelu, "LEAKY_RELU"},
     {kLiteRtOpCodeTflSquaredDifference, "SQUARED_DIFFERENCE"},
     {kLiteRtOpCodeTflMirrorPad, "MIRROR_PAD"},
     {kLiteRtOpCodeTflAbs, "ABS"},
     {kLiteRtOpCodeTflSplitV, "SPLIT_V"},
     {kLiteRtOpCodeTflUnique, "UNIQUE"},
     {kLiteRtOpCodeTflCeil, "CEIL"},
     {kLiteRtOpCodeTflReverseV2, "REVERSE_V2"},
     {kLiteRtOpCodeTflAddN, "ADD_N"},
     {kLiteRtOpCodeTflGatherNd, "GATHER_ND"},
     {kLiteRtOpCodeTflCos, "COS"},
     {kLiteRtOpCodeTflWhere, "WHERE"},
     {kLiteRtOpCodeTflRank, "RANK"},
     {kLiteRtOpCodeTflElu, "ELU"},
     {kLiteRtOpCodeTflReverseSequence, "REVERSE_SEQUENCE"},
     {kLiteRtOpCodeTflMatrixDiag, "MATRIX_DIAG"},
     {kLiteRtOpCodeTflQuantize, "QUANTIZE"},
     {kLiteRtOpCodeTflMatrixSetDiag, "MATRIX_SET_DIAG"},
     {kLiteRtOpCodeTflRound, "ROUND"},
     {kLiteRtOpCodeTflHardSwish, "HARD_SWISH"},
     {kLiteRtOpCodeTflIf, "IF"},
     {kLiteRtOpCodeTflWhile, "WHILE"},
     {kLiteRtOpCodeTflNonMaxSuppressionV4, "NON_MAX_SUPPRESSION_V4"},
     {kLiteRtOpCodeTflNonMaxSuppressionV5, "NON_MAX_SUPPRESSION_V5"},
     {kLiteRtOpCodeTflScatterNd, "SCATTER_ND"},
     {kLiteRtOpCodeTflSelectV2, "SELECT_V2"},
     {kLiteRtOpCodeTflDensify, "DENSIFY"},
     {kLiteRtOpCodeTflSegmentSum, "SEGMENT_SUM"},
     {kLiteRtOpCodeTflBatchMatmul, "BATCH_MATMUL"},
     {kLiteRtOpCodeTflPlaceholderForGreaterOpCodeTfls,
      "PLACEHOLDER_FOR_GREATER_OP_CODES"},
     {kLiteRtOpCodeTflCumsum, "CUMSUM"},
     {kLiteRtOpCodeTflCallOnce, "CALL_ONCE"},
     {kLiteRtOpCodeTflBroadcastTo, "BROADCAST_TO"},
     {kLiteRtOpCodeTflRfft2d, "RFFT2D"},
     {kLiteRtOpCodeTflConv3d, "CONV_3D"},
     {kLiteRtOpCodeTflImag, "IMAG"},
     {kLiteRtOpCodeTflReal, "REAL"},
     {kLiteRtOpCodeTflComplexAbs, "COMPLEX_ABS"},
     {kLiteRtOpCodeTflHashtable, "HASHTABLE"},
     {kLiteRtOpCodeTflHashtableFind, "HASHTABLE_FIND"},
     {kLiteRtOpCodeTflHashtableImport, "HASHTABLE_IMPORT"},
     {kLiteRtOpCodeTflHashtableSize, "HASHTABLE_SIZE"},
     {kLiteRtOpCodeTflReduceAll, "REDUCE_ALL"},
     {kLiteRtOpCodeTflConv3dTranspose, "CONV_3D_TRANSPOSE"},
     {kLiteRtOpCodeTflVarHandle, "VAR_HANDLE"},
     {kLiteRtOpCodeTflReadVariable, "READ_VARIABLE"},
     {kLiteRtOpCodeTflAssignVariable, "ASSIGN_VARIABLE"},
     {kLiteRtOpCodeTflBroadcastArgs, "BROADCAST_ARGS"},
     {kLiteRtOpCodeTflRandomStandardNormal, "RANDOM_STANDARD_NORMAL"},
     {kLiteRtOpCodeTflBucketize, "BUCKETIZE"},
     {kLiteRtOpCodeTflRandomUniform, "RANDOM_UNIFORM"},
     {kLiteRtOpCodeTflMultinomial, "MULTINOMIAL"},
     {kLiteRtOpCodeTflGelu, "GELU"},
     {kLiteRtOpCodeTflDynamicUpdateSlice, "DYNAMIC_UPDATE_SLICE"},
     {kLiteRtOpCodeTflRelu0To1, "RELU_0_TO_1"},
     {kLiteRtOpCodeTflUnsortedSegmentProd, "UNSORTED_SEGMENT_PROD"},
     {kLiteRtOpCodeTflUnsortedSegmentMax, "UNSORTED_SEGMENT_MAX"},
     {kLiteRtOpCodeTflUnsortedSegmentSum, "UNSORTED_SEGMENT_SUM"},
     {kLiteRtOpCodeTflAtan2, "ATAN2"},
     {kLiteRtOpCodeTflUnsortedSegmentMin, "UNSORTED_SEGMENT_MIN"},
     {kLiteRtOpCodeTflSign, "SIGN"}}};
constexpr const char* GetOvOpType(const LiteRtOpCode op_code) {
for (const auto& entry : kLitertOvMap) {
if (entry.first == op_code) return entry.second;
}
return "";
}
// Wraps a LiteRT op as an OpenVINO TFLite-frontend decoder node.
//
// The tensor meta-info vectors are taken by value and moved into the members
// to avoid an extra copy (the pass-by-value parameters were previously
// copy-constructed into the members). `node_index` is only used to build a
// unique node name of the form "<OP_TYPE>_id_<index>".
DecoderOperation::DecoderOperation(
    std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>
        input_tensor_info,
    std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>
        output_tensor_info,
    const litert::Op& litert_op, size_t node_index)
    : input_tensor_info_(std::move(input_tensor_info)),
      output_tensor_info_(std::move(output_tensor_info)),
      litert_op_(litert_op.Get()),
      litert_op_code_(litert_op.Code()) {
  op_type_ = GetOvOpType(litert_op_code_);
  op_name_ = op_type_ + "_id_" + std::to_string(node_index);
  LITERT_LOG(LITERT_VERBOSE, "op_type(%s) op_name(%s)", op_type_.c_str(),
             op_name_.c_str());
}
// Builds the litert::Unexpected used as the error branch of
// LITERT_RETURN_IF_ERROR when a LiteRt*Option getter fails; `attr` and
// `op_name` are stringified into the error message.
#define ERROR_LOG_STR(attr, op_name) \
  litert::Unexpected( \
      kLiteRtStatusErrorRuntimeFailure, \
      "Failed to get " + std::string(attr) + " for " + std::string(op_name))

// Answers the OpenVINO TFLite frontend's attribute queries for the wrapped
// op. Each supported op code maps the frontend's attribute name onto the
// matching LiteRt*Option getter; strides/dilations/ksize are returned as
// NHWC-shaped vectors {1, H, W, 1}. Unsupported attribute names or op codes
// are logged at error level and answered with nullptr.
ov::Any DecoderOperation::get_attribute(const std::string& name) const {
  LITERT_LOG(LITERT_VERBOSE, "get_attr %s for %s", name.c_str(),
             op_name_.c_str());
  switch (litert_op_code_) {
    case LiteRtOpCode::kLiteRtOpCodeTflConv2d:
      if (name == "strides") {
        int32_t stride_w;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConv2dStrideWOption(litert_op_, &stride_w),
            ERROR_LOG_STR("stride_w", op_name_.c_str()));
        int32_t stride_h;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConv2dStrideHOption(litert_op_, &stride_h),
            ERROR_LOG_STR("stride_h", op_name_.c_str()));
        return std::vector<int64_t>{1, stride_h, stride_w, 1};
      } else if (name == "padding") {
        uint32_t padding;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConv2dPaddingOption(litert_op_, &padding),
            ERROR_LOG_STR("padding", op_name_.c_str()));
        return std::string(
            tflite::EnumNamePadding(static_cast<tflite::Padding>(padding)));
      } else if (name == "dilations") {
        int32_t dilation_w_factor;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConv2dDilationWOption(litert_op_, &dilation_w_factor),
            ERROR_LOG_STR("dilation_w_factor", op_name_.c_str()));
        int32_t dilation_h_factor;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConv2dDilationHOption(litert_op_, &dilation_h_factor),
            ERROR_LOG_STR("dilation_h_factor", op_name_.c_str()));
        return std::vector<int64_t>{1, dilation_h_factor, dilation_w_factor, 1};
      } else if (name == "activation") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConv2dFusedActivationOption(litert_op_, &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else if (name == "data_format") {
        return "NHWC";
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflDepthwiseConv2d:
      if (name == "strides") {
        int32_t stride_w;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthwiseConv2dStrideWOption(litert_op_, &stride_w),
            ERROR_LOG_STR("stride_w", op_name_.c_str()));
        int32_t stride_h;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthwiseConv2dStrideHOption(litert_op_, &stride_h),
            ERROR_LOG_STR("stride_h", op_name_.c_str()));
        return std::vector<int64_t>{1, stride_h, stride_w, 1};
      } else if (name == "padding") {
        uint32_t padding;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthwiseConv2dPaddingOption(litert_op_, &padding),
            ERROR_LOG_STR("padding", op_name_.c_str()));
        return std::string(
            tflite::EnumNamePadding(static_cast<tflite::Padding>(padding)));
      } else if (name == "dilations") {
        int32_t dilation_w_factor;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthwiseConv2dDilationWOption(litert_op_,
                                                    &dilation_w_factor),
            ERROR_LOG_STR("dilation_w_factor", op_name_.c_str()));
        int32_t dilation_h_factor;
        // NOTE(review): this getter ends in "Options" while every sibling
        // getter ends in "Option" — confirm the name against
        // litert/c/litert_op_options.h.
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthwiseConv2dDilationHOptions(litert_op_,
                                                     &dilation_h_factor),
            ERROR_LOG_STR("dilation_h_factor", op_name_.c_str()));
        return std::vector<int64_t>{1, dilation_h_factor, dilation_w_factor, 1};
      } else if (name == "activation") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthwiseConv2dFusedActivationOption(litert_op_,
                                                          &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else if (name == "group") {
        // This information (depth_multiplier) is marked as redundant in
        // LiteRT.
        // TODO: Need to check what is the correct value to be returned.
        return 0;
      } else if (name == "data_format") {
        return "NHWC";
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflSplit:
      if (name == "num_split") {
        int32_t num_split;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetSplitNumSplitsOption(litert_op_, &num_split),
            ERROR_LOG_STR("num_split", op_name_.c_str()));
        return static_cast<int64_t>(num_split);
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflFullyConnected:
      if (name == "weights_format") {
        uint32_t weights_format;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetFullyConnectedWeightsFormatOption(litert_op_,
                                                       &weights_format),
            ERROR_LOG_STR("weights_format", op_name_.c_str()));
        return static_cast<int8_t>(weights_format);
      } else if (name == "keep_num_dims") {
        bool keep_num_dims;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetFullyConnectedKeepNumDimsOption(litert_op_,
                                                     &keep_num_dims),
            ERROR_LOG_STR("keep_num_dims", op_name_.c_str()));
        return keep_num_dims;
      } else if (name == "fused_activation_function") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetFullyConnectedFusedActivationOption(litert_op_,
                                                         &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflAdd:
      if (name == "fused_activation_function") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAddFusedActivationOption(litert_op_, &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflReshape:
      if (name == "new_shape") {
        const int32_t* reshape_new_shape;
        int32_t new_shape_size;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetReshapeNewShapeOption(litert_op_, &reshape_new_shape,
                                           &new_shape_size),
            ERROR_LOG_STR("new_shape", op_name_.c_str()));
        // Widen the int32 shape entries to the int64 vector OV expects.
        std::vector<int64_t> new_shape(new_shape_size);
        for (int i = 0; i < new_shape_size; ++i) {
          new_shape[i] = reshape_new_shape[i];
        }
        return new_shape;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflMean:
      if (name == "keep_dims") {
        bool keep_dims;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMeanKeepDimsOption(litert_op_, &keep_dims),
            ERROR_LOG_STR("keep_dims", op_name_.c_str()));
        return keep_dims;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflResizeBilinear:
      if (name == "align_corners") {
        bool align_corners;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetResizeBilinearAlignCornersOption(litert_op_,
                                                      &align_corners),
            ERROR_LOG_STR("align_corners", op_name_.c_str()));
        return align_corners;
      } else if (name == "half_pixel_centers") {
        bool half_pixel_centers;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetResizeBilinearHalfPixelCenterOption(litert_op_,
                                                         &half_pixel_centers),
            ERROR_LOG_STR("half_pixel_centers", op_name_.c_str()));
        return half_pixel_centers;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflResizeNearestNeighbor:
      if (name == "align_corners") {
        bool align_corners;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetResizeNearestNeighborAlignCornersOption(litert_op_,
                                                             &align_corners),
            ERROR_LOG_STR("align_corners", op_name_.c_str()));
        return align_corners;
      } else if (name == "half_pixel_centers") {
        bool half_pixel_centers;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetResizeNearestNeighborHalfPixelCenterOption(
                litert_op_, &half_pixel_centers),
            ERROR_LOG_STR("half_pixel_centers", op_name_.c_str()));
        return half_pixel_centers;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflConcatenation:
      if (name == "axis") {
        int32_t axis;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetConcatenationAxisOption(litert_op_, &axis),
            ERROR_LOG_STR("axis", op_name_.c_str()));
        return axis;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflMaxPool2d:
      if (name == "strides") {
        int32_t stride_w;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMaxPool2dStrideWOption(litert_op_, &stride_w),
            ERROR_LOG_STR("stride_w", op_name_.c_str()));
        int32_t stride_h;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMaxPool2dStrideHOption(litert_op_, &stride_h),
            ERROR_LOG_STR("stride_h", op_name_.c_str()));
        return std::vector<int64_t>{1, stride_h, stride_w, 1};
      } else if (name == "padding") {
        uint32_t padding;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMaxPool2dPaddingOption(litert_op_, &padding),
            ERROR_LOG_STR("padding", op_name_.c_str()));
        return std::string(
            tflite::EnumNamePadding(static_cast<tflite::Padding>(padding)));
      } else if (name == "ksize") {
        int32_t filter_width;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMaxPool2dFilterWidthOption(litert_op_, &filter_width),
            ERROR_LOG_STR("filter_width", op_name_.c_str()));
        int32_t filter_height;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMaxPool2dFilterHeightOption(litert_op_, &filter_height),
            ERROR_LOG_STR("filter_height", op_name_.c_str()));
        return std::vector<int64_t>{1, filter_height, filter_width, 1};
      } else if (name == "activation") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMaxPool2dFusedActivationOption(litert_op_,
                                                    &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else if (name == "data_format") {
        return "NHWC";
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflAveragePool2d:
      if (name == "strides") {
        int32_t stride_w;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAveragePool2dStrideWOption(litert_op_, &stride_w),
            ERROR_LOG_STR("stride_w", op_name_.c_str()));
        int32_t stride_h;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAveragePool2dStrideHOption(litert_op_, &stride_h),
            ERROR_LOG_STR("stride_h", op_name_.c_str()));
        return std::vector<int64_t>{1, stride_h, stride_w, 1};
      } else if (name == "padding") {
        uint32_t padding;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAveragePool2dPaddingOption(litert_op_, &padding),
            ERROR_LOG_STR("padding", op_name_.c_str()));
        return std::string(
            tflite::EnumNamePadding(static_cast<tflite::Padding>(padding)));
      } else if (name == "ksize") {
        int32_t filter_width;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAveragePool2dFilterWidthOption(litert_op_, &filter_width),
            ERROR_LOG_STR("filter_width", op_name_.c_str()));
        int32_t filter_height;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAveragePool2dFilterHeightOption(litert_op_,
                                                     &filter_height),
            ERROR_LOG_STR("filter_height", op_name_.c_str()));
        return std::vector<int64_t>{1, filter_height, filter_width, 1};
      } else if (name == "activation") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetAveragePool2dFusedActivationOption(litert_op_,
                                                        &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else if (name == "data_format") {
        return "NHWC";
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflMul:
      if (name == "fused_activation_function") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetMulFusedActivationOption(litert_op_, &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflTransposeConv:
      if (name == "strides") {
        int32_t stride_w;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetTransposeConvStrideWOption(litert_op_, &stride_w),
            ERROR_LOG_STR("stride_w", op_name_.c_str()));
        int32_t stride_h;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetTransposeConvStrideHOption(litert_op_, &stride_h),
            ERROR_LOG_STR("stride_h", op_name_.c_str()));
        return std::vector<int64_t>{1, stride_h, stride_w, 1};
      } else if (name == "padding") {
        uint32_t padding;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetTransposeConvPaddingOption(litert_op_, &padding),
            ERROR_LOG_STR("padding", op_name_.c_str()));
        return std::string(
            tflite::EnumNamePadding(static_cast<tflite::Padding>(padding)));
      } else if (name == "dilations") {
        // TODO: This information is not available in litert. Returning value
        // similar to OV tflite decoder.
        return std::vector<int64_t>{1, 1, 1, 1};
      } else if (name == "activation") {
        uint32_t fused_activation;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetTransposeConvFusedActivationOption(litert_op_,
                                                        &fused_activation),
            ERROR_LOG_STR("fused_activation", op_name_.c_str()));
        return tflite::EnumNameActivationFunctionType(
            static_cast<tflite::ActivationFunctionType>(fused_activation));
      } else if (name == "data_format") {
        return "NHWC";
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflSoftmax:
      if (name == "beta") {
        float beta;
        LITERT_RETURN_IF_ERROR(LiteRtGetSoftmaxBetaOption(litert_op_, &beta),
                               ERROR_LOG_STR("beta", op_name_.c_str()));
        return beta;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflMirrorPad:
      if (name == "mode") {
        // TODO: Currently litert_options doesn't provide an option for this.
        // Hence hardcoding to "REFLECT" mode.
        return std::string("REFLECT");
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflStridedSlice:
      if (name == "begin_mask") {
        int32_t begin_mask;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetStridedSliceBeginMaskOption(litert_op_, &begin_mask),
            ERROR_LOG_STR("begin_mask", op_name_.c_str()));
        return begin_mask;
      } else if (name == "end_mask") {
        int32_t end_mask;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetStridedSliceEndMaskOption(litert_op_, &end_mask),
            ERROR_LOG_STR("end_mask", op_name_.c_str()));
        return end_mask;
      } else if (name == "new_axis_mask") {
        int32_t new_axis_mask;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetStridedSliceNewAxisMaskOption(litert_op_, &new_axis_mask),
            ERROR_LOG_STR("new_axis_mask", op_name_.c_str()));
        return new_axis_mask;
      } else if (name == "ellipsis_mask") {
        int32_t ellipsis_mask;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetStridedSliceEllipsisMaskOption(litert_op_, &ellipsis_mask),
            ERROR_LOG_STR("ellipsis_mask", op_name_.c_str()));
        return ellipsis_mask;
      } else if (name == "shrink_axis_mask") {
        int32_t shrink_axis_mask;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetStridedSliceShrinkAxisMaskOption(litert_op_,
                                                      &shrink_axis_mask),
            ERROR_LOG_STR("shrink_axis_mask", op_name_.c_str()));
        return shrink_axis_mask;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflDepthToSpace:
      if (name == "block_size") {
        int32_t block_size;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetDepthToSpaceBlockSizeOption(litert_op_, &block_size),
            ERROR_LOG_STR("block_size", op_name_.c_str()));
        return block_size;
      } else if (name == "data_format") {
        return "NHWC";
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflGather:
      if (name == "axis") {
        int32_t axis;
        LITERT_RETURN_IF_ERROR(LiteRtGetGatherAxisOption(litert_op_, &axis),
                               ERROR_LOG_STR("axis", op_name_.c_str()));
        return axis;
      } else if (name == "batch_dims") {
        int32_t batch_dims;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetGatherBatchDimsOption(litert_op_, &batch_dims),
            ERROR_LOG_STR("batch_dims", op_name_.c_str()));
        return batch_dims;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflBatchMatmul:
      if (name == "adj_x") {
        bool adj_x;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetBatchMatmulAdjXOption(litert_op_, &adj_x),
            ERROR_LOG_STR("adj_x", op_name_.c_str()));
        return adj_x;
      } else if (name == "adj_y") {
        bool adj_y;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetBatchMatmulAdjYOption(litert_op_, &adj_y),
            ERROR_LOG_STR("adj_y", op_name_.c_str()));
        return adj_y;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflLeakyRelu:
      if (name == "alpha") {
        float alpha;
        LITERT_RETURN_IF_ERROR(
            LiteRtGetLeakyReluAlphaOption(litert_op_, &alpha),
            ERROR_LOG_STR("alpha", op_name_.c_str()));
        return alpha;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    case LiteRtOpCode::kLiteRtOpCodeTflPack:
      if (name == "axis") {
        int32_t axis;
        LITERT_RETURN_IF_ERROR(LiteRtGetPackAxisOption(litert_op_, &axis),
                               ERROR_LOG_STR("axis", op_name_.c_str()));
        return axis;
      } else {
        LITERT_LOG(LITERT_ERROR, "Unsupported attribute %s", name.c_str());
        return nullptr;
      }
    default:
      LITERT_LOG(LITERT_ERROR, "Unsupported op type %s", op_type_.c_str());
      return nullptr;
  }
}
} // namespace openvino
} // namespace litert

View File

@ -0,0 +1,159 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ODML_LITERT_LITERT_VENDORS_OPENVINO_COMPILER_DECODER_H_
#define ODML_LITERT_LITERT_VENDORS_OPENVINO_COMPILER_DECODER_H_
#include <openvino/frontend/tensorflow_lite/decoder.hpp>
#include <string>
#include <vector>
#include "litert/c/litert_logging.h"
#include "litert/cc/litert_macros.h"
#include "litert/cc/litert_model.h"
namespace litert {
namespace openvino {
// Decoder for a single LiteRT operation. Wraps a litert::Op together with
// pre-built OpenVINO tensor metadata for its inputs/outputs so that the
// OpenVINO TensorFlow Lite frontend can query op type, name and attributes
// (see get_attribute in the .cc file for per-op attribute mapping).
class DecoderOperation
    : public ov::frontend::tensorflow_lite::DecoderBaseOperation {
 public:
  // TODO: in/out _tensor_info copy has to be avoided
  explicit DecoderOperation(
      std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>
          input_tensor_info,
      std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>
          output_tensor_info,
      const litert::Op &litert_op, size_t node_index);
  virtual ~DecoderOperation() = default;

  // DecoderBase Interface implementations :
  /// \brief Get attribute value by name
  ov::Any get_attribute(const std::string &name) const override;
  /// \brief Get a number of inputs
  size_t get_input_size() const override { return input_tensor_info_.size(); }
  /// \brief Get a producer name and its output port index
  void get_input_node(size_t input_port_idx, std::string &producer_name,
                      std::string &producer_output_port_name,
                      size_t &producer_output_port_index) const override {
    // TODO: Needs implementation ? Benchmark/demo app worked fine even without
    // it.
    return;
  }
  /// \brief Get operation type
  const std::string &get_op_type() const override { return op_type_; }
  /// \brief Get node name
  const std::string &get_op_name() const override { return op_name_; }

  // DecoderBaseOperation Interface implementations :
  /// \brief Get input tensor name by index
  std::string get_input_tensor_name(size_t idx) const override {
    return input_tensor_info_[idx].m_tensor_name;
  }
  /// \brief Get input tensor type by index
  ov::element::Type get_input_tensor_type(size_t idx) const override {
    return input_tensor_info_[idx].m_element_type;
  }
  /// \brief Get output tensor name by index
  std::string get_output_tensor_name(size_t idx) const override {
    return output_tensor_info_[idx].m_tensor_name;
  }
  /// \brief Get output tensor type by index
  ov::element::Type get_output_tensor_type(size_t idx) const override {
    return output_tensor_info_[idx].m_element_type;
  }
  /// \brief Get input tensor info
  ov::frontend::tensorflow_lite::TensorMetaInfo get_input_tensor_info(
      size_t idx) const override {
    return input_tensor_info_[idx];
  }
  /// \brief Get output tensor info
  ov::frontend::tensorflow_lite::TensorMetaInfo get_output_tensor_info(
      size_t idx) const override {
    return output_tensor_info_[idx];
  }
  /// \brief Get a number of outputs
  size_t get_output_size() const override { return output_tensor_info_.size(); }

 private:
  // Cached type/name strings; get_op_type()/get_op_name() return references
  // into these members, so they must outlive any caller-held reference.
  std::string op_type_;
  std::string op_name_;
  // Copies of the tensor metadata supplied at construction.
  std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo> input_tensor_info_;
  std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>
      output_tensor_info_;
  // Underlying LiteRT op handle and op code used to answer attribute queries.
  const LiteRtOp litert_op_;
  const LiteRtOpCode litert_op_code_;
};
// Decoder for a subgraph I/O tensor. Carries the tensor metadata plus the
// tensor's position in the subgraph input/output lists. The operation-related
// DecoderBase queries are meaningless for a plain tensor; they log an error
// and return a well-defined empty value.
class DecoderTensor : public ov::frontend::tensorflow_lite::DecoderBaseTensor {
 public:
  // input_index / output_index: position in the subgraph I/O lists, or -1
  // when the tensor is not an input / output respectively.
  explicit DecoderTensor(
      ov::frontend::tensorflow_lite::TensorMetaInfo tensor_meta_info,
      int64_t input_index, int64_t output_index)
      : m_tensor_meta_info(tensor_meta_info),
        input_index_(input_index),
        output_index_(output_index) {};
  ov::frontend::tensorflow_lite::TensorMetaInfo get_tensor_info()
      const override {
    return m_tensor_meta_info;
  }
  /// \brief Get input index for tensor
  int64_t get_input_idx() const override { return input_index_; }
  /// \brief Get output index for tensor
  int64_t get_output_idx() const override { return output_index_; }
  /// \brief No attributes for tensor; returns an empty ov::Any.
  // Fixed: these four members previously fell off the end of a non-void
  // function (undefined behavior). Each now returns an explicit empty value.
  ov::Any get_attribute(const std::string &name) const override {
    LITERT_LOG(LITERT_ERROR, "get_attribute not implemented");
    return {};
  }
  /// \brief No inputs for tensor; always 0.
  size_t get_input_size() const override {
    LITERT_LOG(LITERT_ERROR, "get_input_size not implemented");
    return 0;
  }
  /// \brief No input nodes for tensor
  void get_input_node(size_t input_port_idx, std::string &producer_name,
                      std::string &producer_output_port_name,
                      size_t &producer_output_port_index) const override {
    LITERT_LOG(LITERT_ERROR, "get_input_node not implemented");
  }
  /// \brief No operation type for tensor; returns an empty string.
  const std::string &get_op_type() const override {
    LITERT_LOG(LITERT_ERROR, "get_op_type not implemented");
    static const std::string kEmpty;
    return kEmpty;
  }
  /// \brief No operation name for tensor; returns an empty string.
  const std::string &get_op_name() const override {
    LITERT_LOG(LITERT_ERROR, "get_op_name not implemented");
    static const std::string kEmpty;
    return kEmpty;
  }

 private:
  ov::frontend::tensorflow_lite::TensorMetaInfo m_tensor_meta_info;
  int64_t input_index_;
  int64_t output_index_;
};
} // namespace openvino
} // namespace litert
#endif // ODML_LITERT_LITERT_VENDORS_OPENVINO_COMPILER_DECODER_H_

View File

@ -0,0 +1,92 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "openvino/frontend/tensorflow_lite/decoder.hpp"
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include <gtest/gtest.h>
#include "litert/c/litert_common.h"
#include "litert/c/litert_logging.h"
#include "litert/c/litert_model.h"
#include "litert/c/litert_op_code.h"
#include "litert/cc/litert_model.h"
#include "litert/test/common.h"
#include "litert/test/matchers.h"
#include "litert/test/test_models.h"
#include "litert/vendors/intel_openvino/compiler/decoder.h"
namespace litert {
namespace openvino {
using ::testing::Values;
// Smoke test: a DecoderOperation can be constructed for every op in the
// subgraph, even with empty input/output tensor metadata.
TEST(TestLiteOvDecoder, ConstructDecoderOp) {
  auto model =
      testing::LoadTestFileModel("simple_conv_2d_fused_relu_op.tflite");
  auto graph = model.Subgraph(0);
  size_t index = 0;
  for (const auto& op : graph->Ops()) {
    auto sample_ov_decode_op = DecoderOperation(
        /*input_tensor_info=*/std::vector<
            ov::frontend::tensorflow_lite::TensorMetaInfo>(),
        /*output_tensor_info=*/
        std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>(),
        /*litert_op=*/op, index++);
  }
}
// Verify that DecoderOperation::get_attribute maps the conv2d options of the
// test model (strides, padding, dilations, fused activation) to the values
// the OpenVINO frontend expects.
TEST(TestLiteOvDecoder, VerifyDecoderConv2dOp) {
  auto model =
      testing::LoadTestFileModel("simple_conv_2d_fused_relu_op.tflite");
  auto graph = model.Subgraph(0);
  size_t index = 0;
  for (const auto& op : graph->Ops()) {
    auto sample_ov_decode_op = DecoderOperation(
        /*input_tensor_info=*/std::vector<
            ov::frontend::tensorflow_lite::TensorMetaInfo>(),
        /*output_tensor_info=*/
        std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo>(),
        /*litert_op=*/op, index++);
    // No metadata was passed in, so the decoder reports zero inputs.
    ASSERT_EQ(sample_ov_decode_op.get_input_size(), 0);
    std::vector<int64_t> strides_vec =
        sample_ov_decode_op.get_attribute("strides").as<std::vector<int64_t>>();
    LITERT_LOG(LITERT_INFO, "Stride values : %ld %ld %ld %ld", strides_vec[0],
               strides_vec[1], strides_vec[2], strides_vec[3]);
    ASSERT_EQ(strides_vec, std::vector<int64_t>({1, 1, 1, 1}));
    std::string padding_str =
        sample_ov_decode_op.get_attribute("padding").as<std::string>();
    LITERT_LOG(LITERT_INFO, "Padding : %s", padding_str.c_str());
    ASSERT_EQ(padding_str, "SAME");
    std::vector<int64_t> dilations_vec =
        sample_ov_decode_op.get_attribute("dilations")
            .as<std::vector<int64_t>>();
    LITERT_LOG(LITERT_INFO, "Dilation values : %ld %ld %ld %ld",
               dilations_vec[0], dilations_vec[1], dilations_vec[2],
               dilations_vec[3]);
    ASSERT_EQ(dilations_vec, std::vector<int64_t>({1, 1, 1, 1}));
    std::string activation_str =
        sample_ov_decode_op.get_attribute("activation").as<std::string>();
    LITERT_LOG(LITERT_INFO, "Activation : %s", activation_str.c_str());
    ASSERT_EQ(activation_str, "RELU");
  }
}
} // namespace openvino
} // namespace litert

View File

@ -0,0 +1,216 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "litert/vendors/intel_openvino/compiler/graph_iterator.h"
#include <string>
namespace litert {
namespace openvino {
// Translate a LiteRT element type into the matching OpenVINO element type.
// Types without an OpenVINO equivalent map to ov::element::undefined.
ov::element::Type MapLiteTypeToOV(const litert::ElementType element_type) {
  switch (element_type) {
    case litert::ElementType::Bool:
      return ov::element::boolean;
    case litert::ElementType::Int4:
      return ov::element::i4;
    case litert::ElementType::Int8:
      return ov::element::i8;
    case litert::ElementType::Int16:
      return ov::element::i16;
    case litert::ElementType::Int32:
      return ov::element::i32;
    case litert::ElementType::Int64:
      return ov::element::i64;
    case litert::ElementType::UInt8:
      return ov::element::u8;
    case litert::ElementType::UInt16:
      return ov::element::u16;
    case litert::ElementType::UInt32:
      return ov::element::u32;
    case litert::ElementType::UInt64:
      return ov::element::u64;
    case litert::ElementType::Float16:
      return ov::element::f16;
    case litert::ElementType::Float32:
      return ov::element::f32;
    case litert::ElementType::Float64:
      return ov::element::f64;
    case litert::ElementType::BFloat16:
      return ov::element::bf16;
    default:
      return ov::element::undefined;
  }
}
// Copy the dimensions of `litert_tensor` into `ov_shape_vec`.
// Returns kLiteRtStatusErrorInvalidArgument for unranked tensors and
// kLiteRtStatusErrorUnsupported for rank-0 (scalar) tensors.
LiteRtStatus GetOVTensorShape(const litert::Tensor& litert_tensor,
                              std::vector<int64_t>& ov_shape_vec) {
  if (litert_tensor.TypeId() != kLiteRtRankedTensorType) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  const auto ranked_type = litert_tensor.RankedTensorType();
  if (!ranked_type) {
    LITERT_LOG(LITERT_ERROR, "%s", ranked_type.Error().Message().data());
    return ranked_type.Error().Status();
  }
  const auto layout = ranked_type->Layout();
  if (layout.Rank() == 0) {
    return kLiteRtStatusErrorUnsupported;
  }
  ov_shape_vec.resize(layout.Rank());
  for (size_t dim = 0; dim < ov_shape_vec.size(); ++dim) {
    ov_shape_vec[dim] = layout.Dimensions()[dim];
  }
  return kLiteRtStatusOk;
}
// Total number of nodes the iterator traverses: subgraph inputs first, then
// subgraph outputs, then ops (the ordering get_decoder() relies on).
size_t GraphIteratorDelegate::size() const {
  return iterator_indices_.input_index_ + iterator_indices_.output_index_ +
         iterator_indices_.op_index_;
}
// Rewind the iterator to the first node.
void GraphIteratorDelegate::reset() { node_index_ = 0; }
// Advance the iterator to the next node.
void GraphIteratorDelegate::next() { node_index_++; }
// True once the iterator has consumed all nodes. Fixed: uses >= instead of ==
// so an over-advanced iterator still reports end instead of iterating forever.
bool GraphIteratorDelegate::is_end() const { return node_index_ >= size(); }
// Populate `tensor_meta_info` (shape, element type, name and quantization)
// from a LiteRT tensor. Returns false only for unsupported quantization
// schemes; an unsupported shape is logged and the shape left empty.
bool fill_tensor_meta(
    ov::frontend::tensorflow_lite::TensorMetaInfo& tensor_meta_info,
    const litert::Tensor& litert_tensor) {
  std::vector<int64_t> shape_vec;
  const ElementType type = litert_tensor.ElementType();
  ov::element::Type ov_element_type = MapLiteTypeToOV(type);
  if (GetOVTensorShape(litert_tensor, shape_vec) != kLiteRtStatusOk) {
    // NOTE(review): shape failures (e.g. scalar tensors) are only logged and
    // the tensor keeps an empty shape -- confirm this is intended.
    LITERT_LOG(LITERT_INFO, "Unsupported tensor element shape");
  }
  if (litert_tensor.QTypeId() == kLiteRtQuantizationPerTensor) {
    // Per-tensor quantization: a single scale/zero-point pair.
    const auto& quantization = litert_tensor.PerTensorQuantization();
    auto ov_quantization =
        std::make_shared<ov::frontend::tensorflow_lite::QuantizationInfo>();
    ov_quantization->set_scale({quantization.scale});
    ov_quantization->set_zero_point({quantization.zero_point});
    tensor_meta_info.m_quantization_info = ov_quantization;
  } else if (litert_tensor.QTypeId() == kLiteRtQuantizationPerChannel) {
    // Per-channel quantization: one scale/zero-point per channel along
    // quantized_dimension.
    const auto& quantization = litert_tensor.PerChannelQuantization();
    auto ov_quantization =
        std::make_shared<ov::frontend::tensorflow_lite::QuantizationInfo>();
    std::vector<float> scale_vec(quantization.num_channels, 0);
    std::vector<int64_t> zero_point_vec(quantization.num_channels, 0);
    for (int i = 0; i < quantization.num_channels; i++) {
      scale_vec[i] = quantization.scales[i];
      zero_point_vec[i] = quantization.zero_points[i];
    }
    ov_quantization->set_scale(scale_vec);
    ov_quantization->set_zero_point(zero_point_vec);
    ov_quantization->set_axis(quantization.quantized_dimension);
    tensor_meta_info.m_quantization_info = ov_quantization;
  } else if (litert_tensor.QTypeId() != kLiteRtQuantizationNone) {
    // Any other quantization scheme is rejected outright.
    LITERT_LOG(LITERT_ERROR, "Unsupported Quantization type %d ",
               litert_tensor.QTypeId());
    return false;
  }
  ov::PartialShape tensor_shape{shape_vec};
  tensor_meta_info.m_partial_shape = tensor_shape;
  tensor_meta_info.m_element_type = ov_element_type;
  tensor_meta_info.m_tensor_name = std::string(litert_tensor.Name());
  return true;
}
// Produce the decoder for the node the iterator currently points at.
// Node ordering is: subgraph inputs [0, input_index_), subgraph outputs
// [input_index_, input_index_ + output_index_), then ops. Returns nullptr if
// tensor metadata cannot be filled (unsupported quantization).
std::shared_ptr<ov::frontend::tensorflow_lite::DecoderBase>
GraphIteratorDelegate::get_decoder() const {
  ov::frontend::tensorflow_lite::TensorMetaInfo tensor_meta_info;
  if (node_index_ < iterator_indices_.input_index_) {
    // Current node is a subgraph input tensor.
    const auto& input_vec = subgraph_ptr_->Inputs();
    const auto& input = input_vec[node_index_];
    int64_t input_index = node_index_;
    int64_t output_index = -1;
    if (!fill_tensor_meta(tensor_meta_info, input)) {
      return nullptr;
    }
    return std::make_shared<litert::openvino::DecoderTensor>(
        tensor_meta_info, input_index, output_index);
  } else if (node_index_ >= iterator_indices_.input_index_ &&
             node_index_ < iterator_indices_.input_index_ +
                               iterator_indices_.output_index_) {
    // Current node is a subgraph output tensor.
    const auto& output_vec = subgraph_ptr_->Outputs();
    const auto& output =
        output_vec[node_index_ - iterator_indices_.input_index_];
    int64_t input_index = -1;
    // NOTE(review): output_index is the flat node index (input count
    // included), not the position within Outputs() -- confirm the OV frontend
    // expects the flat index here.
    int64_t output_index = node_index_;
    if (!fill_tensor_meta(tensor_meta_info, output)) {
      return nullptr;
    }
    return std::make_shared<litert::openvino::DecoderTensor>(
        tensor_meta_info, input_index, output_index);
  } else {
    // Current node is an op; build metadata for all of its inputs/outputs.
    const auto& op_vec = subgraph_ptr_->Ops();
    const auto& op = op_vec[node_index_ - iterator_indices_.input_index_ -
                            iterator_indices_.output_index_];
    std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo> input_meta_info;
    std::vector<ov::frontend::tensorflow_lite::TensorMetaInfo> output_meta_info;
    for (const auto& input : op.Inputs()) {
      if (!fill_tensor_meta(tensor_meta_info, input)) {
        return nullptr;
      }
      if (input.HasWeights()) {
        LITERT_LOG(LITERT_VERBOSE, "Data is static or constant for op %d",
                   op.Code());
        tensor_meta_info.m_tensor_data = input.Weights().Bytes().data();
        // Quantization info on constant inputs is kept only for the op codes
        // listed below; for all other ops it is dropped.
        if (op.Code() != LiteRtOpCode::kLiteRtOpCodeTflConv2d &&
            op.Code() != LiteRtOpCode::kLiteRtOpCodeTflDepthwiseConv2d &&
            op.Code() != LiteRtOpCode::kLiteRtOpCodeTflMul &&
            op.Code() != LiteRtOpCode::kLiteRtOpCodeTflAdd &&
            op.Code() != LiteRtOpCode::kLiteRtOpCodeTflFullyConnected)
          tensor_meta_info.m_quantization_info = nullptr;
      }
      input_meta_info.push_back(tensor_meta_info);
    }
    for (const auto& output : op.Outputs()) {
      if (!fill_tensor_meta(tensor_meta_info, output)) {
        return nullptr;
      }
      output_meta_info.push_back(tensor_meta_info);
    }
    return std::make_shared<litert::openvino::DecoderOperation>(
        input_meta_info, output_meta_info, op, node_index_);
  }
}
} // namespace openvino
} // namespace litert

View File

@ -0,0 +1,103 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ODML_LITERT_LITERT_VENDORS_OPENVINO_COMPILER_GRAPH_ITERATOR_H_
#define ODML_LITERT_LITERT_VENDORS_OPENVINO_COMPILER_GRAPH_ITERATOR_H_
#include <memory>
#include <unordered_set>
#include <vector>
#include "litert/c/litert_logging.h"
#include "litert/c/litert_model.h"
#include "litert/cc/litert_model.h"
#include "litert/vendors/intel_openvino/compiler/decoder.h"
#include "third_party/openvino/frontend/tensorflow_lite/decoder.hpp"
#include "third_party/openvino/frontend/tensorflow_lite/graph_iterator.hpp"
#include "third_party/openvino/frontend/tensorflow_lite/quantization_info.hpp"
namespace litert {
namespace openvino {
// Counts of each node category in the subgraph, gathered once at
// GraphIteratorDelegate construction and used to map a flat node index onto
// inputs, outputs and ops during iteration.
struct OVGraphIndices {
  int32_t input_index_ = 0;   // number of subgraph input tensors
  int32_t output_index_ = 0;  // number of subgraph output tensors
  int32_t const_index_ = 0;   // number of constant inputs (counted, not iterated)
  int32_t op_index_ = 0;      // number of ops in the subgraph
};
// GraphIteratorDelegate traverses through the graph/subgraph i/o's and ops.
// Objective of this class is to create TensorMetaInfo structures to pass to
// DecoderTensor to manage I/Os and fill the op specific information
// in DecoderOperation objects. OpenVINO tensorflow lite frontend takes
// the responsibility for creating OV op nodes.
class GraphIteratorDelegate
: public ov::frontend::tensorflow_lite::GraphIterator {
public:
GraphIteratorDelegate(const litert::Subgraph* graph) : subgraph_ptr_(graph) {
for (const auto& input : subgraph_ptr_->Inputs()) {
if (input.IsSubgraphInput()) {
iterator_indices_.input_index_++;
} else if (input.IsConstant()) {
iterator_indices_.const_index_++;
}
}
for (const auto& output : subgraph_ptr_->Outputs()) {
if (output.IsSubgraphOutput()) {
iterator_indices_.output_index_++;
}
}
for (const auto& op : subgraph_ptr_->Ops()) {
iterator_indices_.op_index_++;
}
}
~GraphIteratorDelegate() = default;
/// \brief Get a number of operation nodes in the graph
size_t size() const override;
/// \brief Set iterator to the start position
void reset() override;
/// \brief Move to the next node in the graph
void next() override;
/// \brief Returns true if iterator goes out of the range of available nodes
bool is_end() const override;
/// \brief Return a pointer to a decoder of the current node
std::shared_ptr<ov::frontend::tensorflow_lite::DecoderBase> get_decoder()
const override;
/// \brief Returns the number of sub-graphs that can be enumerated with
/// get_subgraph
size_t get_subgraph_size() const override { return 0; }
/// \brief Returns iterator for a subgraph created on demand
/// If there is no query for specific sub-graph iterator shouldn't be created
/// idx should be in range 0..get_subgraph_size()-1
std::shared_ptr<ov::frontend::tensorflow_lite::GraphIterator> get_subgraph(
size_t idx) const override {};
private:
size_t node_index_ = 0;
const litert::Subgraph* subgraph_ptr_;
struct OVGraphIndices iterator_indices_;
};
} // namespace openvino
} // namespace litert
#endif // ODML_LITERT_LITERT_VENDORS_OPENVINO_COMPILER_GRAPH_ITERATOR_H_

View File

@ -0,0 +1,274 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cstddef>
#include <cstdlib>
#include <string>
#include <vector>
#include "absl/strings/string_view.h" // from @com_google_absl
#include "litert/c/litert_common.h"
#include "litert/c/litert_logging.h"
#include "litert/c/litert_model.h"
#include "litert/c/litert_op_code.h"
#include "litert/cc/litert_macros.h"
#include "litert/cc/litert_model.h"
#include "litert/vendors/c/litert_compiler_plugin.h"
#include "litert/vendors/intel_openvino/compiler/graph_iterator.h"
#include "third_party/openvino/frontend/tensorflow_lite/frontend.hpp"
#include "third_party/openvino/openvino.hpp"
namespace {
// Manufacturer string reported through LiteRtGetCompilerPluginSocManufacturer.
constexpr char kPluginManufacturer[] = "IntelOpenVINO";
// SoC models this plugin accepts as compile targets.
constexpr const char *kPluginSocModels[] = {
    "NPU2700",
};
// Op codes the plugin claims during partitioning (see IsOpSupported).
constexpr LiteRtOpCode kSupportedOps[] = {
    kLiteRtOpCodeTflConv2d,
    kLiteRtOpCodeTflDepthwiseConv2d,
    kLiteRtOpCodeTflSplit,
    kLiteRtOpCodeTflFullyConnected,
    kLiteRtOpCodeTflAdd,
    kLiteRtOpCodeTflReshape,
    kLiteRtOpCodeTflMean,
    kLiteRtOpCodeTflResizeBilinear,
    kLiteRtOpCodeTflResizeNearestNeighbor,
    kLiteRtOpCodeTflConcatenation,
    kLiteRtOpCodeTflMaxPool2d,
    kLiteRtOpCodeTflAveragePool2d,
    kLiteRtOpCodeTflMul,
    kLiteRtOpCodeTflTransposeConv,
    kLiteRtOpCodeTflSoftmax,
    kLiteRtOpCodeTflMirrorPad,
    kLiteRtOpCodeTflStridedSlice,
    kLiteRtOpCodeTflDepthToSpace,
    kLiteRtOpCodeTflGather,
    kLiteRtOpCodeTflBatchMatmul,
    kLiteRtOpCodeTflLeakyRelu,
    kLiteRtOpCodeTflPack,
    // These ops do not call get_attribute
    kLiteRtOpCodeTflDequantize,
    kLiteRtOpCodeTflLogistic,
    kLiteRtOpCodeTflRelu,
    kLiteRtOpCodeTflTanh,
    kLiteRtOpCodeTflPad,
    kLiteRtOpCodeTflTranspose,
    kLiteRtOpCodeTflSlice,
    kLiteRtOpCodeTflQuantize,
};
constexpr auto kNumPluginSocModels =
    sizeof(kPluginSocModels) / sizeof(kPluginSocModels[0]);
}  // namespace
// Report the LiteRT API version this plugin was built against.
LiteRtStatus LiteRtGetCompilerPluginVersion(LiteRtApiVersion *api_version) {
  if (!api_version) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  api_version->major = LITERT_API_VERSION_MAJOR;
  api_version->minor = LITERT_API_VERSION_MINOR;
  api_version->patch = LITERT_API_VERSION_PATCH;
  return kLiteRtStatusOk;
}
// Return the static manufacturer string; the pointer stays valid for the
// lifetime of the process.
const char *LiteRtGetCompilerPluginSocManufacturer() {
  return kPluginManufacturer;
}
// The OpenVINO plugin targets the NPU accelerator only.
LiteRtStatus LiteRtGetCompilerPluginSupportedHardware(
    LiteRtCompilerPlugin compiler_plugin,
    LiteRtHwAccelerators *supported_hardware) {
  if (compiler_plugin == nullptr || supported_hardware == nullptr) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  *supported_hardware = kLiteRtHwAcceleratorNpu;
  return kLiteRtStatusOk;
}
// Report how many SoC models this plugin can compile for.
LiteRtStatus LiteRtGetNumCompilerPluginSupportedSocModels(
    LiteRtCompilerPlugin compiler_plugin,
    LiteRtParamIndex *num_supported_soc_models) {
  if (!compiler_plugin || !num_supported_soc_models) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  *num_supported_soc_models = kNumPluginSocModels;
  return kLiteRtStatusOk;
}
// Look up the SoC model name at `soc_model_idx`. The returned pointer refers
// to static storage and stays valid for the lifetime of the process.
LiteRtStatus LiteRtGetCompilerPluginSupportedSocModel(
    LiteRtCompilerPlugin compiler_plugin, LiteRtParamIndex soc_model_idx,
    const char **soc_model_name) {
  if (!compiler_plugin || !soc_model_name ||
      soc_model_idx >= kNumPluginSocModels) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  *soc_model_name = kPluginSocModels[soc_model_idx];
  return kLiteRtStatusOk;
}
// Compiled Result Definition
/// \brief Define storage of compiled result object for OV compiler plugin
struct LiteRtCompiledResultT {
  // One serialized (exported) OpenVINO compiled model per partition.
  std::vector<std::string> byte_code;
  // Graph/call names; LiteRtGetCompiledResultCallInfo maps call_idx to
  // byte_code_idx one-to-one.
  std::vector<std::string> graph_names;
};
// Return a view of the serialized byte code for module `byte_code_idx`.
// The pointer stays valid until the compiled result is destroyed.
LiteRtStatus LiteRtGetCompiledResultByteCode(
    LiteRtCompiledResult compiled_result, LiteRtParamIndex byte_code_idx,
    const void **byte_code, size_t *byte_code_size) {
  // Fixed: the original dereferenced unchecked arguments and indexed without
  // a bounds check (the sibling CallInfo accessor does check). Guard both.
  if (!compiled_result || !byte_code || !byte_code_size) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  if (byte_code_idx >= compiled_result->byte_code.size()) {
    return kLiteRtStatusErrorIndexOOB;
  }
  const std::string &code = compiled_result->byte_code[byte_code_idx];
  *byte_code = code.data();
  *byte_code_size = code.length();
  return kLiteRtStatusOk;
}
// Return the call info (the partition's graph name) for `call_idx`.
// byte_code_idx mirrors call_idx: each call maps to its own byte-code module.
LiteRtStatus LiteRtGetCompiledResultCallInfo(
    LiteRtCompiledResult compiled_result, LiteRtParamIndex call_idx,
    const void **call_info, size_t *call_info_size,
    LiteRtParamIndex *byte_code_idx) {
  if (call_idx >= compiled_result->graph_names.size()) {
    return kLiteRtStatusErrorIndexOOB;
  }
  const auto &name = compiled_result->graph_names[call_idx];
  *call_info = name.data();
  *call_info_size = name.size();
  *byte_code_idx = call_idx;
  return kLiteRtStatusOk;
}
// Report the number of callable entry points in the compiled result.
LiteRtStatus LiteRtGetNumCompiledResultCalls(
    LiteRtCompiledResult compiled_result, LiteRtParamIndex *num_calls) {
  // Guard null arguments for consistency with the other result accessors.
  if (!compiled_result || !num_calls) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  *num_calls = compiled_result->graph_names.size();
  return kLiteRtStatusOk;
}
// Free a compiled result previously produced by LiteRtCompilerPluginCompile.
void LiteRtDestroyCompiledResult(LiteRtCompiledResult compiled_result) {
  delete compiled_result;
}
// Report the number of byte-code modules (one per compiled partition).
LiteRtStatus LiteRtCompiledResultNumByteCodeModules(
    LiteRtCompiledResult compiled_result, LiteRtParamIndex *num_byte_code) {
  if (compiled_result == nullptr || num_byte_code == nullptr) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  *num_byte_code = compiled_result->byte_code.size();
  return kLiteRtStatusOk;
}
// Plugin Definition
/// \brief Define Compiler plugin APIs
struct LiteRtCompilerPluginT {
  // Handles captured at creation; stored as-is, the plugin does not take
  // ownership (LiteRtDestroyCompilerPlugin frees only the plugin itself).
  LiteRtEnvironmentOptions env;
  LiteRtOptions options;
};
// Create the OpenVINO compiler plugin, capturing the environment and
// compilation option handles for later use.
LiteRtStatus LiteRtCreateCompilerPlugin(LiteRtCompilerPlugin *compiler_plugin,
                                        LiteRtEnvironmentOptions env,
                                        LiteRtOptions options) {
  // Guard the out-parameter, consistent with the other entry points.
  if (compiler_plugin == nullptr) {
    return kLiteRtStatusErrorInvalidArgument;
  }
  LiteRtSetMinLoggerSeverity(LiteRtGetDefaultLogger(), LITERT_INFO);
  auto *plugin = new LiteRtCompilerPluginT;
  plugin->env = env;
  plugin->options = options;
  *compiler_plugin = plugin;
  return kLiteRtStatusOk;
}
// Free a plugin created by LiteRtCreateCompilerPlugin. Does not free the
// env/options handles, which are owned by the caller.
void LiteRtDestroyCompilerPlugin(LiteRtCompilerPlugin compiler_plugin) {
  delete compiler_plugin;
}
// True when the op's code appears in the kSupportedOps allow-list.
bool IsOpSupported(const ::litert::Op &op) {
  const LiteRtOpCode code = op.Code();
  for (const auto &candidate : kSupportedOps) {
    if (candidate == code) {
      return true;
    }
  }
  return false;
}
#ifdef __cplusplus
extern "C" {
#endif
// Select the ops of `subgraph` that the OpenVINO plugin can compile.
// Supported ops are pushed onto `selected_ops`; unsupported ops are skipped
// and left to other backends.
LiteRtStatus LiteRtCompilerPluginPartition(LiteRtCompilerPlugin compiler_plugin,
                                           const char *soc_model,
                                           LiteRtSubgraph subgraph,
                                           LiteRtOpList selected_ops) {
  ::litert::Subgraph graph(subgraph);
  // TODO(rjasuja): Enhance implementation for Partition() call
  for (const auto &op : graph.Ops()) {
    if (!IsOpSupported(op)) {
      // NOTE(review): hitting an unsupported op is a normal partitioning
      // outcome; ERROR severity may be noisy -- consider a lower level.
      LITERT_LOG(LITERT_ERROR, "op type %d is not supported", op.Code());
      continue;
    }
    LITERT_RETURN_IF_ERROR(LiteRtPushOp(selected_ops, op.Get(), 0));
  }
  return kLiteRtStatusOk;
}
#ifdef __cplusplus
} /* end extern "C" */
#endif
// Compile each partition subgraph into a serialized OpenVINO blob.
// Each partition is loaded through the OpenVINO TFLite frontend (via
// GraphIteratorDelegate), converted, compiled with ov::Core and exported
// into the compiled result at the partition's index.
LiteRtStatus LiteRtCompilerPluginCompile(
    LiteRtCompilerPlugin compiler_plugin, const char *soc_model,
    LiteRtModel partitions, LiteRtCompiledResult *compiled_result) {
  auto model = litert::Model::CreateFromNonOwnedHandle(partitions);
  const auto num_partitions = model.NumSubgraphs();
  auto result = std::make_unique<LiteRtCompiledResultT>();
  result->byte_code.resize(num_partitions);
  result->graph_names.resize(num_partitions);
  auto tflite_fe = std::make_shared<ov::frontend::tensorflow_lite::FrontEnd>();
  // TODO: Update this hard coded path to an env option passed from LiteRT
  // framework
  ov::Core core;
  for (int partition_idx = 0; partition_idx < num_partitions; ++partition_idx) {
    auto graph_name = absl::StrFormat("Partition_%d", partition_idx);
    litert::Expected<litert::Subgraph> expected_subgraph =
        model.Subgraph(partition_idx);
    if (!expected_subgraph.HasValue()) {
      // Fixed: failure was previously logged at INFO severity.
      LITERT_LOG(LITERT_ERROR, "Failed to retrieve Subgraph");
      return kLiteRtStatusErrorCompilation;
    }
    std::shared_ptr<ov::frontend::tensorflow_lite::GraphIterator>
        graph_delegate =
            std::make_shared<litert::openvino::GraphIteratorDelegate>(
                &expected_subgraph.Value());
    auto input_model = tflite_fe->load(graph_delegate);
    LITERT_LOG(LITERT_INFO, "Model loaded");
    // Renamed from `model` to avoid shadowing the outer LiteRT model.
    auto ov_model = tflite_fe->convert(input_model);
    // TODO: pass the device string from env options
    std::string device = "NPU";
    std::ostringstream oss;
    auto compiled_model = core.compile_model(ov_model, device);
    compiled_model.export_model(oss);
    LITERT_LOG(LITERT_INFO, "Model export done");
    result->byte_code[partition_idx] = oss.str();
    // Fixed: this was emplace_back() after resize(num_partitions), which
    // appended past num_partitions empty entries and broke the call_idx ->
    // graph-name mapping. Assign in place instead.
    result->graph_names[partition_idx] = graph_name;
  }
  *compiled_result = result.release();
  // TODO: Add support for caching
  return kLiteRtStatusOk;
}

View File

@ -0,0 +1,89 @@
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstddef>
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h" // from @com_google_absl
#include "litert/c/litert_common.h"
#include "litert/c/litert_logging.h"
#include "litert/c/litert_model.h"
#include "litert/c/litert_op_code.h"
#include "litert/cc/litert_model.h"
#include "litert/test/common.h"
#include "litert/test/matchers.h"
#include "litert/test/test_models.h"
#include "litert/vendors/c/litert_compiler_plugin.h"
#include "litert/vendors/cc/litert_compiler_plugin.h"
#include "third_party/openvino/frontend/tensorflow_lite/frontend.hpp"
namespace litert {
namespace {
using ::testing::Values;
const auto kSupportedOps = Values("add_simple.tflite");
const auto kSupportedSocModels = Values("NPU2700");
// Partition a single-op (add) graph and verify exactly that op is selected.
TEST(TestOVPlugin, PartitionAddGraph) {
  auto plugin = CreatePlugin();
  auto model = testing::LoadTestFileModel("add_simple.tflite");
  LITERT_ASSERT_OK_AND_ASSIGN(auto subgraph, model.Subgraph(0));
  LiteRtOpListT selected_op_list;
  LITERT_ASSERT_OK(LiteRtCompilerPluginPartition(
      plugin.get(), /*soc_model=*/nullptr, subgraph.Get(), &selected_op_list));
  const auto selected_ops = selected_op_list.Values();
  ASSERT_EQ(selected_ops.size(), 1);
  EXPECT_EQ(selected_ops[0].first->OpCode(), kLiteRtOpCodeTflAdd);
}
// Compile the add graph for NPU2700 and verify that the exported byte code
// and the per-call info can be retrieved (requires OpenVINO NPU runtime).
TEST(TestOVPlugin, CompileAddSubgraph) {
  auto plugin = CreatePlugin();
  auto model = testing::LoadTestFileModel("add_simple.tflite");
  LiteRtCompiledResult compiled;
  LITERT_ASSERT_OK(LiteRtCompilerPluginCompile(plugin.get(), "NPU2700",
                                               model.Get(), &compiled));
  const void* byte_code;
  size_t byte_code_size;
  LITERT_ASSERT_OK(LiteRtGetCompiledResultByteCode(
      compiled, /*byte_code_idx=*/0, &byte_code, &byte_code_size));
  absl::string_view byte_code_string(reinterpret_cast<const char*>(byte_code),
                                     byte_code_size);
  ASSERT_FALSE(byte_code_string.empty());
  const void* op_data;
  size_t op_data_size;
  LiteRtParamIndex byte_code_idx;
  LITERT_ASSERT_OK(LiteRtGetCompiledResultCallInfo(
      compiled, /*call_idx=*/0, &op_data, &op_data_size, &byte_code_idx));
  absl::string_view op_data_string(reinterpret_cast<const char*>(op_data),
                                   op_data_size);
  LiteRtDestroyCompiledResult(compiled);
}
} // namespace
} // namespace litert

23
third_party/intel_openvino/BUILD vendored Normal file
View File

@ -0,0 +1,23 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Dummy BUILD file to make this directory a package.
package(
# copybara:uncomment default_applicable_licenses = ["@org_tensorflow//tensorflow:license"],
default_visibility = ["//litert:__subpackages__"],
)
licenses(["notice"])

View File

@ -0,0 +1,26 @@
# openvino_build_content.bazel
# This file defines the BUILD content for the external repository created by
# openvino_configure (registered as "intel_openvino" in the WORKSPACE).
# Paths within this file are relative to the root of that external repository,
# which contains a symlink named "openvino" to the local OpenVINO SDK.
cc_library(
name = "openvino",
hdrs = glob([
"openvino/runtime/include/**/*.h", # Gathers all .h files recursively
"openvino/runtime/include/ie/cpp/**/*.h",
"openvino/runtime/include/ie/**/*.h",
]),
srcs = [
"openvino/runtime/lib/intel64/libopenvino.so",
"openvino/runtime/lib/intel64/libopenvino_tensorflow_lite_frontend.so",
"openvino/runtime/lib/intel64/libc++_shared.so",
],
# Important: This strips the prefix so users include like <ie/cpp/foo.h>
strip_include_prefix = "openvino/runtime/include",
includes = [
"openvino/runtime/include/ie/cpp",
"openvino/runtime/include/ie",
"openvino/runtime/include",
],
visibility = ["//visibility:public"],
)

38
third_party/intel_openvino/openvino.bzl vendored Normal file
View File

@ -0,0 +1,38 @@
# Copyright 2025 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workspace definition for Openvino."""
def _openvino_native_impl(repository_ctx):
    """Repository rule impl: materialize a local OpenVINO SDK.

    When the OPENVINO_NATIVE_DIR environment variable is set, symlinks that
    directory into the repository as "openvino" and writes the content of the
    `build_file` attribute as the repository's BUILD file. Otherwise writes a
    stub BUILD file so the build can proceed without OpenVINO installed.
    """
    openvino_native_dir = repository_ctx.os.environ.get("OPENVINO_NATIVE_DIR")
    if openvino_native_dir:
        repository_ctx.symlink(openvino_native_dir, "openvino")
        build_file_content = repository_ctx.read(repository_ctx.attr.build_file)
        repository_ctx.file("BUILD", build_file_content)
    else:
        # Variable not set, create an empty BUILD file
        repository_ctx.file("BUILD", "# OPENVINO_NATIVE_DIR not set, skipping OpenVINO setup.")
# Repository rule that exposes a locally installed OpenVINO SDK (pointed to
# by the OPENVINO_NATIVE_DIR environment variable) as an external repository.
openvino_configure = repository_rule(
    implementation = _openvino_native_impl,
    local = True,
    # Re-evaluate the rule whenever OPENVINO_NATIVE_DIR changes.
    environ = ["OPENVINO_NATIVE_DIR"],
    attrs = {
        # Define an attribute to hold the label of the external BUILD file content
        "build_file": attr.label(
            doc = "The label of the BUILD file content to be written.",
            allow_single_file = True,  # This attribute expects a single file
            mandatory = True,
        ),
    },
)