Add LeakyRelu support to MTK compiler plugin

From [1], we can see the MTK NPU supports LeakyRelu by treating it as PRelu.
We can do the same thing in LiteRT and add it to the compiler plugin's supported-op list.

[1] https://chromium.googlesource.com/chromiumos/platform/tflite/+/refs/heads/main/delegate/mtk_neuron/neuron_delegate_kernel.cc#883
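
For reference, the equivalence the legalization relies on: LeakyRelu with scalar alpha computes the same function as PRelu whose slope tensor is the single element {alpha} broadcast over the input. A minimal illustrative C++ sketch (not part of this commit; names are hypothetical):

#include <cstdio>

// LeakyRelu: y = x for x >= 0, alpha * x otherwise.
float LeakyRelu(float x, float alpha) { return x >= 0.f ? x : alpha * x; }

// PRelu with a one-element slope tensor broadcasts the same scalar to every
// element, so it computes exactly the same function.
float PRelu(float x, const float* slope) { return x >= 0.f ? x : slope[0] * x; }

int main() {
  const float alpha = 0.2f;
  const float slope[1] = {alpha};  // the {1}-shaped tensor the plugin emits
  const float xs[] = {-2.f, -0.5f, 0.f, 3.f};
  for (float x : xs) {
    std::printf("x=%5.2f leaky=%6.3f prelu=%6.3f\n", x, LeakyRelu(x, alpha),
                PRelu(x, slope));
  }
  return 0;
}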

LiteRT-PiperOrigin-RevId: 853875388
Tommy Chiang, 2026-01-08 14:01:07 -08:00, committed by Copybara-Service
parent 69f98c7b66
commit 6fd753e657
5 changed files with 25 additions and 0 deletions


@@ -62,6 +62,7 @@ the `VerifyCommonOp` function.
 | `kLiteRtOpCodeTflPad` | Legalized to `NEURON_PAD`. |
 | `kLiteRtOpCodeTflPadv2` | Legalized to `NEURON_PAD_V2`. |
 | `kLiteRtOpCodeTflPrelu` | Legalized to `NEURON_PRELU`. |
+| `kLiteRtOpCodeTflLeakyRelu` | Legalized to `NEURON_PRELU` by passing the LeakyRelu `alpha` scalar as PRelu's slope input tensor. |
 | `kLiteRtOpCodeTflQuantize` | Legalized to `NEURON_QUANTIZE`. |
 | `kLiteRtOpCodeTflReduceMax` | Legalized to `NEURON_REDUCE_MAX`. Supports `keep_dims` attribute. |
 | `kLiteRtOpCodeTflRelu` | Legalized to `NEURON_RELU`. |


@@ -125,6 +125,7 @@ constexpr LiteRtOpCode kSupportedOps[] = {
     kLiteRtOpCodeTflDiv,
     kLiteRtOpCodeTflCast,
     kLiteRtOpCodeTflPrelu,
+    kLiteRtOpCodeTflLeakyRelu,
     kLiteRtOpCodeTflMaximum,
     kLiteRtOpCodeTflRelu,
     kLiteRtOpCodeTflAbs,


@@ -50,6 +50,7 @@ const auto kSupportedOps = Values(
     "simple_softmax_op.tflite",
     "simple_mean_op.tflite",
     "simple_gelu_op.tflite",
+    "simple_leaky_relu_op.tflite",
     "simple_pad.tflite",
     "simple_logistic.tflite",
     "simple_sum_op.tflite",


@@ -271,6 +271,11 @@ Expected<void> CreateModel(const NeuronAdapterApi& neuron_adapter_api,
       status = LegalizeCommonOp(neuron_adapter_api, model, *operand_map, op,
                                 NEURON_PRELU);
       break;
+    case kLiteRtOpCodeTflLeakyRelu:
+      status =
+          LegalizeOp(neuron_adapter_api, model, *operand_map, op,
+                     NEURON_PRELU, std::make_tuple(AddLeakyReluAlphaOption));
+      break;
     case kLiteRtOpCodeTflMaximum:
       status = LegalizeCommonOp(neuron_adapter_api, model, *operand_map, op,
                                 NEURON_MAXIMUM);
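
A note on the pattern above: `LegalizeCommonOp` maps the op's inputs one-to-one, while `LegalizeOp` appears to also invoke each callback in the `std::make_tuple(...)` argument to synthesize extra operands, here the slope tensor PRelu needs. A hedged stand-alone sketch of that callback-tuple mechanism (Op and OperandMap below are simplified stand-ins returning plain indices, not the real LiteRT types):

#include <cstdint>
#include <tuple>
#include <vector>

struct Op {};
struct OperandMap {};

// Applies every option adder in the tuple, in order, collecting the operand
// index each one registers; the indices can then be appended to the op's
// input list. Each adder is callable as uint32_t(const Op&, OperandMap&).
template <typename... Adders>
std::vector<uint32_t> CollectExtraInputs(const Op& op, OperandMap& map,
                                         const std::tuple<Adders...>& adders) {
  std::vector<uint32_t> inputs;
  std::apply(
      [&](const auto&... adder) { (inputs.push_back(adder(op, map)), ...); },
      adders);
  return inputs;
}

// Usage, mirroring the call site above:
//   CollectExtraInputs(op, map, std::make_tuple(AddLeakyReluAlphaOption));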


@@ -417,6 +417,23 @@ inline Expected<uint32_t> AddDivFuseActivationOption(const litert::Op& op,
   return operand_map.AddScalarInt32(fuse);
 }
 
+//==============================================================================
+// kLiteRtOpCodeTflLeakyRelu
+//==============================================================================
+inline Expected<uint32_t> AddLeakyReluAlphaOption(const litert::Op& op,
+                                                  OperandMap& operand_map) {
+  std::vector<uint32_t> tensor_shape = {1};
+  int32_t alpha_idx = -1;
+  LITERT_ASSIGN_OR_RETURN(alpha_idx,
+                          operand_map.RegisterExtraData(sizeof(float)));
+  LITERT_RETURN_IF_ERROR(LiteRtGetLeakyReluAlphaOption(
+      op.Get(), (float*)operand_map.GetExtraData(alpha_idx)))
+      << "Failed to get LeakyRelu alpha option";
+  return operand_map.AddTensorByType(NEURON_TENSOR_FLOAT32, tensor_shape,
+                                     operand_map.GetExtraData(alpha_idx),
+                                     sizeof(float));
+}
+
 }  // namespace litert::mediatek
 
 #endif  // ODML_LITERT_LITERT_VENDORS_MEDIATEK_COMPILER_LEGALIZATIONS_LEGALIZE_HELPER_H_
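
One detail worth noting in `AddLeakyReluAlphaOption`: the alpha value is written into storage owned by the operand map (`RegisterExtraData`/`GetExtraData`) rather than a stack local, presumably because the Neuron adapter keeps the operand's value pointer until the model is finished, so the buffer must outlive the helper. A minimal stand-in sketch of such a pool (illustrative only; `ExtraDataPool` is hypothetical, not the real LiteRT `OperandMap`):

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Owns small blobs at stable heap addresses, so pointers handed out remain
// valid even as more entries are registered (the vector may reallocate, but
// the blobs it points to do not move).
class ExtraDataPool {
 public:
  int32_t Register(size_t bytes) {
    blobs_.push_back(std::make_unique<uint8_t[]>(bytes));
    return static_cast<int32_t>(blobs_.size() - 1);
  }
  void* Get(int32_t idx) { return blobs_[idx].get(); }

 private:
  std::vector<std::unique_ptr<uint8_t[]>> blobs_;
};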