Add JavaScript API (node-addon) for Google MedAsr model (#2955)

This commit is contained in:
Fangjun Kuang 2025-12-29 12:51:39 +08:00 committed by GitHub
parent a53d9eec12
commit 2f8fb50a24
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 85 additions and 0 deletions

View File

@ -10,6 +10,16 @@ arch=$(node -p "require('os').arch()")
platform=$(node -p "require('os').platform()")
node_version=$(node -p "process.versions.node.split('.')[0]")
echo "----------non-streaming ASR Google MedASR CTC----------"
# Use curl (not wget) for consistency with the other model-download steps in
# this script, e.g. the Omnilingual ASR section below.
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-medasr-ctc-en-int8-2025-12-25.tar.bz2
tar xvf sherpa-onnx-medasr-ctc-en-int8-2025-12-25.tar.bz2
rm sherpa-onnx-medasr-ctc-en-int8-2025-12-25.tar.bz2
node ./test_asr_non_streaming_medasr_ctc.js
rm -rf sherpa-onnx-medasr-ctc-en-int8-2025-12-25
echo "----------non-streaming ASR Omnilingual ASR CTC----------"
curl -SL -O https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-omnilingual-asr-1600-languages-300M-ctc-int8-2025-11-12.tar.bz2

View File

@ -94,6 +94,22 @@ GetOfflineOmnilingualAsrCtcModelConfig(Napi::Object obj) {
return c;
}
// Extracts the Google MedASR CTC model configuration from the JS config
// object passed by the user.  Looks for an optional "medasr" sub-object on
// `obj`; when it is missing or not an object, a zero-initialized config is
// returned (i.e. the MedASR model is simply not configured).
// NOTE(review): SHERPA_ONNX_ASSIGN_ATTR_STR expands against the locals `o`
// (the JS sub-object) and `c` (the C config struct) — presumably it copies
// o.model into c.model as a heap-allocated C string that is later released
// in FreeConfig via SHERPA_ONNX_DELETE_C_STR; confirm against the macro
// definition.
static SherpaOnnxOfflineMedAsrCtcModelConfig GetOfflineMedAsrCtcModelConfig(
    Napi::Object obj) {
  SherpaOnnxOfflineMedAsrCtcModelConfig c;
  memset(&c, 0, sizeof(c));  // all fields default to NULL/0

  if (!obj.Has("medasr") || !obj.Get("medasr").IsObject()) {
    return c;
  }

  Napi::Object o = obj.Get("medasr").As<Napi::Object>();
  SHERPA_ONNX_ASSIGN_ATTR_STR(model, model);

  return c;
}
static SherpaOnnxOfflineDolphinModelConfig GetOfflineDolphinModelConfig(
Napi::Object obj) {
SherpaOnnxOfflineDolphinModelConfig c;
@ -260,6 +276,7 @@ static SherpaOnnxOfflineModelConfig GetOfflineModelConfig(Napi::Object obj) {
c.canary = GetOfflineCanaryModelConfig(o);
c.wenet_ctc = GetOfflineWenetCtcModelConfig(o);
c.omnilingual = GetOfflineOmnilingualAsrCtcModelConfig(o);
c.medasr = GetOfflineMedAsrCtcModelConfig(o);
SHERPA_ONNX_ASSIGN_ATTR_STR(tokens, tokens);
SHERPA_ONNX_ASSIGN_ATTR_INT32(num_threads, numThreads);
@ -354,6 +371,7 @@ static void FreeConfig(const SherpaOnnxOfflineRecognizerConfig &c) {
SHERPA_ONNX_DELETE_C_STR(c.model_config.wenet_ctc.model);
SHERPA_ONNX_DELETE_C_STR(c.model_config.omnilingual.model);
SHERPA_ONNX_DELETE_C_STR(c.model_config.medasr.model);
SHERPA_ONNX_DELETE_C_STR(c.model_config.tokens);
SHERPA_ONNX_DELETE_C_STR(c.model_config.provider);

View File

@ -126,6 +126,7 @@ The following tables list the examples in this folder.
|[./test_asr_non_streaming_nemo_ctc.js](./test_asr_non_streaming_nemo_ctc.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) CTC model with greedy search|
|[./test_asr_non_streaming_wenet_ctc.js](./test_asr_non_streaming_wenet_ctc.js)|Non-streaming speech recognition from a file using a [u2pp_conformer_yue](https://huggingface.co/ASLP-lab/WSYue-ASR/tree/main/u2pp_conformer_yue) CTC model with greedy search|
|[./test_asr_non_streaming_omnilingual_asr_ctc.js](./test_asr_non_streaming_omnilingual_asr_ctc.js)|Non-streaming speech recognition from a file using an [Omnilingual-ASR](https://github.com/facebookresearch/omnilingual-asr) CTC model with greedy search|
|[./test_asr_non_streaming_medasr_ctc.js](./test_asr_non_streaming_medasr_ctc.js)|Non-streaming speech recognition from a file using a [Google MedASR](https://github.com/google-health/medasr) CTC model with greedy search|
|[./test_asr_non_streaming_nemo_canary.js](./test_asr_non_streaming_nemo_canary.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) [Canary](https://k2-fsa.github.io/sherpa/onnx/nemo/canary.html#sherpa-onnx-nemo-canary-180m-flash-en-es-de-fr-int8-english-spanish-german-french) model|
|[./test_asr_non_streaming_zipformer_ctc.js](./test_asr_non_streaming_zipformer_ctc.js)|Non-streaming speech recognition from a file using a Zipformer CTC model with greedy search|
|[./test_asr_non_streaming_nemo_parakeet_tdt_v2.js](./test_asr_non_streaming_nemo_parakeet_tdt_v2.js)|Non-streaming speech recognition from a file using a [NeMo](https://github.com/NVIDIA/NeMo) [parakeet-tdt-0.6b-v2](https://k2-fsa.github.io/sherpa/onnx/pretrained_models/offline-transducer/nemo-transducer-models.html#sherpa-onnx-nemo-parakeet-tdt-0-6b-v2-int8-english) model with greedy search|
@ -428,6 +429,16 @@ npm install naudiodon2
node ./test_vad_asr_non_streaming_nemo_ctc_microphone.js
```
### Non-streaming speech recognition with Google MedASR CTC models
```bash
wget https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-medasr-ctc-en-int8-2025-12-25.tar.bz2
tar xvf sherpa-onnx-medasr-ctc-en-int8-2025-12-25.tar.bz2
rm sherpa-onnx-medasr-ctc-en-int8-2025-12-25.tar.bz2
node ./test_asr_non_streaming_medasr_ctc.js
```
### Non-streaming speech recognition with Omnilingual ASR CTC models
```bash

View File

@ -0,0 +1,46 @@
// Copyright (c) 2025 Xiaomi Corporation

// Non-streaming speech recognition example for the Google MedASR CTC model.
// Please download test files from
// https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
const sherpa_onnx = require('sherpa-onnx-node');

// Recognizer configuration: 16 kHz / 80-dim features, MedASR CTC model.
const config = {
  featConfig: {
    sampleRate: 16000,
    featureDim: 80,
  },
  modelConfig: {
    medasr: {
      model: './sherpa-onnx-medasr-ctc-en-int8-2025-12-25/model.int8.onnx',
    },
    tokens: './sherpa-onnx-medasr-ctc-en-int8-2025-12-25/tokens.txt',
    numThreads: 2,
    provider: 'cpu',
    debug: 1,
  },
};

const wavePath =
    './sherpa-onnx-medasr-ctc-en-int8-2025-12-25/test_wavs/0.wav';

const recognizer = new sherpa_onnx.OfflineRecognizer(config);
console.log('Started')

const startMs = Date.now();

// Decode the whole wave file in one shot (non-streaming API).
const stream = recognizer.createStream();
const wave = sherpa_onnx.readWave(wavePath);
stream.acceptWaveform({sampleRate: wave.sampleRate, samples: wave.samples});
recognizer.decode(stream);
const result = recognizer.getResult(stream);

const stopMs = Date.now();
console.log('Done')

// Report timing as a real-time factor (elapsed time / audio duration).
const elapsedSeconds = (stopMs - startMs) / 1000;
const audioSeconds = wave.samples.length / wave.sampleRate;
const realTimeFactor = elapsedSeconds / audioSeconds;
console.log('Wave duration', audioSeconds.toFixed(3), 'seconds')
console.log('Elapsed', elapsedSeconds.toFixed(3), 'seconds')
console.log(
    `RTF = ${elapsedSeconds.toFixed(3)}/${audioSeconds.toFixed(3)} =`,
    realTimeFactor.toFixed(3))
console.log(wavePath)
console.log('result\n', result)