mirror of
https://github.com/k2-fsa/sherpa-onnx.git
synced 2026-01-09 07:41:06 +08:00
This PR adds support for Wenet non-streaming CTC models to sherpa-onnx by introducing the SherpaOnnxOfflineWenetCtcModelConfig struct and integrating it across all language bindings and APIs. The implementation follows the same pattern as other CTC model types like Zipformer CTC. - Introduces SherpaOnnxOfflineWenetCtcModelConfig struct with a single model field for the ONNX model path - Adds the new config to SherpaOnnxOfflineModelConfig and updates all language bindings (C++, Pascal, Kotlin, Java, Go, C#, Swift, JavaScript, etc.) - Provides comprehensive examples and tests across all supported platforms and languages
69 lines
1.8 KiB
Swift
69 lines
1.8 KiB
Swift
import AVFoundation
|
|
|
|
extension AudioBuffer {
  /// Copies this buffer's contents into a freshly allocated `[Float]`.
  ///
  /// Assumes the buffer holds 32-bit float samples (asserted by the caller
  /// via `AVAudioCommonFormat.pcmFormatFloat32`).
  func array() -> [Float] {
    // The CoreAudio overlay provides UnsafeBufferPointer(AudioBuffer).
    let samples = UnsafeBufferPointer<Float>(self)
    return [Float](samples)
  }
}
|
|
|
|
extension AVAudioPCMBuffer {
  /// Returns the samples of the first (mono) channel as a `[Float]`.
  ///
  /// Reads `mBuffers` directly, so this is only meaningful for the
  /// single-channel buffers this example works with.
  func array() -> [Float] {
    let monoBuffer = self.audioBufferList.pointee.mBuffers
    return monoBuffer.array()
  }
}
|
|
|
|
/// Decodes a bundled Cantonese test wave with a non-streaming Wenet CTC
/// model and prints the recognized text (and timestamps, when available).
///
/// Expects the model directory to have been downloaded next to the
/// executable; a missing file is a setup error, so `try!` is acceptable
/// in this example.
func run() {
  let model =
    "./sherpa-onnx-wenetspeech-yue-u2pp-conformer-ctc-zh-en-cantonese-int8-2025-09-10/model.int8.onnx"
  let tokens =
    "./sherpa-onnx-wenetspeech-yue-u2pp-conformer-ctc-zh-en-cantonese-int8-2025-09-10/tokens.txt"

  let wenetCtc = sherpaOnnxOfflineWenetCtcModelConfig(
    model: model
  )

  let modelConfig = sherpaOnnxOfflineModelConfig(
    tokens: tokens,
    debug: 0,
    wenetCtc: wenetCtc
  )

  // 16 kHz / 80-dim fbank matches what the model was trained with.
  let featConfig = sherpaOnnxFeatureConfig(
    sampleRate: 16000,
    featureDim: 80
  )

  var config = sherpaOnnxOfflineRecognizerConfig(
    featConfig: featConfig,
    modelConfig: modelConfig
  )

  let recognizer = SherpaOnnxOfflineRecognizer(config: &config)

  let filePath =
    "./sherpa-onnx-wenetspeech-yue-u2pp-conformer-ctc-zh-en-cantonese-int8-2025-09-10/test_wavs/yue-0.wav"
  // Idiomatic Foundation URL instead of NSURL + `as URL` bridging cast.
  let fileURL = URL(fileURLWithPath: filePath)
  let audioFile = try! AVAudioFile(forReading: fileURL)

  let audioFormat = audioFile.processingFormat
  // The decoder expects mono float32 samples.
  assert(audioFormat.channelCount == 1)
  assert(audioFormat.commonFormat == AVAudioCommonFormat.pcmFormatFloat32)

  let audioFrameCount = UInt32(audioFile.length)
  // AVAudioPCMBuffer's initializer is failable; fail loudly with a reason
  // instead of force-unwrapping later.
  guard
    let audioFileBuffer = AVAudioPCMBuffer(
      pcmFormat: audioFormat, frameCapacity: audioFrameCount)
  else {
    fatalError("Failed to allocate a PCM buffer of \(audioFrameCount) frames")
  }

  try! audioFile.read(into: audioFileBuffer)

  // Non-optional binding: the buffer is known to exist here, so no IUO needed.
  let samples = audioFileBuffer.array()
  let result = recognizer.decode(samples: samples, sampleRate: Int(audioFormat.sampleRate))
  print("\nresult is:\n\(result.text)")
  if !result.timestamps.isEmpty {
    print("\ntimestamps is:\n\(result.timestamps)")
  }
}
|
|
|
|
/// Executable entry point for this example program.
@main
struct App {
  /// Delegates straight to the decoding example.
  static func main() { run() }
}
|