Mirror of https://github.com/alphacep/vosk-api.git
Synced 2026-02-05 04:49:53 +08:00
* Added get_model_by_name, get_model_by_lang, and get_model methods to the Model class
* Changed module imports to use components; introduced the MODELS_HOME_DIR constant; simplified code
* Added new model folders to __init__; updated samples and the transcriber binary for the new model loader
* Reverted the lang argument in cli.py to args.lang
* Check 3 directories for models instead of 1
* cli.py: added 3 model arguments instead of 1; __init__.py: changed get_model_path to run get_model_by_name/lang in the current directory
* Deleted the default environment variable
* cli.py: changed arg_name; __init__.py: renamed the constant; model loading now only for the last directory
* Deleted an unused method
* Changed the by_name and by_lang methods; added a download_model method
* Deleted environment-variable initialization
* Deleted a print()
* Deleted unused modules
* Added a progress bar; added the AppData/Local/vosk folder to the model search
* Changed the download_model method; added a my_hook method
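The notes above describe the model-loading helpers in the Python package. A minimal sketch of how model loading looks from user code, assuming a vosk build that accepts the lang, model_name and model_path constructor arguments (the model name shown is only an example):

    from vosk import Model

    # Pick a default model for a language; it is downloaded (with a progress
    # bar) and cached locally if not found in the model search folders.
    model = Model(lang="en-us")

    # Or request a specific published model by name.
    model = Model(model_name="vosk-model-small-en-us-0.15")

    # Or point directly at an already-unpacked model directory.
    model = Model(model_path="/path/to/model")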
53 lines
1.4 KiB
Python
Executable File
#!/usr/bin/env python3

from vosk import Model, KaldiRecognizer, SetLogLevel

import sys
import subprocess
import srt
import json
import datetime

# Suppress Kaldi/Vosk log output.
SetLogLevel(-1)
sample_rate = 16000

# Load the default English model (downloaded and cached on first use) and
# create a recognizer that also returns per-word timing information.
model = Model(lang="en-us")
rec = KaldiRecognizer(model, sample_rate)
rec.SetWords(True)
# Decode the input file given on the command line with ffmpeg and stream it
# as 16 kHz mono 16-bit PCM, which the recognizer reads below.
process = subprocess.Popen(['ffmpeg', '-loglevel', 'quiet', '-i',
                            sys.argv[1],
                            '-ar', str(sample_rate), '-ac', '1', '-f', 's16le', '-'],
                           stdout=subprocess.PIPE)
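# Note: a sketch of an alternative input path, assuming the input is already a
# 16 kHz mono 16-bit PCM WAV file; it reads frames with the standard-library
# wave module instead of piping through ffmpeg (not used by this script):
#
#   import wave
#   wf = wave.open(sys.argv[1], "rb")
#   while True:
#       data = wf.readframes(4000)
#       if len(data) == 0:
#           break
#       rec.AcceptWaveform(data)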
# Maximum number of words per subtitle.
WORDS_PER_LINE = 7
def transcribe():
    results = []
    subs = []

    # Feed the PCM stream to the recognizer in 4000-byte chunks and collect
    # the JSON result of every finalized utterance.
    while True:
        data = process.stdout.read(4000)
        if len(data) == 0:
            break
        if rec.AcceptWaveform(data):
            results.append(rec.Result())
    results.append(rec.FinalResult())

    # Each result carries a 'result' list of words with 'word', 'start' and
    # 'end' fields; group them into subtitles of WORDS_PER_LINE words each.
    for res in results:
        jres = json.loads(res)
        if 'result' not in jres:
            continue
        words = jres['result']
        for j in range(0, len(words), WORDS_PER_LINE):
            line = words[j : j + WORDS_PER_LINE]
            s = srt.Subtitle(index=len(subs),
                             content=" ".join([l['word'] for l in line]),
                             start=datetime.timedelta(seconds=line[0]['start']),
                             end=datetime.timedelta(seconds=line[-1]['end']))
            subs.append(s)
    return subs
# Compose the collected subtitles into SRT format and print to stdout.
print(srt.compose(transcribe()))
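A typical invocation, assuming the script is saved as test_srt.py (name assumed here), with ffmpeg on the PATH and the vosk and srt packages installed; it prints SRT subtitles for the given audio or video file to stdout:

    python3 test_srt.py input.mp4 > subtitles.srt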