web2py.transcription/modules/transcription_tools/__init__.py

# REQUIREMENTS
#
# module srt
# module vosk
# module webvtt-py
# language model (Vosk model, see INSTALL below)
#
# INSTALL
#
# cd web2py/applications/transcription
# pip3 install -t modules srt
# pip3 install -t modules vosk
# pip3 install -t modules webvtt-py
# cd private
# wget https://alphacephei.com/vosk/models/vosk-model-de-0.21.zip
# unzip vosk-model-de-0.21.zip
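#
# USAGE (illustrative sketch, not part of the original instructions: the model
# and media paths below are assumptions and must be adapted to your setup)
#
#   from transcription_tools import vtt, vtt_single_line
#
#   model_path = 'applications/transcription/private/vosk-model-de-0.21'
#   media_path = 'applications/transcription/uploads/recording.mp4'
#
#   webvtt_text = vtt(model_path, media_path)               # WebVTT as str, wrapped captions
#   webvtt_bytes = vtt_single_line(model_path, media_path)  # WebVTT as bytes, 7 words per caption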
from vosk import Model, KaldiRecognizer, SetLogLevel
from webvtt import WebVTT, Caption
import sys
import os
import wave
import subprocess
import srt
import json
import datetime
import textwrap
def vtt_single_line(model_path, media_path):

    sample_rate = 16000

    model = Model(model_path)

    rec = KaldiRecognizer(model, sample_rate)
    rec.SetWords(True)

    # decode the media file to 16 kHz 16-bit mono PCM with ffmpeg
    process = subprocess.Popen(['ffmpeg', '-loglevel', 'quiet', '-i',
                                media_path,
                                '-ar', str(sample_rate),
                                '-ac', '1', '-f', 's16le', '-'],
                               stdout=subprocess.PIPE)

    WORDS_PER_LINE = 7

    def transcribe():
        results = []
        subs = []
        while True:
            data = process.stdout.read(4000)
            if len(data) == 0:
                break
            if rec.AcceptWaveform(data):
                results.append(rec.Result())
        results.append(rec.FinalResult())

        for i, res in enumerate(results):
            jres = json.loads(res)
            if 'result' not in jres:
                continue
            words = jres['result']
            # group recognized words into captions of at most WORDS_PER_LINE words
            for j in range(0, len(words), WORDS_PER_LINE):
                line = words[j:j + WORDS_PER_LINE]
                s = srt.Subtitle(
                    index=len(subs),
                    content=" ".join([l['word'] for l in line]),
                    start=datetime.timedelta(seconds=line[0]['start']),
                    end=datetime.timedelta(seconds=line[-1]['end'])
                )
                subs.append(s)
        return subs

    srt_str = srt.compose(transcribe())  # create SRT string

    # convert the SRT string to WebVTT with ffmpeg
    process1 = subprocess.Popen(
        ['ffmpeg', '-loglevel', 'quiet', '-i', '-', '-f', 'webvtt', '-'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    webvtt = process1.communicate(input=bytes(srt_str, 'utf-8'))[0]
    return webvtt  # WebVTT document as bytes
def vtt(model_path, media_path):

    sample_rate = 16000
    model = Model(model_path)
    rec = KaldiRecognizer(model, sample_rate)
    rec.SetWords(True)

    WORDS_PER_LINE = 7

    def timeString(seconds):
        # format seconds as a WebVTT timestamp (H:MM:SS.mmm)
        minutes = seconds / 60
        seconds = seconds % 60
        hours = int(minutes / 60)
        minutes = int(minutes % 60)
        return '%i:%02i:%06.3f' % (hours, minutes, seconds)

    def transcribe():
        # decode the media file to 16 kHz 16-bit mono PCM with ffmpeg
        command = ['ffmpeg', '-nostdin', '-loglevel', 'quiet', '-i', media_path,
                   '-ar', str(sample_rate), '-ac', '1', '-f', 's16le', '-']
        process = subprocess.Popen(command, stdout=subprocess.PIPE)

        results = []
        while True:
            data = process.stdout.read(4000)
            if len(data) == 0:
                break
            if rec.AcceptWaveform(data):
                results.append(rec.Result())
        results.append(rec.FinalResult())

        # build one caption per recognized utterance, wrapped by textwrap
        vtt = WebVTT()
        for i, res in enumerate(results):
            words = json.loads(res).get('result')
            if not words:
                continue
            start = timeString(words[0]['start'])
            end = timeString(words[-1]['end'])
            content = ' '.join([w['word'] for w in words])
            caption = Caption(start, end, textwrap.fill(content))
            vtt.captions.append(caption)
        return vtt.content  # WebVTT document as str

    return transcribe()
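

# Minimal command-line sketch for manual testing (an addition, not part of the
# web2py application; the argument handling here is an assumption):
#
#   python3 __init__.py /path/to/vosk-model-de-0.21 /path/to/media.mp4
if __name__ == '__main__':
    SetLogLevel(-1)  # silence Kaldi/Vosk log output
    print(vtt(sys.argv[1], sys.argv[2]))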