🚀 越南語自動語音識別序列到序列模型
本模型支持輸出規範化文本、標記時間戳以及對多說話者進行分段,能夠高效且準確地處理越南語語音識別任務。
🚀 快速開始
安裝依賴
pip install transformers torchaudio
代碼示例
from transformers import SpeechEncoderDecoderModel
from transformers import AutoFeatureExtractor, AutoTokenizer, GenerationConfig
import torchaudio
import torch
# Hub checkpoint: wav2vec2 speech encoder + BARTpho text decoder
# (BARTpho is a Vietnamese BART — presumably this is a Vietnamese ASR model; verify against the model card).
model_path = 'nguyenvulebinh/wav2vec2-bartpho'
# Load the pretrained encoder-decoder model and switch it to inference mode.
model = SpeechEncoderDecoderModel.from_pretrained(model_path).eval()
# Feature extractor: converts/pads raw waveforms into model input tensors.
feature_extractor = AutoFeatureExtractor.from_pretrained(model_path)
# Tokenizer: encodes decoder prefixes and decodes generated token ids.
tokenizer = AutoTokenizer.from_pretrained(model_path)
# Run on GPU when one is available; otherwise stay on CPU.
if torch.cuda.is_available():
    model = model.cuda()
def decode_tokens(token_ids, skip_special_tokens=True, time_precision=0.02):
    """Render a generated id sequence as text with inline timestamp markers.

    Ids at or above the tokenizer vocabulary size are timestamp tokens; each
    one is rendered as " |<seconds>| " (seconds = offset * time_precision).
    All other ids are accumulated into segments and decoded with the
    module-level ``tokenizer``.
    """
    first_timestamp_id = tokenizer.vocab_size
    # Alternating structure: lists of text-token ids, interleaved with
    # already-formatted timestamp strings.
    segments = [[]]
    for tid in token_ids:
        if tid < first_timestamp_id:
            segments[-1].append(tid)
        else:
            seconds = (tid - first_timestamp_id) * time_precision
            segments.append(f" |{seconds:.2f}| ")
            segments.append([])
    pieces = []
    for seg in segments:
        if isinstance(seg, str):
            pieces.append(seg)
        else:
            pieces.append(tokenizer.decode(seg, skip_special_tokens=skip_special_tokens))
    # Tighten marker spacing where decoded text supplies angle brackets.
    return "".join(pieces).replace("< |", "<|").replace("| >", "|>")
def decode_wav(audio_wavs, asr_model, prefix=""):
    """Transcribe a batch of waveforms with beam-search generation.

    audio_wavs: list of 1-D waveform tensors.
    asr_model: the SpeechEncoderDecoderModel to run inference with.
    prefix: optional text prepended as decoder context for every utterance.
    Returns a list of decoded transcript strings, one per input waveform.
    """
    target_device = next(asr_model.parameters()).device
    # Pad the raw waveforms into one batched tensor plus an attention mask.
    batch = feature_extractor.pad(
        [{"input_values": wav} for wav in audio_wavs],
        padding=True,
        max_length=None,
        pad_to_multiple_of=None,
        return_tensors="pt",
    )
    # Encode the same prefix for every utterance; the [..., :-1] slice drops
    # the final encoded token (NOTE(review): presumably EOS, so that
    # generation continues from the prefix — confirm with the tokenizer).
    prefix_ids = tokenizer.batch_encode_plus(
        [prefix] * len(audio_wavs), return_tensors="pt"
    )['input_ids'][..., :-1]
    generated = asr_model.generate(
        batch['input_values'].to(target_device),
        attention_mask=batch['attention_mask'].to(target_device),
        decoder_input_ids=prefix_ids.to(target_device),
        generation_config=GenerationConfig(decoder_start_token_id=tokenizer.bos_token_id),
        max_length=250,
        num_beams=25,
        no_repeat_ngram_size=4,
        num_return_sequences=1,
        early_stopping=True,
        return_dict_in_generate=True,
        output_scores=True,
    )
    # Convert each beam-search winner into timestamped text.
    return [decode_tokens(ids) for ids in generated.sequences]
# Demo: load 'sample.wav' and print its transcript.
# NOTE(review): .squeeze() assumes the file is mono (single channel) — a
# multi-channel file would keep its channel dimension and likely fail downstream.
print(decode_wav([torchaudio.load('sample.wav')[0].squeeze()], model))
📄 許可證
本項目採用CC BY-NC 4.0許可證。
📚 引用說明
本倉庫使用了以下論文中的思想。如果使用此模型來幫助產生已發表的研究結果或將其集成到其他軟件中,請引用該論文。
@INPROCEEDINGS{10446589,
author={Nguyen, Thai-Binh and Waibel, Alexander},
booktitle={ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
title={Synthetic Conversations Improve Multi-Talker ASR},
year={2024},
volume={},
number={},
pages={10461-10465},
keywords={Systematics;Error analysis;Knowledge based systems;Oral communication;Signal processing;Data models;Acoustics;multi-talker;asr;synthetic conversation},
doi={10.1109/ICASSP48485.2024.10446589}
}
📞 聯繫方式
如果您有任何問題或建議,可以通過以下方式聯繫我們:
- 郵箱:nguyenvulebinh@gmail.com
- 關注Twitter:
