import os
import re
import json

import inflect
import torch
from tokenizers import Tokenizer
from unidecode import unidecode


# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')


# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]


def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text
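
# Illustrative example (not from the original source): the abbreviation patterns are
# case-insensitive and consume the trailing period, so roughly:
#
#   expand_abbreviations('dr. jones met col. mustard.')
#   # -> 'doctor jones met colonel mustard.'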


_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')


def _remove_commas(m):
    return m.group(1).replace(',', '')


def _expand_decimal_point(m):
    return m.group(1).replace('.', ' point ')


def _expand_dollars(m):
    match = m.group(1)
    parts = match.split('.')
    if len(parts) > 2:
        return match + ' dollars'  # Unexpected format
    dollars = int(parts[0]) if parts[0] else 0
    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
    if dollars and cents:
        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
        cent_unit = 'cent' if cents == 1 else 'cents'
        return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
    elif dollars:
        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
        return '%s %s' % (dollars, dollar_unit)
    elif cents:
        cent_unit = 'cent' if cents == 1 else 'cents'
        return '%s %s' % (cents, cent_unit)
    else:
        return 'zero dollars'


def _expand_ordinal(m):
    return _inflect.number_to_words(m.group(0))


def _expand_number(m):
    num = int(m.group(0))
    if num > 1000 and num < 3000:
        if num == 2000:
            return 'two thousand'
        elif num > 2000 and num < 2010:
            return 'two thousand ' + _inflect.number_to_words(num % 100)
        elif num % 100 == 0:
            return _inflect.number_to_words(num // 100) + ' hundred'
        else:
            return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
    else:
        return _inflect.number_to_words(num, andword='')
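
# Rough behaviour for year-like values (illustrative; the exact wording comes from
# the installed inflect version):
#
#   1984 -> 'nineteen eighty-four'
#   2003 -> 'two thousand three'
#   2000 -> 'two thousand'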


def normalize_numbers(text):
    text = re.sub(_comma_number_re, _remove_commas, text)
    text = re.sub(_pounds_re, r'\1 pounds', text)
    text = re.sub(_dollars_re, _expand_dollars, text)
    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
    text = re.sub(_ordinal_re, _expand_ordinal, text)
    text = re.sub(_number_re, _expand_number, text)
    return text
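
# Sketch of how the rules combine (illustrative example, not asserted by the source):
# "$3.50" is first rewritten by _expand_dollars to "3 dollars, 50 cents" and the
# remaining digits are then spelled out by _expand_number, so roughly:
#
#   normalize_numbers('It costs $3.50.')   # -> 'It costs three dollars, fifty cents.'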


def expand_numbers(text):
    return normalize_numbers(text)


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)


def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_numbers(text)
    text = expand_abbreviations(text)
    text = collapse_whitespace(text)
    text = text.replace('"', '')
    return text
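
# Rough end-to-end example of the English pipeline (illustrative; exact number
# wording depends on the installed inflect version):
#
#   english_cleaners('Mr. Smith paid £100.')   # -> 'mister smith paid one hundred pounds.'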


def lev_distance(s1, s2):
    '''Iterative (two-row) Levenshtein edit distance between two sequences.'''
    if len(s1) > len(s2):
        s1, s2 = s2, s1

    distances = range(len(s1) + 1)
    for i2, c2 in enumerate(s2):
        distances_ = [i2 + 1]
        for i1, c1 in enumerate(s1):
            if c1 == c2:
                distances_.append(distances[i1])
            else:
                distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
        distances = distances_
    return distances[-1]
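
# Quick sanity check with the classic textbook pair (example added here, not from the source):
#
#   lev_distance('kitten', 'sitting')   # -> 3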


DEFAULT_VOCAB_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/tokenizer.json')


class VoiceBpeTokenizer:
    '''BPE tokenizer backed by the HuggingFace `tokenizers` library, with optional text preprocessing.'''

    def __init__(self, vocab_file=DEFAULT_VOCAB_FILE, preprocess=None):
        with open(vocab_file, 'r', encoding='utf-8') as f:
            vocab = json.load(f)

        self.language = vocab['model']['language'] if 'language' in vocab['model'] else None

        if preprocess is None:
            self.preprocess = 'pre_tokenizer' in vocab and vocab['pre_tokenizer']
        else:
            self.preprocess = preprocess

        if vocab_file is not None:
            self.tokenizer = Tokenizer.from_file(vocab_file)

    def preprocess_text(self, txt):
        # Japanese text is converted to kana with pykakasi before basic cleaning;
        # all other text goes through the English cleaning pipeline.
        if self.language == 'ja':
            import pykakasi

            kks = pykakasi.kakasi()
            results = kks.convert(txt)
            words = []
            for result in results:
                words.append(result['kana'])
            txt = " ".join(words)
            txt = basic_cleaners(txt)
        else:
            txt = english_cleaners(txt)
        return txt

    def encode(self, txt):
        if self.preprocess:
            txt = self.preprocess_text(txt)
        txt = txt.replace(' ', '[SPACE]')
        return self.tokenizer.encode(txt).ids

    def decode(self, seq):
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(' ', '')
        txt = txt.replace('[SPACE]', ' ')
        txt = txt.replace('[STOP]', '')
        txt = txt.replace('[UNK]', '')
        return txt
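

# Minimal usage sketch (assumes the default vocab file exists at ../data/tokenizer.json
# relative to this module; the example string is illustrative, not from the source):
#
#   tok = VoiceBpeTokenizer()
#   ids = tok.encode('This is a test.')   # list of BPE token ids
#   txt = tok.decode(ids)                 # back to the cleaned text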