[fast_inference] Revert strategies, reduce the impact of padding, expose options, sync code (#986)
* Update README
* Optimize English G2P
* docs: change awkward expression
* docs: update Changelog_KO.md
* Fix CN punctuation in EN, add 's match
* Adjust normalize and g2p logic
* Update zh_CN.json
* Update README (#827): update some outdated file paths and commands
* Fix English homographs, adjust dictionary hot reloading, add name matching (#869)
  * Fix homograph dict
  * Add JSON in dict
  * Adjust hot dict to hot reload
  * Add English name dict
  * Adjust get name dict logic
* Make API Great Again (#894)
  * Add zh/jp/en mix
  * Optimize code readability and formatted output
  * Try OGG streaming
  * Add stream mode arg
  * Add media type arg
  * Add cut punc arg
  * Eliminate punc risk
* Update README (#895)
* Update README
* update README
* update README
* fix typo s/Licence /License (#904)
* fix reformat cmd (#917) Co-authored-by: starylan <starylan@outlook.com>
* Update README.md
* Normalize chinese arithmetic operations (#947)
* Change the mask strategy for training and inference, to fix the repetition issue that appears when batch_size > 1
* Sync code from the main branch; add a "keep random" option
* Fix the missing uvr5 model when running colab_webui.ipynb in Colab (#968): downloading the uvr5 models with git in Colab fails with "fatal: destination path 'uvr5_weights' already exists and is not an empty directory." Deleting the uvr5_weights folder originally downloaded from this repo before downloading fixes the problem.
* [ASR] Fix FasterWhisper failing to traverse the input path (#956)
  * remove glob
  * rename
  * reset mirror pos
* Revert the mask strategy; revert the pad strategy; add a padding_mask in T2SBlock to reduce the impact of padding; expose the repetition_penalty parameter so users can tune the strength of the repetition penalty themselves; add a parallel_infer parameter to enable or disable parallel inference (when disabled, behavior matches the 0307 version); add a "keep random" option to the webui; sync code from the main branch.
* Remove unused comments

---------

Co-authored-by: Lion <drain.daters.0p@icloud.com>
Co-authored-by: RVC-Boss <129054828+RVC-Boss@users.noreply.github.com>
Co-authored-by: KamioRinn <snowsdream@live.com>
Co-authored-by: Pengoose <pengoose_dev@naver.com>
Co-authored-by: Yuan-Man <68322456+Yuan-ManX@users.noreply.github.com>
Co-authored-by: XXXXRT666 <157766680+XXXXRT666@users.noreply.github.com>
Co-authored-by: KamioRinn <63162909+KamioRinn@users.noreply.github.com>
Co-authored-by: Lion-Wu <130235128+Lion-Wu@users.noreply.github.com>
Co-authored-by: digger yu <digger-yu@outlook.com>
Co-authored-by: SapphireLab <36986837+SapphireLab@users.noreply.github.com>
Co-authored-by: starylan <starylan@outlook.com>
Co-authored-by: shadow01a <141255649+shadow01a@users.noreply.github.com>
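For context, a minimal sketch of how the newly exposed options might be driven from the fast_inference pipeline. Only repetition_penalty and parallel_infer are named by this commit; the TTS_infer_pack module path, the config file location, and the remaining request keys are assumptions about the branch layout, not part of this diff.

# Hedged sketch: exercising the options this PR exposes. repetition_penalty
# and parallel_infer come from the commit message; module path, config path,
# and the other request keys are assumed.
from GPT_SoVITS.TTS_infer_pack.TTS import TTS, TTS_Config

tts = TTS(TTS_Config("GPT_SoVITS/configs/tts_infer.yaml"))  # assumed config location
request = {
    "text": "Hello world.",
    "text_lang": "en",
    "ref_audio_path": "ref.wav",
    "prompt_text": "reference transcript",
    "prompt_lang": "en",
    "repetition_penalty": 1.35,  # strength of the repetition penalty, now user-tunable
    "parallel_infer": True,      # False falls back to the serial 0307-version behavior
}
for sample_rate, audio in tts.run(request):
    ...  # consume synthesized audio chunks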
GPT_SoVITS/text/cmudict.rep (232173 changed lines)
File diff suppressed because it is too large
GPT_SoVITS/text/engdict-hot.rep

@@ -1 +1,2 @@
-CHATGPT CH AE1 T JH IY1 P IY1 T IY1
+CHATGPT CH AE1 T JH IY1 P IY1 T IY1
+JSON JH EY1 S AH0 N
Binary file not shown.
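As a quick illustration, this is how the JSON line added above is ingested, mirroring the parsing done by hot_reload_hot() in the english.py diff below (variable names are illustrative):

# Minimal sketch of how hot_reload_hot() parses a hot-dict line such as the
# one added above: first field is the word, the rest are its phonemes.
line = "JSON JH EY1 S AH0 N"
word_split = line.strip().split(" ")
word = word_split[0].lower()   # "json"
g2p_entry = [word_split[1:]]   # [['JH', 'EY1', 'S', 'AH0', 'N']]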
GPT_SoVITS/text/english.py

@@ -1,18 +1,26 @@
 import pickle
 import os
 import re
+import wordsegment
 from g2p_en import G2p
 
 from string import punctuation
 
 from text import symbols
 
+import unicodedata
+from builtins import str as unicode
+from g2p_en.expand import normalize_numbers
+from nltk.tokenize import TweetTokenizer
+word_tokenize = TweetTokenizer().tokenize
+from nltk import pos_tag
+
 current_file_path = os.path.dirname(__file__)
 CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
 CMU_DICT_FAST_PATH = os.path.join(current_file_path, "cmudict-fast.rep")
 CMU_DICT_HOT_PATH = os.path.join(current_file_path, "engdict-hot.rep")
 CACHE_PATH = os.path.join(current_file_path, "engdict_cache.pickle")
-_g2p = G2p()
+NAMECACHE_PATH = os.path.join(current_file_path, "namedict_cache.pickle")
 
 arpa = {
     "AH0",
@@ -90,7 +98,7 @@ arpa = {
 
 
 def replace_phs(phs):
-    rep_map = {";": ",", ":": ",", "'": "-", '"': "-"}
+    rep_map = {"'": "-"}
    phs_new = []
    for ph in phs:
        if ph in symbols:
@@ -112,7 +120,7 @@ def read_dict():
             if line_index >= start_line:
                 line = line.strip()
                 word_split = line.split("  ")
-                word = word_split[0]
+                word = word_split[0].lower()
 
                 syllable_split = word_split[1].split(" - ")
                 g2p_dict[word] = []
@@ -132,16 +140,11 @@ def read_dict_new():
         line = f.readline()
         line_index = 1
         while line:
-            if line_index >= 49:
+            if line_index >= 57:
                 line = line.strip()
                 word_split = line.split("  ")
-                word = word_split[0]
-
-                syllable_split = word_split[1].split(" - ")
-                g2p_dict[word] = []
-                for syllable in syllable_split:
-                    phone_split = syllable.split(" ")
-                    g2p_dict[word].append(phone_split)
+                word = word_split[0].lower()
+                g2p_dict[word] = [word_split[1].split(" ")]
 
             line_index = line_index + 1
             line = f.readline()
@@ -153,14 +156,16 @@ def read_dict_new():
             if line_index >= 0:
                 line = line.strip()
                 word_split = line.split(" ")
-                word = word_split[0]
-                if word not in g2p_dict:
-                    g2p_dict[word] = []
-                    g2p_dict[word].append(word_split[1:])
+                word = word_split[0].lower()
+                g2p_dict[word] = [word_split[1:]]
 
             line_index = line_index + 1
             line = f.readline()
 
+    return g2p_dict
+
+
+def hot_reload_hot(g2p_dict):
     with open(CMU_DICT_HOT_PATH) as f:
         line = f.readline()
         line_index = 1
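The point of splitting hot_reload_hot() out of read_dict_new() is that the pickle cache now holds only the static dictionaries, and hot entries are layered on afterwards, so edits to engdict-hot.rep take effect without deleting engdict_cache.pickle. A sketch of the resulting flow (the full get_dict() appears in the next hunk):

# Sketch of the cache-then-overlay flow in get_dict() after this split.
g2p_dict = read_dict_new()           # static dicts, cached once to engdict_cache.pickle
cache_dict(g2p_dict, CACHE_PATH)
g2p_dict = hot_reload_hot(g2p_dict)  # engdict-hot.rep is re-read on every call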
@@ -168,14 +173,13 @@ def read_dict_new():
             if line_index >= 0:
                 line = line.strip()
                 word_split = line.split(" ")
-                word = word_split[0]
-                #if word not in g2p_dict:
-                g2p_dict[word] = []
-                g2p_dict[word].append(word_split[1:])
+                word = word_split[0].lower()
+                # custom pronunciation entries overwrite the dict directly
+                g2p_dict[word] = [word_split[1:]]
 
             line_index = line_index + 1
             line = f.readline()
 
 
     return g2p_dict
 
@@ -192,43 +196,167 @@ def get_dict():
         g2p_dict = read_dict_new()
         cache_dict(g2p_dict, CACHE_PATH)
 
+    g2p_dict = hot_reload_hot(g2p_dict)
+
     return g2p_dict
 
 
-eng_dict = get_dict()
+def get_namedict():
+    if os.path.exists(NAMECACHE_PATH):
+        with open(NAMECACHE_PATH, "rb") as pickle_file:
+            name_dict = pickle.load(pickle_file)
+    else:
+        name_dict = {}
+
+    return name_dict
 
 
 def text_normalize(text):
     # todo: eng text normalize
-    return text.replace(";", ",")
+    # adapt Chinese punctuation and g2p_en punctuation
+    rep_map = {
+        "[;::,;]": ",",
+        '["’]': "'",
+        "。": ".",
+        "!": "!",
+        "?": "?",
+    }
+    for p, r in rep_map.items():
+        text = re.sub(p, r, text)
+
+    # text formatting taken from g2p_en,
+    # with added upper-case compatibility
+    text = unicode(text)
+    text = normalize_numbers(text)
+    text = ''.join(char for char in unicodedata.normalize('NFD', text)
+                   if unicodedata.category(char) != 'Mn')  # Strip accents
+    text = re.sub("[^ A-Za-z'.,?!\-]", "", text)
+    text = re.sub(r"(?i)i\.e\.", "that is", text)
+    text = re.sub(r"(?i)e\.g\.", "for example", text)
+
+    return text
+
+
+class en_G2p(G2p):
+    def __init__(self):
+        super().__init__()
+        # initialize the word segmenter
+        wordsegment.load()
+
+        # extend the outdated dict and add the name dict
+        self.cmu = get_dict()
+        self.namedict = get_namedict()
+
+        # remove a few abbreviations whose dict pronunciations are wrong
+        for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]:
+            del self.cmu[word.lower()]
+
+        # fix homographs
+        self.homograph2features["read"] = (['R', 'IY1', 'D'], ['R', 'EH1', 'D'], 'VBP')
+        self.homograph2features["complex"] = (['K', 'AH0', 'M', 'P', 'L', 'EH1', 'K', 'S'], ['K', 'AA1', 'M', 'P', 'L', 'EH0', 'K', 'S'], 'JJ')
+
+
+    def __call__(self, text):
+        # tokenization
+        words = word_tokenize(text)
+        tokens = pos_tag(words)  # tuples of (word, tag)
+
+        # steps
+        prons = []
+        for o_word, pos in tokens:
+            # replicate g2p_en's lower-casing logic
+            word = o_word.lower()
+
+            if re.search("[a-z]", word) is None:
+                pron = [word]
+            # handle single letters first
+            elif len(word) == 1:
+                # fix the pronunciation of a standalone A; the original o_word is needed to check for upper case
+                if o_word == "A":
+                    pron = ['EY1']
+                else:
+                    pron = self.cmu[word][0]
+            # g2p_en's original homograph handling
+            elif word in self.homograph2features:  # Check homograph
+                pron1, pron2, pos1 = self.homograph2features[word]
+                if pos.startswith(pos1):
+                    pron = pron1
+                # pos1 being longer than pos only happens for "read"
+                elif len(pos) < len(pos1) and pos == pos1[:len(pos)]:
+                    pron = pron1
+                else:
+                    pron = pron2
+            else:
+                # recursive lookup and prediction
+                pron = self.qryword(o_word)
+
+            prons.extend(pron)
+            prons.extend([" "])
+
+        return prons[:-1]
+
+
+    def qryword(self, o_word):
+        word = o_word.lower()
+
+        # dictionary lookup, except for single letters
+        if len(word) > 1 and word in self.cmu:  # lookup CMU dict
+            return self.cmu[word][0]
+
+        # consult the name dict only when the word is title-cased
+        if o_word.istitle() and word in self.namedict:
+            return self.namedict[word][0]
+
+        # spell out OOV words of length <= 3 letter by letter
+        if len(word) <= 3:
+            phones = []
+            for w in word:
+                # fix the pronunciation of a standalone a; upper case cannot occur here
+                if w == "a":
+                    phones.extend(['EY1'])
+                else:
+                    phones.extend(self.cmu[w][0])
+            return phones
+
+        # try to split off a possessive 's
+        if re.match(r"^([a-z]+)('s)$", word):
+            phones = self.qryword(word[:-2])
+            # after the voiceless consonants P T K F TH HH, 's is pronounced ['S']
+            if phones[-1] in ['P', 'T', 'K', 'F', 'TH', 'HH']:
+                phones.extend(['S'])
+            # after the sibilants S Z SH ZH CH JH, 's is pronounced ['IH1', 'Z'] or ['AH0', 'Z']
+            elif phones[-1] in ['S', 'Z', 'SH', 'ZH', 'CH', 'JH']:
+                phones.extend(['AH0', 'Z'])
+            # after the voiced consonants B D G DH V M N NG L R W Y, 's is pronounced ['Z']
+            # and after the vowels AH0 AH1 AH2 EY0 EY1 EY2 AE0 AE1 AE2 EH0 EH1 EH2 OW0 OW1 OW2 UH0 UH1 UH2 IY0 IY1 IY2 AA0 AA1 AA2 AO0 AO1 AO2
+            # ER ER0 ER1 ER2 UW0 UW1 UW2 AY0 AY1 AY2 AW0 AW1 AW2 OY0 OY1 OY2 IH IH0 IH1 IH2, 's is also pronounced ['Z']
+            else:
+                phones.extend(['Z'])
+            return phones
+
+        # try word segmentation to handle compound words
+        comps = wordsegment.segment(word.lower())
+
+        # if the word cannot be segmented, fall back to model prediction
+        if len(comps)==1:
+            return self.predict(word)
+
+        # if it can be segmented, process the parts recursively
+        return [phone for comp in comps for phone in self.qryword(comp)]
+
+
+_g2p = en_G2p()
 
 
 def g2p(text):
-    phones = []
-    words = re.split(r"([,;.\-\?\!\s+])", text)
-    for w in words:
-        if w.upper() in eng_dict:
-            phns = eng_dict[w.upper()]
-            for ph in phns:
-                phones += ph
-        else:
-            phone_list = list(filter(lambda p: p != " ", _g2p(w)))
-            for ph in phone_list:
-                if ph in arpa:
-                    phones.append(ph)
-                else:
-                    phones.append(ph)
+    # run g2p_en over the whole passage and drop returned phones that are not in the ARPA set
+    phone_list = _g2p(text)
+    phones = [ph if ph != "<unk>" else "UNK" for ph in phone_list if ph not in [" ", "<pad>", "UW", "</s>", "<s>"]]
 
     return replace_phs(phones)
 
 
 if __name__ == "__main__":
     # print(get_dict())
     print(g2p("hello"))
-    print(g2p("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
     # all_phones = set()
     # for k, syllables in eng_dict.items():
     #     for group in syllables:
     #         for ph in group:
     #             all_phones.add(ph)
     # print(all_phones)
+    print(g2p(text_normalize("e.g. I used openai's AI tool to draw a picture.")))
+    print(g2p(text_normalize("In this; paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")))
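A hedged usage sketch of the new pipeline (assumes GPT_SoVITS/ is on sys.path and the NLTK tagger data used by pos_tag is installed):

from text.english import g2p, text_normalize

# "openai's" is out-of-vocabulary: qryword() segments the compound word,
# then the possessive rule appends ['Z'] after the voiced ending of "ai".
print(g2p(text_normalize("I used openai's AI tool.")))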
GPT_SoVITS/text/namedict_cache.pickle (new binary file)
Binary file not shown.
GPT_SoVITS/text/zh_normalization/num.py

@@ -106,6 +106,29 @@ def replace_default_num(match):
     return verbalize_digit(number, alt_one=True)
 
 
+# addition, subtraction, multiplication, division
+RE_ASMD = re.compile(
+    r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))([\+\-\×÷=])((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
+asmd_map = {
+    '+': '加',
+    '-': '减',
+    '×': '乘',
+    '÷': '除',
+    '=': '等于'
+}
+
+
+def replace_asmd(match) -> str:
+    """
+    Args:
+        match (re.Match)
+    Returns:
+        str
+    """
+    result = match.group(1) + asmd_map[match.group(8)] + match.group(9)
+    return result
+
+
 # numeric expressions
 # pure decimals
 RE_DECIMAL_NUM = re.compile(r'(-?)((\d+)(\.\d+))' r'|(\.(\d+))')
@@ -155,7 +178,13 @@ def replace_number(match) -> str:
 # match.group(1) and match.group(8) are copy from RE_NUMBER
 
 RE_RANGE = re.compile(
-    r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))[-~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
+    r"""
+    (?<![\d\+\-\×÷=])      # lookbehind: no other digit or operator immediately before the range
+    ((-?)((\d+)(\.\d+)?))  # start of the range: a negative or positive number (integer or decimal)
+    [-~]                   # the range separator
+    ((-?)((\d+)(\.\d+)?))  # end of the range: a negative or positive number (integer or decimal)
+    (?![\d\+\-\×÷=])       # lookahead: no other digit or operator immediately after the range
+    """, re.VERBOSE)
 
 
 def replace_range(match) -> str:
@@ -165,7 +194,7 @@ def replace_range(match) -> str:
     Returns:
         str
     """
-    first, second = match.group(1), match.group(8)
+    first, second = match.group(1), match.group(6)
     first = RE_NUMBER.sub(replace_number, first)
     second = RE_NUMBER.sub(replace_number, second)
     result = f"{first}到{second}"
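To see the new arithmetic pass in isolation, a hedged sketch (the import path is assumed from the package's relative imports shown in the next hunks):

from text.zh_normalization.num import RE_ASMD, replace_asmd

s = "1+1=2"
while RE_ASMD.search(s):            # chained expressions need repeated passes
    s = RE_ASMD.sub(replace_asmd, s)
print(s)  # "1加1等于2" -- the digits are verbalized later by the number passes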
GPT_SoVITS/text/zh_normalization/text_normlization.py

@@ -34,6 +34,7 @@ from .num import RE_PERCENTAGE
 from .num import RE_POSITIVE_QUANTIFIERS
 from .num import RE_RANGE
 from .num import RE_TO_RANGE
+from .num import RE_ASMD
 from .num import replace_default_num
 from .num import replace_frac
 from .num import replace_negative_num
@@ -42,6 +43,7 @@ from .num import replace_percentage
 from .num import replace_positive_quantifier
 from .num import replace_range
 from .num import replace_to_range
+from .num import replace_asmd
 from .phonecode import RE_MOBILE_PHONE
 from .phonecode import RE_NATIONAL_UNIFORM_NUMBER
 from .phonecode import RE_TELEPHONE
@@ -67,7 +69,7 @@ class TextNormalizer():
         if lang == "zh":
             text = text.replace(" ", "")
             # filter out special characters
-            text = re.sub(r'[——《》【】<=>{}()()#&@“”^_|\\]', '', text)
+            text = re.sub(r'[——《》【】<>{}()()#&@“”^_|\\]', '', text)
             text = self.SENTENCE_SPLITOR.sub(r'\1\n', text)
             text = text.strip()
             sentences = [sentence.strip() for sentence in re.split(r'\n+', text)]
@@ -142,6 +144,11 @@ class TextNormalizer():
         sentence = RE_NATIONAL_UNIFORM_NUMBER.sub(replace_phone, sentence)
 
         sentence = RE_RANGE.sub(replace_range, sentence)
+
+        # handle addition, subtraction, multiplication and division
+        while RE_ASMD.search(sentence):
+            sentence = RE_ASMD.sub(replace_asmd, sentence)
+
         sentence = RE_INTEGER.sub(replace_negative_num, sentence)
         sentence = RE_DECIMAL_NUM.sub(replace_number, sentence)
         sentence = RE_POSITIVE_QUANTIFIERS.sub(replace_positive_quantifier,
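End-to-end, the passes interact: RE_RANGE runs first, and its new look-around guards keep it from consuming the minus sign of an arithmetic expression (note also that "=" is no longer stripped by the special-character filter, so it survives until the ASMD pass). A hedged sketch; the module path, including the "text_normlization" spelling, is assumed from imports used elsewhere in the repo:

from text.zh_normalization.text_normlization import TextNormalizer

tn = TextNormalizer()
print(tn.normalize("10-2=8"))  # guards keep RE_RANGE away; expected 十减二等于八
print(tn.normalize("10-20"))   # a bare span still reads as a range; expected 十到二十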