Compare commits
13 Commits: 20240821v2 ... revert-166

| Author | SHA1 | Date |
|---|---|---|
| | c441fac22f | |
| | 38cd881578 | |
| | 5efb960898 | |
| | 78c68d46cb | |
| | 192ea6f6c9 | |
| | 0c000191b3 | |
| | 570da092c9 | |
| | 40cd22e69d | |
| | 3488cffd68 | |
| | d67bbd2166 | |
| | f35f6e9b5e | |
| | 7dac47ca95 | |
| | 2a9512a63e | |
@@ -854,6 +854,7 @@ class Text2SemanticDecoder(nn.Module):

        if idx == 0:
            xy_attn_mask = None
        if(idx<11): ### require at least 10 predicted tokens before allowing a stop (0.4s)
            logits = logits[:, :-1]

        samples = sample(

@@ -182,6 +182,11 @@ class TTS_Config:

    def _load_configs(self, configs_path: str)->dict:
        if os.path.exists(configs_path):
            ...
        else:
            print(i18n("路径不存在,使用默认配置"))
            self.save_configs(configs_path)
        with open(configs_path, 'r') as f:
            configs = yaml.load(f, Loader=yaml.FullLoader)

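For reference, a minimal sketch of what the parsed configs dict might contain once this YAML is loaded; only the version, device and weight-path keys are visible in this diff, so the exact key set and all values shown here are assumptions:

# illustrative only — keys beyond those visible in this diff, and all values, are assumptions
configs = {
    "version": "v2",
    "device": "cpu",
    "vits_weights_path": "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
    "cnhuhbert_base_path": "GPT_SoVITS/pretrained_models/chinese-hubert-base",
}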
@@ -208,6 +213,10 @@ class TTS_Config:
            "cnhuhbert_base_path": self.cnhuhbert_base_path,
        }
        return self.config

    def update_version(self, version:str)->None:
        self.version = version
        self.languages = self.v2_languages if self.version=="v2" else self.v1_languages

    def __str__(self):
        self.configs = self.update_configs()
@@ -295,13 +304,14 @@ class TTS:

    def init_vits_weights(self, weights_path: str):
        print(f"Loading VITS weights from {weights_path}")
        self.configs.vits_weights_path = weights_path
        self.configs.save_configs()
        dict_s2 = torch.load(weights_path, map_location=self.configs.device)
        hps = dict_s2["config"]
        if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
            self.configs.version = "v1"
            self.configs.update_version("v1")
        else:
            self.configs.version = "v2"
            self.configs.update_version("v2")
        self.configs.save_configs()

        hps["model"]["version"] = self.configs.version
        self.configs.filter_length = hps["data"]["filter_length"]
        self.configs.segment_size = hps["train"]["segment_size"]
@@ -637,7 +647,7 @@ class TTS:
    "text": "", # str.(required) text to be synthesized
    "text_lang": "", # str.(required) language of the text to be synthesized
    "ref_audio_path": "", # str.(required) reference audio path
    "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker synthesis
    "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
    "prompt_text": "", # str.(optional) prompt text for the reference audio
    "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
    "top_k": 5, # int. top k sampling
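Purely as an illustration of the inputs dict documented above (not part of this diff), a call into the TTS pipeline might look roughly like this; the config path and the run() entry point are assumptions, while the yielded (sampling_rate, int16 audio) pairs follow the hunk further below:

# hypothetical usage sketch — class construction, config path and run() are assumed
cfg = TTS_Config("GPT_SoVITS/configs/tts_infer.yaml")  # assumed config location
tts = TTS(cfg)
inputs = {
    "text": "Hello, this is a test.",
    "text_lang": "en",
    "ref_audio_path": "ref.wav",
    "prompt_text": "",
    "prompt_lang": "en",
    "top_k": 5,
}
for sampling_rate, audio in tts.run(inputs):  # assumed entry point; yields audio fragments
    pass  # write or stream each returned fragment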
@@ -748,7 +758,8 @@ class TTS:
            phones, bert_features, norm_text = \
                self.text_preprocessor.segment_and_extract_feature_for_text(
                    prompt_text,
                    prompt_lang)
                    prompt_lang,
                    self.configs.version)
            self.prompt_cache["phones"] = phones
            self.prompt_cache["bert_features"] = bert_features
            self.prompt_cache["norm_text"] = norm_text
@@ -760,7 +771,7 @@ class TTS:
        t1 = ttime()
        data:list = None
        if not return_fragment:
            data = self.text_preprocessor.preprocess(text, text_lang, text_split_method)
            data = self.text_preprocessor.preprocess(text, text_lang, text_split_method, self.configs.version)
            if len(data) == 0:
                yield self.configs.sampling_rate, np.zeros(int(self.configs.sampling_rate),
                                                           dtype=np.int16)

@@ -55,9 +55,9 @@ class TextPreprocessor:
        self.tokenizer = tokenizer
        self.device = device

    def preprocess(self, text:str, lang:str, text_split_method:str, version:str="v1")->List[Dict]:
    def preprocess(self, text:str, lang:str, text_split_method:str, version:str="v2")->List[Dict]:
        print(i18n("############ 切分文本 ############"))
        text = self.replace_consecutive_punctuation(text) # the variable name here looks like a typo
        text = self.replace_consecutive_punctuation(text)
        texts = self.pre_seg_text(text, lang, text_split_method)
        result = []
        print(i18n("############ 提取文本Bert特征 ############"))

@@ -204,7 +204,7 @@ class TextPreprocessor:
        phone_level_feature = torch.cat(phone_level_feature, dim=0)
        return phone_level_feature.T

    def clean_text_inf(self, text:str, language:str, version:str="v1"):
    def clean_text_inf(self, text:str, language:str, version:str="v2"):
        phones, word2ph, norm_text = clean_text(text, language, version)
        phones = cleaned_text_to_sequence(phones, version)
        return phones, word2ph, norm_text

1 GPT_SoVITS/configs/.gitignore vendored Normal file
@@ -0,0 +1 @@
*.yaml

737 GPT_SoVITS/export_torch_script.py Normal file
@@ -0,0 +1,737 @@
# modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
# reference: https://github.com/lifeiteng/vall-e
from typing import Optional
from my_utils import load_audio
from text import cleaned_text_to_sequence
import torch
import torchaudio

from torch import IntTensor, LongTensor, Tensor, nn
from torch.nn import functional as F

from transformers import AutoModelForMaskedLM, AutoTokenizer
from feature_extractor import cnhubert

from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from module.models_onnx import SynthesizerTrn


import os
import soundfile

default_config = {
    "embedding_dim": 512,
    "hidden_dim": 512,
    "num_head": 8,
    "num_layers": 12,
    "num_codebook": 8,
    "p_dropout": 0.0,
    "vocab_size": 1024 + 1,
    "phoneme_vocab_size": 512,
    "EOS": 1024,
}

def get_raw_t2s_model(dict_s1) -> Text2SemanticLightningModule:
    config = dict_s1["config"]
    config["model"]["dropout"] = float(config["model"]["dropout"])
    t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
    t2s_model.load_state_dict(dict_s1["weight"])
    t2s_model = t2s_model.eval()
    return t2s_model

@torch.jit.script
def logits_to_probs(
    logits,
    previous_tokens: Optional[torch.Tensor] = None,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    top_p: Optional[int] = None,
    repetition_penalty: float = 1.0,
):
    # if previous_tokens is not None:
    #     previous_tokens = previous_tokens.squeeze()
    # print(logits.shape,previous_tokens.shape)
    # pdb.set_trace()
    if previous_tokens is not None and repetition_penalty != 1.0:
        previous_tokens = previous_tokens.long()
        score = torch.gather(logits, dim=1, index=previous_tokens)
        score = torch.where(
            score < 0, score * repetition_penalty, score / repetition_penalty
        )
        logits.scatter_(dim=1, index=previous_tokens, src=score)

    if top_p is not None and top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(
            torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
        )
        sorted_indices_to_remove = cum_probs > top_p
        sorted_indices_to_remove[:, 0] = False  # keep at least one option
        indices_to_remove = sorted_indices_to_remove.scatter(
            dim=1, index=sorted_indices, src=sorted_indices_to_remove
        )
        logits = logits.masked_fill(indices_to_remove, -float("Inf"))

    logits = logits / max(temperature, 1e-5)

    if top_k is not None:
        v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
        pivot = v[:, -1].unsqueeze(-1)
        logits = torch.where(logits < pivot, -float("Inf"), logits)

    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs

@torch.jit.script
def multinomial_sample_one_no_sync(probs_sort):
    # Does multinomial sampling without a cuda synchronization
    q = torch.randn_like(probs_sort)
    return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)

@torch.jit.script
def sample(
    logits,
    previous_tokens,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    top_p: Optional[int] = None,
    repetition_penalty: float = 1.0,
):
    probs = logits_to_probs(
        logits=logits, previous_tokens=previous_tokens, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty
    )
    idx_next = multinomial_sample_one_no_sync(probs)
    return idx_next, probs

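As a quick illustration (not part of the committed file), sample() can be exercised on dummy tensors; the shapes mirror how it is called later in this file (logits [batch, vocab], previous_tokens [batch, T]) and the vocab size follows default_config:

# illustrative call with random inputs — same keyword values as used later in this file
logits = torch.randn(1, 1025)
previous_tokens = torch.zeros((1, 0), dtype=torch.long)
idx_next, probs = sample(logits, previous_tokens, top_k=5, top_p=1, repetition_penalty=1.35, temperature=1.0)
print(idx_next.shape)  # torch.Size([1, 1]) — one sampled token id per batch item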
@torch.jit.script
def spectrogram_torch(y:Tensor, n_fft:int, sampling_rate:int, hop_size:int, win_size:int, center:bool=False):
    hann_window = torch.hann_window(win_size,device=y.device,dtype=y.dtype)
    y = torch.nn.functional.pad(
        y.unsqueeze(1),
        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
        mode="reflect",
    )
    y = y.squeeze(1)
    spec = torch.stft(
        y,
        n_fft,
        hop_length=hop_size,
        win_length=win_size,
        window=hann_window,
        center=center,
        pad_mode="reflect",
        normalized=False,
        onesided=True,
        return_complex=False,
    )
    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec


class DictToAttrRecursive(dict):
    def __init__(self, input_dict):
        super().__init__(input_dict)
        for key, value in input_dict.items():
            if isinstance(value, dict):
                value = DictToAttrRecursive(value)
            self[key] = value
            setattr(self, key, value)

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            raise AttributeError(f"Attribute {item} not found")

    def __setattr__(self, key, value):
        if isinstance(value, dict):
            value = DictToAttrRecursive(value)
        super(DictToAttrRecursive, self).__setitem__(key, value)
        super().__setattr__(key, value)

    def __delattr__(self, item):
        try:
            del self[item]
        except KeyError:
            raise AttributeError(f"Attribute {item} not found")

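A small usage sketch (illustrative, not from the file) of DictToAttrRecursive, which is how the hps checkpoint config is wrapped further below:

hps = DictToAttrRecursive({"data": {"sampling_rate": 32000}, "model": {"version": "v2"}})
print(hps.data.sampling_rate)   # 32000 — nested dicts become attribute access
print(hps["model"]["version"])  # "v2"  — plain dict indexing still works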
@torch.jit.script
class T2SMLP:
    def __init__(self, w1, b1, w2, b2):
        self.w1 = w1
        self.b1 = b1
        self.w2 = w2
        self.b2 = b2

    def forward(self, x):
        x = F.relu(F.linear(x, self.w1, self.b1))
        x = F.linear(x, self.w2, self.b2)
        return x

@torch.jit.script
class T2SBlock:
    def __init__(
        self,
        num_heads: int,
        hidden_dim: int,
        mlp: T2SMLP,
        qkv_w,
        qkv_b,
        out_w,
        out_b,
        norm_w1,
        norm_b1,
        norm_eps1: float,
        norm_w2,
        norm_b2,
        norm_eps2: float,
    ):
        self.num_heads = num_heads
        self.mlp = mlp
        self.hidden_dim: int = hidden_dim
        self.qkv_w = qkv_w
        self.qkv_b = qkv_b
        self.out_w = out_w
        self.out_b = out_b
        self.norm_w1 = norm_w1
        self.norm_b1 = norm_b1
        self.norm_eps1 = norm_eps1
        self.norm_w2 = norm_w2
        self.norm_b2 = norm_b2
        self.norm_eps2 = norm_eps2

        self.false = torch.tensor(False, dtype=torch.bool)

    @torch.jit.ignore
    def to_mask(self, x:torch.Tensor, padding_mask:Optional[torch.Tensor]):
        if padding_mask is None:
            return x

        if padding_mask.dtype == torch.bool:
            return x.masked_fill(padding_mask, 0)
        else:
            return x * padding_mask

    def process_prompt(self, x:torch.Tensor, attn_mask : torch.Tensor, padding_mask:Optional[torch.Tensor]=None):
        q, k, v = F.linear(self.to_mask(x, padding_mask), self.qkv_w, self.qkv_b).chunk(3, dim=-1)

        batch_size = q.shape[0]
        q_len = q.shape[1]
        kv_len = k.shape[1]

        q = self.to_mask(q, padding_mask)
        k_cache = self.to_mask(k, padding_mask)
        v_cache = self.to_mask(v, padding_mask)

        q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
        k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
        v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)

        attn = F.scaled_dot_product_attention(q, k, v, ~attn_mask)

        attn = attn.permute(2, 0, 1, 3).reshape(batch_size*q_len, self.hidden_dim)
        attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0)
        attn = F.linear(self.to_mask(attn, padding_mask), self.out_w, self.out_b)

        if padding_mask is not None:
            for i in range(batch_size):
                # mask = padding_mask[i,:,0]
                if self.false.device!= padding_mask.device:
                    self.false = self.false.to(padding_mask.device)
                idx = torch.where(padding_mask[i,:,0]==self.false)[0]
                x_item = x[i,idx,:].unsqueeze(0)
                attn_item = attn[i,idx,:].unsqueeze(0)
                x_item = x_item + attn_item
                x_item = F.layer_norm(
                    x_item, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
                )
                x_item = x_item + self.mlp.forward(x_item)
                x_item = F.layer_norm(
                    x_item,
                    [self.hidden_dim],
                    self.norm_w2,
                    self.norm_b2,
                    self.norm_eps2,
                )
                x[i,idx,:] = x_item.squeeze(0)
            x = self.to_mask(x, padding_mask)
        else:
            x = x + attn
            x = F.layer_norm(
                x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
            )
            x = x + self.mlp.forward(x)
            x = F.layer_norm(
                x,
                [self.hidden_dim],
                self.norm_w2,
                self.norm_b2,
                self.norm_eps2,
            )
        return x, k_cache, v_cache

    def decode_next_token(self, x:torch.Tensor, k_cache:torch.Tensor, v_cache:torch.Tensor):
        q, k, v = F.linear(x, self.qkv_w, self.qkv_b).chunk(3, dim=-1)

        k_cache = torch.cat([k_cache, k], dim=1)
        v_cache = torch.cat([v_cache, v], dim=1)

        batch_size = q.shape[0]
        q_len = q.shape[1]
        kv_len = k_cache.shape[1]

        q = q.view(batch_size, q_len, self.num_heads, -1).transpose(1, 2)
        k = k_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)
        v = v_cache.view(batch_size, kv_len, self.num_heads, -1).transpose(1, 2)

        attn = F.scaled_dot_product_attention(q, k, v)

        attn = attn.permute(2, 0, 1, 3).reshape(batch_size*q_len, self.hidden_dim)
        attn = attn.view(q_len, batch_size, self.hidden_dim).transpose(1, 0)
        attn = F.linear(attn, self.out_w, self.out_b)

        x = x + attn
        x = F.layer_norm(
            x, [self.hidden_dim], self.norm_w1, self.norm_b1, self.norm_eps1
        )
        x = x + self.mlp.forward(x)
        x = F.layer_norm(
            x,
            [self.hidden_dim],
            self.norm_w2,
            self.norm_b2,
            self.norm_eps2,
        )
        return x, k_cache, v_cache

@torch.jit.script
class T2STransformer:
    def __init__(self, num_blocks : int, blocks: list[T2SBlock]):
        self.num_blocks : int = num_blocks
        self.blocks = blocks

    def process_prompt(
        self, x:torch.Tensor, attn_mask : torch.Tensor,padding_mask : Optional[torch.Tensor]=None):
        k_cache : list[torch.Tensor] = []
        v_cache : list[torch.Tensor] = []
        for i in range(self.num_blocks):
            x, k_cache_, v_cache_ = self.blocks[i].process_prompt(x, attn_mask, padding_mask)
            k_cache.append(k_cache_)
            v_cache.append(v_cache_)
        return x, k_cache, v_cache

    def decode_next_token(
        self, x:torch.Tensor,
        k_cache: list[torch.Tensor],
        v_cache: list[torch.Tensor]):
        for i in range(self.num_blocks):
            x, k_cache[i], v_cache[i] = self.blocks[i].decode_next_token(x, k_cache[i], v_cache[i])
        return x, k_cache, v_cache

class VitsModel(nn.Module):
    def __init__(self, vits_path):
        super().__init__()
        dict_s2 = torch.load(vits_path,map_location="cpu")
        self.hps = dict_s2["config"]
        if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
            self.hps["model"]["version"] = "v1"
        else:
            self.hps["model"]["version"] = "v2"

        self.hps = DictToAttrRecursive(self.hps)
        self.hps.model.semantic_frame_rate = "25hz"
        self.vq_model = SynthesizerTrn(
            self.hps.data.filter_length // 2 + 1,
            self.hps.train.segment_size // self.hps.data.hop_length,
            n_speakers=self.hps.data.n_speakers,
            **self.hps.model
        )
        self.vq_model.eval()
        self.vq_model.load_state_dict(dict_s2["weight"], strict=False)

    def forward(self, text_seq, pred_semantic, ref_audio):
        refer = spectrogram_torch(
            ref_audio,
            self.hps.data.filter_length,
            self.hps.data.sampling_rate,
            self.hps.data.hop_length,
            self.hps.data.win_length,
            center=False
        )
        return self.vq_model(pred_semantic, text_seq, refer)[0, 0]

class T2SModel(nn.Module):
    def __init__(self,raw_t2s:Text2SemanticLightningModule):
        super(T2SModel, self).__init__()
        self.model_dim = raw_t2s.model.model_dim
        self.embedding_dim = raw_t2s.model.embedding_dim
        self.num_head = raw_t2s.model.num_head
        self.num_layers = raw_t2s.model.num_layers
        self.vocab_size = raw_t2s.model.vocab_size
        self.phoneme_vocab_size = raw_t2s.model.phoneme_vocab_size
        # self.p_dropout = float(raw_t2s.model.p_dropout)
        self.EOS:int = int(raw_t2s.model.EOS)
        self.norm_first = raw_t2s.model.norm_first
        assert self.EOS == self.vocab_size - 1
        self.hz = 50

        self.bert_proj = raw_t2s.model.bert_proj
        self.ar_text_embedding = raw_t2s.model.ar_text_embedding
        self.ar_text_position = raw_t2s.model.ar_text_position
        self.ar_audio_embedding = raw_t2s.model.ar_audio_embedding
        self.ar_audio_position = raw_t2s.model.ar_audio_position

        # self.t2s_transformer = T2STransformer(self.num_layers, blocks)
        # self.t2s_transformer = raw_t2s.model.t2s_transformer

        blocks = []
        h = raw_t2s.model.h

        for i in range(self.num_layers):
            layer = h.layers[i]
            t2smlp = T2SMLP(
                layer.linear1.weight,
                layer.linear1.bias,
                layer.linear2.weight,
                layer.linear2.bias
            )

            block = T2SBlock(
                self.num_head,
                self.model_dim,
                t2smlp,
                layer.self_attn.in_proj_weight,
                layer.self_attn.in_proj_bias,
                layer.self_attn.out_proj.weight,
                layer.self_attn.out_proj.bias,
                layer.norm1.weight,
                layer.norm1.bias,
                layer.norm1.eps,
                layer.norm2.weight,
                layer.norm2.bias,
                layer.norm2.eps
            )

            blocks.append(block)

        self.t2s_transformer = T2STransformer(self.num_layers, blocks)

        # self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
        self.ar_predict_layer = raw_t2s.model.ar_predict_layer
        # self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
        self.max_sec = raw_t2s.config["data"]["max_sec"]
        self.top_k = int(raw_t2s.config["inference"]["top_k"])
        self.early_stop_num = torch.LongTensor([self.hz * self.max_sec])

    def forward(self,prompts:LongTensor, ref_seq:LongTensor, text_seq:LongTensor, ref_bert:torch.Tensor, text_bert:torch.Tensor):
        bert = torch.cat([ref_bert.T, text_bert.T], 1)
        all_phoneme_ids = torch.cat([ref_seq, text_seq], 1)
        bert = bert.unsqueeze(0)

        x = self.ar_text_embedding(all_phoneme_ids)
        x = x + self.bert_proj(bert.transpose(1, 2))
        x:torch.Tensor = self.ar_text_position(x)

        early_stop_num = self.early_stop_num

        #[1,N,512] [1,N]
        # y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)
        y = prompts
        # x_example = x[:,:,0] * 0.0

        x_len = x.shape[1]
        x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)

        y_emb = self.ar_audio_embedding(y)
        y_len = y_emb.shape[1]
        prefix_len = y.shape[1]
        y_pos = self.ar_audio_position(y_emb)
        xy_pos = torch.concat([x, y_pos], dim=1)

        bsz = x.shape[0]
        src_len = x_len + y_len
        x_attn_mask_pad = F.pad(
            x_attn_mask,
            (0, y_len), ### extend the all-zero x-to-x block with all-ones for x-to-y, (x, x+y)
            value=True,
        )
        y_attn_mask = F.pad( ### extend the upper-right ones of the y-to-y mask with zeros on the left (the x part), (y, x+y)
            torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
            (x_len, 0),
            value=False,
        )
        xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)\
            .unsqueeze(0)\
            .expand(bsz*self.num_head, -1, -1)\
            .view(bsz, self.num_head, src_len, src_len)\
            .to(device=x.device, dtype=torch.bool)

        idx = 0

        xy_dec, k_cache, v_cache = self.t2s_transformer.process_prompt(xy_pos, xy_attn_mask, None)

        logits = self.ar_predict_layer(xy_dec[:, -1])
        logits = logits[:, :-1]
        samples = sample(logits, y, top_k=self.top_k, top_p=1, repetition_penalty=1.35, temperature=1.0)[0]
        y = torch.concat([y, samples], dim=1)
        y_emb = self.ar_audio_embedding(y[:, -1:])
        xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype,device=y_emb.device)

        stop = False
        # for idx in range(1, 50):
        for idx in range(1, 1500):
            #[1, N] [N_layer, N, 1, 512] [N_layer, N, 1, 512] [1, N, 512] [1] [1, N, 512] [1, N]
            # y, k, v, y_emb, logits, samples = self.stage_decoder(y, k, v, y_emb, x_example)
            xy_dec, k_cache, v_cache = self.t2s_transformer.decode_next_token(xy_pos, k_cache, v_cache)
            logits = self.ar_predict_layer(xy_dec[:, -1])

            if(idx<11): ### require at least 10 predicted tokens before allowing a stop (0.4s)
                logits = logits[:, :-1]

            samples = sample(logits, y, top_k=self.top_k, top_p=1, repetition_penalty=1.35, temperature=1.0)[0]

            y = torch.concat([y, samples], dim=1)

            if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
                stop = True
            if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
                stop = True
            if stop:
                if y.shape[1] == 0:
                    y = torch.concat([y, torch.zeros_like(samples)], dim=1)
                break

            y_emb = self.ar_audio_embedding(y[:, -1:])
            xy_pos = y_emb * self.ar_audio_position.x_scale + self.ar_audio_position.alpha * self.ar_audio_position.pe[:, y_len + idx].to(dtype=y_emb.dtype,device=y_emb.device)

        return y[:, -idx:].unsqueeze(0)

bert_path = os.environ.get(
    "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
)
cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
cnhubert.cnhubert_base_path = cnhubert_base_path

@torch.jit.script
def build_phone_level_feature(res:Tensor, word2ph:IntTensor):
    phone_level_feature = []
    for i in range(word2ph.shape[0]):
        repeat_feature = res[i].repeat(word2ph[i].item(), 1)
        phone_level_feature.append(repeat_feature)
    phone_level_feature = torch.cat(phone_level_feature, dim=0)
    # [sum(word2ph), 1024]
    return phone_level_feature

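A quick illustrative call (not from the file), using the shapes noted in the comment above — each per-character feature row is repeated word2ph[i] times:

res = torch.randn(3, 1024)                             # per-character BERT features
word2ph = torch.tensor([2, 1, 2], dtype=torch.int32)   # phones per character
feat = build_phone_level_feature(res, word2ph)
print(feat.shape)  # torch.Size([5, 1024])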
class MyBertModel(torch.nn.Module):
    def __init__(self, bert_model):
        super(MyBertModel, self).__init__()
        self.bert = bert_model

    def forward(self, input_ids:torch.Tensor, attention_mask:torch.Tensor, token_type_ids:torch.Tensor, word2ph:IntTensor):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        res = torch.cat(outputs["hidden_states"][-3:-2], -1)[0][1:-1]
        return build_phone_level_feature(res, word2ph)

class SSLModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.ssl = cnhubert.get_model().model

    def forward(self, ref_audio_16k)-> torch.Tensor:
        ssl_content = self.ssl(ref_audio_16k)["last_hidden_state"].transpose(1, 2)
        return ssl_content

class ExportSSLModel(torch.nn.Module):
    def __init__(self,ssl:SSLModel):
        super().__init__()
        self.ssl = ssl

    def forward(self, ref_audio:torch.Tensor):
        return self.ssl(ref_audio)

    @torch.jit.export
    def resample(self,ref_audio:torch.Tensor,src_sr:int,dst_sr:int)->torch.Tensor:
        audio = resamplex(ref_audio,src_sr,dst_sr).float()
        return audio

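As a rough illustration of the shapes involved (requires the pretrained chinese-hubert-base model referenced above to be present; the exact frame count depends on the clip length):

ref_audio_16k = torch.randn(1, 16000 * 5)  # a 5 s clip at 16 kHz (random stand-in for real audio)
ssl = SSLModel()
content = ssl(ref_audio_16k)
print(content.shape)  # [1, 768, T] — hidden states transposed to [batch, channels, frames]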
def export_bert(ref_bert_inputs):
    ref_bert_inputs = {
        'input_ids': ref_bert_inputs['input_ids'],
        'attention_mask': ref_bert_inputs['attention_mask'],
        'token_type_ids': ref_bert_inputs['token_type_ids'],
        'word2ph': ref_bert_inputs['word2ph']
    }
    bert_model = AutoModelForMaskedLM.from_pretrained(bert_path,output_hidden_states=True)
    my_bert_model = MyBertModel(bert_model)

    my_bert_model = torch.jit.trace(my_bert_model,example_kwarg_inputs=ref_bert_inputs)
    my_bert_model.save("onnx/bert_model.pt")
    print('#### exported bert ####')

def export(gpt_path, vits_path):
    tokenizer = AutoTokenizer.from_pretrained(bert_path)

    ref_bert_inputs = tokenizer("声音,是有温度的.夜晚的声音,会发光", return_tensors="pt")
    ref_seq = torch.LongTensor([cleaned_text_to_sequence(['sh','eng1','y','in1',',','sh','i4','y','ou3','w','en1','d','u4','d','e','.','y','e4','w','an3','d','e','sh','eng1','y','in1',',','h','ui4','f','a1','g','uang1'],version='v2')])
    ref_bert_inputs['word2ph'] = torch.Tensor([2,2,1,2,2,2,2,2,1,2,2,2,2,2,1,2,2,2]).int()

    text_berf_inputs = tokenizer("大家好,我有一个春晚问题.", return_tensors="pt")
    text_seq = torch.LongTensor([cleaned_text_to_sequence(["d", "a4", "j", "ia1", "h", "ao3",",","w","o3","y", "ou3","y","i2","g","e4","q","i2","g","uai4","w","en4","t","i2","."],version='v2')])
    text_berf_inputs['word2ph'] = torch.Tensor([2,2,2,1,2,2,2,2,2,2,2,2,1]).int()

    bert_model = AutoModelForMaskedLM.from_pretrained(bert_path,output_hidden_states=True)

    bert = MyBertModel(bert_model)

    # export_bert(ref_bert_inputs)

    ref_audio = torch.tensor([load_audio("output/denoise_opt/chen1.mp4_0000033600_0000192000.wav", 16000)]).float()
    ssl = SSLModel()
    s = ExportSSLModel(torch.jit.trace(ssl,example_inputs=(ref_audio)))
    torch.jit.script(s).save("onnx/xw/ssl_model.pt")
    print('#### exported ssl ####')

    ref_bert = bert(**ref_bert_inputs)
    text_bert = bert(**text_berf_inputs)
    ssl_content = ssl(ref_audio)

    # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth"
    vits = VitsModel(vits_path)
    vits.eval()

    # gpt_path = "GPT_weights_v2/xw-e15.ckpt"
    dict_s1 = torch.load(gpt_path, map_location="cpu")
    raw_t2s = get_raw_t2s_model(dict_s1)
    t2s_m = T2SModel(raw_t2s)
    t2s_m.eval()
    t2s = torch.jit.script(t2s_m)
    print('#### script t2s_m ####')

    print("vits.hps.data.sampling_rate:",vits.hps.data.sampling_rate)
    gpt_sovits = GPT_SoVITS(t2s,vits)
    gpt_sovits.eval()
    ref_audio_sr = s.resample(ref_audio,16000,32000)
    print('ref_audio_sr:',ref_audio_sr.shape)

    gpt_sovits_export = torch.jit.trace(
        gpt_sovits,
        example_inputs=(
            ssl_content,
            ref_audio_sr,
            ref_seq,
            text_seq,
            ref_bert,
            text_bert),
        check_trace=False)  # default is True, but the check may feed randomly generated tensors with odd shapes and raise errors

    gpt_sovits_export.save("onnx/xw/gpt_sovits_model.pt")
    print('#### exported gpt_sovits ####')

@torch.jit.script
def parse_audio(ref_audio):
    ref_audio_16k = torchaudio.functional.resample(ref_audio,48000,16000).float()#.to(ref_audio.device)
    ref_audio_sr = torchaudio.functional.resample(ref_audio,48000,32000).float()#.to(ref_audio.device)
    return ref_audio_16k,ref_audio_sr

@torch.jit.script
def resamplex(ref_audio:torch.Tensor,src_sr:int,dst_sr:int)->torch.Tensor:
    return torchaudio.functional.resample(ref_audio,src_sr,dst_sr).float()

class GPT_SoVITS(nn.Module):
    def __init__(self, t2s:T2SModel,vits:VitsModel):
        super().__init__()
        self.t2s = t2s
        self.vits = vits

    def forward(self, ssl_content:torch.Tensor, ref_audio_sr:torch.Tensor, ref_seq:Tensor, text_seq:Tensor, ref_bert:Tensor, text_bert:Tensor):
        codes = self.vits.vq_model.extract_latent(ssl_content.float())
        prompt_semantic = codes[0, 0]
        prompts = prompt_semantic.unsqueeze(0)

        pred_semantic = self.t2s(prompts, ref_seq, text_seq, ref_bert, text_bert)
        audio = self.vits(text_seq, pred_semantic, ref_audio_sr)
        return audio

def test(gpt_path, vits_path):
    tokenizer = AutoTokenizer.from_pretrained(bert_path)
    bert_model = AutoModelForMaskedLM.from_pretrained(bert_path,output_hidden_states=True)
    bert = MyBertModel(bert_model)
    # bert = torch.jit.load("onnx/bert_model.pt",map_location='cuda')

    # gpt_path = "GPT_weights_v2/xw-e15.ckpt"
    dict_s1 = torch.load(gpt_path, map_location="cpu")
    raw_t2s = get_raw_t2s_model(dict_s1)
    t2s = T2SModel(raw_t2s)
    t2s.eval()
    # t2s = torch.jit.load("onnx/xw/t2s_model.pt",map_location='cuda')

    # vits_path = "SoVITS_weights_v2/xw_e8_s216.pth"
    vits = VitsModel(vits_path)
    vits.eval()

    ssl = ExportSSLModel(SSLModel())
    ssl.eval()

    gpt_sovits = GPT_SoVITS(t2s,vits)

    # vits = torch.jit.load("onnx/xw/vits_model.pt",map_location='cuda')
    # ssl = torch.jit.load("onnx/xw/ssl_model.pt",map_location='cuda')

    ref_bert_inputs = tokenizer("声音,是有温度的.夜晚的声音,会发光", return_tensors="pt")
    ref_seq = torch.LongTensor([cleaned_text_to_sequence(['sh','eng1','y','in1',',','sh','i4','y','ou3','w','en1','d','u4','d','e','.','y','e4','w','an3','d','e','sh','eng1','y','in1',',','h','ui4','f','a1','g','uang1'],version='v2')])
    ref_bert_inputs['word2ph'] = torch.Tensor([2,2,1,2,2,2,2,2,1,2,2,2,2,2,1,2,2,2]).int()

    text_berf_inputs = tokenizer("大家好,我有一个春晚问题.", return_tensors="pt")
    text_seq = torch.LongTensor([cleaned_text_to_sequence(["d", "a4", "j", "ia1", "h", "ao3",",","w","o3","y", "ou3","y","i2","g","e4","q","i2","g","uai4","w","en4","t","i2","."],version='v2')])
    text_berf_inputs['word2ph'] = torch.Tensor([2,2,2,1,2,2,2,2,2,2,2,2,1]).int()

    ref_bert = bert(
        ref_bert_inputs['input_ids'],
        ref_bert_inputs['attention_mask'],
        ref_bert_inputs['token_type_ids'],
        ref_bert_inputs['word2ph']
    )

    text_bert = bert(text_berf_inputs['input_ids'],
        text_berf_inputs['attention_mask'],
        text_berf_inputs['token_type_ids'],
        text_berf_inputs['word2ph'])

    #[1,N]
    ref_audio = torch.tensor([load_audio("output/denoise_opt/chen1.mp4_0000033600_0000192000.wav", 16000)]).float()
    print('ref_audio:',ref_audio.shape)

    ref_audio_sr = ssl.resample(ref_audio,16000,32000)
    print('start ssl')
    ssl_content = ssl(ref_audio)

    print('start gpt_sovits:')
    with torch.no_grad():
        audio = gpt_sovits(ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, text_bert)
        print('start write wav')
        soundfile.write("out.wav", audio.detach().cpu().numpy(), 32000)

    # audio = vits(text_seq, pred_semantic1, ref_audio)
    # soundfile.write("out.wav", audio, 32000)

import text
import json

def export_symbel(version='v2'):
    if version=='v1':
        symbols = text._symbol_to_id_v1
        with open(f"onnx/symbols_v1.json", "w") as file:
            json.dump(symbols, file, indent=4)
    else:
        symbols = text._symbol_to_id_v2
        with open(f"onnx/symbols_v2.json", "w") as file:
            json.dump(symbols, file, indent=4)

if __name__ == "__main__":
    export(gpt_path="GPT_weights_v2/chen1-e15.ckpt", vits_path="SoVITS_weights_v2/chen1_e8_s208.pth")
    # test(gpt_path="GPT_weights_v2/chen1-e15.ckpt", vits_path="SoVITS_weights_v2/chen1_e8_s208.pth")
    # export_symbel()

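Purely for illustration (not part of this commit), the saved TorchScript artifact could later be loaded and run along these lines; the inputs are assumed to be prepared exactly as in test() above:

# hypothetical consumer of the exported artifact
gpt_sovits = torch.jit.load("onnx/xw/gpt_sovits_model.pt", map_location="cpu")
gpt_sovits.eval()
with torch.no_grad():
    # ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, text_bert prepared as in test() above
    audio = gpt_sovits(ssl_content, ref_audio_sr, ref_seq, text_seq, ref_bert, text_bert)
soundfile.write("exported_out.wav", audio.detach().cpu().numpy(), 32000)  # 32000 matches the writes above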
@@ -21,6 +21,11 @@ import LangSegment, os, re, sys, json
import pdb
import torch

try:
    import gradio.analytics as analytics
    analytics.version_check = lambda:None
except:...

version=os.environ.get("version","v2")
pretrained_sovits_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth", "GPT_SoVITS/pretrained_models/s2G488k.pth"]
pretrained_gpt_name=["GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"]
@@ -392,7 +397,8 @@ def merge_short_text_in_array(texts, threshold):
##ref_wav_path+prompt_text+prompt_language+text(single)+text_language+top_k+top_p+temperature
# cache_tokens={}  # no cleanup mechanism implemented yet
cache= {}
def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False,speed=1,if_freeze=False,inp_refs=123):
def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free=False,speed=1,if_freeze=False,inp_refs=None):
    global cache
    if ref_wav_path:pass
    else:gr.Warning(i18n('请上传参考音频'))

@@ -23,6 +23,11 @@ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
import pdb
import torch

try:
    import gradio.analytics as analytics
    analytics.version_check = lambda:None
except:...


infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
infer_ttswebui = int(infer_ttswebui)

@@ -4,8 +4,8 @@ from torch import nn
from torch.nn import functional as F

from module import commons
from module.modules import LayerNorm

from typing import Optional

class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
@@ -59,6 +59,7 @@ class Encoder(nn.Module):
        # self.cond_layer = weight_norm(cond_layer, name='weight')
        # self.gin_channels = 256
        self.cond_layer_idx = self.n_layers
        self.spk_emb_linear = nn.Linear(256, self.hidden_channels)
        if "gin_channels" in kwargs:
            self.gin_channels = kwargs["gin_channels"]
            if self.gin_channels != 0:
@@ -98,22 +99,36 @@ class Encoder(nn.Module):
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, g=None):
    # def forward(self, x, x_mask, g=None):
    #     attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
    #     x = x * x_mask
    #     for i in range(self.n_layers):
    #         if i == self.cond_layer_idx and g is not None:
    #             g = self.spk_emb_linear(g.transpose(1, 2))
    #             g = g.transpose(1, 2)
    #             x = x + g
    #             x = x * x_mask
    #         y = self.attn_layers[i](x, x, attn_mask)
    #         y = self.drop(y)
    #         x = self.norm_layers_1[i](x + y)

    #         y = self.ffn_layers[i](x, x_mask)
    #         y = self.drop(y)
    #         x = self.norm_layers_2[i](x + y)
    #         x = x * x_mask
    #     return x

    def forward(self, x, x_mask):
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for i in range(self.n_layers):
            if i == self.cond_layer_idx and g is not None:
                g = self.spk_emb_linear(g.transpose(1, 2))
                g = g.transpose(1, 2)
                x = x + g
                x = x * x_mask
            y = self.attn_layers[i](x, x, attn_mask)
        for attn_layers,norm_layers_1,ffn_layers,norm_layers_2 in zip(self.attn_layers,self.norm_layers_1,self.ffn_layers,self.norm_layers_2):
            y = attn_layers(x, x, attn_mask)
            y = self.drop(y)
            x = self.norm_layers_1[i](x + y)
            x = norm_layers_1(x + y)

            y = self.ffn_layers[i](x, x_mask)
            y = ffn_layers(x, x_mask)
            y = self.drop(y)
            x = self.norm_layers_2[i](x + y)
            x = norm_layers_2(x + y)
            x = x * x_mask
        return x

@@ -172,17 +187,18 @@ class MultiHeadAttention(nn.Module):
            self.conv_k.weight.copy_(self.conv_q.weight)
            self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
    def forward(self, x, c, attn_mask:Optional[torch.Tensor]=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)
        # x, self.attn = self.attention(q, k, v, mask=attn_mask)
        x, _ = self.attention(q, k, v, mask=attn_mask)

        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
    def attention(self, query, key, value, mask:Optional[torch.Tensor]=None):
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, _ = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, -1).transpose(2, 3)
@@ -304,7 +320,7 @@ class FFN(nn.Module):
        filter_channels,
        kernel_size,
        p_dropout=0.0,
        activation=None,
        activation="",
        causal=False,
    ):
        super().__init__()
@@ -316,10 +332,11 @@ class FFN(nn.Module):
        self.activation = activation
        self.causal = causal

        if causal:
            self.padding = self._causal_padding
        else:
            self.padding = self._same_padding
        # judging from the context, this is always False
        # if causal:
        #     self.padding = self._causal_padding
        # else:
        #     self.padding = self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
@@ -334,6 +351,9 @@ class FFN(nn.Module):
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def padding(self, x):
        return self._same_padding(x)

    def _causal_padding(self, x):
        if self.kernel_size == 1:
@@ -352,3 +372,35 @@ class FFN(nn.Module):
        padding = [[0, 0], [0, 0], [pad_l, pad_r]]
        x = F.pad(x, commons.convert_pad_shape(padding))
        return x

class MRTE(nn.Module):
    def __init__(
        self,
        content_enc_channels=192,
        hidden_size=512,
        out_channels=192,
        kernel_size=5,
        n_heads=4,
        ge_layer=2,
    ):
        super(MRTE, self).__init__()
        self.cross_attention = MultiHeadAttention(hidden_size, hidden_size, n_heads)
        self.c_pre = nn.Conv1d(content_enc_channels, hidden_size, 1)
        self.text_pre = nn.Conv1d(content_enc_channels, hidden_size, 1)
        self.c_post = nn.Conv1d(hidden_size, out_channels, 1)

    def forward(self, ssl_enc, ssl_mask, text, text_mask, ge):
        attn_mask = text_mask.unsqueeze(2) * ssl_mask.unsqueeze(-1)

        ssl_enc = self.c_pre(ssl_enc * ssl_mask)
        text_enc = self.text_pre(text * text_mask)
        x = (
            self.cross_attention(
                ssl_enc * ssl_mask, text_enc * text_mask, attn_mask
            )
            + ssl_enc
            + ge
        )
        x = self.c_post(x * ssl_mask)
        return x

@@ -13,10 +13,10 @@ def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape
# def convert_pad_shape(pad_shape):
#     l = pad_shape[::-1]
#     pad_shape = [item for sublist in l for item in sublist]
#     return pad_shape


def intersperse(lst, item):

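A small worked example of convert_pad_shape (illustrative): it reverses the per-dimension pad pairs and flattens them into the order torch.nn.functional.pad expects, last dimension first.

# [[0, 0], [0, 0], [pad_l, pad_r]]  ->  [pad_l, pad_r, 0, 0, 0, 0]
print(convert_pad_shape([[0, 0], [0, 0], [2, 3]]))  # [2, 3, 0, 0, 0, 0]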
@@ -1,5 +1,6 @@
import copy
import math
from typing import Optional
import torch
from torch import nn
from torch.nn import functional as F
@@ -11,9 +12,10 @@ from module import attentions_onnx as attentions
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from module.commons import init_weights, get_padding
from module.mrte_model import MRTE
from module.quantize import ResidualVectorQuantizer
from text import symbols
# from text import symbols
from text import symbols as symbols_v1
from text import symbols2 as symbols_v2
from torch.cuda.amp import autocast


@@ -182,6 +184,7 @@ class TextEncoder(nn.Module):
        kernel_size,
        p_dropout,
        latent_channels=192,
        version="v2",
    ):
        super().__init__()
        self.out_channels = out_channels
@@ -192,6 +195,7 @@ class TextEncoder(nn.Module):
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.latent_channels = latent_channels
        self.version = version

        self.ssl_proj = nn.Conv1d(768, hidden_channels, 1)

@@ -207,9 +211,14 @@ class TextEncoder(nn.Module):
        self.encoder_text = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )

        if self.version == "v1":
            symbols = symbols_v1.symbols
        else:
            symbols = symbols_v2.symbols
        self.text_embedding = nn.Embedding(len(symbols), hidden_channels)

        self.mrte = MRTE()
        self.mrte = attentions.MRTE()

        self.encoder2 = attentions.Encoder(
            hidden_channels,
@@ -240,25 +249,6 @@ class TextEncoder(nn.Module):
        m, logs = torch.split(stats, self.out_channels, dim=1)
        return y, m, logs, y_mask

    def extract_latent(self, x):
        x = self.ssl_proj(x)
        quantized, codes, commit_loss, quantized_list = self.quantizer(x)
        return codes.transpose(0, 1)

    def decode_latent(self, codes, y_mask, refer, refer_mask, ge):
        quantized = self.quantizer.decode(codes)

        y = self.vq_proj(quantized) * y_mask
        y = self.encoder_ssl(y * y_mask, y_mask)

        y = self.mrte(y, y_mask, refer, refer_mask, ge)

        y = self.encoder2(y * y_mask, y_mask)

        stats = self.proj(y) * y_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        return y, m, logs, y_mask, quantized


class ResidualCouplingBlock(nn.Module):
    def __init__(
@@ -439,7 +429,7 @@ class Generator(torch.nn.Module):
        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
    def forward(self, x, g:Optional[torch.Tensor]=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)
@@ -817,6 +807,7 @@ class SynthesizerTrn(nn.Module):
        use_sdp=True,
        semantic_frame_rate=None,
        freeze_quantizer=None,
        version="v2",
        **kwargs
    ):
        super().__init__()
@@ -837,6 +828,7 @@ class SynthesizerTrn(nn.Module):
        self.segment_size = segment_size
        self.n_speakers = n_speakers
        self.gin_channels = gin_channels
        self.version = version

        self.use_sdp = use_sdp
        self.enc_p = TextEncoder(
@@ -847,6 +839,7 @@ class SynthesizerTrn(nn.Module):
            n_layers,
            kernel_size,
            p_dropout,
            version=version,
        )
        self.dec = Generator(
            inter_channels,
@@ -858,22 +851,24 @@ class SynthesizerTrn(nn.Module):
            upsample_kernel_sizes,
            gin_channels=gin_channels,
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        # self.enc_q = PosteriorEncoder(
        #     spec_channels,
        #     inter_channels,
        #     hidden_channels,
        #     5,
        #     1,
        #     16,
        #     gin_channels=gin_channels,
        # )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels
        )

        self.ref_enc = modules.MelStyleEncoder(
            spec_channels, style_vector_dim=gin_channels
        )
        # self.version=os.environ.get("version","v1")
        if self.version == "v1":
            self.ref_enc = modules.MelStyleEncoder(spec_channels, style_vector_dim=gin_channels)
        else:
            self.ref_enc = modules.MelStyleEncoder(704, style_vector_dim=gin_channels)

        ssl_dim = 768
        self.ssl_dim = ssl_dim
@@ -894,7 +889,10 @@ class SynthesizerTrn(nn.Module):

    def forward(self, codes, text, refer):
        refer_mask = torch.ones_like(refer[:1,:1,:])
        ge = self.ref_enc(refer * refer_mask, refer_mask)
        if (self.version == "v1"):
            ge = self.ref_enc(refer * refer_mask, refer_mask)
        else:
            ge = self.ref_enc(refer[:, :704] * refer_mask, refer_mask)

        quantized = self.quantizer.decode(codes)
        if self.semantic_frame_rate == "25hz":

@@ -1,11 +1,12 @@
from module.models_onnx import SynthesizerTrn, symbols
from module.models_onnx import SynthesizerTrn, symbols_v1, symbols_v2
from AR.models.t2s_lightning_module_onnx import Text2SemanticLightningModule
import torch
import torchaudio
from torch import nn
from feature_extractor import cnhubert
cnhubert_base_path = "pretrained_models/chinese-hubert-base"
cnhubert.cnhubert_base_path=cnhubert_base_path

cnhubert_base_path = "GPT_SoVITS/pretrained_models/chinese-hubert-base"
cnhubert.cnhubert_base_path = cnhubert_base_path
ssl_model = cnhubert.get_model()
from text import cleaned_text_to_sequence
import soundfile
@@ -196,6 +197,11 @@ class VitsModel(nn.Module):
        super().__init__()
        dict_s2 = torch.load(vits_path,map_location="cpu")
        self.hps = dict_s2["config"]
        if dict_s2['weight']['enc_p.text_embedding.weight'].shape[0] == 322:
            self.hps["model"]["version"] = "v1"
        else:
            self.hps["model"]["version"] = "v2"

        self.hps = DictToAttrRecursive(self.hps)
        self.hps.model.semantic_frame_rate = "25hz"
        self.vq_model = SynthesizerTrn(
@@ -267,13 +273,13 @@ class SSLModel(nn.Module):
        return self.ssl.model(ref_audio_16k)["last_hidden_state"].transpose(1, 2)


def export(vits_path, gpt_path, project_name):
def export(vits_path, gpt_path, project_name, vits_model="v2"):
    vits = VitsModel(vits_path)
    gpt = T2SModel(gpt_path, vits)
    gpt_sovits = GptSoVits(vits, gpt)
    ssl = SSLModel()
    ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
    text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"])])
    ref_seq = torch.LongTensor([cleaned_text_to_sequence(["n", "i2", "h", "ao3", ",", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"],version=vits_model)])
    text_seq = torch.LongTensor([cleaned_text_to_sequence(["w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4", "w", "o3", "sh", "i4", "b", "ai2", "y", "e4"],version=vits_model)])
    ref_bert = torch.randn((ref_seq.shape[1], 1024)).float()
    text_bert = torch.randn((text_seq.shape[1], 1024)).float()
    ref_audio = torch.randn((1, 48000 * 5)).float()
@@ -287,34 +293,38 @@ def export(vits_path, gpt_path, project_name):
        pass

    ssl_content = ssl(ref_audio_16k).float()

    debug = False

    # debug = False
    debug = True

    # gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name)

    if debug:
        a, b = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, debug=debug)
        soundfile.write("out1.wav", a.cpu().detach().numpy(), vits.hps.data.sampling_rate)
        soundfile.write("out2.wav", b[0], vits.hps.data.sampling_rate)
        return

    a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy()
    else:
        a = gpt_sovits(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content).detach().cpu().numpy()
        soundfile.write("out.wav", a, vits.hps.data.sampling_rate)

    soundfile.write("out.wav", a, vits.hps.data.sampling_rate)

    gpt_sovits.export(ref_seq, text_seq, ref_bert, text_bert, ref_audio_sr, ssl_content, project_name)
    if vits_model == "v1":
        symbols = symbols_v1
    else:
        symbols = symbols_v2

    MoeVSConf = {
        "Folder" : f"{project_name}",
        "Name" : f"{project_name}",
        "Type" : "GPT-SoVits",
        "Rate" : vits.hps.data.sampling_rate,
        "NumLayers": gpt.t2s_model.num_layers,
        "EmbeddingDim": gpt.t2s_model.embedding_dim,
        "Dict": "BasicDict",
        "BertPath": "chinese-roberta-wwm-ext-large",
        "Symbol": symbols,
        "AddBlank": False
    }

        "Folder": f"{project_name}",
        "Name": f"{project_name}",
        "Type": "GPT-SoVits",
        "Rate": vits.hps.data.sampling_rate,
        "NumLayers": gpt.t2s_model.num_layers,
        "EmbeddingDim": gpt.t2s_model.embedding_dim,
        "Dict": "BasicDict",
        "BertPath": "chinese-roberta-wwm-ext-large",
        # "Symbol": symbols,
        "AddBlank": False,
    }

    MoeVSConfJson = json.dumps(MoeVSConf)
    with open(f"onnx/{project_name}.json", 'w') as MoeVsConfFile:
        json.dump(MoeVSConf, MoeVsConfFile, indent = 4)

@@ -27,7 +27,7 @@ if is_g2pw:
    print("当前使用g2pw进行拼音推理")
    from text.g2pw import G2PWPinyin, correct_pronunciation
    parent_directory = os.path.dirname(current_file_path)
    g2pw = G2PWPinyin(model_dir="GPT_SoVITS/text/G2PWModel",model_source="GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large",v_to_u=False, neutral_tone_with_five=True)
    g2pw = G2PWPinyin(model_dir="GPT_SoVITS/text/G2PWModel",model_source=os.environ.get("bert_path","GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"),v_to_u=False, neutral_tone_with_five=True)

rep_map = {
    ":": ",",

@@ -45021,4 +45021,6 @@
黄冠野服: ['huang2', 'guan4', 'ye3', 'fu2']
黄发台背: ['huang2', 'fa1', 'tai2', 'bei4']
鼎铛玉石: ['ding3', 'cheng1', 'yu4', 'shi2']
齿豁头童: ['chi3', 'huo1', 'tou2', 'tong2']
齿豁头童: ['chi3', 'huo1', 'tou2', 'tong2']
牦牛: ['mao2', 'niu2']
牦: ['mao2']

Binary file not shown.
@@ -186,6 +186,7 @@ def replace_positive_quantifier(match) -> str:
    match_2: str = match_2 if match_2 else ""
    quantifiers: str = match.group(3)
    number: str = num2str(number)
    number = "两" if number == "二" else number
    result = f"{number}{match_2}{quantifiers}"
    return result

@@ -184,8 +184,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

#### Integrated Package Users

Double-click `go-webui.bat`or use `go-webui.ps`
if you want to switch to V1,then double-click`go-webui-v1.bat` or use `go-webui-v1.ps`
Double-click `go-webui.bat`or use `go-webui.ps1`
if you want to switch to V1,then double-click`go-webui-v1.bat` or use `go-webui-v1.ps1`

#### Others

@@ -220,7 +220,7 @@ Or maunally switch version in WebUI

#### Integrated Package Users

Double-click `go-webui-v2.bat` or use `go-webui-v2.ps` ,then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`
Double-click `go-webui-v2.bat` or use `go-webui-v2.ps1` ,then open the inference webui at `1-GPT-SoVITS-TTS/1C-inference`

#### Others

12 api_v2.py
@@ -24,7 +24,7 @@ POST:
    "text": "", # str.(required) text to be synthesized
    "text_lang": "", # str.(required) language of the text to be synthesized
    "ref_audio_path": "", # str.(required) reference audio path
    "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker synthesis
    "aux_ref_audio_paths": [], # list.(optional) auxiliary reference audio paths for multi-speaker tone fusion
    "prompt_text": "", # str.(optional) prompt text for the reference audio
    "prompt_lang": "", # str.(required) language of the prompt text for the reference audio
    "top_k": 5, # int. top k sampling
@@ -34,7 +34,6 @@ POST:
    "batch_size": 1, # int. batch size for inference
    "batch_threshold": 0.75, # float. threshold for batch splitting.
    "split_bucket": True, # bool. whether to split the batch into multiple buckets.
    "return_fragment": False, # bool. step by step return the audio fragment.
    "speed_factor":1.0, # float. control the speed of the synthesized audio.
    "streaming_mode": False, # bool. whether to return a streaming response.
    "seed": -1, # int. random seed for reproducibility.

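For illustration, a client request against this API might look as follows; the endpoint path and port are assumptions, not shown in this hunk, while the field names come from the documentation above:

# hypothetical client call — URL and port are assumptions
import requests

payload = {
    "text": "Hello there.",
    "text_lang": "en",
    "ref_audio_path": "ref.wav",
    "prompt_text": "",
    "prompt_lang": "en",
    "top_k": 5,
    "media_type": "wav",
    "streaming_mode": False,
}
resp = requests.post("http://127.0.0.1:9880/tts", json=payload)  # assumed endpoint
with open("out.wav", "wb") as f:
    f.write(resp.content)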
@@ -254,13 +253,13 @@ def check_params(req:dict):
    if (text_lang in [None, ""]) :
        return JSONResponse(status_code=400, content={"message": "text_lang is required"})
    elif text_lang.lower() not in tts_config.languages:
        return JSONResponse(status_code=400, content={"message": "text_lang is not supported"})
        return JSONResponse(status_code=400, content={"message": f"text_lang: {text_lang} is not supported in version {tts_config.version}"})
    if (prompt_lang in [None, ""]) :
        return JSONResponse(status_code=400, content={"message": "prompt_lang is required"})
    elif prompt_lang.lower() not in tts_config.languages:
        return JSONResponse(status_code=400, content={"message": "prompt_lang is not supported"})
        return JSONResponse(status_code=400, content={"message": f"prompt_lang: {prompt_lang} is not supported in version {tts_config.version}"})
    if media_type not in ["wav", "raw", "ogg", "aac"]:
        return JSONResponse(status_code=400, content={"message": "media_type is not supported"})
        return JSONResponse(status_code=400, content={"message": f"media_type: {media_type} is not supported"})
    elif media_type == "ogg" and not streaming_mode:
        return JSONResponse(status_code=400, content={"message": "ogg format is not supported in non-streaming mode"})

@@ -302,13 +301,14 @@ async def tts_handle(req:dict):
    """

    streaming_mode = req.get("streaming_mode", False)
    return_fragment = req.get("return_fragment", False)
    media_type = req.get("media_type", "wav")

    check_res = check_params(req)
    if check_res is not None:
        return check_res

    if streaming_mode:
    if streaming_mode or return_fragment:
        req["return_fragment"] = True

    try:

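Illustrative effect of the reworded validation above: an unsupported language now reports both the offending value and the loaded model version, e.g.:

# e.g. requesting text_lang="ko" against a v1 model would return HTTP 400 with (illustrative):
# {"message": "text_lang: ko is not supported in version v1"}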
@@ -181,8 +181,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|zh|我爱玩原神。

#### 整合包用户

双击`go-webui.bat`或者使用`go-webui.ps`
若想使用V1,则双击`go-webui-v1.bat`或者使用`go-webui-v1.ps`
双击`go-webui.bat`或者使用`go-webui.ps1`
若想使用V1,则双击`go-webui-v1.bat`或者使用`go-webui-v1.ps1`

#### 其他

@@ -217,7 +217,7 @@ python webui.py v1 <language(optional)>

#### 整合包用户

双击 `go-webui.bat` 或者使用 `go-webui.ps` ,然后在 `1-GPT-SoVITS-TTS/1C-推理` 中打开推理webUI
双击 `go-webui.bat` 或者使用 `go-webui.ps1` ,然后在 `1-GPT-SoVITS-TTS/1C-推理` 中打开推理webUI

#### 其他

@@ -171,8 +171,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

#### 統合パッケージ利用者

`go-webui.bat`をダブルクリックするか、`go-webui.ps`を使用します。
V1に切り替えたい場合は、`go-webui-v1.bat`をダブルクリックするか、`go-webui-v1.ps`を使用してください。
`go-webui.bat`をダブルクリックするか、`go-webui.ps1`を使用します。
V1に切り替えたい場合は、`go-webui-v1.bat`をダブルクリックするか、`go-webui-v1.ps1`を使用してください。

#### その他

@@ -207,7 +207,7 @@ python webui.py v1 <言語(オプション)>

#### 統合パッケージ利用者

`go-webui-v2.bat`をダブルクリックするか、`go-webui-v2.ps`を使用して、`1-GPT-SoVITS-TTS/1C-inference`で推論webuiを開きます。
`go-webui-v2.bat`をダブルクリックするか、`go-webui-v2.ps1`を使用して、`1-GPT-SoVITS-TTS/1C-inference`で推論webuiを開きます。

#### その他

@@ -175,8 +175,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

#### 통합 패키지 사용자

`go-webui.bat`을 더블 클릭하거나 `go-webui.ps`를 사용하십시오.
V1으로 전환하려면, `go-webui-v1.bat`을 더블 클릭하거나 `go-webui-v1.ps`를 사용하십시오.
`go-webui.bat`을 더블 클릭하거나 `go-webui.ps1`를 사용하십시오.
V1으로 전환하려면, `go-webui-v1.bat`을 더블 클릭하거나 `go-webui-v1.ps1`를 사용하십시오.

#### 기타

@@ -211,7 +211,7 @@ python webui.py v1 <언어(옵션)>

#### 통합 패키지 사용자

`go-webui-v2.bat`을 더블 클릭하거나 `go-webui-v2.ps`를 사용한 다음 `1-GPT-SoVITS-TTS/1C-inference`에서 추론 webui를 엽니다.
`go-webui-v2.bat`을 더블 클릭하거나 `go-webui-v2.ps1`를 사용한 다음 `1-GPT-SoVITS-TTS/1C-inference`에서 추론 webui를 엽니다.

#### 기타

@@ -172,8 +172,8 @@ D:\GPT-SoVITS\xxx/xxx.wav|xxx|en|I like playing Genshin.

#### Entegre Paket Kullanıcıları

`go-webui.bat` dosyasına çift tıklayın veya `go-webui.ps` kullanın.
V1'e geçmek istiyorsanız, `go-webui-v1.bat` dosyasına çift tıklayın veya `go-webui-v1.ps` kullanın.
`go-webui.bat` dosyasına çift tıklayın veya `go-webui.ps1` kullanın.
V1'e geçmek istiyorsanız, `go-webui-v1.bat` dosyasına çift tıklayın veya `go-webui-v1.ps1` kullanın.

#### Diğerleri

@@ -208,7 +208,7 @@ veya WebUI'de manuel olarak sürüm değiştirin.

#### Entegre Paket Kullanıcıları

`go-webui-v2.bat` dosyasına çift tıklayın veya `go-webui-v2.ps` kullanın, ardından çıkarım webui'sini `1-GPT-SoVITS-TTS/1C-inference` adresinde açın.
`go-webui-v2.bat` dosyasına çift tıklayın veya `go-webui-v2.ps1` kullanın, ardından çıkarım webui'sini `1-GPT-SoVITS-TTS/1C-inference` adresinde açın.

#### Diğerleri

@@ -330,4 +330,4 @@ python ./tools/asr/fasterwhisper_asr.py -i <girdi> -o <çıktı> -l <dil>

<a href="https://github.com/RVC-Boss/GPT-SoVITS/graphs/contributors" target="_blank">
  <img src="https://contrib.rocks/image?repo=RVC-Boss/GPT-SoVITS" />
</a>
</a>

@@ -33,3 +33,4 @@ ko_pron
opencc; sys_platform != 'linux'
opencc==1.1.1; sys_platform == 'linux'
python_mecab_ko; sys_platform != 'win32'
fastapi<0.112.2

@@ -4,6 +4,11 @@ import json
import os
import uuid

try:
    import gradio.analytics as analytics
    analytics.version_check = lambda:None
except:...

import librosa
import gradio as gr
import numpy as np

@@ -14,6 +14,11 @@ from mdxnet import MDXNetDereverb
from vr import AudioPre, AudioPreDeEcho
from bsroformer import BsRoformer_Loader

try:
    import gradio.analytics as analytics
    analytics.version_check = lambda:None
except:...

weight_uvr5_root = "tools/uvr5/uvr5_weights"
uvr5_names = []
for name in os.listdir(weight_uvr5_root):
