Add files via upload
This commit is contained in:
50
GPT_SoVITS/prepare_datasets/0-pipeline.py
Normal file
50
GPT_SoVITS/prepare_datasets/0-pipeline.py
Normal file
@@ -0,0 +1,50 @@
|
||||
# Pipeline driver: runs the three dataset-preparation stages in order,
# sharding each stage across n_parts worker processes (round-robin over
# n_card GPUs), then merges the per-shard outputs where a stage produces them.
import os,torch,sys
from subprocess import Popen

now_dir = os.getcwd()
sys.path.append(now_dir)
from config import text_path,wav_dir,n_card,n_process_per_card,exp_name,n_parts,exp_dir

# Log directories for the two training stages.
os.makedirs("%s/logs_s1" % exp_dir, exist_ok=True)
os.makedirs("%s/logs_s2" % exp_dir, exist_ok=True)


def _run_all(cmds):
    # Print and spawn every worker first (they run concurrently), then block
    # until the whole batch has finished.
    procs = []
    for cmd in cmds:
        print(cmd)
        procs.append(Popen(cmd, shell=True))
    for proc in procs:
        proc.wait()


def _merge_parts(part_template, merged_template, header_rows):
    # Concatenate the per-shard output files into a single file (prefixed by
    # header_rows, if any) and delete the shard files afterwards.
    rows = list(header_rows)
    for part in range(n_parts):
        part_path = part_template % (exp_dir, part)
        with open(part_path, "r") as fin:
            rows += fin.read().strip("\n").split("\n")
        os.remove(part_path)
    with open(merged_template % exp_dir, "w") as fout:
        fout.write("\n".join(rows) + "\n")


##############step1
_run_all([
    "python prepare/1-get-text.py %s %s %s %s %s %s"
    % (text_path, wav_dir, exp_name, part, n_parts, part % n_card)
    for part in range(n_parts)
])
_merge_parts("%s/2-name2text-%s.txt", "%s/2-name2text.txt", [])

############step2
_run_all([
    "python prepare/2-get-hubert-wav32k.py %s %s %s %s %s %s"
    % (text_path, wav_dir, exp_name, part, n_parts, part % n_card)
    for part in range(n_parts)
])

#############step3
_run_all([
    "python prepare/3-get-semantic.py %s %s %s %s %s"
    % (text_path, exp_name, part, n_parts, part % n_card)
    for part in range(n_parts)
])
_merge_parts("%s/6-name2semantic-%s.tsv", "%s/6-name2semantic.tsv",
             ["item_name semantic_audio"])
|
||||
109
GPT_SoVITS/prepare_datasets/1-get-text.py
Normal file
109
GPT_SoVITS/prepare_datasets/1-get-text.py
Normal file
@@ -0,0 +1,109 @@
|
||||
# -*- coding: utf-8 -*-

import os

# All worker configuration is handed over via environment variables: the
# launcher spawns one copy of this script per data shard / GPU.
inp_text = os.environ.get("inp_text")            # annotation list: wav|speaker|language|text
inp_wav_dir = os.environ.get("inp_wav_dir")      # directory containing the source wavs
exp_name = os.environ.get("exp_name")            # experiment name
i_part = os.environ.get("i_part")                # shard index of this worker
all_parts = os.environ.get("all_parts")          # total number of shards
# Pin this worker to the GPU chosen by the launcher.
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
opt_dir = os.environ.get("opt_dir")              # experiment output directory
bert_pretrained_dir = os.environ.get("bert_pretrained_dir")  # BERT checkpoint dir
# BUGFIX/security: was eval(os.environ.get("is_half", "True")) — eval of an
# environment variable executes arbitrary code; a plain string comparison keeps
# the documented "True"/"False" contract.
is_half = os.environ.get("is_half", "True") == "True"

import sys, numpy as np, traceback, pdb
import os.path
from glob import glob
from tqdm import tqdm
from text.cleaner import clean_text
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer
import numpy as np

# Legacy positional-argument interface, kept for reference:
# inp_text=sys.argv[1]
# inp_wav_dir=sys.argv[2]
# exp_name=sys.argv[3]
# i_part=sys.argv[4]
# all_parts=sys.argv[5]
# os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[6]#i_gpu
# opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name
# bert_pretrained_dir="/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large"

from time import time as ttime
import shutil
|
||||
def my_save(fea, path):  #####fix issue: torch.save doesn't support chinese path
    """Save *fea* with torch.save, tolerating non-ASCII destination paths.

    torch.save can fail when *path* contains Chinese characters, so the data
    is first written under an ASCII temporary name in the same directory and
    then moved into place (shutil.move handles the non-ASCII target).
    The module-level i_part keeps temp names unique across worker processes.
    """
    # Renamed locals: the original used `dir`, shadowing the builtin.
    dir_name = os.path.dirname(path)
    base_name = os.path.basename(path)
    tmp_path = "%s/%s%s.pth" % (dir_name, ttime(), i_part)
    torch.save(fea, tmp_path)
    shutil.move(tmp_path, "%s/%s" % (dir_name, base_name))
|
||||
|
||||
# Skip the whole shard if its output file already exists (resume support).
txt_path="%s/2-name2text-%s.txt"%(opt_dir,i_part)
if(os.path.exists(txt_path)==False):
    bert_dir="%s/3-bert"%(opt_dir)  # per-utterance BERT feature cache
    os.makedirs(opt_dir,exist_ok=True)
    os.makedirs(bert_dir,exist_ok=True)
    device="cuda:0"  # CUDA_VISIBLE_DEVICES (set above) maps this to the assigned GPU
    tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)
    bert_model=AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir)
    if (is_half == True):
        bert_model = bert_model.half().to(device)
    else:
        bert_model = bert_model.to(device)

    def get_bert_feature(text, word2ph):
        """Return phone-level BERT features for *text*, transposed to
        (hidden_dim, n_phones).

        Takes hidden_states[-3] of the model output, strips the first and last
        token positions (presumably [CLS]/[SEP] — confirm against tokenizer),
        then repeats the i-th character's vector word2ph[i] times so each
        phone gets its character's feature.
        """
        with torch.no_grad():
            inputs = tokenizer(text, return_tensors="pt")
            for i in inputs:
                inputs[i] = inputs[i].to(device)
            res = bert_model(**inputs, output_hidden_states=True)
            # [-3:-2] selects exactly one layer; cat keeps the code tolerant
            # to selecting several.
            res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]

        # One word2ph entry per character of the (normalized) text.
        assert len(word2ph) == len(text)
        phone_level_feature = []
        for i in range(len(word2ph)):
            repeat_feature = res[i].repeat(word2ph[i], 1)
            phone_level_feature.append(repeat_feature)

        phone_level_feature = torch.cat(phone_level_feature, dim=0)

        return phone_level_feature.T

    def process(data,res):
        """For each (name, text, lang) record: phonemize via clean_text, cache
        BERT features for Chinese utterances, and append
        [name, phones, word2ph, norm_text] to *res*. Failures are logged and
        the record is skipped (best effort)."""
        for name,text,lan in data:
            try:
                name=os.path.basename(name)
                # '%' and '¥' are replaced up front — presumably they break a
                # downstream consumer; TODO confirm which.
                phones, word2ph, norm_text=clean_text(text.replace("%", '-').replace('¥', ','),lan)
                path_bert="%s/%s.pt"%(bert_dir,name)
                # BERT features are only computed/cached for Chinese text.
                if (os.path.exists(path_bert) == False and lan == "zh"):
                    bert_feature = get_bert_feature(norm_text, word2ph)
                    assert bert_feature.shape[-1] == len(phones)
                    # torch.save(bert_feature, path_bert)
                    my_save(bert_feature, path_bert)
                phones = " ".join(phones)
                # res.append([name,phones])
                res.append([name,phones, word2ph, norm_text])
            except:
                print(name, text, traceback.format_exc())

    todo=[]
    res=[]
    with open(inp_text,"r",encoding="utf8")as f:
        lines=f.read().strip("\n").split("\n")

    # Map legacy uppercase language tags to the v2 lowercase ones; unknown
    # tags pass through unchanged.
    language_v1_to_language_v2={
        "ZH":"zh"
    }
    # Interleaved sharding: this worker handles lines i_part, i_part+all_parts, ...
    for line in lines[int(i_part)::int(all_parts)]:
        try:
            wav_name,spk_name,language,text=line.split("|")
            # todo.append([name,text,"zh"])
            todo.append([wav_name,text,language_v1_to_language_v2.get(language,language)])
        except:
            # Malformed annotation line: log and continue.
            print(line,traceback.format_exc())

    process(todo,res)
    opt=[]
    for name,phones, word2ph, norm_text in res:
        opt.append("%s\t%s\t%s\t%s"%(name,phones, word2ph, norm_text))
    with open(txt_path,"w",encoding="utf8")as f:
        f.write("\n".join(opt)+"\n")
|
||||
|
||||
94
GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
Normal file
94
GPT_SoVITS/prepare_datasets/2-get-hubert-wav32k.py
Normal file
@@ -0,0 +1,94 @@
|
||||
# -*- coding: utf-8 -*-

import sys, os

# Worker configuration arrives via environment variables (one process per
# data shard / GPU, spawned by the launcher).
inp_text = os.environ.get("inp_text")            # annotation list: wav|speaker|language|text
inp_wav_dir = os.environ.get("inp_wav_dir")      # directory containing the source wavs
exp_name = os.environ.get("exp_name")            # experiment name
i_part = os.environ.get("i_part")                # shard index of this worker
all_parts = os.environ.get("all_parts")          # total number of shards
# Pin this worker to the GPU chosen by the launcher.
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
from feature_extractor import cnhubert

opt_dir = os.environ.get("opt_dir")              # experiment output directory
cnhubert.cnhubert_base_path = os.environ.get("cnhubert_base_dir")
# BUGFIX/security: was eval(os.environ.get("is_half", "True")) — never eval an
# environment variable; compare against the documented "True"/"False" values.
is_half = os.environ.get("is_half", "True") == "True"

import pdb, traceback, numpy as np, logging
from scipy.io import wavfile
import librosa, torch

now_dir = os.getcwd()
sys.path.append(now_dir)
from my_utils import load_audio

# Legacy configuration paths/positional arguments, kept for reference:
# from config import cnhubert_base_path
# cnhubert.cnhubert_base_path=cnhubert_base_path
# inp_text=sys.argv[1]
# inp_wav_dir=sys.argv[2]
# exp_name=sys.argv[3]
# i_part=sys.argv[4]
# all_parts=sys.argv[5]
# os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[6]
# cnhubert.cnhubert_base_path=sys.argv[7]
# opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name

from time import time as ttime
import shutil
|
||||
def my_save(fea, path):  #####fix issue: torch.save doesn't support chinese path
    """Save *fea* with torch.save, tolerating non-ASCII destination paths.

    torch.save can fail when *path* contains Chinese characters, so the data
    is first written under an ASCII temporary name in the same directory and
    then moved into place (shutil.move handles the non-ASCII target).
    The module-level i_part keeps temp names unique across worker processes.
    """
    # Renamed locals: the original used `dir`, shadowing the builtin.
    dir_name = os.path.dirname(path)
    base_name = os.path.basename(path)
    tmp_path = "%s/%s%s.pth" % (dir_name, ttime(), i_part)
    torch.save(fea, tmp_path)
    shutil.move(tmp_path, "%s/%s" % (dir_name, base_name))
||||
|
||||
# Output locations: cached SSL features and the normalized 32 kHz wavs.
hubert_dir="%s/4-cnhubert"%(opt_dir)
wav32dir="%s/5-wav32k"%(opt_dir)
os.makedirs(opt_dir,exist_ok=True)
os.makedirs(hubert_dir,exist_ok=True)
os.makedirs(wav32dir,exist_ok=True)

maxx=0.95   # peak target as a fraction of int16 full scale (see name2go)
alpha=0.5   # blend weight between peak-normalized and raw signal (see name2go)
device="cuda:0"  # CUDA_VISIBLE_DEVICES already restricts this process to one GPU
model=cnhubert.get_model()
if(is_half==True):
    model=model.half().to(device)
else:
    model = model.to(device)
|
||||
def name2go(wav_name):
    """Process one utterance: load its wav, normalize/blend it, write a
    32 kHz int16 copy, and extract + cache its cn-hubert features.

    Returns early (silently) when the feature file already exists (resume
    support) or when the extracted features contain NaNs; loudly skips
    recordings whose peak exceeds 2.2.
    """
    hubert_path="%s/%s.pt"%(hubert_dir,wav_name)
    if(os.path.exists(hubert_path)):return  # already processed in a previous run
    wav_path="%s/%s"%(inp_wav_dir,wav_name)
    tmp_audio = load_audio(wav_path, 32000)  # float waveform at 32 kHz
    tmp_max = np.abs(tmp_audio).max()
    if tmp_max > 2.2:
        # Abnormally loud / clipped recording: drop it.
        # BUGFIX: the original printed undefined names idx0/idx1 here, so this
        # branch itself crashed with NameError; report the offending file.
        print("%s-filtered,%s" % (wav_name, tmp_max))
        return
    # Blend a peak-normalized copy (scaled to maxx of full scale) with the raw
    # signal, in int16 range.
    tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha*32768)) + ((1 - alpha)*32768) * tmp_audio
    # cn-hubert expects 16 kHz input.
    tmp_audio = librosa.resample(
        tmp_audio32, orig_sr=32000, target_sr=16000
    )
    tensor_wav16 = torch.from_numpy(tmp_audio)
    if (is_half == True):
        tensor_wav16=tensor_wav16.half().to(device)
    else:
        tensor_wav16 = tensor_wav16.to(device)
    ssl=model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"].transpose(1,2).cpu()#torch.Size([1, 768, 215])
    if np.isnan(ssl.detach().numpy()).sum()!= 0:return  # corrupt features: skip
    wavfile.write(
        "%s/%s"%(wav32dir,wav_name),
        32000,
        tmp_audio32.astype("int16"),
    )
    # torch.save(ssl,hubert_path )
    my_save(ssl,hubert_path )  # atomic save that tolerates non-ASCII paths
|
||||
|
||||
# Drive this worker's shard: every all_parts-th annotation line, offset i_part.
with open(inp_text, "r", encoding="utf8") as fin:
    all_lines = fin.read().strip("\n").split("\n")

for entry in all_lines[int(i_part)::int(all_parts)]:
    try:
        # Annotation format: wav_path|speaker|language|text.
        wav_name, spk_name, language, text = entry.split("|")
        name2go(os.path.basename(wav_name))
    except:
        # Best effort: log malformed/failed lines and keep going.
        print(entry, traceback.format_exc())
|
||||
81
GPT_SoVITS/prepare_datasets/3-get-semantic.py
Normal file
81
GPT_SoVITS/prepare_datasets/3-get-semantic.py
Normal file
@@ -0,0 +1,81 @@
|
||||
import os

# Worker configuration arrives via environment variables (one process per
# data shard / GPU, spawned by the launcher).
inp_text = os.environ.get("inp_text")            # annotation list: wav|speaker|language|text
exp_name = os.environ.get("exp_name")            # experiment name
i_part = os.environ.get("i_part")                # shard index of this worker
all_parts = os.environ.get("all_parts")          # total number of shards
# Pin this worker to the GPU chosen by the launcher.
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
opt_dir = os.environ.get("opt_dir")              # experiment output directory
pretrained_s2G = os.environ.get("pretrained_s2G")  # pretrained stage-2 generator checkpoint
s2config_path = os.environ.get("s2config_path")    # stage-2 hparams config file
# BUGFIX/security: was eval(os.environ.get("is_half", "True")) — never eval an
# environment variable; compare against the documented "True"/"False" values.
is_half = os.environ.get("is_half", "True") == "True"

import math, traceback
import multiprocessing
import sys, pdb

now_dir = os.getcwd()
sys.path.append(now_dir)
from random import shuffle
import torch.multiprocessing as mp
from glob import glob
from tqdm import tqdm
import logging, librosa, utils, torch
from module.models import SynthesizerTrn

logging.getLogger("numba").setLevel(logging.WARNING)
# from config import pretrained_s2G

# Legacy positional-argument interface, kept for reference:
# inp_text=sys.argv[1]
# exp_name=sys.argv[2]
# i_part=sys.argv[3]
# all_parts=sys.argv[4]
# os.environ["CUDA_VISIBLE_DEVICES"]=sys.argv[5]
# opt_dir="/data/docker/liujing04/gpt-vits/fine_tune_dataset/%s"%exp_name
|
||||
|
||||
|
||||
hubert_dir="%s/4-cnhubert"%(opt_dir)  # cn-hubert features produced by step 2
semantic_path="%s/6-name2semantic-%s.tsv"%(opt_dir,i_part)
# Skip the whole shard if its output already exists (resume support).
if(os.path.exists(semantic_path)==False):
    os.makedirs(opt_dir,exist_ok=True)

    device="cuda:0"  # CUDA_VISIBLE_DEVICES (set above) pins the physical GPU
    hps = utils.get_hparams_from_file(s2config_path)
    # Build the SoVITS generator; only its quantizer path (extract_latent) is
    # used here, to turn cached SSL features into discrete semantic token ids.
    vq_model = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model)
    if(is_half==True):
        vq_model=vq_model.half().to(device)
    else:
        vq_model = vq_model.to(device)
    vq_model.eval()
    # utils.load_checkpoint(utils.latest_checkpoint_path(hps.s2_ckpt_dir, "G_*.pth"), vq_model, None, True)
    # utils.load_checkpoint(pretrained_s2G, vq_model, None, True)
    # strict=False: the checkpoint need not cover every module; the returned
    # missing/unexpected-keys report is printed for inspection.
    print(vq_model.load_state_dict(torch.load(pretrained_s2G,map_location="cpu")["weight"], strict=False))

    def name2go(wav_name,lines):
        """Quantize one utterance's cached hubert features into semantic token
        ids and append a tab-separated "name<TAB>ids" row to *lines*.
        Utterances without a cached feature file (filtered in step 2) are
        skipped silently."""
        hubert_path = "%s/%s.pt" % (hubert_dir, wav_name)
        if(os.path.exists(hubert_path)==False):return
        ssl_content = torch.load(hubert_path, map_location="cpu")
        if(is_half==True):
            ssl_content=ssl_content.half().to(device)
        else:
            ssl_content = ssl_content.to(device)
        codes = vq_model.extract_latent(ssl_content)
        semantic = " ".join([str(i) for i in codes[0, 0, :].tolist()])
        lines.append("%s\t%s"%(wav_name,semantic))

    with open(inp_text,"r",encoding="utf8")as f:
        lines=f.read().strip("\n").split("\n")

    lines1=[]
    # Interleaved sharding: this worker handles lines i_part, i_part+all_parts, ...
    for line in lines[int(i_part)::int(all_parts)]:
        # print(line)
        try:
            # wav_name,text=line.split("\t")
            wav_name, spk_name, language, text = line.split("|")
            wav_name=os.path.basename(wav_name)
            # name2go(name,lines1)
            name2go(wav_name,lines1)
        except:
            # Best effort: log malformed/failed lines and continue.
            print(line,traceback.format_exc())
    with open(semantic_path,"w",encoding="utf8")as f:f.write("\n".join(lines1))
|
||||
|
||||
Reference in New Issue
Block a user