more code refactor

This commit is contained in:
Blaise
2024-01-16 17:14:18 +01:00
parent 0d92575115
commit 0d3d47f3c3
44 changed files with 4516 additions and 2623 deletions

View File

@@ -1,50 +1,81 @@
# Orchestrates the three data-preparation stages, sharding the dataset across
# n_parts worker subprocesses assigned round-robin to n_card GPUs, then
# merging each stage's per-part output files into a single artifact.
import os, torch, sys
from subprocess import Popen

now_dir = os.getcwd()
sys.path.append(now_dir)
from config import (
    text_path,
    wav_dir,
    n_card,
    exp_name,
    n_parts,
    exp_dir,
)

os.makedirs("%s/logs_s1" % exp_dir, exist_ok=True)
os.makedirs("%s/logs_s2" % exp_dir, exist_ok=True)

############## step 1: text -> phonemes (each part writes 2-name2text-<i>.txt)
ps = []
for i_part in range(n_parts):
    # NOTE(review): shell=True with an interpolated string breaks if any path
    # contains spaces or shell metacharacters; an argv list would be safer.
    cmd = "python prepare/1-get-text.py %s %s %s %s %s %s" % (
        text_path,
        wav_dir,
        exp_name,
        i_part,
        n_parts,
        i_part % n_card,  # GPU index: round-robin over available cards
    )
    print(cmd)
    p = Popen(cmd, shell=True)
    ps.append(p)
for p in ps:
    p.wait()

# Merge the per-part text outputs and delete the shards.
opt = []
for i_part in range(n_parts):
    txt_path = "%s/2-name2text-%s.txt" % (exp_dir, i_part)
    with open(txt_path, "r") as f:
        opt += f.read().strip("\n").split("\n")
    os.remove(txt_path)
with open("%s/2-name2text.txt" % exp_dir, "w") as f:
    f.write("\n".join(opt) + "\n")

############ step 2: HuBERT features + 32 kHz wavs (side-effect files only)
ps = []
for i_part in range(n_parts):
    cmd = "python prepare/2-get-hubert-wav32k.py %s %s %s %s %s %s" % (
        text_path,
        wav_dir,
        exp_name,
        i_part,
        n_parts,
        i_part % n_card,
    )
    print(cmd)
    p = Popen(cmd, shell=True)
    ps.append(p)
for p in ps:
    p.wait()

############# step 3: semantic tokens (each part writes 6-name2semantic-<i>.tsv)
ps = []
for i_part in range(n_parts):
    cmd = "python prepare/3-get-semantic.py %s %s %s %s %s" % (
        text_path,
        exp_name,
        i_part,
        n_parts,
        i_part % n_card,
    )
    print(cmd)
    p = Popen(cmd, shell=True)
    ps.append(p)
for p in ps:
    p.wait()

# Merge per-part TSVs under a single header row and delete the shards.
opt = ["item_name semantic_audio"]
for i_part in range(n_parts):
    semantic_path = "%s/6-name2semantic-%s.tsv" % (exp_dir, i_part)
    with open(semantic_path, "r") as f:
        opt += f.read().strip("\n").split("\n")
    os.remove(semantic_path)
with open("%s/6-name2semantic.tsv" % exp_dir, "w") as f:
    f.write("\n".join(opt) + "\n")

View File

@@ -2,16 +2,16 @@
import os

# Worker configuration is passed via environment variables by the launcher.
inp_text = os.environ.get("inp_text")
inp_wav_dir = os.environ.get("inp_wav_dir")
exp_name = os.environ.get("exp_name")
i_part = os.environ.get("i_part")
all_parts = os.environ.get("all_parts")
# NOTE(review): raises TypeError if _CUDA_VISIBLE_DEVICES is unset (value None).
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
opt_dir = os.environ.get("opt_dir")
bert_pretrained_dir = os.environ.get("bert_pretrained_dir")
# SECURITY: eval() on an environment variable executes arbitrary code; only
# "True"/"False" are expected here — a literal comparison would be safer.
is_half = eval(os.environ.get("is_half", "True"))
import sys, numpy as np, traceback, pdb
import os.path
from glob import glob
from tqdm import tqdm
@@ -31,25 +31,29 @@ import numpy as np
from time import time as ttime
import shutil


def my_save(fea, path):  # fix issue: torch.save doesn't support chinese path
    """Save `fea` with torch.save under an ASCII temp name, then move it
    onto `path` (which may contain non-ASCII characters)."""
    out_dir = os.path.dirname(path)  # renamed: `dir` shadowed the builtin
    name = os.path.basename(path)
    tmp_path = "%s/%s%s.pth" % (out_dir, ttime(), i_part)
    torch.save(fea, tmp_path)
    shutil.move(tmp_path, "%s/%s" % (out_dir, name))


txt_path = "%s/2-name2text-%s.txt" % (opt_dir, i_part)
# Skip the whole extraction if this part's output already exists.
if not os.path.exists(txt_path):
    bert_dir = "%s/3-bert" % (opt_dir)
    os.makedirs(opt_dir, exist_ok=True)
    os.makedirs(bert_dir, exist_ok=True)
    device = "cuda:0"
    tokenizer = AutoTokenizer.from_pretrained(bert_pretrained_dir)
    bert_model = AutoModelForMaskedLM.from_pretrained(bert_pretrained_dir)
    if is_half:
        bert_model = bert_model.half().to(device)
    else:
        bert_model = bert_model.to(device)
def get_bert_feature(text, word2ph):
with torch.no_grad():
inputs = tokenizer(text, return_tensors="pt")
@@ -67,51 +71,55 @@ if(os.path.exists(txt_path)==False):
phone_level_feature = torch.cat(phone_level_feature, dim=0)
return phone_level_feature.T
def process(data, res):
    """Clean each (name, text, lang) item: phonemize it, cache a BERT
    feature for Chinese items, and append [name, phones, word2ph,
    norm_text] to `res`. Failures are logged and skipped so one bad line
    cannot abort the whole part.
    """
    for name, text, lan in data:
        try:
            name = os.path.basename(name)
            # BUG FIX: the fullwidth yen sign "￥" was mis-encoded to "" in a
            # previous pass; str.replace("", ",") would insert a comma
            # between every character of the text.
            phones, word2ph, norm_text = clean_text(
                text.replace("%", "-").replace("￥", ","), lan
            )
            path_bert = "%s/%s.pt" % (bert_dir, name)
            if not os.path.exists(path_bert) and lan == "zh":
                bert_feature = get_bert_feature(norm_text, word2ph)
                assert bert_feature.shape[-1] == len(phones)
                my_save(bert_feature, path_bert)
            phones = " ".join(phones)
            res.append([name, phones, word2ph, norm_text])
        except Exception:  # was bare except: would swallow KeyboardInterrupt
            print(name, text, traceback.format_exc())
todo = []
res = []
with open(inp_text, "r", encoding="utf8") as f:
    lines = f.read().strip("\n").split("\n")

# Normalize legacy language tags to v2 codes; unknown tags pass through.
language_v1_to_language_v2 = {
    "ZH": "zh",
    "zh": "zh",
    "JP": "ja",
    "jp": "ja",
    "JA": "ja",
    "ja": "ja",
    "EN": "en",
    "en": "en",
    "En": "en",
}
# Each worker handles every all_parts-th line, offset by its own i_part.
for line in lines[int(i_part) :: int(all_parts)]:
    try:
        wav_name, spk_name, language, text = line.split("|")
        todo.append(
            [wav_name, text, language_v1_to_language_v2.get(language, language)]
        )
    except Exception:  # malformed line: log it and keep going
        print(line, traceback.format_exc())

process(todo, res)
opt = []
for name, phones, word2ph, norm_text in res:
    opt.append("%s\t%s\t%s\t%s" % (name, phones, word2ph, norm_text))
with open(txt_path, "w", encoding="utf8") as f:
    f.write("\n".join(opt) + "\n")

View File

@@ -1,20 +1,23 @@
# -*- coding: utf-8 -*-
import sys, os

# Worker configuration comes from environment variables set by the launcher.
inp_text = os.environ.get("inp_text")
inp_wav_dir = os.environ.get("inp_wav_dir")
exp_name = os.environ.get("exp_name")
i_part = os.environ.get("i_part")
all_parts = os.environ.get("all_parts")
# NOTE(review): raises TypeError if _CUDA_VISIBLE_DEVICES is unset (value None).
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
from feature_extractor import cnhubert

opt_dir = os.environ.get("opt_dir")
cnhubert.cnhubert_base_path = os.environ.get("cnhubert_base_dir")
# SECURITY: eval() on an environment variable executes arbitrary code; only
# "True"/"False" are expected here — a literal comparison would be safer.
is_half = eval(os.environ.get("is_half", "True"))
import pdb, traceback, numpy as np, logging
from scipy.io import wavfile
import librosa, torch

now_dir = os.getcwd()
sys.path.append(now_dir)
from my_utils import load_audio
@@ -32,63 +35,75 @@ from my_utils import load_audio
from time import time as ttime
import shutil


def my_save(fea, path):  # fix issue: torch.save doesn't support chinese path
    """Save `fea` with torch.save under an ASCII temp name, then move it
    onto `path` (which may contain non-ASCII characters)."""
    out_dir = os.path.dirname(path)  # renamed: `dir` shadowed the builtin
    name = os.path.basename(path)
    tmp_path = "%s/%s%s.pth" % (out_dir, ttime(), i_part)
    torch.save(fea, tmp_path)
    shutil.move(tmp_path, "%s/%s" % (out_dir, name))


hubert_dir = "%s/4-cnhubert" % (opt_dir)
wav32dir = "%s/5-wav32k" % (opt_dir)
os.makedirs(opt_dir, exist_ok=True)
os.makedirs(hubert_dir, exist_ok=True)
os.makedirs(wav32dir, exist_ok=True)

maxx = 0.95  # peak target used when renormalizing audio in name2go
alpha = 0.5  # blend weight between renormalized and raw audio
device = "cuda:0"
model = cnhubert.get_model()
if is_half:
    model = model.half().to(device)
else:
    model = model.to(device)
def name2go(wav_name):
    """Extract the HuBERT feature and a 32 kHz wav for one input file.

    Skips files whose feature is already cached, whose peak amplitude
    exceeds 2.2 (likely corrupt), or whose SSL output contains NaNs.
    Writes <wav32dir>/<wav_name> and <hubert_dir>/<wav_name>.pt.
    """
    hubert_path = "%s/%s.pt" % (hubert_dir, wav_name)
    if os.path.exists(hubert_path):
        return
    wav_path = "%s/%s" % (inp_wav_dir, wav_name)
    tmp_audio = load_audio(wav_path, 32000)
    tmp_max = np.abs(tmp_audio).max()
    if tmp_max > 2.2:
        # BUG FIX: the original printed idx0/idx1, which are undefined in this
        # function and raised NameError whenever a file was filtered.
        print("%s-filtered,%s" % (wav_name, tmp_max))
        return
    # Blend a peak-normalized copy with the raw signal, scaled to int16 range.
    tmp_audio32 = (tmp_audio / tmp_max * (maxx * alpha * 32768)) + (
        (1 - alpha) * 32768
    ) * tmp_audio
    tmp_audio = librosa.resample(tmp_audio32, orig_sr=32000, target_sr=16000)
    tensor_wav16 = torch.from_numpy(tmp_audio)
    if is_half:
        tensor_wav16 = tensor_wav16.half().to(device)
    else:
        tensor_wav16 = tensor_wav16.to(device)
    ssl = (
        model.model(tensor_wav16.unsqueeze(0))["last_hidden_state"]
        .transpose(1, 2)
        .cpu()
    )  # torch.Size([1, 768, 215])
    if np.isnan(ssl.detach().numpy()).sum() != 0:
        return  # corrupt feature: write nothing for this file
    wavfile.write(
        "%s/%s" % (wav32dir, wav_name),
        32000,
        tmp_audio32.astype("int16"),
    )
    my_save(ssl, hubert_path)
with open(inp_text, "r", encoding="utf8") as f:
    lines = f.read().strip("\n").split("\n")

# Each worker handles every all_parts-th line, offset by its own i_part.
for line in lines[int(i_part) :: int(all_parts)]:
    try:
        # wav_name,text=line.split("\t")
        wav_name, spk_name, language, text = line.split("|")
        wav_name = os.path.basename(wav_name)
        name2go(wav_name)
    except Exception:  # was bare except: would swallow KeyboardInterrupt
        print(line, traceback.format_exc())

View File

@@ -1,24 +1,27 @@
import os

# Worker configuration comes from environment variables set by the launcher.
inp_text = os.environ.get("inp_text")
exp_name = os.environ.get("exp_name")
i_part = os.environ.get("i_part")
all_parts = os.environ.get("all_parts")
# NOTE(review): raises TypeError if _CUDA_VISIBLE_DEVICES is unset (value None).
os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("_CUDA_VISIBLE_DEVICES")
opt_dir = os.environ.get("opt_dir")
pretrained_s2G = os.environ.get("pretrained_s2G")
s2config_path = os.environ.get("s2config_path")
# SECURITY: eval() on an environment variable executes arbitrary code; only
# "True"/"False" are expected here — a literal comparison would be safer.
is_half = eval(os.environ.get("is_half", "True"))
import math, traceback
import multiprocessing
import sys, pdb

now_dir = os.getcwd()
sys.path.append(now_dir)
from random import shuffle
import torch.multiprocessing as mp
from glob import glob
from tqdm import tqdm
import logging, librosa, utils, torch
from module.models import SynthesizerTrn

logging.getLogger("numba").setLevel(logging.WARNING)
@@ -30,52 +33,58 @@ logging.getLogger("numba").setLevel(logging.WARNING)
hubert_dir = "%s/4-cnhubert" % (opt_dir)
semantic_path = "%s/6-name2semantic-%s.tsv" % (opt_dir, i_part)
# Skip model loading and extraction if this part's output already exists.
if not os.path.exists(semantic_path):
    os.makedirs(opt_dir, exist_ok=True)

    device = "cuda:0"
    hps = utils.get_hparams_from_file(s2config_path)
    vq_model = SynthesizerTrn(
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model
    )
    if is_half:
        vq_model = vq_model.half().to(device)
    else:
        vq_model = vq_model.to(device)
    vq_model.eval()
    # Print the load_state_dict result (missing/unexpected keys): strict=False
    # can silently drop parameters, so surface what actually matched.
    print(
        vq_model.load_state_dict(
            torch.load(pretrained_s2G, map_location="cpu")["weight"], strict=False
        )
    )
def name2go(wav_name, lines):
    """Append "<wav_name>\t<semantic tokens>" to `lines` for one wav.

    Loads the cached HuBERT feature, quantizes it with vq_model, and
    serializes codes[0, 0, :] as space-separated integers. Wavs with no
    cached feature are silently skipped.
    """
    hubert_path = "%s/%s.pt" % (hubert_dir, wav_name)
    if not os.path.exists(hubert_path):  # idiomatic form of `== False`
        return
    ssl_content = torch.load(hubert_path, map_location="cpu")
    if is_half:
        ssl_content = ssl_content.half().to(device)
    else:
        ssl_content = ssl_content.to(device)
    codes = vq_model.extract_latent(ssl_content)
    semantic = " ".join([str(i) for i in codes[0, 0, :].tolist()])
    lines.append("%s\t%s" % (wav_name, semantic))
with open(inp_text, "r", encoding="utf8") as f:
    lines = f.read().strip("\n").split("\n")

lines1 = []
# Each worker handles every all_parts-th line, offset by its own i_part.
for line in lines[int(i_part) :: int(all_parts)]:
    try:
        # wav_name,text=line.split("\t")
        wav_name, spk_name, language, text = line.split("|")
        wav_name = os.path.basename(wav_name)
        name2go(wav_name, lines1)
    except Exception:  # was bare except: would swallow KeyboardInterrupt
        print(line, traceback.format_exc())
with open(semantic_path, "w", encoding="utf8") as f:
    f.write("\n".join(lines1))