Merge branch 'main' of https://github.com/pengooseDev/GPT-SoVITS
31  tools/asr/config.py  Normal file
@@ -0,0 +1,31 @@
import os

def check_fw_local_models():
    '''
    Check at startup whether Faster Whisper models are already present locally.
    '''
    model_size_list = [
        "tiny", "tiny.en",
        "base", "base.en",
        "small", "small.en",
        "medium", "medium.en",
        "large", "large-v1",
        "large-v2", "large-v3"]
    for i, size in enumerate(model_size_list):
        # A local copy is advertised with a "-local" suffix so the UI can offer it
        if os.path.exists(f'tools/asr/models/faster-whisper-{size}'):
            model_size_list[i] = size + '-local'
    return model_size_list

asr_dict = {
    "达摩 ASR (中文)": {   # FunASR/DAMO, Chinese only
        'lang': ['zh'],
        'size': ['large'],
        'path': 'funasr_asr.py',
    },
    "Faster Whisper (多语种)": {   # Faster Whisper, multilingual
        'lang': ['auto', 'zh', 'en', 'ja'],
        'size': check_fw_local_models(),
        'path': 'fasterwhisper_asr.py'
    }
}
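For context, asr_dict maps each UI tool name to the script that implements it; a minimal sketch (not part of this commit) of how a launcher might dispatch on it, where the tool choice and input folder are hypothetical:

import subprocess
from tools.asr.config import asr_dict

tool = "Faster Whisper (多语种)"            # hypothetical UI selection
subprocess.run([
    "python", f"tools/asr/{asr_dict[tool]['path']}",
    "-i", "output/slicer_opt",               # hypothetical folder of WAV files
    "-o", "output/asr_opt",
    "-s", asr_dict[tool]['size'][-1],        # last entry, e.g. "large-v3" or "large-v3-local"
    "-l", "ja",
    "-p", "float16",
])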
107  tools/asr/fasterwhisper_asr.py  Normal file
@@ -0,0 +1,107 @@
import argparse
import os
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
import traceback
import requests
from glob import glob

from faster_whisper import WhisperModel
from tqdm import tqdm

from tools.asr.config import check_fw_local_models
from tools.asr.funasr_asr import only_asr

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

language_code_list = [
    "af", "am", "ar", "as", "az",
    "ba", "be", "bg", "bn", "bo",
    "br", "bs", "ca", "cs", "cy",
    "da", "de", "el", "en", "es",
    "et", "eu", "fa", "fi", "fo",
    "fr", "gl", "gu", "ha", "haw",
    "he", "hi", "hr", "ht", "hu",
    "hy", "id", "is", "it", "ja",
    "jw", "ka", "kk", "km", "kn",
    "ko", "la", "lb", "ln", "lo",
    "lt", "lv", "mg", "mi", "mk",
    "ml", "mn", "mr", "ms", "mt",
    "my", "ne", "nl", "nn", "no",
    "oc", "pa", "pl", "ps", "pt",
    "ro", "ru", "sa", "sd", "si",
    "sk", "sl", "sn", "so", "sq",
    "sr", "su", "sv", "sw", "ta",
    "te", "tg", "th", "tk", "tl",
    "tr", "tt", "uk", "ur", "uz",
    "vi", "yi", "yo", "zh", "yue",
    "auto"]

def execute_asr(input_folder, output_folder, model_size, language, precision):
    if '-local' in model_size:
        model_size = model_size[:-6]  # strip the "-local" suffix
        model_path = f'tools/asr/models/faster-whisper-{model_size}'
    else:
        model_path = model_size
    if language == 'auto':
        language = None  # leave unset so the model picks the most probable language
    print("loading faster whisper model:", model_size, model_path)
    try:
        model = WhisperModel(model_path, device="cuda", compute_type=precision)
    except:
        return print(traceback.format_exc())
    output = []
    output_file_name = os.path.basename(input_folder)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    for file in tqdm(glob(os.path.join(input_folder, '**/*.wav'), recursive=True)):
        try:
            segments, info = model.transcribe(
                audio          = file,
                beam_size      = 5,
                vad_filter     = True,
                vad_parameters = dict(min_silence_duration_ms=700),
                language       = language)
            text = ''

            if info.language == "zh":
                print("检测为中文文本,转funasr处理")  # Chinese detected: hand off to FunASR
                text = only_asr(file)

            if text == '':
                for segment in segments:
                    text += segment.text
            output.append(f"{file}|{output_file_name}|{info.language.upper()}|{text}")
        except:
            return print(traceback.format_exc())

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
    print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")  # ASR done -> transcript path
    return output_file_path

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True,
                        help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default='large-v3',
                        choices=check_fw_local_models(),
                        help="Model size of Faster Whisper")
    parser.add_argument("-l", "--language", type=str, default='ja',
                        choices=language_code_list,
                        help="Language of the audio files.")
    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
                        help="fp16 or fp32")

    cmd = parser.parse_args()
    output_file_path = execute_asr(
        input_folder  = cmd.input_folder,
        output_folder = cmd.output_folder,
        model_size    = cmd.model_size,
        language      = cmd.language,
        precision     = cmd.precision,
    )
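For reference, a typical invocation of the script above, run from the repository root (the folder names here are illustrative, not mandated by the commit):

python tools/asr/fasterwhisper_asr.py -i output/slicer_opt -o output/asr_opt -s large-v3 -l ja -p float16

Each line of the resulting output/asr_opt/slicer_opt.list has the form wav_path|folder_name|LANGUAGE|text.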
76  tools/asr/funasr_asr.py  Normal file
@@ -0,0 +1,76 @@
# -*- coding:utf-8 -*-

import argparse
import os
import traceback
from tqdm import tqdm

from funasr import AutoModel

# Prefer local model folders; fall back to the ModelScope hub IDs
path_asr  = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad  = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc = 'tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr  = path_asr  if os.path.exists(path_asr)  else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad  = path_vad  if os.path.exists(path_vad)  else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

model = AutoModel(
    model               = path_asr,
    model_revision      = "v2.0.4",
    vad_model           = path_vad,
    vad_model_revision  = "v2.0.4",
    punc_model          = path_punc,
    punc_model_revision = "v2.0.4",
)

def only_asr(input_file):
    try:
        text = model.generate(input=input_file)[0]["text"]
    except:
        text = ''
        print(traceback.format_exc())
    return text

def execute_asr(input_folder, output_folder, model_size, language):
    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)

    for name in tqdm(input_file_names):
        try:
            text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"]
            output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
        except:
            print(traceback.format_exc())

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
    print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")  # ASR done -> transcript path
    return output_file_path

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True,
                        help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default='large',
                        help="Model size of FunASR (only 'large')")
    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'],
                        help="Language of the audio files.")
    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
                        help="fp16 or fp32")  # not wired up yet

    cmd = parser.parse_args()
    execute_asr(
        input_folder  = cmd.input_folder,
        output_folder = cmd.output_folder,
        model_size    = cmd.model_size,
        language      = cmd.language,
    )
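Note that fasterwhisper_asr.py above imports only_asr from this module, so the FunASR stack also serves as the fallback path for Chinese audio detected by Faster Whisper. A minimal sketch of using it directly, with a hypothetical WAV path:

from tools.asr.funasr_asr import only_asr

text = only_asr("output/slicer_opt/sample_0000000000_0000123456.wav")  # hypothetical file
print(text)  # Chinese transcription, punctuation restored by the punc model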
2  tools/asr/models/.gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
*
!.gitignore
@@ -1,27 +0,0 @@
# -*- coding:utf-8 -*-

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
import sys,os,traceback
dir=sys.argv[1]
# opt_name=dir.split("\\")[-1].split("/")[-1]
opt_name=os.path.basename(dir)
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    vad_model='tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch',
    punc_model='tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
)

opt=[]
for name in os.listdir(dir):
    try:
        text = inference_pipeline(audio_in="%s/%s"%(dir,name))["text"]
        opt.append("%s/%s|%s|ZH|%s"%(dir,name,opt_name,text))
    except:
        print(traceback.format_exc())

opt_dir="output/asr_opt"
os.makedirs(opt_dir,exist_ok=True)
with open("%s/%s.list"%(opt_dir,opt_name),"w",encoding="utf-8")as f:f.write("\n".join(opt))
@@ -1,3 +1,4 @@
import platform,os,traceback
import ffmpeg
import numpy as np
@@ -7,15 +8,24 @@ def load_audio(file, sr):
         # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26
         # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
         # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
-        file = (
-            file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
-        )  # guard against pasted paths carrying stray spaces, quotes, or newlines
+        file = clean_path(file)  # guard against pasted paths carrying stray spaces, quotes, or newlines
+        if os.path.exists(file) == False:
+            raise RuntimeError(
+                "You input a wrong audio path that does not exists, please fix it!"
+            )
         out, _ = (
             ffmpeg.input(file, threads=0)
             .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
             .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
         )
     except Exception as e:
         traceback.print_exc()
         raise RuntimeError(f"Failed to load audio: {e}")

     return np.frombuffer(out, np.float32).flatten()


+def clean_path(path_str):
+    if platform.system() == 'Windows':
+        path_str = path_str.replace('/', '\\')
+    return path_str.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
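clean_path centralizes the ad-hoc stripping that load_audio used to do inline; for illustration (hypothetical input), on Windows:

clean_path(' "C:/data/voice.wav"\n ')   # -> 'C:\\data\\voice.wav'

On other platforms the slashes are left alone and only the surrounding spaces, quotes, and newline are removed.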
@@ -11,7 +11,7 @@ def slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_
     if os.path.isfile(inp):
         input=[inp]
     elif os.path.isdir(inp):
-        input=["%s/%s"%(inp,name)for name in sorted(list(os.listdir(inp)))]
+        input=[os.path.join(inp, name) for name in sorted(list(os.listdir(inp)))]
     else:
         return "输入路径存在但既不是文件也不是文件夹"  # input path exists but is neither a file nor a folder
     slicer = Slicer(
@@ -35,7 +35,7 @@ def slice(inp,opt_root,threshold,min_length,min_interval,hop_size,max_sil_kept,_
         if(tmp_max>1):chunk/=tmp_max
         chunk = (chunk / tmp_max * (_max * alpha)) + (1 - alpha) * chunk
         wavfile.write(
-            "%s/%s_%s_%s.wav" % (opt_root, name, start, end),
+            "%s/%s_%010d_%010d.wav" % (opt_root, name, start, end),  # zero-padded so files sort correctly
             32000,
             # chunk.astype(np.float32),
             (chunk * 32767).astype(np.int16),
@@ -79,6 +79,7 @@ def b_change_index(index, batch):
 def b_next_index(index, batch):
+    b_save_file()
     if (index + batch) <= g_max_json_index:
         return index + batch , *b_change_index(index + batch, batch)
     else:
@@ -86,6 +87,7 @@ def b_next_index(index, batch):
 def b_previous_index(index, batch):
+    b_save_file()
     if (index - batch) >= 0:
         return index - batch , *b_change_index(index - batch, batch)
     else:
@@ -108,6 +110,7 @@ def b_submit_change(*text_list):
 def b_delete_audio(*checkbox_list):
     global g_data_json, g_index, g_max_json_index
+    b_save_file()
     change = False
     for i, checkbox in reversed(list(enumerate(checkbox_list))):
         if g_index + i < len(g_data_json):
@@ -119,8 +122,8 @@ def b_delete_audio(*checkbox_list):
         if g_index > g_max_json_index:
             g_index = g_max_json_index
         g_index = g_index if g_index >= 0 else 0
-        # if change:
-        #     b_save_file()
+        if change:
+            b_save_file()
     # return gr.Slider(value=g_index, maximum=(g_max_json_index if g_max_json_index>=0 else 0)), *b_change_index(g_index, g_batch)
     return {"value":g_index,"__type__":"update","maximum":(g_max_json_index if g_max_json_index>=0 else 0)},*b_change_index(g_index, g_batch)
@@ -170,6 +173,7 @@ def b_audio_split(audio_breakpoint, *checkbox_list):
 def b_merge_audio(interval_r, *checkbox_list):
     global g_data_json , g_max_json_index
+    b_save_file()
     checked_index = []
     audios_path = []
     audios_text = []
@@ -294,6 +298,7 @@ def set_global(load_json, load_list, json_key_text, json_key_path, batch):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Process some integers.')
     parser.add_argument('--load_json', default="None", help='source file, like demo.json')
+    parser.add_argument('--is_share', default="False", help='whether webui is_share=True')
     parser.add_argument('--load_list', default="None", help='source file, like demo.list')
     parser.add_argument('--webui_port_subfix', default=9871, help='port for the subfix webui')
     parser.add_argument('--json_key_text', default="text", help='the text key name in json, Default: text')
@@ -488,5 +493,6 @@ if __name__ == "__main__":
         server_name="0.0.0.0",
         inbrowser=True,
         quiet=True,
+        share=eval(args.is_share),
         server_port=int(args.webui_port_subfix)
     )
@@ -43,8 +43,8 @@ def wave_to_spectrogram(
     wave_left = np.asfortranarray(wave[0])
     wave_right = np.asfortranarray(wave[1])

-    spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
-    spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
+    spec_left = librosa.stft(wave_left, n_fft=n_fft, hop_length=hop_length)
+    spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)

     spec = np.asfortranarray([spec_left, spec_right])
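The remaining hunks in this file are all the same migration: librosa 0.10 made sr, mono, n_fft, orig_sr, and target_sr keyword-only, so the old positional calls raise TypeError. A minimal sketch of the pattern outside this codebase, with illustrative array shapes:

import numpy as np
import librosa

y = np.random.randn(2, 32000).astype(np.float32)            # hypothetical stereo buffer at 16 kHz
# old style, removed in librosa >= 0.10: librosa.resample(y, 16000, 32000)
y32 = librosa.resample(y, orig_sr=16000, target_sr=32000)   # keywords now required
spec = librosa.stft(y32[0], n_fft=2048, hop_length=512)     # n_fft must be passed by name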
@@ -78,7 +78,7 @@ def wave_to_spectrogram_mt(
         kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
     )
     thread.start()
-    spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
+    spec_right = librosa.stft(wave_right, n_fft=n_fft, hop_length=hop_length)
     thread.join()

     spec = np.asfortranarray([spec_left, spec_right])
@@ -230,27 +230,31 @@ def cache_or_load(mix_path, inst_path, mp):
         if d == len(mp.param["band"]):  # high-end band
             X_wave[d], _ = librosa.load(
-                mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
+                mix_path,
+                sr = bp["sr"],
+                mono = False,
+                dtype = np.float32,
+                res_type = bp["res_type"]
             )
             y_wave[d], _ = librosa.load(
                 inst_path,
-                bp["sr"],
-                False,
-                dtype=np.float32,
-                res_type=bp["res_type"],
+                sr = bp["sr"],
+                mono = False,
+                dtype = np.float32,
+                res_type = bp["res_type"],
             )
         else:  # lower bands
             X_wave[d] = librosa.resample(
                 X_wave[d + 1],
-                mp.param["band"][d + 1]["sr"],
-                bp["sr"],
-                res_type=bp["res_type"],
+                orig_sr = mp.param["band"][d + 1]["sr"],
+                target_sr = bp["sr"],
+                res_type = bp["res_type"],
             )
             y_wave[d] = librosa.resample(
                 y_wave[d + 1],
-                mp.param["band"][d + 1]["sr"],
-                bp["sr"],
-                res_type=bp["res_type"],
+                orig_sr = mp.param["band"][d + 1]["sr"],
+                target_sr = bp["sr"],
+                res_type = bp["res_type"],
             )

         X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])
@@ -401,9 +405,9 @@ def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
                     mp.param["mid_side_b2"],
                     mp.param["reverse"],
                 ),
-                bp["sr"],
-                sr,
-                res_type="sinc_fastest",
+                orig_sr = bp["sr"],
+                target_sr = sr,
+                res_type = "sinc_fastest",
             )
         else:  # mid
             spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
@@ -418,8 +422,8 @@ def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
                     mp.param["reverse"],
                 ),
             )
-            # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
-            wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")
+            # wave = librosa.core.resample(wave2, orig_sr=bp['sr'], target_sr=sr, res_type="sinc_fastest")
+            wave = librosa.core.resample(wave2, orig_sr=bp["sr"], target_sr=sr, res_type="scipy")

     return wave.T
@@ -506,8 +510,8 @@ def ensembling(a, specs):
 def stft(wave, nfft, hl):
     wave_left = np.asfortranarray(wave[0])
     wave_right = np.asfortranarray(wave[1])
-    spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
-    spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
+    spec_left = librosa.stft(wave_left, n_fft=nfft, hop_length=hl)
+    spec_right = librosa.stft(wave_right, n_fft=nfft, hop_length=hl)
     spec = np.asfortranarray([spec_left, spec_right])

     return spec
@@ -569,10 +573,10 @@ if __name__ == "__main__":
         if d == len(mp.param["band"]):  # high-end band
             wave[d], _ = librosa.load(
                 args.input[i],
-                bp["sr"],
-                False,
-                dtype=np.float32,
-                res_type=bp["res_type"],
+                sr = bp["sr"],
+                mono = False,
+                dtype = np.float32,
+                res_type = bp["res_type"],
             )

             if len(wave[d].shape) == 1:  # mono to stereo
@@ -580,9 +584,9 @@ if __name__ == "__main__":
         else:  # lower bands
             wave[d] = librosa.resample(
                 wave[d + 1],
-                mp.param["band"][d + 1]["sr"],
-                bp["sr"],
-                res_type=bp["res_type"],
+                orig_sr = mp.param["band"][d + 1]["sr"],
+                target_sr = bp["sr"],
+                res_type = bp["res_type"],
             )

         spec[d] = wave_to_spectrogram(
@@ -24,7 +24,7 @@ def make_padding(width, cropsize, offset):
 def inference(X_spec, device, model, aggressiveness, data):
     """
-    data : dic configs
+    data : dic configs
     """

     def _execute(
@@ -239,7 +239,7 @@ class Predictor:
 class MDXNetDereverb:
-    def __init__(self, chunks, device):
+    def __init__(self, chunks):
         self.onnx = "%s/uvr5_weights/onnx_dereverb_By_FoxJoy"%os.path.dirname(os.path.abspath(__file__))
         self.shifts = 10  # 'Predict with randomised equivariant stabilisation'
         self.mixing = "min_mag"  # ['default','min_mag','max_mag']
@@ -250,7 +250,7 @@ class MDXNetDereverb:
         self.n_fft = 6144
         self.denoise = True
         self.pred = Predictor(self)
-        self.device = device
+        self.device = cpu

     def _path_audio_(self, input, vocal_root, others_root, format, is_hp3=False):
         self.pred.prediction(input, vocal_root, others_root, format)
@@ -61,19 +61,19 @@ class AudioPre:
                 _,
             ) = librosa.core.load(  # librosa may mis-read some audio; ffmpeg would in theory be safer, but it is too much hassle, so this stays
                 music_file,
-                bp["sr"],
-                False,
-                dtype=np.float32,
-                res_type=bp["res_type"],
+                sr = bp["sr"],
+                mono = False,
+                dtype = np.float32,
+                res_type = bp["res_type"],
             )
             if X_wave[d].ndim == 1:
                 X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
         else:  # lower bands
             X_wave[d] = librosa.core.resample(
                 X_wave[d + 1],
-                self.mp.param["band"][d + 1]["sr"],
-                bp["sr"],
-                res_type=bp["res_type"],
+                orig_sr = self.mp.param["band"][d + 1]["sr"],
+                target_sr = bp["sr"],
+                res_type = bp["res_type"],
             )
         # Stft of wave source
         X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
@@ -110,6 +110,9 @@ class AudioPre:
         y_spec_m = pred * X_phase
         v_spec_m = X_spec_m - y_spec_m

+        if is_hp3 == True:
+            ins_root,vocal_root = vocal_root,ins_root  # HP3 swaps the vocal/instrument outputs
+
         if ins_root is not None:
             if self.data["high_end_process"].startswith("mirroring"):
                 input_high_end_ = spec_utils.mirroring(
@@ -242,19 +245,19 @@ class AudioPreDeEcho:
                 _,
             ) = librosa.core.load(  # librosa may mis-read some audio; ffmpeg would in theory be safer, but it is too much hassle, so this stays
                 music_file,
-                bp["sr"],
-                False,
-                dtype=np.float32,
-                res_type=bp["res_type"],
+                sr = bp["sr"],
+                mono = False,
+                dtype = np.float32,
+                res_type = bp["res_type"],
             )
             if X_wave[d].ndim == 1:
                 X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
         else:  # lower bands
             X_wave[d] = librosa.core.resample(
                 X_wave[d + 1],
-                self.mp.param["band"][d + 1]["sr"],
-                bp["sr"],
-                res_type=bp["res_type"],
+                orig_sr = self.mp.param["band"][d + 1]["sr"],
+                target_sr = bp["sr"],
+                res_type = bp["res_type"],
             )
         # Stft of wave source
         X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
@@ -5,7 +5,8 @@ from tools.i18n.i18n import I18nAuto
 i18n = I18nAuto()

 logger = logging.getLogger(__name__)
-import ffmpeg
+import librosa,ffmpeg
 import soundfile as sf
 import torch
+import sys
 from mdxnet import MDXNetDereverb
@@ -18,8 +19,9 @@ for name in os.listdir(weight_uvr5_root):
     uvr5_names.append(name.replace(".pth", ""))

 device=sys.argv[1]
-is_half=sys.argv[2]
-
+is_half=eval(sys.argv[2])
+webui_port_uvr5=int(sys.argv[3])
+is_share=eval(sys.argv[4])

 def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
     infos = []
@@ -31,25 +33,24 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
     save_root_ins = (
         save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
     )
+    is_hp3 = "HP3" in model_name
     if model_name == "onnx_dereverb_By_FoxJoy":
-        pre_fun = MDXNetDereverb(15, device)
+        pre_fun = MDXNetDereverb(15)
     else:
         func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho
         pre_fun = func(
             agg=int(agg),
-            model_path=os.path.join(
-                weight_uvr5_root, model_name + ".pth"
-            ),
+            model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
             device=device,
             is_half=is_half,
         )
-    is_hp3 = "HP3" in model_name
     if inp_root != "":
         paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
     else:
         paths = [path.name for path in paths]
     for path in paths:
         inp_path = os.path.join(inp_root, path)
+        if(os.path.isfile(inp_path)==False):continue
         need_reformat = 1
         done = 0
         try:
@@ -60,7 +61,7 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
             ):
                 need_reformat = 0
                 pre_fun._path_audio_(
-                    inp_path, save_root_ins, save_root_vocal, format0, is_hp3=is_hp3
+                    inp_path, save_root_ins, save_root_vocal, format0,is_hp3
                 )
                 done = 1
         except:
@@ -79,23 +80,15 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
         try:
             if done == 0:
                 pre_fun._path_audio_(
-                    inp_path, save_root_ins, save_root_vocal, format0
+                    inp_path, save_root_ins, save_root_vocal, format0,is_hp3
                 )
             infos.append("%s->Success" % (os.path.basename(inp_path)))
             yield "\n".join(infos)
         except:
-            try:
-                if done == 0:
-                    pre_fun._path_audio_(
-                        inp_path, save_root_ins, save_root_vocal, format0
-                    )
-                infos.append("%s->Success" % (os.path.basename(inp_path)))
-                yield "\n".join(infos)
-            except:
-                infos.append(
-                    "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
-                )
-                yield "\n".join(infos)
+            infos.append(
+                "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
+            )
+            yield "\n".join(infos)
     except:
         infos.append(traceback.format_exc())
         yield "\n".join(infos)
@@ -109,16 +102,15 @@ def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format
             del pre_fun
         except:
             traceback.print_exc()
-        print("clean_empty_cache")
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
+            logger.info("Executed torch.cuda.empty_cache()")
     yield "\n".join(infos)

-with gr.Blocks(title="RVC WebUI") as app:
+with gr.Blocks(title="UVR5 WebUI") as app:
     gr.Markdown(
         value=
-        "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>."
+        i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")  # MIT-licensed, no warranty; see LICENSE in the repo root
     )
     with gr.Tabs():
         with gr.TabItem(i18n("伴奏人声分离&去混响&去回声")):  # "Vocal/accompaniment separation & de-reverb & de-echo"
@@ -143,7 +135,7 @@ with gr.Blocks(title="RVC WebUI") as app:
                     minimum=0,
                     maximum=20,
                     step=1,
-                    label="人声提取激进程度",
+                    label=i18n("人声提取激进程度"),  # "vocal extraction aggressiveness"
                     value=10,
                     interactive=True,
                     visible=False,  # not exposed for tuning yet
@@ -179,6 +171,7 @@ with gr.Blocks(title="RVC WebUI") as app:
 app.queue(concurrency_count=511, max_size=1022).launch(
     server_name="0.0.0.0",
     inbrowser=True,
-    server_port=9873,
+    share=is_share,
+    server_port=webui_port_uvr5,
     quiet=True,
 )
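With these changes the UVR5 WebUI reads its runtime configuration from the command line rather than hard-coding the port; assuming the script path tools/uvr5/webui.py, a typical launch (values are illustrative) is:

python tools/uvr5/webui.py cuda True 9873 False

which maps to device='cuda', is_half=True, webui_port_uvr5=9873, and is_share=False per the sys.argv parsing shown above.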