Code refactor + remove unused imports
@@ -12,8 +12,9 @@ import numpy as np
 from scipy.io.wavfile import read
 import torch
 import logging
-logging.getLogger('numba').setLevel(logging.ERROR)
-logging.getLogger('matplotlib').setLevel(logging.ERROR)
+
+logging.getLogger("numba").setLevel(logging.ERROR)
+logging.getLogger("matplotlib").setLevel(logging.ERROR)
 
 MATPLOTLIB_FLAG = False
 
@@ -23,13 +24,17 @@ logger = logging
 
 def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
     assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
+    checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
+    iteration = checkpoint_dict["iteration"]
+    learning_rate = checkpoint_dict["learning_rate"]
+    if (
+        optimizer is not None
+        and not skip_optimizer
+        and checkpoint_dict["optimizer"] is not None
+    ):
+        optimizer.load_state_dict(checkpoint_dict["optimizer"])
+    saved_state_dict = checkpoint_dict["model"]
+    if hasattr(model, "module"):
         state_dict = model.module.state_dict()
     else:
         state_dict = model.state_dict()
@@ -39,41 +44,63 @@ def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False
             # assert "quantizer" not in k
             # print("load", k)
             new_state_dict[k] = saved_state_dict[k]
-            assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
+            assert saved_state_dict[k].shape == v.shape, (
+                saved_state_dict[k].shape,
+                v.shape,
+            )
         except:
             traceback.print_exc()
-            print("error, %s is not in the checkpoint" % k)  # a shape mismatch also lands here, e.g. text_embedding when the cleaners change
+            print(
+                "error, %s is not in the checkpoint" % k
+            )  # a shape mismatch also lands here, e.g. text_embedding when the cleaners change
             new_state_dict[k] = v
-    if hasattr(model, 'module'):
+    if hasattr(model, "module"):
         model.module.load_state_dict(new_state_dict)
     else:
         model.load_state_dict(new_state_dict)
     print("load ")
-    logger.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
+    logger.info(
+        "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration)
+    )
     return model, optimizer, learning_rate, iteration
 
 
 def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
-    logger.info("Saving model and optimizer state at iteration {} to {}".format(
-        iteration, checkpoint_path))
-    if hasattr(model, 'module'):
+    logger.info(
+        "Saving model and optimizer state at iteration {} to {}".format(
+            iteration, checkpoint_path
+        )
+    )
+    if hasattr(model, "module"):
         state_dict = model.module.state_dict()
     else:
         state_dict = model.state_dict()
-    torch.save({'model': state_dict,
-                'iteration': iteration,
-                'optimizer': optimizer.state_dict(),
-                'learning_rate': learning_rate}, checkpoint_path)
+    torch.save(
+        {
+            "model": state_dict,
+            "iteration": iteration,
+            "optimizer": optimizer.state_dict(),
+            "learning_rate": learning_rate,
+        },
+        checkpoint_path,
+    )
 
 
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
+def summarize(
+    writer,
+    global_step,
+    scalars={},
+    histograms={},
+    images={},
+    audios={},
+    audio_sampling_rate=22050,
+):
     for k, v in scalars.items():
         writer.add_scalar(k, v, global_step)
     for k, v in histograms.items():
         writer.add_histogram(k, v, global_step)
     for k, v in images.items():
-        writer.add_image(k, v, global_step, dataformats='HWC')
+        writer.add_image(k, v, global_step, dataformats="HWC")
     for k, v in audios.items():
         writer.add_audio(k, v, global_step, audio_sampling_rate)
 
@@ -90,23 +117,23 @@ def plot_spectrogram_to_numpy(spectrogram):
     global MATPLOTLIB_FLAG
     if not MATPLOTLIB_FLAG:
         import matplotlib
+
         matplotlib.use("Agg")
         MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
+        mpl_logger = logging.getLogger("matplotlib")
         mpl_logger.setLevel(logging.WARNING)
     import matplotlib.pylab as plt
     import numpy as np
 
     fig, ax = plt.subplots(figsize=(10, 2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
+    im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
     plt.colorbar(im, ax=ax)
     plt.xlabel("Frames")
     plt.ylabel("Channels")
     plt.tight_layout()
 
     fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
+    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
     data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
     plt.close()
     return data
@@ -116,26 +143,28 @@ def plot_alignment_to_numpy(alignment, info=None):
     global MATPLOTLIB_FLAG
     if not MATPLOTLIB_FLAG:
         import matplotlib
+
         matplotlib.use("Agg")
         MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
+        mpl_logger = logging.getLogger("matplotlib")
         mpl_logger.setLevel(logging.WARNING)
     import matplotlib.pylab as plt
     import numpy as np
 
     fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
+    im = ax.imshow(
+        alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
+    )
     fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
+    xlabel = "Decoder timestep"
     if info is not None:
-        xlabel += '\n\n' + info
+        xlabel += "\n\n" + info
     plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
+    plt.ylabel("Encoder timestep")
     plt.tight_layout()
 
     fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
+    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
     data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
     plt.close()
     return data
@@ -147,16 +176,31 @@ def load_wav_to_torch(full_path):
 
 
 def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
+    with open(filename, encoding="utf-8") as f:
         filepaths_and_text = [line.strip().split(split) for line in f]
     return filepaths_and_text
 
 
 def get_hparams(init=True, stage=1):
     parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/s2.json",help='JSON file for configuration')
-    parser.add_argument('-p', '--pretrain', type=str, required=False,default=None,help='pretrain dir')
-    parser.add_argument('-rs', '--resume_step', type=int, required=False,default=None,help='resume step')
+    parser.add_argument(
+        "-c",
+        "--config",
+        type=str,
+        default="./configs/s2.json",
+        help="JSON file for configuration",
+    )
+    parser.add_argument(
+        "-p", "--pretrain", type=str, required=False, default=None, help="pretrain dir"
+    )
+    parser.add_argument(
+        "-rs",
+        "--resume_step",
+        type=int,
+        required=False,
+        default=None,
+        help="resume step",
+    )
     # parser.add_argument('-e', '--exp_dir', type=str, required=False,default=None,help='experiment directory')
     # parser.add_argument('-g', '--pretrained_s2G', type=str, required=False,default=None,help='pretrained sovits generator weights')
     # parser.add_argument('-d', '--pretrained_s2D', type=str, required=False,default=None,help='pretrained sovits discriminator weights')
@@ -172,7 +216,7 @@ def get_hparams(init=True, stage=1):
     hparams.pretrain = args.pretrain
     hparams.resume_step = args.resume_step
     # hparams.data.exp_dir = args.exp_dir
-    if stage ==1:
+    if stage == 1:
         model_dir = hparams.s1_ckpt_dir
     else:
         model_dir = hparams.s2_ckpt_dir
@@ -186,29 +230,38 @@ def get_hparams(init=True, stage=1):
     return hparams
 
 
-def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
+def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_time=True):
     """Freeing up space by deleting saved ckpts
 
-  Arguments:
-  path_to_models    --  Path to the model directory
-  n_ckpts_to_keep   --  Number of ckpts to keep, excluding G_0.pth and D_0.pth
-  sort_by_time      --  True -> chronologically delete ckpts
-                        False -> lexicographically delete ckpts
-  """
+    Arguments:
+    path_to_models    --  Path to the model directory
+    n_ckpts_to_keep   --  Number of ckpts to keep, excluding G_0.pth and D_0.pth
+    sort_by_time      --  True -> chronologically delete ckpts
+                          False -> lexicographically delete ckpts
+    """
     import re
-    ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
-    name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1)))
-    time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
+
+    ckpts_files = [
+        f
+        for f in os.listdir(path_to_models)
+        if os.path.isfile(os.path.join(path_to_models, f))
+    ]
+    name_key = lambda _f: int(re.compile("._(\d+)\.pth").match(_f).group(1))
+    time_key = lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))
     sort_key = time_key if sort_by_time else name_key
-    x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')],
-                                 key=sort_key)
-    to_del = [os.path.join(path_to_models, fn) for fn in
-              (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
+    x_sorted = lambda _x: sorted(
+        [f for f in ckpts_files if f.startswith(_x) and not f.endswith("_0.pth")],
+        key=sort_key,
+    )
+    to_del = [
+        os.path.join(path_to_models, fn)
+        for fn in (x_sorted("G")[:-n_ckpts_to_keep] + x_sorted("D")[:-n_ckpts_to_keep])
+    ]
     del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
     del_routine = lambda x: [os.remove(x), del_info(x)]
     rs = [del_routine(fn) for fn in to_del]
 
 
 def get_hparams_from_dir(model_dir):
     config_save_path = os.path.join(model_dir, "config.json")
     with open(config_save_path, "r") as f:
@@ -228,12 +281,15 @@ def get_hparams_from_file(config_path):
    hparams = HParams(**config)
    return hparams
 
 
 def check_git_hash(model_dir):
     source_dir = os.path.dirname(os.path.realpath(__file__))
     if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
+        logger.warn(
+            "{} is not a git repository, therefore hash value comparison will be ignored.".format(
+                source_dir
+            )
+        )
         return
 
     cur_hash = subprocess.getoutput("git rev-parse HEAD")
@@ -242,8 +298,11 @@ def check_git_hash(model_dir):
     if os.path.exists(path):
         saved_hash = open(path).read()
         if saved_hash != cur_hash:
-            logger.warn("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
+            logger.warn(
+                "git hash values are different. {}(saved) != {}(current)".format(
+                    saved_hash[:8], cur_hash[:8]
+                )
+            )
     else:
         open(path, "w").write(cur_hash)
@@ -263,7 +322,7 @@ def get_logger(model_dir, filename="train.log"):
     return logger
 
 
-class HParams():
+class HParams:
     def __init__(self, **kwargs):
         for k, v in kwargs.items():
             if type(v) == dict:
@@ -294,5 +353,10 @@ class HParams():
     def __repr__(self):
         return self.__dict__.__repr__()
 
-if __name__ == '__main__':
-    print(load_wav_to_torch('/home/fish/wenetspeech/dataset_vq/Y0000022499_wHFSeHEx9CM/S00261.flac'))
+
+if __name__ == "__main__":
+    print(
+        load_wav_to_torch(
+            "/home/fish/wenetspeech/dataset_vq/Y0000022499_wHFSeHEx9CM/S00261.flac"
+        )
+    )
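For context, a minimal usage sketch of the checkpoint helpers touched by this commit. It is illustrative only: the Linear stand-in model, the AdamW optimizer, the "logs/s2/..." paths, and the assumption that this file is importable as utils are hypothetical, not part of the commit.

# Hypothetical usage sketch (names and paths are placeholders, not from this repo).
import torch

import utils  # assumed import name for the module refactored above

model = torch.nn.Linear(10, 10)  # stand-in for the real generator network
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# Resume training state; skip_optimizer=True would ignore saved optimizer state.
model, optimizer, learning_rate, iteration = utils.load_checkpoint(
    "logs/s2/G_2333.pth", model, optimizer, skip_optimizer=False
)

# ... training steps ...

# Persist the new state, then keep only the two newest G_*/D_* checkpoints.
utils.save_checkpoint(model, optimizer, learning_rate, iteration, "logs/s2/G_2334.pth")
utils.clean_checkpoints(path_to_models="logs/s2/", n_ckpts_to_keep=2, sort_by_time=True)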