Merge branch 'main' into main

This commit is contained in:
RVC-Boss
2024-01-21 16:02:57 +08:00
committed by GitHub
80 changed files with 7561 additions and 3219 deletions

View File

@@ -1,27 +0,0 @@
import json
import locale
import os
def load_language_list(language):
    """Load the translation table for *language* from its JSON locale file.

    Reads ``./i18n/locale/{language}.json`` (relative to the working
    directory) and returns the parsed key -> translated-string mapping.
    """
    locale_path = f"./i18n/locale/{language}.json"
    with open(locale_path, "r", encoding="utf-8") as fp:
        return json.load(fp)
class I18nAuto:
    """Translate UI strings through a per-language JSON map.

    Calling the instance with a key returns its translation, or the key
    itself when no translation exists (so untranslated strings pass
    through unchanged).
    """

    def __init__(self, language=None):
        # "Auto" (or no argument) means: detect the system language.
        # locale.getlocale() can return (None, None), which is why
        # getdefaultlocale() is used here instead.
        if language is None or language == "Auto":
            language = locale.getdefaultlocale()[0]
        # Fall back to English when no locale file is shipped for the
        # detected/requested language.
        if not os.path.exists(f"./i18n/locale/{language}.json"):
            language = "en_US"
        self.language = language
        self.language_map = load_language_list(language)

    def __call__(self, key):
        # Unknown keys are returned verbatim.
        return self.language_map.get(key, key)

    def __repr__(self):
        return f"Use Language: {self.language}"

View File

@@ -1,93 +1,277 @@
{
"很遗憾您这没有能用的显卡来支持您训练": "Unfortunately you do not have a working graphics card to support your training",
"UVR5已开启": "UVR5 is on",
"UVR5已关闭": "UVR5 is turned off",
"本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.": "This software is open source under the MIT license. The author does not have any control over the software. Those who use the software and disseminate the sounds exported by the software are fully responsible. <br>If you do not agree with this clause, you cannot use or quote any code and code in the software package. File. See root directory <b>LICENSE</b> for details.",
"0-前置数据集获取工具": "0- Pre-front dataset acquisition tools",
"0a-UVR5人声伴奏分离&去混响去延迟工具": "0A-UVR5 voice accompaniment separation & to the sound of reverberation and delay tools",
"是否开启UVR5-WebUI": "Whether to turn on UVR5-WEBUI",
"UVR5进程输出信息": "UVR5 process output information",
"0b-语音切分工具": "0b-voice cutting tool",
"音频自动切分输入路径,可文件可文件夹": "Audio automatic cutting into the input path, the file and the folder",
"切分后的子音频的输出根目录": "The output root directory of the sub -audio output",
"threshold:音量小于这个值视作静音的备选切割点": "Threshold: The volume is less than this value as a mute alternative cutting point",
"min_length:每段最小多长,如果第一段太短一直和后面段连起来直到超过这个值": "min_length: The minimum length of each paragraph, if the first paragraph is too short to connect with the latter section until this value exceeds this value",
"min_interval:最短切割间隔": "min_interval: the shortest cutting interval",
"hop_size:怎么算音量曲线,越小精度越大计算量越高(不是精度越大效果越好)": "HOP_SIZE: How to calculate the volume curve, the smaller the accuracy, the higher the calculation amount (not the more accuracy, the better the effect, the better)",
"max_sil_kept:切完后静音最多留多长": "max_sil_kept: After cutting, how long is the mute at most",
"开启语音切割": "Open voice cutting",
"终止语音切割": "Termination of voice cutting",
"max:归一化后最大值多少": "MAX: How much is the maximum value after a normalization?",
"alpha_mix:混多少比例归一化后音频进来": "alpha_mix: How many percentages are mixed back, and the audio comes in",
"切割使用的进程数": "Number of processes for cutting",
"语音切割进程输出信息": "Voice cutting process output information",
"0c-中文批量离线ASR工具": "0c- Chinese batch offline ASR tool",
"开启离线批量ASR": "Open offline batch ASR",
"终止ASR进程": "Terminate the ASR process",
"批量ASR(中文only)输入文件夹路径": "Batch ASR (Chinese only) input folder path",
"ASR进程输出信息": "ASR process output information",
"0d-语音文本校对标注工具": "0d-voice text school pairing tool",
"是否开启打标WebUI": "Whether to open the marking webui",
"打标数据标注文件路径": "Playing data label file path",
"打标工具进程输出信息": "Playing tool process output information",
"1-GPT-SoVITS-TTS": "1-GPT-SOVITS-TTS",
"*实验/模型名": "*Experiment/model name",
"显卡信息": "Graphics card information",
"预训练的SoVITS-G模型路径": "Pre-trained SOVITS-G model path",
"预训练的SoVITS-D模型路径": "Pre-trained SOVITS-D model path",
"预训练的GPT模型路径": "Pre -training GPT model path",
"1A-训练集格式化工具": "1A-training collection tool",
"输出logs/实验名目录下应有23456开头的文件和文件夹": "Output LOGS/experimental name directory should have files and folders starting with 23456",
"*文本标注文件": "*Text label file",
"*训练集音频文件目录": "*Training set audio file directory",
"训练集音频文件目录 拼接 list文件里波形对应的文件名。": "Training the file name corresponding to the waveform of the waveform in the List file of the audio file",
"1Aa-文本内容": "1AA-text content",
"GPU卡号以-分割,每个卡号一个进程": "GPU card number is divided by-division, each card number is one process",
"预训练的中文BERT模型路径": "Pre -training Chinese bert model path",
"开启文本获取": "Get the text to get",
"终止文本获取进程": "Termination text acquisition process",
"文本进程输出信息": "Text process output information",
"1Ab-SSL自监督特征提取": "1AB-SSL self-supervision feature extraction",
"预训练的SSL模型路径": "Pre -training SSL model path",
"开启SSL提取": "Open SSL extraction",
"终止SSL提取进程": "Terminate SSL extraction process",
"SSL进程输出信息": "SSL process output information",
"1Ac-语义token提取": "1AC-semantic token extraction",
"开启语义token提取": "Open semantic token extraction",
"终止语义token提取进程": "Terminate semantics token extraction process",
"语义token提取进程输出信息": "Semantic token extraction process output information",
"1Aabc-训练集格式化一键三连": "1AABC-Training Collection Formulate One-button Three Companies",
"开启一键三连": "Open one button and three consecutive",
"终止一键三连": "Termine one button and three companies",
"一键三连进程输出信息": "One -click three -line process output information",
"1B-微调训练": "1B-fine-tuning training",
"1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。": "1ba-sovits training. The model file for sharing is output under SOVITS_WEIGHTS",
"每张显卡的batch_size": "Batch_size of each graphics card",
"总训练轮数total_epoch不建议太高": "Total_epoch, the total training wheel is not recommended to be too high",
"文本模块学习率权重": "The weight of the text module learning rate",
"保存频率save_every_epoch": "Save the frequency save_every_epoch",
"是否仅保存最新的ckpt文件以节省硬盘空间": "Whether to save the latest CKPT file to save hard disk space",
"是否在每次保存时间点将最终小模型保存至weights文件夹": "Whether to save the final small model to the Weights folder at each time of saving time",
"开启SoVITS训练": "Open SOVITS training",
"终止SoVITS训练": "Terminate SOVITS training",
"SoVITS训练进程输出信息": "Sovits training process output information",
"1Bb-GPT训练。用于分享的模型文件输出在GPT_weights下。": "1BB-GPT training. The model file for sharing is output under GPT_WEIGHTS",
"总训练轮数total_epoch": "Total_epoch, total training wheel",
"开启GPT训练": "Turn on GPT training",
"终止GPT训练": "Terminate GPT training",
"GPT训练进程输出信息": "GPT training process output information",
"1C-推理": "1C-reasoning",
"选择训练完存放在SoVITS_weights和GPT_weights下的模型。默认的一个是底模体验5秒Zero Shot TTS用。": "Select the models stored in Sovits_weights and GPT_WEIGHTS. The default is the bottom mold, experience for 5 seconds Zero Shot TTS",
"*GPT模型列表": "*GPT model list",
"*SoVITS模型列表": "*Sovits model list",
"GPU卡号,只能填1个整数": "GPU card number, can only fill in one integer",
"刷新模型路径": "Refresh the model path",
"是否开启TTS推理WebUI": "Whether to turn on the TTS reasoning webui",
"TTS推理WebUI进程输出信息": "TTS reasoning webui process output information",
"2-GPT-SoVITS-变声": "2-gpt-sovits-sound change",
"施工中,请静候佳音": "During the construction, please wait for good sound",
"TTS推理进程已开启": "TTS inference process has been started",
"TTS推理进程已关闭": "TTS inference process has been closed",
"打标工具WebUI已开启": "The marking tool WebUI is turned on",
"打标工具WebUI已关闭": "The marking tool WebUI has been closed"
}
{
'': 'Unfortunately, there is no compatible GPU available to support your training.',
'UVR5': 'UVR5 opened ',
'UVR5': 'UVR5 closed',
'MIT, , 使. <br>, 使. <b>LICENSE</b>.': 'This software is open source under the MIT license. The author does not have any control over the software. Users who use the software and distribute the sounds exported by the software are solely responsible. <br>If you do not agree with this clause, you cannot use or reference any codes and files within the software package. See the root directory <b>Agreement-LICENSE.txt</b> for details.',
'0-': '0-Fech dataset',
'0a-UVR5&': '0a-UVR5 webui (for vocal separation, deecho, dereverb and denoise)',
'UVR5-WebUI': 'Open UVR5-WebUI',
'UVR5': 'UVR5 process output log',
'0b-': '0b-Audio slicer',
'': 'Audio slicer input (file or folder)',
'': 'Audio slicer output folder',
'threshold:': 'Noise gate threshold (loudness below this value will be treated as noise',
'min_length:': 'Minimum length',
'min_interval:': 'Minumum interval for audio cutting',
'hop_size:线': 'hop_size: FO hop size, the smaller the value, the higher the accuracy',
'max_sil_kept:': 'Maximum length for silence to be kept',
'': 'Start audio slicer',
'': 'Stop audio cutting',
'max:': 'Loudness multiplier after normalized',
'alpha_mix:': 'alpha_mix: proportion of normalized audio merged into dataset',
'使': 'CPU threads used for audio slicing',
'': 'Audio slicer output log',
'0c-线ASR': '0c-Chinese ASR tool',
'线ASR': 'Start batch ASR',
'ASR': 'Stop ASR task',
'ASR(only)': 'Batch ASR (Chinese only) input folder',
'ASR': 'ASR output log',
'0d-': '0d-Speech to text proofreading tool',
'WebUI': 'Open labelling WebUI',
'': 'path to proofreading text file',
'': 'Proofreading tool output log',
'1-GPT-SoVITS-TTS': '1-GPT-SOVITS-TTS',
'*/': '*Experiment/model name',
'': 'GPU Information',
'SoVITS-G': 'Pretrained SoVITS-G model path',
'SoVITS-D': 'Pretrained SoVITS-D model path',
'GPT': 'Pretrained GPT model path',
'1A-': '1A-Dataset formatting',
'logs/23456': 'output folder (logs/{experiment name}) should have files and folders starts with 23456.',
'*': '*Text labelling file',
'*': '*Audio dataset folder',
' list': 'Training the file name corresponding to the waveform of the waveform in the List file of the audio file',
'1Aa-': '1Aa-Text',
'GPU-': 'GPU number is separated by -, each GPU will run one process ',
'BERT': ' Pretrained BERT model path',
'': 'Start speech-to-text',
'': 'Stop speech-to-text',
'': 'Text processing output',
'1Ab-SSL': '1Ab-SSL self-supervised feature extraction',
'SSL': 'Pretrained SSL model path',
'SSL': 'Start SSL extracting',
'SSL': 'Stop SSL extraction',
'SSL': 'SSL output log',
'1Ac-token': '1Ac-semantics token extraction',
'token': 'Start semantics token extraction',
'token': 'Stop semantics token extraction',
'token': 'Sematics token extraction output log',
'1Aabc-': '1Aabc-One-click formatting',
'': 'Start one-click formatting',
'': 'Stop one-click formatting',
'': 'One-click formatting output',
'1B-': '1B-Fine-tuned training',
'1Ba-SoVITSSoVITS_weights': '1Ba-SoVITS training. The model is located in SoVITS_weights.',
'batch_size': 'Batch size per GPU:',
'total_epoch': 'Total epochs, do not increase to a value that is too high',
'': 'Text model learning rate weighting',
'save_every_epoch': 'Save frequency (save_every_epoch):',
'ckpt': "Save only the latest '.ckpt' file to save disk space:",
'weights': "Save a small final model to the 'weights' folder at each save point:",
'SoVITS': 'Start SoVITS training',
'SoVITS': 'Stop SoVITS training',
'SoVITS': 'SoVITS training output log',
'1Bb-GPTGPT_weights': '1Bb-GPT training. The model is located in GPT_weights.',
'total_epoch': 'Total training epochs (total_epoch):',
'GPT': 'Start GPT training',
'GPT': 'Stop GPT training',
'GPT': 'GPT training output log',
'1C-': '1C-inference',
'SoVITS_weightsGPT_weights5Zero Shot TTS': 'Choose the models from SoVITS_weights and GPT_weights. The default one is a pretrain, so you can experience zero shot TTS.',
'*GPT': '*GPT models list',
'*SoVITS': '*SoVITS models list',
'GPU,1': 'GPU number, can only input ONE integer',
'': 'refreshing model paths',
'TTSWebUI': 'Open TTS inference WEBUI',
'TTSWebUI': 'TTS inference webui output log',
'2-GPT-SoVITS-': '2-GPT-SoVITS-Voice Changer',
'': 'In construction, please wait',
'TTS': 'TTS inference process is opened',
'TTS': 'TTS inference process closed',
'WebUI': 'proofreading tool webui is opened',
'WebUI': 'proofreading tool webui is closed',
'MIT, , 使. , 使. LICENSE.': 'This software is under MIT licence. The author does not have any control for this software. Users are solely reponsible for all voices thats being converted and/or distributed. If you disagree with this Terms and Conditions, you cannot use or cite any files or code in this file. Please check LICENSE. for more info.',
'*': '*Please upload and fill reference information',
'*': '*Please fill the text that needs inference',
'ASR%s': 'ASR training started: %s',
'GPT': 'Finished GPT training',
'GPT%s': 'GPT training started: %s',
'SSL': 'SSL extracting',
'SSL': 'SSL extraction finished',
'SoVITS': 'SoVITS training finished',
'SoVITS%s': 'SoVITS training started%s',
'': 'An error has occured during One-click formatting',
'': 'Finished one-click formatting',
'': 'Chinese',
'50': 'Cut per 50 characters',
'': 'Cut per 5 sentences',
'': 'Text after sliced',
'': 'Slicing audio',
'': 'finished audio slicing',
'': 'Text for reference audio',
'': 'Language for reference audio',
'': 'Start inference',
'': 'Mixed languages input will be supported soon.',
'ASR': ' An ASR task is already in progress, please stop before starting the next task',
'GPT': 'A GPT training task is already in progress, please stop before starting the next task',
'SSL': 'A SSL extraction task is already in progress, please stop before starting the next task',
'SoVITS': 'A SoVITS training task is already in progress, please stop before starting the next task',
'': 'An ASR task is already in progress, please stop before starting the next task',
'': 'An audio slicing task is already in progress, please stop before starting the next task',
'': 'A TTS proofreading task is already in progress, please stop before starting the next task',
'token': 'A semantics token extraction task is already in progress, please stop before starting the next task',
'ASR': 'ASR task has been stopped',
'GPT': 'GPT training has been stopped',
'SoVITS': 'SoVITS training has been stopped',
'1a': 'All 1a tasks has been stopped',
'1b': 'All 1b tasks has been stopped',
'': 'All one-clicking formatting tasks has been stopped',
'': 'All audio slicing tasks has been stopped',
'token': 'All semantics token tasks has been stopped',
'': '',
'': 'Text slicer tool, since there will be issues when infering long texts, so it is advised to cut first. When infering, it will infer respectively then combined together.',
'': 'Text processing',
'': 'Finished text processing',
'': 'Japanese',
'': 'English',
'token': 'Semantics token extracting',
'token': 'Finished semantics token extraction',
'': 'Please upload reference audio',
'': 'No input file or directory',
'': 'Input directory exists, but it is not a file or a folder',
'': 'Inference Result',
'1a-done': 'Progress1a-done',
'1a-done, 1b-ing': 'Progress1a-done, 1b-ing',
'1a-ing': 'Progress1a-ing',
'1a1b-done': 'Progress1a1b-done',
'1a1b-done, 1cing': 'Progress1a1b-done, 1cing',
'all-done': 'Progressall-done',
'': 'Inference text that needs to be sliced',
'': 'Inference text',
'': 'Inference text language',
'>=3使harvest使使': 'If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.',
'A': 'Weight (w) for Model A:',
'A': 'Path to Model A:',
'B': 'Path to Model B:',
'E:\\+\\\\src': 'C:\\Users\\Desktop\\src',
'F0线, , , F0': 'F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:',
'Index Rate': 'Index Rate',
'Onnx': 'Export Onnx',
'Onnx': 'Onnx Export Path:',
'RVC': 'RVC Model Path:',
'ckpt': 'ckpt Processing',
'harvest': 'Number of CPU processes used for harvest pitch algorithm',
'index': 'index',
'pth': 'pth',
'rmvpe-使,0-0-1使0211': "Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1",
'step1: . logs, , , , , . ': "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.",
'step1:': 'Step 1: Processing data',
'step2:&': 'step2:Pitch extraction & feature extraction',
'step2a: , 2wav; . ': 'Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.',
'step2b: 使CPU(), 使GPU()': 'Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):',
'step3: , ': 'Step 3: Fill in the training settings and start training the model and index',
'step3a:': 'Step 3a: Model training started',
'': 'One-click training',
', , ': 'Multiple audio files can also be imported. If a folder path exists, this input is ignored.',
' 使UVR5 <br> E:\\codes\\py39\\vits_vc_gpu\\() <br> <br>1HP5HP2HP3HP3HP2 <br>2HP5 <br> 3by FoxJoy<br>\u2003\u2003(1)MDX-Net(onnx_dereverb):<br>&emsp;(234)DeEcho:AggressiveNormalDeReverb<br>/<br>1DeEcho-DeReverb2DeEcho2<br>2MDX-Net-Dereverb<br>3MDX-NetDeEcho-Aggressive': 'Batch processing for vocal accompaniment separation using the UVR5 model.<br>Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).<br>The model is divided into three categories:<br>1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.<br>2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.<br>3. De-reverb and de-delay models (by FoxJoy):<br>\u2003\u2003(1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;<br>&emsp;(234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.<br>De-reverb/de-delay notes:<br>1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.<br>2. The MDX-Net-Dereverb model is quite slow.<br>3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive.',
'-使, 0-1-2 使012': "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:",
'&&': 'Vocals/Accompaniment Separation & Reverberation Removal',
'使': '使',
'使': '使',
'': 'Save name:',
', ': 'Save file name (default: same as the source file):',
'': 'Saved model name (without extension):',
'artifact0.5': 'Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:',
'': 'Modify',
'(weights)': "Modify model information (only supported for small model files extracted from the 'weights' folder)",
'': 'Stop audio conversion',
'': 'All processes have been completed!',
'': 'Refresh voice list and index path',
'': 'Load model',
'D': 'Load pre-trained base model D path:',
'G': 'Load pre-trained base model G path:',
'': 'Single Inference',
'': 'Unload voice to save GPU memory:',
'(, , 12-12)': 'Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12):',
'0': 'Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:',
'': 'No',
'': '',
'': 'Response threshold',
'': 'loudness factor',
'': 'Process data',
'Onnx': 'Export Onnx Model',
'': 'Export file format',
'': 'FAQ (Frequently Asked Questions)',
'': 'General settings',
'': 'Start audio conversion',
'': 'Performance settings',
'': 'Batch Inference',
', , , (opt). ': "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').",
'': 'Specify the output folder for vocals:',
'': 'Specify output folder:',
'': 'Specify the output folder for accompaniment:',
'(ms):': 'Inference time (ms):',
'': 'Inferencing voice:',
'': 'Extract',
'使CPU': 'Number of CPU processes used for pitch extraction and data processing:',
'': 'Yes',
'. 10min, ': 'Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:',
'': 'View',
'(weights)': "View model information (only supported for small model files extracted from the 'weights' folder)",
'': 'Search feature ratio (controls accent strength, too high has artifacting):',
'': 'Model',
'': 'Model Inference',
'(logs),,': "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:",
'': 'Whether the model has pitch guidance:',
'(, )': 'Whether the model has pitch guidance (required for singing, optional for speech):',
',10': 'Whether the model has pitch guidance (1: yes, 0: no):',
'': 'Model architecture version:',
', ': 'Model fusion, can be used to test timbre fusion',
'': 'Path to Model:',
'': 'Fade length',
'': 'Version',
'': 'Feature extraction',
',使': 'Path to the feature index file. Leave blank to use the selected result from the dropdown:',
'+12key, -12key, . ': 'Recommended +12 key for male to female conversion, and -12 key for female to male conversion. If the sound range goes too far and the voice is distorted, you can also adjust it to the appropriate range by yourself.',
'': 'Target sample rate:',
'(ms):': 'Algorithmic delays(ms):',
'index,(dropdown)': 'Auto-detect index path and select from the dropdown:',
'': 'Fusion',
'': 'Model information to be modified:',
'': 'Model information to be placed:',
'': 'Train',
'': 'Train model',
'': 'Train feature index',
', train.log': "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder.",
'id': 'Please specify the speaker/singer ID:',
'index': 'Please choose the .index file',
'pth': 'Please choose the .pth file',
'id': 'Select Speaker/Singer ID:',
'': 'Convert',
'': 'Enter the experiment name:',
'': 'Enter the path of the audio folder to be processed:',
'()': 'Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):',
'()': 'Enter the path of the audio file to be processed (default is the correct format example):',
'1使': 'Adjust the volume envelope scaling. Closer to 0, the more it mimicks the volume of the original vocals. Can help mask noise and make volume sound more natural when set relatively low. Closer to 1 will be more of a consistently loud volume:',
'': 'Input voice monitor',
'': 'Enter the path of the training folder:',
'': 'Input device',
'': 'Input noise reduction',
'': 'Output information',
'': 'Output converted voice',
'': 'Output device',
'': 'Output noise reduction',
'(,)': 'Export audio (click on the three dots in the lower right corner to download)',
'.index': 'Select the .index file',
'.pth': 'Select the .pth file',
',pm,harvest,crepeGPU': ',pm,harvest,crepeGPU',
',pm,harvest,crepeGPU,rmvpeGPU': "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive), 'rmvpe': best quality, and little GPU requirement",
':pm,CPUdio,harvest,rmvpeCPU/GPU': "Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU",
':': ':',
'': 'Sample length',
'': 'Reload device list',
'': 'Pitch settings',
'(使)': 'Audio device (please use the same type of driver)',
'': 'pitch detection algorithm',
'': 'Extra inference time'
}

View File

@@ -1,47 +0,0 @@
import json
import os
from collections import OrderedDict
# Directory holding all locale JSON files.
DIR_PATH = "locale/"
# zh_CN is the source of truth: every other locale must mirror its key set.
STANDARD_FILE = "locale/zh_CN.json"


def sync_locale_files(dir_path=DIR_PATH, standard_file=STANDARD_FILE):
    """Make every locale JSON under *dir_path* mirror *standard_file*'s keys.

    Keys missing from a locale are added with the key itself as the value
    (i.e. untranslated), keys absent from the standard file are removed,
    and each file is rewritten with its keys in the standard file's order.
    The standard file itself is left untouched.
    """
    with open(standard_file, "r", encoding="utf-8") as f:
        standard_data = json.load(f, object_pairs_hook=OrderedDict)
    standard_order = list(standard_data.keys())

    # BUG FIX: the original compared bare file names (from os.listdir)
    # against the "locale/zh_CN.json" path, which never matched, so the
    # standard file was processed and rewritten too.  Compare full paths.
    standard_path = os.path.abspath(standard_file)
    languages = [
        os.path.join(dir_path, name)
        for name in os.listdir(dir_path)
        if name.endswith(".json")
        and os.path.abspath(os.path.join(dir_path, name)) != standard_path
    ]

    for lang_file in languages:
        with open(lang_file, "r", encoding="utf-8") as f:
            lang_data = json.load(f, object_pairs_hook=OrderedDict)

        # Add keys the locale is missing (untranslated placeholder)...
        for key in standard_data.keys() - lang_data.keys():
            lang_data[key] = key
        # ...and drop keys the standard file no longer has.
        for key in lang_data.keys() - standard_data.keys():
            del lang_data[key]

        # Re-order the locale's keys to match the standard file.
        lang_data = OrderedDict(
            sorted(lang_data.items(), key=lambda kv: standard_order.index(kv[0]))
        )
        with open(lang_file, "w", encoding="utf-8") as f:
            # BUG FIX: sort_keys=True is NOT passed here — it would re-sort
            # the keys alphabetically and silently discard the ordering
            # established just above.
            json.dump(lang_data, f, ensure_ascii=False, indent=4)
            f.write("\n")


if __name__ == "__main__":
    sync_locale_files()

View File

@@ -1,75 +0,0 @@
import ast
import glob
import json
from collections import OrderedDict
def extract_i18n_strings(node):
    """Recursively collect literal string arguments of ``i18n(...)`` calls.

    Only direct calls to the bare name ``i18n`` are matched; positional
    string constants are returned in source (depth-first) order.
    """
    i18n_strings = []
    if (
        isinstance(node, ast.Call)
        and isinstance(node.func, ast.Name)
        and node.func.id == "i18n"
    ):
        for arg in node.args:
            # ast.Str / .s are deprecated since 3.8 and removed in 3.12;
            # ast.Constant with a str value is the modern equivalent.
            if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
                i18n_strings.append(arg.value)
    for child_node in ast.iter_child_nodes(node):
        i18n_strings.extend(extract_i18n_strings(child_node))
    return i18n_strings
# Scan the repository for all .py files (recursively), parse each into an
# AST, collect every literal key passed to i18n(...), then report
# unused/missing keys and regenerate the standard locale file.


def main(standard_file="i18n/locale/zh_CN.json"):
    """Report unused/missing i18n keys and rewrite *standard_file*.

    Prints per-file key counts, the set of keys present in the standard
    file but never used from code, and the set used from code but absent
    from the file; then rewrites the file as an identity (key -> key)
    mapping of all keys found in code.
    """
    strings = []
    for filename in glob.iglob("**/*.py", recursive=True):
        # BUG FIX: explicit encoding — the sources contain non-ASCII
        # (Chinese) keys and the platform default encoding is not UTF-8
        # everywhere (e.g. Windows cp1252/gbk would raise or mis-decode).
        with open(filename, "r", encoding="utf-8") as f:
            code = f.read()
        # Cheap pre-filter: only parse files that reference the i18n helper.
        if "I18nAuto" in code:
            tree = ast.parse(code)
            i18n_strings = extract_i18n_strings(tree)
            print(filename, len(i18n_strings))
            strings.extend(i18n_strings)

    code_keys = set(strings)
    print()
    print("Total unique:", len(code_keys))

    with open(standard_file, "r", encoding="utf-8") as f:
        standard_data = json.load(f, object_pairs_hook=OrderedDict)
    standard_keys = set(standard_data.keys())

    # Keys shipped in the locale file but never referenced from code.
    unused_keys = standard_keys - code_keys
    print("Unused keys:", len(unused_keys))
    for unused_key in unused_keys:
        print("\t", unused_key)

    # Keys referenced from code but missing from the locale file.
    missing_keys = code_keys - standard_keys
    print("Missing keys:", len(missing_keys))
    for missing_key in missing_keys:
        print("\t", missing_key)

    # Identity mapping: the keys double as the source-language (zh_CN)
    # strings, so each key maps to itself.
    code_keys_dict = OrderedDict((s, s) for s in strings)
    with open(standard_file, "w", encoding="utf-8") as f:
        json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True)
        f.write("\n")


if __name__ == "__main__":
    main()