Example #1
File: train.py  Project: X-CCS/zhrtvc
def load_checkpoint(checkpoint_path, model, optimizer):
    assert os.path.isfile(checkpoint_path)
    print("Loading checkpoint '{}'".format(checkpoint_path))
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    # Show the model structure
    # from torchkeras import summary
    # summary(checkpoint_dict, input_shape=(145,512))
    # print("checkpoint_dict contents:", checkpoint_dict)
    # print("checkpoint_dict type:", type(checkpoint_dict))
    # with open("./checkpoint_dict.txt",'w') as fw:
    #     fw.write(str(checkpoint_dict))

    # The original loading code below did not work for this checkpoint
    # model.load_state_dict(checkpoint_dict['state_dict']) # original
    # learning_rate = checkpoint_dict['learning_rate'] # original
    # iteration = checkpoint_dict['iteration'] # original
    # optimizer.load_state_dict(checkpoint_dict['optimizer']) # original

    # This checkpoint appears to store a whole model object rather than a plain
    # state dict, so load the weights through its state_dict() method and
    # rebuild the remaining training state from the hyperparameters.
    model.load_state_dict(checkpoint_dict.state_dict())
    hparams = create_hparams(args.hparams_json, level=args.hparams_level)
    learning_rate = hparams.learning_rate
    # print("learning_rate:", learning_rate)

    iteration = hparams.iters_per_checkpoint
    # print("iteration:", iteration)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                                 weight_decay=hparams.weight_decay)
    # print("optimizer:", optimizer)

    print("Loaded checkpoint '{}' from iteration {}".format(
        checkpoint_path, iteration))
    return model, optimizer, learning_rate, iteration
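A minimal usage sketch for the function above; the checkpoint path is hypothetical, and model and optimizer are assumed to be an already-built Tacotron2-style model and its optimizer:

# Hypothetical usage sketch: 'model' and 'optimizer' are assumed to exist
# already, and the checkpoint path below is only an example.
checkpoint_path = 'outdir/checkpoint_10000'
if os.path.isfile(checkpoint_path):
    model, optimizer, learning_rate, iteration = load_checkpoint(
        checkpoint_path, model, optimizer)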
Example #2
import os

import torch

from mellotron.hparams import create_hparams
from mellotron.train import train, json_dump, parse_args

args = parse_args()

os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda

if __name__ == '__main__':
    try:
        from setproctitle import setproctitle

        setproctitle('zhrtvc-mellotron-train')
    except ImportError:
        pass

    hparams = create_hparams(args.hparams)

    torch.backends.cudnn.enabled = hparams.cudnn_enabled
    torch.backends.cudnn.benchmark = hparams.cudnn_benchmark

    print("FP16 Run:", hparams.fp16_run)
    print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
    print("Distributed Run:", hparams.distributed_run)
    print("cuDNN Enabled:", hparams.cudnn_enabled)
    print("cuDNN Benchmark:", hparams.cudnn_benchmark)

    meta_folder = os.path.join(args.output_directory, 'metadata')
    os.makedirs(meta_folder, exist_ok=True)

    path = os.path.join(meta_folder, "args.json")
    obj = args.__dict__
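The excerpt stops before the dictionary is written out; a minimal sketch of that step with the standard json module (the imported json_dump helper presumably does something similar, but its exact signature is not shown in this excerpt):

import json

# Sketch only: persist the parsed CLI arguments as metadata/args.json.
with open(path, 'w', encoding='utf-8') as f:
    json.dump(obj, f, ensure_ascii=False, indent=4)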
Example #3
import re

import torch
import aukit
import unidecode
import yaml
import librosa

from waveglow import inference as waveglow
from melgan import inference as melgan
from mellotron import inference as mellotron
from utils.argutils import locals2dict

from mellotron.layers import TacotronSTFT
from mellotron.hparams import create_hparams

# Use the Griffin-Lim vocoder
_hparams = create_hparams()
_stft = TacotronSTFT(_hparams.filter_length, _hparams.hop_length,
                     _hparams.win_length, _hparams.n_mel_channels,
                     _hparams.sampling_rate, _hparams.mel_fmin,
                     _hparams.mel_fmax)

_use_waveglow = 0

_device = 'cuda' if torch.cuda.is_available() else 'cpu'
filename_formatter_re = re.compile(r'[\s\\/:*?"<>|\']+')


def plot_mel_alignment_gate_audio(mel,
                                  alignment,
                                  gate,
                                  audio,
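The module-level code above prepares STFT parameters for a Griffin-Lim fallback vocoder; a minimal sketch of such an inversion with librosa is shown below. The function name is hypothetical, the parameters mirror the _hparams fields above, and the expected scaling of the input mel is an assumption:

import numpy as np

def griffinlim_vocode(mel, hparams=_hparams):
    # Sketch only: invert a (n_mel_channels, frames) mel spectrogram back to
    # audio with librosa's Griffin-Lim implementation. Mellotron's mels are
    # log-compressed, so they would need de-compression (e.g. np.exp) first.
    return librosa.feature.inverse.mel_to_audio(
        np.asarray(mel),
        sr=hparams.sampling_rate,
        n_fft=hparams.filter_length,
        hop_length=hparams.hop_length,
        win_length=hparams.win_length,
        fmin=hparams.mel_fmin,
        fmax=hparams.mel_fmax)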
Example #4
import os

import yaml
import torch

from mellotron.hparams import create_hparams
from mellotron.train import train, json_dump, yaml_dump, parse_args

# 'args' is parsed the same way as in the other examples from this project.
args = parse_args()

if __name__ == '__main__':
    try:
        from setproctitle import setproctitle

        setproctitle('zhrtvc-mellotron-train')
    except ImportError:
        pass

    hparams = create_hparams(args.hparams_json, level=args.hparams_level)

    torch.backends.cudnn.enabled = hparams.cudnn_enabled
    torch.backends.cudnn.benchmark = hparams.cudnn_benchmark

    print("FP16 Run:", hparams.fp16_run)
    print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
    print("Distributed Run:", hparams.distributed_run)
    print("cuDNN Enabled:", hparams.cudnn_enabled)
    print("cuDNN Benchmark:", hparams.cudnn_benchmark)

    meta_folder = os.path.join(args.output_directory, 'metadata')
    os.makedirs(meta_folder, exist_ok=True)

    stem_path = os.path.join(meta_folder, "args")
    obj = args.__dict__
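The snippet ends before anything is written to stem_path; a minimal sketch of a YAML dump with PyYAML (the project's own yaml_dump helper is not shown here, so its signature is not assumed):

# Sketch only: store the CLI arguments next to the checkpoints as YAML.
with open(stem_path + '.yaml', 'w', encoding='utf-8') as f:
    yaml.safe_dump(obj, f, allow_unicode=True, default_flow_style=False)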
Example #5
import os

import torch

from mellotron.hparams import create_hparams
from mellotron.train import train, json_dump, parse_args, yaml_dump

args = parse_args()

os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda

if __name__ == '__main__':
    try:
        from setproctitle import setproctitle

        setproctitle('zhrtvc-mellotron-train')
    except ImportError:
        pass

    hparams = create_hparams(args.hparams_json)

    torch.backends.cudnn.enabled = hparams.cudnn_enabled
    torch.backends.cudnn.benchmark = hparams.cudnn_benchmark

    print("FP16 Run:", hparams.fp16_run)
    print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
    print("Distributed Run:", hparams.distributed_run)
    print("cuDNN Enabled:", hparams.cudnn_enabled)
    print("cuDNN Benchmark:", hparams.cudnn_benchmark)

    meta_folder = os.path.join(args.output_directory, 'metadata')
    os.makedirs(meta_folder, exist_ok=True)

    stem_path = os.path.join(meta_folder, "args")
    obj = args.__dict__
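As in the examples above, CUDA_VISIBLE_DEVICES is assigned from args.cuda immediately after parsing the arguments; a minimal sketch of why that ordering matters (the device string is only an example):

# Sketch only: the variable must be set before the first CUDA initialization,
# which is why these scripts assign it right after parse_args().
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # example value; the scripts use args.cuda
print(torch.cuda.device_count())          # now reports only the visible devices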