Example #1
import os

import torch
import torch.multiprocessing as mp

import utils  # project-local module, as in the other examples


def main():
  """Assumes single-node, multi-GPU training only."""
  assert torch.cuda.is_available(), "CPU training is not allowed."

  n_gpus = torch.cuda.device_count()
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '8000'  # any free TCP port; must be <= 65535

  hps = utils.get_hparams()
  mp.spawn(train_and_eval, nprocs=n_gpus, args=(n_gpus, hps,))
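
For context, here is a minimal sketch of the worker that mp.spawn launches above. It is an assumption, not part of the source: train_and_eval is only referenced there, and the body below just shows the usual env:// rendezvous against the MASTER_ADDR/MASTER_PORT set in main().

import torch
import torch.distributed as dist

def train_and_eval(rank, n_gpus, hps):
  # mp.spawn passes the process index as the first argument, so each
  # worker joins the NCCL process group under its own rank.
  dist.init_process_group(backend='nccl', init_method='env://',
                          world_size=n_gpus, rank=rank)
  torch.cuda.set_device(rank)
  # ... build the model, wrap it in DistributedDataParallel, train ...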
Example #2
def main():
    hps = utils.get_hparams()
    logger = utils.get_logger(hps.model_dir)
    logger.info(hps)
    utils.check_git_hash(hps.model_dir)

    torch.manual_seed(hps.train.seed)

    train_dataset = TextMelLoader(hps.data.training_files, hps.data)
    collate_fn = TextMelCollate(1)
    train_loader = DataLoader(train_dataset,
                              num_workers=8,
                              shuffle=True,
                              batch_size=hps.train.batch_size,
                              pin_memory=True,
                              drop_last=True,
                              collate_fn=collate_fn)

    generator = FlowGenerator_DDI(speaker_dim=hps.model.speaker_embedding,
                                  n_vocab=len(symbols),
                                  out_channels=hps.data.n_mel_channels,
                                  **hps.model).cuda()
    optimizer_g = commons.Adam(generator.parameters(),
                               scheduler=hps.train.scheduler,
                               dim_model=hps.model.hidden_channels,
                               warmup_steps=hps.train.warmup_steps,
                               lr=hps.train.learning_rate,
                               betas=hps.train.betas,
                               eps=hps.train.eps)

    # Run one batch through the generator to trigger the data-dependent
    # initialization (DDI) of its flow layers.
    generator.train()
    for batch_idx, (x, x_lengths, y, y_lengths,
                    speaker_embedding) in enumerate(train_loader):
        x, x_lengths = x.cuda(), x_lengths.cuda()
        y, y_lengths = y.cuda(), y_lengths.cuda()
        speaker_embedding = speaker_embedding.cuda()

        _ = generator(x, x_lengths, speaker_embedding, y, y_lengths, gen=False)
        break

    utils.save_checkpoint(generator, optimizer_g, hps.train.learning_rate, 0,
                          os.path.join(hps.model_dir, "ddi_G.pth"))
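
As a hedged usage sketch (not in the source): once ddi_G.pth has been written, a later run can reload the initialized weights. The call below follows the utils.load_checkpoint signature visible in Example #5.

# Hypothetical resume step; load_checkpoint(path, model) returns
# (model, optimizer, learning_rate, iteration) as in Example #5.
checkpoint_path = os.path.join(hps.model_dir, "ddi_G.pth")
generator, _, learning_rate, iteration = utils.load_checkpoint(
    checkpoint_path, generator)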
Example #3
def main():
  # list of hparams to test
  optimizer_list = ['adam', 'sgd']
  num_units_list = [32, 128]
  dropout_list = [0.0, 0.4, 0.8]

  exp_summary = create_experiment_summary(optimizer_list, num_units_list,
                                          dropout_list)
  # Write the experiment-level summary so TensorBoard's HParams dashboard
  # can group the per-run results logged by train_and_test.
  root_writer = tf.summary.create_file_writer('runs/tuning')
  with root_writer.as_default():
    tf.summary.import_event(
        tf.compat.v1.Event(summary=exp_summary).SerializeToString())

  run = 0
  for optimizer in optimizer_list:
    for num_units in num_units_list:
      for dropout in dropout_list:
        hparams = get_hparams(
            num_units=num_units, optimizer=optimizer, dropout=dropout, run=run)
        print_run_info(hparams)
        train_and_test(hparams)
        run += 1
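
For comparison, the same experiment-level metadata can be written with the higher-level HParams API instead of a raw tf.compat.v1.Event proto. A minimal sketch, assuming TensorFlow 2.x with the tensorboard package installed:

import tensorflow as tf
from tensorboard.plugins.hparams import api as hp

HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([32, 128]))
HP_DROPOUT = hp.HParam('dropout', hp.Discrete([0.0, 0.4, 0.8]))

with tf.summary.create_file_writer('runs/tuning').as_default():
  # Registers hyperparameters and metrics once at the experiment root so
  # the HParams dashboard can group the per-run summaries.
  hp.hparams_config(
      hparams=[HP_OPTIMIZER, HP_NUM_UNITS, HP_DROPOUT],
      metrics=[hp.Metric('accuracy', display_name='Accuracy')])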
Example #4
def main():
    hparams = get_hparams()
    datasets = get_dataset(hparams)

    model = keras.models.Sequential([
        keras.layers.Flatten(),
        keras.layers.Dense(hparams.num_units, activation='relu'),
        keras.layers.Dropout(hparams.dropout),
        keras.layers.Dense(hparams.num_units, activation='relu'),
        keras.layers.Dropout(hparams.dropout),
        keras.layers.Dense(hparams.num_classes, activation='softmax')
    ])

    optimizer = get_optimizer(hparams)

    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # datasets['train'] and datasets['test'] are assumed to yield
    # (features, labels) pairs, so no separate y is passed.
    model.fit(x=datasets['train'],
              y=None,
              epochs=hparams.epochs,
              validation_data=datasets['test'])
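
The helpers get_hparams, get_dataset, and get_optimizer are not shown. One plausible get_optimizer, assuming hparams.optimizer holds one of the strings from Example #3:

from tensorflow import keras

def get_optimizer(hparams):
    # Map the hyperparameter string to a Keras optimizer instance
    # (hypothetical mapping; the original helper is not in the source).
    optimizers = {
        'adam': keras.optimizers.Adam,
        'sgd': keras.optimizers.SGD,
    }
    return optimizers[hparams.optimizer]()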
Example #5
def main():
  hps = utils.get_hparams()
  logger = utils.get_logger(hps.model_dir)
  logger.info(hps)
  utils.check_git_hash(hps.model_dir)

  torch.manual_seed(hps.train.seed)

  train_dataset = TextMelLoader(hps.data.training_files, hps.data)
  collate_fn = TextMelCollate(1)
  train_loader = DataLoader(train_dataset, num_workers=8, shuffle=True,
      batch_size=hps.train.batch_size, pin_memory=True,
      drop_last=True, collate_fn=collate_fn)

  generator = FlowGenerator_DDI(
      len(symbols),
      out_channels=hps.data.n_mel_channels,
      **hps.model).cuda()
  optimizer_g = commons.Adam(generator.parameters(),
                             scheduler=hps.train.scheduler,
                             dim_model=hps.model.hidden_channels,
                             warmup_steps=hps.train.warmup_steps,
                             lr=hps.train.learning_rate,
                             betas=hps.train.betas,
                             eps=hps.train.eps)

  # Run one batch to trigger data-dependent initialization (DDI).
  generator.train()
  for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):
    x, x_lengths = x.cuda(), x_lengths.cuda()
    y, y_lengths = y.cuda(), y_lengths.cuda()

    _ = generator(x, x_lengths, y, y_lengths, gen=False)
    break

  # Check for a pretrained checkpoint and load it without an optimizer.
  pretrained_checkpoint_path = os.path.join(hps.model_dir, "pretrained.pth")
  if os.path.isfile(pretrained_checkpoint_path):
    logger.info("Loading pretrained checkpoint: %s" % pretrained_checkpoint_path)
    model, optimizer, learning_rate, iteration = utils.load_checkpoint(
        pretrained_checkpoint_path, generator)
    utils.save_checkpoint(model, optimizer_g, hps.train.learning_rate, 0,
                          os.path.join(hps.model_dir, "ddi_G.pth"))
  else:
    utils.save_checkpoint(generator, optimizer_g, hps.train.learning_rate, 0,
                          os.path.join(hps.model_dir, "ddi_G.pth"))
Example #6
import os
import json
import argparse
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from data import LJspeechDataset, collate_fn, collate_fn_synthesize

import models
import commons
import utils

hps = utils.get_hparams()
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)

use_cuda = hps.train.use_cuda and torch.cuda.is_available()
torch.manual_seed(hps.train.seed)
device = torch.device("cuda" if use_cuda else "cpu")

kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
train_dataset = LJspeechDataset(hps.data.data_path, True, 0.1)
test_dataset = LJspeechDataset(hps.data.data_path, False, 0.1)
train_loader = DataLoader(train_dataset,
                          batch_size=hps.train.batch_size,
                          shuffle=True,
                          collate_fn=collate_fn,
                          **kwargs)
test_loader = DataLoader(test_dataset,
                         batch_size=hps.train.batch_size,
                         collate_fn=collate_fn,
                         **kwargs)  # assumed completion; the source snippet is truncated at this line
Example #7
def main():
    hparams = get_hparams(output_root='runs/custom')
    train_and_test(hparams)
Example #8
def main():
  hparams = get_hparams()
  train_and_eval(hparams)