コード例 #1
0
def get_loader(data_path='data/vctk',
               max_seq_len=1000,
               batch_size=64,
               nspk=22):
    """Build a data loader over the validation .npz feature folder.

    Args:
        data_path: root directory containing ``numpy_features_valid``.
        max_seq_len: truncation length for TBPTT batching.
        batch_size: number of utterances per batch.
        nspk: speaker count; a value of 1 flags single-speaker mode
            to ``NpzFolder``.

    Returns:
        An ``NpzLoader`` over the validation set (no shuffling).
    """
    single_speaker = nspk == 1
    valid_folder = NpzFolder(data_path + '/numpy_features_valid',
                             single_speaker)
    return NpzLoader(valid_folder,
                     max_seq_len=max_seq_len,
                     batch_size=batch_size,
                     num_workers=4,
                     pin_memory=True)
コード例 #2
0
def get_loaders(data_path='data/vctk',
                max_seq_len=1000,
                batch_size=64,
                nspk=22):
    """Build the train and validation data loaders.

    Args:
        data_path: root directory containing ``numpy_features`` (train)
            and ``numpy_features_valid`` (validation) subfolders.
        max_seq_len: truncation length for TBPTT batching.
        batch_size: number of utterances per batch.
        nspk: speaker count; 1 flags single-speaker mode to ``NpzFolder``.

    Returns:
        Tuple ``(train_loader, valid_loader)``; only the train loader
        shuffles its samples.
    """
    single_speaker = nspk == 1

    # Training set: shuffled every epoch.
    train_folder = NpzFolder(data_path + '/numpy_features', single_speaker)
    train_loader = NpzLoader(train_folder,
                             max_seq_len=max_seq_len,
                             batch_size=batch_size,
                             num_workers=4,
                             pin_memory=True,
                             shuffle=True)

    # Validation set: deterministic order.
    valid_folder = NpzFolder(data_path + '/numpy_features_valid',
                             single_speaker)
    valid_loader = NpzLoader(valid_folder,
                             max_seq_len=max_seq_len,
                             batch_size=batch_size,
                             num_workers=4,
                             pin_memory=True)

    return train_loader, valid_loader
コード例 #3
0
ファイル: eval_mcd.py プロジェクト: RichardSterry/msc-project
def main():
    """Evaluate a checkpointed model on the validation set.

    Parses CLI options, seeds torch/CUDA, builds the validation loader,
    loads the model from ``--checkpoint`` and runs ``evaluate``,
    logging the resulting loss.
    """
    parser = argparse.ArgumentParser(description='PyTorch Loop')
    # Env options:
    parser.add_argument('--epochs', type=int, default=92, metavar='N',
                        help='number of epochs to train (default: 92)')
    # BUG FIX: help text previously said "(default: 3)" while the actual
    # default is 10 — keep the two consistent.
    parser.add_argument('--seed', type=int, default=10, metavar='S',
                        help='random seed (default: 10)')
    parser.add_argument('--expName', type=str, default='vctk', metavar='E',
                        help='Experiment name')
    parser.add_argument('--data', default='data/vctk',
                        metavar='D', type=str, help='Data path')
    parser.add_argument('--checkpoint', default='',
                        metavar='C', type=str, help='Checkpoint path')
    parser.add_argument('--gpu', default=0,
                        metavar='G', type=int, help='GPU device ID')
    # Data options
    parser.add_argument('--max-seq-len', type=int, default=1000,
                        help='Max sequence length for tbptt')
    parser.add_argument('--batch-size', type=int, default=64,
                        help='Batch size')
    # Model options
    parser.add_argument('--nspk', type=int, default=22,
                        help='Number of speakers')

    # init: checkpoints land under checkpoints/<expName>; seed both the
    # CPU and CUDA RNGs for reproducible evaluation.
    args = parser.parse_args()
    args.expName = os.path.join('checkpoints', args.expName)
    torch.cuda.set_device(args.gpu)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logging = create_output_dir(args)

    # data: validation split only; nspk == 1 flags single-speaker mode.
    valid_dataset = NpzFolder(args.data + '/numpy_features_valid',
                              args.nspk == 1)
    valid_loader = NpzLoader(valid_dataset,
                             max_seq_len=args.max_seq_len,
                             batch_size=args.batch_size,
                             num_workers=4,
                             pin_memory=True)

    # load model
    model, norm = model_def(args.checkpoint, gpu=args.gpu,
                            valid_loader=valid_loader)

    # Begin! BUG FIX: eval_loss was previously computed and silently
    # discarded — report it so the script has a visible result.
    eval_loss = evaluate(model, norm, valid_loader, logging)
    logging.info('Evaluation loss: %s', eval_loss)
コード例 #4
0
# init: parse CLI options (parser is built earlier in this file), route
# all experiment output under checkpoints/<expName>, and seed both the
# CPU and CUDA RNGs so training runs are reproducible.
args = parser.parse_args()
args.expName = os.path.join('checkpoints', args.expName)
torch.cuda.set_device(args.gpu)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# NOTE(review): `logging` here shadows any imported logging module —
# create_output_dir returns the logger used throughout this script.
logging = create_output_dir(args)
vis = visdom.Visdom(env=args.expName)


# data: NpzFolder's second argument flags single-speaker mode when
# nspk == 1 — presumably it then skips speaker embeddings; verify
# against the NpzFolder definition.
logging.info("Building dataset.")
train_dataset = NpzFolder(args.data + '/numpy_features', args.nspk == 1)
train_loader = NpzLoader(train_dataset,
                         max_seq_len=args.max_seq_len,
                         batch_size=args.batch_size,
                         num_workers=4,
                         pin_memory=True,
                         shuffle=True)

# Validation split: same settings as training but deterministic order
# (no shuffle).
valid_dataset = NpzFolder(args.data + '/numpy_features_valid', args.nspk == 1)
valid_loader = NpzLoader(valid_dataset,
                         max_seq_len=args.max_seq_len,
                         batch_size=args.batch_size,
                         num_workers=4,
                         pin_memory=True)

logging.info("Dataset ready!")


def train(model, criterion, optimizer, epoch, train_losses):
    total = 0   # Reset every plot_every