# Example 1
def load_dataset(args):
    """Build train / test / synthesis DataLoaders over the KOR dataset.

    Args:
        args: argument namespace providing ``data_path``, ``max_time_steps``,
            ``hop_length``, ``bsz`` and ``num_workers``.

    Returns:
        Tuple ``(train_loader, test_loader, synth_loader)``.
    """
    train_dataset = KORDataset(args.data_path, True, 0.1)
    test_dataset = KORDataset(args.data_path, False, 0.1)

    # Train/test batches are collated with a max_time_steps crop; synthesis
    # uses its own collate function (batch size 1, full-length samples).
    collate_fn1 = lambda batch: collate_fn_tr(batch, args.max_time_steps,
                                              args.hop_length)
    collate_fn2 = lambda batch: collate_fn_synth(batch, args.hop_length)

    train_loader = DataLoader(train_dataset,
                              batch_size=args.bsz,
                              shuffle=True,
                              collate_fn=collate_fn1,
                              num_workers=args.num_workers,
                              pin_memory=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.bsz,
                             collate_fn=collate_fn1,
                             num_workers=args.num_workers,
                             pin_memory=True)
    synth_loader = DataLoader(test_dataset,
                              batch_size=1,
                              collate_fn=collate_fn2,
                              num_workers=args.num_workers,
                              pin_memory=True)

    # BUG FIX: len(<DataLoader>) is the number of *batches*; the messages
    # claim sample counts, so report the dataset lengths instead.
    print('num of train samples', len(train_dataset))
    print('num of test samples', len(test_dataset))

    return train_loader, test_loader, synth_loader
# Example 2
def load_dataset(args):
    """Return a batch-size-1 synthesis DataLoader over the KOR test split."""
    test_split = KORDataset(args.data_path, False, 0.1)
    synth_collate = lambda batch: collate_fn_synth(batch, args.hop_length)
    synth_loader = DataLoader(test_split,
                              batch_size=1,
                              collate_fn=synth_collate,
                              num_workers=args.num_workers,
                              pin_memory=True)
    print('sr', args.sr)
    return synth_loader
def load_dataset(args):
    """Build train / test / synthesis DataLoaders, with optional DDP sampling.

    Args:
        args: argument namespace providing ``data_path``, ``distributed``,
            ``max_time_steps``, ``hop_length``, ``bsz`` and ``num_workers``.

    Returns:
        Tuple ``(train_loader, test_loader, synth_loader)``.
    """
    train_dataset = KORDataset(args.data_path, True, 0.1)
    test_dataset = KORDataset(args.data_path, False, 0.1)

    # Under DDP each rank gets a disjoint shard via DistributedSampler; the
    # sampler then owns the shuffling, so DataLoader shuffle must be off.
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    collate_fn1 = lambda batch: collate_fn_tr(batch, args.max_time_steps, args.hop_length)
    collate_fn2 = lambda batch: collate_fn_synth(batch, args.hop_length)

    train_loader = DataLoader(train_dataset, batch_size=args.bsz, shuffle=(train_sampler is None), collate_fn=collate_fn1,
                              num_workers=args.num_workers, pin_memory=True, sampler=train_sampler)
    test_loader = DataLoader(test_dataset, batch_size=args.bsz, collate_fn=collate_fn1,
                             num_workers=args.num_workers, pin_memory=True)
    synth_loader = DataLoader(test_dataset, batch_size=1, collate_fn=collate_fn2,
                              num_workers=args.num_workers, pin_memory=True)

    # BUG FIX: len(<DataLoader>) is the number of *batches*; the messages
    # claim sample counts, so report the dataset lengths instead.
    print('num of train samples', len(train_dataset))
    print('num of test samples', len(test_dataset))

    return train_loader, test_loader, synth_loader
def load_dataset(args):
    """Build train / test / synthesis DataLoaders over the KOR dataset.

    Args:
        args: argument namespace providing ``data_path``, ``batch_size``
            and ``num_workers``.

    Returns:
        Tuple ``(train_loader, test_loader, synth_loader)``.
    """
    train_dataset = KORDataset(args.data_path, True, 0.1)
    test_dataset = KORDataset(args.data_path, False, 0.1)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              collate_fn=collate_fn,
                              num_workers=args.num_workers,
                              pin_memory=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             collate_fn=collate_fn,
                             num_workers=args.num_workers,
                             pin_memory=True)
    # Synthesis runs one full-length utterance at a time.
    synth_loader = DataLoader(test_dataset,
                              batch_size=1,
                              collate_fn=collate_fn_synthesize,
                              num_workers=args.num_workers,
                              pin_memory=True)

    # BUG FIX: len(<DataLoader>) is the number of *batches*; the messages
    # claim sample counts, so report the dataset lengths instead.
    print('num of train samples', len(train_dataset))
    print('num of test samples', len(test_dataset))

    return train_loader, test_loader, synth_loader
# Example 5
# Checkpoint / output directories.
# FIX: the original isdir-then-makedirs pattern has a TOCTOU race (another
# process can create the directory between the check and the call);
# exist_ok=True creates idempotently in one call.
os.makedirs(args.save, exist_ok=True)
os.makedirs(args.loss, exist_ok=True)
os.makedirs(args.sample_path, exist_ok=True)
os.makedirs(os.path.join(args.save, args.model_name), exist_ok=True)

# Select GPU when available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# LOAD DATASETS
train_dataset = KORDataset(args.data_path, True, 0.1)
test_dataset = KORDataset(args.data_path, False, 0.1)
train_loader = DataLoader(train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          collate_fn=collate_fn,
                          num_workers=args.num_workers,
                          pin_memory=True)
test_loader = DataLoader(test_dataset,
                         batch_size=args.batch_size,
                         collate_fn=collate_fn,
                         num_workers=args.num_workers,
                         pin_memory=True)