def get_dataloaders(args):
    # Build train/val/test DataLoaders for the GTZAN splits described in `args`.
    train_set = GTZAN(phase='train',
                      min_segments=args['train_segments'],
                      randomized=True,
                      overlap=args['overlap'])
    val_set = GTZAN(phase='val',
                    min_segments=args['inference_segments'],
                    overlap=args['overlap'])
    test_set = GTZAN(phase='test',
                     min_segments=args['inference_segments'],
                     overlap=args['overlap'])

    train_loader = DataLoader(train_set,
                              batch_size=args['batch_size'],
                              shuffle=True,
                              num_workers=args['num_workers'],
                              pin_memory=True)
    val_loader = DataLoader(val_set,
                            batch_size=args['batch_size'],
                            num_workers=args['num_workers'],
                            pin_memory=True)
    test_loader = DataLoader(test_set,
                             batch_size=args['batch_size'],
                             num_workers=args['num_workers'],
                             pin_memory=True)
    dataloaders = {
        'train': train_loader,
        'val': val_loader,
        'test': test_loader,
    }

    return dataloaders


def get_datasets(args):
    # All splits load the full dataset ('all'); their X/Y arrays are
    # reassigned per fold by the cross-validation loops further below.
    dataset = GTZAN(phase='all', min_segments=args['train_segments'])
    train_set = GTZAN(phase='all',
                      min_segments=args['train_segments'],
                      randomized=True,
                      overlap=args['overlap'])
    val_set = GTZAN(phase='all',
                    min_segments=args['inference_segments'],
                    overlap=args['overlap'])
    test_set = GTZAN(phase='all',
                     min_segments=args['inference_segments'],
                     overlap=args['overlap'])

    return {
        'all': dataset,
        'train': train_set,
        'val': val_set,
        'test': test_set,
    }
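
For reference, a minimal usage sketch; the `args` keys match exactly what the two functions above read, and the values are hypothetical:

args = {
    'train_segments': 10,       # hypothetical values throughout
    'inference_segments': 10,
    'overlap': 0.5,
    'batch_size': 32,
    'num_workers': 4,
}
dataloaders = get_dataloaders(args)
for batch in dataloaders['train']:
    pass  # training step goes here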
Example #3

NUM_EARLY_STOPPING_PATIENCE = 50

DEVICE = torch.device('cuda:0')

# In[4]:

# CNN pretrain
criterion = nn.NLLLoss()
net = DenseInception(1, 32, 3, cnn_pretrain=True).to(DEVICE)
# A second instance, presumably used to snapshot the best weights during training.
best_net = DenseInception(1, 32, 3, cnn_pretrain=True).to(DEVICE)
if not osp.exists(PRETRAIN_CHECKPOINT):
    optimizer = optim.Adam(net.parameters(), lr=LR)

    train_set = GTZAN(phase='train',
                      min_segments=PRETRAIN_SEGMENTS,
                      randomized=True,
                      overlap=OVERLAP,
                      noise_rate=1e-3)
    val_set = GTZAN(phase='val', min_segments=10, overlap=OVERLAP)
    test_set = GTZAN(phase='test', min_segments=10, overlap=OVERLAP)
    manual_seed()
    train_loader = DataLoader(train_set,
                              batch_size=PRETRAIN_BATCH_SIZE,
                              shuffle=True,
                              num_workers=9,
                              pin_memory=True,
                              worker_init_fn=manual_seed)  # seed each worker with its worker id; a lambda would not pickle under spawn
    val_loader = DataLoader(val_set,
                            batch_size=PRETRAIN_BATCH_SIZE,
                            num_workers=9,
                            pin_memory=True)
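
`manual_seed` is called above but not defined in this excerpt. A plausible sketch, assuming it seeds every RNG the pipeline touches; the optional argument lets each DataLoader worker derive its seed from its worker id:

import random

import numpy as np
import torch

def manual_seed(seed=0):
    # Hypothetical reconstruction of the missing helper.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)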
Example #4

            loss = running_loss / running_segments
            acc = running_corrects / running_samples
            seg_acc = running_seg_corrects / running_segments

            progress.set_postfix(OrderedDict(
                phase=phase,
                loss='{:.4f}'.format(loss),
                acc='{:.2%}'.format(acc),
                seg_acc='{:.2%}'.format(seg_acc)))

    return loss, acc, seg_acc
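
The fragment above tracks two accuracies: `acc` is clip-level (normalized by `running_samples`) while `seg_acc` is segment-level (normalized by `running_segments`). How per-segment outputs are reduced to one clip prediction is outside this excerpt; a majority vote is one plausible scheme:

import torch

def clip_prediction(segment_logits):
    # segment_logits: (num_segments, num_classes) tensor for a single clip.
    seg_preds = segment_logits.argmax(dim=1)  # per-segment predicted classes
    return torch.mode(seg_preds).values       # most frequent class wins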


criterion = nn.CrossEntropyLoss()
cv_results = []
dataset = GTZAN(phase='all', min_segments=SEGMENTS)
train_set = GTZAN(phase='all', min_segments=SEGMENTS)
test_set = GTZAN(phase='all', min_segments=SEGMENTS)

skf = StratifiedKFold(NUM_KFOLD, shuffle=True, random_state=1234)
for kfold, (train_index, test_index) in enumerate(skf.split(dataset.X, dataset.Y)):
    train_set.X, train_set.Y = dataset.X[train_index], dataset.Y[train_index]
    test_set.X, test_set.Y = dataset.X[test_index], dataset.Y[test_index]
    
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=9, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, num_workers=9, pin_memory=True)
    dataloaders = {
        'train': train_loader,
        'test': test_loader,
    }
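
`cv_results` is presumably appended to once per fold inside this loop. Assuming each entry is a `(loss, acc, seg_acc)` tuple, the folds could be summarized afterwards like this:

import numpy as np

losses, accs, seg_accs = (np.array(m) for m in zip(*cv_results))
print('CV acc: {:.2%} ± {:.2%}'.format(accs.mean(), accs.std()))
print('CV seg_acc: {:.2%} ± {:.2%}'.format(seg_accs.mean(), seg_accs.std()))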
    
Example #5

            loss = running_loss / running_segments
            acc = running_corrects / running_samples
            seg_acc = running_seg_corrects / running_segments

            progress.set_postfix(OrderedDict(
                phase=phase,
                loss='{:.4f}'.format(loss),
                acc='{:.2%}'.format(acc),
                seg_acc='{:.2%}'.format(seg_acc)))

    return loss, acc, seg_acc


criterion = nn.CrossEntropyLoss()
cv_results = []
dataset = GTZAN(phase='all', min_segments=SEGMENTS)
train_set = GTZAN(phase='all', min_segments=SEGMENTS, randomized=True, overlap=OVERLAP)
test_set = GTZAN(phase='all', min_segments=SEGMENTS, overlap=OVERLAP)

skf = StratifiedKFold(NUM_KFOLD, shuffle=True, random_state=1234)
for kfold, (train_index, test_index) in enumerate(skf.split(dataset.X, dataset.Y)):
    if kfold not in (6, 9):
        continue  # only re-run folds 6 and 9 in this pass
    train_set.X, train_set.Y = dataset.X[train_index], dataset.Y[train_index]
    test_set.X, test_set.Y = dataset.X[test_index], dataset.Y[test_index]
    
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=9, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, num_workers=9, pin_memory=True)
    dataloaders = {
        'train': train_loader,
        'test': test_loader,
    }
Example #6

            loss = running_loss / running_segments
            acc = running_corrects / running_samples
            seg_acc = running_seg_corrects / running_segments

            progress.set_postfix(
                OrderedDict(phase=phase,
                            loss='{:.4f}'.format(loss),
                            acc='{:.2%}'.format(acc),
                            seg_acc='{:.2%}'.format(seg_acc)))

    return loss, acc, seg_acc


criterion = nn.CrossEntropyLoss()
cv_results = []
dataset = GTZAN(phase='all', min_segments=SEGMENTS)
train_set = GTZAN(phase='all', min_segments=SEGMENTS, overlap=0.5)
test_set = GTZAN(phase='all', min_segments=SEGMENTS, overlap=0.5)

skf = StratifiedKFold(NUM_KFOLD, shuffle=True, random_state=1234)
for kfold, (train_index,
            test_index) in enumerate(skf.split(dataset.X, dataset.Y)):
    train_set.X, train_set.Y = dataset.X[train_index], dataset.Y[train_index]
    test_set.X, test_set.Y = dataset.X[test_index], dataset.Y[test_index]

    train_loader = DataLoader(train_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=9,
                              pin_memory=True)
    test_loader = DataLoader(test_set,
                             batch_size=BATCH_SIZE,
                             num_workers=9,
                             pin_memory=True)
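
This variant fixes `overlap=0.5` for both splits, so consecutive analysis windows share half their samples and the segment count roughly doubles. A back-of-envelope check, assuming GTZAN's ~30 s clips and a hypothetical 3 s window:

def num_segments(clip_sec=30.0, win_sec=3.0, overlap=0.5):
    hop = win_sec * (1 - overlap)  # hop shrinks as overlap grows
    return int((clip_sec - win_sec) // hop) + 1

print(num_segments(overlap=0.5))  # 19 windows per clip
print(num_segments(overlap=0.0))  # 10 windows per clip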

Example #7

            seg_acc = running_seg_corrects / running_segments

            progress.set_postfix(
                OrderedDict(phase=phase,
                            loss='{:.4f}'.format(loss),
                            acc='{:.2%}'.format(acc),
                            seg_acc='{:.2%}'.format(seg_acc)))

    return loss, acc, seg_acc


# In[5]:

criterion = nn.CrossEntropyLoss()
cv_results = []
dataset = GTZAN(phase='all', min_segments=SEGMENTS)
train_set = GTZAN(phase='all', min_segments=SEGMENTS, randomized=True)
test_set = GTZAN(phase='all', min_segments=SEGMENTS)

skf = StratifiedKFold(NUM_KFOLD, shuffle=True, random_state=1234)
for kfold, (train_index,
            test_index) in enumerate(skf.split(dataset.X, dataset.Y)):
    train_set.X, train_set.Y = dataset.X[train_index], dataset.Y[train_index]
    test_set.X, test_set.Y = dataset.X[test_index], dataset.Y[test_index]
    train_loader = DataLoader(train_set,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=9)
    test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, num_workers=9)

    dataloaders = {