def make_loaders(args, valid_size=1000):
    """
    Preprocess the CIFAR-10 feature files and build DataLoaders.

    The last ``valid_size`` rows of the training arrays are held out as a
    validation set.  Real test labels are unavailable, so a zero-filled
    placeholder array is used for the test loader.

    Args:
        args: namespace with attributes ``dataset`` (directory holding the
            ``*.npy`` files), ``batch_size`` (int) and ``cuda`` (bool).
        valid_size (int): number of training samples held out for
            validation.  Defaults to 1000, preserving the original split.

    Returns:
        tuple: ``(train_loader, val_loader, test_loader)``.
    """
    # Extra workers / pinned memory only pay off when feeding a GPU.
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    x = np.load(os.path.join(args.dataset,
                             'train_feats.npy')).astype(np.float32)
    y = np.load(os.path.join(args.dataset, 'train_labels.npy'))
    # Hold out the tail of the training set for validation.
    x, xval = x[:-valid_size], x[-valid_size:]
    y, yval = y[:-valid_size], y[-valid_size:]
    xtest = np.load(os.path.join(args.dataset,
                                 'test_feats.npy')).astype(np.float32)
    # Placeholder labels so the test set fits into a TensorDataset.
    ytest = np.zeros((xtest.shape[0], ), dtype=y.dtype)
    # Preprocess validation and test together in one "eval" batch;
    # presumably cifar_10_preprocess fits its normalization statistics on
    # the first argument (train) only — TODO confirm in its definition.
    xt = np.concatenate([xval, xtest], axis=0)
    x, xt = cifar_10_preprocess(x, xt)
    xval, xtest = xt[:valid_size], xt[valid_size:]
    train_loader = DataLoader(TensorDataset(torch.from_numpy(x),
                                            torch.from_numpy(y)),
                              batch_size=args.batch_size,
                              shuffle=True,
                              **kwargs)
    val_loader = DataLoader(TensorDataset(torch.from_numpy(xval),
                                          torch.from_numpy(yval)),
                            batch_size=args.batch_size,
                            shuffle=True,
                            **kwargs)
    test_loader = DataLoader(TensorDataset(torch.from_numpy(xtest),
                                           torch.from_numpy(ytest)),
                             batch_size=args.batch_size,
                             shuffle=False,
                             **kwargs)
    return train_loader, val_loader, test_loader
# Example #2 (score: 0)
    def __init__(self, dataType):
        """
        Load the CIFAR-10 feature files, normalize them, and keep one split.

        Both splits come from the *training* arrays: the first 1000
        preprocessed training samples act as the held-out evaluation set
        and samples 1000..10999 act as the training set.  The on-disk
        test features are loaded only so the preprocessing step can see
        them; their normalized output is immediately discarded.

        Args:
            dataType (int): 1 selects the training split, 2 selects the
                held-out evaluation split.

        Raises:
            ValueError: if ``dataType`` is neither 1 nor 2.
        """
        xTrain = np.load("dataset/train_feats.npy")
        xTest = np.load("dataset/test_feats.npy")
        train_labels = np.load("dataset/train_labels.npy")

        # Preprocess training and test data to normalize.
        xTrain, xTest = P.cifar_10_preprocess(xTrain, xTest, image_size=32)

        # Re-slice the preprocessed training data; the normalized
        # file-based test set is intentionally overwritten here.
        xTest = xTrain[:1000]
        xTrain = xTrain[1000:11000]
        yTest = train_labels[:1000]
        yTrain = train_labels[1000:11000]

        if dataType == 1:
            self.trainX = xTrain
            self.trainY = yTrain
        elif dataType == 2:
            self.trainX = xTest
            self.trainY = yTest
        else:
            # Fail fast rather than leaving the instance without
            # trainX/trainY (which would surface later as AttributeError).
            raise ValueError(
                "dataType must be 1 (train) or 2 (eval), got {!r}".format(
                    dataType))
# Example #3 (score: 0)
def test_cifar_10_preprocess():
    """Check cifar_10_preprocess against recorded fixture outputs,
    with numpy's SVD replaced by a stub returning the fixture's factors."""
    fixture = load_fixture('cifar_10_preprocess.npz')
    svd_stub = unittest.mock.MagicMock(
        return_value=(fixture['U'], fixture['S'], fixture['V']))
    with unittest.mock.patch('numpy.linalg') as linalg:
        linalg.svd = svd_stub
        out_train, out_test = P.cifar_10_preprocess(
            fixture['x0'], fixture['x1'], 10)
        # The preprocessing must have decomposed the covariance matrix.
        assert svd_stub.called
        covariance = svd_stub.call_args[0][0]
        assert_close(covariance, fixture['sigma'], atol=0.1, rtol=0.01)
    assert_close(out_train, fixture['o0'], atol=1e-2)
    assert_close(out_test, fixture['o1'], atol=1e-2)
# Example #4 (score: 0)
def main(nepochs):
    """Train ALL-CNN-C on preprocessed CIFAR-10 features, then run the
    test set and write predictions to disk.

    Args:
        nepochs (int): epoch index at which training stops; training
            resumes from the epoch stored in the loaded checkpoint.
    """
    feats = np.load("/home/ubuntu/ALL-CNN-C/dataset/train_feats.npy")
    print("loading X done")
    labels = np.load("/home/ubuntu/ALL-CNN-C/dataset/train_labels.npy")
    print("loading Y done")
    test_feats = np.load("/home/ubuntu/ALL-CNN-C/dataset/test_feats.npy")
    print("loading test_X done")
    feats, test_feats = P.cifar_10_preprocess(feats, test_feats)
    print("preprocessing done")

    # One loader per split; only the training split is shuffled.
    loaders = {
        split: Data.DataLoader(
            dataset=CostomDataset(feats, labels, test_feats, split),
            batch_size=batch_size,
            shuffle=(split == "train"),
            num_workers=4)
        for split in ("train", "test")
    }
    train_loader = loaders["train"]
    test_loader = loaders["test"]

    net = ALL_CNN_C()
    print("model: ", net)
    if torch.cuda.is_available():
        print("model in GPU mode")
        net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=learning_rate,
                          momentum=0.9,
                          weight_decay=0.001)
    # Resume from checkpoint; load_model overwrites start_epoch.
    start_epoch = 0
    net, optimizer, start_epoch, loss = load_model(
        load_epoch, load_step, load_loss, net, optimizer,
        "/home/ubuntu/ALL-CNN-C/weights/")

    epoch = start_epoch
    while epoch < nepochs:
        net.train()
        print("########## epoch {} ##########".format(epoch))
        train(train_loader, net, criterion, optimizer, epoch)
        epoch += 1
    print("########## Finished Training ##########")
    predictions = test(test_loader, net, criterion)
    print("########## Finished Testing ##########")
    write_results(predictions)
# Example #5 (score: 0)
def main():
    """Preprocess the data files, carve a dev split off the training set,
    and run the AINDense training routine."""
    raw_train_feats = np.load('./train_data.npy')
    raw_train_labels = np.load('./train_labels.npy')
    raw_test_feats = np.load('./test_data.npy')
    test_labels = np.load('./test_labels.npy')
    processed_train, test_data = P.cifar_10_preprocess(raw_train_feats,
                                                       raw_test_feats)

    # First 40k preprocessed samples train, the next 10k are the dev set.
    split = 40000
    train_data = processed_train[:split]
    train_labels = raw_train_labels[:split]
    dev_data = processed_train[split:50000]
    dev_labels = raw_train_labels[split:50000]

    gpu = torch.cuda.is_available()
    model = AINDense()
    model.apply(weights_init)

    n_iters = 10
    batch_size = 64
    train_generator, dev_generator, test_generator = get_data_loaders(
        batch_size, train_data, train_labels, dev_data, dev_labels, test_data,
        test_labels)
    training_routine(model, n_iters, gpu, train_generator, dev_generator,
                     test_generator)