Example #1
    # Fit a full-rank PCA on the flattened training data
    pca_all = utils.PCA(D=train_data_all_flat,
                        n_components=train_data_all_flat.shape[1])

    logger.info('Whitening data...')
    train_data_all_flat = pca_all.transform(D=train_data_all_flat,
                                            whiten=True,
                                            ZCA=True)
    # Restore the leading (first two) dimensions of the original array
    train_data_all = train_data_all_flat.T.reshape(train_data_all.shape[0:2] +
                                                   (-1, ))

    # Apply the PCA fitted on the training data to the test data as well
    test_data_flat = pca_all.transform(D=test_data_flat, whiten=True, ZCA=True)
    test_data = test_data_flat.T.reshape(test_data.shape[0:2] + (-1, ))

    # Datasets
    train_set = utils.NumpyDataset(train_data_all,
                                   train_labels_all,
                                   transform=utils.NumpyToTensor())
    # Note: the validation and test sets are built from the same test data here
    val_set = utils.NumpyDataset(test_data,
                                 test_labels,
                                 transform=utils.NumpyToTensor())
    test_set = utils.NumpyDataset(test_data,
                                  test_labels,
                                  transform=utils.NumpyToTensor())
    # Data loaders
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=8)
    val_loader = DataLoader(val_set,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=8)  # truncated in the source; assumed to mirror train_loader
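
For reference, utils.PCA is project-specific code whose internals are not shown here. A minimal NumPy sketch of ZCA whitening (an illustration assuming an (n_samples, n_features) layout, not the original implementation):

    import numpy as np

    def zca_whiten(X, eps=1e-5):
        """ZCA-whiten X of shape (n_samples, n_features): decorrelate the
        features and rescale them to unit variance, while rotating back into
        the original basis (unlike plain PCA whitening)."""
        X = X - X.mean(axis=0)                         # center each feature
        cov = X.T @ X / (X.shape[0] - 1)               # feature covariance
        U, S, _ = np.linalg.svd(cov)                   # eigenvectors/eigenvalues
        W = U @ np.diag(1.0 / np.sqrt(S + eps)) @ U.T  # ZCA whitening matrix
        return X @ W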
Example #2
    # Load the square-resampled AID data from HDF5
    f = h5py.File(data_directory + '/aid' + str(resize_size[0]) + '.h5', 'r')
    data = f['images'][()]
    labels = f['labels'][()]
    class_names = f.attrs['class_names']
    f.close()

    # Normalize data
    data = utils.normalize(data)

    data = data.reshape(data.shape[0], data.shape[1], resize_size[0],
                        resize_size[1])

    # Datasets
    dataset = utils.NumpyDataset(data, labels, transform=utils.NumpyToTensor())

    # Run the experiments
    for seed in seeds:
        # Data loaders
        logger.info('Split data with seed {}'.format(seed))
        torch.manual_seed(seed)
        np.random.seed(seed)
        # Stratified split: permute each class independently, then reserve a
        # validating_ratio fraction of its samples for validation
        train_indices = []
        val_indices = []
        for cls in np.unique(labels):
            indices = np.where(labels == cls)
            indices = np.random.permutation(indices[0])
            train_indices.append(
                indices[:int(len(indices) * (1 - validating_ratio))])
            val_indices.append(
                indices[int(len(indices) * (1 - validating_ratio)):])  # truncated in the source; completed as the complement of the train slice
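
The snippet is truncated here; presumably the per-class index lists are then flattened and used to build loaders over the single dataset. A minimal sketch of that continuation using torch's SubsetRandomSampler (an assumption, not the original code):

        import numpy as np
        from torch.utils.data import DataLoader, SubsetRandomSampler

        # Assumed continuation: flatten the per-class lists and sample from them
        train_loader = DataLoader(dataset, batch_size=batch_size,
                                  sampler=SubsetRandomSampler(np.concatenate(train_indices)))
        val_loader = DataLoader(dataset, batch_size=batch_size,
                                sampler=SubsetRandomSampler(np.concatenate(val_indices)))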
Example #3
        # f is an h5py.File opened earlier in the (truncated) snippet
        data = f['images'][()]
        labels = f['labels'][()]
        index_matrix = torch.tensor(f['index_matrix'][()])
        class_names = f.attrs['class_names']
        f.close()

        # Rearrange pixels into a grid: index_matrix[i, j] holds the source
        # pixel index for cell (i, j); cells marked -1 stay zero
        data_shifted = np.zeros(data.shape[0:2] + index_matrix.shape).astype(
            np.float32)
        for i in range(index_matrix.shape[0]):
            for j in range(index_matrix.shape[1]):
                if int(index_matrix[i, j]) != -1:
                    data_shifted[:, :, i, j] = data[:, :,
                                                    int(index_matrix[i, j])]

        sh_dataset = utils.NumpyDataset(data_shifted,
                                        labels,
                                        transform=utils.NumpyToTensor())
        hex_dataset = utils.NumpyDataset(data,
                                         labels,
                                         transform=utils.NumpyToTensor())
        sh_loader = DataLoader(sh_dataset,
                               batch_size=batch_size,
                               shuffle=False)
        hex_loader = DataLoader(hex_dataset,
                                batch_size=batch_size,
                                shuffle=False)

        logger.info('batch size: {} iterations: {}'.format(
            batch_size, len(hex_loader)))

        # Add batch and channel dimensions: (H, W) -> (1, 1, H, W)
        index_matrix = index_matrix.unsqueeze_(0).unsqueeze_(0)
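
For reference, the (i, j) double loop above can be collapsed into a single fancy-indexing assignment. A self-contained NumPy sketch with toy shapes (an illustration, not the original code):

    import numpy as np

    # Toy shapes: 4 samples, 3 channels, 7 source pixels mapped onto a 3x3
    # grid; -1 marks cells with no source pixel (left at zero)
    data = np.random.rand(4, 3, 7).astype(np.float32)
    index_matrix = np.array([[-1, 0, 1],
                             [2, 3, 4],
                             [5, 6, -1]])

    valid = index_matrix != -1
    data_shifted = np.zeros(data.shape[:2] + index_matrix.shape,
                            dtype=np.float32)
    # Gather every valid source pixel in one vectorized assignment
    data_shifted[:, :, valid] = data[:, :, index_matrix[valid]]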