Example #1
0
def create_dataloaders_rand(file, seed, total_im, n_selected, batch_size, norm=True, cuda=True):
    """Build train/val/test DataLoaders, training on a random subset of the train tier.

    Args:
        file: path to the HDF5 file backing the StaticImageSet.
        seed: RNG seed used to draw the training subset reproducibly.
        total_im: unused in this function; kept for interface compatibility — TODO confirm with callers.
        n_selected: number of training images to sample without replacement.
        batch_size: batch size shared by all three loaders.
        norm: if True, append response normalization fitted on the train tier.
        cuda: if True, transforms produce CUDA tensors and the cached mean is moved to GPU.

    Returns:
        dict with keys 'train', 'val', 'test'; each DataLoader carries
        `img_shape` and `n_neurons` attributes, and the train loader also
        exposes `transformed_mean` (mean response over the whole set).
    """
    dat = StaticImageSet(file, 'images', 'responses')
    dat.transforms = [ToTensor(cuda=cuda)]
    if norm:
        dat.transforms.append(Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=cuda))

    # Local RandomState draws the same indices as np.random.seed(seed) +
    # np.random.choice, but without mutating numpy's global RNG state.
    rng = np.random.RandomState(seed)
    selected_indexes = rng.choice(np.where(dat.tiers == 'train')[0], size=n_selected, replace=False)
    selected_set = Subset(dat, selected_indexes)

    train_loader = DataLoader(selected_set, batch_size=batch_size)
    train_loader.img_shape = dat.img_shape
    train_loader.n_neurons = dat.n_neurons
    # Mean response over the full (transformed) dataset; only move to GPU when requested
    # (the original always called .cuda(), which breaks on CPU-only runs).
    transformed_mean = torch.tensor(dat[()].responses.mean(axis=0))
    train_loader.transformed_mean = transformed_mean.cuda() if cuda else transformed_mean

    val_loader = DataLoader(dat,
                            sampler=SubsetRandomSampler(np.where(dat.tiers == 'validation')[0]),
                            batch_size=batch_size)
    val_loader.img_shape = dat.img_shape
    val_loader.n_neurons = dat.n_neurons

    test_loader = DataLoader(dat,
                             sampler=SubsetRandomSampler(np.where(dat.tiers == 'test')[0]),
                             batch_size=batch_size)
    test_loader.img_shape = dat.img_shape
    test_loader.n_neurons = dat.n_neurons

    return {'train': train_loader, 'val': val_loader, 'test': test_loader}
Example #2
0
def create_dataloaders_synth(file, seed, selected_idx, batch_size, norm=True, cuda=True):
    """Build train/val/test DataLoaders, training on an explicit set of image indices.

    Args:
        file: path to the HDF5 file backing the StaticImageSet.
        seed: seeds numpy's global RNG; nothing random happens in this
            function itself — presumably for downstream reproducibility,
            kept for backward compatibility (TODO confirm it is needed).
        selected_idx: indices of the images used for training.
        batch_size: batch size shared by all three loaders.
        norm: if True, append response normalization fitted on the train tier.
        cuda: if True, transforms produce CUDA tensors and the cached mean is moved to GPU.

    Returns:
        dict with keys 'train', 'val', 'test'; each DataLoader carries
        `img_shape` and `n_neurons` attributes, and the train loader also
        exposes `transformed_mean` (mean response over the whole set).
    """
    dat = StaticImageSet(file, 'images', 'responses')
    dat.transforms = [ToTensor(cuda=cuda)]
    if norm:
        dat.transforms.append(Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=cuda))

    # NOTE: global side effect retained from the original implementation.
    np.random.seed(seed)

    selected_set = Subset(dat, selected_idx)
    train_loader = DataLoader(selected_set, batch_size=batch_size)
    # (The original assigned img_shape/n_neurons twice; once is enough.)
    train_loader.img_shape = dat.img_shape
    train_loader.n_neurons = dat.n_neurons
    # Only move the cached mean to GPU when requested (the original always
    # called .cuda(), which breaks on CPU-only runs).
    transformed_mean = torch.as_tensor(dat[()].responses.mean(axis=0))
    train_loader.transformed_mean = transformed_mean.cuda() if cuda else transformed_mean

    val_loader = DataLoader(dat,
                            sampler=SubsetRandomSampler(np.where(dat.tiers == 'validation')[0]),
                            batch_size=batch_size)
    val_loader.img_shape = dat.img_shape
    val_loader.n_neurons = dat.n_neurons

    test_loader = DataLoader(dat,
                             sampler=SubsetRandomSampler(np.where(dat.tiers == 'test')[0]),
                             batch_size=batch_size)
    test_loader.img_shape = dat.img_shape
    test_loader.n_neurons = dat.n_neurons

    return {'train': train_loader, 'val': val_loader, 'test': test_loader}
Example #3
0
def create_dataloaders_al(file='', seed=0, selected_idx=None, batch_size=64, norm=False, cuda=False):
    """Build train/val/test DataLoaders over V1 L2/3 neurons for active learning.

    Args:
        file: path to the HDF5 file backing the StaticImageSet.
        seed: seeds numpy's global RNG (no draw happens here; kept for
            downstream reproducibility — TODO confirm it is needed).
        selected_idx: indices of the images used for training. Defaults to an
            empty set. (The original used the mutable default ``set([])``,
            which is shared across calls.)
        batch_size: batch size shared by all three loaders.
        norm: if True, append response normalization fitted on the train tier.
        cuda: if True, transforms produce CUDA tensors.

    Returns:
        dict with keys 'train', 'val', 'test'; each DataLoader carries
        `img_shape` and `n_neurons` attributes, and the train loader also
        exposes `transformed_mean` (from dat.transformed_mean()).
    """
    if selected_idx is None:
        selected_idx = set()

    np.random.seed(seed)
    dat = StaticImageSet(file, 'images', 'responses')
    # Restrict to V1 layer-2/3 neurons.
    idx = (dat.neurons.area == 'V1') & (dat.neurons.layer == 'L2/3')
    # Honor the `cuda` parameter (the original hard-coded cuda=True here while
    # the Normalized transform below already used the parameter).
    dat.transforms = [Subsample(np.where(idx)[0]), ToTensor(cuda=cuda)]
    if norm:
        dat.transforms.append(Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=cuda))

    selected_set = Subset(dat, selected_idx)
    train_loader = DataLoader(selected_set, batch_size=batch_size)
    train_loader.img_shape = dat.img_shape
    train_loader.n_neurons = dat.n_neurons
    # transformed_mean() returns a pair; only the response mean is kept.
    _, train_loader.transformed_mean = dat.transformed_mean()

    val_loader = DataLoader(dat,
                            sampler=SubsetRandomSampler(np.where(dat.tiers == 'validation')[0]),
                            batch_size=batch_size)
    val_loader.img_shape = dat.img_shape
    val_loader.n_neurons = dat.n_neurons

    test_loader = DataLoader(dat,
                             sampler=SubsetRandomSampler(np.where(dat.tiers == 'test')[0]),
                             batch_size=batch_size)
    test_loader.img_shape = dat.img_shape
    test_loader.n_neurons = dat.n_neurons

    return {'train': train_loader, 'val': val_loader, 'test': test_loader}
# Register the experimenter in the DataJoint Fabrikant table (idempotent via skip_duplicates).
# NOTE(review): the email contains ';' instead of '.' ("gmail;com") — likely a typo; verify
# before relying on this record.
aa = dict(architect_name="matthias Depoortere",
          email="depoortere.matthias@gmail;com",
          affiliation="sinzlab",
          dj_username="******")
Fabrikant().insert1(aa, skip_duplicates=True)
# Open the preprocessed scan; `dat` is only used below to fit the Normalized transform.
dat = StaticImageSet('/notebooks/data/static20892-3-14-preproc0.h5', 'images',
                     'responses')
# Seed the experiment table (idempotent).
Seed().insert([{'seed': 13}], skip_duplicates=True)

# Second handle on the same file; this one gets the transform pipeline.
my_dat = StaticImageSet('/notebooks/data/static20892-3-14-preproc0.h5',
                        'images', 'responses')
# Restrict to V1 layer-2/3 neurons.
idx = (my_dat.neurons.area == 'V1') & (my_dat.neurons.layer == 'L2/3')
# NOTE(review): Normalized is fitted on `dat` (un-subsampled responses) while the
# pipeline it joins subsamples `my_dat`'s neurons — presumably intended to be
# `my_dat.tiers` / `my_dat.responses`; both handles read the same file, so the
# tier mask matches, but confirm the responses dimensionality is as expected.
my_dat.transforms = [
    Subsample(np.where(idx)[0]),
    ToTensor(cuda=True),
    Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=True)
]

# Materialize the test-tier images for later evaluation.
test_images = my_dat[()].images[np.where(my_dat.tiers == "test")]
# Collects the hash of each model configuration created in the loop below.
model_hashes = []
for i in range(8):
    model_config = load_obj('best_model_config')
    model_config['random_seed'] = i
    model_config['gpu_id'] = 0
    model_config['dropout_p'] = 0.8

    model_hash = make_hash(model_config)
    model_hashes.append(model_hash)
    model_entry = dict(configurator="nnsetup.models.create_model",