Example #1
import numpy as np
import torch
from torch.utils.data import DataLoader, Subset
from torch.utils.data.sampler import SubsetRandomSampler
# StaticImageSet, ToTensor, Normalized and Subsample are project-specific
# helpers assumed importable from the surrounding codebase.

def create_dataloaders_rand(file, seed, total_im, n_selected, batch_size, norm=True, cuda=True):
    dat = StaticImageSet(file, 'images', 'responses')
    #idx = (dat.neurons.area == 'V1') & (dat.neurons.layer == 'L2/3')
    dat.transforms = [ToTensor(cuda=cuda)]
    if norm:
        dat.transforms.append(Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=cuda))
    np.random.seed(seed)
    selected_indexes = np.random.choice(np.where(dat.tiers == 'train')[0], size=n_selected, replace=False)
    selected_set = Subset(dat, selected_indexes)
    train_loader = DataLoader(selected_set,
                              batch_size=batch_size)

    train_loader.img_shape = dat.img_shape
    train_loader.n_neurons = dat.n_neurons
    # per-neuron mean of the transformed responses; move to GPU only if requested
    mean_responses = torch.tensor(dat[()].responses.mean(axis=0))
    train_loader.transformed_mean = mean_responses.cuda() if cuda else mean_responses

    val_loader = DataLoader(dat,
                            sampler=SubsetRandomSampler(np.where(dat.tiers == 'validation')[0]),
                            batch_size=batch_size)
    val_loader.img_shape = dat.img_shape
    val_loader.n_neurons = dat.n_neurons

    test_loader = DataLoader(dat,
                             sampler=SubsetRandomSampler(np.where(dat.tiers == 'test')[0]),
                             batch_size=batch_size)

    test_loader.img_shape = dat.img_shape
    test_loader.n_neurons = dat.n_neurons

    loaders = {'train': train_loader, 'val': val_loader, 'test': test_loader}

    return loaders
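
A minimal usage sketch (hedged: the file path mirrors the one used later in these examples, and all hyperparameter values are placeholders; note that total_im is accepted but unused by the function):

loaders = create_dataloaders_rand(file='/notebooks/data/static20892-3-14-preproc0.h5',
                                  seed=13, total_im=5000, n_selected=500,
                                  batch_size=64)
for images, responses in loaders['train']:
    print(images.shape, responses.shape)  # e.g. (64, 1, 36, 64) and (64, n_neurons)
    break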
Example #2
def create_dataloaders_al(file='', seed=0, selected_idx=set([]), batch_size=64, norm=False, cuda=False):
    np.random.seed(seed)
    dat = StaticImageSet(file, 'images', 'responses')
    idx = (dat.neurons.area == 'V1') & (dat.neurons.layer == 'L2/3')
    # honor the cuda flag rather than hardcoding cuda=True
    dat.transforms = [Subsample(np.where(idx)[0]), ToTensor(cuda=cuda)]
    if norm:
        dat.transforms.append(Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=cuda))

    # Subset expects an indexable sequence, so materialize the set as a sorted list
    selected_set = Subset(dat, sorted(selected_idx))
    train_loader = DataLoader(selected_set,
                              batch_size=batch_size)
    train_loader.img_shape = dat.img_shape
    train_loader.n_neurons = dat.n_neurons
    _, train_loader.transformed_mean = dat.transformed_mean()

    val_loader = DataLoader(dat,
                            sampler=SubsetRandomSampler(np.where(dat.tiers == 'validation')[0]),
                            batch_size=batch_size)
    val_loader.img_shape = dat.img_shape
    val_loader.n_neurons = dat.n_neurons

    test_loader = DataLoader(dat,
                             sampler=SubsetRandomSampler(np.where(dat.tiers == 'test')[0]),
                             batch_size=batch_size)

    test_loader.img_shape = dat.img_shape
    test_loader.n_neurons = dat.n_neurons

    loaders = {'train': train_loader, 'val': val_loader, 'test': test_loader}

    return loaders
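
A hedged sketch of the acquisition loop create_dataloaders_al appears designed for (assuming dat is a StaticImageSet as above): selected_idx grows by a batch of indices each round; acquire_indices is a hypothetical scoring helper, not part of the original code:

selected_idx = set(np.random.choice(np.where(dat.tiers == 'train')[0],
                                    size=50, replace=False))
for _ in range(10):
    loaders = create_dataloaders_al(file='/notebooks/data/static20892-3-14-preproc0.h5',
                                    seed=0, selected_idx=selected_idx, batch_size=64)
    # ... train a model on loaders['train'] here ...
    selected_idx |= set(acquire_indices(loaders, n=50))  # hypothetical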
Example #3
def create_dataloaders_synth(file, seed, selected_idx, batch_size, norm=True, cuda=True):
    dat = StaticImageSet(file, 'images', 'responses')
    dat.transforms = [ToTensor(cuda=cuda)]
    if norm:
        dat.transforms.append(Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=cuda))
    np.random.seed(seed)
    selected_set = Subset(dat, selected_idx)
    train_loader = DataLoader(selected_set,
                              batch_size=batch_size)
    train_loader.img_shape = dat.img_shape
    train_loader.n_neurons = dat.n_neurons
    mean_responses = torch.as_tensor(dat[()].responses.mean(axis=0))
    train_loader.transformed_mean = mean_responses.cuda() if cuda else mean_responses

    val_loader = DataLoader(dat,
                            sampler=SubsetRandomSampler(np.where(dat.tiers == 'validation')[0]),
                            batch_size=batch_size)
    val_loader.img_shape = dat.img_shape
    val_loader.n_neurons = dat.n_neurons

    test_loader = DataLoader(dat,
                             sampler=SubsetRandomSampler(np.where(dat.tiers == 'test')[0]),
                             batch_size=batch_size)

    test_loader.img_shape = dat.img_shape
    test_loader.n_neurons = dat.n_neurons

    loaders = {'train': train_loader, 'val': val_loader, 'test': test_loader}

    return loaders
Example #4
def main():
    # Assumed context: rf_shape, n_neurons and random_seed are module-level
    # globals in the original script; gen_gabor_RF and compute_activity_simple
    # are project helpers (simulated Gabor receptive fields and their responses);
    # Poisson below is torch.distributions.Poisson.
    file = h5py.File('/notebooks/data/static20892-3-14-preproc0.h5', "r")
    dat = StaticImageSet('/notebooks/data/static20892-3-14-preproc0.h5',
                         'images', 'responses')
    img_shape = dat.img_shape[2:]
    gabor_rf = gen_gabor_RF(img_shape, rf_shape, n=n_neurons, seed=random_seed)
    images = dat[()].images
    images = images.reshape(6000, 36, 64)  # 6000 stimuli of 36x64 pixels
    firing_rates = compute_activity_simple(images / 255, gabor_rf)  # pixels scaled to [0, 1]
    dist = Poisson(torch.tensor(firing_rates))
    responses = dist.sample().numpy()
    data_file = h5py.File("toy_dataset.hdf5", "w")

    im_set = data_file.create_dataset('images', data=dat[()].images)
    response_set = data_file.create_dataset('responses', data=responses)
    tier_set = data_file.create_dataset('tiers', data=file['tiers'])

    data_file.close()
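
A quick sanity check of the file written above (a sketch; exact shapes depend on the source data):

with h5py.File('toy_dataset.hdf5', 'r') as f:
    print(f['images'].shape, f['responses'].shape, f['tiers'].shape)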
Example #5

import pickle

def load_obj(file):
    with open('./data/' + file + '.pkl', 'rb') as f:
        return pickle.load(f)
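
For reference, a plausible counterpart that would have produced these pickles (save_obj is assumed, not shown in the original):

def save_obj(obj, file):
    with open('./data/' + file + '.pkl', 'wb') as f:
        pickle.dump(obj, f)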


Fabrikant().insert1(dict(fabrikant_name='Matthias Depoortere',
                         email="*****@*****.**",
                         affiliation='sinzlab',
                         dj_username="******"),
                    skip_duplicates=True)

Seed().insert([{'seed': 13}], skip_duplicates=True)

dat = StaticImageSet('/notebooks/toy_data/toy_dataset.hdf5', 'images',
                     'responses')
TOTAL_IM = np.where(dat.tiers == 'train')[0].size

model_config = load_obj('best_model_config')
model_config['random_seed'] = 5
model_entry = dict(model_fn="nnsetup.models.create_model",
                   model_config=model_config,
                   model_fabrikant="Matthias Depoortere",
                   model_comment="Best model hp on full dataset")
Model().add_entry(**model_entry)

trainer_config = load_obj('best_train_config')
trainer_entry = dict(trainer_fn="nnsetup.trainer.train_model",
                     trainer_config=trainer_config,
                     trainer_fabrikant="Matthias Depoortere",
                     trainer_comment="best trainer on full dataset")
# Assumed completion, mirroring Model().add_entry above:
Trainer().add_entry(**trainer_entry)

Example #6
def load_obj(file):
    with open('./data/' + file + '.pkl', 'rb') as f:
        return pickle.load(f)


def calc_var(preds):
    # Std across ensemble members, then mean over images: one value per neuron.
    # Note: despite the name, this returns a standard deviation, not a variance.
    variances = np.stack(preds, axis=0).std(axis=0).mean(axis=0)
    return variances
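
Usage sketch: given predictions from an ensemble of models on the same images, calc_var yields one uncertainty score per neuron (shapes below are illustrative):

preds = [np.random.rand(100, 10) for _ in range(8)]  # stand-ins for 8 models' outputs
per_neuron_uncertainty = calc_var(preds)             # shape (10,)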


aa = dict(architect_name="Matthias Depoortere",
          email="depoortere.matthias@gmail.com",
          affiliation="sinzlab",
          dj_username="******")
Fabrikant().insert1(aa, skip_duplicates=True)
dat = StaticImageSet('/notebooks/data/static20892-3-14-preproc0.h5', 'images',
                     'responses')
Seed().insert([{'seed': 13}], skip_duplicates=True)

my_dat = LabeledImageSet('/notebooks/data/static20892-3-14-preproc0.h5',
                         'images', 'responses')
idx = (my_dat.neurons.area == 'V1') & (my_dat.neurons.layer == 'L2/3')
my_dat.transforms = [
    Subsample(np.where(idx)[0]),
    ToTensor(cuda=True),
    Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=True)
]

TOTAL_IM = np.where(dat.tiers == 'train')[0].size
MAX_IM = TOTAL_IM
N_AQUIRE = 50
n_im = 500

Example #7
from nnfabrik.main import *
from nnsetup.al_tools import load_latest_model


def load_obj(file):
    with open('./data/' + file + '.pkl', 'rb') as f:
        return pickle.load(f)


aa = dict(architect_name="Matthias Depoortere",
          email="depoortere.matthias@gmail.com",
          affiliation="sinzlab",
          dj_username="******")
Fabrikant().insert1(aa, skip_duplicates=True)
dat = StaticImageSet('/notebooks/data/static20892-3-14-preproc0.h5', 'images',
                     'responses')
Seed().insert([{'seed': 13}], skip_duplicates=True)

my_dat = StaticImageSet('/notebooks/data/static20892-3-14-preproc0.h5',
                        'images', 'responses')
idx = (my_dat.neurons.area == 'V1') & (my_dat.neurons.layer == 'L2/3')
my_dat.transforms = [
    Subsample(np.where(idx)[0]),
    ToTensor(cuda=True),
    Normalized(np.where(dat.tiers == 'train')[0], dat.responses, cuda=True)
]

test_images = my_dat[()].images[np.where(my_dat.tiers == "test")]
model_hashes = []

for i in range(8):