Code Example #1
# Assumed imports for this snippet; show_transforms and plot_images are
# project-level helpers that are not shown here:
import numpy as np
import torch
from torchvision import datasets
from torch.utils.data import DataLoader, SubsetRandomSampler
def get_dataloaders(data_dir,
                    valid_part,
                    batch_size,
                    image_transforms,
                    show_transform=False,
                    show_sample=False):
    '''
        Split an ImageFolder train set into train and validation parts by shuffling indices within each class (a stratified split).
    '''
    np.random.seed(12)
    torch.manual_seed(12)
    data = {
        'train': datasets.ImageFolder(data_dir, image_transforms['train']),
        'val': datasets.ImageFolder(data_dir, image_transforms['val']),
    }
    train_idx, valid_idx = [], []
    counts = (data['train'].targets.count(i)
              for i in data['train'].class_to_idx.values())
    acc = 0
    for numb in counts:
        valid_split = int(np.floor(valid_part * numb))
        indices = list(range(acc, acc + numb))
        acc += numb
        np.random.shuffle(indices)
        train_idx.extend(indices[:numb - valid_split])
        valid_idx.extend(indices[numb - valid_split:])

    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    if show_transform:
        show_transforms(data_dir, 0, 0, image_transforms['train'])
        # exit()

    # visualize some images
    if show_sample:
        sample_loader = DataLoader(
            data['train'],
            batch_size=9,
            sampler=train_sampler,
        )
        data_iter = iter(sample_loader)
        images, labels = next(data_iter)  # iterator .next() was removed in Python 3
        plot_images(images, labels, data['train'].classes)
        # exit()

    # Dataloader iterators
    dataloaders = {
        'train':
        DataLoader(data['train'],
                   batch_size=batch_size,
                   sampler=train_sampler,
                   drop_last=True),
        'val':
        DataLoader(data['val'],
                   batch_size=batch_size,
                   sampler=valid_sampler,
                   drop_last=True),
    }
    return dataloaders
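
A minimal usage sketch for get_dataloaders follows; the directory path, image sizes, and normalization statistics are illustrative assumptions, not values from the project. Note the design choice above: two ImageFolder objects are built over the same directory so the train and validation splits can use different transforms, while the disjoint SubsetRandomSamplers keep the splits from overlapping.

from torchvision import transforms

# Hypothetical transform dictionary; the 224-pixel crop and ImageNet
# normalization statistics are assumptions for illustration.
image_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
}

dataloaders = get_dataloaders('data/train',    # hypothetical ImageFolder root
                              valid_part=0.2,  # hold out 20% of each class
                              batch_size=32,
                              image_transforms=image_transforms)
for images, labels in dataloaders['train']:
    pass  # training step goes here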
Code Example #2
File: spikes.py Project: zsomko/visualcortex
# Assumed imports (the code uses numpy's functional style); init_pux,
# pux_integration, mixt_samp, rate_from_pot, integrate and plot_images are
# project-level helpers that are not shown here:
import pickle
from numpy import arange, zeros, size, load, save, sum
from numpy.random import poisson
from scipy import interpolate, sparse
def spiketrains(name, make = 1, plot = 1, mode = 0):
   # mode: 0 is poisson
   #       1 is integration
   spike_file = "txt/%s_spikes_spiketrain.npy"%name
   contr_file = "txt/%s_spikes_contrasts.npy"%name
   spike_img  = "images/%s_spiketrains.png"%name
   l  = 100 # number of samples for each patch
   contrNr = 10
   imgDim = 16
   uDim = 248
   dt = 0.02 # 20ms
   same_int = 20 # how many integration points for one sample, in 20 ms
   time_interp = arange(0, (l-1)*dt+dt/same_int, dt/same_int)
   len_interp = len(time_interp)
   if(make == 1):
      k  = 10 # 10 Hz
      n  = 1.1 # the exponent of nonlinearity
      ut = 1.9 # the threshold potential
      
      imgs = load('../patches/txt/patches_vanhateren_256.npy') #loadtxt('../patches/txt/patches_vanhateren_64.txt')
      imgs = imgs.reshape(size(imgs)//imgDim**2, imgDim, imgDim)  # // for integer division under Python 3
      imgNr = contrNr * size(imgs,0)
      imgDB = zeros((imgNr, imgDim, imgDim))
      for i in range(0, int(imgNr/contrNr)):
         for j in range(0, contrNr):
            imgDB[i*contrNr + j] = imgs[i] * ( (j+1) * 2.0/contrNr)
      #plot_images(imgDB, "", "./images/imgDB.png",contr=False, rng=[0,3])

      start = 0
      end = 9
      pointNr = 20
      param_folder = "../parameters"
      sigma = load("%s/sigma.npy"%param_folder)
      A = load("%s/filters.npy"%param_folder)
      C = load("%s/cov.npy"%param_folder)
       
#      A = identity(dim**2) #numpy.random.random((16,16))
#      C = identity(dim**2)
      sum_cov = zeros(imgNr)
      z_arr = zeros(imgNr)
      spike_arr = zeros((imgNr, len_interp, uDim))
      [sat, icorr, s, s2] = init_pux(A, C, start, end, pointNr, s2=sigma**2)
      print ("done init pux")
      for ni in range(0, imgNr): # imgNr
         print ("img nr.%d"%ni)
         [c, m, z] = pux_integration(imgDB[ni], sat, icorr, s, s2, start, end, pointNr)
         z_arr[ni] = z
         #sampling based on these values
         samps = mixt_samp(c, m, s, l)
         rates = rate_from_pot(samps, k, ut, n)
         fnc = interpolate.interp1d(arange(0,l*dt,dt), rates, kind="cubic", axis=0)
         rates_interp = fnc(time_interp)
         rates_interp = rates_interp*(rates_interp>0)
#         plt.plot(time_interp, rates_interp[:,3])
#         plt.show()
         if(mode == 0):
            spike_arr[ni] = poisson(rates_interp * dt/same_int)
         else:
            spike_arr[ni] = integrate(rates_interp * dt/same_int) 
      #save spike_arr in sparse format       
      f = open(spike_file,'wb')
      pickle.dump(sparse.coo_matrix(spike_arr.flatten()), f, -1)
      f.close()
      print (sum(spike_arr.reshape(imgNr*len_interp, uDim)))
      save(contr_file, z_arr)
      print ("done generating files")
   else:
      #load spike_arr from sparse format
      f = open(spike_file,'rb')
      spike_arr = pickle.load(f).toarray().reshape(-1, len_interp, uDim)  # toarray(), not todense(): a np.matrix cannot be reshaped to 3-D
      imgNr = size(spike_arr, 0)  # imgNr is only set in the make == 1 branch above
      z_arr = load(contr_file)
      
   if (plot == 1):
      small_spike_arr = zeros((imgNr, l, uDim))
      for i in range(0, l):
         small_spike_arr[:,i,:] = sum(spike_arr[:,same_int*i:same_int*(i+1),:] , axis = 1)
      plot_images(small_spike_arr, "", spike_img, contr=False, rng=[0,100])
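
The heart of spiketrains() is the conversion from sampled rates to spikes: each rate trace is cubically upsampled onto the integration grid, clipped at zero, and turned into Poisson counts whose mean is rate times bin width. A self-contained sketch of just that step with made-up rates; linspace replaces arange on the fine grid to avoid a floating-point overshoot past the interpolation range.

import numpy as np
from scipy import interpolate

l, same_int, dt = 100, 20, 0.02                 # samples per patch, bins per sample, 20 ms
rates = np.abs(np.random.randn(l, 4)) * 10.0    # made-up rate traces in Hz for 4 units

# Upsample each rate trace onto the integration grid, as in spiketrains():
x = np.arange(l) * dt
time_interp = np.linspace(0, (l - 1) * dt, (l - 1) * same_int + 1)
fnc = interpolate.interp1d(x, rates, kind="cubic", axis=0)
rates_interp = fnc(time_interp)
rates_interp = rates_interp * (rates_interp > 0)  # cubic interpolation can undershoot below zero

# Poisson spike counts per bin: expected count = rate * bin width
spikes = np.random.poisson(rates_interp * dt / same_int)
print(spikes.shape, spikes.sum())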
Code Example #3
File: spikes.py Project: zsomko/visualcortex
# Assumed imports; rate_from_pot, integrate and plot_images are project-level
# helpers that are not shown here:
import pickle
from numpy import arange, array, zeros, sum
from numpy.random import rand, uniform, multivariate_normal, poisson
from scipy import interpolate, sparse
def gspiketrains(name, make = 1, plot = 1, mode = 0):
   # mode: 0 is poisson
   #       1 is integration   
   spike_file = "txt/%s_spikes_spiketrain.npy"%name
   samp_file = "txt/%s_samps_spiketrain.npy"%name
   rate_file = "txt/%s_rates_spiketrain.npy"%name
   spike_img  = "images/%s_spiketrains.png"%name
   l  = 200 # number of samples for each patch
   imgNr = 16*16
   dim   = 2 # 8
   dt = 0.02 # 20ms
   same_int = 20 # how many integration points for one sample, in 20 ms
   time_interp = arange(0, (l-1)*dt+dt/same_int, dt/same_int)
   len_interp = len(time_interp)
   if(make == 1):
      C = rand(dim**2,dim**2)
      C[0:2,0:2] = array([[1,0],[0,1]])
      
      k  = 10 # 10 Hz
      n  = 1.1 # the exponent of nonlinearity
      ut = 1.9 # the threshold potential
         
      z_arr = zeros(imgNr)
      spike_arr = zeros((imgNr, len_interp, dim**2))
      samp_arr = zeros((imgNr, l, dim**2))
      rate_arr = zeros((imgNr, l, dim**2))
      ma = uniform(0, 1, dim**2)
      ma[0:2] = [0.5, 0.5]
      for ni in range(0, imgNr):
         print (ni)
         a = ni/11.0 - 2
         m = ma*ni/11.0 + a
         samps = multivariate_normal(m,C,l)
         samp_arr[ni] = samps
         rates = rate_from_pot(samps, k, ut, n)
         rate_arr[ni] = rates
         fnc = interpolate.interp1d(arange(0,l*dt,dt), rates, kind="cubic", axis=0)
         rates_interp = fnc(time_interp)
         rates_interp = rates_interp*(rates_interp>0)
#         plt.plot(time_interp, rates_interp[:,3])
#         plt.show()
         if(mode == 0):
            spike_arr[ni] = poisson(rates_interp * dt/same_int)
         else:
            spike_arr[ni] = integrate(rates_interp * dt/same_int)  
      #save spike_arr in sparse format
      f = open(spike_file,'wb')
      pickle.dump(sparse.coo_matrix(spike_arr.flatten()), f, -1)
      f.close()
      f = open(samp_file,'wb')
      pickle.dump(sparse.coo_matrix(samp_arr.flatten()), f, -1)
      f.close()
      f = open(rate_file,'wb')
      pickle.dump(sparse.coo_matrix(rate_arr.flatten()), f, -1)
      f.close()
   else:
      #load spike_arr from sparse format
      f = open(spike_file,'rb')
      spike_arr = pickle.load(f).toarray().reshape(imgNr, len_interp, dim**2)  # toarray(), not todense(): a np.matrix cannot be reshaped to 3-D
   if (plot == 1):
      small_spike_arr = zeros((imgNr, l, dim**2))
      for i in range(0, l):
         small_spike_arr[:,i,:] = sum(spike_arr[:,same_int*i:same_int*(i+1),:] , axis = 1)
      plot_images(small_spike_arr, "", spike_img, contr=False, rng=[0,10])
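
Both spike-train functions persist their arrays by flattening to 1-D and pickling a scipy COO matrix, which is compact when most bins contain no spike (the project names these files .npy even though they are pickles). A minimal round-trip sketch of that scheme; the file name and toy shape are assumptions:

import pickle
import numpy as np
from scipy import sparse

spike_arr = np.random.poisson(0.05, size=(4, 100, 16))  # toy spike counts

# Save: flatten to 1-D; coo_matrix stores it as a sparse 1 x N row.
with open('spikes_demo.pkl', 'wb') as f:
    pickle.dump(sparse.coo_matrix(spike_arr.flatten()), f, -1)

# Load: toarray() gives a plain ndarray that can be reshaped back to 3-D
# (todense() would return a 2-D np.matrix, which cannot be reshaped to 3-D).
with open('spikes_demo.pkl', 'rb') as f:
    restored = pickle.load(f).toarray().reshape(4, 100, 16)

assert (restored == spike_arr).all()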
Code Example #4
File: train.py Project: chriscremer/Other_Code
# Assumed imports; get_batch, to_print, plot_curve2 and plot_images are
# project-level helpers that are not shown here:
import time
import torch
from torch import optim
def train(exp_dict):

    S = exp_dict

    torch.manual_seed(999)

    data_dict = {}
    data_dict['steps'] = []
    data_dict['warmup'] = []
    data_dict['welbo'] = []

    if exp_dict['train_encoder_only']:
        opt_all = optim.Adam(S['vae'].encoder.parameters(), lr=.0001)
        print('training encoder only')
    else:
        opt_all = optim.Adam(S['vae'].parameters(), lr=.0001)
        print('training encoder and decoder')

    start_time = time.time()
    step = 0
    for step in range(0, S['max_steps'] + 1):

        batch = get_batch(S['train_x'], S['batch_size'])
        warmup = min((step + S['load_step']) / float(S['warmup_steps']), 1.)

        outputs = S['vae'].forward(batch, warmup=warmup)

        opt_all.zero_grad()
        loss = -outputs['welbo']
        loss.backward()
        opt_all.step()

        if step % S['display_step'] == 0:
            print(
                # 'S:{:5d}'.format(step+load_step),
                'S:{:5d}'.format(step),
                'T:{:.2f}'.format(time.time() - start_time),
                # 'BPD:{:.4f}'.format(LL_to_BPD(outputs['elbo'].data.item())),
                'welbo:{:.4f}'.format(outputs['welbo'].data.item()),
                # 'elbo:{:.4f}'.format(outputs['elbo'].data.item()),
                # 'lpx:{:.4f}'.format(outputs['logpx'].data.item()),
                # 'lpz:{:.4f}'.format(outputs['logpz'].data.item()),
                # 'lqz:{:.4f}'.format(outputs['logqz'].data.item()),
                # 'lpx_v:{:.4f}'.format(valid_outputs['logpx'].data.item()),
                # 'lpz_v:{:.4f}'.format(valid_outputs['logpz'].data.item()),
                # 'lqz_v:{:.4f}'.format(valid_outputs['logqz'].data.item()),
                # 'warmup:{:.4f}'.format(warmup),
            )

            start_time = time.time()

            # model.eval()
            # with torch.no_grad():
            #     valid_outputs = model.forward(x=valid_x[:50].cuda(), warmup=1., inf_net=infnet_valid)
            #     svhn_outputs = model.forward(x=svhn[:50].cuda(), warmup=1., inf_net=infnet_svhn)
            # model.train()

            if step > S['start_storing_data_step']:

                data_dict['steps'].append(step)
                data_dict['warmup'].append(warmup)
                data_dict['welbo'].append(to_print(outputs['welbo']))

            if step % S['trainingplot_steps'] == 0 and step > 0 and len(
                    data_dict['steps']) > 2:

                plot_curve2(data_dict, S['exp_dir'])

            if step % S['save_params_step'] == 0 and step > 0:

                S['vae'].encoder.save_params_v3(S['params_dir'],
                                                step,
                                                name='encoder_params')
                S['vae'].generator.save_params_v3(S['params_dir'],
                                                  step,
                                                  name='generator_params')

                # model.save_params_v3(save_dir=params_dir, step=step+load_step)
                # infnet_valid.save_params_v3(save_dir=params_dir, step=step+load_step, name='valid')
                # infnet_svhn.save_params_v3(save_dir=params_dir, step=step+load_step, name='svhn')

            if step % S['viz_steps'] == 0 and step > 0:

                recon = to_print(outputs['x_hat'])  #[B,784]

                plot_images(to_print(batch), recon, S['images_dir'], step)
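
The warm-up factor in the loop above ramps linearly from load_step / warmup_steps to 1 and is handed to the model; weighting the KL term of the ELBO by it (a "warmed" ELBO, hence welbo) is the usual reading, though the model internals are not shown here. A quick sketch of the schedule with illustrative values:

# Linear warm-up as computed in the training loop; load_step offsets the
# schedule when resuming from a checkpoint.
load_step, warmup_steps = 0, 10000
for step in (0, 2500, 5000, 10000, 20000):
    warmup = min((step + load_step) / float(warmup_steps), 1.)
    print(step, warmup)  # -> 0.0, 0.25, 0.5, 1.0, 1.0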
Code Example #5
File: train.py Project: chriscremer/Other_Code
# Assumed imports; get_batch, to_print, plot_curve2, plot_images,
# plot_images_dif_zs, get_sentence and question_idx_to_token are project-level
# names that are not shown here:
import time
import torch
from torch import optim
from torch.optim import lr_scheduler
def train(exp_dict):

    S = exp_dict
    model = S['vae']

    # torch.manual_seed(999)
    
    data_dict = {}
    data_dict['steps'] = []
    data_dict['welbo'] = []
    data_dict['lpx'] = []
    data_dict['lpz'] = []
    data_dict['lqz'] = []
    data_dict['warmup'] = []
    data_dict['lr'] = []

    lr = .004

    if exp_dict['train_encoder_only']:
        opt_all = optim.Adam(model.encoder.parameters(), lr=lr, weight_decay=.0000001)
        print('training encoder only')
    else:
        opt_all = optim.Adam(model.parameters(), lr=lr, weight_decay=.0000001)
    # Create the scheduler for both branches; the original built it only in the
    # else branch, so lr_sched.step() below raised a NameError whenever
    # train_encoder_only was set.
    lr_sched = lr_scheduler.StepLR(opt_all, step_size=1, gamma=0.999995)

    start_time = time.time()
    step = 0
    for step in range(0, S['max_steps'] + 1):

        img_batch, text_batch = get_batch(image_dataset=S['train_x'], text_dataset=S['train_y'], batch_size=S['batch_size'])
        # warmup = min((step+S['load_step']) / float(S['warmup_steps']), 1.)
        # warmup = max(50. - (step / float(S['warmup_steps'])), 1.)
        warmup = 1.

        outputs = model.forward(img_batch, text_batch, warmup=warmup)

        opt_all.zero_grad()
        loss = -outputs['welbo']
        loss.backward()
        opt_all.step()
        lr_sched.step()

        if step % S['display_step'] == 0:
            print(
                # 'S:{:5d}'.format(step+load_step),
                'S:{:5d}'.format(step),
                'T:{:.2f}'.format(time.time() - start_time),
                # 'BPD:{:.4f}'.format(LL_to_BPD(outputs['elbo'].data.item())),
                'welbo:{:.4f}'.format(outputs['welbo'].data.item()),
                'elbo:{:.4f}'.format(outputs['elbo'].data.item()),
                'lpx:{:.4f}'.format(outputs['logpxz'].data.item()),
                'lpz:{:.4f}'.format(outputs['logpz'].data.item()),
                'lqz:{:.4f}'.format(outputs['logqz'].data.item()),
                # 'lpx_v:{:.4f}'.format(valid_outputs['logpx'].data.item()),
                # 'lpz_v:{:.4f}'.format(valid_outputs['logpz'].data.item()),
                # 'lqz_v:{:.4f}'.format(valid_outputs['logqz'].data.item()),
                'warmup:{:.2f}'.format(warmup),
                )

            start_time = time.time()


            # model.eval()
            # with torch.no_grad():
            #     valid_outputs = model.forward(x=valid_x[:50].cuda(), warmup=1., inf_net=infnet_valid)
            #     svhn_outputs = model.forward(x=svhn[:50].cuda(), warmup=1., inf_net=infnet_svhn)
            # model.train()


            if step > S['start_storing_data_step']:

                data_dict['steps'].append(step)
                data_dict['warmup'].append(warmup)
                data_dict['welbo'].append(to_print(outputs['welbo']))
                data_dict['lpx'].append(to_print(outputs['logpxz']))
                data_dict['lpz'].append(to_print(outputs['logpz']))
                data_dict['lqz'].append(to_print(outputs['logqz']))
                data_dict['lr'].append(lr_sched.get_last_lr()[0])  # get_lr() is deprecated in recent PyTorch



            if step % S['trainingplot_steps'] == 0 and step > 0 and len(data_dict['steps']) > 2:

                plot_curve2(data_dict, S['exp_dir'])

            if step % S['save_params_step'] == 0 and step > 0:
                
                # model.encoder.save_params_v3(S['params_dir'], step, name='encoder_params')
                # model.generator.save_params_v3(S['params_dir'], step, name='generator_params')
                model.save_params_v3(S['params_dir'], step, name='model_params')

                # model.save_params_v3(save_dir=params_dir, step=step+load_step)
                # infnet_valid.save_params_v3(save_dir=params_dir, step=step+load_step, name='valid')
                # infnet_svhn.save_params_v3(save_dir=params_dir, step=step+load_step, name='svhn')

            if step % S['viz_steps'] == 0 and step > 0:

                recon = to_print(outputs['x_hat']) #[B,784]
                plot_images(to_print(img_batch), recon, S['images_dir'], step)


            if step % int(S['viz_steps'] / 5) == 0 and step > 0:

                image1 = img_batch[0].view(1,3,112,112)
                image2 = img_batch[1].view(1,3,112,112)
                text1 = text_batch[0].view(1,9)
                text2 = text_batch[1].view(1,9)
                z = model.get_z(image1, text1)
                new_image1 = model.generate_given_z_y(y=text1, z=z)
                new_image2 = model.generate_given_z_y(y=text2, z=z)

                plot_images_dif_zs(to_print(image1), to_print(image2), 
                                    get_sentence(question_idx_to_token, text1[0], [3,4]), 
                                    get_sentence(question_idx_to_token, text2[0], [3,4]), 
                                    to_print(new_image1), to_print(new_image2),
                                    image_dir=S['images_dir'], step=step)
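
With step_size=1, the StepLR schedule above multiplies the learning rate by gamma = 0.999995 after every step, i.e. lr_t = 0.004 * 0.999995**t, so the rate halves roughly every ln(2) / 5e-6, about 139k steps. A quick check:

import math

lr0, gamma = 0.004, 0.999995              # values from the snippet above
half_life = math.log(2) / -math.log(gamma)
print(round(half_life))                   # ~138629 steps per halving
for t in (0, 100_000, 1_000_000):
    print(t, lr0 * gamma ** t)            # 0.004, ~0.00243, ~0.000027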