def test_word_and_pronunciation_pairs_are_correct_size():
    np_words = np.load(CMU_NP_WORDS_FILE_PATH)
    np_pronunciations = np.load(CMU_NP_PRONUNCIATIONS_FILE_PATH)
    num_pairs = get_number_of_word_pronunciation_pairs()
    assert np_words.shape[0] == np_pronunciations.shape[0] == num_pairs
    assert np_words.shape[1] == MAX_WORD_SIZE
    assert np_pronunciations.shape[1] == MAX_PRONUNCIATION_SIZE
def lars_regression_noise_ipyparallel(pars): 
    import numpy as np
    import os
    import sys
    import gc
    import time  # time.time() is used below for the elapsed-time bookkeeping
        
    
    Y_name,C_name,noise_sn,idxs_C, idxs_Y=pars
    Y=np.load(Y_name,mmap_mode='r')
    Y=np.array(Y[idxs_Y,:])
    C=np.load(C_name,mmap_mode='r')
    C=np.array(C)
    _,T=np.shape(C)
    #sys.stdout = open(str(os.getpid()) + ".out", "w")
    st=time.time()
    As=[]    
    #print "*****************:" + str(idxs_Y[0]) + ',' + str(idxs_Y[-1])
    sys.stdout.flush()    
    for y,px in zip(Y,idxs_Y):  
        #print str(time.time()-st) + ": Pixel" + str(px)
        sys.stdout.flush()    
        c=C[idxs_C[px],:]
        if np.size(c)>0:             
            sn=noise_sn[px]**2*T            
            _,_,a,_,_=lars_regression_noise(y, c.T, 1, sn)
            if not np.isscalar(a):                
                a=a.T  
                 
            As.append((px,idxs_C[px],a))
    
    del Y
    del C
    gc.collect()
    
    return As
def _data_generator(powerfilevec, **cmdargs):
  head_skip = cmdargs['head_skip']
  ismerged = len(powerfilevec) == 1
  if ismerged:
    print('Processing merged frequency power data')
    npz = np.load(powerfilevec[0])
    filenamelist = sorted(npz.keys())
    print('\t%d merged files' % (len(filenamelist)))
    print('\t%d will be skipped' % (head_skip))

    for fname in filenamelist[head_skip:]:
      datadict = npz[fname].item()
      data = datadict['data']
      header = datadict['header']
      yield(fname, data, header)
  else:
    powerfilevec.sort()
    for pfile in powerfilevec[head_skip:]:
      npzfile = np.load(pfile)
      data = npzfile['data']
      header = npzfile['header'].item()
      if type(header) == str:
        import json
        header = json.loads(header)

      yield (pfile, data, header)
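# A minimal, self-contained sketch (not part of the original code): it fabricates two
# un-merged power files in the 'data'/'header' layout _data_generator expects, with the
# header stored as a JSON string, then iterates over them. File names and header fields
# are illustrative assumptions.
import json
import numpy as np

for i in range(2):
  np.savez('power_%d.npz' % i,
           data=np.arange(12).reshape(3, 4),
           header=json.dumps({'fs': 250.0, 'channel': i}))

for pfile, data, header in _data_generator(['power_0.npz', 'power_1.npz'], head_skip=0):
  print(pfile, data.shape, header)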
Example #4
	def generateConnectionMatrix(self, o_shape, generate):
		if self.file == 'default':
			self.setFName(o_shape)
		
		try:
			if generate:
				# deliberately load a missing file so the IOError branch below regenerates the weights
				np.load('asd')
			else:
				Wi=np.load(self.file)
				print('[info] Weights loaded from file!')
				print('Shape = ' + str(Wi.shape))
		except IOError:
			print("[info] Weights file wasn't found. Generating new connections")
			kern1 = gkern2(self.shape,self.sigma)
			#kern1 = np.zeros(filter_shape)
			Wi = kernel2connection(kern1, self.i_shape, o_shape)
			#Wi /= np.sum(Wi,1).reshape((Wi.shape[0],1))*15
			print('Shape = ' + str(Wi.shape))
			if np.sum(Wi,1)[0] != 1:
				Wi /= np.sum(Wi,1).reshape((Wi.shape[0],1))*self.k
			np.save(self.file,Wi)
		#if not self.R:
		#	Wi *= -(np.identity(Wi.shape[0])-1)

		return Wi
Example #5
def compute_signif_conf_Z_list(cor_mat_file,conf_cor_mat_file,coords_file):       
        
    import rpy,os
    import nibabel as nib
    import numpy as np
    
    from dmgraphanalysis.utils_cor import export_List_net_from_list,export_Louvain_net_from_list
    from dmgraphanalysis.utils_cor import return_signif_conf_net_list
    from dmgraphanalysis.utils_plot import plot_cormat
    
    print "loading cor_mat_file"
    
    cor_mat = np.load(cor_mat_file)
    
    print "loading conf_cor_mat_file"
    
    conf_cor_mat = np.load(conf_cor_mat_file)
    
    print('load coords')
    
    coords = np.array(np.loadtxt(coords_file),dtype = int)
    
    print "computing net_list by thresholding conf_cor_mat based on distance and net_threshold"
    
    net_list,binary_signif_matrix = return_signif_conf_net_list(cor_mat,conf_cor_mat)
    
    print(binary_signif_matrix.shape)

    print("saving binary_signif_matrix")
    
    binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.npy')
    
    np.save(binary_signif_matrix_file,binary_signif_matrix)
    
    print "plotting binary_signif_matrix"
    
    plot_binary_signif_matrix_file = os.path.abspath('binary_signif_matrix.eps')
    
    plot_cormat(plot_binary_signif_matrix_file,binary_signif_matrix,list_labels = [])
    
    ## Z correl_mat as list of edges
    
    print "saving net_list as list of edges"
    
    net_List_file = os.path.abspath('net_List_signif_conf.txt')
    
    export_List_net_from_list(net_List_file,net_list)
    
    ### Z correl_mat as Louvain format
    
    print "saving net_list as Louvain format"
    
    net_Louvain_file = os.path.abspath('net_Louvain_signif_conf.txt')
    
    export_Louvain_net_from_list(net_Louvain_file,net_list,coords)
    
    #net_List_file = ''
    #net_Louvain_file = ''
    
    return net_List_file, net_Louvain_file
Example #6
def generate_samplesfmri(num_ppl,num_nodes,num_scans):
    print "-----------------------"
    print "generating fMRI samples"
    print "-----------------------"
    if not os.path.exists('Samples_fMRI'):
        os.makedirs('Samples_fMRI')
    if not os.path.exists('Theta_fMRI'):
        os.makedirs('Theta_fMRI')  
    ppl=range(0,num_ppl)
    Wf=np.load('Wf')
    ThetaFiles=[]
    count=0
    for i in ppl:
        dir='person'+str(i)
        ages=np.load(dir+'/age')
        for age in ages:
            X=np.load(dir+'/b'+str(age)+'.npy')
            theta=np.dot(Wf,X)
            filename='yf_'+str(i)+'_'+str(age)
            thetaname='th_'+filename
            ThetaFiles.append(thetaname)
            np.save('Theta_fMRI/'+thetaname,theta)
            Y=np.random.multivariate_normal([0]*num_nodes,theta,num_scans)
            np.save('Samples_fMRI/'+filename,Y.T)
            count=count+1
    count=count*num_scans
    np.save('Theta_fMRI/ThetaFiles',ThetaFiles)
    print(str(count)+" Samples of fMRI created")
Example #7
def dobetterstuff(inpath):
    data_files = [f for f in os.listdir(inpath) if f.endswith('.mel.npy')]
    random.shuffle(data_files)
    artists = set([f[:18] for f in data_files])
    artist_string_to_id = dict([(s,i) for i, s in enumerate(artists)])

    def get_split(datafiles___, splitpercent):
        # gen = filtered_stratified_split(datafiles___,
        #                                 sklearn.cross_validation.StratifiedShuffleSplit,
        #                                 [1] * len(datafiles___), n_iterations=1, test_size=splitpercent)
        gen = sklearn.cross_validation.ShuffleSplit(len(datafiles___), 1, splitpercent)
        for i_trs, i_tes in gen:
            return [datafiles___[i] for i in i_trs],  [datafiles___[i] for i in i_tes]

    training_files, test_files =  get_split(data_files, .2)
    training_files, validation_files = get_split(training_files, .2)

    print(training_files)
    print(test_files)
    print(validation_files)

    train_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in training_files])
    train_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in training_files])
    test_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in test_files])
    test_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in test_files])
    validation_set_y = np.hstack([[artist_string_to_id[f[:18]]] * 129 for f in validation_files])
    validation_set_x = np.vstack([np.load(os.path.join(inpath, f)) for f in validation_files])

    datasets = [(train_set_x, train_set_y), (validation_set_x, validation_set_y), (test_set_x, test_set_y)]
    return datasets
def plotForce():
    figure(size=3,aspect=0.5)
    subplot(1,2,1)
    from EvalTraj import plotFF
    plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8)
    subplot_annotate()
    
    subplot(1,2,2)
    for i in [1,2,3,4]:
        R=np.squeeze(np.load('Rdpse%d.npy'%i))
        R=stats.nanmedian(R,axis=2)[:,1:,:]
        dps=np.linspace(-1,1,201)[1:]
        plt.plot(dps,R[:,:,2].mean(0));
    plt.legend([0,0.1,0.2,0.3],loc=3) 
    i=2
    R=np.squeeze(np.load('Rdpse%d.npy'%i))
    R=stats.nanmedian(R,axis=2)[:,1:,:]
    mn=np.argmin(R,axis=1)
    y=np.random.randn(mn.shape[0])*0.00002+0.0438
    plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39  ,  0.76,  0.64])
    plt.xlabel('Displacement of Force Origin')
    plt.ylabel('Average Net Force Magnitude')
    hh=dps[mn[:,2]]
    err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0])
    err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0])
    m=np.mean(hh)
    print(m, m-err, m+err)
    np.save('force',[m, m-err,m+err,m-err2,m+err2])
    plt.xlim([-0.5,0.5])
    plt.ylim([0.0435,0.046])
    plt.grid(b=True,axis='x')
    subplot_annotate()
    def load(filename):

        filename_extension = filename.split(".")[-1]

        if filename_extension == "npz":
            filename_npz = filename.replace(".npz", "")+".npz"
            filename_data = filename.replace(".npz", "")+".npy"
        elif filename_extension == "npy":
            filename_npz = filename.replace(".npy", "")+".npz"
            filename_data = filename.replace(".npy", "")+".npy"

        try:
            file_content = np.load(filename_npz)
            vectors_shape = (file_content["np_twoform_3"].size,file_content["np_twoform_0"].size,file_content["np_twoform_1"].size)
            vectors = TwoformVectorsEigenvectors(np.memmap(filename_data, dtype=np.complex128, mode='c', shape=vectors_shape))
        except:
            print("Falling back to load_npz")
            data_dict = AutocorrelationFunctionIO.load_npz(filename)

            if "twoform_4" in data_dict:
                return data_dict
            else:
                print("Loading wavefronts")
                file_content = np.load(filename_npz)
                vectors = TwoformVectorsWavefronts(file_content["np_twoform_0"],file_content["np_twoform_1"], filename)

        data_dict = dict()
        for key in file_content.keys():
            data_dict[key.replace("np_", "")] = file_content[key]

        data_dict["twoform_4"] = vectors

        return data_dict
Example #10
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['wmr'] = np.zeros( (len(self.Rs),181,181,nS,), dtype=np.float32 )
        KERNELS['wmh'] = np.zeros( (len(self.ICVFs),181,181,nS,), dtype=np.float32 )
        KERNELS['iso'] = np.zeros( (len(self.d_ISOs),nS,), dtype=np.float32 )

        nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Cylinder(s)
        for i in range(len(self.Rs)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmr'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Zeppelin(s)
        for i in range(len(self.ICVFs)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['wmh'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Ball(s)
        for i in range(len(self.d_ISOs)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
Example #11
def test_btable_prepare():

    sq2 = np.sqrt(2) / 2.
    bvals = 1500 * np.ones(7)
    bvals[0] = 0
    bvecs = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1],
                      [sq2, sq2, 0],
                      [sq2, 0, sq2],
                      [0, sq2, sq2]])
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt.info
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    bvecs = np.where(np.isnan(bvecs), 0, bvecs)
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt2 = gradient_table(bvals, bvecs.T)
    npt.assert_array_equal(bt2.bvecs, bvecs)
    btab = np.concatenate((bvals[:, None], bvecs), axis=1)
    bt3 = gradient_table(btab)
    npt.assert_array_equal(bt3.bvecs, bvecs)
    npt.assert_array_equal(bt3.bvals, bvals)
    bt4 = gradient_table(btab.T)
    npt.assert_array_equal(bt4.bvecs, bvecs)
    npt.assert_array_equal(bt4.bvals, bvals)
    # Test for proper inputs (expects either bvals/bvecs or 4 by n):
    assert_raises(ValueError, gradient_table, bvecs)
def do_stan(data_dir, j, seed, T, counts):
    print('Doing Stan.')
    time_stan = []
    metric_stan = []
    for i in [5,10,15,20]:
        print('\t{}'.format(i))
        stan_file = join(data_dir, 'lda_stan_kwargs.py')
        out_file = join(data_dir, 'stan_ll_{}.npz'.format(j))
        phi_file = join(data_dir, 'stan_phi_{}.npz'.format(j))
        theta_file = join(data_dir, 'stan_theta_{}.npz'.format(j))
        with open(join(data_dir, 'lda_stan_0.py'), 'r') as fin, open(
                stan_file, 'w') as fout:
            fout.write("kwargs = {{'seed': {}, 'iter':{} }}\n".format(seed, i))
            fout.write("out_file = '{}'\n".format(out_file))
            fout.write("phi_file = '{}'\n".format(phi_file))
            fout.write("theta_file = '{}'\n".format(theta_file))
            fout.write(fin.read())

        start = time.time()
        call_cmd('python3 {}'.format(stan_file))
        time_stan.append(time.time()-start)

        mu = np.load(theta_file)['arr_0']
        phi = np.load(phi_file)['arr_0']
        ll = ull(mu, phi, T, counts)
        metric_stan.append(ll)
    return time_stan, metric_stan
Example #13
    def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) :
        if doMergeB0:
            nS = 1+self.scheme.dwi_count
            merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx))
        else:
            nS = self.scheme.nS
            merge_idx = np.arange(nS)
        KERNELS = {}
        KERNELS['model'] = self.id
        KERNELS['D']     = np.zeros( (len(self.d_perps),181,181,nS), dtype=np.float32 )
        KERNELS['CSF']   = np.zeros( (len(self.d_isos),nS), dtype=np.float32 )

        nATOMS = len(self.d_perps) + len(self.d_isos)
        progress = ProgressBar( n=nATOMS, prefix="   ", erase=True )

        # Tensor compartment(s)
        for i in range(len(self.d_perps)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx]
            progress.update()

        # Isotropic compartment(s)
        for i in range(len(self.d_isos)):
            lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) )
            KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx]
            progress.update()

        return KERNELS
Example #14
def exercise_4_1():

    exp_t = np.load('exp_t.npy')
    exp_somav = np.load('exp_v.npy')
    exp_somav -=  exp_somav[0]
    exp_somav /= abs(exp_somav.max())

    soma_rall, dend_rall = return_ball_and_stick_soma()
    stim = insert_current_clamp(soma_rall(0.5))
    t, v_rall = run_simulation(soma_rall(0.5))
    v_rall -= v_rall[0]
    v_rall /= abs(v_rall.max())

    soma_ball = return_ball_soma()
    stim_ball = insert_current_clamp(soma_ball(0.5))
    t_ball, v_ball = run_simulation(soma_ball(0.5))
    v_ball -= v_ball[0]
    v_ball /= abs(v_ball.max())

    fig = plt.figure()
    ax1 = fig.add_subplot(111, xlabel="Time [ms]", ylabel="Voltage [mV]")
    ax1.plot(t, exp_somav, 'gray', label='"Experiment"')
    ax1.plot(t, v_rall, 'g', label='Rall')
    ax1.plot(t_ball, v_ball, 'b', label='ball')
    plt.legend(loc=4, frameon=False)

    plt.savefig('exercise_4_1_.png')
    plt.show()
Example #15
def load_data(data_locations, file_idx_location, base_directory='data/cached/'):
    '''
    Loads data from each of the five blocks.

    Args:
        data_locations (list<str>): Locations to the data files.
        file_idx_location (str): Location of the fileidx.mat file.
    Returns:
        list<numpy.ndarray>, list of the data from different blocks.
        list<numpy.ndarray>, list of the labels from different blocks.
    '''

    if not base_directory.endswith('/'):
        base_directory = base_directory + '/'

    all_data = list()
    all_labels = list()

    try:
        if use_cached and os.path.exists(base_directory):

            for block in range(1, 5+1):

                data = np.load(base_directory+'data-{}.npy'.format(block))
                labels = np.load(base_directory+'labels-{}.npy'.format(block))

                all_data.append(data)
                all_labels.append(labels)

            return all_data, all_labels

    except Exception as error:
        print(error)
        print('Unable to load cached files. Loading from .mat...')
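# A hedged usage sketch (assumptions: `use_cached` is a module-level flag consulted by
# load_data above, and the cache directory already holds data-<block>.npy /
# labels-<block>.npy files, which this snippet fabricates purely for illustration).
import os
import numpy as np

use_cached = True
os.makedirs('data/cached', exist_ok=True)
for block in range(1, 6):
    np.save('data/cached/data-{}.npy'.format(block), np.random.randn(10, 4))
    np.save('data/cached/labels-{}.npy'.format(block), np.random.randint(0, 2, 10))

all_data, all_labels = load_data([], 'fileidx.mat')  # returns the cached arrays
print(len(all_data), all_data[0].shape)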
Example #16
def morph_triplets_cooccur_mat(matrix):
    # Multiply the learned coefficient matrix
    np_matrix = np.load('../mat/np_mat.npy')
    vp_matrix = np.load('../mat/vp_mat.npy')
    np_matrix = (np_matrix > 0.95) * np_matrix
    vp_matrix = (vp_matrix > 0.95) * vp_matrix
    return np.dot(np.dot(np_matrix, matrix), vp_matrix)
Example #17
def load_dataset():
    
    train = numpy.load(DATA_DIR + train_filename)
    validate = numpy.load(DATA_DIR + valid_filename)
    test = numpy.load(DATA_DIR + test_filename)
    
    return train, validate, test
Example #18
    def consolidate_games(self, name, samples):
        print('>>> Creating consolidated numpy arrays')

        if self.use_generator:
            print('>>> Return generator')
            generator = DataGenerator(self.data_dir, samples)
            return generator

        files_needed = set(file_name for file_name, index in samples)
        print('>>> Total number of files: ' + str(len(files_needed)))

        file_names = []
        for zip_file_name in files_needed:
            file_name = zip_file_name.replace('.zip', '') + name
            file_names.append(file_name)

        feature_list = []
        label_list = []
        for file_name in file_names:
            X = np.load(self.data_dir + '/' + file_name + '_features.npy')
            y = np.load(self.data_dir + '/' + file_name + '_labels.npy')
            feature_list.append(X)
            label_list.append(y)
            print('>>> Done')

        features = np.concatenate(feature_list, axis=0)
        labels = np.concatenate(label_list, axis=0)

        feature_file = self.data_dir + '/' + str(self.num_planes) + '_plane_features_' + name
        label_file = self.data_dir + '/' + str(self.num_planes) + '_plane_labels_' + name

        np.save(feature_file, features)
        np.save(label_file, labels)

        return features, labels
    def test_g_solve(self):
        """
        Tests :func:`colour_hdri.calibration.debevec1997.g_solve` definition.
        """

        image_stack = ImageStack.from_files(JPG_IMAGES)
        L_l = np.log(average_luminance(image_stack.f_number,
                                       image_stack.exposure_time,
                                       image_stack.iso))
        samples = samples_Grossberg2003(image_stack.data)

        for i in range(3):
            g, lE = g_solve(samples[..., i], L_l)

            # Lower precision for unit tests under *travis-ci*.
            np.testing.assert_allclose(
                g[0:-2],
                np.load(os.path.join(
                    CALIBRATION_DIRECTORY,
                    'test_g_solve_g_{0}.npy'.format(i)))[0:-2],
                rtol=0.001,
                atol=0.001)

            # Lower precision for unit tests under *travis-ci*.
            np.testing.assert_allclose(
                lE[1:],
                np.load(os.path.join(
                    CALIBRATION_DIRECTORY,
                    'test_g_solve_lE_{0}.npy'.format(i)))[1:],
                rtol=0.001,
                atol=0.001)
def choose_training_set():
    ref_id = np.load("%s/ref_id.npz" %DATA_DIR)['arr_0']
    ref_flux = np.load("%s/ref_flux_norm.npz" %DATA_DIR)['arr_0']
    ref_ivar = np.load("%s/ref_ivar_norm.npz" %DATA_DIR)['arr_0']
    ref_label = np.load("%s/ref_label.npz" %DATA_DIR)['arr_0']
    
    # randomly pick 80% of the objects to be the training set
    nobj = len(ref_id)
    assignments = np.random.randint(10, size=nobj)
    # if you're < 8, you're training
    choose = assignments < 8
    tr_id = ref_id[choose]
    tr_flux = ref_flux[choose]
    tr_ivar = ref_ivar[choose]
    tr_label = ref_label[choose]
    np.savez("%s/tr_id.npz" %DATA_DIR, tr_id)
    np.savez("%s/tr_flux_norm.npz" %DATA_DIR, tr_flux)
    np.savez("%s/tr_ivar_norm.npz" %DATA_DIR, tr_ivar)
    np.savez("%s/tr_label.npz" %DATA_DIR, tr_label)

    val_id = ref_id[~choose]
    val_flux = ref_flux[~choose]
    val_ivar = ref_ivar[~choose]
    val_label = ref_label[~choose]
    np.savez("%s/val_id.npz" %DATA_DIR, val_id)
    np.savez("%s/val_flux_norm.npz" %DATA_DIR, val_flux)
    np.savez("%s/val_ivar_norm.npz" %DATA_DIR, val_ivar)
    np.savez("%s/val_label.npz" %DATA_DIR, val_label)
Example #21
def _cum_net_resp(node_lis, instance=0):
    r"""Function to compute the cumulative network response by reading \
    saved energy .npy files.

    :type node_lis: np.ndarray
    :param node_lis: List of nodes (ints) to read from
    :type instance: int
    :param instance: Instance flag for parallelisation, defaults to 0.

    :returns: np.ndarray cum_net_resp, list of indeces used

    .. note:: This is an internal function to ease parallel processing and \
        should not be called directly.
    """
    import os

    cum_net_resp = np.load('tmp' + str(instance) +
                           '/node_' + str(node_lis[0]) + '.npy')[0]
    os.remove('tmp' + str(instance) + '/node_' + str(node_lis[0]) + '.npy')
    indeces = np.ones(len(cum_net_resp)) * node_lis[0]
    for i in node_lis[1:]:
        node_energy = np.load('tmp' + str(instance) + '/node_' +
                              str(i) + '.npy')[0]
        updated_indeces = np.argmax([cum_net_resp, node_energy], axis=0)
        temp = np.array([cum_net_resp, node_energy])
        cum_net_resp = np.array([temp[updated_indeces[j]][j]
                                 for j in range(len(updated_indeces))])
        del temp, node_energy
        updated_indeces[updated_indeces == 1] = i
        indeces = updated_indeces
        os.remove('tmp' + str(instance) + '/node_' + str(i) + '.npy')
    return cum_net_resp, indeces
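# A minimal sketch of the on-disk layout the helper above expects (fabricated here; in
# real use the tmp<instance>/node_<id>.npy files are written by the calling code): each
# file holds a (1, n_samples) energy array, and the files are deleted as they are read.
import os
import numpy as np

os.makedirs('tmp0', exist_ok=True)
node_list = np.array([11, 12, 13])
for node in node_list:
    np.save('tmp0/node_%d.npy' % node, np.random.rand(1, 200))

cum_resp, used = _cum_net_resp(node_list, instance=0)
print(cum_resp.shape, used[:5])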
Example #22
def save_errors(filename, running_error, err_type='error'):
    running_error = np.asarray(running_error)
    savename = filename.split('.')
    savename = savename[0] + err_type + '.npz'
    # the three error types share the same append-to-history logic
    if err_type in ('error', 'acc', 'val_acc'):
        if os.path.isfile(savename):
            arr = np.load(savename)['running_error']
            running_error = np.hstack((arr, running_error))
    np.savez(savename, running_error=running_error)
    fig = plt.figure()
    plt.plot(running_error)
    plt.xlabel('Iterations')
    if err_type == 'error':
        plt.ylabel('Error')
    elif err_type == 'acc':
        plt.ylabel('Accuracy')
    elif err_type == 'val_acc':
        plt.ylabel('Validation Accuracy')
    plt.savefig(savename.replace('.npz','.png'))
    plt.close()
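# Usage sketch (file names are illustrative): successive calls append to the same .npz
# history and rewrite the training-curve plot, matching the load/hstack logic above.
save_errors('run1.log', [0.91, 0.75, 0.62, 0.55], err_type='error')
save_errors('run1.log', [0.51, 0.48], err_type='error')   # appended to the first chunk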
Example #23
	def load_data(self):
		os.chdir(self.data_in)
		if self.add_photos:
			try:
				self.otherframes = np.load("otherframes.npy")
			except:
				print "No otherframes.npy file found... using all the frames in src_imgs/."
				self.otherframes = None

			if self.otherframes is not None:
				otherframe_strings =  ["%08d.jpg" %i for i in self.otherframes]
				self.photos = otherframe_strings
			else: 
				self.photos =[f for f in os.listdir("src_imgs") if os.path.isfile(os.path.join("src_imgs", f)) and os.path.splitext(f)[1].lower()==".jpg"]

		else:
			try:
				self.keyframes = np.load("keyframes.npy")
			except:
				print "No keyframes.npy file found... using all the frames in src_imgs/."
				self.keyframes = None

			if self.keyframes is not None:
				keyframe_strings =  ["%08d.jpg" %i for i in self.keyframes]
				self.photos = keyframe_strings
			else: 
				self.photos =[f for f in os.listdir("src_imgs") if os.path.isfile(os.path.join("src_imgs", f)) and os.path.splitext(f)[1].lower()==".jpg"]
		os.chdir(self.currentDir)
Example #24
    def load(cls, fname, mmap=None):
        """
        Load a previously saved object from file (also see `save`).

        If the object was saved with large arrays stored separately, you can load
        these arrays via mmap (shared memory) using `mmap='r'`. Default: don't use
        mmap, load large arrays as normal objects.

        """
        logger.info("loading %s object from %s" % (cls.__name__, fname))
        subname = lambda suffix: fname + '.' + suffix + '.npy'
        obj = unpickle(fname)
        for attrib in getattr(obj, '__numpys', []):
            logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
            setattr(obj, attrib, numpy.load(subname(attrib), mmap_mode=mmap))
        for attrib in getattr(obj, '__scipys', []):
            logger.info("loading %s from %s with mmap=%s" % (attrib, subname(attrib), mmap))
            sparse = unpickle(subname(attrib))
            sparse.data = numpy.load(subname(attrib) + '.data.npy', mmap_mode=mmap)
            sparse.indptr = numpy.load(subname(attrib) + '.indptr.npy', mmap_mode=mmap)
            sparse.indices = numpy.load(subname(attrib) + '.indices.npy', mmap_mode=mmap)
            setattr(obj, attrib, sparse)
        for attrib in getattr(obj, '__ignoreds', []):
            logger.info("setting ignored attribute %s to None" % (attrib))
            setattr(obj, attrib, None)
        return obj
def GLM_column(featDir, colInd):
    '''
    Returns a particular column of a GLM model matrix.

    Input Parameters:
          featDir:     The .feat directory where the GLM model file exists.
                       If the motion scrubbed version of the GLM (GLM_model_ms.npz)
                       exists, then it is used to produce the output. Otherwise,
                       the original version of the GLM (GLM_model.npz) is used.
          colInd:      The column index indicating the column to be returned. As in
                       the typical Python convention, 0 corresponds to the first column.

    Returns:
          Y:           A 1D array from the GLM design matrix.
    '''
    # file business
    fGLM = os.path.join(featDir, 'GLM_model.npz')
    fGLM_ms = os.path.join(featDir, 'GLM_model_ms.npz')
    # loading the appropriate file
    if os.path.isfile(fGLM_ms):
        infile = np.load(fGLM_ms)
    else:
        infile = np.load(fGLM)
    # and extracting the column
    X = infile['X']
    Y = X[:,colInd]
    return Y
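# A minimal sketch with a fabricated .feat directory (real ones come from FSL FEAT, and
# a motion-scrubbed GLM_model_ms.npz would take precedence if present):
import os
import numpy as np

os.makedirs('example.feat', exist_ok=True)
np.savez(os.path.join('example.feat', 'GLM_model.npz'), X=np.random.randn(100, 3))

first_regressor = GLM_column('example.feat', 0)  # column 0 of the design matrix
print(first_regressor.shape)                     # (100,)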
Example #26
def data_load(dirname):
	''' load feature, label pairs from data directory

	dirname: string, data directory name
	Return: tuple of numpy arrays, feature, label pairs. '''
	npylist = os.listdir(dirname)
	estilen = len(npylist)
	L = []
	P = []
	epoch = 0
	for k, featname in enumerate(npylist):
		if k * 10 // estilen > epoch:
			epoch = k * 10 // estilen
			print('loading', epoch*10, '%')
		if not featname.startswith('p'):
			continue
		labename = 'l'+featname[1:]
		feat = numpy.load(os.path.join(dirname, featname))
		labe = numpy.load(os.path.join(dirname, labename))
		try:
			if len(L) == 0:
				P = feat
				L = labe
			else:
				P = numpy.concatenate((P, feat))
				L = numpy.concatenate((L, labe))
		except ValueError:
			print(featname, 'numpy array shape does not match', feat.shape)
	return P, L 
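# A minimal sketch (fabricated directory): data_load expects paired feature/label files
# named p*.npy and l*.npy and stacks them into two arrays.
import os
import numpy

os.makedirs('example_feats', exist_ok=True)
for idx in range(3):
    numpy.save(os.path.join('example_feats', 'p%03d.npy' % idx), numpy.random.rand(5, 8))
    numpy.save(os.path.join('example_feats', 'l%03d.npy' % idx), numpy.zeros(5, dtype=int))

P, L = data_load('example_feats')
print(P.shape, L.shape)   # (15, 8) (15,)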
Example #27
def decode_predictions(preds, top=5):
    LABELS = None
    if len(preds.shape) == 2:
        if preds.shape[1] == 2622:
            fpath = get_file('rcmalli_vggface_labels_v1.npy',
                             V1_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath)
        elif preds.shape[1] == 8631:
            fpath = get_file('rcmalli_vggface_labels_v2.npy',
                             V2_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath)
        else:
            raise ValueError('`decode_predictions` expects '
                             'a batch of predictions '
                             '(i.e. a 2D array of shape (samples, 2622)) for V1 or '
                             '(samples, 8631) for V2.'
                             'Found array with shape: ' + str(preds.shape))
    else:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 2622)) for V1 or '
                         '(samples, 8631) for V2.'
                         'Found array with shape: ' + str(preds.shape))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [[str(LABELS[i].encode('utf8')), pred[i]] for i in top_indices]
        result.sort(key=lambda x: x[1], reverse=True)
        results.append(result)
    return results
Example #28
 def load_field_values(self, file_prefix):
     if self.dX is None or self.dY is None or self.dZ is None:
         self.init_field(0.0)
     
     try:
         dX = np.load('{}dX.npy'.format(file_prefix))
         dY = np.load('{}dY.npy'.format(file_prefix))
         dZ = np.load('{}dZ.npy'.format(file_prefix))
         '''
         min_x = np.amin(np.fabs(dX[dX.nonzero()]))
         if min_x < self.dX_min:
             self.dX_min = min_x
         min_y = np.amin(np.fabs(dY[dY.nonzero()]))
         if min_y < self.dY_min:
             self.dY_min = min_y
         min_z = np.amin(np.fabs(dZ[dZ.nonzero()]))
         if min_z < self.dZ_min:
             self.dZ_min = min_z
         '''
         self.dX += dX
         self.dY += dY
         self.dZ += dZ
         
         return True
     except IOError:
         return False
def test_enhance_neurites_gradient_volume(image, module, workspace):
    resources = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "resources"))

    data = numpy.load(os.path.join(resources, "neurite.npy"))

    data = skimage.exposure.rescale_intensity(1.0 * data)

    data = numpy.tile(data, (3, 1)).reshape(3, *data.shape)

    image.pixel_data = data

    image.dimensions = 3

    module.method.value = "Enhance"

    module.enhance_method.value = "Neurites"

    module.neurite_choice.value = "Line structures"

    module.object_size.value = 8

    module.run(workspace)

    output = workspace.image_set.get_image("output")

    actual = output.pixel_data

    expected = numpy.load(os.path.join(resources, "enhanced_neurite.npy"))

    expected = numpy.tile(expected, (3, 1)).reshape(3, *expected.shape)

    numpy.testing.assert_array_almost_equal(expected, actual)
Example #30
def main(T=int(1e2)):
    A = np.load("A.npy")
    b = np.load("b.npy")

    # modify regularization parameters below
    x_sg, error_sg, l1_sg = descent(subgradient, A, b, reg=1e-1, T=T)
    x_fw, error_fw, l1_fw = descent(frank_wolfe, A, b, reg=1.5, T=T)
    # add BTLS experiments

    # add plots for BTLS
    plt.clf()
    plt.plot(error_sg, label='Subgradient')
    plt.plot(error_fw, label='Frank-Wolfe')
    plt.title('Error')
    plt.legend()
    plt.xlabel('iteration number')
    plt.ylabel('Error')
    plt.savefig('error.eps')

    plt.clf()
    plt.plot(l1_sg, label='Subgradient')
    plt.plot(l1_fw, label='Frank-Wolfe')
    plt.title("$\ell^1$ Norm")
    plt.legend()
    plt.xlabel('iteration number')
    plt.ylabel('Norm')
    plt.savefig('l1.eps')
def rmt_input():
    ############################# Load RMT Data
    rmt_data_mass = []
    rmt_data_decay = []
    rmt_data_phi = []
    for i in range(0, 40):
        array_mass = (np.load('mass_data/nparray{0}.npy'.format(i)))
        rmt_data_mass.append(array_mass)
        array_decay = (np.load('decay_data/nparray{0}.npy'.format(i)))
        rmt_data_decay.append(array_decay)
        array_phi = (np.load('phi_data/nparray{0}.npy'.format(i)))
        rmt_data_phi.append(array_phi)
    #############################

    ############################# Define RMT Paramter Space
    beta_k = np.linspace(0.01, 1.0, len(rmt_data_mass[0]))
    beta_m = np.linspace(0.01, 1.0, len(rmt_data_mass))
    x_sp = []
    y_sp = []
    for j in range(0, len(rmt_data_mass)):
        temp = [beta_m[j]] * len(rmt_data_mass[0])
        y_sp.append(temp)
        for i in range(0, len(rmt_data_mass[0])):
            x_sp.append(beta_k[i])
    #############################
    y_sp = np.reshape(y_sp, (len(rmt_data_mass) * len(rmt_data_mass[0])))

    gld_one_mass = []
    gld_two_mass = []
    gld_three_mass = []
    gld_four_mass = []
    gld_one_decay = []
    gld_two_decay = []
    gld_three_decay = []
    gld_four_decay = []
    gld_one_phi = []
    gld_two_phi = []
    gld_three_phi = []
    gld_four_phi = []

    for i in range(0, len(rmt_data_mass)):
        for j in range(0, len(rmt_data_mass[0])):
            gld_one_mass.append(rmt_data_mass[i][j][0])
            gld_one_decay.append(rmt_data_decay[i][j][0])
            gld_one_phi.append(rmt_data_phi[i][j][0])
            gld_two_mass.append(rmt_data_mass[i][j][1])
            gld_two_decay.append(rmt_data_decay[i][j][1])
            gld_two_phi.append(rmt_data_phi[i][j][1])
            gld_three_mass.append(rmt_data_mass[i][j][2])
            gld_three_decay.append(rmt_data_decay[i][j][2])
            gld_three_phi.append(rmt_data_phi[i][j][2])
            gld_four_mass.append(rmt_data_mass[i][j][3])
            gld_four_decay.append(rmt_data_decay[i][j][3])
            gld_four_phi.append(rmt_data_phi[i][j][3])

    gld_one_mass = np.reshape(gld_one_mass,
                              (len(rmt_data_mass), len(rmt_data_mass[0])))
    gld_two_mass = np.reshape(gld_two_mass,
                              (len(rmt_data_mass), len(rmt_data_mass[0])))
    gld_three_mass = np.reshape(gld_three_mass,
                                (len(rmt_data_mass), len(rmt_data_mass[0])))
    gld_four_mass = np.reshape(gld_four_mass,
                               (len(rmt_data_mass), len(rmt_data_mass[0])))

    gld_one_decay = np.reshape(gld_one_decay,
                               (len(rmt_data_decay), len(rmt_data_decay[0])))
    gld_two_decay = np.reshape(gld_two_decay,
                               (len(rmt_data_decay), len(rmt_data_decay[0])))
    gld_three_decay = np.reshape(gld_three_decay,
                                 (len(rmt_data_decay), len(rmt_data_decay[0])))
    gld_four_decay = np.reshape(gld_four_decay,
                                (len(rmt_data_decay), len(rmt_data_decay[0])))

    gld_one_phi = np.reshape(gld_one_phi,
                             (len(rmt_data_phi), len(rmt_data_phi[0])))
    gld_two_phi = np.reshape(gld_two_phi,
                             (len(rmt_data_phi), len(rmt_data_phi[0])))
    gld_three_phi = np.reshape(gld_three_phi,
                               (len(rmt_data_phi), len(rmt_data_phi[0])))
    gld_four_phi = np.reshape(gld_four_phi,
                              (len(rmt_data_phi), len(rmt_data_phi[0])))

    spline1m = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_one_mass,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline2m = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_two_mass,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline3m = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_three_mass,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline4m = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_four_mass,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)

    spline1f = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_one_decay,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline2f = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_two_decay,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline3f = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_three_decay,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline4f = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_four_decay,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)

    spline1p = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_one_phi,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline2p = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_two_phi,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline3p = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_three_phi,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)
    spline4p = sp.interpolate.Rbf(x_sp,
                                  y_sp,
                                  gld_four_phi,
                                  function='multiquadric',
                                  smooth=2,
                                  epsilon=2)

    return (spline1m, spline2m, spline3m, spline4m, spline1f, spline2f,
            spline3f, spline4f, spline1p, spline2p, spline3p, spline4p)
    np.savez(path + EXP_NAME, [nsamples, err_train, err_validation, e_test])

    return e_test


if __name__ == '__main__':

    if len(sys.argv) > 1:
        sigma = int(sys.argv[1])
        order = int(sys.argv[2])
        sigma_noise = float(sys.argv[3])
        grid = [(sigma, order, sigma_noise)]
    else:
        grid = pgrid()

    path = 'results/histogram/'
    os.makedirs(path, exist_ok=True)

    for sigma, order, sigma_noise in grid:
        print('Launch experiment for sigma={}, order={}, noise={}'.format(sigma, order, sigma_noise))
        res = single_experiment(sigma, order, sigma_noise, path)
        filepath = os.path.join(path, 'histogram_results_list_sigma{}'.format(sigma))
        new_data = [order, sigma_noise, res]
        if os.path.isfile(filepath+'.npz'):
            results = np.load(filepath+'.npz')['data'].tolist()
        else:
            results = []
        results.append(new_data)
        np.savez(filepath, data=results)

Example #33
 def _load(self, key):
     return np.load(self.index_dict[key])
def generate_sawyerhurdle_dataset(variant,
                                  segmented=False,
                                  segmentation_method='unet'):
    from multiworld.core.image_env import ImageEnv, unormalize_image

    env_id = variant.get('env_id', None)
    N = variant.get('N', 10000)
    test_p = variant.get('test_p', 0.9)
    imsize = variant.get('imsize', 84)
    num_channels = variant.get('num_channels', 3)
    init_camera = variant.get('init_camera', None)
    segmentation_kwargs = variant.get('segmentation_kwargs', {})

    pjhome = os.environ['PJHOME']
    seg_name = 'seg-' + segmentation_method if segmented else 'no-seg'
    data_file_path = osp.join(pjhome, 'data/local/pre-train-vae',
                              '{}-{}-{}.npy'.format(env_id, seg_name, N))
    puck_pos_path = osp.join(
        pjhome, 'data/local/pre-train-vae',
        '{}-{}-{}-puck-pos.npy'.format(env_id, seg_name, N))

    if osp.exists(data_file_path):
        all_data = np.load(data_file_path)
        if len(all_data) >= N:
            print("load stored data at: ", data_file_path)
            n = int(len(all_data) * test_p)
            train_dataset = all_data[:n]
            test_dataset = all_data[n:]
            puck_pos = np.load(puck_pos_path)
            info = {'puck_pos': puck_pos}
            return train_dataset, test_dataset, info

    if segmented:
        print("generating vae dataset with segmented images using method: ",
              segmentation_method)
        if segmentation_method == 'unet':
            segment_func = segment_image_unet
        else:
            raise NotImplementedError
    else:
        print("generating vae dataset with original images")

    assert env_id is not None
    import gym
    import multiworld
    multiworld.register_all_envs()
    env = gym.make(env_id)

    if not isinstance(env, ImageEnv):
        env = ImageEnv(
            env,
            imsize,
            init_camera=init_camera,
            transpose=True,
            normalize=True,
        )

    info = {}
    env.reset()
    info['env'] = env

    dataset = np.zeros((N, imsize * imsize * num_channels), dtype=np.uint8)
    puck_pos = np.zeros((N, 2), dtype=float)

    for i in range(N):
        print("sawyer hurdle custom vae data set generation, number: ", i)
        if env_id == 'SawyerPushHurdle-v0':
            obs, puck_p = _generate_sawyerhurdle_dataset(env,
                                                         return_puck_pos=True)
        elif env_id == 'SawyerPushHurdleMiddle-v0':
            obs, puck_p = _generate_sawyerhurdlemiddle_dataset(
                env, return_puck_pos=True)
        else:
            raise NotImplementedError
        img = obs[
            'image_observation']  # NOTE yufei: this is already normalized image, of detype np.float64.

        if segmented:
            dataset[i, :] = segment_func(img,
                                         normalize=False,
                                         **segmentation_kwargs)
        else:
            dataset[i, :] = unormalize_image(img)
        puck_pos[i] = puck_p

    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]

    info['puck_pos'] = puck_pos

    if N >= 2000:
        print('save data to: ', data_file_path)
        all_data = np.concatenate([train_dataset, test_dataset], axis=0)
        np.save(data_file_path, all_data)
        np.save(puck_pos_path, puck_pos)

    return train_dataset, test_dataset, info
def read_model_data(filename):
    model = np.load(filename)
    return model
Example #36
args = parser.parse_args()

# Custom object needed for inference and training
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}

# Load model into GPU / CPU
print('Loading model...')
model = load_model(args.model, custom_objects=custom_objects, compile=False)

# Load test data
print('Loading test data...', end='')
import numpy as np
from data import extract_zip
data = extract_zip('input.zip')
from io import BytesIO
rgb = np.load(BytesIO(data['eigen_test_rgb.npy']))
depth = np.load(BytesIO(data['eigen_test_depth.npy']))
crop = np.load(BytesIO(data['eigen_test_crop.npy']))
print('Test data loaded.\n')

start = time.time()
print('Testing...')

e = evaluate(model, rgb, depth, crop, batch_size=6)

print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('a1', 'a2', 'a3', 'rel', 'rms', 'log_10'))
print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(e[0],e[1],e[2],e[3],e[4],e[5]))

end = time.time()
print('\nTest time', end-start, 's')
Example #37
#SavePatch('poyang_predata/patchdata', T1Patches, T2Patches)
#SavePatch('river_predata/patchdata', T1Patches, T2Patches)
#SavePatch('paviau_allfile/paviau_predata/matsamples/patchdata', T1Patches, T2Patches)
#Save the no-patch file
#SavePatch('poyang_allfile/poyang_predata/matsamples/nopatchdata', T1, T2)


#Load the .npy files and convert them to .mat format
##Load zerosneighbor
#mat1 = np.load('manaus_predata/patchdata/T1patches.npy')
#mat2 = np.load('manaus_predata/patchdata/T2patches.npy')
#io.savemat('manaus_predata/patchdata/T1patches.mat', {'T1': mat1})
#io.savemat('manaus_predata/patchdata/T2patches.mat', {'T2': mat2})

##Load sameneighbor
mat1 = np.load('paviau_allfile/paviau_predata/matsamples/patchdata/T1patches_sameneighbor.npy')
mat2 = np.load('paviau_allfile/paviau_predata/matsamples/patchdata/T2patches_sameneighbor.npy')
#io.savemat('paviau_allfile/paviau_predata/matsamples/patchdata/T1patches_sameneighbor.mat', {'T1': mat1})
#io.savemat('paviau_allfile/paviau_predata/matsamples/patchdata/T2patches_sameneighbor.mat', {'T2': mat2})

#Save the ground-truth change labels
#Ground-truth label array: true
#true = mh.colors.rgb2gray(mat3)
true = mat3
for i in range(0,true.shape[0]):
    for j in range(0,true.shape[1]):
        if true[i][j] == 255:
            true[i][j] = 1
        else:
            true[i][j] = 0
Example #38
plt.show()

##############Q3###################
test1=[]
with open('locationData.csv','r')as f:
    for line in f:
        numbers=[]
        for number in line.split(' '):
            numbers.append(float(number))
        test1.append(numbers)
test1=np.array(test1)
print((test1==data).all())
print((test1==data).any())

##############Q4###################
mat = loadmat("twoClassData.mat")
print(mat.keys())
X = mat["X"]
y = mat["y"].ravel()
X0=X[y==0,:]
X1=X[y==1,:]
plt.figure()
plt.scatter(X0[:,0],X0[:,1],c='red',edgecolors='black')
plt.scatter(X1[:,0],X1[:,1],c='blue',edgecolors='black')
plt.show()

##############Q5###################
x=np.load('x.npy')
y=np.load('y.npy')
A = np.vstack([x, np.ones(len(x))]).T
print(np.linalg.lstsq(A, y,rcond=None)[0])
cwd = os.getcwd()
print(cwd)
print(os.listdir(os.path.join(cwd, 'data')))


# In[79]:


import numpy as np
import time


# In[80]:


much_data = np.load('./data/muchdata-32-32-32.npy', encoding = 'latin1')


# In[81]:


print(len(much_data))
print(len(much_data[0][0]))
print(len(much_data[0][1]))


# In[82]:


train_data = much_data[:16]
validation_data = much_data[16:24]
Example #40
 def parse_array(self):
     try:
         self.cs_array = np.load(self.cs_filename)
     except OSError:  # not immediately obvious that this is the exception that gets raised...
         raise
     return self
    def fit(self, sentences, cc_matrix=None, learning_rate=1e-4, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False):
        # build co-occurrence matrix : cc_matrix
        # paper calls it X, so we will call it X, instead of calling
        # the training data X
        # TODO: would it be better to use a sparse matrix?
        t0 = datetime.now()
        V = self.V
        D = self.D

        if not os.path.exists(cc_matrix):
            X = np.zeros((V, V))
            N = len(sentences)
            print("number of sentences to process:", N)
            it = 0
            for sentence in sentences:
                it += 1
                if it % 10000 == 0:
                    print("processed", it, "/", N)
                n = len(sentence)
                for i in range(n):
                    # i is not the word index!!!
                    # j is not the word index!!!
                    # i just points to which element of the sequence (sentence) we're looking at
                    wi = sentence[i]

                    start = max(0, i - self.context_sz)
                    end = min(n, i + self.context_sz)

                    # we can either choose only one side as context, or both
                    # here we are doing both

                    # make sure "start" and "end" tokens are part of some context
                    # otherwise their f(X) will be 0 (denominator in bias update)
                    if i - self.context_sz < 0:
                        points = 1.0 / (i + 1)
                        X[wi,0] += points
                        X[0,wi] += points
                    if i + self.context_sz > n:
                        points = 1.0 / (n - i)
                        X[wi,1] += points
                        X[1,wi] += points

                    # left side
                    for j in range(start, i):
                        wj = sentence[j]
                        points = 1.0 / (i - j) # this is +ve
                        X[wi,wj] += points
                        X[wj,wi] += points

                    # right side
                    for j in range(i + 1, end):
                        wj = sentence[j]
                        points = 1.0 / (j - i) # this is +ve
                        X[wi,wj] += points
                        X[wj,wi] += points

            # save the cc matrix because it takes forever to create
            np.save(cc_matrix, X)
        else:
            X = np.load(cc_matrix)

        print("max in X:", X.max())

        # weighting
        fX = np.zeros((V, V))
        fX[X < xmax] = (X[X < xmax] / float(xmax)) ** alpha
        fX[X >= xmax] = 1

        print("max in f(X):", fX.max())

        # target
        logX = np.log(X + 1)

        print("max in log(X):", logX.max())

        print("time to build co-occurrence matrix:", (datetime.now() - t0))

        # initialize weights
        W = np.random.randn(V, D) / np.sqrt(V + D)
        b = np.zeros(V)
        U = np.random.randn(V, D) / np.sqrt(V + D)
        c = np.zeros(V)
        mu = logX.mean()


        costs = []
        sentence_indexes = range(len(sentences))
        for epoch in range(epochs):
            delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX
            cost = ( fX * delta * delta ).sum()
            costs.append(cost)
            print("epoch:", epoch, "cost:", cost)

            if gd:
                # gradient descent method
                # update W
                # oldW = W.copy()
                for i in range(V):
                    # for j in range(V):
                    #     W[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*U[j]
                    W[i] -= learning_rate*(fX[i,:]*delta[i,:]).dot(U)
                W -= learning_rate*reg*W
                # print "updated W"

                # update b
                for i in range(V):
                    # for j in range(V):
                    #     b[i] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])
                    b[i] -= learning_rate*fX[i,:].dot(delta[i,:])
                # b -= learning_rate*reg*b
                # print "updated b"

                # update U
                for j in range(V):
                    # for i in range(V):
                    #     U[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])*W[i]
                    U[j] -= learning_rate*(fX[:,j]*delta[:,j]).dot(W)
                U -= learning_rate*reg*U
                # print "updated U"

                # update c
                for j in range(V):
                    # for i in range(V):
                    #     c[j] -= learning_rate*fX[i,j]*(W[i].dot(U[j]) + b[i] + c[j] + mu - logX[i,j])
                    c[j] -= learning_rate*fX[:,j].dot(delta[:,j])
                # c -= learning_rate*reg*c
                # print "updated c"

            else:
                # ALS method

                # update W
                # fast way
                # t0 = datetime.now()
                for i in range(V):
                    # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(U[j], U[j]) for j in range(V)), axis=0)
                    matrix = reg*np.eye(D) + (fX[i,:]*U.T).dot(U)
                    # assert(np.abs(matrix - matrix2).sum() < 1e-5)
                    vector = (fX[i,:]*(logX[i,:] - b[i] - c - mu)).dot(U)
                    W[i] = np.linalg.solve(matrix, vector)
                # print "fast way took:", (datetime.now() - t0)

                # slow way
                # t0 = datetime.now()
                # for i in range(V):
                #     matrix2 = reg*np.eye(D)
                #     vector2 = 0
                #     for j in range(V):
                #         matrix2 += fX[i,j]*np.outer(U[j], U[j])
                #         vector2 += fX[i,j]*(logX[i,j] - b[i] - c[j])*U[j]
                # print "slow way took:", (datetime.now() - t0)

                    # assert(np.abs(matrix - matrix2).sum() < 1e-5)
                    # assert(np.abs(vector - vector2).sum() < 1e-5)
                    # W[i] = np.linalg.solve(matrix, vector)
                # print "updated W"

                # update b
                for i in range(V):
                    denominator = fX[i,:].sum() + reg
                    # assert(denominator > 0)
                    numerator = fX[i,:].dot(logX[i,:] - W[i].dot(U.T) - c - mu)
                    # for j in range(V):
                    #     numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - c[j])
                    b[i] = numerator / denominator
                # print "updated b"

                # update U
                for j in range(V):
                    # matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(W[i], W[i]) for i in range(V)), axis=0)
                    matrix = reg*np.eye(D) + (fX[:,j]*W.T).dot(W)
                    # assert(np.abs(matrix - matrix2).sum() < 1e-8)
                    vector = (fX[:,j]*(logX[:,j] - b - c[j] - mu)).dot(W)
                    # matrix = reg*np.eye(D)
                    # vector = 0
                    # for i in range(V):
                    #     matrix += fX[i,j]*np.outer(W[i], W[i])
                    #     vector += fX[i,j]*(logX[i,j] - b[i] - c[j])*W[i]
                    U[j] = np.linalg.solve(matrix, vector)
                # print "updated U"

                # update c
                for j in range(V):
                    denominator = fX[:,j].sum() + reg
                    numerator = fX[:,j].dot(logX[:,j] - W.dot(U[j]) - b  - mu)
                    # for i in range(V):
                    #     numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - b[i])
                    c[j] = numerator / denominator
                # print "updated c"

        self.W = W
        self.U = U

        plt.plot(costs)
        plt.show()
Example #42
                        help='ooooooooooo')
    parser.add_argument('--input_c', type=int, default=10,
    #parser.add_argument('--input_c', type=int, default=512,
                        help='ooooooooooo')
    parser.add_argument('--input_w', type=int, default=31,
    #parser.add_argument('--input_w', type=int, default=31,
                        help='ooooooooooo')
    parser.add_argument('--input_h', type=int, default=512,
    #parser.add_argument('--input_h', type=int, default=10,
                        help='ooooooooooo')
    args = parser.parse_args()


    fixed_images = np.zeros([args.batch_size,args.input_h,args.input_w,args.input_c])
    for i in range(args.batch_size):
        fixed_images[i,:,:,:]=np.load(os.path.join(args.dataset_path,"%06d.npz"%(i+7481)))['arr_0'].transpose(0,3,2,1)[0,:,:,:]/NORMALIZE_CONST
        #fixed_images[i,:,:,:]=np.load(os.path.join(args.dataset_path,"%06d.npz"%(i+7481)))['arr_0'][0,:,:,:]/NORMALIZE_CONST

    tf.set_random_seed(326)
    np.random.seed(326)
    fixed_noise = np.random.randn(args.batch_size, args.z_dim)

    mnistWganInv = MnistWganInv(
        x_dim=784, z_dim=args.z_dim, w=args.input_w, h=args.input_h, c=args.input_c, latent_dim=args.latent_dim,
        nf=args.nf, batch_size=args.batch_size, c_gp_x=args.c_gp_x, lamda=args.lamda,
        output_path=args.output_path)

    saver = tf.train.Saver(max_to_keep=10)


    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
Exemple #43
0
 def setUp(self):
     self.bin = pj(program_path, "BarTracker")
     self.act = np.load(pj(ACTIVATIONS_PATH, 'sample.bar_tracker.npz'))
     self.beats = [[0.091, 1], [0.8, 2], [1.481, 3], [2.148, 1]]
     self.downbeats = [0.091, 2.148]
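     # (added note) each beat entry is [time_in_seconds, position_in_bar];
     # the downbeats list keeps the times whose bar position is 1 (0.091, 2.148)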
Exemple #44
0
import sys
import numpy as np
import os.path as path

if __name__ == "__main__" :
	argv = sys.argv
	argc = len(argv);
	if argc != 3 :
		print "PARAMETER ERROR"
		print "usage : %s %s %s" % (argv[0], "viewBasedData", "shapeBasedData")
		sys.exit(-1)

	viewBasedData = np.load(argv[1])
	shapeBasedData = np.load(argv[2])
	jointData = np.append(viewBasedData, shapeBasedData, axis = 1)
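	# (added note) np.append with axis=1 concatenates the two feature matrices
	# column-wise, so both inputs must have the same number of rows (samples)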

	[baseName, fileName] = path.split(argv[1])
	dataName = path.join(baseName, "viewShapeData.npy")
	np.save(dataName, jointData)
	
Exemple #45
0
import numpy as np

a = np.load('/home/wcy/MADRL/maps/map_pool16.npy')
print(a)
Exemple #46
0
    #     learning_rate=5e-4,
    #     reg=0.1,
    #     epochs=500,
    #     gd=True,
    # )
    model.save(we_file)

if __name__ == '__main__':
    we = 'glove_model_50.npz'
    w2i = 'glove_word2idx_50.json'
    # we = 'glove_model_brown.npz'
    # w2i = 'glove_word2idx_brown.json'
    main(we, w2i, use_brown=False)
    
    # load back embeddings
    npz = np.load(we)
    W1 = npz['arr_0']
    W2 = npz['arr_1']

    with open(w2i) as f:
        word2idx = json.load(f)
        idx2word = {i:w for w,i in word2idx.items()}

    for concat in (True, False):
        print("** concat:", concat)

        if concat:
            We = np.hstack([W1, W2.T])
        else:
            We = (W1 + W2.T) / 2
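        # (added note, assuming the usual two-sided factorization shapes) if W1
        # is (V, D) and W2 is (D, V), hstack gives a (V, 2D) embedding while
        # the average keeps it at (V, D)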
Exemple #47
0
def analyze(dataset_loader: typing.Tuple[str, str, tuple, dict],
            loss_loader: typing.Tuple[str, str, tuple, dict], folder: str,
            settings: typing.Union[str, aparams.AnalysisSettings],
            accuracy_style: str, cores: int):
    """Performs the requested analysis of the given folder, which is assumed to
    have been generated from model_manager.train. The output folder structure
    is as follows:

    .. code:: none

        folder/
            analysis/
                hparams/
                    lr/
                        i/  (where i=0,1,...)
                            lr_vs_perf.(img)
                            lr_vs_smoothed_perf.(img)
                            lr_vs_perf_deriv.(img)
                            lr_vs_smoothed_perf_deriv.(img)

                        lr_vs_perf_*.(img)
                        lr_vs_smoothed_perf_*.(img)
                        lr_vs_perf_deriv_*.(img)
                        lr_vs_smoothed_perf_deriv_*.(img)
                        lr_vs_lse_smoothed_perf_then_deriv_*.(img)
                        lr_vs_lse_smoothed_perf_then_deriv_then_smooth_*.(img)
                    batch/
                        i/ (where i=0,1,...)
                            batch_vs_perf.(img)
                            batch_vs_smoothed_perf.(img)
                            batch_vs_perf_deriv.(img)
                            batch_vs_smoothed_perf_deriv.(img)

                        batch_vs_perf_*.(img)
                        batch_vs_smoothed_perf_*.(img)
                        batch_vs_perf_derivs_*.(img)
                        batch_vs_smoothed_perf_derivs_*.(img)
                        batch_vs_lse_smoothed_perf_then_derivs_*.(img)

                    TODO videos & animations
                trials/
                    i/  (where i=0,1,...)
                        epoch_vs_loss_train.(img) (*)
                        epoch_vs_loss_val.(img) (*)
                        epoch_vs_perf_train.(img) (*)
                        epoch_vs_perf_val.(img) (*)

                        pca3dvis_train_draft/
                            Only produced if settings.typical_run_pca3dvis and
                            settings.typical_run_pca3dvis_draft are set, and
                            only done on trial 0
                        pca3dvis_train/
                            Only produced if settings.typical_run_pca3dvis and
                            not settings.typical_run_pca3dvis_draft, and only
                            done on trial 0

                    epoch_vs_loss_train_*.(img) (*)
                    epoch_vs_loss_val_*.(img) (*)
                    epoch_vs_smoothed_loss_train_*.(img) (*)
                    epoch_vs_smoothed_loss_val_*.(img) (*)
                    epoch_vs_perf_train_*.(img) (*)
                    epoch_vs_smoothed_perf_train_*.(img) (*)
                    epoch_vs_perf_val_*.(img) (*)
                    epoch_vs_smoothed_perf_val_*.(img) (*)


                    (*)
                        Only produced if throughtime.npz is available for
                        the trial and settings.training_metric_images is
                        set

                    TODO more summary of trials
                    TODO text & videos & animations

                html/
                    See text_analysis.py for details

    :param dataset_loader: the module and corresponding attribute that gives a
        training and validation dataset when invoked with the specified
        arguments and keyword arguments.

    :param loss_loader: the module and corresponding attribute that gives a
        loss nn.Module when invoked with the specified arguments and keyword
        arguments

    :param folder: where the model manager saved the trials to analyze

    :param settings: the settings for analysis, or the name of the preset to
        use. Common preset names are `none`, `text`, `images`, `animations`,
        and `videos`. For the full list, see the ignite_simple.analarams
        module.

    :param accuracy_style: how performance was calculated; one of
        'classification', 'multiclass', and 'inv-loss'. See train for
        details.

    :param cores: the number of physical cores this can assume are available
        to speed up analysis.
    """
    if cores == 'all':
        cores = psutil.cpu_count(logical=False)

    dataset_loader = utils.fix_imports(dataset_loader)
    loss_loader = utils.fix_imports(loss_loader)

    settings = aparams.get_settings(settings)
    if settings.suppress_extra_images:
        imgs_to_produce = text_analysis.HTML_REFD_IMAGES
    else:
        imgs_to_produce = None

    filter_folder = os.path.join(folder, 'analysis')
    imgs_filter = _rawplot_filter(filter_folder, imgs_to_produce)

    logger = logging.getLogger(__name__)

    logger.info('Analyzing %s...', folder)

    perf_name = ('Accuracy' if accuracy_style in ('classification', 'multiclass')
                 else 'Inverse Loss')
    perf_name_short = {
        'classification': 'Accuracy (%)',
        'multiclass': 'Subset Accuracy Score',
    }.get(accuracy_style, '1/(loss+1)')

    tasks = []
    if settings.hparam_selection_specific_imgs:
        lr_folder = os.path.join(folder, 'analysis', 'hparams', 'lr')
        batch_folder = os.path.join(folder, 'analysis', 'hparams', 'batch')
        for suffix in ('', '2'):
            source = os.path.join(folder, 'hparams', f'lr_vs_perf{suffix}.npz')
            if not os.path.exists(source):
                continue

            out_folder = lr_folder + suffix
            with np.load(source) as infile:
                num_trials = infile['perfs'].shape[0]

            for trial in range(num_trials):
                real_out = os.path.join(out_folder, str(trial))
                os.makedirs(real_out, exist_ok=True)

                have_imgs = futils.fig_exists(
                    os.path.join(real_out, 'lr_vs_perf'), imgs_filter)
                if have_imgs:
                    continue

                tasks.extend([
                    dispatcher.Task(__name__, '_rawplot', (
                        source,
                        futils.make_vs_title('Learning Rate', 'Inverse Loss'),
                        'Learning Rate',
                        '1/(loss+1)',
                        'lrs',
                        slice(None),
                        'perfs',
                        slice(trial, trial + 1),
                        os.path.join(real_out, 'lr_vs_perf'),
                    ), {
                        'folder': filter_folder,
                        'images': imgs_to_produce
                    }, 1),
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title('Learning Rate', 'Inverse Loss'),
                         'Learning Rate', '1/(loss+1)', 'lrs', slice(None),
                         'smoothed_perfs', slice(trial, trial + 1),
                         os.path.join(real_out, 'lr_vs_smoothed_perf')), {
                             'folder': filter_folder,
                             'images': imgs_to_produce
                         }, 1),
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title('Learning Rate',
                                              'Inverse Loss Deriv.'),
                         'Learning Rate', '1/(loss+1) Deriv wrt. LR', 'lrs',
                         slice(None), 'perf_derivs', slice(trial, trial + 1),
                         os.path.join(real_out, 'lr_vs_perf_deriv')), {
                             'folder': filter_folder,
                             'images': imgs_to_produce
                         }, 1),
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title(
                             'Learning Rate',
                             'Inverse Loss Deriv. (Smoothed)'),
                         'Learning Rate', '1/(loss+1) Deriv wrt. LR', 'lrs',
                         slice(None), 'smoothed_perf_derivs',
                         slice(trial, trial + 1),
                         os.path.join(real_out, 'lr_vs_smoothed_perf_deriv')),
                        {
                            'folder': filter_folder,
                            'images': imgs_to_produce
                        }, 1),
                ])

            for reduction in REDUCTIONS:
                have_imgs = futils.fig_exists(
                    os.path.join(out_folder, f'lr_vs_perf_{reduction}'),
                    imgs_filter)
                if have_imgs:
                    continue

                tasks.extend([
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title('Learning Rate', 'Inverse Loss'),
                         'Learning Rate', '1/(loss+1)', 'lrs', slice(None),
                         'perfs', slice(None),
                         os.path.join(out_folder,
                                      'lr_vs_perf_' + reduction), reduction), {
                                          'folder': filter_folder,
                                          'images': imgs_to_produce
                                      }, 1),
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title('Learning Rate',
                                              'Inverse Loss (Smoothed)'),
                         'Learning Rate', '1/(loss+1)', 'lrs', slice(None),
                         'smoothed_perfs', slice(None),
                         os.path.join(out_folder, 'lr_vs_smoothed_perf_' +
                                      reduction), reduction), {
                                          'folder': filter_folder,
                                          'images': imgs_to_produce
                                      }, 1),
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title('Learning Rate',
                                              'Inverse Loss Deriv.'),
                         'Learning Rate', '1/(loss+1) Deriv wrt. LR', 'lrs',
                         slice(None), 'perf_derivs', slice(None),
                         os.path.join(out_folder, 'lr_vs_perf_deriv_' +
                                      reduction), reduction), {
                                          'folder': filter_folder,
                                          'images': imgs_to_produce
                                      }, 1),
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title(
                             'Learning Rate',
                             'Inverse Loss Deriv. (Smoothed)'),
                         'Learning Rate', '1/(loss+1) Deriv wrt. LR', 'lrs',
                         slice(None), 'smoothed_perf_derivs', slice(None),
                         os.path.join(out_folder,
                                      'lr_vs_smoothed_perf_deriv_' +
                                      reduction), reduction), {
                                          'folder': filter_folder,
                                          'images': imgs_to_produce
                                      }, 1),
                ])

            if not futils.fig_exists(
                    os.path.join(out_folder,
                                 'lr_vs_lse_smoothed_perf_then_deriv'),
                    imgs_filter):
                tasks.append(
                    dispatcher.Task(
                        __name__, '_rawplot',
                        (source,
                         futils.make_vs_title(
                             'LR', 'Deriv of LSE of Smoothed 1/(loss+1)'),
                         'Learning Rate', '1/(loss+1) Deriv wrt. LR', 'lrs',
                         slice(None), 'lse_smoothed_perf_then_derivs',
                         slice(None),
                         os.path.join(out_folder,
                                      'lr_vs_lse_smoothed_perf_then_deriv')), {
                                          'folder': filter_folder,
                                          'images': imgs_to_produce
                                      }, 1))

            if not futils.fig_exists(
                    os.path.join(
                        out_folder,
                        'lr_vs_lse_smoothed_perf_then_deriv_then_smooth'),
                    imgs_filter):
                tasks.append(
                    dispatcher.Task(__name__, '_rawplot', (
                        source,
                        futils.make_vs_title(
                            'LR',
                            'Deriv of LSE of Smoothed 1/(loss+1) (Smoothed)'),
                        'Learning Rate', '1/(loss+1) Deriv wrt. LR', 'lrs',
                        slice(None),
                        'lse_smoothed_perf_then_derivs_then_smooth',
                        slice(None),
                        os.path.join(
                            out_folder,
                            'lr_vs_lse_smoothed_perf_then_deriv_then_smooth')),
                                    {
                                        'folder': filter_folder,
                                        'images': imgs_to_produce
                                    }, 1))

            for y_varname, y_lab_long, y_lab_short in SELECTED_RANGE_YS:
                outfile_wo_ext = os.path.join(out_folder,
                                              f'lr_range_{y_varname}')
                if futils.fig_exists(outfile_wo_ext, imgs_filter):
                    continue
                tasks.append(
                    dispatcher.Task(__name__, '_highlight_selected_lr_range',
                                    (source, y_lab_long, y_lab_short,
                                     y_varname, outfile_wo_ext), {
                                         'folder': filter_folder,
                                         'images': imgs_to_produce
                                     }, 1))

        source = os.path.join(folder, 'hparams', 'bs_vs_perf.npz')
        out_folder = batch_folder
        with np.load(source) as infile:
            num_trials = infile['perfs'].shape[0]

        for trial in range(num_trials):
            real_out = os.path.join(out_folder, str(trial))
            os.makedirs(real_out, exist_ok=True)

            have_imgs = futils.fig_exists(
                os.path.join(real_out, 'batch_vs_perf'), imgs_filter)
            if have_imgs:
                continue

            tasks.extend([
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source, futils.make_vs_title(
                        'Batch Size',
                        'Inverse Loss'), 'Batch Size', '1/(loss+1)', 'bss',
                     slice(None), 'perfs', slice(trial, trial + 1),
                     os.path.join(real_out, 'batch_vs_perf')), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title('Batch Size',
                                          'Inverse Loss (Smoothed)'),
                     'Batch Size', '1/(loss+1)', 'bss', slice(None),
                     'smoothed_perfs', slice(trial, trial + 1),
                     os.path.join(real_out, 'batch_vs_smoothed_perf')), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title('Batch Size', 'Inverse Loss Deriv.'),
                     'Batch Size', '1/(loss+1) Deriv wrt. BS', 'bss',
                     slice(None), 'perf_derivs', slice(trial, trial + 1),
                     os.path.join(real_out, 'batch_vs_perf_deriv')), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title('Batch Size',
                                          'Inverse Loss Deriv. (Smoothed)'),
                     'Batch Size', '1/(loss+1) Deriv wrt. LR', 'bss',
                     slice(None), 'smoothed_perf_derivs',
                     slice(trial, trial + 1),
                     os.path.join(real_out, 'batch_vs_smoothed_perf_deriv')), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
            ])

        for reduction in REDUCTIONS:
            have_imgs = futils.fig_exists(
                os.path.join(out_folder, f'batch_vs_perf_{reduction}'))
            if have_imgs:
                continue
            tasks.extend([
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source, futils.make_vs_title(
                        'Batch Size', 'Inverse Loss'), 'Batch Size',
                     '1/(loss+1)', 'bss', slice(None), 'perfs', slice(None),
                     os.path.join(out_folder,
                                  'batch_vs_perf_' + reduction), reduction), {
                                      'folder': filter_folder,
                                      'images': imgs_to_produce
                                  }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title('Batch Size',
                                          'Inverse Loss (Smoothed)'),
                     'Batch Size', '1/(loss+1)', 'bss', slice(None),
                     'smoothed_perfs', slice(None),
                     os.path.join(out_folder, 'batch_vs_smoothed_perf_' +
                                  reduction), reduction), {
                                      'folder': filter_folder,
                                      'images': imgs_to_produce
                                  }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title('Batch Size', 'Inverse Loss Deriv.'),
                     'Batch Size', '1/(loss+1) Deriv wrt. BS', 'bss',
                     slice(None), 'perf_derivs', slice(None),
                     os.path.join(out_folder, 'batch_vs_perf_deriv_' +
                                  reduction), reduction), {
                                      'folder': filter_folder,
                                      'images': imgs_to_produce
                                  }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title('Batch Size',
                                          'Inverse Loss Deriv. (Smoothed)'),
                     'Batch Size', '1/(loss+1) Deriv wrt. BS', 'bss',
                     slice(None), 'smoothed_perf_derivs', slice(None),
                     os.path.join(out_folder, 'batch_vs_smoothed_perf_deriv_' +
                                  reduction), reduction), {
                                      'folder': filter_folder,
                                      'images': imgs_to_produce
                                  }, 1),
            ])

        if not futils.fig_exists(
                os.path.join(out_folder,
                             'batch_vs_lse_smoothed_perf_then_deriv'),
                imgs_filter):
            tasks.append(
                dispatcher.Task(
                    __name__, '_rawplot',
                    (source,
                     futils.make_vs_title(
                         'BS', 'Deriv of LSE of Smoothed 1/(loss+1)'),
                     'Batch Size', '1/(loss+1) Deriv wrt. BS', 'bss',
                     slice(None), 'lse_smoothed_perf_then_derivs', slice(None),
                     os.path.join(out_folder,
                                  'batch_vs_lse_smoothed_perf_then_deriv')), {
                                      'folder': filter_folder,
                                      'images': imgs_to_produce
                                  }, 1))

        for y_varname, y_lab_long, y_lab_short in SELECTED_RANGE_YS:
            outfile_wo_ext = os.path.join(out_folder,
                                          f'batch_range_{y_varname}')
            if futils.fig_exists(outfile_wo_ext, imgs_filter):
                continue
            tasks.append(
                dispatcher.Task(__name__, '_highlight_selected_batch_range',
                                (source, y_lab_long, y_lab_short, y_varname,
                                 outfile_wo_ext), {
                                     'folder': filter_folder,
                                     'images': imgs_to_produce
                                 }, 1))
    if settings.training_metric_imgs:
        trials = -1
        trials_source_folder = os.path.join(folder, 'trials')
        while os.path.exists(
                os.path.join(trials_source_folder, str(trials + 1))):
            trials += 1
        trial_out_folder = os.path.join(folder, 'analysis', 'trials')

        for trial in range(trials):
            trial_src = os.path.join(trials_source_folder, str(trial),
                                     'throughtime.npz')
            if not os.path.exists(trial_src):
                continue

            trial_out = os.path.join(trial_out_folder, str(trial))
            have_imgs = futils.fig_exists(
                os.path.join(trial_out, f'epoch_vs_loss_train'), imgs_filter)
            if have_imgs:
                continue

            os.makedirs(trial_out, exist_ok=True)
            tasks.extend([
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src, futils.make_vs_title(
                        'Epoch', 'Loss (Train)'), 'Epoch', 'Loss', 'epochs',
                     slice(None), 'losses_train', slice(None),
                     os.path.join(trial_out, 'epoch_vs_loss_train')), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title('Epoch', 'Loss (Validation)'),
                     'Epoch', 'Loss', 'epochs', slice(None), 'losses_val',
                     slice(None), os.path.join(trial_out,
                                               'epoch_vs_loss_val')), {
                                                   'folder': filter_folder,
                                                   'images': imgs_to_produce
                                               }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title('Epoch', f'{perf_name} (Train)'),
                     'Epoch', perf_name_short, 'epochs', slice(None),
                     'perfs_train', slice(None),
                     os.path.join(trial_out, 'epoch_vs_perf_train')), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(__name__, '_rawplot',
                                (trial_src,
                                 futils.make_vs_title(
                                     'Epoch', f'{perf_name} (Validation)'),
                                 'Epoch', perf_name_short, 'epochs',
                                 slice(None), 'perfs_val', slice(None),
                                 os.path.join(trial_out, 'epoch_vs_perf_val')),
                                {
                                    'folder': filter_folder,
                                    'images': imgs_to_produce
                                }, 1),
            ])

        trial_out = trial_out_folder
        trial_src = os.path.join(folder, 'throughtimes.npz')
        for reduction in REDUCTIONS:
            have_imgs = futils.fig_exists(
                os.path.join(trial_out, f'epoch_vs_loss_train_{reduction}'),
                imgs_filter)
            if have_imgs:
                continue

            os.makedirs(trial_out, exist_ok=True)

            tasks.extend([
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src, futils.make_vs_title(
                        'Epoch', 'Loss (Train)'), 'Epoch', 'Loss', 'epochs',
                     slice(None), 'losses_train', slice(None),
                     os.path.join(
                         trial_out,
                         f'epoch_vs_loss_train_{reduction}'), reduction), {
                             'folder': filter_folder,
                             'images': imgs_to_produce
                         }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title('Epoch', 'Loss (Train, Smoothed)'),
                     'Epoch', 'Loss', 'epochs', slice(None),
                     'losses_train_smoothed', slice(None),
                     os.path.join(trial_out,
                                  f'epoch_vs_smoothed_loss_train_{reduction}'),
                     reduction), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title(
                         'Epoch', 'Loss (Validation)'), 'Epoch', 'Loss',
                     'epochs', slice(None), 'losses_val', slice(None),
                     os.path.join(trial_out, f'epoch_vs_loss_val_{reduction}'),
                     reduction), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title(
                         'Epoch', 'Loss (Val, Smoothed)'), 'Epoch', 'Loss',
                     'epochs', slice(None), 'losses_val_smoothed', slice(None),
                     os.path.join(trial_out,
                                  f'epoch_vs_smoothed_loss_val_{reduction}'),
                     reduction), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title('Epoch', f'{perf_name} (Train)'),
                     'Epoch', perf_name_short, 'epochs', slice(None),
                     'perfs_train', slice(None),
                     os.path.join(
                         trial_out,
                         f'epoch_vs_perf_train_{reduction}'), reduction), {
                             'folder': filter_folder,
                             'images': imgs_to_produce
                         }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title(
                         'Epoch', f'{perf_name} (Smoothed, Train)'), 'Epoch',
                     perf_name_short, 'epochs', slice(None),
                     'perfs_train_smoothed', slice(None),
                     os.path.join(trial_out,
                                  f'epoch_vs_smoothed_perf_train_{reduction}'),
                     reduction), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title(
                         'Epoch', f'{perf_name} (Validation)'), 'Epoch',
                     perf_name_short, 'epochs', slice(None), 'perfs_val',
                     slice(None),
                     os.path.join(trial_out, f'epoch_vs_perf_val_{reduction}'),
                     reduction), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
                dispatcher.Task(
                    __name__, '_rawplot',
                    (trial_src,
                     futils.make_vs_title(
                         'Epoch', f'{perf_name} (Val, Smoothed)'), 'Epoch',
                     perf_name_short, 'epochs', slice(None),
                     'perfs_val_smoothed', slice(None),
                     os.path.join(trial_out,
                                  f'epoch_vs_smoothed_perf_val_{reduction}'),
                     reduction), {
                         'folder': filter_folder,
                         'images': imgs_to_produce
                     }, 1),
            ])

    if (settings.typical_run_pca3dvis
            and os.path.exists(os.path.join(folder, 'trials', '0'))):
        model_file = os.path.join(folder, 'trials', '0', 'model.pt')
        model = torch.load(model_file)
        train_set, _ = utils.invoke(dataset_loader)

        example_item = train_set[0][0]
        example_item = torch.unsqueeze(example_item, 0)

        example_out = model(example_item)
        if isinstance(example_out, tuple):
            # we have an unstripped model!
            outfolder = (os.path.join(folder, 'analysis', 'trials', '0',
                                      'pca3dvis_train_draft') if
                         settings.typical_run_pca3dvis_draft else os.path.join(
                             folder, 'analysis', 'trials', '0',
                             'pca3dvis_train'))

            if not os.path.exists(outfolder):
                tasks.append(
                    dispatcher.Task(
                        __name__, '_pca3dvis_model',
                        (dataset_loader, model_file, outfolder, True,
                         accuracy_style, settings.typical_run_pca3dvis_draft),
                        dict(), None))

    tasks.append(
        dispatcher.Task('ignite_simple.text_analysis', 'text_analyze',
                        (settings, folder), dict(), 1))

    # TODO other stuff

    sug_imports = ('ignite_simple.analysis', )

    dispatcher.dispatch(tasks, cores, sug_imports)

    logger.info('Finished analyzing %s', folder)
Exemple #48
0
labels = 'keyword noise voice'.split()
X_train = np.array([])
y_train = np.array([])
# X_val = np.array([])
# y_val = np.array([])
# X_test = np.array([])
# y_test = np.array([])

for i, label in enumerate(labels):
    train_path = 'data/featurized/training/{}_16k.npy'.format(label)
    # val_path = 'data/featurized/validation/{}_16k.npy'.format(label)
    # test_path = 'data/featurized/testing/{}_16k.npy'.format(label)

    category = labels.index(label)

    x_train_data = np.load(train_path)
    y_train_data = np.full(x_train_data.shape[0], fill_value=category)
    # x_val_data = np.load(val_path)
    # y_val_data = np.full(x_val_data.shape[0], fill_value=category)
    # x_test_data = np.load(test_path)
    # y_test_data = np.full(x_test_data.shape[0], fill_value=category)

    if X_train.size > 0:
        X_train = np.vstack((X_train, x_train_data))
        y_train = np.append(y_train, y_train_data)
        # X_val = np.vstack((X_val, x_val_data))
        # y_val = np.append(y_val, y_val_data)
        # X_test = np.vstack((X_test, x_test_data))
        # y_test = np.append(y_test, y_test_data)
    else:
        X_train = x_train_data
Exemple #49
0
classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(0.3))
classifier.add(Dense(units=32, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))

classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

X = []
y = []

dir = "../Data/Processed0"
for filename in os.listdir(dir):
    img = np.load(os.path.join(dir, filename))[:, :, 0]
    smooth_img = skimage.filters.gaussian(img, 8)
    sobel_img = skimage.filters.sobel(img)
    im_rez = skimage.transform.resize(sobel_img, (256, 256, 1))
    X.append(im_rez)
    y.append(0)

dir = "../Data/Processed1"
for filename in os.listdir(dir):
    img = np.load(os.path.join(dir, filename))[:, :, 0]
    smooth_img = skimage.filters.gaussian(img, 8)
    sobel_img = skimage.filters.sobel(img)
    im_rez = skimage.transform.resize(sobel_img, (256, 256, 1))
    X.append(im_rez)
    y.append(1)
Exemple #50
0
def _rawplot(infile,
             title,
             xlab,
             ylab,
             x_varname,
             x_slice,
             y_varname,
             y_slice,
             outfile_wo_ext,
             reduction='none',
             folder=None,
             images=None):
    """Performs a single plot of the y-axis vs x-axis by loading the given
    numpy file, fetching the arrays by the variable names from the file,
    slicing it as specified, and then plotting.

    :param str infile: the numpy file to load the arrays from
    :param str title: the title of the plot
    :param str xlab: the label for the x-axis
    :param str ylab: the label for the y-axis
    :param str x_varname: the name for the array to get the x-data from
        within the numpy file at infile
    :param slice x_slice: the slice or tuple of slices for the x-data.
        the x-data will be squeezed as much as possible after slicing
    :param str y_varname: the name for the array to get the y-data from
        within the numpy file at infile
    :param slice y_slice: the slice or tuple of slices for the y-data.
        the y-data will be squeezed as much as possible after slicing
    :param str outfile_wo_ext: the path to the outfile without an extension
    :param str reduction: how we handle extra dimensions. one of the following:

        * `none`

            no reduction is performed

        * `each`

            each slice along the first dimension is plotted as its own curve

        * `mean`

            take the mean over the first dimension and plot without errorbars

        * `mean_with_errorbars`

            take the mean over the first dimension and plot with errorbars from
            standard deviation

        * `mean_with_fillbtwn`

            take the mean over the first dimension and plot with shaded error
            region

        * `lse`

            take the logsumexp over the first dimension

    :param optional[str] folder: the "current" folder or equivalent, which
        contains the hparams and trials folders. Only required if images
        is not None
    :param optional[set[str]] images: if not None, only images whose filepath
        relative to folder is in this set will be produced.
    """
    with np.load(infile) as nin:
        xs = nin[x_varname][x_slice]
        ys = nin[y_varname][y_slice]

    _filter = _rawplot_filter(folder, images)

    new_shape = list(i for i in ys.shape if i != 1)
    ys = ys.reshape(new_shape)

    new_shape_x = list(i for i in xs.shape if i != 1)
    xs = xs.reshape(new_shape_x)
    if len(new_shape_x) != 1:
        xs = xs[0]  # take first

    if len(new_shape) > 1:
        if reduction == 'mean':
            ys = ys.mean(0)
        elif reduction == 'lse':
            old_settings = np.seterr(under='ignore')
            ys = scipy.special.logsumexp(ys, axis=0)
            np.seterr(**old_settings)
        elif reduction in ('mean_with_errorbars', 'mean_with_fillbtwn'):
            stds = ys.std(0)
            means = ys.mean(0)

            errs = 1.96 * stds
            errs_low = means - errs
            errs_high = means + errs
            ys = means
        elif reduction == 'each':
            pass
        else:
            raise ValueError(
                f'cannot reduce shape {new_shape} with {reduction}')
    else:
        reduction = 'none'

    warnings.simplefilter('ignore', UserWarning)
    fig, ax = plt.subplots()
    fig.set_figwidth(19.2)
    fig.set_figheight(10.8)
    fig.set_dpi(100)

    ax.set_xlabel(xlab)
    ax.set_ylabel(ylab)

    if reduction in ('none', 'mean', 'mean_with_fillbtwn', 'lse'):
        ax.plot(xs, ys)

        if reduction == 'mean_with_fillbtwn':
            ax.fill_between(xs, errs_low, errs_high, color='grey', alpha=0.4)
    elif reduction == 'mean_with_errorbars':
        ax.errorbar(xs, ys, errs)
    elif reduction == 'each':
        for y in ys:
            ax.plot(xs, y)
    else:
        raise ValueError(f'unknown reduction {reduction}')

    futils.save_fig(fig, ax, title, outfile_wo_ext, _filter)

    plt.close(fig)
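# A minimal standalone sketch (not from the original module) of the
# 'mean_with_errorbars' reduction described in the docstring above: collapse
# the trial axis to a mean curve plus a ~95% band, as _rawplot does.
import numpy as np

ys = np.random.randn(5, 100)            # pretend 5 trials x 100 x-points
means = ys.mean(0)
errs = 1.96 * ys.std(0)                 # same 1.96*std band used above
errs_low, errs_high = means - errs, means + errs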
Exemple #51
0
                                              refractory=t_refrac_excite,
                                              reset=reset_excite)
spike_monitor[name_neuron_group] = SpikeMonitor(
    neuron_group[name_neuron_group])
neuron_group[name_neuron_group].v = v_rest_excite
neuron_group[name_neuron_group].timer = 0 * b2.ms
neuron_group[name_neuron_group].timer_c = 0 * b2.ms
neuron_group[name_neuron_group].spike_number = 0
neuron_group[name_neuron_group].ge = 0
neuron_group[name_neuron_group].gi = 0
if STDP_on:
    if number_iteration == 0:
        neuron_group[name_neuron_group].thresh = v_thresh_excite
    else:
        neuron_group[name_neuron_group].thresh = np.load(
            iteration_folder_last + '/thresh_' + name_neuron_group + '_' +
            str(population_OUT) + '_' + str(number_iteration - 1) +
            '.npy') * b2.volt
else:
    print('load the saved thresh')
    neuron_group[name_neuron_group].thresh = np.load(
        iteration_folder + '/thresh_' + name_neuron_group + '_' +
        str(population_OUT) + '_' + str(number_iteration) + '.npy') * b2.volt
## create the inhibite neuron group
name_neuron_group = 'OUT' + 'i'
neuron_group[name_neuron_group] = NeuronGroup(1,
                                              neuron_eqs_inhibite,
                                              threshold=v_thresh_inhibite_str,
                                              refractory=t_refrac_inhibite,
                                              reset=reset_inhibite)
#spike_monitor[name_neuron_group] = SpikeMonitor(neuron_group[name_neuron_group])
neuron_group[name_neuron_group].timer = 0 * b2.ms
Exemple #52
0
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score, mean_absolute_error
from scipy.stats import linregress
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from xmat_pnnl_code import ProcessData
from xmat_pnnl_code import SKREG
import xmat_pnnl_code as xcode
import shap

#Model data
base_path = '/'.join(xcode.__path__[0].split('/')[:-1])
path = base_path + '/data_processing/9Cr_data/LMP'
model = np.load(path + '/model_params.npy', allow_pickle=True)[()]
model = model['9Cr-001']

#C data
C_data = np.load(path + '/constant_matcher_score_lib.npy',
                 allow_pickle=True)[()]
C_data = {k: v['C'] for k, v in C_data.items()}

#Load the 9Cr data
ID = [
    1, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 43, 44, 45,
    46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 58, 59, 60, 61, 62, 63, 64, 65, 66,
    67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 82
]
ID = ['9Cr-{}'.format(str(i).zfill(3)) for i in ID]
Exemple #53
0
from __future__ import print_function
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
print(matplotlib.matplotlib_fname())
import sys, math

data = np.load(sys.argv[1])

train = data['X_train']
y_train = data['y_train']
valid = data['X_valid']
test = data['X_test']

print("Train size", train.shape)
print("Valid size", valid.shape)
print("Test size ", test.shape)

width, height = int(math.sqrt(train.shape[1])), int(math.sqrt(train.shape[1]))

for i in range(10):
    img = train[i].reshape((height, width))
    print("Label", y_train[i])
    plt.imshow(img, cmap='gray', interpolation='none')
    plt.title("Train " + str(i), fontsize=20)
    plt.show()
Exemple #54
0
# # Data pre-processing for training data
# del_num = np.array([])
# for i in range(len(x)):
# 	tmp = x[i]
# 	PM25 = tmp[27:]
# 	if (len(tmp[tmp <= 0]) > 0) or (y[i] >= 300 or y[i] <= 0) or (len(PM25[PM25 >= 300]) > 0):
# 		del_num = np.append( del_num, i)

# train_X = np.delete(x, del_num, axis = 0)
# train_Y = np.delete(y, del_num, axis = 0)
# b, w = gradient_descent(train_X, train_Y, 10 ,50000)

pa_path = os.getcwd() + '/parameters_best.npy'
# path_test = os.getcwd() + '/ml-2018spring-hw1/test.csv' # argv[1]
para = np.load(pa_path)
test_x = []
n_row = 0
text = open(sys.argv[1], "r")
row = csv.reader(text, delimiter=",")

# Data pre-processing for testing data
for r in row:
    if n_row % 18 == 2 or n_row % 18 == 5 or n_row % 18 == 8 or n_row % 18 == 9:
        if n_row % 18 == feature[0]: test_x.append([])
        for i in range(2, 11):
            if float(r[i]) == 0:
                if i == 2: test_x[n_row // 18].append(float(r[i + 1]))
                elif i == 10: test_x[n_row // 18].append(float(r[i - 1]))
                else:
                    test_x[n_row // 18].append(
Exemple #55
0
def main():
    model_name = "convlstm"
    system = "ccw" # ccw, gf
    system_dir = {"ccw":"concentric_circle_wave", "gf":"global_flow"}
    result_dir = os.path.join("results", system_dir[system], "convlstm")
    config = configparser.ConfigParser()
    config.read("config_{}.ini".format(system))

    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    # shutil.copy("config_{}.ini".format(system), result_dir)
    
    
    ## Set seed and cuda
    seed = 128
    print("Set seed {}.".format(seed))
    
    cuda = torch.cuda.is_available()
    gpu = int(config.get("train", "gpu"))
    if cuda:
        print("cuda is available")
        device = torch.device('cuda', gpu)
    else:
        print("cuda is not available")
        device = torch.device('cpu')
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    
    # cuda = False
    # device = torch.device('cpu')
    # np.random.seed(seed)
    # torch.autograd.set_detect_anomaly(True)
    
    
    ## Read data and set parameters
    train_data = np.load(config.get("data", "path")).astype(np.float32) # T x h x w
    true_data = np.load(config.get("data", "true_path")).astype(np.float32)
        
    timesteps = int(config.get("data", "timesteps"))
    width = int(config.get("data", "width"))
    height = int(config.get("data", "height"))
    
    loss_name = config.get("network", "loss")
    #n_layers = int(config.get("network", "n_layers"))
    step = int(config.get("network", "step"))
    effective_step = [int(i) for i in config.get("network", "effective_step").split(",")]
    input_channels = int(config.get("network", "input_channels"))
    kernel_size = tuple([int(i) for i in config.get("network", "kernel_size").split(",")])
    n_channels = [int(i) for i in config.get("network", "n_channels").split(",")]
    n_layers = len(n_channels)
    batch_norm = config.getboolean("network", "batch_norm")  # bool() on a non-empty string is always True
    effective_layers = [int(i) for i in config.get("network", "effective_layers").split(",")]
    
    num_epochs = int(config.get("train", "num_epochs"))
    batch_size = int(config.get("train", "batch_size"))
    optimizer_name = config.get("train", "optimizer")
    init_lr = float(config.get("train", "init_lr"))
    decay_rate = float(config.get("train", "decay_rate"))
    decay_steps = float(config.get("train", "decay_steps"))
    train_steps = int(config.get("train", "train_steps"))
    test_steps = int(config.get("train", "test_steps"))
    prediction_steps = int(config.get("train", "prediction_steps"))
    
    display_steps = int(config.get("logs", "display_steps"))
    save_steps = int(config.get("logs", "save_steps"))
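    # For reference, the config keys read above imply an ini layout roughly like
    # (values illustrative, not from the original project):
    #   [data]    path, true_path, timesteps, width, height
    #   [network] loss, step, effective_step, input_channels, kernel_size,
    #             n_channels, batch_norm, effective_layers
    #   [train]   gpu, num_epochs, batch_size, optimizer, init_lr, decay_rate,
    #             decay_steps, train_steps, test_steps, prediction_steps
    #   [logs]    display_steps, save_steps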

    
    ## Read model
    model = ConvLSTM((height, width), input_channels, n_channels, kernel_size, n_layers, effective_layers, batch_norm, device=device).to(device)
    if loss_name == "MSE":
        loss_fn = nn.MSELoss()
    elif loss_name == "CE":
        loss_fn = nn.CrossEntropyLoss()

    if cuda:
        cudnn.benchmark = True
    if optimizer_name == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=init_lr)
    elif optimizer_name == "RMSprop":
        optimizer = torch.optim.RMSprop(model.parameters(), lr=init_lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=decay_steps, gamma=decay_rate)
    
    
    # Define functions
    def train(epoch):
        model.train()
        epoch_loss = 0
        
        data = Variable(torch.from_numpy(train_data[:train_steps-1])).unsqueeze(1).unsqueeze(1).to(device) # T x bs(=1) x c(=1) x h x w
        # forward + backward + optimize
        optimizer.zero_grad()
        outputs, _ = model(data)
        loss = loss_fn(outputs.squeeze(), torch.from_numpy(train_data[1:train_steps]).to(device))
        loss.backward()
        optimizer.step()
        epoch_loss = loss.item()
                
        if epoch%display_steps==0:
            print_contents = "Train Epoch: [{}/{}]".format(epoch, num_epochs)
            print_contents += "\t {}: {:.6f}".format(
                                loss_name,
                                epoch_loss)
            print(print_contents)
        return epoch_loss
    
    
    def test(epoch):
        """uses test data to evaluate likelihood of the model"""
        model.eval()
        epoch_loss = 0

        data = Variable(torch.from_numpy(train_data[train_steps:train_steps+test_steps-1])).unsqueeze(1).unsqueeze(1).to(device) # T x bs(=1) x c(=1) x w x h
        # forward pass only -- evaluation should not update parameters
        with torch.no_grad():
            outputs, _ = model(data)
            loss = loss_fn(outputs.squeeze(), torch.from_numpy(train_data[train_steps+1:train_steps+test_steps]).to(device))
        epoch_loss = loss.item()
            
        if epoch%display_steps==0:
            print_contents = "====> Test set loss:"
            print_contents += " {} = {:.4f}".format(loss_name,
                                                    epoch_loss)
            print(print_contents)
        return epoch_loss
        
    
    def prediction(epoch):
        """n-step prediction"""
        model.eval()
        loss = np.zeros((2, prediction_steps))
        output = np.zeros((prediction_steps, train_data.shape[1], train_data.shape[2]))
        
        data = Variable(torch.from_numpy(train_data[:train_steps-1].squeeze()))
        data = data.unsqueeze(1).unsqueeze(1).to(device) # T x bs(=1) x c(=1) x h x w
        outputs, last_state_list = model(data)
        #prev_state = outputs[-1].view(1,1,1,height,width) # T(=1) x bs(=1) x c(=1) x h x w
        prev_state = Variable(torch.from_numpy(train_data[train_steps])).unsqueeze(0).unsqueeze(0).unsqueeze(0).to(device)
        
        for i in range(prediction_steps):
            prev_state, last_state_list = model(prev_state, last_state_list)
            loss[0,i] = mean_squared_error(prev_state.squeeze().cpu().detach().numpy(), train_data[train_steps+i])
            loss[1,i] = mean_squared_error(prev_state.squeeze().cpu().detach().numpy(), true_data[train_steps+i])
            output[i] = prev_state.squeeze().cpu().detach().numpy()
        
        if epoch%display_steps==0:
            print_contents = "===> Prediction loss:\n"
            for i in range(prediction_steps):
                print_contents += "{} step forecast {}: {}\n".format(i+1, loss_name, loss[0,i])
            print(print_contents)
        
        #print("output", output.shape, output.min(), output.max())
        return loss, output
        
    
    ## Train model
    def execute():
        train_loss = np.zeros(num_epochs)
        test_loss = np.zeros(num_epochs)
        prediction_loss = np.zeros((num_epochs, 2, prediction_steps))
        outputs = np.zeros((num_epochs//save_steps, prediction_steps, train_data.shape[1], train_data.shape[2]))
        start_time = time.time()

        for epoch in range(1, num_epochs + 1):
            # training + testing
            _train_loss = train(epoch)
            _test_loss = test(epoch)
            _prediction_loss = prediction(epoch)
            scheduler.step()

            # substitute losses for array
            train_loss[epoch-1] = _train_loss
            test_loss[epoch-1] = _test_loss
            prediction_loss[epoch-1], outputs[(epoch-1)//save_steps] = _prediction_loss

            # duration
            duration = int(time.time() - start_time)
            second = int(duration%60)
            remain = int(duration//60)
            minute = int(remain%60)
            hour = int(remain//60)
            print("Duration: {} hour, {} min, {} sec.".format(hour, minute, second))
            remain = (num_epochs - epoch) * duration / epoch
            second = int(remain%60)
            remain = int(remain//60)
            minute = int(remain%60)
            hour = int(remain//60)
            print("Estimated Remain Time: {} hour, {} min, {} sec.".format(hour, 
                                                                           minute, 
                                                                           second))

            # saving model
            if epoch % save_steps == 0:
                torch.save(model.state_dict(), os.path.join(result_dir, 
                                        'state_dict_'+str(epoch)+'.pth'))
                torch.save(optimizer.state_dict(), os.path.join(result_dir,
                                        'adam_state_dict_'+str(epoch)+'.pth'))
                print('Saved model to state_dict_'+str(epoch)+'.pth')
                # np.save(os.path.join(result_dir, "train_loss.npy"), train_loss)
                # np.save(os.path.join(result_dir, "test_loss.npy"), test_loss)
                np.save(os.path.join(result_dir, "prediction_loss.npy"), prediction_loss)
                np.save(os.path.join(result_dir, "convlstm_mse.npy"), prediction_loss[:,1])
                # np.save(os.path.join(result_dir, "prediction.npy"), outputs)

                # plot loss for train and test
                fig, ax = plt.subplots(1, 1, figsize=(5, 5))
                ax.plot(range(epoch), train_loss[:epoch],
                              label="train")
                ax.plot(range(epoch), test_loss[:epoch],
                              label="test")
                ax.set_xlabel("epoch")
                ax.set_ylabel(loss_name)
                ax.legend()
                fig.savefig(os.path.join(result_dir, "loss.png"), 
                            bbox_inches="tight")
                ax.set_yscale("log")
                fig.savefig(os.path.join(result_dir, "log_loss.png"), 
                            bbox_inches="tight")
                
                # plot prediction loss
                fig, ax = plt.subplots(1, 1, figsize=(5, 5))
                for i in range(save_steps, epoch+1, save_steps):
                    ax.plot(range(train_steps, train_steps+prediction_steps), prediction_loss[i-1,0,:],
                                          label="epoch={}".format(i))
                ax.set_xlabel("timestep")
                ax.set_ylabel(loss_name)
                ax.legend()
                fig.savefig(os.path.join(result_dir, "prediction_loss.png"), 
                            bbox_inches="tight")
                ax.set_yscale("log")
                fig.savefig(os.path.join(result_dir, "log_prediction_loss.png"), 
                            bbox_inches="tight")
                
    
    
    ## Excute
    # measure for culabas rutine error
    execute()
Exemple #56
0
def generate_images(network_pkl, seeds, truncation_psi, outdir, class_idx=None, dlatents_npz=None, grid=False, save_vector=False, fixnoise=False):
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, Gs = pickle.load(fp)

    os.makedirs(outdir, exist_ok=True)
    if(save_vector):
        os.makedirs(outdir+"/vectors", exist_ok=True)

    # Render images for a given dlatent vector.
    if dlatents_npz is not None:
        print(f'Generating images from dlatents file "{dlatents_npz}"')
        dlatents = np.load(dlatents_npz)['dlatents']
        max_l = 2 * int(np.log2(Gs.output_shape[-1]) - 1)  # max_l=18 for 1024x1024 models
        if dlatents.shape[1:] != (max_l, 512):  # [N, max_l, 512]
            actual_size = int(2**(dlatents.shape[1]//2+1))
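            # e.g. (added arithmetic check) 16 stored layers imply a
            # 2**(16//2 + 1) = 512 px network, matching max_l = 2*(log2(512)-1) = 16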
            print(f'Mismatch of loaded dlatents and network! dlatents was created with a network of size {actual_size}; '
                  f'{network_pkl} is of size {Gs.output_shape[-1]}')
            sys.exit(1)
        imgs = Gs.components.synthesis.run(dlatents, output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True))
        for i, img in enumerate(imgs):
            fname = f'{outdir}/dlatent{i:02d}.png'
            print (f'Saved {fname}')
            PIL.Image.fromarray(img, 'RGB').save(fname)
        return

    # Render images for dlatents initialized from random seeds.
    Gs_kwargs = {
        'output_transform': dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True),
        'randomize_noise': False
    }
    if truncation_psi is not None:
        Gs_kwargs['truncation_psi'] = truncation_psi

    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    label = np.zeros([1] + Gs.input_shapes[1][1:])
    if class_idx is not None:
        label[:, class_idx] = 1

    images = []
    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
        if(fixnoise):
            noise_rnd = np.random.RandomState(1) # fix noise
            tflib.set_vars({var: noise_rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
        else:
            tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
        image = Gs.run(z, label, **Gs_kwargs) # [minibatch, height, width, channel]
        images.append(image[0])
        PIL.Image.fromarray(image[0], 'RGB').save(f'{outdir}/seed{seed:04d}.png')
        if save_vector:
            np.save(f'{outdir}/vectors/seed{seed:04d}',z)
            # np.savetxt(f'{outdir}/vectors/seed{seed:04d}',z)

    # If user wants to save a grid of the generated images
    if grid:
        print('Generating image grid...')
        PIL.Image.fromarray(create_image_grid(np.array(images)), 'RGB').save(f'{outdir}/grid.png')
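# Added usage sketch, not part of the original script: a minimal direct call to
# generate_images(). The network pickle path, seed list, and output directory
# are hypothetical placeholders.
if __name__ == '__main__':
    generate_images(network_pkl='networks/example-stylegan2.pkl',  # assumed path
                    seeds=[0, 1, 2],
                    truncation_psi=0.5,
                    outdir='out',
                    grid=True)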
Exemple #57
def load(file_path: str, file_name="game_data.npy"):
    # load a previously saved game-state array (allow_pickle permits object arrays)
    file_path = file_path[:-1] if file_path[-1] == "/" else file_path
    full_path = file_path + "/" + file_name
    state = np.load(full_path, allow_pickle=True)
    return state
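# Added usage sketch: load a previously saved game state; the directory name
# "saved_games" is a hypothetical placeholder.
state = load("saved_games")
print(type(state), getattr(state, "shape", None))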
# calculate the homography matrix H from four point correspondences
pnt_A_s = np.array([0.19, 0.75])
pnt_A_t = np.array([0, 0.9])
pnt_B_s = np.array([0.45, 0.54])
pnt_B_t = np.array([1, 0.9])
pnt_C_s = np.array([0.55, 0.54])
pnt_C_t = np.array([1, -0.9])
pnt_D_s = np.array([0.95, 0.84])
pnt_D_t = np.array([0, -0.9])

pts_src = np.array([pnt_A_s, pnt_B_s, pnt_C_s, pnt_D_s])
pts_dst = np.array([pnt_A_t, pnt_B_t, pnt_C_t, pnt_D_t])
h, status = cv2.findHomography(pts_src, pts_dst)
np.save('homography', h)
h = np.load('homography.npy')
print(h)
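# Added sanity check, not in the original snippet: apply H to one source point
# in homogeneous coordinates and confirm it maps to its target point.
p = np.append(pnt_A_s, 1.0)        # [x, y, 1]
q = h.dot(p)
print(q[:2] / q[2], pnt_A_t)       # the two printed points should match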

# calculate transformed image
n_points = 10
x = np.expand_dims(np.linspace(0, 1, n_points), axis=1)
t = np.linspace(0, 1, n_points)

xy_1 = np.expand_dims(np.linspace(0, 1, n_points),
                      axis=1) * (pnt_B_s - pnt_A_s) + np.ones(
                          (n_points, 1)) * pnt_A_s
xy_2 = np.expand_dims(np.linspace(0, 1, n_points),
                      axis=1) * (pnt_C_s - pnt_B_s) + np.ones(
                          (n_points, 1)) * pnt_B_s
xy_3 = np.expand_dims(np.linspace(0, 1, n_points),
                      axis=1) * (pnt_D_s - pnt_C_s) + np.ones(
                          (n_points, 1)) * pnt_C_s
Exemple #59
import numpy as np 
import tensorflow as tf 
import cv2
import os
import sys
import csgan
import scipy.io

# Change these values before running code. 
# MR = 0.01, 0.04, 0.10, 0.25 and corresponding m = 10, 43, 109, 272

checkpointPath = 'checkpoints_final/mr_0_25_79000'
inputDir = 'test_images/'
matdir = 'recon_images/mr_0_25/'
phi = np.load('phi/phi_0_25_1089.npy')
blockSize = 33
m = 272
batch_size = 1

imList = os.listdir(inputDir)
print(imList)

with tf.Graph().as_default():

	images_tf = tf.placeholder( tf.float32, [batch_size, 33, 33, 1], name="images")
	cs_meas = tf.placeholder( tf.float32, [batch_size, 1, m, 1], name='cs_meas')
	is_train = tf.placeholder( tf.bool )

	bn1, bn2, reconstruction_ori = csgan.build_reconstruction(cs_meas, is_train)

	summary = tf.merge_all_summaries()
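# Added illustration (the original script is cut off before this step): how a
# single 33x33 block would typically be compressed with the loaded measurement
# matrix. The orientation phi.shape == (m, blockSize**2) is an assumption.
def compress_block(block, phi_mat):
    # block: (33, 33) float patch -> (1, 1, m, 1) tensor matching the cs_meas placeholder
    x = block.reshape(-1, 1)            # (1089, 1) flattened block
    y = phi_mat.dot(x)                  # (m, 1) compressed measurements
    return y.reshape(1, 1, -1, 1)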
def main():
    parse = argparse.ArgumentParser()
    # ---------- environment setting: which gpu -------
    parse.add_argument('-gpu',
                       '--gpu',
                       type=str,
                       default='0',
                       help='which gpu to use: 0 or 1')
    parse.add_argument('-folder_name',
                       '--folder_name',
                       type=str,
                       default='datasets/citibike-data/data/')
    parse.add_argument('-output_folder_name',
                       '--output_folder_name',
                       type=str,
                       default='output/citibike-data/data/')
    # ---------- input/output settings -------
    parse.add_argument('-input_steps',
                       '--input_steps',
                       type=int,
                       default=6,
                       help='number of input steps')
    # ---------- model ----------
    parse.add_argument('-model',
                       '--model',
                       type=str,
                       default='GCN',
                       help='model: FC_LSTM, FC_GRU, GCN, flow_GCN, or Coupled_GCN')
    parse.add_argument('-num_layers',
                       '--num_layers',
                       type=int,
                       default=2,
                       help='number of layers in model')
    parse.add_argument('-num_units',
                       '--num_units',
                       type=int,
                       default=64,
                       help='dim of hidden states')
    parse.add_argument('-trained_adj_mx',
                       '--trained_adj_mx',
                       type=int,
                       default=0,
                       help='whether to train the adjacency matrix')
    parse.add_argument('-filter_type',
                       '--filter_type',
                       type=str,
                       default='dual_random_walk',
                       help='laplacian, random_walk, or dual_random_walk')
    parse.add_argument('-delta',
                       '--delta',
                       type=float,
                       default=1e7,
                       help='delta to calculate rescaled weighted matrix')
    parse.add_argument('-epsilon',
                       '--epsilon',
                       type=float,
                       default=0.8,
                       help='epsilon to calculate rescaled weighted matrix')
    parse.add_argument(
        '-dy_adj',
        '--dy_adj',
        type=int,
        default=1,
        help='whether to use a dynamic adjacency matrix for the lower feature-extraction layer')
    parse.add_argument(
        '-dy_filter',
        '--dy_filter',
        type=int,
        default=0,
        help='whether to use a dynamic filter to generate region-specific filters')
    #parse.add_argument('-att_dynamic_adj', '--att_dynamic_adj', type=int, default=1, help='whether to use a dynamic adjacency matrix in attention parts')
    parse.add_argument('-model_save',
                       '--model_save',
                       type=str,
                       default='gcn',
                       help='folder name to save model')
    parse.add_argument('-pretrained_model',
                       '--pretrained_model_path',
                       type=str,
                       default=None,
                       help='path to the pretrained model')
    # ---------- params for CNN ------------
    parse.add_argument('-num_filters',
                       '--num_filters',
                       type=int,
                       default=32,
                       help='number of filters in CNN')
    parse.add_argument('-pooling_units',
                       '--pooling_units',
                       type=int,
                       default=64,
                       help='number of pooling units')
    parse.add_argument('-dropout_keep_prob',
                       '--dropout_keep_prob',
                       type=float,
                       default=0.5,
                       help='keep probability in dropout layer')
    # ---------- training parameters --------
    parse.add_argument('-n_epochs',
                       '--n_epochs',
                       type=int,
                       default=20,
                       help='number of epochs')
    parse.add_argument('-batch_size',
                       '--batch_size',
                       type=int,
                       default=8,
                       help='batch size for training')
    parse.add_argument('-show_batches',
                       '--show_batches',
                       type=int,
                       default=100,
                       help='how often (in batches) to print training progress')
    parse.add_argument('-lr',
                       '--learning_rate',
                       type=float,
                       default=0.0002,
                       help='learning rate')
    parse.add_argument('-update_rule',
                       '--update_rule',
                       type=str,
                       default='adam',
                       help='update rule')
    # ---------- train or predict -------
    parse.add_argument('-train',
                       '--train',
                       type=int,
                       default=1,
                       help='whether to train')
    parse.add_argument('-test', '--test', type=int, default=0, help='whether to test')
    #
    args = parse.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    print('load train, test data...')
    # train: 20140401 - 20140831
    # validate: 20140901 - 20140910
    # test: 20140911 - 20140930
    split = [3672, 240, 480]
    #split = [3912, 480]
    data, train_data, val_data, test_data = load_npy_data(filename=[
        args.folder_name + 'd_station.npy', args.folder_name + 'p_station.npy'
    ],
                                                          split=split)
    # data: [num, station_num, 2]
    #f_data, train_f_data, val_f_data, test_f_data = load_pkl_data(args.folder_name + 'f_data_list.pkl', split=split)
    f_data, train_f_data, val_f_data, test_f_data = load_npy_data(
        filename=[args.folder_name + 'citibike_flow_data.npy'], split=split)
    print(len(f_data))
    print('preprocess train/val/test flow data...')
    #f_preprocessing = StandardScaler()
    f_preprocessing = MinMaxNormalization01()
    f_preprocessing.fit(train_f_data)
    train_f_data = f_preprocessing.transform(train_f_data)
    if val_f_data is not None:
        val_f_data = f_preprocessing.transform(val_f_data)
    test_f_data = f_preprocessing.transform(test_f_data)
    print('preprocess train/val/test data...')
    pre_process = MinMaxNormalization01()
    #pre_process = StandardScaler()
    pre_process.fit(train_data)
    train_data = pre_process.transform(train_data)
    if val_data is not None:
        val_data = pre_process.transform(val_data)
    test_data = pre_process.transform(test_data)
    #
    num_station = data.shape[1]
    print('number of station: %d' % num_station)
    #
    train_loader = DataLoader_graph(train_data,
                                    train_f_data,
                                    args.input_steps,
                                    flow_format='identity')
    if val_data is not None:
        val_loader = DataLoader_graph(val_data,
                                      val_f_data,
                                      args.input_steps,
                                      flow_format='identity')
    else:
        val_loader = None
    test_loader = DataLoader_graph(test_data,
                                   test_f_data,
                                   args.input_steps,
                                   flow_format='identity')
    # f_adj_mx = None
    if os.path.isfile(args.folder_name + 'f_adj_mx.npy'):
        f_adj_mx = np.load(args.folder_name + 'f_adj_mx.npy')
    else:
        f_adj_mx = train_loader.get_flow_adj_mx()
        np.save(args.folder_name + 'f_adj_mx.npy', f_adj_mx)
    #
    #
    if args.filter_type == 'laplacian':
        w = np.load(args.folder_name + 'w.npy')
        # w = np.array(w, dtype=np.float32)
        W = get_rescaled_W(w, delta=args.delta, epsilon=args.epsilon)
        # Calculate graph kernel
        L = scaled_laplacian(W)
        #
        f_adj_mx = L

    if args.model == 'FC_LSTM':
        model = FC_LSTM(num_station,
                        args.input_steps,
                        num_layers=args.num_layers,
                        num_units=args.num_units,
                        batch_size=args.batch_size)
    elif args.model == 'FC_GRU':
        model = FC_GRU(num_station,
                       args.input_steps,
                       num_layers=args.num_layers,
                       num_units=args.num_units,
                       batch_size=args.batch_size)
    elif args.model == 'GCN':
        model = GCN(num_station,
                    args.input_steps,
                    num_layers=args.num_layers,
                    num_units=args.num_units,
                    dy_adj=args.dy_adj,
                    dy_filter=args.dy_filter,
                    f_adj_mx=f_adj_mx,
                    trained_adj_mx=args.trained_adj_mx,
                    filter_type=args.filter_type,
                    batch_size=args.batch_size)
    elif args.model == 'flow_GCN':
        model = flow_GCN(num_station,
                         args.input_steps,
                         num_layers=args.num_layers,
                         num_units=args.num_units,
                         f_adj_mx=f_adj_mx,
                         trained_adj_mx=args.trained_adj_mx,
                         filter_type=args.filter_type,
                         batch_size=args.batch_size)
    elif args.model == 'Coupled_GCN':
        model = Coupled_GCN(num_station,
                            args.input_steps,
                            num_layers=args.num_layers,
                            num_units=args.num_units,
                            f_adj_mx=f_adj_mx,
                            trained_adj_mx=args.trained_adj_mx,
                            filter_type=args.filter_type,
                            batch_size=args.batch_size)
    else:
        raise ValueError('unsupported model: {}'.format(args.model))
    #
    model_path = os.path.join(args.output_folder_name, 'model_save',
                              args.model_save)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    #model_path = os.path.join(args.folder_name, 'model_save', args.model_save)
    solver = ModelSolver(
        model,
        train_loader,
        val_loader,
        test_loader,
        pre_process,
        batch_size=args.batch_size,
        show_batches=args.show_batches,
        n_epochs=args.n_epochs,
        pretrained_model=args.pretrained_model_path,
        update_rule=args.update_rule,
        learning_rate=args.learning_rate,
        model_path=model_path,
    )
    results_path = os.path.join(model_path, 'results')
    if not os.path.exists(results_path):
        os.makedirs(results_path)
    if args.train:
        print('==================== begin training ======================')
        test_target, test_prediction = solver.train(
            os.path.join(model_path, 'out'))
        np.save(os.path.join(results_path, 'test_target.npy'), test_target)
        np.save(os.path.join(results_path, 'test_prediction.npy'),
                test_prediction)
    if args.test:
        print('==================== begin test ==========================')
        test_target, test_prediction = solver.test()
        np.save(os.path.join(results_path, 'test_target.npy'), test_target)
        np.save(os.path.join(results_path, 'test_prediction.npy'),
                test_prediction)
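# Added entry-point guard (not shown in the excerpt) so the script can be run
# from the command line; the file name "main.py" and the flag values are illustrative.
if __name__ == '__main__':
    # e.g. python main.py --model GCN --n_epochs 20 --batch_size 8 --train 1
    main()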