def save_pac_mat(data, filename):
    """
    Save PCA-transformed data to a .mat file. For example, if the original file is
    test.mat, the result is saved to test_pca.mat under the key 'test_pca'.
    :param data: array of PCA-transformed data to save
    :param filename: path of the original file, for example: ../data/googlenet_lfw.mat
    """
    pca_file = get_pca_filename(filename)
    savemat(pca_file, {(pca_file.split('/')[-1]).split('.')[0]: data})
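The helper get_pca_filename is not shown in this example. A minimal sketch of the naming convention described in the docstring (a hypothetical implementation that inserts a _pca suffix before the extension) might look like this:

import os

def get_pca_filename(filename):
    # hypothetical helper: '../data/googlenet_lfw.mat' -> '../data/googlenet_lfw_pca.mat'
    base, ext = os.path.splitext(filename)
    return base + '_pca' + ext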
Example #2
def creEmbeddingMat_HLBL():
    rfile = open(u"I:/数据/embedings/hlbl/hlbl_reps_clean_2.50d.rcv1.clean.tokenized-CoNLL03.case-intact.txt")
    embeddingMat = np.zeros([246122,50],dtype=np.float64)
    wordList = []
    # print(embeddingMat.shape)
    lines = rfile.readlines()
    rfile.close()
    print(len(lines))
    for i in range(0,len(lines)):
        line = lines[i]
        sp_index=line.index(" ")
        # print line
        word = line[0:sp_index].strip().lower()
        # print(word)
        wordList.append(word)
        print(len(wordList))
        while line[sp_index+1]==" ":
            sp_index+=1
        linelist=line[sp_index+1:].split()

        for j in range(len(linelist)):
            embeddingMat[i][j] = linelist[j]
    sio.savemat(u"I:/数据/embedings/hlbl/embedding.mat",{"embedding":embeddingMat})
    wfile = open(u"I:/数据/embedings/hlbl/words.txt","w")
    wfile.writelines(item+'\n' for item in wordList)
    wfile.close()
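A short usage sketch (assuming the two files written above): row i of the saved 'embedding' matrix is the 50-d vector for the i-th word in words.txt.

import scipy.io as sio

embedding = sio.loadmat(u"I:/数据/embedings/hlbl/embedding.mat")["embedding"]
with open(u"I:/数据/embedings/hlbl/words.txt") as f:
    words = [line.rstrip('\n') for line in f]
vec = embedding[words.index("the")]  # 50-d row vector, assuming "the" is in the vocabulary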
Example #3
    def to_file_map(self, file_map=None):
        ''' Write image to `file_map` or contained ``self.file_map``

        Extends Analyze ``to_file_map`` method by writing ``mat`` file

        Parameters
        ----------
        file_map : None or mapping, optional
           files mapping.  If None (default) use object's ``file_map``
           attribute instead
        '''
        if file_map is None:
            file_map = self.file_map
        super(Spm99AnalyzeImage, self).to_file_map(file_map)
        mat = self._affine
        if mat is None:
            return
        import scipy.io as sio
        hdr = self._header
        if hdr.default_x_flip:
            M = np.dot(np.diag([-1, 1, 1, 1]), mat)
        else:
            M = mat
        # Adjust for matlab 1,1,1 voxel origin
        from_111 = np.eye(4)
        from_111[:3,3] = -1
        M = np.dot(M, from_111)
        mat = np.dot(mat, from_111)
        # use matlab 4 format to allow gzipped write without error
        with file_map['mat'].get_prepare_fileobj(mode='wb') as mfobj:
            sio.savemat(mfobj, {'M': M, 'mat': mat}, format='4')
Example #4
File: model.py Project: daskol/nls
    def store(self, filename=None, label=None, desc=None, date=None):
        """Store object to mat-file. TODO: determine format specification
        """
        date = date if date else datetime.now()
        date = date.replace(microsecond=0).isoformat()
        filename = filename if filename else date + '.mat'

        matfile = {
            'model': str(type(self)),
            'date': date,
            'dim': len(self.init_sol.shape),
            'dimlesses': self.coeffs,
            'init_solution': self.init_sol,
            'num_iters': self.num_iters,
            'num_nodes': self.num_nodes,
            'order': self.order,
            'originals': self.originals,
            'pumping': self.getPumping(),
            'spatial_step': self.dx,
            'time_step': self.dt,
        }

        if desc:
            matfile['desc'] = desc
        if label:
            matfile['label'] = label

        savemat(filename, matfile)
def diagrams2cellarray( dia_list, outname, chop_inf=True, mat_type=np.float ):
    """dia_list : n-length list of k x 2 diagrams

    outname : name of output file. '.mat' will be automatically appended.

    Optional:
    --------

    chop_inf : Remove the row corresponding to the infinite generator.

    mat_type : some MATLAB programs expect a certain data type for
    diagrams (e.g. an "Error using '+'" may be thrown otherwise). Defaults to
    double. Standard options shouldn't diverge far from np.int, np.float,
    np.int64, etc.

    Recipe from http://docs.scipy.org/doc/scipy/reference/tutorial/io.html#matlab-cell-arrays

    """
    n = len( dia_list )
    # object array to hold different length diagrams. Exclude the last
    # (inf) generator if chop_inf==True.
    C = np.zeros( (n,), dtype=np.object )
    
    for i,d in enumerate( dia_list ):
        # exclude last row
        if chop_inf:
            d = d[:-1]
        if d.dtype != mat_type:
            d = d.astype( mat_type )
        C[i] = d
    sio.savemat( outname+'.mat', { 'diagrams': C } )
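A brief usage sketch with two made-up diagrams of different lengths, showing what ends up in the MATLAB cell array:

import numpy as np

d1 = np.array([[0.0, 1.2], [0.3, 2.5], [0.0, np.inf]])
d2 = np.array([[0.1, 0.9], [0.0, np.inf]])
diagrams2cellarray([d1, d2], 'my_diagrams')   # hypothetical output name -> my_diagrams.mat
# In MATLAB: load('my_diagrams.mat'); diagrams{1} is 2x2, diagrams{2} is 1x2,
# because the infinite generators were chopped (chop_inf defaults to True).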
Example #6
 def run_altered(self, depth):
     x = 500
     y = 200
     initial_value = 3
     num_its = 2000
     
     self.initialise_grid(y, x, initial_value)
     
     self.write_file()
     
     self.initialise_shadow_map()
     
     self.num_iterations = num_its
     
     # Standard parameter values
     self.jump_length = 1
     self.pd_s = 0.6
     self.pd_ns = 0.4
     
     self.create_gradual_grid(depth)
     
     self.altered = True
     
     self.avcount = np.zeros(num_its + 1)
     
     # Run the model
     self.main_loop()
     
     print self.avcount
     io.savemat("Counts.mat", { "count":self.avcount})
     np.save("Counts.npy", self.avcount)
Example #7
def vti2mat(fileIn, fileOut):
    """Convert voxel-array from VTI to MAT (MATLAB(R)) format.
    
    Parameters
    ----------
    fileIn : str
        Path for input VTI file.
        
    fileOut : str
        Path for output MAT file.
        
    """
    
    import numpy as np
    import vtk
    import scipy.io as sio
    from vtk.util import numpy_support as nps
    from math_utils import lcmm
    
    reader = vtk.vtkXMLImageDataReader()
    reader.SetFileName(fileIn)
    reader.Update()
    vtkImageData = reader.GetOutput()
    dim = vtkImageData.GetDimensions()
    flatV = nps.vtk_to_numpy(vtkImageData.GetPointData().GetScalars())
    V = flatV.reshape(dim[::-1])
    spacing = np.array(vtkImageData.GetSpacing())[::-1]
    estimatedFactors = lcmm(*spacing) / spacing
    estimatedVoxelSize = 1. / estimatedFactors
    sio.savemat(fileOut, {'volume':V, 'spacing': spacing, 'estimated_voxel_size': estimatedVoxelSize})
Example #8
def run_jobdef(jobdef):
    with InTemporaryDirectory():
        savemat('pyjobs.mat', jobdef)
        run_matlab_script("""
load pyjobs;
spm_jobman('run', jobs);
""")
Example #9
 def run(self, y, x, initial_value, num_its, altered):
     """Run the model with the arguments: y_length, x_length, initial_value, number_of_iterations, altered"""
     self.initialise_grid(y, x, initial_value)
     
     self.write_file()
     
     self.initialise_shadow_map()
     
     self.num_iterations = num_its
     
     # Standard parameter values
     self.jump_length = 1
     self.pd_s = 0.6
     self.pd_ns = 0.4
     
     self.altered = altered
     
     if self.altered == True:
         # Create the depth grid
         self.depth = np.load("Gradual_Stepped_Full.npy")
     
     self.avcount = np.zeros(num_its + 1)
     
     # Run the model
     self.main_loop()
     
     print self.avcount
     io.savemat("Counts.mat", { "count":self.avcount})
     np.save("Counts.npy", self.avcount)
Example #10
def RR_validationcurve(sspacing, tspacing, RR_lambda_opt, lambdas_range): 
    """
    Compute the Ridge Regression (RR) validation curve over a range of regularization parameters and save the train/test MSE to a .mat file

    Parameters
    ----------
    sspacing : 2D subsampling ratio in space (in one direction)

    tspacing : 1D subsampling ratio in time

    RR_lambda_opt : optimal regularization parameter given from RR_cv_estimate_alpha(sspacing, tspacing, alphas)

    lambdas_range : range of regularization parameters to evaluate, e.g. np.logspace(-2, 4, 28)
    
    """
    
    # lambdas_range= np.logspace(-2, 4, 28)

    #Load all training data
    (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) =  data_preprocess(sspacing, tspacing) 
    
    # validation curve
    from sklearn.linear_model import Ridge
    from sklearn.learning_curve import validation_curve
    
    train_MSE, test_MSE = validation_curve(Ridge(),Xl_tr, Xh_tr, param_name="alpha", param_range=lambdas_range, 
                                                 scoring = "mean_squared_error", cv=10)    
    
    # The API always tries to maximize a score, so the MSE values come back with flipped sign
    train_MSE = -train_MSE
    test_MSE = -test_MSE
    # save to .mat file    
    import scipy.io as sio
    sio.savemat('/data/PhDworks/isotropic/regerssion/RR_crossvalidation.mat', 
                dict(lambdas_range=lambdas_range, train_MSE = train_MSE, test_MSE = test_MSE))    
    
    return (train_MSE, test_MSE)
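validation_curve returns one MSE value per (lambda, fold) pair, so a typical follow-up averages over the 10 CV folds and picks the lambda with the lowest mean test MSE. A minimal sketch, with hypothetical subsampling ratios:

import numpy as np

lambdas_range = np.logspace(-2, 4, 28)
train_MSE, test_MSE = RR_validationcurve(sspacing=3, tspacing=4,    # hypothetical ratios
                                         RR_lambda_opt=None,        # not used by the function body
                                         lambdas_range=lambdas_range)
mean_test_MSE = test_MSE.mean(axis=1)                  # average over the CV folds
best_lambda = lambdas_range[np.argmin(mean_test_MSE)]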
Example #11
def RR_cv_estimate_alpha(sspacing, tspacing, alphas):
    """
    Estimate the optimal regularization parameter using grid search from a list
    and via k-fold cross validation

    Parameters
    ----------
    sspacing : 2D subsampling ratio in space (in one direction)

    tspacing : 1D subsampling ratio in time

    alphas : list of regularization parameters to do grid search
    
    """
    #Load all training data
    (Xl_tr, mea_l, sig_l, Xh_tr,mea_h,sig_h) =  data_preprocess(sspacing, tspacing)  
    
    # RidgeCV
    from sklearn.linear_model import RidgeCV    
    ridge = RidgeCV(alphas = alphas, cv = 10, fit_intercept=False, normalize=False)
    ridge.fit(Xl_tr, Xh_tr)
    
    RR_alpha_opt = ridge.alpha_
    
    print('\n Optimal lambda:', RR_alpha_opt)
    
    # save to .mat file
    import scipy.io as io
    filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_cv_alpha_sspacing',
                        str(sspacing),'_tspacing',str(tspacing),'.mat'])
    io.savemat(filename, dict(alphas=alphas, RR_alpha_opt=RR_alpha_opt))
    
    # return
    return RR_alpha_opt
Example #12
File: SSAH.py Project: StatML/SSAH
    def test(self, phase):
        test = {}
        print '=========================================================='
        print '  ====                 Test map in all              ===='
        print '=========================================================='

        if phase == 'test' and self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        test['qBX'] = self.generate_code(self.query_X, self.bit, "image")
        test['qBY'] = self.generate_code(self.query_Y, self.bit, "text")
        test['rBX'] = self.generate_code(self.retrieval_X, self.bit, "image")
        test['rBY'] = self.generate_code(self.retrieval_Y, self.bit, "text")

        test['mapi2t'] = calc_map(test['qBX'], test['rBY'], self.query_L, self.retrieval_L)
        test['mapt2i'] = calc_map(test['qBY'], test['rBX'], self.query_L, self.retrieval_L)
        test['mapi2i'] = calc_map(test['qBX'], test['rBX'], self.query_L, self.retrieval_L)
        test['mapt2t'] = calc_map(test['qBY'], test['rBY'], self.query_L, self.retrieval_L)
        print '=================================================='
        print '...test map: map(i->t): %3.3f, map(t->i): %3.3f' % (test['mapi2t'], test['mapt2i'])
        print '...test map: map(t->t): %3.3f, map(i->i): %3.3f' % (test['mapt2t'], test['mapi2i'])
        print '=================================================='

        # Save hash code
        datasetStr = DATA_DIR.split('/')[-1]
        dataset_bit_net = datasetStr + str(bit) + netStr
        savePath = '/'.join([os.getcwd(), 'Savecode', dataset_bit_net + '.mat'])
        if os.path.exists(savePath):
            os.remove(savePath)
        sio.savemat(savePath, {'Qi': test['qBX'], 'Qt': test['qBY'],
                               'Di': test['rBX'], 'Dt': test['rBY'],
                               'retrieval_L': L['retrieval'], 'query_L': L['query']})
def save_x(tmpdir,x,i):
	# handle both vector and matrix inputs
	if x.ndim == 1:
		sio.savemat(tmpdir+"/x_"+`i`+".mat", {'x': x})	
	else:
		sio.savemat(tmpdir+"/x_"+`i`+".mat", {'x': x[:,i]})
	return 
Example #14
def Write_Mat(filename,field):
    """output the result in .mat format for matlab"""

    file = open(filename, "wb")   # Open file for writing (MAT files are binary)
    io.savemat(file, field, appendmat=True, format='5', long_field_names=False)
    file.close()
    return None
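Since savemat also accepts a filename directly (appendmat then adds the .mat extension if it is missing), the manual open/close can be skipped. An equivalent sketch (the name Write_Mat_simple is only for illustration):

import scipy.io as io

def Write_Mat_simple(filename, field):
    """Equivalent helper that lets savemat manage the file itself."""
    io.savemat(filename, field, appendmat=True, format='5', long_field_names=False)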
def update_extra_mat(matfile,to_remove):
    """ updates the time_frames, confounds and mask_suppressed arrays to
    reflect the removed volumes. However, does not change other items in
    _extra.mat file

    """

    mat = loadmat(matfile)
    # update time_frames
    ntf = np.delete(mat['time_frames'][0],to_remove)
    mat.update({'time_frames': ntf})

    # update confounds
    ncon = np.delete(mat['confounds'],to_remove,axis = 0)
    mat.update({'confounds': ncon})

    # update mask_suppressed
    ms = mat['mask_suppressed']
    for supp in to_remove:
        ms[supp][0] = 1
    mat.update({'mask_suppressed': ms})

    # save updated mat file
    jnk, flnme = os.path.split(matfile)
    savemat(os.path.join(output_dir,flnme),mat)
Example #16
def power_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("path_to_input",
                        help="Specify the path of the input data set")
    parser.add_argument("-p", "--power", type=int,
                        help="Specify to which power to raise the matrix to")
    parser.add_argument("-w", "--walk", help="Calculate (I-A)^-1",
                        action="store_true")
    parser.add_argument("-e", "--exp", help="Calculate exp(A)",
                        action="store_true")
    parser.add_argument("-r", "--restrict", 
                        help="Restrict the elements of the "
                        "transformed matrix to the coordinates of the nonzero "
                        "elements of the original matrix.",
                        action="store_true")
    parser.add_argument("--recip", action="store_true", 
                        help="Symmetrize by reciprocal ties")
    parser.add_argument("--symmetrize", action="store_true", 
                        help="Symmetrize by the mean of entry ij and ji")
    parser.add_argument("-lcc", "--components", action="store_true", 
                        help="Extract the largest connected component")
    parser.add_argument("path_to_output", \
        help="Specify where to save output")

    args = parser.parse_args()
    in_path = args.path_to_input
    out_path = args.path_to_output
    if os.path.isfile(in_path):
        filename, ending = os.path.splitext(in_path)
        out_path, out_ending = os.path.splitext(out_path)
        try:
            A = main.get_graph(in_path)
        except IOError:
            print("File format not recognized")
        else:
            if args.power:
                mat = matrix_power(A, args.power)
            elif args.walk:
                mat = walk_generator(A)
            elif args.exp:
                mat = exponentiate(A)
            else:
                mat = A

            if args.recip:
                mat = reciprocal_ties(mat)
            elif args.symmetrize:
                mat = symmetrize(mat)
            
            if args.components:
                mat = extract_largest_component(mat)

            if args.restrict:
                mat = edge_restriction(mat, A)

            if out_path:
                io.savemat(out_path, {'mat': mat}, do_compression=True,
                           oned_as='row')
    else:
        print("Specify a valid input-file")
def main(argv):
	leveldb_name = sys.argv[1]
	print "%s" % sys.argv[1]
	print "%s" % sys.argv[2]
	print "%s" % sys.argv[3]
	print "%s" % sys.argv[4]
	# window_num = 1000;
	# window_num = 12736;
	window_num = int(sys.argv[2]);
	# window_num = 2845;

	start = time.time()
	if 'db' not in locals().keys():
		db = leveldb.LevelDB(leveldb_name)
		datum = feat_helper_pb2.Datum()

	ft = np.zeros((window_num, int(sys.argv[3])))

	for im_idx in range(window_num):
		datum.ParseFromString(db.Get(str(im_idx)))
		ft[im_idx, :] = datum.float_data

	print 'time 1: %f' %(time.time() - start)
	sio.savemat(sys.argv[4], {'feats':ft},oned_as='row')
	print 'time 2: %f' %(time.time() - start)
	print 'done!'
def run_dense_sift_matlab(img_names, data_names, sizes):
  '''
  Calculates dense sift using matlab.
  img_names - list of paths to images.
  data_names - list of paths where to save the results. The result for img_names[i]
  will be saved in a file called data_names[i].
  '''
  # convert lists to numpy object arrays. These will be loaded as cell arrays
  # in matlab.

  img_cell = np.array(img_names, dtype=np.object)
  data_cell = np.array(data_names, dtype=np.object)
  directory_name = './tmp'
  util.makedir_if_needed(directory_name)
  
  sio.savemat(os.path.join(directory_name, 'data.mat'),
               {'img_cell':img_cell, 'data_cell': data_cell,
                'sizes': sizes})


  cmd_params = '''-nodisplay -nodesktop -nosplash -r "dense_sift('{}'); quit" '''.format(directory_name)


  print 'calling matlab with params: {}'.format(cmd_params)
  res = matlab(cmd_params)
  print res
  
  # remove the tmp dir
  shutil.rmtree(directory_name)
def saveData(dat1,mbxNr):
	print "PLEASE be patient, file is getting saved to disk."
	now = datetime.datetime.now()
	str = "data_" + now.strftime("%Y-%m-%d_%H-%M") + "_mbx{}".format(mbxNr) + ".mat"
	data = dat1[0:j,:]
	sio.savemat(str, {'data': data}, oned_as='row')
	print "finished saving .mat file to disk, filename is "+str+"!\n\n"
def saveClfOut(QuadDir,OutDir,halfwins,Numberofframe,svmclf,nbc):
              
    for halfwin in halfwins:
            outFile = '{}Classfication_nbc_{}_halfwin{}.mat'.format(OutDir,str(nbc),str(halfwin))
#        if not os.path.isfile(outFile):
            FVsFile = "{}FVS/FVsnbc_{}_halfwin_{}.mat".format(QuadDir,str(nbc),str(halfwin))
            fvs = sio.loadmat(FVsFile)['fvs']
            vecAllfvs = np.zeros((Numberofframe,nbc*13))
            isFrame_labeled = np.zeros(Numberofframe)
            i = 0;
            for fnum in xrange(Numberofframe):
                fvsum = np.sum(fvs[fnum])
                if abs(fvsum)>0:
                    vecAllfvs[i,:] = fvs[fnum,:]
                    isFrame_labeled[fnum] = 1
                    i+=1          

            vecAllfvs = vecAllfvs[:i,:]
            vecAllfvs = mytools.power_normalize(vecAllfvs,0.2)
            frame_probs = svmclf.predict_proba(vecAllfvs)
            frame_label = svmclf.predict(vecAllfvs)
            frame_probstemp  = np.zeros((Numberofframe,20))
            frame_probstemp[isFrame_labeled>0,:] = frame_probs
            frame_labelstemp  = np.zeros(Numberofframe)
            frame_labelstemp[isFrame_labeled>0]=frame_label
            print 'saving to ' , outFile
            sio.savemat(outFile,mdict={'frame_probs':frame_probstemp, 'frame_label':frame_labelstemp, 
            'isFrame_labeled':isFrame_labeled})
Example #21
def graphml2mat(ingraph, outgraph, prune=False):
	ing = Graph.Read_GraphML(ingraph)
	
	if sum(ing.es()[:]['weight']) < 500000:
		print 'bad graph? ecount= ' , sum(ing.es()[:]['weight'])
		print 'filename= ', ingraph
		return;

	#currently being done in graphgen so don't need to delete vertex 0
	#ing.vs[0].delete() 
	if prune:
		#delete zero degree nodes
		#GK TODO: be smarter
		i = list()
		for n, v in enumerate(ing.vs):
			if v.degree() == 0:
				i.append(n)
		ing.vs[i].delete()
	
	outg = lil_matrix((ing.vcount(), ing.vcount()))
	#import pdb; pdb.set_trace()
	for e in ing.es:
		outg[e.source, e.target] = e['weight']
		outg[e.target, e.source] = e['weight'] #since edges are undirected add both ways

	outg = triu(outg)
	mat_dict = {"graph": outg}
	savemat(outgraph, mat_dict)
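savemat stores SciPy sparse matrices as MATLAB sparse arrays, and loadmat returns them as csc_matrix objects. A short round-trip sketch, with a hypothetical output filename:

from scipy.io import loadmat

adj = loadmat('graph.mat')['graph']   # csc_matrix holding the upper-triangular weights
dense = adj.toarray()                 # convert to a dense ndarray if needed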
def main():
	train_data_dir = sys.argv[1]
	train_theta_dir = sys.argv[2] 
	DATA_DIR = '/media/beomjoon/New Volume/partial_path_suggestion/' + train_data_dir + '/'
	THETA_DIR = '/media/beomjoon/New Volume/partial_path_suggestion/' + train_theta_dir  + '/thetas/'
	REWARD_DIR = DATA_DIR + 'speed_reward_mat_using_'+train_theta_dir +"/"
	if not os.path.isdir(REWARD_DIR):
		os.mkdir(REWARD_DIR)

	theta_values=[]
	theta_augmented_with_grasp = []
	thetas,pregrasp_configs,goal_configs = get_all_thetas(THETA_DIR)
	
	if len(sys.argv) < 3:
		print "Please input training file and theta directories"
		return -1


	env_file_list = os.listdir(DATA_DIR+'/env_files/')

	reward_matrix = []
	for env_idx in range(len(env_file_list)):
		train_env_f_name = get_file_name_with_given_idx(env_file_list,env_idx)

		# load the environment file
		print train_env_f_name

		env = Environment()
		env.Load(DATA_DIR+'env_files/'+train_env_f_name)

#		simple_prob=separate(env,0)
		floor = env.GetKinBody("floorwalls")
		floor.Enable(False)
	
		#env.SetViewer('qtcoin')
		#restore_env(env,train_env_f_name)

		
		pregrasp_config = pregrasp_configs[env_idx]
		goal_config = goal_configs[env_idx]

		robot = env.GetRobots()[0]
		manipulator = robot.SetActiveManipulator('leftarm_torso') 
		robot.SetActiveDOFs(manipulator.GetArmIndices())
		robot.SetActiveDOFValues(pregrasp_config)

#		traj,planning_time,prep_time,plan_status = getMotionPlan(robot,goal_config,env)	
		env_theta_vals = []
		s_time = time.time()
		for theta in thetas:
			print "speed!!! DATA_DIR = " + DATA_DIR
			print "THETA_DIR = " + THETA_DIR
			print "REWARD_DIR = " + REWARD_DIR	
			reward = via_sg_eval_theta(theta,pregrasp_config,goal_config,robot,env)
			env_theta_vals.append(reward)
		print time.time()-s_time
		reward_matrix.append(env_theta_vals)
		sio.savemat(REWARD_DIR+ 'reward_matrix'+str(env_idx)+\
				'.mat',{'reward_matrix':reward_matrix} )
		env.Destroy()
 def write_file(self, ofile):
     '''
     Writes the file
     '''
     # Creates the Matlab dictionary
     mat_dict = {'longitude': self.longitude,
                 'latitude': self.latitude,
                 'imls': self.imls,
                 'imt': self.meta_info['imt'],
                 'period': None,
                 'damping': None,
                 'curves': self.curves,
                 'statistics': self.meta_info['statistics'],
                 'investigation_time': self.investigation_time}
     if self.meta_info['imt'] == 'SA':
         mat_dict['period'] = self.meta_info['period']
         mat_dict['damping'] = self.meta_info['damping']
     elif self.meta_info['imt'] == 'PGA':
         mat_dict['period'] = 0.
         
         mat_dict['damping'] = np.nan
     else:
         pass
     # Save to binary
     savemat(ofile, mat_dict, oned_as='row')
Example #24
 def __init__(self, userParameters):
     
     
     
     num = 7+8
     VV = np.linspace(-0.6, 0.6, num)
     RR = []
     
     
     for V in VV:
         
         print("Start V = ", V)
         tic = time.clock()
         
         lL = tb.Lead(1.2, 2.0, -0.4, -0.4, 300, 0, 0)
         T1 = "-0.4, 0.0; 0.0, -0.4"
         B1 = tb.Insulator(5.4, 5.4, -0.4, -0.4, 5, 0, 0, V)
         T2 = "-0.4, 0.0; 0.0, -0.4"
         lR = tb.Lead(1.2, 2.0, -0.4, -0.4, 300, pi/2, V)
         wire = tb.Chain(lL, T1, B1, T2, lR)
         
         RR.append(self.__func3__(wire, V))
         print("Result = ", RR[-1])
         toc = time.clock()
         print("Δt = ", toc-tic, "sec\n")
         sio.savemat("result.mat", {"VV": VV, "RR": RR})
     
     print("All calculation finish")
Example #25
 def write(array, file, format, varname=None):
     if format == 'matlab':
         savemat(file+".mat", mdict={varname: array}, oned_as='column', do_compression='true')
     if format == 'npy':
         save(file, array)
     if format == 'text':
         savetxt(file+".txt", array, fmt="%.6f")
Example #26
def debug_ana_speed(nnode=1000):
    # get the speed of the series solution for the calculation of the Stokeslets.
    node = np.random.sample(nnode * 3).reshape((-1, 3))
    b = 0.5

    from time import time

    cth_list = np.arange(10, 1000, 10)
    dt = np.zeros_like(cth_list, dtype=np.float)
    for i0, cth in enumerate(cth_list):
        greenFun = detail(threshold=cth, b=b)
        t0 = time()
        greenFun.solve_prepare()
        greenFun.solve_uxyz(node)
        t1 = time()
        dt[i0] = t1 - t0
        PETSc.Sys.Print('cth=%d: solve stokeslets analytically use: %fs' % (cth, dt[i0]))

    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    if rank == 0:
        savemat('debug_ana_speed.mat',
                {'cth':    cth_list,
                 'dt_ana': dt,
                 'node':   node, },
                oned_as='column')
    return True
def saveFVs(QuadDir,NumberofFrame,gmm,nbc,halfWindows):
    savefilename = "{}FVS/FVsnbc_{}_halfwin_{}.mat".format(QuadDir,str(nbc),str(halfWindows[-1])) 
#    if not os.path.isfile(savefilename):
    XX = []
    print "Saving Framwise FVS for ", QuadDir;
    for numFrame in range(1,NumberofFrame+1):
        filename = '{}desc{}.mat'.format(QuadDir,str(numFrame).zfill(5))
        Quads  = sio.loadmat(filename)['QuadDescriptors']
        XX.append(Quads)
    num = np.shape(XX)[0]
#    fvs = np.zeros((NumberofFrame,nbc*13))
    Allfvs = [np.zeros((NumberofFrame,nbc*13)) for k in range(len(halfWindows)) ]
#    del fvs
    print np.shape(Allfvs),' one ',np.shape(Allfvs[0])
    for numFrame in xrange(1,NumberofFrame+1):
        wincount = -1
        for halfwin in halfWindows:
            wincount+=1
            XXtemp = []
            for fnum in np.arange(max(0,numFrame-halfwin-1),min(numFrame+halfwin,NumberofFrame),1):
                Quads = XX[fnum]
                if np.shape(Quads)[0]>1:
                    XXtemp.extend(Quads)
            num = np.shape(XXtemp)[0]
            if num>0:
                Allfvs[wincount][numFrame-1,:] = mytools.fisher_vector(XXtemp, gmm)
    
    wincount = -1    
    for halfwin in halfWindows:
        wincount+=1
        savefilename = "{}FVS/FVsnbc_{}_halfwin_{}.mat".format(QuadDir,str(nbc),str(halfwin))
        fvs = Allfvs[wincount]
        sio.savemat(savefilename,mdict = {'fvs':fvs})
    def createDictionary(self):
        fileName = QtGui.QFileDialog.getSaveFileName(self , 'Save dictionary to a file' , expanduser('~')+'/dictionary' , 'Matlab file (*.mat);;Python pickle (*.p);;Comma sep. values (*.csv);;Excel file (*.xlsx)')
        if len(fileName) == 0:
            return
        self.displayInformation('Saving dictionary...' , flag='new')
        
        fileName = str(fileName)

        self.setDictionaryConfig()
        self.setAlgorithmConfig()

        config = generateFinalConfig(self.dictionaryConfig , self.dataMatrixes[self.filePath][1] , self.dataMatrixes[self.filePath][2])
        time   = np.arange(0,self.dataMatrixes[self.filePath][1]['numberOfSamples'])
        
        dictionary = generateDictionary(time , config)

        if fileName[-4:] == '.mat':
            dic = {col_name : dictionary[col_name].values for col_name in dictionary.columns.values}
            savemat(fileName , dic)
        elif fileName[-2:] == '.p':
            dictionary.to_pickle(fileName)
        elif fileName[-4:] == '.csv':
            dictionary.to_csv(fileName)
        elif fileName[-5:] == '.xlsx':
            dictionary.to_excel(fileName, sheet_name='dictionary')
        
        self.displayInformation('Dictionary saved.' , flag='new')
Example #29
def do_export_mat(fileHandle, b, f1_list, f2_list, f3_list, residualNorm, err, dp, ep, lp, rp, th, with_cover,
                  stokesletsInPipe_pipeFactor, vp_nodes, fp_nodes):
    comm = PETSc.COMM_WORLD.tompi4py()
    rank = comm.Get_rank()
    fileHandle = check_file_extension(fileHandle, extension='_force_pipe.mat')
    if rank == 0:
        savemat(fileHandle,
                {'b':                           b,
                 'f1_list':                     f1_list,
                 'f2_list':                     f2_list,
                 'f3_list':                     f3_list,
                 'residualNorm':                residualNorm,
                 'err':                         err,
                 'dp':                          dp,
                 'ep':                          ep,
                 'lp':                          lp,
                 'rp':                          rp,
                 'th':                          th,
                 'with_cover':                  with_cover,
                 'stokesletsInPipe_pipeFactor': stokesletsInPipe_pipeFactor,
                 'vp_nodes':                    vp_nodes,
                 'fp_nodes':                    fp_nodes},
                oned_as='column')
    PETSc.Sys().Print('export mat file to %s ' % fileHandle)
    pass
Example #30
def extract_word2vec_feature(model_name, cache_file, output=None):
    """
    Extract word2vec feature with cleaned news
    :param model_name: word2vec model name
    :param cache_file: news cache file path
    :param output: path to save extracted features
    :return: feature file path
    """
    if not output:
        output = get_word2vec_feature_file(cache_file)
    logger.info('Word2vec feature will be extracted to: {}'.format(output))
    word2vec_model = Word2VecModel(model_name)
    num_samples, cleaned_news_cache = load_cache(cache_file,
                                                 __CACHE_KEY_NUM_IDS__,
                                                 __CACHE_KEY_CLEANED_NEWS__)
    _features = np.zeros((num_samples, word2vec_model.model.vector_size))
    pbar = data_util.get_progress_bar(num_samples)
    pbar.start()
    count = 0
    for words in SentenceIterator(cleaned_news_cache):
        _features[count] = word2vec_model.get_word_vector(words)
        count += 1
        pbar.update(count)
    pbar.finish()

    collection = {
        'pooling': __POOLING__,
        'norm': __NORM__,
        __CACHE_KEY_WORD2VEC_MODEL__: model_name,
        __CACHE_KEY_FEATURE__: _features
    }
    collection.update(load_cache(cache_file))
    sio.savemat(output, collection)
    logger.info('feature saved to: {}'.format(output))
    return output
    # segmentation
    SMT_tr = ob.segmentation(CNT_tr, t_interval)
    SMT_te = ob.segmentation(CNT_te, t_interval)

    del CNT_te, CNT_tr

    # # class selection
    # SMT_tr = ob.class_selection(SMT_tr, ['right', 'left'])

    # feature extraction
    SMT_tr, CSP_W= ob.common_spatial_pattern(SMT_tr, n_pattern)
    SMT_te = ob.project_CSP(SMT_te, CSP_W)

    FT_tr = ob.log_variance(SMT_tr)
    FT_te = ob.log_variance(SMT_te)

    del SMT_te, SMT_tr

    # classification
    sh_LDA_motor = ob.shrinkage_LDA(FT_tr)
    OUT_lda_motor = ob.project_shLDA(FT_te, sh_LDA_motor)

    del FT_te, FT_tr

    accuracy[ii,0] = OUT_lda_motor
    count = count + 1

    print(ii, OUT_lda_motor)

sio.savemat('D:\Code\BCI_zero_tr/accuracy_csp_butter_order2', {'accuracy': accuracy}) 
def Ridge_OptimalAlpha_LOOCV(Training_Data, Training_Score, Alpha_Range,
                             ResultantFolder, Parallel_Quantity):
    #
    # Select optimal regularization parameter using nested LOOCV
    #
    # Training_Data:
    #     n*m matrix, n is subjects quantity, m is features quantity
    # Training_Score:
    #     n*1 vector, n is subjects quantity
    # Alpha_Range:
    #     Range of alpha, the regularization parameter balancing the training error and L2 penalty
    #     Our previous paper used (2^(-10), 2^(-9), ..., 2^4, 2^5), see Cui and Gong (2018), NeuroImage
    # ResultantFolder:
    #     Path of the folder storing the results
    # Parallel_Quantity:
    #     Parallel multi-cores on one single computer, at least 1
    #

    Subjects_Quantity = len(Training_Score)

    Inner_Predicted_Score = np.zeros((Subjects_Quantity, len(Alpha_Range)))
    Alpha_Quantity = len(Alpha_Range)
    for k in np.arange(Subjects_Quantity):

        Inner_Fold_K_Data_test = Training_Data[k, :]
        Inner_Fold_K_Data_test = Inner_Fold_K_Data_test.reshape(1, -1)
        Inner_Fold_K_Score_test = Training_Score[k]
        Inner_Fold_K_Data_train = np.delete(Training_Data, k, axis=0)
        Inner_Fold_K_Score_train = np.delete(Training_Score, k)

        Scale = preprocessing.MinMaxScaler()
        Inner_Fold_K_Data_train = Scale.fit_transform(Inner_Fold_K_Data_train)
        Inner_Fold_K_Data_test = Scale.transform(Inner_Fold_K_Data_test)

        Parallel(n_jobs=Parallel_Quantity,
                 backend="threading")(delayed(Ridge_SubAlpha_LOOCV)(
                     Inner_Fold_K_Data_train, Inner_Fold_K_Score_train,
                     Inner_Fold_K_Data_test, Inner_Fold_K_Score_test,
                     Alpha_Range[l], l, ResultantFolder)
                                      for l in np.arange(len(Alpha_Range)))

        for l in np.arange(Alpha_Quantity):
            print(l)
            Fold_l_Mat_Path = ResultantFolder + '/Alpha_' + str(l) + '.mat'
            Fold_l_Mat = sio.loadmat(Fold_l_Mat_Path)
            Inner_Predicted_Score[k, l] = Fold_l_Mat['Predicted_Score']
            os.remove(Fold_l_Mat_Path)

    Inner_Evaluation = np.zeros((1, len(Alpha_Range)))
    Inner_Evaluation = Inner_Evaluation[0]
    for l in np.arange(len(Alpha_Range)):
        Corr_tmp = np.corrcoef(Inner_Predicted_Score[:, l], Training_Score)
        Inner_Evaluation[l] = Corr_tmp[0, 1]

    Inner_Evaluation_Mat = {'Inner_Evaluation': Inner_Evaluation}
    sio.savemat(ResultantFolder + '/Inner_Evaluation.mat',
                Inner_Evaluation_Mat)

    Optimal_Alpha_Index = np.argmax(Inner_Evaluation)
    Optimal_Alpha = Alpha_Range[Optimal_Alpha_Index]
    return (Optimal_Alpha, Inner_Evaluation)
def Ridge_LOOCV_Permutation(Subjects_Data, Subjects_Score, Times_IDRange,
                            Alpha_Range, ResultantFolder, Parallel_Quantity,
                            Max_Queued, QueueOptions):

    #
    # Ridge regression with leave-one-out cross-validation (LOOCV)
    #
    # Subjects_Data:
    #     n*m matrix, n is subjects quantity, m is features quantity
    # Subjects_Score:
    #     n*1 vector, n is subjects quantity
    # Times_IDRange:
    #     The index of permutation test, for example np.arange(1000)
    # Alpha_Range:
    #     Range of alpha, the regularization parameter balancing the training error and L2 penalty
    # ResultantFolder:
    #     Path of the folder storing the results
    # Parallel_Quantity:
    #     Parallel multi-cores on one single computer, at least 1
    # Max_Queued:
    #     The maximum jobs to be submitted to SGE cluster at the same time
    # QueueOptions:
    #     Generally is '-q all.q' for SGE cluster
    #

    if not os.path.exists(ResultantFolder):
        os.mkdir(ResultantFolder)
    Subjects_Data_Mat = {'Subjects_Data': Subjects_Data}
    Subjects_Data_Mat_Path = ResultantFolder + '/Subjects_Data.mat'
    sio.savemat(Subjects_Data_Mat_Path, Subjects_Data_Mat)
    Finish_File = []
    Times_IDRange_Todo = np.int64(np.array([]))
    for i in np.arange(len(Times_IDRange)):
        ResultantFolder_I = ResultantFolder + '/Time_' + str(Times_IDRange[i])
        if not os.path.exists(ResultantFolder_I):
            os.mkdir(ResultantFolder_I)
        if not os.path.exists(ResultantFolder_I + '/Res_NFold.mat'):
            Times_IDRange_Todo = np.insert(Times_IDRange_Todo,
                                           len(Times_IDRange_Todo),
                                           Times_IDRange[i])
            Configuration_Mat = {'Subjects_Data_Mat_Path': Subjects_Data_Mat_Path, 'Subjects_Score': Subjects_Score, \
                'Alpha_Range': Alpha_Range, 'ResultantFolder_I': ResultantFolder_I, 'Parallel_Quantity': Parallel_Quantity}
            sio.savemat(ResultantFolder_I + '/Configuration.mat',
                        Configuration_Mat)
            system_cmd = 'python3 -c ' + '\'import sys;\
                sys.path.append("' + os.getcwd() + '");\
                from Ridge_CZ_Sort import Ridge_KFold_Sort_Permutation_Sub;\
                import os;\
                import scipy.io as sio;\
                configuration = sio.loadmat("' + ResultantFolder_I + '/Configuration.mat");\
                Subjects_Data_Mat_Path = configuration["Subjects_Data_Mat_Path"];\
                Subjects_Score = configuration["Subjects_Score"];\
                Alpha_Range = configuration["Alpha_Range"];\
                ResultantFolder_I = configuration["ResultantFolder_I"];\
                Parallel_Quantity = configuration["Parallel_Quantity"];\
                Ridge_LOOCV_Permutation_Sub(Subjects_Data_Mat_Path[0], Subjects_Score[0], Alpha_Range[0], ResultantFolder_I[0], Parallel_Quantity[0][0])\' '

            system_cmd = system_cmd + ' > "' + ResultantFolder_I + '/Ridge.log" 2>&1\n'
            Finish_File.append(ResultantFolder_I + '/Res_NFold.mat')
            script = open(ResultantFolder_I + '/script.sh', 'w')
            script.write(system_cmd)
            script.close()

    Jobs_Quantity = len(Finish_File)
    if len(Times_IDRange_Todo) > Max_Queued:
        Submit_Quantity = Max_Queued
    else:
        Submit_Quantity = len(Times_IDRange_Todo)
    for i in np.arange(Submit_Quantity):
        ResultantFolder_I = ResultantFolder + '/Time_' + str(
            Times_IDRange_Todo[i])
        #Option = ' -V -o "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[i]) + '.o" -e "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[i]) + '.e"';
        #cmd = 'qsub ' + ResultantFolder_I + '/script.sh ' + QueueOptions + ' -N perm_' + str(Times_IDRange_Todo[i]) + Option;
        #print(cmd);
        #os.system(cmd)
        os.system('at -f "' + ResultantFolder_I + '/script.sh" now')
    Finished_Quantity = 0
    while 1:
        for i in np.arange(len(Finish_File)):
            if os.path.exists(Finish_File[i]):
                Finished_Quantity = Finished_Quantity + 1
                print(Finish_File[i])
                del (Finish_File[i])
                print(
                    time.strftime('%Y-%m-%d-%H-%M-%S',
                                  time.localtime(time.time())))
                print('Finish quantity = ' + str(Finished_Quantity))
                if Submit_Quantity < len(Times_IDRange_Todo):
                    ResultantFolder_I = ResultantFolder + '/Time_' + str(
                        Times_IDRange_Todo[Submit_Quantity])
                    #Option = ' -V -o "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[Submit_Quantity]) + '.o" -e "' + ResultantFolder_I + '/perm_' + str(Times_IDRange_Todo[Submit_Quantity]) + '.e"';
                    #cmd = 'qsub ' + ResultantFolder_I + '/script.sh ' + QueueOptions + ' -N perm_' + str(Times_IDRange_Todo[Submit_Quantity]) + Option
                    #print(cmd);
                    #os.system(cmd);
                    os.system('at -f "' + ResultantFolder_I +
                              '/script.sh" now')
                    Submit_Quantity = Submit_Quantity + 1
                break
        if Finished_Quantity >= Jobs_Quantity:
            break
def Ridge_LOOCV(Subjects_Data, Subjects_Score, Alpha_Range, ResultantFolder,
                Parallel_Quantity, Permutation_Flag):
    #
    # Ridge regression with leave-one-out cross-validation (LOOCV)
    # Subjects_Data:
    #     n*m matrix, n is subjects quantity, m is features quantity
    # Subjects_Score:
    #     n*1 vector, n is subjects quantity
    # Alpha_Range:
    #     Range of alpha, the regularization parameter balancing the training error and L2 penalty
    #     Our previous paper used (2^(-10), 2^(-9), ..., 2^4, 2^5), see Cui and Gong (2018), NeuroImage
    # ResultantFolder:
    #     Path of the folder storing the results
    # Parallel_Quantity:
    #     Parallel multi-cores on one single computer, at least 1
    # Permutation_Flag:
    #     1: this is for permutation, then the scores will be permuted
    #     0: this is not for permutation
    #

    if not os.path.exists(ResultantFolder):
        os.mkdir(ResultantFolder)
    Subjects_Quantity = len(Subjects_Score)

    Predicted_Score = np.zeros((1, Subjects_Quantity))
    Predicted_Score = Predicted_Score[0]
    for j in np.arange(Subjects_Quantity):

        Subjects_Data_test = Subjects_Data[j, :]
        Subjects_Data_test = Subjects_Data_test.reshape(1, -1)
        Subjects_Score_test = Subjects_Score[j]
        Subjects_Data_train = np.delete(Subjects_Data, j, axis=0)
        Subjects_Score_train = np.delete(Subjects_Score, j)

        if Permutation_Flag:
            # If doing permutation, the training scores should be permuted, while the testing scores remain unchanged
            Subjects_Index_Random = np.arange(len(Subjects_Score_train))
            np.random.shuffle(Subjects_Index_Random)
            Subjects_Score_train = Subjects_Score_train[Subjects_Index_Random]
            if j == 0:
                RandIndex = {'Fold_0': Subjects_Index_Random}
            else:
                RandIndex['Fold_' + str(j)] = Subjects_Index_Random

        Optimal_Alpha, Inner_Evaluation = Ridge_OptimalAlpha_LOOCV(
            Subjects_Data_train, Subjects_Score_train, Alpha_Range,
            ResultantFolder, Parallel_Quantity)

        normalize = preprocessing.MinMaxScaler()
        Subjects_Data_train = normalize.fit_transform(Subjects_Data_train)
        Subjects_Data_test = normalize.transform(Subjects_Data_test)

        clf = linear_model.Ridge(alpha=Optimal_Alpha)
        clf.fit(Subjects_Data_train, Subjects_Score_train)
        Fold_J_Score = clf.predict(Subjects_Data_test)
        Predicted_Score[j] = Fold_J_Score[0]

    Corr = np.corrcoef(Predicted_Score, Subjects_Score)
    Corr = Corr[0, 1]
    MAE = np.mean(np.abs(np.subtract(Predicted_Score, Subjects_Score)))

    Res_NFold = {
        'Corr': Corr,
        'MAE': MAE,
        'Test_Score': Subjects_Score,
        'Predicted_Score': Predicted_Score
    }
    ResultantFile = os.path.join(ResultantFolder, 'Res_NFold.mat')
    sio.savemat(ResultantFile, Res_NFold)
    return (Corr, MAE)
Example #35
def CalcLookupTable(TurbName,ModlDir,
                    WindSpeeds=None,FastExe='FAST.exe',
                    TMax=140.,Tss=80.,overwrite=0,
                    **kwargs):
    """ Calculate and save steady-state look-up table
    
        Runs a series of steady-state simulations using the previous
        steady-state values as initial conditions. Saves all steady-state
        values into a table and saves it.
        
        Note: the FAST executable must either be on the system path or in the
        model directory.
        
        Args:
            TurbName    (str) : name of turbine model
            ModlDir     (str) : path to model directory (no trailing slash)
            WindSpeeds (iter) : iterable of wind speeds at which to calculate
                                steady-state values
            FastExe     (str) : name of FAST executable to simulate turbine
            TMax        (flt) : total simulation time
            Tss         (flt) : averaging time for calculating steady-state
            overwrite   (int) : force directory overwrite without asking
            
        Returns:
            LUT   (ndarray) : look-up table of steady-state values
    """
    
    # check if model directory exists
    if not os.path.isdir(ModlDir):
        raise ValueError('Model directory {:s} does not exist.'.format(ModlDir))
    
    # check if steady-state directory exists, overwrite if user allows
    SSDir = os.path.join(ModlDir,'steady-state')
    if os.path.isdir(SSDir) and not overwrite:
        try:                    # bind raw_input to input for Python 2
            input = raw_input
        except NameError:
            pass
        UserResp = input('Steady-state directory already exists. Overwrite? [y/n] ')
        if UserResp in ['y','Y',1]:
            shutil.rmtree(SSDir)
        elif UserResp in ['n','N',0]:
            return None
        else:
            raise ValueError('Unknown response {}'.format(UserResp))
    elif os.path.isdir(SSDir) and overwrite:
        shutil.rmtree(SSDir)
    os.mkdir(SSDir)
    
    # define default wind speeds, ensure monotonically increasing
    if WindSpeeds is None:
        WindSpeeds = np.arange(3,25,0.5)
    else:
        WindSpeeds = np.sort(np.array(WindSpeeds))
    
    # define LUT name and path
    SSName = TurbName + '_SS.mat'
    SSPath = os.path.join(SSDir,SSName)
    
    # change directory to steady-state directory to run FAST
    os.chdir(ModlDir)
    
    # define initial dictionary of default wind-dependent parameters
    WindDict = {'BlPitch(1)':0.,'BlPitch(2)':0.,'BlPitch(3)':0.,
                'OoPDefl':0.,'IPDefl':0.,'TeetDefl':0.,'Azimuth':0.,
                'RotSpeed':0.,'NacYaw':0.,'TTDspFA':0.,'TTDspSS':0.,
                'TMax':TMax,'TStart':0.}
                
    # set initial values of passed-in keyword arguments
    for key in kwargs:
        if key in WindDict.keys():
            WindDict[key] = kwargs[key]
    
    # loop through wind speeds
    NumIDs = int(np.ceil(np.log10(len(WindSpeeds))))
    
    for iWS in range(len(WindSpeeds)):
        WindSpeed = WindSpeeds[iWS]
        
        print('Processing wind speed {} of {}...'.format(iWS+1,len(WindSpeeds)))
        
        # define file ID for FAST run
        fileID     = '{:.0f}'.format(iWS).zfill(NumIDs)
        
        # create wind filename
        WindName = 'NoShr_'+'{:2.1f}'.format(WindSpeed).zfill(4)+'.wnd'
        
        # check if wind file exists, make it if not
        WindPath = os.path.join(SSDir,WindName)
        if not os.path.exists(WindPath):
            WriteSteadyWind(WindSpeed,WindPath)
                
        # write FAST input files
        FastName = TurbName + '_' + fileID
        FastPath = os.path.join(ModlDir,FastName)
        SSFastPath = os.path.join(SSDir,FastName)
        WriteFastAD(TurbName,WindPath,ModlDir,
                    FastDir=ModlDir,FastName=FastName,
                    **WindDict)
                  
        # run FAST
        command  = ' '.join([FastExe,FastName + '.fst' ])
        ExitCode = os.system(command)
        if ExitCode:
            raise ValueError('FAST did not complete successfully ' + \
                    '(Wind speed {:.1f}, exit code {:.0f})'.format(WindSpeed,ExitCode))
                      
        # load FAST files
        FASTdf = ReadFASTFile(FastName + '.out')
        Fields = [s for s in FASTdf.columns]
        
        # initialize LUT if it doesn't exist
        if iWS == 0: LUT = np.empty((len(WindSpeeds),len(Fields)))
        
        # loop through and save steady-state values
        n_t = int(FASTdf['Time'].size*Tss/TMax)
        for i_parm in range(len(Fields)):
            
            # get data
            parm = Fields[i_parm]
            x = FASTdf[parm]
                          
            # calculate and save last value
            x_SS = np.mean(x[-n_t:])
            LUT[iWS,i_parm] = x_SS
                                  
        # set initial conditions for next round
        for key in WindDict.keys():
            if key in Fields:
                WindDict[key] = LUT[iWS,Fields.index(key)]
            elif key+'1' in Fields:
                WindDict[key] = LUT[iWS,Fields.index(key+'1')]

        # delete fst, ipt files, move .out
        os.system('del {}'.format(WindPath))
        os.system('del {}.fst'.format(FastPath))
        os.system('del {}_AD.ipt'.format(FastPath))
        os.system('del {}.sum'.format(FastPath))
        os.system('move {}.out {}.out'.format(FastPath,
                                                SSFastPath))
    
    print('Simulations completed.')    
       
    # rearrange to increasing wind speed
    LUT = LUT[LUT[:,Fields.index('WindVxi')].argsort()]
       
    # save LUT   
    LUTdict = {}
    LUTdict['SS'] = LUT   
    LUTdict['Fields'] = Fields 
    scio.savemat(SSPath,LUTdict)
    print('Look-up table saved.')
    
    return LUT 
Example #36
    weights = weights / weights.sum()
    layer_weight = dict(zip(layers, weights))

    opts.update({'layer_weight': layer_weight})

    # Reconstruction
    snapshots_dir = os.path.join(save_dir, 'snapshots_%s-%s' % (subject, roi),
                                 'image-%s' % image_label)
    recon_img, loss_list = reconstruct_image(
        features,
        net,
        save_intermediate=True,
        save_intermediate_path=snapshots_dir,
        **opts)

    # Save the results

    # Save the raw reconstructed image
    save_name = 'recon_img' + '-' + subject + '-' + roi + '-' + image_label + '.mat'
    sio.savemat(os.path.join(save_dir, save_name), {'recon_img': recon_img})

    # To better display the image, clip pixels with extreme values (0.02% of
    # pixels with extreme low values and 0.02% of the pixels with extreme high
    # values). And then normalise the image by mapping the pixel value to be
    # within [0,255].
    save_name = 'recon_img' + '-' + subject + '-' + roi + '-' + image_label + '.jpg'
    PIL.Image.fromarray(normalise_img(clip_extreme_value(
        recon_img, pct=0.04))).save(os.path.join(save_dir, save_name))

print('Done')
Example #37
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high

ddpg = DDPG(a_dim, s_dim, a_bound)
EWMA_p = 0.95
EWMA = np.zeros((1, MAX_EPISODES + 1))
iteration = np.zeros((1, MAX_EPISODES + 1))
t1 = time.time()
Q = np.zeros(2000)
R = np.zeros(2000)
for i in range(MAX_EPISODES):
    s = env.reset()
    ep_reward = 0
    for j in range(MAX_EP_STEPS):
        if RENDER:
            env.render()
        a = ddpg.choose_action(s)
        a = np.clip(np.random.normal(a, 1), -a_bound, a_bound)
        s_, r, done, hit = env.step(a, i)
        # print(r)
        Q[j], R[j] = ddpg.show_q(s, a, r)
        s = s_
        ep_reward += r
    print("Saved")
    scio.savemat('QR', {
        'Q': Q,
        'R': R,
    })
Example #38
def cmat(track_file,
         roi_file,
         resolution_network_file,
         matrix_name,
         matrix_mat_name,
         endpoint_name,
         intersections=False):
    """ Create the connection matrix for each resolution using fibers and ROIs. """

    stats = {}
    iflogger.info('Running cmat function')
    # Identify the endpoints of each fiber
    en_fname = op.abspath(endpoint_name + '_endpoints.npy')
    en_fnamemm = op.abspath(endpoint_name + '_endpointsmm.npy')

    iflogger.info('Reading Trackvis file %s', track_file)
    fib, hdr = nb.trackvis.read(track_file, False)
    stats['orig_n_fib'] = len(fib)

    roi = nb.load(roi_file, mmap=NUMPY_MMAP)
    roiData = roi.get_data()
    roiVoxelSize = roi.header.get_zooms()
    (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize)

    # Output endpoint arrays
    iflogger.info('Saving endpoint array: %s', en_fname)
    np.save(en_fname, endpoints)
    iflogger.info('Saving endpoint array in mm: %s', en_fnamemm)
    np.save(en_fnamemm, endpointsmm)

    n = len(fib)
    iflogger.info('Number of fibers: %i', n)

    # Create empty fiber label array
    fiberlabels = np.zeros((n, 2))
    final_fiberlabels = []
    final_fibers_idx = []

    # Add node information from specified parcellation scheme
    path, name, ext = split_filename(resolution_network_file)
    if ext == '.pck':
        gp = nx.read_gpickle(resolution_network_file)
    elif ext == '.graphml':
        gp = nx.read_graphml(resolution_network_file)
    else:
        raise TypeError("Unable to read file:", resolution_network_file)
    nROIs = len(gp.nodes())

    # add node information from parcellation
    if 'dn_position' in gp.nodes[list(gp.nodes())[0]]:
        G = gp.copy()
    else:
        G = nx.Graph()
        for u, d in gp.nodes(data=True):
            G.add_node(int(u), **d)
            # compute a position for the node based on the mean position of the
            # ROI in voxel coordinates (segmentation volume )
            xyz = tuple(
                np.mean(np.where(
                    np.flipud(roiData) == int(d["dn_correspondence_id"])),
                        axis=1))
            G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]])

    if intersections:
        iflogger.info("Filtering tractography from intersections")
        intersection_matrix, final_fiber_ids = create_allpoints_cmat(
            fib, roiData, roiVoxelSize, nROIs)
        finalfibers_fname = op.abspath(endpoint_name +
                                       '_intersections_streamline_final.trk')
        stats['intersections_n_fib'] = save_fibers(hdr, fib, finalfibers_fname,
                                                   final_fiber_ids)
        intersection_matrix = np.matrix(intersection_matrix)
        I = G.copy()
        H = nx.from_numpy_matrix(np.matrix(intersection_matrix))
        H = nx.relabel_nodes(
            H, lambda x: x + 1)  # relabel nodes so they start at 1
        I.add_weighted_edges_from(
            ((u, v, d['weight']) for u, v, d in H.edges(data=True)))

    dis = 0
    for i in range(endpoints.shape[0]):

        # ROI start => ROI end
        try:
            startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1],
                                   endpoints[i, 0, 2]])
            endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1],
                                 endpoints[i, 1, 2]])
        except IndexError:
            iflogger.error(
                'AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. '
                'PLEASE CHECK ENDPOINT GENERATION', i)
            break

        # Filter
        if startROI == 0 or endROI == 0:
            dis += 1
            fiberlabels[i, 0] = -1
            continue

        if startROI > nROIs or endROI > nROIs:
            iflogger.error(
                "Start or endpoint of fiber terminate in a voxel which is labeled higher"
            )
            iflogger.error(
                "than is expected by the parcellation node information.")
            iflogger.error("Start ROI: %i, End ROI: %i", startROI, endROI)
            iflogger.error("This needs bugfixing!")
            continue

        # Update fiber label
        # switch the rois in order to enforce startROI < endROI
        if endROI < startROI:
            tmp = startROI
            startROI = endROI
            endROI = tmp

        fiberlabels[i, 0] = startROI
        fiberlabels[i, 1] = endROI

        final_fiberlabels.append([startROI, endROI])
        final_fibers_idx.append(i)

        # Add edge to graph
        if G.has_edge(startROI,
                      endROI) and 'fiblist' in G.edge[startROI][endROI]:
            G.edge[startROI][endROI]['fiblist'].append(i)
        else:
            G.add_edge(startROI, endROI, fiblist=[i])

    # create a final fiber length array
    finalfiberlength = []
    if intersections:
        final_fibers_indices = final_fiber_ids
    else:
        final_fibers_indices = final_fibers_idx

    for idx in final_fibers_indices:
        # compute length of fiber
        finalfiberlength.append(length(fib[idx][0]))

    # convert to array
    final_fiberlength_array = np.array(finalfiberlength)

    # make final fiber labels as array
    final_fiberlabels_array = np.array(final_fiberlabels, dtype=int)

    iflogger.info(
        'Found %i (%f percent out of %i fibers) fibers that start or '
        'terminate in a voxel which is not labeled. (orphans)', dis,
        dis * 100.0 / n, n)
    iflogger.info('Valid fibers: %i (%f%%)', n - dis, 100 - dis * 100.0 / n)

    numfib = nx.Graph()
    numfib.add_nodes_from(G)
    fibmean = numfib.copy()
    fibmedian = numfib.copy()
    fibdev = numfib.copy()
    for u, v, d in G.edges(data=True):
        G.remove_edge(u, v)
        di = {}
        if 'fiblist' in d:
            di['number_of_fibers'] = len(d['fiblist'])
            idx = np.where((final_fiberlabels_array[:, 0] == int(u))
                           & (final_fiberlabels_array[:, 1] == int(v)))[0]
            di['fiber_length_mean'] = float(
                np.mean(final_fiberlength_array[idx]))
            di['fiber_length_median'] = float(
                np.median(final_fiberlength_array[idx]))
            di['fiber_length_std'] = float(np.std(
                final_fiberlength_array[idx]))
        else:
            di['number_of_fibers'] = 0
            di['fiber_length_mean'] = 0
            di['fiber_length_median'] = 0
            di['fiber_length_std'] = 0
        if not u == v:  # Fix for self loop problem
            G.add_edge(u, v, **di)
            if 'fiblist' in d:
                numfib.add_edge(u, v, weight=di['number_of_fibers'])
                fibmean.add_edge(u, v, weight=di['fiber_length_mean'])
                fibmedian.add_edge(u, v, weight=di['fiber_length_median'])
                fibdev.add_edge(u, v, weight=di['fiber_length_std'])

    iflogger.info('Writing network as %s', matrix_name)
    nx.write_gpickle(G, op.abspath(matrix_name))

    numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int)
    numfib_dict = {'number_of_fibers': numfib_mlab}
    fibmean_mlab = nx.to_numpy_matrix(fibmean, dtype=np.float64)
    fibmean_dict = {'mean_fiber_length': fibmean_mlab}
    fibmedian_mlab = nx.to_numpy_matrix(fibmedian, dtype=np.float64)
    fibmedian_dict = {'median_fiber_length': fibmedian_mlab}
    fibdev_mlab = nx.to_numpy_matrix(fibdev, dtype=np.float64)
    fibdev_dict = {'fiber_length_std': fibdev_mlab}

    if intersections:
        path, name, ext = split_filename(matrix_name)
        intersection_matrix_name = op.abspath(name + '_intersections') + ext
        iflogger.info('Writing intersection network as %s',
                      intersection_matrix_name)
        nx.write_gpickle(I, intersection_matrix_name)

    path, name, ext = split_filename(matrix_mat_name)
    if not ext == '.mat':
        ext = '.mat'
        matrix_mat_name = matrix_mat_name + ext

    iflogger.info('Writing matlab matrix as %s', matrix_mat_name)
    sio.savemat(matrix_mat_name, numfib_dict)

    if intersections:
        intersect_dict = {'intersections': intersection_matrix}
        intersection_matrix_mat_name = op.abspath(name +
                                                  '_intersections') + ext
        iflogger.info('Writing intersection matrix as %s',
                      intersection_matrix_mat_name)
        sio.savemat(intersection_matrix_mat_name, intersect_dict)

    mean_fiber_length_matrix_name = op.abspath(name +
                                               '_mean_fiber_length') + ext
    iflogger.info('Writing matlab mean fiber length matrix as %s',
                  mean_fiber_length_matrix_name)
    sio.savemat(mean_fiber_length_matrix_name, fibmean_dict)

    median_fiber_length_matrix_name = op.abspath(name +
                                                 '_median_fiber_length') + ext
    iflogger.info('Writing matlab median fiber length matrix as %s',
                  median_fiber_length_matrix_name)
    sio.savemat(median_fiber_length_matrix_name, fibmedian_dict)

    fiber_length_std_matrix_name = op.abspath(name + '_fiber_length_std') + ext
    iflogger.info('Writing matlab fiber length deviation matrix as %s',
                  fiber_length_std_matrix_name)
    sio.savemat(fiber_length_std_matrix_name, fibdev_dict)

    fiberlengths_fname = op.abspath(endpoint_name + '_final_fiberslength.npy')
    iflogger.info('Storing final fiber length array as %s', fiberlengths_fname)
    np.save(fiberlengths_fname, final_fiberlength_array)

    fiberlabels_fname = op.abspath(endpoint_name + '_filtered_fiberslabel.npy')
    iflogger.info('Storing all fiber labels (with orphans) as %s',
                  fiberlabels_fname)
    np.save(
        fiberlabels_fname,
        np.array(fiberlabels, dtype=np.int32),
    )

    fiberlabels_noorphans_fname = op.abspath(endpoint_name +
                                             '_final_fiberslabels.npy')
    iflogger.info('Storing final fiber labels (no orphans) as %s',
                  fiberlabels_noorphans_fname)
    np.save(fiberlabels_noorphans_fname, final_fiberlabels_array)

    iflogger.info("Filtering tractography - keeping only no orphan fibers")
    finalfibers_fname = op.abspath(endpoint_name + '_streamline_final.trk')
    stats['endpoint_n_fib'] = save_fibers(hdr, fib, finalfibers_fname,
                                          final_fibers_idx)
    stats['endpoints_percent'] = float(stats['endpoint_n_fib']) / float(
        stats['orig_n_fib']) * 100
    stats['intersections_percent'] = float(
        stats['intersections_n_fib']) / float(stats['orig_n_fib']) * 100

    out_stats_file = op.abspath(endpoint_name + '_statistics.mat')
    iflogger.info('Saving matrix creation statistics as %s', out_stats_file)
    sio.savemat(out_stats_file, stats)
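For reference, the matrices written above can be read straight back into Python with scipy.io.loadmat; the keys match the dicts built in this example ('number_of_fibers', 'mean_fiber_length', ...). A minimal sketch, assuming a placeholder base name 'connectome.mat' was passed as matrix_mat_name:

import scipy.io as sio

conn = sio.loadmat('connectome.mat')['number_of_fibers']                          # ROI-by-ROI fiber counts
mean_len = sio.loadmat('connectome_mean_fiber_length.mat')['mean_fiber_length']   # mean streamline length per edge
print(conn.shape, mean_len.shape)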
Example #39
0
# Assumed setup (the original snippet starts inside the click callback):
# four right-clicks are recorded into a 4x2 array, as the example output below suggests.
from pynput import mouse
import numpy as np
from scipy.io import savemat

xyLoc = np.zeros((4, 2))   # vertex positions (x, y)
curIndex = 0               # number of valid points recorded so far


def my_on_click(x, y, button, pressed):
    global xyLoc
    global curIndex
    # Show
    # print(button)
    if pressed and button == mouse.Button.right:
        # Store the click in the vertex position array
        xyLoc[curIndex, :] = [x, y]
        curIndex = curIndex + 1
        print('\t Valid point #{0} position: {1}'.format(curIndex, (x, y)))
    else:
        # Stop listener
        return False
 
while curIndex < 4:
    with mouse.Listener(on_click=my_on_click) as listener:
        listener.join()
        
# print(xyLoc)

# An example:
# [[136. 264.]
#  [145. 964.]
#  [843. 970.]
#  [845. 260.]]

savemat(r'temp.mat', mdict = {'apex':xyLoc})

# Display
print('\t Endpoint positions successfully written to the .mat file.')

Example #40
0
def run_training():
    """Train Model for a number of steps."""
    print("Running %s" % NAME)
    print("Logging to %s" % txt_logs_path)

    keep_prob = 0.8

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():

        #### TRAINING PORTION ####

        # Input images and labels.

        labels_tr, images_tr, seismics_tr, cam_locs_tr, ex_idxs_tr = inputs(
            train=True,
            batch_size=FLAGS.batch_size,
            num_epochs=FLAGS.num_epochs)
        dropout_keep = tf.placeholder(tf.float32, name="dropout_keep")

        # Build a Graph that computes predictions from the inference model.
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=True):
            logits_tr = inference(images_tr, seismics_tr, dropout_keep)

        print([
            v.name for v in tf.get_collection(
                tf.GraphKeys.VARIABLES,
                scope="inference/Image_Feats/InceptionV3")
        ][0])
        print('Printing the type of get_collection')
        print(
            type(
                tf.get_collection(tf.GraphKeys.VARIABLES,
                                  scope="inference/Image_Feats/InceptionV3")))

        # ipdb.set_trace()

        y_probs_tr = tf.nn.softmax(logits_tr, name="Softmax_Layer")

        # Add to the Graph the loss calculation.
        loss_tr, xent_tr = loss_op(y_probs_tr, labels_tr)
        # tf.scalar_summary("Training_Loss", loss_tr)

        # Add to the Graph operations that train the model.
        train_op = training(loss_tr, FLAGS.learning_rate)

        # The op for initializing the variables.
        init_op = tf.group(
            tf.initialize_all_variables(), tf.initialize_local_variables(
            ))  # local is needed to initialize number of epochs in queue

        #### VALIDATION PORTION ####

        tf.get_variable_scope().reuse_variables()
        # print(v for v in slim.variables.get_variables())

        # Input images and labels.
        labels_tst, images_tst, seismics_tst, cam_locs_tst, ex_idxs_tst = inputs(
            train=False,
            batch_size=FLAGS.batch_size,
            num_epochs=FLAGS.num_epochs)

        # Build a Graph that computes predictions from the inference model.
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=True):
            logits_tst = inference(images_tst, seismics_tst, dropout_keep)

        y_probs_tst = tf.nn.softmax(logits_tst, name="Softmax_Layer")

        # Add to the Graph the loss calculation.
        loss_tst, xent_tst = loss_op(y_probs_tst, labels_tst)
        # tf.scalar_summary("Testing_Loss", loss_tst)

        ## Savers
        image_feats_vars = tf.get_collection(
            tf.GraphKeys.VARIABLES, scope="inference/Image_Feats/InceptionV3")
        image_feats_dict = {
            v.op.name.replace("inference/Image_Feats/", ""): v
            for v in image_feats_vars
        }
        image_feats_saver = tf.train.Saver(image_feats_dict)

        model_saver = tf.train.Saver()

        wo_optimizer_vars = [
            var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
            if not 'Adam' in var.name
        ]
        wo_optimizer_dict = {v.op.name: v for v in wo_optimizer_vars}
        wo_optimizer_saver = tf.train.Saver(wo_optimizer_dict)
        # ipdb.set_trace()

        #### Start Session ####

        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=GPU_FRAC)
        # config = tf.ConfigProto(gpu_options=gpu_options)
        # Create a session for running operations in the Graph.
        # sess = tf.Session(config)
        sess = tf.Session()

        # Tensorboard
        # run 'tensorboard --logdir=./NAME/train --port=7007'
        #train_writer = tf.train.SummaryWriter(tf_summary_path + '/train',
        #                                    sess.graph)
        #merged = tf.merge_all_summaries()

        # Start the text logger
        f = open(txt_logs_path, 'w')

        f.write('Logging for file %s \n' % NAME)
        # Initialize the variables (the trained variables and the
        # epoch counter).
        sess.run(init_op)

        # image_feats_saver.restore(sess, IMAGE_FEATS_CKPT)
        # image_feats_saver.restore(sess, MODEL_CKPT)

        # Same model retrained
        # model_saver.restore(sess, MODEL_CKPT)

        if (FLAGS.isTest):
            #model_saver.restore(sess, FINAL_CKPT)
            wo_optimizer_saver.restore(sess, FINAL_CKPT)

        # Start input enqueue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # ipdb.set_trace()
        if not FLAGS.isTest:
            # Start the text logger
            f = open(txt_logs_path, 'w')

            f.write('Logging for file %s \n' % NAME)

            avg_loss = 0
            avg_xent = 0

            for step in xrange(1, FLAGS.max_steps + 1):

                start_time = time.time()
                _, loss_value, xent_value = sess.run(
                    [train_op, loss_tr, xent_tr],
                    feed_dict={dropout_keep: keep_prob})
                duration = time.time() - start_time
                avg_loss = avg_loss + loss_value
                avg_xent = avg_xent + xent_value

                step_size = 100
                if step % step_size == 0:
                    examples_per_sec = FLAGS.batch_size / float(duration)
                    format_str = (
                        '%s.py %s: step %d, (xent+regu)/batch = %.5f, xent/batch = %.5f (%.3f sec/batch)'
                    )
                    p_str = format_str % (fNAME[0:2], time.strftime("%H:%M"),
                                          step, avg_loss * 1.0 / step_size,
                                          avg_xent * 1.0 / step_size, duration)
                    print(p_str)
                    f.write(p_str + '\n')  # Logging to file
                    avg_loss = 0
                    avg_xent = 0

                #if step % 1000 == 0:
                if step % (int(np.ceil(
                        TRAIN_SIZE * 1.0 / FLAGS.batch_size))) == 0:
                    predictions_tst, true_labels_tst, total_loss_tst = run_eval(
                        sess, y_probs_tst, xent_tst, labels_tst, dropout_keep,
                        VALID_SIZE)

                    target_names = ['no_person', 'person']
                    p_str = (
                        "********Classification Report for Testing Set********\n"
                        + "LOSS FOR TESTING : %.5f \n" % (total_loss_tst) +
                        classification_report(true_labels_tst,
                                              predictions_tst,
                                              target_names=target_names))

                    print(p_str)
                    f.write(p_str + '\n')

                    model_saver.save(sess, model_saver_path)
                    print('Checkpoint Saved')

                #if step % 10000 == 0:
                if step % (int(np.ceil(TRAIN_SIZE * 1.0 / FLAGS.batch_size)) *
                           5) == 0:
                    predictions_tr, true_labels_tr, total_loss_tr = run_eval(
                        sess, y_probs_tr, xent_tr, labels_tr, dropout_keep,
                        TRAIN_SIZE)
                    p_str = (
                        '********Classification Report for Training Set********\n'
                        + 'LOSS FOR TRAINING : %.5f \n' % (total_loss_tr) +
                        classification_report(true_labels_tr,
                                              predictions_tr,
                                              target_names=target_names))

                    print(p_str)
                    f.write(p_str + '\n')
            # After the loop is done run results one more time

            predictions_tst, true_labels_tst, total_loss_tst = run_eval(
                sess, y_probs_tst, xent_tst, labels_tst, dropout_keep,
                VALID_SIZE)

            target_names = ['no_person', 'person']
            p_str = (
                "********Classification Report for Testing Set********\n" +
                "LOSS FOR TESTING : %.5f \n" % (total_loss_tst) +
                classification_report(true_labels_tst,
                                      predictions_tst,
                                      target_names=target_names))

            print(p_str)
            f.write(p_str + '\n')

            model_saver.save(sess, model_saver_path)
            print('Checkpoint Saved')

            predictions_tr, true_labels_tr, total_loss_tr = run_eval(
                sess, y_probs_tr, xent_tr, labels_tr, dropout_keep, TRAIN_SIZE)
            p_str = (
                '********Classification Report for Training Set********\n' +
                'LOSS FOR TRAINING : %.5f \n' % (total_loss_tr) +
                classification_report(
                    true_labels_tr, predictions_tr, target_names=target_names))

            print(p_str)
            f.write(p_str + '\n')

        else:
            predictions_tst, true_labels_tst, total_loss_tst, probs_tst, indices_tst = run_eval_testing(
                sess, y_probs_tst, xent_tst, labels_tst, ex_idxs_tst,
                dropout_keep, VALID_SIZE)

            target_names = ['no_person', 'person']
            p_str = (
                "********Classification Report for Testing Set********\n" +
                "LOSS FOR TESTING : %.5f \n" % (total_loss_tst) +
                classification_report(true_labels_tst,
                                      predictions_tst,
                                      target_names=target_names))

            print(p_str)

            cmtx_tst = confusion_matrix(true_labels_tst, predictions_tst)
            cmtx_tst_normalized = cmtx_tst.astype('float') / cmtx_tst.sum(
                axis=1)[:, np.newaxis]
            print(cmtx_tst)
            print(cmtx_tst_normalized)

            precision, recall, thresholds = precision_recall_curve(
                true_labels_tst, probs_tst[:, 1])
            plt.plot(precision, recall)

            filelabel = 'visual'
            plt.savefig('results/prec_recall_' + filelabel + '.png')

            ids = indices_tst[true_labels_tst == 1][np.argsort(
                probs_tst[true_labels_tst == 1][:, 1])]
            probs = probs_tst[true_labels_tst == 1][np.argsort(
                probs_tst[true_labels_tst == 1][:, 1])]

            matdict = {
                'ids_' + filelabel: ids,
                'probs_' + filelabel: probs,
                'precision_' + filelabel: precision,
                'recall_' + filelabel: recall
            }

            sio.savemat('results/' + filelabel + '_res.mat', matdict)

            test_dict = {}
            test_dict['indices'] = indices_tst
            test_dict['probs'] = probs_tst[:, 1]
            test_dict['true_labels'] = true_labels_tst
            test_df = pd.DataFrame(test_dict)
            test_df.to_pickle('results/' + filelabel + '_probs.pkl')

            ipdb.set_trace()
            # indices_tst[true_labels_tst==1][np.argsort(probs_tst[true_labels_tst==1][:,1])[0:15]] # get worst performer IDs
            # indices_tst[true_labels_tst==1][np.argsort(probs_tst[true_labels_tst==1][:,0])[0:15]] # get best performer IDs
            # np.save('worst_to_best_prob_ids_bilinear.npy', indices_tst[true_labels_tst==1][np.argsort(probs_tst[true_labels_tst==1][:,1])])

            # probs_tst[true_labels_tst == 1, :]
            # indices_tst[true_labels_tst == 1][np.greater(probs_tst[true_labels_tst == 1, 0], 0.9)]

            predictions_tr, true_labels_tr, total_loss_tr, probs_tr, indices_tr = run_eval_testing(
                sess, y_probs_tr, xent_tr, labels_tr, ex_idxs_tr, dropout_keep,
                TRAIN_SIZE)

            p_str = (
                '********Classification Report for Training Set********\n' +
                'LOSS FOR TRAINING : %.5f \n' % (total_loss_tr) +
                classification_report(
                    true_labels_tr, predictions_tr, target_names=target_names))

            print(p_str)

            cmtx_tr = confusion_matrix(true_labels_tr, predictions_tr)
            cmtx_tr_normalized = cmtx_tr.astype('float') / cmtx_tr.sum(
                axis=1)[:, np.newaxis]
            print(cmtx_tr)
            print(cmtx_tr_normalized)

            # try:
        #   step = 0
        #   while not coord.should_stop():
        #     start_time = time.time()
        #
        #     # Run one step of the model.  The return values are
        #     # the activations from the `train_op` (which is
        #     # discarded) and the `loss` op.  To inspect the values
        #     # of your ops or variables, you may include them in
        #     # the list passed to sess.run() and the value tensors
        #     # will be returned in the tuple from the call.
        #     _, loss_value, labels_batch = sess.run([train_op, loss, labels])
        #
        #     duration = time.time() - start_time
        #
        #     # Print an overview fairly often.
        #     if step % 100 == 0:
        #       print('Step %d: loss = %.2f (%.3f sec), with batch %d' % (step, loss_value,
        #                                                  duration, labels_batch.shape[0]))
        #     step += 1
        # except tf.errors.OutOfRangeError:
        #   print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
        # finally:
        #   # When done, ask the threads to stop.
        #   coord.request_stop()

        coord.request_stop()
        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
Example #41
0
                  shuffle=True)

tStop = timeit.default_timer()

# Save losses
lossHistory = score.history['loss']
valLossHistory = score.history['val_loss']

########################################################################
# Save the results
########################################################################
# Keras model
model.save(saveFilename + '.h5')

# For the loss file, append the previous training history to the new history
if train_more_epochs:
    prevLossHistory = sio.loadmat(saveFilename + '_loss.mat')
    lossHistory = np.concatenate(
        (prevLossHistory['lossHistory'].flatten(), lossHistory))
    valLossHistory = \
        np.concatenate((prevLossHistory['valLossHistory'].flatten(),
                        valLossHistory))

# Save the losses
sio.savemat(saveFilename + '_loss', {
    'lossHistory': lossHistory,
    'valLossHistory': valLossHistory
})

print("\nThe training time is %f sec" % (tStop - tStart))
Example #42
0
		r_arr = np.vstack((r_arr, np.zeros((maxHeight - r_arr.shape[0], r_arr.shape[1]), dtype = np.uint8)))
		g_arr = np.hstack((g_arr, np.zeros((g_arr.shape[0], maxWidth - g_arr.shape[1]), dtype = np.uint8)))
		g_arr = np.vstack((g_arr, np.zeros((maxHeight - g_arr.shape[0], g_arr.shape[1]), dtype = np.uint8)))
		b_arr = np.hstack((b_arr, np.zeros((b_arr.shape[0], maxWidth - b_arr.shape[1]), dtype = np.uint8)))
		b_arr = np.vstack((b_arr, np.zeros((maxHeight - b_arr.shape[0], b_arr.shape[1]), dtype = np.uint8)))
		
		r_arr = r_arr.reshape(maxWidth * maxHeight)
		g_arr = g_arr.reshape(maxWidth * maxHeight)
		b_arr = b_arr.reshape(maxWidth * maxHeight)
		
		arr = np.concatenate((r_arr, g_arr, b_arr))
		return arr
		
threadLock = threading.Lock()
threads = []

for i in range(0, 20):
	thread = ReadThread(i, "Thread" + str(i), i)
	thread.start()
	threads.append(thread)

#thread = ReadThread(14, "Thread" + str(14), 14)
#thread.start()
#threads.append(thread)

for t in threads:
	t.join()

scio.savemat('./data.mat', {'index': index, 'dataset': dataset})
print("Exiting Main Thread")
Example #43
0
def plot_graph(Conv_Method, n_num):
    from baselines import Rand, Plain, Randn, gat, gcn, gcn_mlp
    from syndata.SynDataset import SynDataset
    import torch
    import torch.nn.functional as F
    from torch_geometric.data import Data
    import random
    import numpy as np
    import math
    from sklearn.model_selection import KFold
    import scipy.io as sio
    import json  # needed for json.dump of the heat map below

    def train(train_mask):
        model.train()
        optimizer.zero_grad()
        F.nll_loss(model(data)[train_mask], data.y[train_mask]).backward()
        optimizer.step()

    def test(train_mask, val_mask, test_mask):
        model.eval()
        logits, accs = model(data), []
        for mask in [train_mask, val_mask, test_mask]:
            pred = logits[mask].max(1)[1]
            #print(pred)
            acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()
            accs.append(acc)
        return accs

    dataset = SynDataset(root='data/Rand_numnodes' + str(n_num),
                         name='numnodes' + str(n_num))
    indx = 0
    sel_feats = 10
    len_mul = int(n_num / 10)
    best_acc = 0
    index = [idx for idx in range(len(dataset[0].y))]
    random.shuffle(index)
    index = np.array(index)
    kf = KFold(n_splits=10)
    kf.get_n_splits(index)
    d_graph = np.zeros(10)
    acc_list = np.zeros(len(dataset))
    best_model_acc = 0
    for i in range(len(dataset)):
        time = 0
        data = dataset[i]
        data.x = data.x[:, :sel_feats]
        for train_index, test_index in kf.split(index):
            mask = np.zeros(len(index))
            mask[index[train_index]] = 1
            train_mask = torch.tensor(mask, dtype=torch.uint8)
            mask = np.zeros(len(index))
            mask[index[test_index]] = 1
            test_mask = torch.tensor(mask, dtype=torch.uint8)
            val_mask = test_mask
            model, data = locals()[Conv_Method].call(data, dataset.name,
                                                     data.x.size(1),
                                                     dataset.num_classes)
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=0.005,
                                         weight_decay=0.0005)
            best_val_acc = test_acc = 0.0
            for epoch in range(1, 201):
                train(train_mask)
                train_acc, val_acc, tmp_test_acc = test(
                    train_mask, val_mask, test_mask)
                #log ='Epoch: 200, time: '+ str(time)  + ' acc: {:.4f} \n'
                #print((log.format(val_acc)))
                if val_acc >= best_val_acc:
                    test_acc = tmp_test_acc
                    best_val_acc = val_acc
                    if val_acc > best_model_acc:
                        best_model_acc = val_acc
                        torch.save(model.state_dict(), 'bestmodel.pt')
                        log = 'Epoch: 200, dataset id: ' + str(
                            i
                        ) + ' train_acc: {:.4f} val_acc: {:.4f} test_acc: {:.4f} \n'
                        print((log.format(train_acc, val_acc, tmp_test_acc)))
                        #test(train_mask,val_mask,test_mask)
                d_graph[time] = test_acc
            del data
            del model
            time += 1
            torch.cuda.empty_cache()
            data = dataset[i]
            data.x = data.x[:, :sel_feats]
        torch.cuda.empty_cache()
        log = 'Epoch: 200, dataset id: ' + str(i) + ' acc: {:.4f} \n'
        print((log.format(np.mean(d_graph))))
        acc_list[i] = np.mean(d_graph)
    f1 = open('scores/kfold_' + Conv_Method + '_heatmap_' + '.json', 'w+')
    heat_map = np.reshape(
        acc_list, [int(math.sqrt(len(dataset))),
                   int(math.sqrt(len(dataset)))])
    sio.savemat('scores/kfold_map' + Conv_Method + '.mat',
                {'heat_map': heat_map})
    json.dump(heat_map.tolist(), f1)
Example #44
0
import numpy as np
import scipy.io as sio
from mne.io import read_raw_edf
import os

#set path to your EDF files
path='/home/tarek/sleepclassification/data_G_test/' 

#set path where you want to save your mat or npy files 
save_path='/home/tarek/sleepclassification/data_G_test/'

#select either you want to save mat or npy format
output_file='npy'

for root, dirs, files in os.walk(path):
    for filename in files:
        if filename.endswith(".edf"):
            #read edf files and extract informations
            raw=read_raw_edf(os.path.join(root,filename))
            raw.load_data()
            print ('loading :' ,filename)
            #select electrodes you need to use 
            EEG_data=raw.pick_types(meg=False, eeg=True, stim=False, eog=False, ecg=False, emg=False)
            data,time=EEG_data[:] #extract time series of selected electrodes 
            del raw
            #save time series into mat or npy file 
            save_file=save_path+filename.replace('.edf','')
            if output_file=='mat':
                sio.savemat(save_file,{'Data':data})
            elif output_file=='npy':
                np.save(save_file,data)
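Multi-channel EEG recordings can be large on disk; scipy.io.savemat accepts do_compression=True to write a zlib-compressed MAT file that loadmat reads back transparently. A minimal, optional variant of the save branch above, reusing the same sio, save_file and data names (not part of the original script):

sio.savemat(save_file, {'Data': data}, do_compression=True)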
Example #45
0
except OSError as exc:
    if exc.errno != errno.EEXIST:
        raise
    pass

# Create content for PIR mse Header file
pirHeader = 'File=' + pirFile + '\n' + 'n=' + str(scales) + '\n' + 'a=' + str(a) + '\n' + 'script=' + mseScriptLoc + 'mse.c'

# Write PIR mse Header file
pirHeaderFile = pirMseFolder + pirMseFileName + '.hea'
with open(pirHeaderFile, 'w') as fp:
    fp.write(pirHeader)

# Save mse files
msePirFile = pirMseFolder + pirMseFileName + '.mat'
sio.savemat(msePirFile, {'mse':msePir})

# Process IR camera data
# Read data in Camera file to camArray
with open(camFile, 'r') as fp:
    camArray = fp.readlines()

camArray = [x.strip() for x in camArray]
# Old method
#camArray = [float(x.split(',')[-1]) for x in camArray]
# New method
camArray = [float(x.split(',')[0]) for x in camArray]
# Read Camera Timestamp file
with open(camTimeFile, 'r') as fp:
    timeArray = fp.readlines()

Example #46
0
        neu_mask = np.where(pred_filt != 3, zero_mask, 3)
        macro_mask = np.where(pred_filt != 4, zero_mask, 4)
        
        # Get instances for each class using watershed
        epi_mask = instance_seg(epi_mask)
        lym_mask = instance_seg(lym_mask)
        neu_mask = instance_seg(neu_mask)
        macro_mask = instance_seg(macro_mask)
        
        
        # Save masks
        # Check if last number of uniques is not zero, if it is not then save this mask.
        # If it zero, it means the mask is empty, so skip this
        if np.unique(epi_mask)[-1] != 0:
                #np.save("{}/{}.npy".format(epi_path, raw_ct), epi_mask)
                sio.savemat("{}/{}.mat".format(epi_path, raw_ct), {'n_ary_mask':epi_mask})
        
        raw_ct+=1

        if np.unique(lym_mask)[-1] != 0:
                #np.save("{}/{}.npy".format(lym_path, raw_ct), lym_mask)
                sio.savemat("{}/{}.mat".format(lym_path, raw_ct), {'n_ary_mask':lym_mask})
                
        raw_ct+=1
        
        if np.unique(neu_mask)[-1] != 0:
                #np.save("{}/{}.npy".format(neu_path, raw_ct), neu_mask)
                sio.savemat("{}/{}.mat".format(neu_path, raw_ct), {'n_ary_mask':neu_mask})
                
        raw_ct+=1
        
Example #47
0
def main():
    global args, best_prec1
    args = parser.parse_args()

    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # data preparation
    train_loader, val_loader = data_loader()

    # model preparation
    # model = MCifarNet()
    # model = MCifarNetGSRouter().cuda()
    # model = MCifarNetURRouter()
    # model = VGGURRouter()
    # model = VGGL1NormRouter()

    # model = resnet18_gsrouter()
    model = resnet34().cuda()
    model = nn.DataParallel(model).cuda()
    cudnn.benchmark = True

    # optionally resume from a checkpoint
    if args.resume:
        # args.resume is a directory; check and load the checkpoint file inside it
        # (mirrors the finetune branch below, which tests the joined path)
        latest_checkpoint = os.path.join(args.resume, 'checkpoint.pth.tar')
        if os.path.isfile(latest_checkpoint):
            print("=> loading checkpoint '{}'".format(latest_checkpoint))
            checkpoint = torch.load(latest_checkpoint)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                latest_checkpoint, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.finetune:
        baseline_checkpoint = os.path.join(args.finetune, 'model_best.pth.tar')
        print(baseline_checkpoint)
        if os.path.isfile(baseline_checkpoint):
            print("=> loading baseline checkpoint '{}'".format(args.finetune))
            checkpoint = torch.load(baseline_checkpoint)
            args.start_epoch = checkpoint['epoch']
            # best_prec1 = checkpoint['best_prec1']

            state_dict = OrderedDict()
            for name, param in checkpoint['state_dict'].items():
                state_dict[name] = param
            for name, param in model.named_parameters():
                if 'gs' in name:
                    state_dict[name] = param

            model.load_state_dict(state_dict)
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.finetune, checkpoint['epoch']))

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    # optimizer = optim.SGD(model.parameters(), lr = 0.1, weight_decay = 1e-4, momentum = 0.9)
    # optimizer = optim.SGD([param for name, param in model.named_parameters() if not ('bn' in name and 'bias' in name) and not ('conv' in name and 'bias' in name)], lr = 0.1, weight_decay = 1e-4, momentum = 0.9)
    optimizer = optim.SGD([{
        'params': [
            param for name, param in model.named_parameters()
            if 'gs' in name and 'fc' in name
        ],
        'lr':
        args.lrfact * args.lr,
        'weight_decay':
        20 / 1000 * args.weight_decay
    }, {
        'params': [
            param for name, param in model.named_parameters()
            if not ('gs' in name and 'fc' in name)
        ],
        'lr':
        args.lr,
        'weight_decay':
        args.weight_decay
    }],
                          momentum=args.momentum)
    if args.test:
        test_acc = validate()
        sys.exit()

    # fix temp first
    # temp = 0.66
    temp = 1

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, temp)

        # evaluate on validation set
        prec1, acts = validate(val_loader, model, criterion, temp)

        # remember best prec@1 and save checkpoint
        if acts.item() <= args.target:
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1
                }, is_best)

    print('Best accuracy: ', best_prec1)
    sio.savemat(
        './runs/' + args.expname + '/loss.mat', {
            'loss': loss_lst,
            'loss_acts': loss_acts_lst,
            'rand': rand_lst,
            'top1_train': top1_train_lst,
            'top1_val': top1_val_lst
        })
Example #48
0
if USE_GPU:
    UNet2D = UNet2D.cuda()
else:
    UNet2D = UNet2D.cpu()

# Create dataset representation
d = Dataset('../dataset/images')

# Get validation data to test on
valDataY, valDataX = d.getValidationDataBatch(0, 50)
if USE_GPU:
    valDataY = valDataY.cuda()
    valDataX = valDataX.cuda()

# save model, get output for validation data
torch.save(UNet2D, 'CP_decomposed_model.pt')

output = UNet2D(valDataX)

# write this data to be post-processed for MSE/PSNR values
output = torch.squeeze(output)
if USE_GPU:
    output = output.cpu()

# save output images and filenames
outputIm = output.detach().numpy()
sio.savemat('outputIm_CP.mat', {'outputIm': outputIm})
val = d.validationFiles()
sio.savemat('filenames_CP.mat', {'filenames': val})
Example #49
0
    opt['regress_confounds']['flag_pca_motion'] = True
    opt['regress_confounds']['pct_var_explained'] = 0.95

    opt['regress_confounds'][
        'flag_wm'] = True  # Turn on/off the regression of the average white matter signal (true: apply / false : don't apply)
    opt['regress_confounds'][
        'flag_vent'] = True  # Turn on/off the regression of the average of the ventricles (true: apply / false : don't apply)
    opt['regress_confounds'][
        'flag_gsc'] = False  # Turn on/off the regression of the PCA-based estimation of the global signal (true: apply / false : don't apply)
    opt['regress_confounds'][
        'flag_scrubbing'] = True  # Turn on/off the scrubbing of time frames with excessive motion (true: apply / false : don't apply)
    opt['regress_confounds'][
        'thre_fd'] = 0.4  # The threshold on frame displacement that is used to determine frames with excessive motion in the

    opt['smooth_vol'] = dict()
    opt['smooth_vol'][
        'fwhm'] = 6  # Full-width at maximum (FWHM) of the Gaussian blurring kernel, in mm.
    opt['smooth_vol'][
        'flag_skip'] = 0  # Skip spatial smoothing (0: don't skip, 1 : skip)

    opt['psom'] = dict()
    opt['psom'][
        'qsub_options'] = '--account rpp-aevans-ab --time=00-05:00 --ntasks=1 --cpus-per-task=3 --mem-per-cpu=4G'
    opt['psom']['max_queued'] = int(np.floor(n_runs * 0.2))

    variables = dict()
    variables['files_in'] = files_in
    variables['opt'] = opt
    sio.savemat(str(root_p / 'Support_NIAK' / f'{site_name}_niak.mat'),
                variables)
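Note that savemat writes each nested Python dict above as a MATLAB struct, which is what lets NIAK read fields such as opt.smooth_vol.fwhm directly. A minimal round-trip sketch (the file name 'opt_demo.mat' is a placeholder):

import scipy.io as sio

opt = {'smooth_vol': {'fwhm': 6, 'flag_skip': 0},
       'psom': {'max_queued': 4}}
sio.savemat('opt_demo.mat', {'opt': opt})

back = sio.loadmat('opt_demo.mat', squeeze_me=True, struct_as_record=False)
print(back['opt'].smooth_vol.fwhm)   # -> 6: nested dicts come back as struct-like objects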
Example #50
0
## save the network data
import scipy.io as sio
Spoisson = concatenate(sm_ext.spiketimes.values(
)) / msecond  # save spike timings of external input in ms
SynState = transpose(
    concatenate((synstate_ext[:], synstate_ee[:], synstate_ei[:],
                 synstate_ie[:], synstate_ii[:])) / (msiemens / cm**2))
Spikes_e = array(sm_e.spikes)[:, ::-1]  # switch columns
Spikes_i = array(sm_i.spikes)[:, ::-1]  # switch columns
Spikes_i[:,
         1] = Spikes_i[:,
                       1] + Ncellex  # create unique cell numbers for i cells (right above numbers of e cells)
Spikes = concatenate((Spikes_e, Spikes_i))
sio.savemat(
    'Network_brian.mat', {
        'Ncellex': double(Ncellex),
        'Ncellin': double(Ncellin),
        'ConMat2': double(ConMat2),
        'ConMat3': double(ConMat3),
        'ConMat4': double(ConMat4),
        'ConMat5': double(ConMat5),
        'gsynex': gsynex,
        'gsynin': gsynin,
        'offsetg': offsetg,
        'g0': g0,
        'gamma': gamma * 1e3,
        't_brian': synstate_ee.times / msecond,
        'Spoisson': Spoisson,
        'SynState': SynState,
        'Spikes': Spikes
    })
Example #51
0
def MakeVerticalIndex(ListOfFilesWithinTimeInterval,RemoveToCloseValues,
                      R_s,res,directory2Data,dirnc,beamgrp,start_time,
                      log_start,stop_time,lat_start,lat_stop,lon_start,lon_stop,ping_idx): 

    
    
    #For bookkeeping
    transectID = log_start
    DataMatrix = []
    time0 = 0
    ping_counter=0
    r_mask = 0
    b_mask = 0
    ping_mask=0
    
    
    
    
    #Loop through all files within the time interval
    for filename_index in range(0,len(ListOfFilesWithinTimeInterval)):
        
        #Print the progression
        tools.printProgressBar(filename_index + 1, len(ListOfFilesWithinTimeInterval), prefix = 'Make Vertical:', suffix = 'Completed', length = 50)
        
        
        #Get the full path name
        filename = os.path.join(dirnc,ListOfFilesWithinTimeInterval[filename_index])
        
        #Load the nc file
        fileID = Dataset(filename,'r',format = 'NETCDF4')
        
        
        #Get the group with the vertical fan data
        if fileID.groups['Sonar'].groups['Beam_group1'].beam_mode == 'Vertical': 
            beamgrp = 'Beam_group1'
        elif fileID.groups['Sonar'].groups['Beam_group2'].beam_mode == 'Vertical':
            beamgrp = 'Beam_group2'
        
            
            
        #Get the vertical beam data
        #FIKS slik at den henter vertikal data
        variables = tools.GetVariablesFromNC(fileID,beamgrp,ListOfFilesWithinTimeInterval,ping_idx[filename_index])
        fileID.close()
            
            
#        try: 
        #The sonar data often includes a corrupted value of
        #the transmit power that destroys the analysis.
        #This will fix the problem, but the sv values are not
        #correct.
        #ADD: this data should be labeled when making the work files
        #so the user can know that it is corrupted.
        if variables.transmitpower == 0: 
            variables.transmitpower = 4633
            


            
            
        #Compute the sv and TS 
        #ADD TS are not used here !!!
        sv, RangeOut= tools.ApplyTVG(10*np.log10(variables.BeamAmplitudeData),
                                variables.soundvelocity[0],
                                variables.sampleinterval,
                                variables.transmitpower,
                                variables.absorptioncoefficient[0],
                                variables.frequency,
                                variables.pulslength,
                                variables.gaintx,
                                variables.equivalentbeamangle,
                                variables.sacorrection,
                                variables.dirx)
            
        
        #Remove data too close to the vessel
        sv[np.where(RangeOut<=RemoveToCloseValues)] = np.nan
           
       
        sv[:,np.where(variables.dirx>=60)] = np.nan
#            sv[np.where(sv<-65)] = np.nan
           
           
        #Stack the vertical beam data
        if len(DataMatrix) == 0:  # first ping: DataMatrix is still an empty list
            sv_x,sv_y = sv.shape
            
            DataMatrix = 10**(sv/10)[:,:,np.newaxis]
            time0 = variables.time
            ping_counter = ping_counter+1
            
        else: 
            if sv_x>sv.shape[0]: 
                sv=np.append(sv,np.nan*np.ones((sv_x-sv.shape[0],sv_y)),axis=0)
            elif sv_x<sv.shape[0]: 
                DataMatrix=np.append(DataMatrix,np.nan*np.ones((sv.shape[0]-sv_x,sv_y,DataMatrix.shape[2])),axis=0)
                sv_x,sv_y = sv.shape
                
            DataMatrix = np.dstack((DataMatrix,10**(sv/10)[:,:,np.newaxis]))
            time0=np.hstack((time0,variables.time))
            ping_counter = ping_counter+1
            
#        except AttributeError:
#            print('* bad file')
        
            
            
            
    #Find the median
    Medianen = np.nanmedian(DataMatrix,axis=2)
    
    
    #Get index of everything above the median
    [r_mask0,b_mask0,ping_mask0] = np.where(DataMatrix>=4*np.repeat(Medianen[:,:,np.newaxis],len(DataMatrix[0,0,:]),axis=2))
    
    
    # Convert ping indices to ping times (only if anything exceeded the threshold)
    if ping_mask0.size != 0:
        ping_mask0 = time0[ping_mask0]


    #Stack the data
    r_mask = np.hstack((r_mask,r_mask0))
    b_mask = np.hstack((b_mask,b_mask0))
    ping_mask = np.hstack((ping_mask,ping_mask0))

    
    DataMatrix = []
    ping_counter = 0
        
        
    #Try saving the data
    print(directory2Data.dir_verticalwork+'/'+'Vertical_T'+str(transectID)+'.mat')
    try: 
        scp.savemat(directory2Data.dir_verticalwork+'/'+'Vertical_T'+str(transectID)+'.mat',mdict = {'r_mask':r_mask,'b_mask':b_mask,'ping_mask':ping_mask,
                'start_time':start_time,'log_start':log_start,'stop_time':stop_time,'lat_start':lat_start,'lat_stop':lat_stop,'lon_start':lon_start,'lon_stop':lon_stop})
    except TypeError: 
        print(start_time,log_start,stop_time,lat_start,lat_stop,lon_start,lon_stop)
Example #52
0
File: stft.py Project: j20100/dldc
    # add white gaussian random noise
    Noises = [0, .001]
    for i in Noises:
        x += i*np.random.normal(0, 1, len(x))
        f, t, Zxx = signal.stft(x, fs, nperseg=1024)

        log_stft = np.log(np.abs(Zxx))

        im_col = 20
        hop = im_col // 2  # integer step so range() accepts it under Python 3

        for c in range(0, log_stft.shape[1], hop):
            im = log_stft[:, c:c + im_col]
            if im.shape[1] == im_col:
                output_name = '/'.join(map(str,k[:-1])) + '/' + k[-1][:-5] + str(c) + 'N_'+str(i)+'.mat'
                sio.savemat(output_name, {"im": im})



searchfilesdronemat = os.path.join(cwd,'dataset','train','drone','*.mat')
searchfilesnotdronemat = os.path.join(cwd,'dataset','train','notdrone','*.mat')

filesdronemat = glob.glob(searchfilesdronemat)
filesnotdronemat = glob.glob(searchfilesnotdronemat)

val_size_drone = int(len(filesdronemat)*0.15)
val_size_not_drone = int(len(filesnotdronemat)*0.15)
print(len(filesdronemat),len(filesnotdronemat))
print(val_size_drone,val_size_not_drone)

for i in range(val_size_drone):
Example #53
0
    elif s == 0 and s2 == 0 and s3 == 1:
        src = bilblur
        edgesrc = frame
        bilsrc = frame

    elif s == 0 and s2 == 1 and s3 == 1:
        src = edges
        edgesrc = bilblur
        bilsrc = frame
    else:
        src = edges
        edgesrc = blur
        bilsrc = frame

    blursrc = frame
    sigmaX = 0

    blur = cv2.GaussianBlur(blursrc,ksize,sigmaX)#[,dst[,sigmaY[,borderType]]])
    # bilblur = cv2.bilateralFilter(bilsrc,dia,sigCol,sigSpace)
    edges = cv2.Canny(edgesrc, minval, maxval)

    if savefile == True:
        sio.savemat(outfile,state_mat)


    if keys[QUIT]:
        running = False
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows
Example #54
0
                                      seq_length, 1, -1))
    print((var2))
    std = var2.pow(0.5)
    eps = Variable(std.data.new(std.size()).normal_())
    #sample2=eps.mul(std).add_(mean2)
    sample2 = (mean2)

    for tria in range(2):
        sample1 = Variable(torch.randn(seq_length, 1, 50))

        reconstruct1, recVar1 = model.decode(sample1)

        M1 = reconstruct1.data.view(seq_length, -1)
        U = M1.numpy()
        sio.savemat(
            '/Users/sg9872/Desktop/Miccai/Miccai18/sampleTMP/NoiseLat' +
            str(tria) + '.mat', {"U": U})

        line = plotMatrix(M1, seq_length, 161, 162, 'Line 1')
    first_legend = plt.legend(handles=[line], loc=1)

    # Add the legend manually to the current Axes.
    ax = plt.gca().add_artist(first_legend)
    #mtl.rc('xtick', labelsize=5)
    #mtl.rc('ytick', labelsize=5)
    plt.xticks(np.array([0, 100, 200]))
    plt.yticks(np.array([0, 0.5, 1]))
    ax = plt.gca()
    ax.tick_params(axis='both', which='major', labelsize=15)
    #ax.tick_params(axis = 'both', which = 'minor', labelsize = 16)
Example #55
0
                         train_y,
                         batch_size=batch_size,
                         epochs=MAX_EPOCHS,
                         validation_data=(dev_x, dev_y),
                         callbacks=[checkpointer, reduce_lr, stopping])

        return hist


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("config_file", help="path to config file")
    parser.add_argument("--experiment",
                        "-e",
                        help="tag with experiment name",
                        default="default")
    args = parser.parse_args()
    params = json.load(open(args.config_file, 'r'))
    hist = train(args, params)

    import scipy.io as sio
    import numpy as np
    arr1 = np.array(hist.history['loss'])
    sio.savemat('loss.mat', {'arr1': arr1})
    arr2 = np.array(hist.history['val_loss'])
    sio.savemat('val_loss.mat', {'arr2': arr2})
    arr3 = np.array(hist.history['acc'])
    sio.savemat('acc.mat', {'arr3': arr3})
    arr4 = np.array(hist.history['val_acc'])
    sio.savemat('val_acc.mat', {'arr4': arr4})
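Writing one .mat file per curve works, but the four history arrays above could also be collected into a single file with one savemat call. A minimal alternative sketch using the same hist object (the file name 'training_history.mat' is a placeholder); each history key becomes its own MATLAB variable:

import numpy as np
import scipy.io as sio

history = {k: np.array(v) for k, v in hist.history.items()}   # loss, val_loss, acc, val_acc
sio.savemat('training_history.mat', history)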
Example #56
0
def MakeReport(CompleteListOfFiles,directory2Data, nation,cruice_id,platform):
    import random
    import datetime, time
    from tools import tools
    
    outloc = '/'
    
    #Some user input data
#    integration_dist = 1
    pel_ch_thickness = 10
    NumChannels = 35
    PhantomEchoDist = 275
    PhantomEchoWidth = 50
    TH = -70
    ChannelSize = NumChannels*pel_ch_thickness
    
    Depth_bottom = 0
    
    #Something for bookkeeping
    NASC_out = []
    
    #Convert stuff to integer, 
    #This is to speed up the program
    Com = FastGetComplete(directory2Data,CompleteListOfFiles)


    #Something about the channels
    #This should be made more general
    Depth = np.linspace(0,ChannelSize,NumChannels)+ChannelSize/NumChannels/2
    Delta_Z = ChannelSize/NumChannels/2
    NASC = np.nan*np.linspace(0,ChannelSize,NumChannels).T[:,np.newaxis]
    
    
    Work_list = os.listdir(directory2Data.dir_verticalwork)
    Work_I = np.arange(len(Work_list))
    random.shuffle(Work_I)
    
    
    #Loop through each work file
    counter = 1
    for work_i in Work_I: 
        
        tools.printProgressBar(counter, len(Work_I), prefix = 'Copy files', suffix = 'Files left: '+
                         str(len(Work_I)-counter) , decimals = 1, length = 50)
                
#        print('files left: ' +str(len(Work_I)-counter),end='\r')
        counter = counter+1
        
        work = Work_list[work_i]
        if work != 'Com.mat':

            try: 
                if not os.path.isfile(directory2Data.dir_resultvertical+'/Report_'+work): 
                    
                
#    directory2Data.dir_work+'/'+'Vertical_T'+str(transectID)+'.mat'
                
                    #Load mask of the filtered data
                    workfile =  scp.loadmat(directory2Data.dir_work+'/'+work)
                    
                    
                    
                    try: 
                        pings = np.unique(workfile['ping_mask'])
                    except: 
                        pings = []
                    
        
                        
                        
                    
                    if len(pings) == 0:
                        print('check: '+ directory2Data.dir_work+'/'+work)
                    else: 
                        #Loop through each unique ping
                        for ping_idx in range(len(pings[1:])): 
                            
                            tools.printProgressBar(ping_idx,len(pings),prefix = 'LUF20 of '+work+':', suffix = 'Completed', length = 50)
                
                
                            #Idx stuff
                            idx = np.where(workfile['ping_mask']==pings[ping_idx+1])
                            idx_file = np.where(Com == (pings[ping_idx+1]))
                            
                            
                            File = CompleteListOfFiles['FileList'][np.where(CompleteListOfFiles['ping_time']==pings[ping_idx+1])][0]
                            IDX = CompleteListOfFiles['IDX'][np.where(CompleteListOfFiles['ping_time']==pings[ping_idx+1])][0]
            
            
                                    
                            #Get the file name of the idx file
                            filname = directory2Data.dir_rawdata+'/'+CompleteListOfFiles['FileList'][idx_file[0]]
                            filname = directory2Data.dir_rawdata+'/'+File
                            
                            #Load file
                            fileID = Dataset(filname,'r',format = 'NETCDF4')
                
                            
                            #Get the group with the vertical fan data
                            if fileID.groups['Sonar'].groups['Beam_group1'].beam_mode == 'Vertical': 
                                beamgrp = 'Beam_group1'
                            elif fileID.groups['Sonar'].groups['Beam_group2'].beam_mode == 'Vertical':
                                beamgrp = 'Beam_group2'
            
                                
            
            
                            #Get variables from .nc file
                            variables = tools.GetVariablesFromNC(fileID,beamgrp,False,IDX)
                            
                            
                            #Close file
                            fileID.close()
                            
                            
            #                        if print_sa == True: 
            #                            sa_by_acocat = tools.addFrequencyLevelInfo(distance,variables.frequency,0,NumChannels,0)
            #                            print_sa = False
                                
                                
                                
                            
                            if Depth_bottom == 0: 
                                
                                    
                                delta_lat = 2000/111111
                                try: 
                                    delta_lon = 2000/(111111*np.cos(np.deg2rad(float(workfile['lat_start'][0]))))
                                except: 
                                    delta_lon = 2000/111111
                            
                            
                                Depth_bottom = -1000
                                
                            
                                try: 
                                    Depth_bottom = GetBottomDepth(float(workfile['lat_start'][0]),float(workfile['lat_stop'][0]),
                                                              float(workfile['lon_start'][0]),float(workfile['lon_stop'][0]),delta_lat,delta_lon)
                                except: 
                                    Depth_bottom = -1000
                            
                            #Get file name
            #                            try: 
                                
                                
                            
                            sv, RangeOut= tools.ApplyTVG(10*np.log10(variables.BeamAmplitudeData),
                                                    variables.soundvelocity[0],
                                                    variables.sampleinterval,
                                                    variables.transmitpower,
                                                    variables.absorptioncoefficient[0],
                                                    variables.frequency,
                                                    variables.pulslength,
                                                    variables.gaintx,
                                                    variables.equivalentbeamangle,
                                                    variables.sacorrection,
                                                    variables.dirx)
                
                            
            #                        sv[np.where(RangeOut<=RemoveToCloseValues)] = np.nan
            #                   
                            sv[np.where(RangeOut<=30)] = np.nan
                            sv[:,np.where(variables.dirx>=60)] = np.nan
                            sv[np.where(sv<TH)] = np.nan
                               
                               
                            #Get the depth of each pixel
                            Y= np.sin(np.deg2rad(variables.dirx))*np.sin(np.deg2rad(variables.diry))
                            Y = (abs(Y[:,np.newaxis]*RangeOut[:,np.newaxis].T)).T
                            sv[np.where(Y>=abs(Depth_bottom+150))] = np.nan
                               
                               
                            X= np.cos(np.deg2rad(variables.dirx))*np.sin(np.deg2rad(variables.diry))
                            X = (X[:,np.newaxis]*RangeOut[:,np.newaxis].T).T
                            sv[np.where(abs(X)>(PhantomEchoDist+PhantomEchoWidth/2))] = np.nan
                            sv[np.where(abs(X)<(PhantomEchoDist-PhantomEchoWidth/2))] = np.nan
                            
                            
                            try: 
                                sv2 = sv*np.nan
                                sv2[workfile['r_mask'][idx],workfile['b_mask'][idx]]=sv[workfile['r_mask'][idx],workfile['b_mask'][idx]]
                                sv2 = 10**(sv2/10)
                                sv2[np.isnan(sv2)] = 0
                                    
                                    
                                
                                #Remove surface beam
                #                        sv2[:,np.where(variables.dirx < 3)] = np.nan
                #                    sv[:,np.where(variables.dirx < 3)] = np.nan
                    
                #                    
                                #Select only at a specific distance from vessel
                #                    sv[np.where(abs(X)>300)] = np.nan
                #                    sv[np.where(abs(X)<250)] = np.nan
                
                #
                #    
                #    
                #    
                                #Integrate in depth channels
                                for i in range(len(Depth)): 
                                    temp = sv2[np.where(abs(abs(Y)-i*ChannelSize/NumChannels +Delta_Z)<=Delta_Z)]
                                    temp2 = np.count_nonzero(~np.isnan(temp))
                                    NASC[i,-1]= np.nansum(temp)/temp2
                
                #                    
                ##                        except IndexError: 
                ##                            print('bad ping')
                #    
                #            
                                A =  np.nansum(NASC,axis=1)[:,np.newaxis]/len(NASC[0,:])
                                A = A*(1852**2)*pel_ch_thickness*4*np.pi
                        
                                
                                
                                Liste = {}
                                Liste['Report_time'] = str(datetime.datetime.fromtimestamp((time.time())).strftime('%Y-%m-%d %H:%M:%S'))
                                
                                #General overview
                                Liste['Instrument'] = {}
                                Liste['Calibration'] = {}
                                Liste['DataAcquisition'] = {}
                                Liste['DataProcessingMethod'] = {}
                                Liste['Cruice'] = {}
                        
                        
                        
                                #Instrument section
                                
                                #Must go into the xml file
                                
                                Liste['Instrument']['Frequency'] = variables.frequency
                                Liste['Instrument']['TransducerLocation'] = 'AA'
                                Liste['Instrument']['TransducerManufacturer'] = 'Simrad'
                                Liste['Instrument']['TransducerModel'] = 'SU90'
                                Liste['Instrument']['TransducerSerial'] = 'Unknown'
                                Liste['Instrument']['TransducerBeamType'] = 'M2'
                                Liste['Instrument']['TransducerDepth'] = ''
                                Liste['Instrument']['TransducerOrientation'] = ''
                                Liste['Instrument']['TransducerPSI'] = ''
                                Liste['Instrument']['TransducerBeamAngleMajor'] = ''
                                Liste['Instrument']['TransducerBeamAngleMinor'] = ''
                                Liste['Instrument']['TransceiverManufacturer'] = ''
                                Liste['Instrument']['TransceiverModel'] = ''
                                Liste['Instrument']['TransceiverSerial'] = ''
                                Liste['Instrument']['TransducerOrientation'] = ''
                        
                            
                                #Calibration Section
                                Liste['Calibration']['Date'] = ''
                                Liste['Calibration']['AcquisitionMethod'] = ''
                                Liste['Calibration']['ProcessingMethod'] = ''
                                Liste['Calibration']['AccuracyEstimate'] = ''
                                
                        
                                #Calibration Section
                                Liste['DataAcquisition']['SoftwareName'] = ''
                                Liste['DataAcquisition']['SoftwareVersion'] = ''
                                Liste['DataAcquisition']['StoredDataFormat'] = ''
                                Liste['DataAcquisition']['StoredDataFormatVersion'] = ''
                                Liste['DataAcquisition']['ConvertedDataFormat'] = ''
                                Liste['DataAcquisition']['ConvertedDataFormatVersion'] = ''
                                Liste['DataAcquisition']['PingDutyCycle'] = ''
                        
                        
                                #Data Processing
                                Liste['DataProcessingMethod']['SoftwareName'] = 'pysonar'
                                Liste['DataProcessingMethod']['SoftwareVersion'] = '0.1'
                                Liste['DataProcessingMethod']['TriwaveCorrection '] = 'NA'
                                Liste['DataProcessingMethod']['ChannelID'] = ''
                                Liste['DataProcessingMethod']['Bandwidth'] = ''
                                Liste['DataProcessingMethod']['Frequency'] = variables.frequency
                                Liste['DataProcessingMethod']['TransceiverPower'] = variables.transmitpower
                                Liste['DataProcessingMethod']['TransmitPulseLength'] = variables.pulslength
                                Liste['DataProcessingMethod']['OnAxisGain'] = variables.gaintx
                                Liste['DataProcessingMethod']['OnAxisGainUnit'] = 'dB'
                                Liste['DataProcessingMethod']['SaCorrection'] = variables.sacorrection
                                Liste['DataProcessingMethod']['Absorption'] = variables.absorptioncoefficient[0]
                                Liste['DataProcessingMethod']['AbsorptionDescription'] = 'Nominal'
                                Liste['DataProcessingMethod']['SoundSpeed'] = variables.soundvelocity[0]
                                Liste['DataProcessingMethod']['SoundSpeedDescription'] = 'Nominal'
                                Liste['DataProcessingMethod']['TransducerPSI'] = ''
                        
                                
                        
                                #Cruice
                                Liste['Cruice']['Survey'] = ''
                                Liste['Cruice']['Country'] = 'NO'
                                Liste['Cruice']['Nation'] = nation
                                Liste['Cruice']['Platform'] = platform
                                Liste['Cruice']['StartDate'] = ''
                                Liste['Cruice']['StopDate'] = ''
                                Liste['Cruice']['Organisation'] = ''
                                Liste['Cruice']['LocalID'] = cruice_id
                        
                        
                        
                                try:
                                    tiime = workfile['start_time'][0]
                                    stime = tiime[:4]+'-'+tiime[4:6]+'-'+tiime[6:8]+' '+tiime[9:11]+':'+tiime[11:13]+':'+tiime[13:]
                                except Exception:
                                    stime = ''

                                try:
                                    tiime = workfile['stop_time'][0]
                                    etime = tiime[:4]+'-'+tiime[4:6]+'-'+tiime[6:8]+' '+tiime[9:11]+':'+tiime[11:13]+':'+tiime[13:]
                                except Exception:
                                    etime = ''
                        
                        
                                Liste['Cruice']['Log'] = {}
                    
                                Liste['Cruice']['Log']['Distance'] = workfile['log_start'][0]
                                Liste['Cruice']['Log']['TimeStart'] = stime
                                Liste['Cruice']['Log']['TimeStop'] = etime
                                Liste['Cruice']['Log']['Latitude_start'] = workfile['lat_start'][0]
                                Liste['Cruice']['Log']['Longitude_start'] = workfile['lon_start'][0]
                                Liste['Cruice']['Log']['Latitude_stop'] = workfile['lat_stop'][0]
                                Liste['Cruice']['Log']['Longitude_stop'] = workfile['lon_stop'][0]
                                Liste['Cruice']['Log']['Bottom_depth'] = Depth_bottom
                                Liste['Cruice']['Log']['Origin'] = ''
                                Liste['Cruice']['Log']['Validity'] = ''
                        
                        
                                Liste['Cruice']['Log']['Sample'] = {}
                                Liste['Cruice']['Log']['Sample']['ChannelThickness'] = pel_ch_thickness
                                Liste['Cruice']['Log']['Sample']['Acocat'] = 'Unknown'
                                Liste['Cruice']['Log']['Sample']['SvThreshold'] = str(TH)
                                Liste['Cruice']['Log']['Sample']['PingAxisInterval'] = 1  #same as the integration thickness
                                Liste['Cruice']['Log']['Sample']['PingAxisIntervalType'] = 'distance'
                                Liste['Cruice']['Log']['Sample']['PingAxisIntervalUnit'] = 'nmi'
                                Liste['Cruice']['Log']['Sample']['DataValue'] = A
                                Liste['Cruice']['Log']['Sample']['DataType'] = 'C'
                                Liste['Cruice']['Log']['Sample']['DataUnit'] = 'm2nm-2'
                                Liste['Cruice']['Log']['Sample']['PhantomEchoDistance'] = PhantomEchoDist
                                Liste['Cruice']['Log']['Sample']['PhantomEchoWidth'] = PhantomEchoWidth
                            except: 
                                print('Badping')
                    
                    try: 
                        import scipy.io as sc
                        sc.savemat(directory2Data.dir_resultvertical+'/Report_'+work,mdict=Liste)
                        
            
                        Depth_bottom = 0
                    except: 
                         dummy = 1
            except: 
                print('bad')


        #Must be generalized

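The report above is a plain nested dict handed to scipy.io.savemat, which writes each sub-dict out as a MATLAB struct. A minimal, self-contained sketch of that round trip (file name and field values below are invented for illustration, not taken from the snippet):

import numpy as np
import scipy.io as sio

report = {
    'Instrument': {'Frequency': 26000, 'TransducerModel': 'SU90'},
    'Cruice': {'Log': {'Sample': {'DataValue': np.zeros((10, 1)),
                                  'DataUnit': 'm2nm-2'}}},
}
sio.savemat('Report_example.mat', mdict={'Liste': report})

# Read back, each struct is a (1, 1) record array, so fields are reached
# with [0, 0] indexing at every nesting level.
back = sio.loadmat('Report_example.mat')
freq = back['Liste'][0, 0]['Instrument'][0, 0]['Frequency'][0, 0]
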
    def evaluate(self, cfg, preds, output_dir, *args, **kwargs):
        # convert 0-based index to 1-based index
        preds = preds[:, :, 0:2] + 1.0
        
        if cfg.TEST.NO_GT_LABELS:
            return {'Null': 0.0}, 0.0

        if output_dir:
            pred_file = os.path.join(output_dir, 'pred.mat')
            savemat(pred_file, mdict={'preds': preds})

        if 'test' in cfg.DATASET.TEST_SET:
            return {'Null': 0.0}, 0.0

        SC_BIAS = 0.6
        threshold = 0.5

        gt_file = os.path.join(cfg.DATASET.ROOT,
                               'annot',
                               'gt_{}.mat'.format(cfg.DATASET.TEST_SET))
        gt_dict = loadmat(gt_file)
        dataset_joints = gt_dict['dataset_joints']
        jnt_missing = gt_dict['jnt_missing']
        pos_gt_src = gt_dict['pos_gt_src']
        headboxes_src = gt_dict['headboxes_src']

        pos_pred_src = np.transpose(preds, [1, 2, 0])

        head = np.where(dataset_joints == 'head')[1][0]
        lsho = np.where(dataset_joints == 'lsho')[1][0]
        lelb = np.where(dataset_joints == 'lelb')[1][0]
        lwri = np.where(dataset_joints == 'lwri')[1][0]
        lhip = np.where(dataset_joints == 'lhip')[1][0]
        lkne = np.where(dataset_joints == 'lkne')[1][0]
        lank = np.where(dataset_joints == 'lank')[1][0]

        rsho = np.where(dataset_joints == 'rsho')[1][0]
        relb = np.where(dataset_joints == 'relb')[1][0]
        rwri = np.where(dataset_joints == 'rwri')[1][0]
        rkne = np.where(dataset_joints == 'rkne')[1][0]
        rank = np.where(dataset_joints == 'rank')[1][0]
        rhip = np.where(dataset_joints == 'rhip')[1][0]

        jnt_visible = 1 - jnt_missing
        uv_error = pos_pred_src - pos_gt_src
        uv_err = np.linalg.norm(uv_error, axis=1)
        headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]
        headsizes = np.linalg.norm(headsizes, axis=0)
        headsizes *= SC_BIAS
        scale = np.multiply(headsizes, np.ones((len(uv_err), 1)))
        scaled_uv_err = np.divide(uv_err, scale)
        scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)
        jnt_count = np.sum(jnt_visible, axis=1)
        less_than_threshold = np.multiply((scaled_uv_err <= threshold),
                                          jnt_visible)
        PCKh = np.divide(100.*np.sum(less_than_threshold, axis=1), jnt_count)

        # save
        rng = np.arange(0, 0.5+0.01, 0.01)
        pckAll = np.zeros((len(rng), 16))

        for r in range(len(rng)):
            threshold = rng[r]
            less_than_threshold = np.multiply(scaled_uv_err <= threshold,
                                              jnt_visible)
            pckAll[r, :] = np.divide(100.*np.sum(less_than_threshold, axis=1),
                                     jnt_count)

        PCKh = np.ma.array(PCKh, mask=False)
        PCKh.mask[6:8] = True

        jnt_count = np.ma.array(jnt_count, mask=False)
        jnt_count.mask[6:8] = True
        jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)

        name_value = [
            ('Head', PCKh[head]),
            ('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),
            ('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),
            ('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),
            ('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),
            ('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),
            ('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),
            ('Mean', np.sum(PCKh * jnt_ratio)),
            ('Mean@0.1', np.sum(pckAll[11, :] * jnt_ratio))
        ]
        name_value = OrderedDict(name_value)

        return name_value, name_value['Mean']
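
The method above follows the standard MPII PCKh protocol: joint errors are normalised by 0.6 times the head-box diagonal and counted as correct below a threshold of 0.5. A toy, self-contained version of that computation on invented arrays (shapes mirror the code above, joints x 2 x images):

import numpy as np

SC_BIAS = 0.6
threshold = 0.5

pos_gt = np.random.rand(16, 2, 8) * 200              # fake ground-truth joints
pos_pred = pos_gt + np.random.randn(16, 2, 8) * 5    # fake predictions
jnt_visible = np.ones((16, 8))                       # pretend every joint is annotated
headboxes = np.stack([np.zeros((2, 8)), 50 * np.ones((2, 8))])   # two corners x 2 coords x images

uv_err = np.linalg.norm(pos_pred - pos_gt, axis=1)                # joints x images
headsizes = np.linalg.norm(headboxes[1] - headboxes[0], axis=0) * SC_BIAS
scaled_err = uv_err / headsizes                                   # broadcasts over joints
pckh = 100.0 * np.sum((scaled_err <= threshold) * jnt_visible, axis=1) / np.sum(jnt_visible, axis=1)
print(pckh)   # one PCKh value per joint
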
Example #58
0
    f = fseed.dot(L)
    g = nlfunc(f)
    y = g + np.random.randn(npoints) * noise

    # Make the dictionary to save into a mat structure
    datadic = {
        'noise':        noise,
        'func':         fctnList[fwdmdlInd],
        'dfunc':        dfctnList[fwdmdlInd],
        'x':            x,
        'f':            f,
        'g':            g,
        'y':            y,
        'train':        [],
        'test':         []
        }

    # Save the data to disk
    if not os.path.exists(savedir):
        os.mkdir(savedir)

    for k, (sind, rind) in enumerate(gputils.k_fold_CV_ind(npoints, k=folds)):

        datadic['train'].append(rind)
        datadic['test'].append(sind)

    datadic['train'] = np.array(datadic['train'])
    datadic['test'] = np.array(datadic['test'])

    sio.savemat(os.path.join(savedir, savenameList[fwdmdlInd]), datadic)
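
The fold indices stored in datadic come from gputils.k_fold_CV_ind, a project-specific helper. A hedged, generic stand-in that only mimics the (test, train) index pairs the loop above appears to expect:

import numpy as np

def k_fold_indices(npoints, k=5, seed=0):
    """Yield (test_idx, train_idx) pairs over k roughly equal folds."""
    rng = np.random.RandomState(seed)
    folds = np.array_split(rng.permutation(npoints), k)
    for i in range(k):
        test = folds[i]
        train = np.concatenate([folds[j] for j in range(k) if j != i])
        yield test, train

# Same bookkeeping as above, on a tiny example
train_idx, test_idx = [], []
for sind, rind in k_fold_indices(20, k=5):
    test_idx.append(sind)
    train_idx.append(rind)
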
    #_intrinsics.model = cameraInfo.distortion_model
    _intrinsics.model  = rs.distortion.inverse_brown_conrady
    _intrinsics.coeffs = [0, 0, 0, 0, 0] #[i for i in cameraInfo.D]
    result = rs.rs2_deproject_pixel_to_point(_intrinsics, [x, y], depth)
    #result[0]: right, result[1]: down, result[2]: forward
    return result[2], -result[0], -result[1]

path = '../data/realsense/for_test/'
folder = 'points'
img = Image.open(path + folder + '/409_depth.png')
data = np.asarray(img)
#output = np.zeros([720, 1280, 3])
output = []

clipping_distance = 1000

for i in range(720):
    for j in range(1280):
        if (data[i, j] < clipping_distance) & (data[i, j] > 0):  # keep valid depths inside the clipping range
          p1, p2, p3 = convert_depth_to_phys_coord_using_realsense(i, j, data[i, j])
          #output[i, j] = [p1, p2, p3]    
          output.append([p1, p2, p3])

output = np.array(output)

print(output.shape)

np.save('coordination', output)

scio.savemat('coordination.mat', {'output': output})
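
Both files written above hold the same N x 3 point array; a quick round-trip check (this assumes the snippet above has just run in the current working directory):

import numpy as np
import scipy.io as scio

pts_npy = np.load('coordination.npy')                 # np.save appends the .npy suffix
pts_mat = scio.loadmat('coordination.mat')['output']
assert pts_npy.shape == pts_mat.shape
assert np.allclose(pts_npy, pts_mat)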
Example #60
0
def MultipleModels(modelsDict, data, nEpochs, batchSize, seqLen, stateFeat,
                   rnnStateFeat, **kwargs):
    """
    Trains multiple models simultaneously

    Inputs:

        modelsDict (dict): Dictionary containing the models to be trained (see
            Modules.model.Model class)
        data (class): Data to carry out the training (see Utils.dataTools)
        nEpochs (int): number of epochs (passes over the dataset)
        batchSize (int): size of each minibatch
        seqLen (int): length of input sequence for recurrent architectures
        stateFeat (int): number of state features of GCRNN architectures
        rnnStateFeat (int): number of state features of RNN architectures

        Keyword arguments:

        validationInterval (int): interval of training (number of training
            steps) without running a validation stage.

        Optional (keyword) arguments:

        learningRateDecayRate (float): float that multiplies the latest learning
            rate used.
        learningRateDecayPeriod (int): how many training steps before 
            multiplying the learning rate decay rate by the actual learning
            rate.
        > Obs.: Both of these have to be defined for the learningRateDecay
              scheduler to be activated.
        logger (Visualizer): save tensorboard logs.
        saveDir (string): path to the directory where to save relevant training
            variables.
        printInterval (int): how many training steps after which to print
            partial results (0 means do not print)
        graphNo (int): keep track of what graph realization this is
        realizationNo (int): keep track of what data realization this is
        >> Alternatively, these last two keyword arguments can be used to keep
            track of different trainings of the same model

    Observations:
    - Model parameters for best and last are saved.
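
    Example (illustrative only; the Model instances and the data object come
    from this project's Modules.model and Utils.dataTools classes):

        models = {'GCRNN': gcrnnModel, 'RNN': rnnModel}
        MultipleModels(models, data,
                       nEpochs=40, batchSize=100, seqLen=10,
                       stateFeat=5, rnnStateFeat=5,
                       validationInterval=20,
                       saveDir='experiments/run00', printInterval=10)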

    """

    ####################################
    # ARGUMENTS (Store chosen options) #
    ####################################

    # Training Options:
    if 'logger' in kwargs.keys():
        doLogging = True
        logger = kwargs['logger']
    else:
        doLogging = False

    if 'saveDir' in kwargs.keys():
        doSaveVars = True
        saveDir = os.path.join(kwargs['saveDir'], 'trainVars')
    else:
        doSaveVars = False

    if 'printInterval' in kwargs.keys():
        doPrint = True
        printInterval = kwargs['printInterval']
    else:
        doPrint = False

    if 'learningRateDecayRate' in kwargs.keys() and \
        'learningRateDecayPeriod' in kwargs.keys():
        doLearningRateDecay = True
        learningRateDecayRate = kwargs['learningRateDecayRate']
        learningRateDecayPeriod = kwargs['learningRateDecayPeriod']
    else:
        doLearningRateDecay = False

    validationInterval = kwargs['validationInterval']

    if 'graphNo' in kwargs.keys():
        graphNo = kwargs['graphNo']

    if 'realizationNo' in kwargs.keys():
        realizationNo = kwargs['realizationNo']

    # No training case:
    if nEpochs == 0:
        doSaveVars = False
        doLogging = False
        # If there's no training happening, there's nothing to report about
        # training losses and stuff.

    ###########################################
    # DATA INPUT (pick up on data parameters) #
    ###########################################

    nTrain = data.nTrain  # size of the training set

    # Number of batches: If the desired number of batches does not split the
    # dataset evenly, we reduce the size of the last batch (the number of
    # samples in the last batch).
    # The variable batchSize is a list of length nBatches (number of batches),
    # where each element of the list is a number indicating the size of the
    # corresponding batch.
    if nTrain < batchSize:
        nBatches = 1
        batchSize = [nTrain]
    elif nTrain % batchSize != 0:
        nBatches = np.ceil(nTrain / batchSize).astype(np.int64)
        batchSize = [batchSize] * nBatches
        # If the sum of all batches so far is not the total number of graphs,
        # start taking away samples from the last batch (remember that we used
        # ceiling, so we are overshooting with the estimated number of batches)
        while sum(batchSize) != nTrain:
            batchSize[-1] -= 1
    # If they fit evenly, then just do so.
    else:
        nBatches = int(nTrain // batchSize)
        batchSize = [batchSize] * nBatches
    # batchIndex is used to determine the first and last element of each batch.
    # If batchSize is, for example [20,20,20] meaning that there are three
    # batches of size 20 each, then cumsum will give [20,40,60] which determines
    # the last index of each batch: up to 20, from 20 to 40, and from 40 to 60.
    # We add the 0 at the beginning so that batchIndex[b]:batchIndex[b+1] gives
    # the right samples for batch b.
    batchIndex = np.cumsum(batchSize).tolist()
    batchIndex = [0] + batchIndex

    ##############
    # TRAINING   #
    ##############

    # Learning rate scheduler:
    if doLearningRateDecay:
        learningRateScheduler = {}
        for key in modelsDict.keys():
            learningRateScheduler[key] = torch.optim.lr_scheduler.StepLR(
                modelsDict[key].optim, learningRateDecayPeriod,
                learningRateDecayRate)

    # Initialize counters (since we give the possibility of early stopping, we
    # had to drop the 'for' and use a 'while' instead):
    epoch = 0  # epoch counter

    #\\\ Save variables to be used for each model
    # Logging variables
    if doLogging:
        lossTrainTB = {}
        evalTrainTB = {}
        lossValidTB = {}
        evalValidTB = {}

    # Training variables of interest
    if doSaveVars:
        lossTrain = {}
        evalTrain = {}
        lossValid = {}
        evalValid = {}
        timeTrain = {}
        timeValid = {}
        for key in modelsDict.keys():
            lossTrain[key] = []
            evalTrain[key] = []
            lossValid[key] = []
            evalValid[key] = []
            timeTrain[key] = []
            timeValid[key] = []

    # Model tracking
    bestScore = {}
    bestEpoch = {}
    bestBatch = {}

    for epoch in range(nEpochs):

        # Randomize dataset for each epoch
        randomPermutation = np.random.permutation(nTrain)
        # Convert a numpy.array of numpy.int into a list of actual int.
        idxEpoch = [int(i) for i in randomPermutation]

        # Learning decay
        if doLearningRateDecay:
            for key in learningRateScheduler.keys():
                learningRateScheduler[key].step()

            if doPrint:
                # All the optimization have the same learning rate, so just
                # print one of them
                # TODO: Actually, they might be different, so I will need to
                # print all of them.
                print("Epoch %d, learning rate = %.8f" %
                      (epoch + 1,
                       learningRateScheduler[key].optimizer.param_groups[0]['lr']))

        # Initialize counter
        batch = 0  # batch counter
        for batch in range(nBatches):

            # Extract the adequate batch
            thisBatchIndices = idxEpoch[batchIndex[batch]:batchIndex[batch +
                                                                     1]]
            # Get the samples
            xTrain, yTrain = data.getSamples('train', thisBatchIndices)
            xTrain = xTrain.view(batchSize[batch], seqLen, -1)
            yTrain = yTrain.view(batchSize[batch], seqLen, -1)

            if doPrint and printInterval > 0:
                if (epoch * nBatches + batch) % printInterval == 0:
                    trainPreamble = ''
                    if 'graphNo' in kwargs.keys():
                        trainPreamble += 'G:%02d ' % graphNo
                    if 'realizationNo' in kwargs.keys():
                        trainPreamble += 'R:%02d ' % realizationNo
                    print("[%sTRAINING - E: %2d, B: %3d]" %
                          (trainPreamble, epoch + 1, batch + 1))

            for key in modelsDict.keys():

                # Set the ordering
                xTrainOrdered = xTrain[:, :,
                                       modelsDict[key].order]  # B x F x N

                # Check if it is an RNN to add sequence dimension
                if 'RNN' in modelsDict[key].name or 'rnn' in modelsDict[key].name or \
                                                    'Rnn' in modelsDict[key].name:
                    xTrainOrdered = xTrainOrdered.unsqueeze(
                        2)  # To account for just F=1 feature
                    yTrainModel = yTrain.unsqueeze(
                        2)  # To account for just F=1 feature

                else:
                    xTrainOrdered = xTrainOrdered.view(
                        batchSize[batch] * seqLen, 1, -1)
                    yTrainModel = yTrain.view(batchSize[batch] * seqLen, 1, -1)

                # Start measuring time
                startTime = datetime.datetime.now()

                # Reset gradients
                modelsDict[key].archit.zero_grad()


                if 'GCRNN' in modelsDict[key].name or 'gcrnn' in modelsDict[key].name or \
                                                    'GCRnn' in modelsDict[key].name:
                    # Obtain the output of the GCRNN
                    h0 = torch.zeros(batchSize[batch], stateFeat,
                                     xTrainOrdered.shape[3])
                    yHatTrain = modelsDict[key].archit(xTrainOrdered, h0)
                elif 'RNN' in modelsDict[key].name or 'rnn' in modelsDict[key].name or \
                                            'Rnn' in modelsDict[key].name:
                    # Obtain the output of the GCRNN
                    h0 = torch.zeros(batchSize[batch], rnnStateFeat)
                    c0 = h0
                    yHatTrain = modelsDict[key].archit(xTrainOrdered, h0, c0)
                else:
                    # Obtain the output of the GNN
                    yHatTrain = modelsDict[key].archit(xTrainOrdered)
                    yHatTrain = yHatTrain.unsqueeze(1)

                # Compute loss
                lossValueTrain = modelsDict[key].loss(yHatTrain, yTrainModel)

                # Compute gradients
                lossValueTrain.backward()

                # Optimize
                modelsDict[key].optim.step()

                # Finish measuring time
                endTime = datetime.datetime.now()

                timeElapsed = abs(endTime - startTime).total_seconds()

                # Compute the accuracy
                #   Note: Using yHatTrain.data creates a new tensor with the
                #   same value, but detaches it from the gradient, so that no
                #   gradient operation is taken into account here.
                #   (Alternatively, we could use a with torch.no_grad():)
                accTrain = data.evaluate(yHatTrain.data, yTrainModel)

                # Logging values
                if doLogging:
                    lossTrainTB[key] = lossValueTrain.item()
                    evalTrainTB[key] = accTrain.item()
                # Save values
                if doSaveVars:
                    lossTrain[key] += [lossValueTrain.item()]
                    evalTrain[key] += [accTrain.item()]
                    timeTrain[key] += [timeElapsed]

                # Print:
                if doPrint and printInterval > 0:
                    if (epoch * nBatches + batch) % printInterval == 0:
                        print(
                            "\t(%s) %6.4f / %6.4f - %6.4fs" %
                            (key, accTrain, lossValueTrain.item(),
                             timeElapsed))

            #\\\\\\\
            #\\\ TB LOGGING (for each batch)
            #\\\\\\\

            if doLogging:
                modeLoss = 'Loss'
                modeEval = 'Accuracy'
                if 'graphNo' in kwargs.keys():
                    modeLoss += 'G%02d' % graphNo
                    modeEval += 'G%02d' % graphNo
                if 'realizationNo' in kwargs.keys():
                    modeLoss += 'R%02d' % realizationNo
                    modeEval += 'R%02d' % realizationNo
                logger.scalar_summary(mode='Training' + modeLoss,
                                      epoch=epoch * nBatches + batch,
                                      **lossTrainTB)
                logger.scalar_summary(mode='Training' + modeEval,
                                      epoch=epoch * nBatches + batch,
                                      **evalTrainTB)

            #\\\\\\\
            #\\\ VALIDATION
            #\\\\\\\

            if (epoch * nBatches + batch) % validationInterval == 0:
                # Validation:
                nValid = data.nValid
                xValid, yValid = data.getSamples('valid')
                xValid = xValid.view(nValid, seqLen, -1)
                yValid = yValid.view(nValid, seqLen, -1)

                if doPrint:
                    validPreamble = ''
                    if 'graphNo' in kwargs.keys():
                        validPreamble += 'G:%02d ' % graphNo
                    if 'realizationNo' in kwargs.keys():
                        validPreamble += 'R:%02d ' % realizationNo
                    print("[%sVALIDATION - E: %2d, B: %3d]" %
                          (validPreamble, epoch + 1, batch + 1))

                for key in modelsDict.keys():
                    # Set the ordering
                    xValidOrdered = xValid[:, :,
                                           modelsDict[key].order]  # BxFxN
                    if 'RNN' in modelsDict[key].name or 'rnn' in modelsDict[key].name or \
                                                    'Rnn' in modelsDict[key].name:
                        xValidOrdered = xValidOrdered.unsqueeze(2)
                        yValidModel = yValid.unsqueeze(2)
                    else:
                        xValidOrdered = xValidOrdered.view(
                            nValid * seqLen, 1, -1)
                        yValidModel = yValid.view(nValid * seqLen, 1, -1)

                    # Start measuring time
                    startTime = datetime.datetime.now()

                    # Under torch.no_grad() so that the computations carried out
                    # to obtain the validation accuracy are not taken into
                    # account to update the learnable parameters.
                    with torch.no_grad():
                        # Obtain the output of the GNN
                        if 'GCRNN' in modelsDict[key].name or 'gcrnn' in modelsDict[key].name or \
                                                    'GCRnn' in modelsDict[key].name:
                            h0v = torch.zeros(nValid, stateFeat,
                                              xValidOrdered.shape[3])
                            yHatValid = modelsDict[key].archit(
                                xValidOrdered, h0v)
                        elif 'RNN' in modelsDict[key].name or 'rnn' in modelsDict[key].name or \
                                                    'Rnn' in modelsDict[key].name:
                            # Obtain the output of the GCRNN
                            h0v = torch.zeros(nValid, rnnStateFeat)
                            c0v = h0v
                            yHatValid = modelsDict[key].archit(
                                xValidOrdered, h0v, c0v)
                        else:
                            yHatValid = modelsDict[key].archit(xValidOrdered)
                            yHatValid = yHatValid.unsqueeze(1)

                    # Compute loss
                        lossValueValid = modelsDict[key]\
                                       .loss(yHatValid,yValidModel)

                        # Finish measuring time
                        endTime = datetime.datetime.now()

                        timeElapsed = abs(endTime - startTime).total_seconds()

                        # Compute accuracy:
                        accValid = data.evaluate(yHatValid, yValidModel)

                        # Logging values
                        if doLogging:
                            lossValidTB[key] = lossValueValid.item()
                            evalValidTB[key] = accValid.item()
                        # Save values
                        if doSaveVars:
                            lossValid[key] += [lossValueValid.item()]
                            evalValid[key] += [accValid.item()]
                            timeValid[key] += [timeElapsed]

                        # Print:
                        if doPrint:
                            print("\t(%s) %6.4f / %6.4f - %6.4fs" %
                                  (key, accValid, lossValueValid.item(),
                                   timeElapsed))

                    # No previous best option, so let's record the first trial
                    # as the best option
                    if epoch == 0 and batch == 0:
                        bestScore[key] = accValid
                        bestEpoch[key], bestBatch[key] = epoch, batch
                        # Save this model as the best (so far)
                        modelsDict[key].save(label='Best')
                        # Store the keys of the best models when they happen
                        keyBest = []
                    else:
                        thisValidScore = accValid
                        if thisValidScore < bestScore[key]:
                            bestScore[key] = thisValidScore
                            bestEpoch[key], bestBatch[key] = epoch, batch
                            if doPrint:
                                keyBest += [key]
                            modelsDict[key].save(label='Best')

                if doPrint:
                    if len(keyBest) > 0:
                        for key in keyBest:
                            print("\t=> New best achieved for %s: %.4f" % \
                                              (key, bestScore[key]))
                        keyBest = []

                if doLogging:
                    logger.scalar_summary(mode='Validation' + modeLoss,
                                          epoch=epoch * nBatches + batch,
                                          **lossValidTB)
                    logger.scalar_summary(mode='Validation' + modeEval,
                                          epoch=epoch * nBatches + batch,
                                          **evalValidTB)

            #\\\\\\\
            #\\\ END OF BATCH:
            #\\\\\\\

            #\\\ Increase batch count:
            batch += 1

        #\\\\\\\
        #\\\ END OF EPOCH:
        #\\\\\\\

        #\\\ Save models:
        for key in modelsDict.keys():
            modelsDict[key].save(label='Last')

        #\\\ Increase epoch count:
        epoch += 1

    #################
    # TRAINING OVER #
    #################

    if doSaveVars:
        # We convert the lists into np.arrays to be handled by both Matlab(R)
        # and matplotlib
        for key in modelsDict.keys():
            lossTrain[key] = np.array(lossTrain[key])
            evalTrain[key] = np.array(evalTrain[key])
            timeTrain[key] = np.array(timeTrain[key])
            lossValid[key] = np.array(lossValid[key])
            evalValid[key] = np.array(evalValid[key])
            timeValid[key] = np.array(timeValid[key])
        # And we would like to save all the relevant information from training
        if not os.path.exists(saveDir):
            os.makedirs(saveDir)
        # Dictionaries of variables to save
        varsMatlab = {}
        varsPickle = {}
        # And let's start with pickle
        varsPickle['nEpochs'] = nEpochs
        varsPickle['nBatches'] = nBatches
        varsPickle['validationInterval'] = validationInterval
        varsPickle['batchSize'] = np.array(batchSize)
        varsPickle['batchIndex'] = np.array(batchIndex)
        varsPickle['lossTrain'] = lossTrain
        varsPickle['evalTrain'] = evalTrain
        varsPickle['timeTrain'] = timeTrain
        varsPickle['lossValid'] = lossValid
        varsPickle['evalValid'] = evalValid
        varsPickle['timeValid'] = timeValid
        # Create file for pickling
        varsFilename = 'trainVars'
        # And add the information if this is a specific realization run
        if 'graphNo' in kwargs.keys():
            varsFilename += 'G%02d' % graphNo
            varsPickle['graphNo'] = graphNo
            varsMatlab['graphNo'] = graphNo
        if 'realizationNo' in kwargs.keys():
            varsFilename += 'R%02d' % realizationNo
            varsPickle['realizationNo'] = realizationNo
            varsMatlab['realizationNo'] = realizationNo
        # Create the file
        pathToFile = os.path.join(saveDir, varsFilename + '.pkl')
        # Open and save it
        with open(pathToFile, 'wb') as trainVarsFile:
            pickle.dump(varsPickle, trainVarsFile)
        # And because of the SP background, why not save it in matlab too?
        pathToMat = os.path.join(saveDir, varsFilename + '.mat')
        varsMatlab['nEpochs'] = nEpochs
        varsMatlab['nBatches'] = nBatches
        varsMatlab['validationInterval'] = validationInterval
        varsMatlab['batchSize'] = np.array(batchSize)
        varsMatlab['batchIndex'] = np.array(batchIndex)
        for key in modelsDict.keys():
            varsMatlab['lossTrain' + key] = lossTrain[key]
            varsMatlab['evalTrain' + key] = evalTrain[key]
            varsMatlab['timeTrain' + key] = timeTrain[key]
            varsMatlab['lossValid' + key] = lossValid[key]
            varsMatlab['evalValid' + key] = evalValid[key]
            varsMatlab['timeValid' + key] = timeValid[key]
        savemat(pathToMat, varsMatlab)
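        # Loaded back in MATLAB (or with scipy.io.loadmat), these show up as one
        # vector per model, e.g. lossTrain<key> / evalValid<key> for whatever keys
        # modelsDict uses.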

    # Now, if we didn't do any training (i.e. nEpochs = 0), then the last is
    # also the best.
    if nEpochs == 0:
        for key in modelsDict.keys():
            modelsDict[key].save(label='Best')
            modelsDict[key].save(label='Last')
        if doPrint:
            print("WARNING: No training. Best and Last models are the same.")

    # After training is done, reload best model before proceeding to evaluation:
    for key in modelsDict.keys():
        modelsDict[key].load(label='Best')

    #\\\ Print out best:
    if doPrint and nEpochs > 0:
        for key in modelsDict.keys():
            print(
                "=> Best validation achieved for %s (E: %2d, B: %2d): %.4f" %
                (key, bestEpoch[key] + 1, bestBatch[key] + 1, bestScore[key]))