Example #1
    def save(self, outfile):
        """Save the content of the domain to a file.

        Parameters
        ----------

        outfile: string
            output filename
        """
        sp.savez(outfile,
                 __shapes=self.__shapes,
                 __outline_color_shapes=self.__outline_color_shapes,
                 __fill_color_shapes=self.__fill_color_shapes,
                 __image_filename=self.__image_filename,
                 name=self.name,
                 __background=self.__background,
                 pixel_size=self.pixel_size,
                 xmin=self.xmin,
                 ymin=self.ymin,
                 xmax=self.xmax,
                 ymax=self.ymax,
                 width=self.width,
                 height=self.height,
                 X=self.X,
                 Y=self.Y,
                 wall_colors=self.wall_colors,
                 wall_mask=self.wall_mask,
                 wall_id=self.wall_id,
                 wall_distance=self.wall_distance,
                 wall_grad_X=self.wall_grad_X,
                 wall_grad_Y=self.wall_grad_Y,
                 destinations=self.destinations,
                 image=self.image)
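Each keyword passed to savez becomes a named array inside the .npz archive, so the domain can be reloaded field by field. A minimal loading sketch, assuming only NumPy; the file name is a placeholder, and note that sp.savez here is NumPy's savez reached through SciPy's old re-export (recent SciPy removed it, so np.savez is the safe spelling):

import numpy as np

# 'domain.npz' is a placeholder; savez appends '.npz' when the name lacks it.
with np.load("domain.npz", allow_pickle=True) as data:
    print(data.files)               # names of every stored array
    pixel_size = data["pixel_size"]
    wall_mask = data["wall_mask"]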
Example #2
    def write(filename, snpdata):
        """Writes a :class:`SnpData` to SnpNpz format and returns the :class:`.SnpNpz`

        :param filename: the name of the file to create
        :type filename: string
        :param snpdata: The in-memory data that should be written to disk.
        :type snpdata: :class:`SnpData`
        :rtype: :class:`.SnpNpz`

        >>> from pysnptools.snpreader import SnpNpz, Bed
        >>> import pysnptools.util as pstutil
        >>> from pysnptools.util import example_file # Download and return local file name
        >>> bed_file = example_file("pysnptools/examples/toydata.5chrom.*","*.bed")
        >>> snpdata = Bed(bed_file,count_A1=False)[:,:10].read()     # Read first 10 snps from Bed format
        >>> pstutil.create_directory_if_necessary("tempdir/toydata10.snp.npz")
        >>> SnpNpz.write("tempdir/toydata10.snp.npz",snpdata)          # Write data in SnpNpz format
        SnpNpz('tempdir/toydata10.snp.npz')
        """
        row_ascii = np.array(
            snpdata.row,
            dtype='S')  #!!! would be nice to avoid this copy when not needed.
        col_ascii = np.array(
            snpdata.col,
            dtype='S')  #!!! would be nice to avoid this copy when not needed.
        np.savez(filename,
                 row=row_ascii,
                 col=col_ascii,
                 row_property=snpdata.row_property,
                 col_property=snpdata.col_property,
                 val=snpdata.val)
        logging.debug("Done writing " + filename)
        return SnpNpz(filename)
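Because row and col are converted to fixed-width byte strings (dtype='S') before saving, they come back as bytes under Python 3 and usually need decoding. A minimal round-trip sketch, assuming only NumPy and the file created by the doctest above:

import numpy as np

with np.load("tempdir/toydata10.snp.npz") as npz:
    row = npz["row"].astype(str)   # decode the b'...' byte strings back to text
    val = npz["val"]               # the SNP value matrix
print(row[:3], val.shape)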
Example #3
def save_binary_errors_dual_odor(errors_nonzero, errors_nonzero_2,
								 errors_zero, data_flag):
	"""
	Save decoding errors from an array of CS objects as a numpy object,
	above or below a certain threshold for nonzero and zero components.

	Args:
		errors_nonzero: Error array to be saved, for
			nonzero components of sparse_idxs not in idxs_2
		errors_nonzero_2: Error array to be saved, for
			nonzero components of idxs_2
		errors_zero: Error array to be saved, for
			zero components
		data_flag: Data identifier for saving and loading.
	"""

	out_dir = '%s/analysis/%s' % (DATA_DIR, data_flag)
	if not os.path.exists(out_dir):
		os.makedirs(out_dir)

	filename = '%s/binary_errors.npz' % out_dir
	sp.savez(filename, errors_nonzero=errors_nonzero, 
				errors_nonzero_2=errors_nonzero_2, 
				errors_zero=errors_zero)
	print('\nSignal errors file saved to %s' % filename)
Example #4
def test_once(filename):
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get loss.
    # Create placeholder.
    
    images = tf.placeholder(tf.float32, shape=(FLAGS.batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))

    loss_label, loss_domain = cifar10.inference(images)
    

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()

    saver = tf.train.Saver(variables_to_restore)
   
    _, _, _, _, _, _, \
            x, y, domain = input.input()

    # Pad the number of examples up to the next multiple of the batch size.
    num_of_examples = np.shape(x)[0]

    remainder = (-num_of_examples) % FLAGS.batch_size
    index = list(range(num_of_examples)) + [0] * remainder

    with tf.Session() as sess:
       #need to modify for test
       saver.restore(sess, filename) 
       global_step = int(filename.split('-')[1])


       # Allocate results in a list.
       losses_label = []
       losses_domain = []

       # Start the queue runners.
       step = 0
       while step + FLAGS.batch_size <= len(index):
           
           label_loss_value, domain_loss_value = sess.run([loss_label, loss_domain], feed_dict = {images:x[index[step:step+FLAGS.batch_size], :]})

           losses_label.append(label_loss_value)
           losses_domain.append(domain_loss_value)
           step = step + FLAGS.batch_size


       # Convert list of lists to numpy array.
       losses_label = np.asarray(losses_label)
       losses_domain = np.asarray(losses_domain)

       losses_label = losses_label.reshape((-1, 21))
       losses_domain = losses_domain.reshape((-1, 2))

       losses_label = losses_label[:num_of_examples, :]
       losses_domain = losses_domain[:num_of_examples, :]

  sp.savez('test.npz', losses_label = losses_label, losses_domain = losses_domain, y = y, domain = domain)

  return losses_label, losses_domain, y, domain
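The padding arithmetic above reduces to rounding a count up to the next multiple of the batch size: the shortfall is (-n) % batch_size. A small self-contained sketch of just that step, in pure Python with no TensorFlow:

def pad_indices(n, batch_size):
    # Extend [0, n) with dummy index 0 until the length divides evenly.
    remainder = (-n) % batch_size
    return list(range(n)) + [0] * remainder

assert len(pad_indices(103, 25)) == 125   # 103 examples padded to 5 batches of 25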
Example #5
    def frames2batch(k=12, batch_size=1024, is_calib=False):
        pos = util.get_files(rootdir='F:\\train_data\\pos\\')
        neg = util.get_files(rootdir='F:\\train_data\\neg\\')
        pos = shuffle(pos)
        neg = shuffle(neg)
        total = pos + neg
        total = shuffle(total)
        batch = []
        c = 0
        bpath = 'F:\\train_data\\batch\\'
        for item_path in total:

            frame = fr.get_frame(item_path)
            frame_r = fr.resize_frame(frame, (k, k))
            if frame_r is None:
                continue
            vec = fr.frame_to_vect(frame_r)
            label = 1 if item_path.split('\\')[-1].find('pos') > 0 else 0
            print(item_path, label)
            batch.append((vec, label))
            if len(batch) > 0 and len(batch) % batch_size == 0:
                batch = sp.array(batch)
                sp.savez(
                    bpath + str(c) + '_' + str(k) +
                    ('_' if not is_calib else '_calib-') + 'net', batch)
                batch = []

                c += 1
        if len(batch) > 0:  # flush any remaining examples as a final, possibly partial, batch
            batch = sp.array(batch)
            sp.savez(
                bpath + str(c) + '_' + str(k) +
                ('_' if not is_calib else '_calib') + '-net', batch)
            batch = []
            c += 1
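Because batch is a list of (vec, label) tuples, sp.array(batch) produces an object array, and recent NumPy refuses to unpickle object arrays on load unless told to. A minimal loading sketch; the file name follows the writer's pattern above and is otherwise a placeholder:

import numpy as np

with np.load("F:\\train_data\\batch\\0_12_net.npz", allow_pickle=True) as f:
    batch = f["arr_0"]   # savez was called positionally, so the key is 'arr_0'
for vec, label in batch:
    pass                 # hand each feature vector / label pair to training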
Example #7
    def write_data_file(self, trials, base_path, file_format='.mat'):
        self._check_requirements(trials)

        filenames = self.construct_filenames(trials, base_path)
        fullpaths = []
        for trial in trials:
            filename = filenames[trial.trial_id]
            fullpath = '%s%s' % (filename, file_format)
            fullpaths.append(fullpath)

            data_dict = {'filtered_data':trial.ef_traces.data,
                         'sampling_freq':trial.ef_sampling_freq.data} 
            if file_format == '.mat':
                scipy.io.savemat(fullpath, data_dict)
            elif file_format == '.npz':
                scipy.savez(fullpath, **data_dict)
            elif file_format == '.csv' or file_format == '.txt':
                delimiters = {'.csv':',', '.txt':' '}
                delimiter = delimiters[file_format]
                with open(fullpath, 'wb') as outfile:
                    writer = csv.writer(outfile, delimiter=delimiter)
                    writer.writerow([trial.ef_sampling_freq.data])
                    for channel_data in trial.ef_traces.data:
                        writer.writerow(channel_data)
        return fullpaths
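In the .csv/.txt branch the first row carries the sampling frequency and each later row one channel of filtered data. A minimal reader sketch for that layout, assuming Python 3 (the writer's open(fullpath, 'wb') is Python 2 style); fullpath stands for whatever write_data_file returned:

import csv

with open(fullpath, newline='') as infile:
    rows = list(csv.reader(infile, delimiter=','))
sampling_freq = float(rows[0][0])
traces = [[float(v) for v in row] for row in rows[1:]]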
Example #8
    def write_data_file(self, trials, base_path, file_format='.mat'):
        self._check_requirements(trials)

        filenames = self.construct_filenames(trials, base_path)
        fullpaths = []
        for trial in trials:
            filename = filenames[trial.trial_id]
            fullpath = '%s%s' % (filename, file_format)
            fullpaths.append(fullpath)

            data_dict = {}
            for channel_num, channel_data in enumerate(trial.event_times.data):
                data_dict['events_on_channel_%d' % channel_num] = channel_data
            if file_format == '.mat':
                scipy.io.savemat(fullpath, data_dict)
            elif file_format == '.npz':
                scipy.savez(fullpath, **data_dict)
            elif file_format == '.cPickle':
                with open(fullpath, 'wb') as outfile:
                    cPickle.dump(trial.event_times.data, outfile, protocol=-1)
            elif file_format == '.csv' or file_format == '.txt':
                delimiters = {'.csv':',', '.txt':' '}
                delimiter = delimiters[file_format]
                with open(fullpath, 'wb') as outfile:
                    writer = csv.writer(outfile, delimiter=delimiter)
                    for channel_data in trial.event_times.data:
                        writer.writerow(channel_data)
        return fullpaths
Example #9
def main():

    print('Reading subjects')

    ctrl_files = read_gord_data(data_dir=CTRL_DIR, num_sub=NUM_CTRL)

    t0 = time.time()
    print('performing stats based on random pairwise distances')

    pval_fdr, pval = compare_sub2ctrl(bfp_path=BFPPATH,
                                      sub_file=SUB_DATA,
                                      ctrl_files=ctrl_files,
                                      num_pairs=2000,
                                      nperm=1000,
                                      len_time=LEN_TIME,
                                      num_proc=4,
                                      fdr_test=True)
    t1 = time.time()

    print(t1 - t0)

    sp.savez('pval_out200.npz', pval=pval, pval_fdr=pval_fdr)

    vis_grayord_sigpval(bfp_path=BFPPATH,
                        pval=pval,
                        surf_name='subdiff',
                        out_dir='/big_disk/ajoshi/coding_ground/bfp/src/stats',
                        smooth_iter=1000)

    print('Results saved')
Example #10
    def write_data_file(self, trials, base_path, file_format='.mat'):
        self._check_requirements(trials)

        filenames = self.construct_filenames(trials, base_path)
        fullpaths = []
        for trial in trials:
            filename = filenames[trial.trial_id]
            fullpath = '%s%s' % (filename, file_format)
            fullpaths.append(fullpath)

            data_dict = {'features':trial.features.data,
                         'feature_times':trial.feature_times.data}
            if file_format == '.mat':
                scipy.io.savemat(fullpath, data_dict)
            elif file_format == '.npz':
                scipy.savez(fullpath, **data_dict)
            elif file_format == '.csv' or file_format == '.txt':
                delimiters = {'.csv':',', '.txt':' '}
                delimiter = delimiters[file_format]
                with open(fullpath, 'wb') as outfile:
                    writer = csv.writer(outfile, delimiter=delimiter)
                    writer.writerow(trial.feature_times.data)
                    writer.writerow([len(trial.features.data)])
                    for feature in trial.features.data:
                        writer.writerow(feature)
        return fullpaths
Example #11
    def write(filename, distdata):
        """Writes a :class:`DistData` to DistNpz format and returns the :class:`.DistNpz`

        :param filename: the name of the file to create
        :type filename: string
        :param distdata: The in-memory data that should be written to disk.
        :type distdata: :class:`DistData`
        :rtype: :class:`.DistNpz`

        >>> from pysnptools.distreader import DistNpz, DistHdf5
        >>> import pysnptools.util as pstutil
        >>> from pysnptools.util import example_file # Download and return local file name
        >>> hdf5_file = example_file("pysnptools/examples/toydata.iidmajor.dist.hdf5")
        >>> distdata = DistHdf5(hdf5_file)[:,:10].read()     # Read first 10 snps from DistHdf5 format
        >>> pstutil.create_directory_if_necessary("tempdir/toydata10.dist.npz")
        >>> DistNpz.write("tempdir/toydata10.dist.npz",distdata)          # Write data in DistNpz format
        DistNpz('tempdir/toydata10.dist.npz')
        """
        row_ascii = np.array(
            distdata.row,
            dtype='S')  #!!! would be nice to avoid this copy when not needed.
        col_ascii = np.array(
            distdata.col,
            dtype='S')  #!!! would be nice to avoid this copy when not needed.
        np.savez(filename,
                 row=row_ascii,
                 col=col_ascii,
                 row_property=distdata.row_property,
                 col_property=distdata.col_property,
                 val=distdata.val)
        logging.debug("Done writing " + filename)
        return DistNpz(filename)
Example #12
    def write_data_file(self, trials, base_path, file_format='.mat'):
        self._check_requirements(trials)

        filenames = self.construct_filenames(trials, base_path)
        fullpaths = []
        for trial in trials:
            filename = filenames[trial.trial_id]
            fullpath = '%s%s' % (filename, file_format)
            fullpaths.append(fullpath)

            if file_format == '.mat' or file_format == '.npz':
                data_dict = {}
                cf = trial.clustered_features
                cft = trial.clustered_feature_times
                for cluster_id in cf.keys():
                    data_dict['features(%s)' % cluster_id] = cf[cluster_id]
                    data_dict['feature_times(%s)' % cluster_id] = \
                            cft[cluster_id]
                    
                if file_format == '.mat':
                    scipy.io.savemat(fullpath, data_dict)
                elif file_format == '.npz':
                    scipy.savez(fullpath, **data_dict)

            elif file_format == 'cPickle' or file_format == 'cPickle.gz':
                data_dict = {'features':trial.clustered_features,
                        'feature_times':trial.clustered_feature_times}
                if file_format == 'cPickle.gz':
                    outfile = gzip.open(fullpath, 'wb')
                    cPickle.dump(data_dict, outfile, protocol=-1)
                    outfile.close()
                else:
                    with open(fullpath, 'wb') as outfile:
                        cPickle.dump(data_dict, outfile, protocol=-1)

            elif file_format == '.csv' or file_format == '.txt':
                cfal = trial.clustered_features_as_list
                cftal = trial.clustered_feature_times_as_list
                # cluster ids
                # nc (Number of clusters)
                # nc rows of feature_times
                # nc sets of:
                #    ne (Number of events within this cluster)
                #    ne rows of feature values (1 row per event)
                delimiters = {'.csv':',', '.txt':' '}
                delimiter = delimiters[file_format]
                nc = len(cfal)
                with open(fullpath, 'wb') as outfile:
                    writer = csv.writer(outfile, delimiter=delimiter)
                    writer.writerow(trial.clustered_features.keys())
                    writer.writerow([nc])
                    for clustered_times in cftal:
                        writer.writerow(clustered_times)

                    for clustered_features in cfal:
                        writer.writerow([len(clustered_features)])
                        for feature in clustered_features:
                            writer.writerow(feature)
        return fullpaths
Example #13
def main():

    print('Reading subjects')

    _, reg_var, sub_files = read_oasis3_data(
        csv_fname=CSV_FILE,
        data_dir=DATA_DIR,
        reg_var_name='UDSB9',  # alternatively 'Verbal IQ'
        num_sub=NUM_SUB,
        len_time=LEN_TIME,
        data_field='SCT_GO')

    # Shuffle reg_var and subjects for testing
    #reg_var = sp.random.permutation(reg_var)
    #ran_perm = sp.random.permutation(len(reg_var))
    #reg_var = reg_var
    #sub_files = [sub_files[i] for i in range(len(reg_var))]

    sub_files = sub_files  #[50:100]
    reg_var = reg_var  #[50:100]
    t0 = time.time()
    print('performing stats based on random pairwise distances')

    corr_pval_max, corr_pval_fdr = randpairs_regression(
        bfp_path=BFPPATH,
        sub_files=sub_files,
        reg_var=reg_var,
        num_pairs=20000,  # 19900,
        nperm=2000,
        len_time=LEN_TIME,
        num_proc=6,
        pearson_fdr_test=False,
        data_field='SCT_GO')
    t1 = time.time()

    print(t1 - t0)
    sp.savez('pval_num_pairs20000_nsub350_nperm2000_SCT.npz',
             corr_pval_max=corr_pval_max,
             corr_pval_fdr=corr_pval_fdr)
    # corr_pval_max=a['corr_pval_max']
    # corr_pval_fdr=a['corr_pval_fdr']
    vis_grayord_sigpval(corr_pval_max,
                        surf_name='rand_dist_corr_perm_pairs20000_max_SCT',
                        out_dir='.',
                        smooth_iter=1000,
                        bfp_path=BFPPATH,
                        fsl_path=FSL_PATH,
                        sig_alpha=0.05)
    vis_grayord_sigpval(corr_pval_fdr,
                        surf_name='rand_dist_corr_perm_pairs20000_fdr_SCT',
                        out_dir='.',
                        smooth_iter=1000,
                        bfp_path=BFPPATH,
                        fsl_path=FSL_PATH,
                        sig_alpha=0.05)

    print('Results saved')
Example #14
    def saveMatrix(self, fname):
        """ Saves the free Hamiltonian and potential matrices to file """

        t = (fname, self.L, self.m, \
            self.fullBasis.Emax, self.fullBasis.nmax, self.fullBasis.bcs, \
            self.h0.M.data,self.h0.M.row,self.h0.M.col, \
            self.potential.M.data,self.potential.M.row,self.potential.M.col, \
            )
        scipy.savez(*t)
Example #15
    def get_train_calib_data(k=12):
        '''
        for calibration net
        return X - features
               y - labels
               cnt - count of examples
        '''
        sp.random.seed(42)
        X_data, y_data = [], []

        suff = str(k)
        c = 0
        X_name = 'train_data_icalib_' + suff + '.npz'
        y_name = 'labels_icalib_' + suff + '.npz'
        label = -1
        dbpath = 'F:\\datasets\\image_data_sets\\faces\\AFLW'
        dbpath = join(dbpath, 'aflw.sqlite')
        rfpath = 'F:\\datasets\\image_data_sets\\faces\\AFLW\\img'
        conn = sqlite3.connect(dbpath)
        c = 0
        for file_id, x, y, ra, rb, theta in conn.execute(
                'SELECT file_id,x,y,ra,rb,theta FROM Faces NATURAL JOIN FaceEllipse'):
            fpath = join(rfpath, file_id)
            frame = fr.get_frame(fpath)
            x1, y1, x2, y2 = util.ellipse2bbox(a=ra, b=rb, angle=theta, cx=x, cy=y)
            x = x1
            y = y1
            h = abs(y2 - y1)
            w = abs(x2 - x1)
            no_neg = sp.all(sp.array([x, y, h, w]) > 0)  # ignore bad rows in the sql table
            if frame is not None and no_neg:
                y, x, w, h = [int(e) for e in (y, x, w, h)]
                face = fr.get_patch(frame, y, x, (w, h))
                # fr.write_frame('F:\\1\\' + str(c) + 'orig', face)
                c += 1
                # use j for the random calib index so it does not shadow the patch size k
                for ((new_y, new_x, new_w, new_h), label) in [
                        (util.calib(y, x, w, h, j), j)
                        for j in sp.random.randint(0, 45, 5)]:
                    face = fr.get_patch(frame, new_y, new_x, (new_w, new_h))
                    no_neg_calib = sp.all(sp.array([new_x, new_y, new_h, new_w]) > 0)
                    face_r, good_example = Datasets.sample_resize(face, k, k)

                    if good_example and no_neg_calib:
                        # fr.write_frame('F:\\1\\' + str(c) + 'calib_' + str(label), face)
                        print('face:', fpath, label)
                        vec = fr.frame_to_vect(face_r)
                        X_data.append(vec)
                        y_data.append(label)

        y_data = sp.array(y_data)
        sp.savez(y_name, y_data)
        X_data = sp.array(X_data)
        sp.savez(X_name, X_data)
        return X_data, y_data
Example #16
def save_tuning_curve(tuning_curve, epsilons, Kk2s, data_flag):
    """
	Save objects saved by save_objects in the save_data module. 

	Args:
		tuning_curves: numpy array holding response data.
		data_flag: Data identifier for loading and saving.
	"""

    out_dir = '%s/objects/%s' % (DATA_DIR, data_flag)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    filename = '%s/tuning_curve' % out_dir
    sp.savez(filename, tuning_curve=tuning_curve, epsilons=epsilons, Kk2s=Kk2s)

    print('Tuning curve data saved to %s' % filename)
Example #17
    def save(self, path_to_folder):
        if not path.exists(path_to_folder):
            os.makedirs(path_to_folder)
        else:
            assert path.isdir(path_to_folder)
        np_arrays = path.join(path_to_folder, 'arrays.npz')
        with open(np_arrays, 'wb') as npz:
            sp.savez(npz,
                     W=self.W,
                     H=self.H,
                     lag_val=self.lag_val,
                     lag_set=self.lag_set)
        other_members = path.join(path_to_folder, 'other.pkl')
        with open(other_members, 'wb') as other:
            tmp = {'transform': self.transform}
            pickle.dump(tmp, other)
Example #18
def save_MSE_errors(errors_nonzero, errors_zero, data_flag):
	"""
	Save decoding error from array of CS objects as numpy object.

	Args:
		errors_nonzero: Error array to be saved, for nonzero components.
		errors_zero: Error array to be saved, for zero components.
		data_flag: Data identifier for saving and loading.
	"""

	out_dir = '%s/analysis/%s' % (DATA_DIR, data_flag)
	if not os.path.exists(out_dir):
		os.makedirs(out_dir)

	filename = '%s/MSE_errors.npz' % out_dir
	sp.savez(filename, errors_nonzero=errors_nonzero, errors_zero=errors_zero)
	print('\nSignal errors file saved to %s' % filename)
Example #19
    def saveMatrix(self, fname):
        """ Saves the free Hamiltonian and potential matrices to file """

        t = (fname, self.L, self.m, \
            self.fullBasis[1].Emax, self.fullBasis[1].nmax, \
            self.fullBasis[-1].Emax, self.fullBasis[-1].nmax, \
            self.h0[1].M.data,self.h0[1].M.row,self.h0[1].M.col, \
            self.potential[1][0].M.data,self.potential[1][0].M.row,self.potential[1][0].M.col, \
            self.potential[1][2].M.data,self.potential[1][2].M.row,self.potential[1][2].M.col, \
            self.potential[1][4].M.data,self.potential[1][4].M.row,self.potential[1][4].M.col, \
            self.h0[-1].M.data,self.h0[-1].M.row,self.h0[-1].M.col, \
            self.potential[-1][0].M.data,self.potential[-1][0].M.row,self.potential[-1][0].M.col, \
            self.potential[-1][2].M.data,self.potential[-1][2].M.row,self.potential[-1][2].M.col, \
            self.potential[-1][4].M.data,self.potential[-1][4].M.row,self.potential[-1][4].M.col \
            )
        scipy.savez(*t)
Example #20
def save_success_ratios(successes, data_flag):
	"""
	Save list of successes based on decoding error of CS
	objects.
	
	Args:
		successes: binary numpy array, 1 for success and 0 for
					failure, over the full CS object array.
		data_flag: Data identifier for loading and saving.
	"""
	
	out_dir = '%s/analysis/%s' % (DATA_DIR, data_flag)
	if not os.path.exists(out_dir):
		os.makedirs(out_dir)

	filename = '%s/successes.npz' % out_dir
	sp.savez(filename, successes=successes)
	print('\nSignal binary successes file saved to %s' % filename)
Example #21
    def get_train_data(n_pos=46443, n_neg=206940, k=12):
        '''
        merge positive and negative examples
        '''
        suff = str(k)
        X_name = 'train_data_' + suff + '.npz'
        y_name = 'labels_' + suff + '.npz'
        if not (os.path.exists(X_name) and os.path.exists(y_name)):
            X_pos = []
            #            X_train_face,y_train_face  = Datasets.get_train_face_wider_data(k = k)
            #            X_pos = X_train_face[y_train_face==1]
            #            X_pos = X_train_face
            X_aflw, y_train_face_aflw = Datasets.get_aflw_face_data(k=k)
            #            if len(X_pos) > 0:
            #                X_pos = sp.vstack( [X_pos,X_aflw] )
            #            else:
            #                X_pos = X_aflw
            X_pos = X_aflw
            X_train_non_face, y_train_non_face = Datasets.get_train_non_face_data(
                k=k)
            print('c1_pos:', len(X_pos))
            #print((X_train_face[y_train_face==0].shape,X_train_non_face.shape))
            #            if len(X_train_face[y_train_face==0]) > 0:
            #                X_neg = sp.vstack( (X_train_face[y_train_face==0],X_train_non_face) )
            #            else:
            #                X_neg = X_train_non_face
            X_neg = X_train_non_face
            X_pos = shuffle(X_pos, random_state=42)
            X_neg = shuffle(X_neg, random_state=42)
            X_pos = X_pos[:n_pos]
            X_neg = X_neg[:n_neg]

            n_neg = len(X_neg)
            n_pos = len(X_pos)
            y_pos = sp.ones(n_pos, int)
            y_neg = sp.zeros(n_neg, int)
            X = sp.vstack((X_pos, X_neg))
            y = sp.hstack((y_pos, y_neg))
            X, y = shuffle(X, y, random_state=42)
            sp.savez(X_name, X)
            sp.savez(y_name, y)
Example #22
def save_signal_decoding_weber_law(successes, gains, epsilons, data_flag):
    """
	Save list of successes based on decoding error of CS
	objects.
	
	Args:
		successes: binary numpy array, 1 for success and 0 for
				failure, over the full CS object array.
		gains: numpy array of gains
		epsilons: numpy array of free energies
		data_flag: Data identifier for loading and saving.
	"""

    out_dir = '%s/analysis/%s' % (DATA_DIR, data_flag)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    filename = '%s/signal_decoding_weber_law.npz' % out_dir
    sp.savez(filename, successes=successes, gains=gains, epsilons=epsilons)
    print('\nDecoding error data file saved to %s' % filename)
Example #24
def save_signal_discrimination_weber_law(successes, successes_2, data_flag):
	"""
	Save list of successes based on decoding error of CS
	objects.

	Args:
		successes: binary numpy array, 1 for success and 0 for
				failure, over the full CS object array.
		successes_2: binary numpy array, 1 for success and 0 for
				failure, over the full CS object array, for
				fluctuating odor 2, indices given by .idxs_2
		data_flag: Data identifier for loading and saving.
	"""
	
	out_dir = '%s/analysis/%s' % (DATA_DIR, data_flag)
	if not os.path.exists(out_dir):
		os.makedirs(out_dir)

	filename = '%s/signal_discrimination_weber_law.npz' % out_dir
	sp.savez(filename, successes=successes, successes_2=successes_2)
	print('\nDecoding error data file saved to %s' % filename)
Example #25
absorber = (1000 *
            (1/s.cosh((x - x.min()) / 8.0) +
             1/s.cosh((x - x.max()) / 8.0)))
absorber[abs(x) < 64] = 0


t = s.linspace(args.mint, args.maxt, args.nt)
states = ccgnlse.integrate(
    t, x, input, args.dt,
    [-delta, 0.0, -1.0],
    1.0,
    potential,
    pump, loss,
    absorber, background)


workspace = {}
workspace["t"] = t
workspace["x"] = x
workspace["states"] = states
workspace["input"] = input
workspace["background"] = background
workspace["delta"] = delta
workspace["pump"] = pump
workspace["loss"] = loss
workspace["absorber"] = absorber


s.savez(filename, **workspace)
print(filename)
Example #26
    sub = lst[subno]
    data = scipy.io.loadmat(os.path.join(p_dir, sub, sub + '.rfMRI_REST2_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
    LR_flag = msk['LR_flag']
    LR_flag = np.squeeze(LR_flag) != 0
    data = data['ftdata_NLM']
    temp = data[LR_flag, :]

    d2 = temp.T

    ind = 0
    for len1 in IntV:
        sub_data1, _, _ = normalizeData(d1[:len1, :])
        sub_data2, _, _ = normalizeData(d2[:len1, :])
        s = sp.std(sub_data2, axis=0)
        sub_data1 = sub_data1[:, s > 1e-2]
        sub_data2 = sub_data2[:, s > 1e-2]
        sub_data2_sync, Rot = brainSync(X=sub_data1, Y=sub_data2)
        rho_orig[subno, ind] = sp.mean(sp.sum(sub_data2*sub_data1, axis=0))
        rho[subno, ind] = sp.mean(sp.sum(sub_data2_sync*sub_data1, axis=0))
        print(subno, len1, rho_orig[subno, ind], rho[subno, ind])
        ind += 1

    sp.savez('avg_corr.npz', rho=rho, rho_orig=rho_orig)

plt.plot(IntV, rho)
plt.ylim(ymax=1, ymin=0.5)
plt.savefig('rho_sync_vs_len_same_sub2.pdf')
plt.show()
Example #27
            fill_value=(imag[0], imag[-1]),
            bounds_error=False)(x)
        initial[:nx] = real
        initial[nx:] = imag
    else:
        solution = args.scale * solution
        initial[:nx] = solution.real
        initial[nx:] = solution.imag
else:
    initial[:] = 0


# Solve using Newton-Krylov method.
solution = optimize.newton_krylov(l0, initial)


filename = ("mode=%d_delta=%.3f_pump=%.2E_loss=%.2E_%s.npz" %
            (args.n, args.delta, args.pump, args.loss, args.label))
workspace = {}
workspace["x"] = x
workspace["potential"] = u
workspace["n"] = args.n
workspace["delta"] = args.delta
workspace["solution"] = solution[:nx] + 1j * solution[nx:]
workspace["pump"] = args.pump
workspace["loss"] = args.loss


scipy.savez(filename, **workspace)
print(filename)
Example #28
max_uav_to_d2d_gains = sp.zeros((max_num_d2d_pairs, max_chan_realizaion))
max_d2d_to_d2d_gains = sp.zeros(
    (max_num_d2d_pairs, max_num_d2d_pairs, max_chan_realizaion))
# ############################################################
# This loop for channel realization - Monte Carlos
# ############################################################
for Mon in range(max_chan_realizaion):
    d2d_pairs = []
    uav = UAV(height)
    for p in range(max_num_d2d_pairs):
        # d2d_pairs.append(D2DPair(p, coverage_r, d2d_max, low_rx=0.8))
        # d2d_pairs.append(D2DPair(p, coverage_r, d2d_max))
        d2d_pairs.append(
            D2DPair(p, coverage_r, d2d_max, low_rx=0.0, low_tx=0.0))

    for i in range(max_num_d2d_pairs):
        for j in range(max_num_d2d_pairs):
            max_d2d_to_d2d_gains[i, j, Mon] = sp.divide(
                d2d_pairs[i].loss_to_pair(d2d_pairs[j]), noise_variance)
        max_uav_to_d2d_gains[i, Mon] = sp.divide(
            uav.loss_to_pair(d2d_pairs[i], atg_a, atg_b), noise_variance)

# print max_uav_to_d2d_gains
# print max_d2d_to_d2d_gains
# max_d2d_to_d2d_gains_diff = sp.copy(max_d2d_to_d2d_gains)
# sp.fill_diagonal(max_d2d_to_d2d_gains_diff, 0)
# max_d2d_to_d2d_gains_diag = sp.subtract(max_d2d_to_d2d_gains, max_d2d_to_d2d_gains_diff)

sp.savez('chan_model', uav=max_uav_to_d2d_gains, d2d=max_d2d_to_d2d_gains)

# plt.show()
Example #29
# Calculate the total time of training dataset
time_sol = (time.time() - t0)

v1 = sp.array(EE_sol)
EE_sol_vec_Mon.append(sp.mean(v1))
v2 = sp.array(maximin_rate)
maximin_rate_sol_Mon.append(sp.mean(v2))
v3 = sp.array(tau_sol)
tau_sol_vec_Mon.append(sp.mean(v3))

vec_chan_training = sp.array(vec_chan)
vec_tau_training = sp.array(tau_sol)

print(EE_sol)
print(tau_sol)
print(maximin_rate)

print "Time for generate training dataset:", time_sol, "seconds"
print "Number of infeasible solving", num_infeasible
print "Size of vec_chan_training", sp.shape(vec_chan_training)
print "Size of vec_tau_training", sp.shape(vec_tau_training)

# test_vec_chan_training = vec_chan_training[1][1]

# Saving training dataset for DNN model
sp.savez('x_OHT_dataset',
         chan_dataset=vec_chan_training,
         tau_dataset=vec_tau_training)

print "Done. Saved"
Example #30
    def npy2npz():
        sp.savez('train_data_12.npz', sp.load('train_data_12.npy'))

        sp.savez('labels_12.npz', sp.load('labels_12.npy'))
Example #31
    def save_model(self, model_name=nn_name + '.npz'):
        sp.savez(model_name, *layers.get_all_param_values(self.net))
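save_model passes the parameter arrays positionally, so they are stored in order under the keys arr_0 … arr_N. A sketch of the matching restore using the same Lasagne layers module the snippet relies on; load_model is a hypothetical counterpart, and it assumes numpy is imported as np:

    def load_model(self, model_name=nn_name + '.npz'):
        with np.load(model_name) as f:
            # f.files is ['arr_0', 'arr_1', ...]; rebuild the ordered list.
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        layers.set_all_param_values(self.net, param_values)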
Example #32
n_samples = list(range(20, 1200, 20))
n_samples.append(1200)
labels_corr_sininv_all = sp.zeros((n_rep, len(n_samples), msksize[0]))
labels_corr_exp_all = sp.zeros((n_rep, len(n_samples), msksize[0]))
labels_corr_dist_all = sp.zeros((n_rep, len(n_samples), msksize[0]))
labels_corr_corr_exp_all = sp.zeros((n_rep, len(n_samples), msksize[0]))

for rep in range(n_rep):
    for ind in range(len(n_samples)):
        labels_corr_sininv, labels_corr_exp, labels_corr_dist,\
        labels_corr_corr_exp, msk_small_region, _ = parcellate_region(roiregion, sub,
                                           nClusters[n],
                                           sdir[scan // 2],
                                           scan_type[i], 1,
                                           session_type[scan % 2],
                                           algo=0, type_cor=0,
                                           n_samples=n_samples[ind])

        labels_corr_sininv_all[rep, ind, :] = labels_corr_sininv
        labels_corr_exp_all[rep, ind, :] = labels_corr_exp
        labels_corr_dist_all[rep, ind, :] = labels_corr_dist
        labels_corr_corr_exp_all[rep, ind, :] = labels_corr_corr_exp

        print(str(ind) + ' out of ' + str(len(n_samples)) + ' rep = ' + str(rep))

sp.savez('temp100_algo0_MTG_tmp.npz', labels_corr_sininv_all=labels_corr_sininv_all,
         labels_corr_exp_all=labels_corr_exp_all,
         labels_corr_dist_all=labels_corr_dist_all,
         labels_corr_corr_exp_all=labels_corr_corr_exp_all)

Example #33
import scipy


parser = argparse.ArgumentParser()
parser.add_argument("input",
                    type=str,
                    nargs="+")
args = parser.parse_args()


final = {}
for filename in args.input:
    partial = scipy.load(filename)
    for key in partial.files:
        if key not in final:
            final[key] = partial[key]
        else:
            if key == "t":
                final[key] = scipy.hstack(
                    [final[key], partial[key]])
            if key == "states":
                final[key] = scipy.vstack(
                    [final[key], partial[key]])

t = final["t"]
filename = (
    "delta=%.2f_pump=%.2E_loss=%.2E_mint=%.2f_maxt_%.2f_nt=%d.npz" %
    (final["delta"], final["pump"], final["loss"],
     t.min(), t.max(), len(t)))
scipy.savez(filename, **final)
Example #34
        except (SolverError, TypeError):
            # pass
            num_infeasible[prin - 2] += 1

    v1 = sp.array(EE_sol_vec)
    EE_sol_vec_Mon.append(sp.mean(v1))
    v2 = sp.array(time_sol_vec)
    time_sol_vec_Mon.append(sp.mean(v2))

    v3 = sp.array(tau_sol_vec)
    tau_sol_vec_Mon.append(sp.mean(v3))

print(EE_sol_vec_Mon)
print(time_sol_vec_Mon)
print(tau_sol_vec_Mon)

print(num_infeasible)

sp.savez('result_JHTPA',
         EE_JTHPA=EE_sol_vec_Mon,
         time_JTHPA=time_sol_vec_Mon,
         tau_JTHPA=tau_sol_vec_Mon)

# plt.figure(figsize=(8, 6))
# plt.clf()
# plt.plot(range_num_d2d_pairs, time_sol_vec_Mon)
# plt.figure(figsize=(8, 6))
# plt.clf()
# plt.plot(range_num_d2d_pairs, EE_sol_vec_Mon)
# plt.show()
Example #35
    def save(self):
        """Save to file"""
        sc.savez(self.fname, **self.store)
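Since the whole store dict is splatted into savez, loading is just the inverse: rebuild a dict from the archive's keys. A hypothetical load counterpart, sketched with plain NumPy (sc.savez above is the same function re-exported through SciPy):

    def load(self):
        """Rebuild self.store from the archive written by save()."""
        with np.load(self.fname, allow_pickle=True) as f:
            self.store = {key: f[key] for key in f.files}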
Example #36
    def save(self, fname):
        """Flush to disk"""
        sc.savez(fname, seed=self["seed"], **self.params)
Example #37
sub_labels = clf.predict(rho)
sub_label = np.zeros(vertices.shape[0], dtype=float)
sub_label[mask] = sub_labels
mlab.triangular_mesh(vertices[:, 0],
                     vertices[:, 1],
                     vertices[:, 2],
                     faces,
                     representation='surface',
                     opacity=1,
                     scalars=np.float64(sub_label))
mlab.gcf().scene.parallel_projection = True
mlab.view(azimuth=0, elevation=90)
mlab.colorbar(orientation='horizontal')
mlab.close()
data_file = 'data_file' + str(nCluster) + str(i) + 'logistic_labels.npz'
sp.savez(data_file, labels=sub_label)
SC = SpectralClustering(n_clusters=nCluster, affinity='precomputed')
labels = SC.fit_predict(rho)
label = np.zeros(vertices.shape[0], dtype=float)
label[mask] = labels + 1
mlab.triangular_mesh(vertices[:, 0],
                     vertices[:, 1],
                     vertices[:, 2],
                     faces,
                     representation='surface',
                     opacity=1,
                     scalars=np.float64(label))
mlab.gcf().scene.parallel_projection = True
mlab.view(azimuth=0, elevation=90)
mlab.colorbar(orientation='horizontal')
mlab.close()
Example #38
    def save(self):
        """Flush to disk"""
        sc.savez(self.fname, **self.params)
Example #39
                    output = None
                else:
                    if timeIssue[ 0 ][ -1 ] == 'o':
                        output = open( arg[ '-o' ] + '-' + str( i ), 'w' ) if arg[ '-o' ] else stdout
                        print( "# time evolution: {0}".format( timeIssue ), file = output )
                        print( "# bound dimension: {0}".format( parameter[ 'DIMBOUND' ] ), file = output )
                        print( "# delta: {0}".format( parameter[ 'DELTA' ] ), file = output )
                        print( "\n\n# {0:^10}{1:^25}{2:^25}{3:^25}{4:^25}".format( "time", "energy", "<Sz>", "entropy(A)", "entropy(B)" ),
                            file = output )
                    else:
                        output = None

                stepT = float( timeIssue[ 0 ][ 0:-1 ] ) if timeIssue[ 0 ][ -1 ] == 'o' else float( timeIssue[ 0 ] )
                totalT = float( timeIssue[ 1 ] )
                wf = groundState( H, parameter[ 'DIMBOUND' ], ( stepT, totalT ), wf, output )
                sp.savez( waveFile + '_out' + '-' + str( i ), gamma = wf[ 0 ], l = wf[ 1 ] )

                i += 1
                if output:
                    print( "\n\n# running time: {0:.6f} (s)".format( time() - start_time ), file = output )

                    if arg[ '-o' ]:
                        output.close()

        except IOError:
            print( "There is no file {0}".format( wavefile ) )
            exit( 1 )
        except KeyboardInterrupt:
            print( "end of program!" )
            exit( 1 )
    else:
Example #40
    elif i > 3 and i <= 7:
        k[i] = kk[2]
    elif i > 7 and i <= 11:
        k[i] = kk[3]
    elif i > 11 and i <= 15:
        k[i] = kk[4]
    elif i > 15 and i <= 19:
        k[i] = kk[5]

d1 = sp.ones(n)
d = sp.vstack((d1, d1, d1))
K = sp.asarray(spdiags(d, (-1, 0, 1), n, n).todense())

K = -k*K

for i in range(n-1):
    K[i,i] = k[i]+k[i+1]    
K[-1,-1] = k[-1]

f1 = 0.2 * Hz
f2 = 2 * Hz
xi1 = 0.0025
xi2 = xi1

a0 = ((4*sp.pi*f1*f2)/(f1**2-f2**2))*(f1*xi2-f2*xi1)
a1 = (f1*xi1-f2*xi2)/(sp.pi*(f1**2-f2**2))

C = a0*M + a1*K

sp.savez('mck.npz', M=M, C=C, K=K)
Example #41
    sub_conn1 = sub_conn1 - sp.mean(sub_conn1, axis=1)[:, None]
    sub_conn1 = sub_conn1 / (np.std(sub_conn1, axis=1) + 1e-16)[:, None]
    sub_conn2 = sp.corrcoef(sub_data2[:, :, ind] + 1e-16)
    sub_conn2 = sub_conn2 - sp.mean(sub_conn2, axis=1)[:, None]
    sub_conn2 = sub_conn2 / (np.std(sub_conn2, axis=1) + 1e-16)[:, None]

    dist_all_conn[cc_msk] += sp.mean((sub_conn1 - sub_conn2)**2.0, axis=(1))
    print(ind, end=' ')

dist_all_conn = dist_all_conn / (nSub)

var_all = sp.zeros((sub_data1.shape[0], sub_data2.shape[1]))

avg_sub_data = sp.mean(sub_data1, axis=2)

dfs_left_sm = patch_color_attrib(dfs_left_sm, dist_all_conn, clim=[0, 1])
view_patch_vtk(dfs_left_sm,
               azimuth=-90,
               elevation=-180,
               roll=-90,
               outfile='dist_sess_conn_view1_1sub_left.png',
               show=0)
view_patch_vtk(dfs_left_sm,
               azimuth=90,
               elevation=180,
               roll=90,
               outfile='dist_sess_conn_view2_1sub_left.png',
               show=0)

sp.savez('conn_sessions_pairwise_dist.npz', dist_all_conn)
Example #42
sub_conn_0 = sub_conn_0 / (np.std(sub_conn_0, axis=1) + 1e-16)[:, None]
for ind in range(1, nSub):
    sub_conn = sp.corrcoef(sub_data[:, :, ind] + 1e-16)
    sub_conn = sub_conn - sp.mean(sub_conn, axis=1)[:, None]
    sub_conn = sub_conn / (np.std(sub_conn, axis=1) + 1e-16)[:, None]
    dist_all_conn[cc_msk] += sp.mean((sub_conn_0 - sub_conn)**2.0, axis=(1))
    print(ind, end=' ')

dist_all_conn = dist_all_conn / nSub

var_all = sp.zeros((sub_data.shape[0], sub_data.shape[1]))

avg_sub_data = sp.mean(sub_data, axis=2)

# azimuth=-90,elevation=-180, roll=-90,
dfs_left_sm = patch_color_attrib(dfs_left_sm, dist_all_conn, clim=[0, 1])
view_patch_vtk(dfs_left_sm,
               azimuth=-90,
               elevation=-180,
               roll=-90,
               outfile='dist_conn_view1_1sub_left.png',
               show=0)
view_patch_vtk(dfs_left_sm,
               azimuth=90,
               elevation=180,
               roll=90,
               outfile='dist_conn_view2_1sub_left.png',
               show=0)

sp.savez('conn_pairwise_dist.npz', dist_all_conn)
Example #43
def parcellate_region(roilist,
                      sub,
                      nClusters,
                      scan,
                      scan_type,
                      savepng=0,
                      session=1,
                      algo=0,
                      type_cor=0):
    p_dir = '/big_disk/ajoshi/HCP100-fMRI-NLM/HCP100-fMRI-NLM'
    out_dir = '/big_disk/ajoshi/out_dir'
    r_factor = 3
    ref_dir = os.path.join(p_dir, 'reference')
    ref = '100307'
    fn1 = ref + '.reduce' + str(r_factor) + '.LR_mask.mat'
    fname1 = os.path.join(ref_dir, fn1)
    msk = scipy.io.loadmat(fname1)

    dfs_left_sm = readdfs(
        os.path.join('/home/ajoshi/for_gaurav',
                     '100307.BCI2reduce3.very_smooth.' + scan_type + '.dfs'))
    dfs_left = readdfs(
        os.path.join('/home/ajoshi/for_gaurav',
                     '100307.BCI2reduce3.very_smooth.' + scan_type + '.dfs'))

    data = scipy.io.loadmat(
        os.path.join(
            p_dir, sub, sub + '.rfMRI_REST' + str(session) + scan +
            '.reduce3.ftdata.NLM_11N_hvar_25.mat'))

    LR_flag = msk['LR_flag']
    # 0= right hemisphere && 1== left hemisphere
    if scan_type == 'right':
        LR_flag = np.squeeze(LR_flag) == 0
    else:
        LR_flag = np.squeeze(LR_flag) == 1
    data = data['ftdata_NLM']
    temp = data[LR_flag, :]
    m = np.mean(temp, 1)
    temp = temp - m[:, None]
    s = np.std(temp, 1) + 1e-16
    temp = temp / s[:, None]
    msk_small_region = np.in1d(dfs_left.labels, roilist)
    d = temp[msk_small_region, :]
    rho = np.corrcoef(d)
    rho[~np.isfinite(rho)] = 0
    d_corr = temp[~msk_small_region, :]
    rho_1 = np.corrcoef(d, d_corr)
    rho_1 = rho_1[range(d.shape[0]), d.shape[0]:]
    rho_1[~np.isfinite(rho_1)] = 0
    f_rho = np.arctanh(rho_1)
    f_rho[~np.isfinite(f_rho)] = 0
    B = np.corrcoef(f_rho)
    B[~np.isfinite(B)] = 0
    SC = SpectralClustering(n_clusters=nClusters, affinity='precomputed')
    affinity_matrix = np.arcsin(rho)
    labels_corr_sininv = SC.fit_predict(np.abs(affinity_matrix))

    affinity_matrix = sp.exp((-2.0 * (1 - rho)) / (.72**2))
    labels_corr_exp = SC.fit_predict(np.abs(affinity_matrix))

    affinity_matrix = sp.sqrt(2.0 + 2.0 * rho)
    labels_corr_dist = SC.fit_predict(np.abs(affinity_matrix))

    B1 = sp.exp((-2.0 * (1.0 - B)) / (0.72**2.0))
    labels_corr_corr_exp = SC.fit_predict(B1)

    sp.savez(os.path.join(
        out_dir, sub + '.rfMRI_REST' + str(session) + scan + str(roilist) +
        '.labs.npz'),
             labels_corr_sininv=labels_corr_sininv,
             labels_corr_corr_exp=labels_corr_corr_exp,
             labels_corr_dist=labels_corr_dist,
             labels_corr_exp=labels_corr_exp,
             msk_small_region=msk_small_region)
    return labels_corr_sininv, msk_small_region, dfs_left_sm
Example #44
def save_results(history, filename):
	sp.savez(filename, history=history)
Example #45
    def npy2npz():
        sp.savez('train_data_12.npz', sp.load('train_data_12.npy'))

        sp.savez('labels_12.npz', sp.load('labels_12.npy'))
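With a single unnamed positional argument, savez stores the array under the key 'arr_0'. When the converted file should also be compressed, np.savez_compressed is a drop-in replacement; a hedged variant of the same conversion:

    def npy2npz_compressed():
        import numpy as np
        # Same .npy -> .npz conversion, but zip-deflated; read back as f['arr_0'].
        np.savez_compressed('train_data_12.npz', np.load('train_data_12.npy'))
        np.savez_compressed('labels_12.npz', np.load('labels_12.npy'))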
Example #46
import os.path
import re
import scipy


parser = argparse.ArgumentParser()
parser.add_argument("input",
                    help="solution file",
                    type=str)
args = parser.parse_args()


pattern = re.compile("mode=(\d+)_delta=(.*)_pump=(.*)_loss=(.*)_(.*).npz")
match = pattern.match(os.path.basename(args.input))
if not match:
    exit()

print(args.input)

mode, delta, pump, loss, label = match.groups()
pump, loss = map(float, (pump, loss))


workspace = scipy.load(args.input)
workspace_ = {}
for key in workspace.files:
    workspace_[key] = workspace[key]
workspace_["pump"] = pump
workspace_["loss"] = loss
scipy.savez(args.input, **workspace_)
Example #47
''' ---- Compute stats ----- '''

# relativePos = [x[2]-x[1], x[]]] for x in AllBodyPos]

labels = ["r_shoulder", "r_arm", "r_hand",
		  "l_shoulder", "l_arm", "l_hand"]
relDistsIndiv = [
			[x[1]-x[0] for x in AllBodyPos if x != -1 and np.any(x[1] != 0)],
			[x[2]-x[1] for x in AllBodyPos if x != -1 and np.any(x[2] != 0)],
			[x[3]-x[2] for x in AllBodyPos if x != -1 and np.any(x[3] != 0)],
			[x[4]-x[0] for x in AllBodyPos if x != -1 and np.any(x[4] != 0)],
			[x[5]-x[4] for x in AllBodyPos if x != -1 and np.any(x[5] != 0)],
			[x[6]-x[5] for x in AllBodyPos if x != -1 and np.any(x[6] != 0)]
			]

relDists = [np.mean(x, 0) for x in relDistsIndiv]
absDists = [np.sqrt(np.sum(x**2)) for x in relDists]
relStds = [np.std(x,0) for x in relDistsIndiv]
absStds = [np.std(x) for x in relDistsIndiv]

scipy.savez("labeledSkels_Every500.npz",
			relDistsIndiv=relDistsIndiv,
			relDists=relDists,
			absDists=absDists,
			relStds=relStds,
			absStds=absStds,
			times=bodyTimes, 
			dataDir=dataDir)


Example #48
    plt.xlabel('Tiempo, $\it{t}$ (s)')
    plt.ylabel('Dist, $\it{d}$ (cm)')
    plt.plot(t, d)
    plt.plot(t[i_PGD], PGD, 'or')
    plt.text(t[i_PGD], PGD, "PGD={0:0.3f}cm".format(abs(PGD)))
    plt.axvline(t_05, ls='--', color='k')
    plt.axvline(t_95, ls='--', color='k')

    plt.subplot(1, 2, 2)
    plt.grid()
    plt.xlabel('Tiempo, $\it{t}$ (s)')
    plt.plot(t, Ia)
    plt.axvline(t_05, ls='--', color='k')
    plt.axvline(t_95, ls='--', color='k')
    plt.title("$D_{{5-95}} = {}s$".format(round(D_5_95, 4)))

    plt.suptitle('Evento: ' + arch[:-4])
    plt.show()

# -----------------------------------------------------------------------------

for j, i in enumerate(BM):
    sp.savez("registro_{}".format(str(j + 1).zfill(2)),
             a=i['a'],
             t=i['t'],
             metadatos=i['metadatos'])

# -----------------------------------------------------------------------------

print("--- Terminado en %s seg. ---" % (time.time() - start_time))
Example #49
def typeI_table(n1, n2, ncases, path=None):
    """Return a table of the m-test statistics under the null hypothesis.

    The function returns a table containing the value of the
    m-statistics of `ncases` draws from two populations of size `n1`
    and `n2` under the null hypothesis that the mean of the two
    populations is the same.

    If a table for population sizes `n1` and `n2` with more entries than
    `ncases` exists, all the stored values are returned.
    Otherwise, new cases are computed and stored, then returned.

    Parameters
    ----------
    n1 : number of samples in population 1
    n2 : number of samples in population 2
    ncases : number of populations to generate
    path : path to the m-test tables (see `get_tables_path`)

    Returns
    -------
    test_values : 1D array of m-test statistics, containing *at least*
                  `ncases` elements, but possibly more
    """

    fname = os.path.join(get_tables_path(path), TABLESNAME%(n1,n2))
    if os.path.exists(fname):
        logging.debug('Loading type I table %s', fname)
        npzfile = sp.load(fname)
        test_values = npzfile['test_values'].flatten()
    else:
        test_values = sp.array([])
    
    nvalues = test_values.shape[0]
    if nvalues>=ncases:
        return test_values

    nmissing = ncases-nvalues
    
    # compute missing entries
    if nmissing > 0:
        logging.debug('Requested %d cases, found %d, missing %d',
                      ncases, nvalues, nmissing)
        print('The requested mtest table is incomplete.')
        print('Need to process %d additional cases, this may take some time.'
              % nmissing)
        
        missing_values = sp.zeros((nmissing,))
        pop1_test, pop2_test = _random_same_mean(n1, n2, nmissing)
        
        for i in progressinfo(range(nmissing), style='timer'):
            missing_values[i] = mtest_marginal_likelihood_ratio(pop1_test[i,:],
                                                                pop2_test[i,:],
                                                                nprior=_NPRIOR)

        # update and save table
        test_values = sp.concatenate((test_values, missing_values))
        logging.debug('Saving updated table %s', fname)
        sp.savez(fname, test_values=test_values)

    return test_values
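A typical use of the returned null table is an empirical p-value: count how often the null draws are at least as extreme as the observed statistic. A usage sketch, assuming larger m-statistics are more extreme; the population sizes and the observed value are placeholders:

table = typeI_table(n1=10, n2=12, ncases=10000)
observed = 3.2                          # m-statistic computed on the real data (placeholder)
p_value = (table >= observed).mean()
print('empirical p-value under the null: %.4f' % p_value)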
Example #50
def save_corpus(fname, ofname, n):
    """Read @n documents of corpus in @fname and save the parsed arrays in @ofname"""
    corpus, dictionary = parse_text( fname, n )
    sc.savez(ofname, C=corpus, D=dictionary)
Example #51
def typeII_table(n1, n2, ncases, mean, std, path=None):
    """Return a table of the m-test statistics under a specific hypothesis.

    The function returns a table containing the value of the
    m-statistics and (for comparison) the t-statistics (independent
    t-test) of `ncases` draws from two populations of size `n1` and
    `n2`, the first with distribution Normal(mean, std^2), and the
    second with distribution Normal(0, 1).

    The table is used to compute the power of the test under different
    conditions.
    
    If a table for population sizes `n1` and `n2` with more entries than
    `ncases` exists, all the stored values are returned.
    Otherwise, new cases are computed and stored, then returned.

    Parameters
    ----------
    n1 : number of samples in population 1
    n2 : number of samples in population 2
    ncases : number of populations to generate
    mean : mean of population 1
    std : standard deviation of population 1
    path : path to the m-test tables (see `get_tables_path`)

    Returns
    -------
    m_test_values : 1D array of m-test statistics, containing *at least*
                    `ncases` elements, but possibly more
    t_test_values : 1D array of t-test statistics, containing *at least*
                    `ncases` elements, but possibly more
    """

    fname = os.path.join(get_tables_path(path),
                         TYPEII_TABLESNAME%(n1,n2,mean,std))
    if os.path.exists(fname):
        logging.debug('Loading type I table %s', fname)
        npzfile = sp.load(fname)
        m_test_values = npzfile['m_test_values'].flatten()
        t_test_values = npzfile['t_test_values'].flatten()
    else:
        m_test_values = sp.array([])
        t_test_values = sp.array([])
    
    nvalues = m_test_values.shape[0]
    if nvalues>=ncases:
        return m_test_values, t_test_values

    nmissing = ncases-nvalues
    if nmissing > 0:
        logging.debug('Requested %d cases, found %d, missing %d',
                      ncases, nvalues, nmissing)
        print('The requested mtest table is incomplete.')
        print('Need to process %d additional cases, this may take some time.'
              % nmissing)
    
        # compute missing entries
        pop1_test, pop2_test = _random_different_mean(n1, n2, nmissing,
                                                      mean, std)

        m_missing_values = sp.zeros((nmissing,))
        t_missing_values = sp.zeros((nmissing,))
        for i in progressinfo(range(nmissing), style='timer'):
            m_missing_values[i] = mtest_marginal_likelihood_ratio(
                pop1_test[i,:], pop2_test[i,:], nprior=_NPRIOR)
            t_missing_values[i] = stats.ttest_ind(pop1_test[i,:],
                                                  pop2_test[i,:])[1]

        # update and save table
        m_test_values = sp.concatenate((m_test_values, m_missing_values))
        t_test_values = sp.concatenate((t_test_values, t_missing_values))
        logging.debug('Saving updated table %s', fname)
        sp.savez(fname, m_test_values=m_test_values, t_test_values=t_test_values)

    return m_test_values, t_test_values