Example #1
def functionaltimes_readwrite (**cfg):

  nWrite = int(cfg['OCFourier']['{write_harmonic}'])
  nTime  = int(cfg['OCTime']['{read_timecnt}'])

  prefix        =cfg['FILES']['{prefix}']
  postfix       =cfg['FILES']['{postfix}']
  name_readwrite=getNameReadWrite(**cfg)
  name_optimized=cfg['FILES']['{name_optimized}']

  print (" read data : functional time")
  timeRead  =sp.zeros([nTime],float)

  # load time for reading section memory - part of reg2
  filename=prefix+"harmonic{0:0>4}_cavityMode_reg2_memory".format(nWrite)+postfix
  timeRead ,__,__ = sp.loadtxt(filename).T

  filename=prefix+"harmonic{0:0>4}_cavityMode_reg1_write".format(nWrite)+postfix
  timeWrite,__,__ = sp.loadtxt(filename).T

  # read functional times t2, t3
  configParser2 = cp.ConfigParser()
  configParser2.read(prefix+name_readwrite+name_optimized+"FunctionalTimes"+postfix)

  time=configParser2.__dict__['_sections']['functime'] # in seconds*wc
  time['read'] =timeRead  # in seconds*wc
  time['write']=timeWrite # in seconds*wc
  # read functional times t2, t3

  cfg['METime']['{fidelity_ti}'] = time['idx_ti']
  cfg['METime']['{fidelity_tf}'] = time['idx_tf']

  return time
Example #2
def harmonics_readwrite (**cfg):
  nWrite = int(cfg['OCFourier']['{write_harmonic}'])
  nRead  = int(cfg['OCFourier']['{read_harmonic}'])
  nTime  = int(cfg['OCTime']['{read_timecnt}'])
  wTime  = int(cfg['OCTime']['{write_timecnt}'])

  prefix        =cfg['FILES']['{prefix}']
  postfix       =cfg['FILES']['{postfix}']

  print (" read data : cavity modes")
  cavityWrite=sp.zeros([nWrite,wTime],complex)
  cavityMemo =sp.zeros([nWrite,nTime],complex)
  cavityRead =sp.zeros([nRead,nTime],complex)

  # load memory - part of reg2
  for iMemo in range(nWrite):
    filename=prefix+"harmonic"+"{0:0>4}".format(iMemo+1)+"_cavityMode_reg1_write"+postfix
    __,real,imag = sp.loadtxt(filename).T
  #  time,real,imag=sp.loadtxt(filename,unpack=True)
    cavityWrite[iMemo,:] = real[:]+1j*imag[:]

    filename=prefix+"harmonic"+"{0:0>4}".format(iMemo+1)+"_cavityMode_reg2_memory"+postfix
    __,real,imag = sp.loadtxt(filename).T
  #  time,real,imag=sp.loadtxt(filename,unpack=True)
    cavityMemo[iMemo,:] = real[:]+1j*imag[:]

  # load read - part of reg2
  for iRead in range(nRead):
    filename=prefix+"harmonic"+"{0:0>4}".format(iRead+1)+"_cavityMode_reg2_read"+postfix
    __,real,imag = sp.loadtxt(filename).T
  #  time,real,imag=sp.loadtxt(filename,unpack=True)
    cavityRead[iRead,:] = real[:]+1j*imag[:]

  return cavityWrite,cavityMemo,cavityRead
Example #3
def compare_mixed_files(file1,file2,tol=1e-8,delimiter="\t"):
    '''
    Given two files, compare their contents, treating numeric entries as equal up to the absolute tolerance tol.
    Returns: val, msg
    where val is True/False (True means the files match) and msg describes any failure.
    '''
    dat1=sp.loadtxt(file1,dtype='str',delimiter=delimiter,comments=None)
    dat2=sp.loadtxt(file2,dtype='str',delimiter=delimiter,comments=None)

    ncol1=dat1[0].size
    ncol2=dat2[0].size

    if ncol1!=ncol2:         
        return False,"num columns do not match up"

    try:
        r_count = dat1.shape[0]
        c_count = dat1.shape[1]
    except:
        #file contains just a single column.
        return sp.all(dat1==dat2), "single column result doesn't match exactly ('{0}')".format(file1)

    for r in xrange(r_count):
        for c in xrange(c_count):
            val1 = dat1[r,c]
            val2 = dat2[r,c]
            if val1!=val2:
                try:
                    f1 = float(val1)
                    f2 = float(val2)
                except:
                    return False, "Values do not match up (file='{0}', '{1}' =?= '{2}')".format(file1, val1, val2)
                if abs(f1-f2) > tol:
                    return False, "Values too different (file='{0}', '{1}' =?= '{2}')".format(file1, val1, val2)
    return True, "files are comparable within abs tolerance=%e" % tol
Example #4
def time_storage (**cfg):
  nStore = int(cfg['MEFourier']['{storage_harmonic}'])
  nWriteTime  = int(cfg['OCTime']['{write_timecnt}'])
  nReadTime   = int(cfg['OCTime']['{read_timecnt}'])
  nStoreTime  = int(cfg['METime']['{storage_timecnt}'])
  omega_c     = float(cfg['NVSETUP']['{omega_c}'])

  name_readwrite = getNameReadWrite(**cfg)
  prefix         = cfg['FILES']['{prefix}']
  filename       = name_readwrite+"harmonic{0:0>4}_cavityMode_".format(0)
  postfix       =cfg['FILES']['{postfix}']

  print (" read data : storage and read time")

  ### reading <down> cavity-amplitudes ###################################################
  timeStore,__,__ = sp.loadtxt(prefix+filename+"reg2_store_down"+postfix).T
  timeRead ,__,__ = sp.loadtxt(prefix+filename+"reg3_read_stored_down"+postfix).T

  time=functionaltimes_readwrite (**cfg)
  time['store']=timeStore
  time['read'] =timeRead 


  time['ti'] =timeRead[int(time['idx_ti'])-1]
  time['tf'] =timeRead[int(time['idx_tf'])-1]

  return time
Example #5
def get_average_column(path, column=0):
    """
    Get the index-based average column for a series of results files.

    Args:
        path(str): the path containing the results files.

    Kwargs:
        column (int): the column index in a results file.

    Returns:
        A numpy.ndarray containing the average values for the specified
        column-index of a series of results files.

    """
    files = [f for f in listdir(path) if isfile(join(path, f))
             and f.endswith(".txt")]

    col_seq = column,

    sum_col = loadtxt(join(path, files[0]), usecols=col_seq, unpack=True)

    for file in files[1:]:
        sum_col = sum_col + loadtxt(join(path, file), usecols=col_seq,
                                    unpack=True)

    return sum_col / len(files)
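A minimal usage sketch, assuming a directory of .txt result files (path and column index are hypothetical):

avg = get_average_column("results/run_01", column=1)
print(avg.shape, avg.mean())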
Example #6
    def convert_g012(self,hdf,g012_file,chrom,start,end):
        """convert g012 file to LIMIX hdf5
        hdf: handle for hdf5 file (target)
        g012_file: filename of g012 file
        chrom: select chromosome for conversion
        start: select start position for conversion
        end:  select end position for conversion
        """
        if ((start is not None) or (end is not None) or (chrom is not None)):
            print "cannot handle start/stop/chrom boundaries for g012 file"
            return
        #store
        if 'genotype' in hdf.keys():
            del(hdf['genotype'])
        genotype = hdf.create_group('genotype')
        col_header = genotype.create_group('col_header')
        row_header = genotype.create_group('row_header')
        #load position and meta information
        indv_file = g012_file + '.indv'
        pos_file  = g012_file + '.pos'
        sample_ID = sp.loadtxt(indv_file,dtype='str')
        pos  = sp.loadtxt(pos_file,dtype='str')
        chrom = pos[:,0]
        pos   = sp.array(pos[:,1],dtype='int')

        row_header.create_dataset(name='sample_ID',data=sample_ID)
        col_header.create_dataset(name='chrom',data=chrom)
        col_header.create_dataset(name='pos',data=pos)
        M = sp.loadtxt(g012_file,dtype='uint8')
        snps = M[:,1::]
        genotype.create_dataset(name='matrix',data=snps,chunks=(snps.shape[0],min(10000,snps.shape[1])),compression='gzip')
        pass
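A hedged usage sketch, assuming converter is an instance of the surrounding class and that the g012 file (with its .indv and .pos companions) already exists; all file names are hypothetical:

import h5py

hdf = h5py.File("genotypes.limix.hdf5", "w")
converter.convert_g012(hdf, "genotypes.012", chrom=None, start=None, end=None)
hdf.close()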
Example #7
 def setUp(self):
     sp.random.seed(0)
     self.Y = sp.loadtxt(os.path.join(base_folder, 'Y.txt')) 
     self.XX = sp.loadtxt(os.path.join(base_folder, 'XX.txt')) 
     self.Xr = sp.loadtxt(os.path.join(base_folder, 'Xr.txt')) 
     self.N,self.P = self.Y.shape
     self.write = False 
Example #8
 def setUp(self):
     SP.random.seed(0)
     self.Y = SP.loadtxt('./data/Y.txt') 
     self.XX = SP.loadtxt('./data/XX.txt') 
     self.Xr = SP.loadtxt('./data/Xr.txt') 
     self.N,self.P = self.Y.shape
     self.write = False 
Example #9
def main(argv):
    import scipy
    from sklearn import metrics
    from sklearn.multiclass import OneVsOneClassifier
    from sklearn.naive_bayes import GaussianNB
    from sklearn.cross_validation import cross_val_score
    from sklearn.svm import SVC
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.tree import DecisionTreeClassifier
    from sklearn import preprocessing
    import similarity
    
    class ScaledSVC(SVC):
        def _scale(self, data):
            return preprocessing.scale(data)
        def fit(self, X, Y):
            return super(ScaledSVC, self).fit(self._scale(X), Y)
        def predict(self, X):
            return super(ScaledSVC, self).predict(self._scale(X))

    data, labels = scipy.loadtxt(argv[1]), scipy.loadtxt(argv[2])
    if len(argv) > 3:
        features = np.array([int(s) for s in argv[3].split(',')])
        data = data[:, features]
        
    def ovo(model, adj_strat):
        return OneVsOneClassifier(BinaryTiloClassifier(model, adj_strat))

    classifiers = [
        ('TILO/PRC/Gaussian',
         ovo(PinchRatioCutStrategy(),
             similarity.Gaussian())),
        ("TILO/Nearest/Gaussian",
         ovo(NearestCutStrategy(),
             similarity.Gaussian())),
        ("TILO/PRC/KNN",
         ovo(PinchRatioCutStrategy(),
             similarity.KNN())),
        ("TILO/Nearest/KNN",
         ovo(NearestCutStrategy(),
             similarity.KNN())),
        ("SVC", ScaledSVC()),
        ("Gaussian Naive Bayes", GaussianNB()),
        ("K Neighbors", KNeighborsClassifier()),
        ("Decision Tree", DecisionTreeClassifier())]
    format_str = '{:<30} {} {} {}'
    print '{:<30} {:<10}         RAND   Accuracy'.format('method', 'accuracy')
    for name, c in classifiers:
        scores = cross_val_score(c, data, labels, cv=5)
        #scores = np.array([1., 1.])
        model = c.fit(data, labels)
        guesses = model.predict(data)
        acc = metrics.zero_one_score(guesses, labels)
        rand = metrics.adjusted_rand_score(guesses, labels)
        print '{:<30} {:.4f} +/- {:.4f} {: .4f} {:.4f}'.format(name, scores.mean(),
                                                               scores.std() / 2,
                                                               rand, acc)
Example #10
def calc_loss_deagg_suburb(bval_path_file, total_building_loss_path_file, site_db_path_file, file_out):
    """ Given EQRM ouput data, produce a csv file showing loss per suburb

    The produced csv file shows total building loss, total building
    value and loss as a percentage.  All of this is shown per suburb.

    bval_path_file - location and name of building value file produced by EQRM
    total_building_loss_path_file - location and name of the total building
      loss file
    site_db_path_file - location and name of the site database file

    Note: This can be generalised pretty easily, to get results
          deaggregated on other columns of the site_db
    """
    aggregate_on = ["SUBURB"]

    # Load all of the files.
    site = csv_to_arrays(site_db_path_file, **attribute_conversions)
    # print "site", site
    bvals = loadtxt(bval_path_file, dtype=scipy.float64, delimiter=",", skiprows=0)
    # print "bvals", bvals
    # print "len(bvals", len(bvals)

    total_building_loss = loadtxt(total_building_loss_path_file, dtype=scipy.float64, delimiter=" ", skiprows=1)
    # print "total_building_loss", total_building_loss
    # print "total_building_loss shape", total_building_loss.shape
    site_count = len(site["BID"])
    assert site_count == len(bvals)
    assert site_count == total_building_loss.shape[1]
    # For aggregates
    # key is the unique AGGREGATE_ON combination, e.g. ('Hughes', 2605,...)
    # Values are a list of indices where the combinations are repeated in site
    aggregates = {}
    for i in range(site_count):
        assert site["BID"][i] == int(total_building_loss[0, i])
        marker = []
        for name in aggregate_on:
            marker.append(site[name][i])
        marker = tuple(marker)
        aggregates.setdefault(marker, []).append(i)
    # print "aggregates", aggregates

    handle = csv.writer(open(file_out, "w"), lineterminator="\n")

    handle.writerow(["percent losses (building and content) by suburb"])
    handle.writerow(["suburb", "loss", "value", "percent loss"])
    handle.writerow(["", " ($ millions)", " ($ millions)", ""])
    keys = aggregates.keys()
    keys.sort()
    for key in keys:
        sum_loss = 0
        sum_bval = 0
        for row in aggregates[key]:
            sum_loss += total_building_loss[1][row]
            sum_bval += bvals[row]
        handle.writerow([key[0], sum_loss / 1000000.0, sum_bval / 1000000.0, sum_loss / sum_bval * 100.0])
Example #11
def plotSummary(mctraj_filename, ratespec_filename, nskip=0):
    """Read in the MC trajectory data and make a plot of it.
    Skip the first nskip points (as these may be far from the mean)"""

    data = scipy.loadtxt(mctraj_filename)  # step     w       sigma   tau     neglogP
    ratespec_data = scipy.loadtxt(ratespec_filename)

    figure()

    # plot the lambda trajectory
    subplot(2,2,1)
    plot(data[nskip:,0], data[nskip:,1])
    xlabel('accepted steps')
    ylabel('$\lambda$')
    #title(mctraj_filename)

    # try a contour plot of sigma and tau
    subplot(2,2,2)
    myhist, myextent = histBin( data[nskip:,2], data[nskip:,3], 20)
    # convert to log scale
    myhist = np.log(np.array(myhist) + 1.)
    #contour(myhist, extent = myextent, interpolation = 'nearest')
    contourf(myhist, extent = myextent, interpolation = 'nearest')

    # plot mean +/- std spectrum
    ax = subplot(2,2,3)
    Timescales = ratespec_data[:,0]
    maxLikA = ratespec_data[:,1]
    meanA = ratespec_data[:,2]
    stdA = ratespec_data[:,3]
    ci_5pc = ratespec_data[:,4]
    ci_95pc = ratespec_data[:,5]
    #matplotlib.pyplot.errorbar(Timescales, meanA, yerr=stdA)
    PlotStd = False
    plot(Timescales, meanA, 'k-', linewidth=2)
    hold(True)
    if PlotStd:
        plot(Timescales, meanA+stdA, 'k-', linewidth=1)
        hold(True)
        plot(Timescales, meanA-stdA, 'k-', linewidth=1)
    else:
        plot(Timescales, ci_5pc, 'k-', linewidth=1)
        hold(True)
        plot(Timescales, ci_95pc, 'k-', linewidth=1)
    ax.set_xscale('log')
    xlabel('timescale (s)')

    # plot mean +/- std spectrum
    subplot(2,2,4)
    wcounts, wbins = np.histogram(data[nskip:,1], bins=30)
    plot(wbins[0:-1], wcounts, linestyle='steps', linewidth=2)
    xlabel('$\lambda$')

    show()
Example #12
def load_dataset(path):
  sortedfilesbyglob = lambda x: sorted(glob.glob(os.path.join(path, '%s*' % x)))
  inptfiles = sortedfilesbyglob('input')
  targetfiles = sortedfilesbyglob('target')

  data = []
  for infn, targetfn in itertools.izip(inptfiles, targetfiles):
    inpt = scipy.loadtxt(infn)
    target = scipy.loadtxt(targetfn)
    target.shape = scipy.size(target), 1
    data.append((inpt, target))
  return data
Example #13
def read_CavityMemory (**cfgFiles):
  filename  = cfgFiles['{prefix}']+cfgFiles['{name_readwrite}']+ \
              cfgFiles['{name_optimized}']+cfgFiles['{name_cavity}']

  print ("### read initial value for cavity up")
  cavity      = sp.loadtxt(filename+"up"+cfgFiles['{postfix}']    )
  cavity_up   = cavity[0] + 1j*cavity[1]

  print ("### read initial value for cavity down")
  cavity      = sp.loadtxt(filename+"down"+cfgFiles['{postfix}']    )
  cavity_down = cavity[0] + 1j*cavity[1]

  return cavity_down, cavity_up
Example #14
def compare_files(file1,file2,tol=1e-8,delimiter="\t"):
    '''
    Given two files, compare their contents, treating numeric entries as equal up to the absolute tolerance tol.
    Returns: val, msg
    where val is True/False (True means the files match) and msg describes any failure.
    '''
    dat1=sp.loadtxt(file1,dtype='str',delimiter=delimiter,comments=None)
    dat2=sp.loadtxt(file2,dtype='str',delimiter=delimiter,comments=None)

    ncol1=dat1[0].size
    ncol2=dat2[0].size

    if ncol1!=ncol2:         
        return False,"num columns do not match up"

    try:
        head1=dat1[0,:]
        head2=dat2[0,:]
    except:
        #file contains just a single column.
        return sp.all(dat1==dat2), "single column result doesn't match exactly ('{0}')".format(file1)

    #logging.warn("DO headers match up? (file='{0}', '{1}' =?= '{2}')".format(file1, head1,head2))
    if not sp.all(head1==head2):         
        return False, "headers do not match up (file='{0}', '{1}' =?= '{2}')".format(file1, head1,head2)
        
    for c in range(ncol1):
        checked=False
        col1=dat1[1:,c]
        col2=dat2[1:,c]        
        try:
            #if it is numeric
            col1=sp.array(col1,dtype='float64')
            col2=sp.array(col2,dtype='float64')                    
        except Exception:
            # if it is a string
            pass
            if not sp.all(col1==col2):     
                return False, "string column %s does not match" % head1[c]
            checked=True

        #if it is numeric
        if not checked:
            absdiff=sp.absolute(col1-col2)
            if sp.any(absdiff>tol):
                try:                
                    return False, "numeric column %s does diff of %e not match within tolerance %e" % (head1[c],max(absdiff),  tol)
                except:
                    return False, "Error trying to print error message while comparing '{0}' and '{1}'".format(file1,file2)
        
    return True, "files are comparable within abs tolerance=%e" % tol
Example #15
def plotmonetvspg():
    x1=s.linspace(0,22,22,endpoint=False)
    y1=s.loadtxt('average-monet.log')
    y2=s.loadtxt('average-pg.log')
    y3=s.loadtxt('result-mysql.log')
    p1=py.bar(x1,y1,width=0.35)
    p2=py.bar(x1+0.4,y2,width=0.4,color='green')
    p3=py.bar(x1+0.8,y3,width=0.4,color='magenta')
    py.xlabel('queries')
    py.xlim(0,22)
    py.ylabel('response time in seconds')
    #py.xticks((p1,p2),('m','p'))
    py.legend((p1,p2,p3),('monetdb','postgresql','mysql'),loc='upper left')
    py.title('TPC-H benchmark with Postgresql and MonetDB')
    py.savefig('monetvspg_mysql.jpg')
Example #16
def main():
    # Do not modify
    start = time.time()

    parser = argparse.ArgumentParser(description='Classify unknown point with kNN.')
    parser.add_argument('filename', type=str, help='root name of file with data (vectors, categories, train-test split)')
    parser.add_argument('k', type=int, help='k in kNN')

    args = parser.parse_args()

    points = scipy.loadtxt('cluster_input/' + args.filename+'.vecs')
    cats = scipy.loadtxt('cluster_input/' + args.filename+'.cats', dtype=int)
    traintestsplit = scipy.loadtxt('cluster_input/' + args.filename+'.ttsplit')

    # use ttsplit indices to separate out train and test data
    trainpoints = points[traintestsplit==0]
    traincats = cats[traintestsplit==0]
    testpoints = points[traintestsplit==1]
    testcats = cats[traintestsplit==1]

    # run knn classifier
    predictions = knn(trainpoints, traincats, testpoints, args.k)

    # write actual category, predict category, and text of test points, and compute accuracy
    o = codecs.open(args.filename+'.predictions', 'w', 'utf8')
    o.write('ACTUAL,PREDICTED,CORRECT?,TEXT\n')
    textfile = codecs.open(args.filename+'.txt', 'r', 'utf8')
    testindex = 0
    numcorrect = 0.
    for i in traintestsplit:
        line = textfile.readline()
        if i==1:
            o.write(str(testcats[testindex]))
            o.write(',')
            o.write(str(predictions[testindex]))
            o.write(',')
            if testcats[testindex] == predictions[testindex]:
                numcorrect += 1
                o.write('CORRECT,')
            else:
                o.write('WRONG,')
            o.write(line)
            testindex+=1
    print 'Stored predictions in', args.filename+'.predictions', 'for test points'
    acc = numcorrect*100/testindex
    print 'Accuracy: {0:.2f}%'.format(acc)

    print time.time()-start, 'seconds'
Example #17
def read_a_cest_profile(filename, parameters):
    """Reads in the fuda file and spit out the intensities"""

    data = sc.loadtxt(filename, dtype=[('b1_offset', '<f8'), ('intensity', '<f8'), ('intensity_err', '<f8')])

    uncertainty = estimate_uncertainty(data)
    #    data = find_subset(data)

    data_points = []

    intensity_ref = 1.0

    for b1_offset, intensity_val, intensity_err in data:
        if abs(b1_offset) >= 10000.0:
            intensity_ref = intensity_val

    parameters['intensity_ref'] = intensity_ref

    for b1_offset, intensity_val, intensity_err in data:
        parameters['b1_offset'] = b1_offset

        intensity_err = uncertainty

        exp_type = parameters['experiment_type'].replace('_cest', '')

        data_point = __import__(exp_type + '.data_point', globals(), locals(), ['DataPoint'], -1)

        data_points.append(data_point.DataPoint(intensity_val, intensity_err, parameters))

    return data_points
Example #18
def plot_fourier_spectra(file_name):
    """
    Plot the Fourier spectra of a CaPso results file.

    Args:
        file_name (str): the text file containing the results.

    """
    # load data file
    index, preys = loadtxt(file_name, usecols=(0, 1), unpack=True)

    N = preys.size

    f = arange(-N / 2, N / 2) / N

    zero_mean_data = preys - mean(preys)

    transform = fft(zero_mean_data)

    transform_scaled = transform / N

    F = abs(fftshift(transform_scaled))

    figure(1, (9, 8))

    _set_font()

    _setup_grid_and_axes(r'$\omega / 2 \pi$', '')

    # plot using a solid line
    plot(f, F, 'k-', antialiased=True, linewidth=1.0)
    x_axis = gca()
    x_axis.set_xlim([0, f.max()])
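A minimal usage sketch (the results file name is hypothetical; show() is assumed to come from the same pylab-style namespace the function already relies on):

plot_fourier_spectra("capso_results.txt")
show()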
Example #19
	def loadData(self):
		'''Load data from files'''
		Tabs = ( ('tab_2', 'tab_3','tab_4'),
			('tab_3', 'tab_2','tab_4'))
		uiObj = ('XColumn', 'YColumn', 'MColumn', 'MCheck')
		
		senderName = self.sender().objectName()
		key = senderName[0]
		active = [self.Types[key]] + self.findUi( [key + i for i in uiObj])
		data = []
		XY = sp.zeros((0,2))
		path = self.Path[active[0]]
		if os.path.exists(path):
			try:
				data = sp.loadtxt(path)
				'''
				activeFilt = self.findChilds(QtGui.QLineEdit, FiltersKeys[active[0]])
				filtNames = ''
				
				if activeFilt[0].isEnabled() and activeFilt[1].isEnabled():
					self.filtersDict = self.getFilters(length = self.LENGTH)
					for i in (0,1):
						filtNames = activeFilt[i].text().strip().replace(" ","").upper()
						temp = 1.
						
						if filtNames:
							temp = self.resFilters(filtNames)
							
						self.filtList[active[0]][i] = temp
				else:
					self.filtList[active[0]][:] = [1., 1.]
				print("Filters [X,Y]:",self.filtList[active[0]])
				'''
				xc = active[1].value()
				yc = active[2].value()
				mc = active[3].value()
				if active[4].checkState():
					XY = sp.array( [data[:,xc], data[:,yc] ]).T / sp.array([data[:,mc], data[:,mc]]).T
				else:
					XY = sp.array( [data[:,xc], data[:,yc] ]).T
				XY = XY[XY[:,0] > 0]
				XY = XY[XY[:,1] > 0]
				if getattr(self.ui,senderName[0]+'CutForward').isChecked():
					p = sp.where( XY[:,0] == XY[:,0].max())[0][0]
					print(p)
					XY = XY[:p,:]
				XY = XY[sp.argsort(XY[:,0])]
				'''
				XY[:,0] = XY[:,0]/self.filtList[active[0]][0]
				XY[:,1] = XY[:,1]/self.filtList[active[0]][1]
				'''
				self.updateData(array = Array(XY,Type = active[0]), action = 0)
				tabs = self.findUi(Tabs[active[0]])
				tabs[0].setEnabled(True)
				
				if tabs[1].isEnabled():
					tabs[2].setEnabled(True)
			except (ValueError, IOError, IndexError):
				self.mprint("loadData: readError")
		else:  self.mprint('loadData: pathError')
Example #20
def read_filter_file(path, plot=False, title=None, figsize=None):
    """Read a filter file, optionally plotting the transmission curve.
    
    The file should have 2 header rows and be formatted with the energy (in eV)
    in the first column and the transmission in the second column. The file
    should be whitespace-delimited. This is the format used by
    http://henke.lbl.gov/optical_constants/filter2.html
    
    Returns a :py:class:`scipy.interpolate.InterpolatedUnivariateSpline`
    instance which takes as an argument the energy in keV and returns the
    transmission.
    
    Parameters
    ----------
    path : str
        The path to the filter file.
    plot : bool, optional
        If True, the filter curve will be plotted. Default is False (do not plot
        the filter curve).
    """
    E, T = scipy.loadtxt(path, skiprows=2, unpack=True)
    E = E / 1e3
    if plot:
        f = plt.figure(figsize=figsize)
        a = f.add_subplot(1, 1, 1)
        a.plot(E, T)
        a.set_xlabel("$E$ [keV]")
        a.set_ylabel("transmission, $T$")
        a.set_title(path if title is None else title)
    return scipy.interpolate.InterpolatedUnivariateSpline(E, T, ext='const')
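A minimal usage sketch (the filter file name is hypothetical); the returned spline takes the energy in keV:

trans = read_filter_file("be_filter_50um.dat", plot=True)
print(trans(3.0))  # transmission at 3 keV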
        
Example #21
 def loadStuff(self,fbasename,keys):
     """ util function """ 
     RV = {}
     base = os.path.join(base_folder, 'res_'+fbasename+'_')
     for key in keys: 
         RV[key] = sp.loadtxt(base+key+'.txt')
     return RV
Example #22
def read_a_shift_file(filename, parameters, res_incl=None, res_excl=None):
    """Reads in the fuda file and spit out the intensities"""

    data = sc.loadtxt(filename, dtype=[('resonance_id', 'S10'), ('shift_ppb', 'f8'), ('shift_ppb_err', 'f8')])

    data_points = list()

    exp_type = parameters['experiment_type'].replace('_shift', '')
    data_point = __import__(exp_type + '.data_point', globals(), locals(), ['DataPoint'], -1)

    for resonance_id, shift_ppb, shift_ppb_err in data:

        included = (
            (res_incl is not None and resonance_id in res_incl) or
            (res_excl is not None and resonance_id not in res_excl) or
            (res_incl is None and res_excl is None)
        )

        if not included:
            continue

        parameters['resonance_id'] = resonance_id

        data_points.append(data_point.DataPoint(shift_ppb, shift_ppb_err, parameters))

    return data_points
Example #23
def simple_unsupervised_demo():
    print "Simple PEER application. All default prior values are set explicitly as demonstration."
    y = SP.loadtxt("data/expression.csv",delimiter=",")
    K = 20
    Nmax_iterations = 100
    model = peer.PEER()
    
    # set data and parameters
    model.setNk(K) #number of factor for learning
    model.setPhenoMean(y) # data for inference
    # set priors (these are the default settings of PEER)
    model.setPriorAlpha(0.001,0.1);
    model.setPriorEps(0.1,10.);
    model.setNmax_iterations(Nmax_iterations)
    # perform inference
    model.update()

    #investigate results
    #factors:
    X = model.getX()
    #weights:
    W = model.getW()
    #ARD parameters
    Alpha = model.getAlpha()
    #get corrected dataset:
    Yc = model.getResiduals()

    # plot variance of factors - in this case, we expect a natural elbow where there are 5 active factors, as 5 were simulated
    plot_Alpha(Alpha)
    PL.savefig("demo_simple.pdf")
    print "Plotted factor relevance"
    PL.show()
Example #24
def read_digits(dir='digits'):
    """
    read all example digits return a matrix X, where each row is the
    (flattened) pixels of an example digit and a vector y, where each
    entry gives the digit as an integer
    """
    
    for d in range(10):
        fname = '%s/train.%d' % (dir, d)

        print "reading" , fname

        # read digits from train.d
        Xd = sp.loadtxt(fname, delimiter=',')

        # create vector of labels
        yd = d*sp.ones(Xd.shape[0])

        try:
            # append digits and labels to X and y, respectively
            X = sp.vstack((X, Xd))
            y = sp.concatenate((y, yd))
        except UnboundLocalError:
            # create X and y if they don't exist
            X = Xd
            y = yd
            
    return X, y
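A minimal usage sketch, assuming files train.0 ... train.9 exist in a local digits/ directory:

X, y = read_digits("digits")
print(X.shape, y.shape)  # one flattened image per row, one integer label per row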
Example #25
    def test_networkx_matrix(self):
        print('\n---------- Matrix Test Start -----------\n')

        g = nx.barabasi_albert_graph(30, 2)
        nodes = g.nodes()
        edges = g.edges()
        print(edges)

        mx1 = nx.adjacency_matrix(g)
        fp = tempfile.NamedTemporaryFile()
        file_name = fp.name
        sp.savetxt(file_name, mx1.toarray(), fmt='%d')

        # Load it back to matrix
        mx2 = sp.loadtxt(file_name)
        fp.close()

        g2 = nx.from_numpy_matrix(mx2)
        cyjs_g = util.from_networkx(g2)

        #print(json.dumps(cyjs_g, indent=4))

        self.assertIsNotNone(cyjs_g)
        self.assertIsNotNone(cyjs_g['data'])
        self.assertEqual(len(nodes), len(cyjs_g['elements']['nodes']))
        self.assertEqual(len(edges), len(cyjs_g['elements']['edges']))

        # Make sure all edges are reproduced
        print(set(edges))
        diff = compare_edge_sets(set(edges), cyjs_g['elements']['edges'])
        self.assertEqual(0, len(diff))
Example #26
    def ParseTxtFileColumnsToDataContainers(self,
                                            ColumnList=[]):
        DCs    = DataContainer.DataContainers()
        Header = self.GetFileHandle().readline().strip().split()
        self.Close()

        ColumnIndices = []
        for Entry in ColumnList:
            ColumnIndices.append(Header.index(Entry))
            DCs.DataContainers[Entry] = DataContainer.DataContainer()
            DCs.DataContainers[Entry].SetDataName(Entry)

        FName = None
        if(self.GetboCompressed()):
            FName = self.GetDecomprName()
        else:
            FName = self.GetName()
        ColumIndicesTuple = tuple(ColumnIndices)
        DataArrays = scipy.loadtxt(fname=FName,
                                   dtype=str,
                                   skiprows=1,
                                   usecols=ColumIndicesTuple,
                                   unpack=True)

        for i in range(len(ColumIndicesTuple)):
            HeaderIndex = ColumIndicesTuple[i]
            HeaderEntry = Header[HeaderIndex]
            DCs.DataContainers[HeaderEntry].ReplaceDataArray(DataArrays[i])

        return DCs
Example #27
def timeTest3():
    MATRICE = sp.loadtxt("IOFiles/BigBlockMatrix5000.out")
    TimeReal=[]
    TimeComplex =[]
    ErrReal = []
    ErrComplex =[]
    DIMS = [10,20,40,80,160,320,640,900,1280,1700,2000,2560]
    print "inizio"
    for i in range(0,len(DIMS)):
        M = MATRICE[0:DIMS[i],0:DIMS[i]]
        print DIMS[i]
        I = time.clock()
        Res = sqrtm5(M,10)
        F = time.clock()
        TimeReal.append(F - I)
        ErrReal.append(sp.linalg.norm(Res.dot(Res) - M)/sp.linalg.norm(M))
        I = time.clock()
        Res = sp.linalg.sqrtm(M)
        F = time.clock()
        TimeComplex.append(F - I)
        ErrComplex.append(sp.linalg.norm(Res.dot(Res) - M)/sp.linalg.norm(M))

    np.savetxt(home + workspace + ttest + "__TimeReal"+str(9)+".tt", TimeReal)
    np.savetxt(home + workspace + ttest + "__TimeComplex"+str(9)+".tt", TimeComplex)
    np.savetxt(home + workspace + ttest + "__ErrReal"+str(9)+".tt", ErrReal)
    np.savetxt(home + workspace + ttest + "__ErrComplex"+str(9)+".tt", ErrComplex)
    np.savetxt(home + workspace + ttest + "__DIMS"+str(9)+".tt", DIMS)
Example #28
def load_data(fname, delimiter=','):
    """ return the features x and result y as matrix """
    data = sp.loadtxt(fname, delimiter=delimiter)
    m, n = data.shape
    x = sp.asmatrix(data[:, range(0, n - 1)].reshape(m, n - 1))
    y = sp.asmatrix(data[:, n - 1].reshape(m, 1))
    return x, y
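A minimal usage sketch (the data file name is hypothetical; the last column is taken as y):

x, y = load_data("ex1data.txt")
print(x.shape, y.shape)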
Example #29
 def __init__(self,filename='.\\'):
     """lädt die MAR-Theoriekurven in den Einheiten 2*e*Delta/h gegen
     Delta."""
     #files=glob.glob(filename)
     files = []
     for _file in os.listdir(filename):
         #if _file.endswith(".dat"):
         if True:
             files.append(_file)
     files.sort()
     #print files
     
     if len(files)==0: raise Exception("Keine Dateien mit MAR-Daten gefunden.")
     
     #Ts=[f.split('\\')[-1].split('.')[0] for f in files]
     Ts=[float(f.split('\\')[-1]) for f in files]
     Ts=[0.]+Ts#[float(i)/1e4 for i in Ts]
     #Tfs=[xy.open(f, x=1, y=2) for f in files]
     #print Ts
     Tfs = []
     for f in files:
         _data = scipy.loadtxt(os.path.join(filename,f),unpack=True)
         Tfs.append(_data)
     for i in Tfs:
         #print i
         i=(i[0][:600],i[1][:600])
         #i[1]=
     Tf0=Tfs[0].copy()*0
     Tfs=[Tf0]+Tfs
     #print len(Ts), len(Tfs)
     self.Ts=Ts; self.Tfs=Tfs
Example #30
def getSizeFactor(fn_anno, data, gid, mode = 'sum', withXYMT = True, filterbyPC = True):
    '''
    Input: annotation file, count matrix and gene ids.
    Output: per-sample library size, i.e. the sum of protein-coding gene counts,
    optionally excluding genes on the sex chromosomes and the mitochondrial genome.
    '''
    anno  = sp.loadtxt(fn_anno, delimiter = '\t', dtype = 'string', usecols=[0,2,8])
    anno  = anno[anno[:,1] == 'gene', :]
    if not withXYMT: ### filter xymt
        anno  = anno[anno[:,0] != 'MT',:]
        anno  = anno[anno[:,0] != 'Y',:]
        anno  = anno[anno[:,0] != 'X',:]

    agid   = [x.split(';')[0] for x in anno[:,2]] ### clean gene id's
    agid   = sp.array([x.split(" ")[1].strip('\"') for x in agid])

    if filterbyPC: ### filter protein coding
        gtpe  = [x.split(';')[2] for x in anno[:,2]]
        gtpe  = sp.array([x.split('\"')[1].split('\"')[0] for x in gtpe])
        iPC   = sp.where(gtpe == 'protein_coding')[0]
        agid  = agid[iPC]

    iGn = sp.in1d(gid, agid)
    libsize = sp.sum(data[iGn,:], axis = 0) 
    if mode == 'uq':
         libsize = sp.array([sp.percentile(x[x!=0] ,75) for x in data[iGn,:].T])  * iGn.sum() 

    return libsize
Example #31
from scipy.fftpack import fft2, ifft2
from scipy import loadtxt, exp, empty, real
from pylab import imshow, plot, show, gray
from numpy.fft import rfft2, irfft2

# Constants
sigma = 25
blurred_photo = loadtxt('../../cpresources/blur.txt', float)
y_dim, x_dim = blurred_photo.shape


def point_spread(x, y):
    return exp(-(x**2 + y**2) / (2 * sigma**2))


# calculate point spread function for each point
point_spread_array = empty([y_dim, x_dim], float)
for i in range(y_dim):
    for j in range(x_dim):
        point_spread_array[i, j] = point_spread( (j + y_dim / 2) % y_dim - y_dim / 2, \
                                                 (i + x_dim / 2) % x_dim - x_dim / 2)

# Fourier transform both
blurred_photo_fourier = rfft2(blurred_photo)
point_spread_fourier = rfft2(point_spread_array)

# divide
unblurred_fourier = empty([y_dim, x_dim // 2 + 1], complex)
epsilon = 10**-4
for i in range(x_dim // 2 + 1):
    for j in range(y_dim):
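        # NOTE (assumed completion): the original snippet is truncated here.
        # A standard epsilon-thresholded deconvolution divides by the point
        # spread transform only where it is not close to zero.
        if abs(point_spread_fourier[j, i]) > epsilon:
            unblurred_fourier[j, i] = blurred_photo_fourier[j, i] / point_spread_fourier[j, i]
        else:
            unblurred_fourier[j, i] = blurred_photo_fourier[j, i]

# inverse transform back to real space and display (also assumed)
unblurred_photo = irfft2(unblurred_fourier)
imshow(unblurred_photo)
gray()
show()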
Example #32
def main(dataset, show=False):
    xdata, n1data, n2data, n3data, n1err, n2err, n3err = loadtxt(
        dataset, unpack=True, usecols=[0, 1, 2, 3, 4, 5, 6], skiprows=1)
    fitxdata = np.linspace(xdata[0], xdata[-1], 500)

    n1err = np.multiply(n1data, n1err)
    n2err = np.multiply(n2data, n2err)
    n3err = np.multiply(n3data, n3err)

    def expand_exponential(x, *p):
        return (p[0] * np.exp(p[1] * x)) + (p[2] * np.exp(p[3] * x)) + p[4]

    def exponential(x, *p):
        return p[0] * np.exp(p[1] * x) + p[2]

    p = [129.716, -.00236, -4.8764]
    exp_p1 = [129, -.00236, 300, -.09, -4]
    exp_p2 = [85, -.0236, -4]

    lamda = -.00236
    scale = 450

    #lambda x, a, c, d, e: expand_exponential(x, a, lamda, c, d, e)

    popt1, pcov1 = curve_fit(expand_exponential,
                             xdata,
                             n1data,
                             p0=exp_p1,
                             sigma=n1err,
                             maxfev=int(3e8))  #expanded exponential
    popt2, pcov2 = curve_fit(exponential,
                             xdata,
                             n2data,
                             p0=exp_p2,
                             sigma=n2err,
                             maxfev=int(3e8))
    popt3, pcov3 = curve_fit(exponential, xdata, n3data, p0=p, sigma=n3err)

    pcov1 = np.absolute(pcov1)
    pcov2 = np.absolute(pcov2)
    pcov3 = np.absolute(pcov3)

    yFit1 = expand_exponential(xdata, *popt1)
    yuFit1 = expand_exponential(xdata, *exp_p1)

    yFit2 = exponential(xdata, *popt2)
    yuFit2 = exponential(xdata, *exp_p2)

    yFit3 = exponential(xdata, *popt3)
    yuFit3 = exponential(xdata, *p)

    chisq1 = np.sum(((yFit1 - n1data) / n1err)**2)
    chisq2 = np.sum(((yFit2 - n2data) / n2err)**2)
    chisq3 = np.sum(((yFit3 - n3data) / n3err)**2)

    ##linear best fit for finding lambda
    '''def lin(x, *p):
		return p[0]*x + p[1]

	def expanded_lin(x, *p):
		return p[0]*x + p[1]*x + p[2]

	p1, _ = curve_fit(lin, xdata, np.log(yFit1/popt1[0]), p0 = [.5, -.04])
	p2, _ = curve_fit(lin, xdata, np.log(yFit2/popt2[0]), p0 = [.5, -.04])
	p3, _ = curve_fit(expanded_lin, xdata, np.log(yFit3/popt3[0]), p0 = [.022, ])

	plt.figure(figsize = (14, 9))
	plt.plot(xdata, np.log(yFit1/popt1[0]), 'o', label = "Al Absorber/31 keV Energy")
	plt.plot(xdata, np.log(yFit2/popt2[0]), 'o', label = "Al Absorber/81 MeV Energy")
	plt.plot(xdata, np.log(yFit3/popt3[0]), 'o', label = "Al Absorber/356 keV Energy")
	plt.plot(fitxdata, lin(fitxdata, *p1), label = "511 keV Energy Fit")
	plt.plot(fitxdata, lin(fitxdata, *p2), label = "1.27 MeV Energy Fit")
	plt.plot(fitxdata, lin(fitxdata, *p3), label = "356 keV Energy Fit")
	plt.annotate(r"$\lambda_1 = %f \, mm^{-1}$" % np.absolute(p1[0]), xy = (xdata[3],lin(xdata[3], *p1)), 
		xytext = (xdata[3]+3,lin(xdata[3], *p1)), arrowprops = {"width":2, "frac":.3, "headwidth":7})
	plt.annotate(r"$\lambda_2 = %f \, mm^{-1}$" % np.absolute(p2[0]), xy = (xdata[3],lin(xdata[3], *p2)), 
		xytext = (xdata[3]+3,lin(xdata[3], *p2)), arrowprops = {"width":2, "frac":.3, "headwidth":7})
	plt.annotate(r"$\lambda_3 = %f \, mm^{-1}$" % np.absolute(p3[0]), xy = (xdata[3],lin(xdata[3], *p3)), 
		xytext = (xdata[3]+3,lin(xdata[3], *p3)), arrowprops = {"width":2, "frac":.3, "headwidth":7})
	plt.title("Fitting to find the Linear Attenuation Coefficient")
	plt.xlabel(r"$Thickness\,(mm)$")
	plt.ylabel(r"$ln(\frac{R}{R_0})$")
	plt.legend(loc = 0)
	plt.savefig("/users/aman/desktop/phys211/gamma cross sections/plots/linearcoeff_Na.pdf")
	plt.show()'''
    ##back to the exponential fit

    yFit1 = expand_exponential(fitxdata, *popt1)
    yuFit1 = expand_exponential(fitxdata, *exp_p1)

    yFit2 = exponential(fitxdata, *popt2)
    yuFit2 = exponential(fitxdata, *exp_p2)

    yFit3 = exponential(fitxdata, *popt3)
    yuFit3 = exponential(fitxdata, *p)

    fig = plt.figure(figsize=(15, 10))
    fig.add_subplot(131)
    plt.errorbar(xdata, n1data, n1err, fmt='o', label="Raw Data")
    plt.plot(fitxdata,
             yFit1,
             linewidth=2,
             alpha=.9,
             label="Falling Exponential Fit")
    #plt.plot(fitxdata, yuFit1,
    #	linewidth = 2, alpha = .9, label = "guesses")
    plt.text(20, 125,  r"$R(x) = R_0e^{-\lambda x} + R_0'e^{-\tau x} + C$"\
         "\n"\
        r"$R_0 = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\lambda = %.4f \pm %.1g \, mm^{-1}$"\
         "\n"\
        r"$R_0' = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\tau = %.3f \pm %.1g \, mm^{-1}$"\
         "\n"\
        r"$C = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\chi^2 = %.2f $"\
         "\n"\
        r"$\frac{\chi^2}{\nu} = %.2f $"\
         "\n"\
        r"$\nu = %.0f$"\
        % (popt1[0], np.sqrt(pcov1[0,0]), np.absolute(popt1[1]), np.sqrt(pcov1[1,1]),
         popt1[2], np.sqrt(pcov1[2,2]), np.absolute(popt1[3]), np.sqrt(pcov1[3,3]), popt1[4], np.sqrt(pcov1[4,4]),
         chisq1, chisq1/(len(xdata) - len(exp_p1)), len(xdata)-len(exp_p1)))
    plt.xlabel("Al Thickness")
    plt.ylabel("Countrate (counts/s)")
    plt.title("31 keV Transmission Intensity")
    plt.legend()

    fig.add_subplot(132)
    plt.errorbar(xdata, n2data, n2err, fmt='o', label="Raw Data")
    plt.plot(fitxdata,
             yFit2,
             linewidth=2,
             alpha=.9,
             label="Falling Exponential Fit")
    #plt.plot(fitxdata, yuFit2,
    #	linewidth = 2, alpha = .9, label = "guesses")
    plt.text(20, 70,  r"$R(x) = R_0e^{-\lambda x} + C$"\
         "\n"\
        r"$R_0 = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\lambda = %.4f \pm %.1g \, mm^{-1}$"\
         "\n"\
        r"$C = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\chi^2 = %.2f $"\
         "\n"\
        r"$\frac{\chi^2}{\nu} = %.2f $"\
         "\n"\
        r"$\nu = %.0f$"\
        % (popt2[0], np.sqrt(pcov2[0,0]), np.absolute(popt2[1]), np.sqrt(pcov2[1,1]),
         popt2[2], np.sqrt(pcov2[2,2]), chisq2, chisq2/(len(xdata) - len(exp_p2)), len(xdata)-len(exp_p2)))

    plt.xlabel("Al Thickness (mm)")
    plt.ylabel("Countrate (counts/s)")
    plt.title("81 keV Transmission Intensity")
    plt.legend()

    fig.add_subplot(133)
    plt.errorbar(xdata, n3data, n3err, fmt='o', label="Raw Data")
    plt.plot(fitxdata,
             yFit3,
             linewidth=2,
             alpha=.9,
             label="Falling Exponential Fit")
    plt.text(25, 100,  r"$R(x) = R_0e^{-\lambda x} + C$"\
         "\n"\
        r"$R_0 = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\lambda = %.4f \pm %.1g \, mm^{-1}$"\
         "\n"\
        r"$C = %.0f \pm %.1g \, counts/s$"\
         "\n"\
        r"$\chi^2 = %.2f $"\
         "\n"\
        r"$\frac{\chi^2}{\nu} = %.2f $"\
         "\n"\
        r"$\nu = %.0f$"\
        % (popt3[0], np.sqrt(pcov3[0,0]), np.absolute(popt3[1]), np.sqrt(pcov3[1,1]),
         popt3[2], np.sqrt(pcov3[2,2]), chisq3, chisq3/(len(xdata) - len(p)), len(xdata)-len(p)))

    plt.xlabel("Al Thickness (mm)")
    plt.ylabel("Countrate (counts/s)")
    plt.title("356 keV Transmission Intensity")
    plt.legend()

    plt.savefig(
        "/users/aman/desktop/phys211/gamma cross sections/plots/%s_Ba.pdf" %
        dataset[0:dataset.find('.')])
    if show:
        plt.show()
    "Update Time", "Solution Time", "Assembly Time", "Repartitioning Time"
]

fignum = 1
for g, tit in zip(globs, titles):

    plt.figure(fignum).set_size_inches([12, 5], forward=True)
    i = 1
    for d in dirs:
        print g

        files = glob.glob(d + g)
        files.sort()

        for f in files:
            data = sp.loadtxt(f)
            if len(data.shape) == 2:
                t = data[:, 0]
                u = data[:, 1]
                if i == 1:
                    ax1 = plt.subplot(2, 2, i)
                if i == 3:
                    plt.subplot(2, 2, i, sharex=ax1, sharey=ax1)
                plt.plot(t, u)
                plt.ylabel("Time (s)")
                if i == 1:
                    plt.title("Incremental", size="medium")
                if i == 3:
                    plt.xlabel("Analysis Time, [s]")
                if i == 1:
                    ax2 = plt.subplot(2, 2, i + 1)
Example #34
###     - overrep_seq_flag
###     - seqwise_gc_cont_flag
###     - basewise_n_cont_flag
###     - seqwise_qual_flag
### we flag a sample as low quality if the degradation score is larger than Q3 + 1.5xIQR
### we flag a sample as low quality if the GC content is more than 1.5xIQR below Q1 or above Q3
### we flag a sample as low quality if the number of reads is more than 1.5xIQR below Q1 or above Q3
### we exclude a sample if it is flagged for at least three low quality criteria
### we exclude a sample if the degradation score is larger than Q3 + 3xIQR
### we exclude a sample if the GC content is more than 3xIQR below Q1 or above Q3
### we exclude a sample if the number of reads is more than 3xIQR below Q1 or above Q3
### we exclude a sample if it has been sequenced on a different machine than Hiseq 2000


### load and process FASTQC data
data_fq = sp.loadtxt(FASTQC, dtype='str', delimiter='\t')
idx_aid = sp.where(data_fq[0, :] == 'analysis_id')[0][0] 
idx_bqf = sp.where(data_fq[0, :] == 'basewise_qual_flag')[0][0] 
idx_osf = sp.where(data_fq[0, :] == 'overrep_seq_flag')[0][0] 
idx_sgf = sp.where(data_fq[0, :] == 'seqwise_gc_cont_flag')[0][0] 
idx_bnf = sp.where(data_fq[0, :] == 'basewise_n_cont_flag')[0][0]
idx_sqf = sp.where(data_fq[0, :] == 'seqwise_qual_flag')[0][0]
idx_rc = sp.where(data_fq[0, :] == 'readcount')[0][0]
idx_gc = sp.where(data_fq[0, :] == 'seqwise_gc_mean')[0][0]

data_fq = data_fq[1:, :]
data_fq_ids = sp.array([x.split('/')[-1] for x in data_fq[:, idx_aid]])
s_idx = sp.argsort(data_fq_ids)
data_fq_ids = data_fq_ids[s_idx]
data_fq = data_fq[s_idx, :]
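A minimal sketch of the interquartile-range rule described in the comments above, applied to the read counts (it reuses sp.percentile, which already appears in the getSizeFactor snippet above; np.percentile works the same in newer stacks):

readcount = data_fq[:, idx_rc].astype('float')
q1, q3 = sp.percentile(readcount, 25), sp.percentile(readcount, 75)
iqr = q3 - q1
flag_readcount = (readcount < q1 - 1.5 * iqr) | (readcount > q3 + 1.5 * iqr)  # low-quality flag
excl_readcount = (readcount < q1 - 3.0 * iqr) | (readcount > q3 + 3.0 * iqr)  # hard exclusion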
Example #35
'''

if len(sys.argv[1:]) < 2:
	sys.stderr.write('ERROR: missing parameters\n')
	usage()
	sys.exit(1)

csv,covout=sys.argv[1:]

if os.path.isfile(csv) != True:
	sys.stderr.write('ERROR: '+csv+' not found\n')
	sys.exit(1)


#load the csv file

csvin = sp.loadtxt(csv, delimiter='\t', dtype='S100')

#open out file and write matrix of covariates and sample IDs
csvout = h5py.File(covout, 'w')
dset = csvout.create_dataset('covariates', csvin[:,1:].shape, dtype='float64')
dset[...]=csvin[:,1:].astype('float64')

dset2 = csvout.create_dataset('row_header/sample_ID', (csvin[:].shape[0],), dtype='S100')
dset2[...]=csvin[:,0][:]

csvout.close()



Example #36
import os
import scipy as sp

__all__ = ['vsh', 'vsh_clim']

oceanval = os.environ.get(
    'OCEANVAL', '/discover/nobackup/projects/gmao/oceanval/verification')
path = oceanval + '/PIOMAS/RAW'
fmt = '{path}/piomass.dat'

xx = sp.ma.masked_array(sp.loadtxt(fmt.format(path=path))[:, 1:])
xx[xx == -1.0] = sp.ma.masked

vnh = xx.flatten()
vnh_clim = xx.mean(axis=0)
Example #37
    files = sys.argv[1]
data_size = 16
start_line = 0
power = [0, 3, 6, 10]
power = [10]
for j in power:
    titles = []
    x = np.empty([0, 10])
    y = np.empty([0, 10])
    for i in files:
        subs = 'rate_for_P' + str(j)
        if subs in i:
            print(i)
            l = scipy.loadtxt(i,
                              comments="#",
                              skiprows=0,
                              unpack=False,
                              dtype=str)
            titl = i[0:10] + "_=_" + i[10:10 + len(str(j))] + "_dB_" + i[
                10 + len(str(j)):16 +
                len(str(j))] + "_=_" + i[16 + len(str(j))] + "_rx_ant_=_2"
            titl = titl.replace("_", " ")
            titles += list(titl.split(";"))
            print(titles)
            bits = (np.arange(1, 15) * 2).astype(np.float)
            bound = l[0, :].astype(np.float)
            actual_rate = l[1, :].astype(np.float)
            x = np.append(x, bound)
            y = np.append(y, actual_rate)
            xrange = 14
            x = x.reshape(int(len(x) / xrange), xrange)
Example #38
    # import genotype file
    bedfile = "data_structlmm/chrom22_subsample20_maf0.10"
    (bim, fam, G) = read_plink(bedfile)

    # subsample snps
    Isnp = gs.is_in(bim, ("22", 17500000, 18000000))
    G, bim = gs.snp_query(G, bim, Isnp)

    # load phenotype file
    phenofile = "data_structlmm/expr.csv"
    dfp = pd.read_csv(phenofile, index_col=0)
    pheno = gaussianize(dfp.loc["gene1"].values[:, None])

    # load environment file and normalize
    envfile = "data_structlmm/env.txt"
    E = sp.loadtxt(envfile)
    E = norm_env_matrix(E)

    # mean as fixed effect
    covs = sp.ones((E.shape[0], 1))

    # run analysis with struct lmm
    snp_preproc = {"max_miss": 0.01, "min_maf": 0.02}
    res = run_structlmm(G,
                        bim,
                        pheno,
                        E,
                        covs=covs,
                        batch_size=100,
                        snp_preproc=snp_preproc)
Example #39
    completed = subprocess.Popen(["FlexPDE7n", "-S", FlexFileName])
    time.sleep(0.1)
    processes.insert(0, completed)
    print('Started flow =', round(Flow, 2), "kg/s")

for p in processes:
    p.wait()

#input("Press enter to continue once processes are done")
time.sleep(2)

for Flow in FlowRange:
    try:
        with open("D:\\EngPhys\\2CM4\\Week1\\test1bOwenBruce2CM4_output\\" +
                  str(Flow) + "_traj.txt") as f:
            data = sp.loadtxt(f, skiprows=8)
        t = data[:, 0]
        xd = data[:, 1]
        yd = data[:, 2]
        plt.plot(xd, yd)
        xfinal = np.append(xfinal, [xd[-1]])
        tfinal = np.append(tfinal, [t[-1]])
        if (xd[-1] + 3000) > maxx:
            maxx = xd[-1] + 3000
            maxFlow = Flow
        print(
            'Flow rate of {Flow} kg/s lands at xd = {xdfinal}m after {time} seconds'
            .format(Flow=round(Flow, 2), xdfinal=xd[-1], time=t[-1]))
    except:
        print("missing: ", Flow)
Example #40
stdang = np.zeros((nsnap - nskip))
bins = np.linspace(0, 180, 45)
dbin = bins[1] - bins[0]

w = 0
for J in JList:
    #for v in vList:
    #plt.figure(figsize=(10,7),linewidth=2.0)
    #print J
    print v
    #/home/silke/Documents/CurrentProjects/Rastko/nematic/data/R_16.0_long/defects_J_0.01_R_16_long.dat
    #infile= basefolder +'/R_' + R+'.0_long/defects_J_' + J + '_R_' + R +'_long.dat'
    infile = basefolder + 'R_16.0_J' + J + '/defects_J_' + J + 'v0_' + v + '_long.dat'
    print infile
    # header='theta rho vel energy pressure alpha alpha_v'
    datamat = (sp.loadtxt(infile, unpack=True)[:, (nskip + 1):]).T
    ndefect = datamat[:, 0]

    # This includes potential zeros. Be very careful in the subsequent analysis
    # Currently divides by the absolute value of the first element. Which should be fine, but a bit imprecise ...
    defectmat[:, 0, :] = datamat[:, 1:4] / lin.norm(datamat[0, 1:4])
    defectmat[:, 1, :] = datamat[:, 4:7] / lin.norm(datamat[0, 4:7])
    defectmat[:, 2, :] = datamat[:, 7:10] / lin.norm(datamat[0, 7:10])
    defectmat[:, 3, :] = datamat[:, 10:14] / lin.norm(datamat[0, 10:14])

    # Defined for two defects
    angles[:, 0] = np.degrees(
        np.arccos(np.sum(defectmat[:, 0, :] * defectmat[:, 1, :], axis=1)))
    # Defined for three defects
    angles[:, 1] = np.degrees(
        np.arccos(np.sum(defectmat[:, 0, :] * defectmat[:, 2, :], axis=1)))
Example #41
#%%
# read in all the data files in rawdata directory using a for loop
# columns: subject, stimulus, pairing, accuracy, median RT
#
os.chdir('/Users/joelleg/Desktop/ps2-girgisjo-V2/rawdata')  #change directory


data = np.empty((0,5))   #create empty np array
print(data)
 

#for loop ---first print up to print(tmp), then run whole for loop
for r in testingrooms:
    print('/Users/joelleg/Desktop/ps2-girgisjo-V2/rawdata/testroom' + r ) #test code
    tmp = sp.loadtxt('/Users/joelleg/Desktop/ps2-girgisjo-V2/rawdata/testroom' + r + '.csv',delimiter=',')  #read in files in rawdata folder
    print(tmp)   #temp value
    data = np.vstack([data,tmp])  #stack/combine data from the 3 testrooms
    print(data)



#Assign variables for each column
sbj=data[:,0]     # select 1st column
print(sbj)
print(sbj.dtype)
sbj = sbj.astype('int32')   #convert to int

stim=data[:,1]  #select 2nd column
print(stim)
stim = stim.astype('int32')   #convert to int
Example #42
# Profiles
# Set column to plot
usecolumn=4

profList=[r'$\theta$',r'$\rho$',r'$\sqrt{\langle v^2 \rangle}/v_0$','energy','pressure',r'$\Sigma_{\theta \theta}$',r'$\Sigma_{\theta \phi}$',r'$\Sigma_{\phi \theta}$',r'$\Sigma_{\phi \phi}$',r'$\alpha$',r'$\alpha_v$']
profName=['theta','rho','vrms','energy','pressure','stt','stp','spt','spp','alpha','alpha_v']

for i in range(len(vList)):
	plt.figure(figsize=(10,7),linewidth=2.0)
	for j in range(len(RList)):
		print vList[i],RList[j]
		ax=plt.gca()
		outfile=basedir+'/profiles_v0' + vList[i] + '_R' + RList[j] + '.dat'
		outfile2=basedir + '/axis_v0'  + vList[i] + '_R' + RList[j] + '.dat'
		# header='theta rho vel energy pressure alpha alpha_v'
		profiles=sp.loadtxt(outfile, unpack=True)[:,:] 
		isdata=[index for index,value in enumerate(profiles[1,:]) if (value >0)]
		# Corrected
		## Forgot the normalization of rho by the angle band width
		#if usecolumn==1:
			#normz=2*np.pi*rval*abs(np.cos(profiles[0,:]))
			#profiles[1,isdata]=profiles[1,isdata]/normz[isdata]
			#profiles[1,:]/=np.mean(profiles[1,:])
		if usecolumn==2:
			plt.plot(profiles[0,isdata],profiles[usecolumn,isdata]/float(vList[i]),color=testmap(j), linestyle='solid',label=RList[j])
		else:
			plt.plot(profiles[0,isdata],profiles[usecolumn,isdata],color=testmap(j), linestyle='solid',label=RList[j])
	#if usecolumn<=8:
		#plt.ylim(0,1.25*profiles[usecolumn,nbin/2])
	if usecolumn==9:
		plt.plot(2*profiles[0,isdata],0.45*2*profiles[0,isdata],'k--')
Example #43
import sys
import os
import scipy as sp

sys.path.append('..')
from paths import BASEDIR_AS
basedir = os.path.join(BASEDIR_AS, 'alternative_splicing')

full_data = sp.loadtxt(os.path.join(basedir, 'exonization_candidates_C2.txt'), dtype='str', delimiter='\t')
snv_data = sp.loadtxt(os.path.join(basedir, 'exonization_candidates_C2.SVovlp.txt'), dtype='str', delimiter='\t')

fin_data = []
### keep relevant subset of data
### event id
fin_data.append(full_data[:, 0])
### event pos
fin_data.append(sp.array([x[1] + '-' + ':'.join(x[2:8])  for x in full_data]))
### strand
fin_data.append(full_data[:, 11])
### ensemble id
fin_data.append(full_data[:, 8])
### gene name
fin_data.append(full_data[:, 9])
### max dPSI
fin_data.append(full_data[:, 10])
### coding status
fin_data.append(full_data[:, 12])
### overlapping SNVs
snvs = []
for i in range(full_data.shape[0]):
    tmp = []
print "***Saturn_KarkRef1993***", Jupiter_KarkRef1993


# Read and reshape spectral data files    
CLR = scipy.fromfile(file="../20150123UT/JupiterSpectrum-20150122LT-CLR-sum50m00s-Rotated-Cropped-WVCal.dat", dtype=float, count=-1, sep='\t')    
NIR = scipy.fromfile(file="../20150123UT/JupiterSpectrum-20150122LT-742-sum3h20m-Rotated-Cropped-WVCal.dat", dtype=float, count=-1, sep='\t')    
NormResponsewithWV= scipy.fromfile(file="../PolluxResponse20150123UT.txt", dtype=float, count=-1, sep=" ")
CLR=scipy.reshape(CLR,[CLR.size/2,2])
NativeDispersion=(CLR[(CLR.size/2-1),0]-CLR[0,0])/(CLR.size/2-1)
NIR=scipy.reshape(NIR,[NIR.size/2,2])
NIR[:,0]=NIR[:,0]+16.
NRespWV=scipy.reshape(NormResponsewithWV,[NormResponsewithWV.size/2,2])
MasterDispersion=(NRespWV[(NRespWV.size/2-1),0]-NRespWV[0,0])/(NRespWV.size/2-1)

#Load Reference Spectrum: Average G2v for albedo calculations
Ref = scipy.loadtxt("F:/Astronomy/Python Play/SPLibraries/SpectralReferenceFiles/ReferenceLibrary/g2v.dat", dtype=float, skiprows=3,usecols=(0,1))

#Interpolate NIR, Response and Reference spectra onto CLR Wavelengths

CLRInterp=interpolate.interp1d(CLR[:,0],CLR[:,1],kind='linear', copy=True,
                         bounds_error=False, fill_value=0.0)  
CLRonRef=CLRInterp(Ref[:,0])

NIRInterp=interpolate.interp1d(NIR[:,0],NIR[:,1],kind='linear', copy=True,
                         bounds_error=False, fill_value=0.0)  
NIRonRef=NIRInterp(Ref[:,0])

NRespInterp=interpolate.interp1d(NRespWV[:,0],NRespWV[:,1],kind='linear', copy=True,
                         bounds_error=False, fill_value=0.0)  
NResponRef=NRespInterp(Ref[:,0])
Example #45
import scipy as sp
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

d3, a3 = sp.loadtxt("cavity measurements.txt", unpack=True)
dist3 = d3 / 100

plt.plot(dist3, a3, 'bx')
xps = sp.linspace(0.36, 1.1, 101)


def amp(x, a, b, c):
    return a / (x - b) + c


fit = curve_fit(amp, dist3, a3, p0=[-1, -1, 1])

data_fit = amp(xps, *fit[0])
plt.xlabel('distance/m')
plt.ylabel('amplitude/V')
plt.plot(xps, data_fit)
plt.plot(dist3, a3, 'x')
plt.show()

print("fit variables used: a=%.3f, b=%.3f, c=%.3f." %
      (fit[0][0], fit[0][1], fit[0][2]))
snd = sp.sqrt(sp.diag(fit[1]))
print("these has standard deviations of a=%.3f, b=%.3f, c=%.3f." %
      (snd[0], snd[1], snd[2]))
#%%
theta = sp.arcsin(dist3 / 1.14)
Example #46
#shutil.copyfile(filepath,newpath)
testingrooms = ['A','B','C']
for room in testingrooms:
    path="/Users/asamson/Documents/GitHub/ps2-nosmasa/"+"testingroom"+ room +"/experiment_data.csv"
    newpath="/Users/asamson/Documents/GitHub/ps2-nosmasa/"+"rawdata/"+"experiemtn_data_"+ room +".csv"
    shutil.copyfile(path,newpath)


#%%
# read in all the data files in rawdata directory using a for loop
# columns: subject, stimulus, pairing, accuracy, median RT
#
data = np.empty((0,5))
for room in testingrooms:
    newpath="/Users/asamson/Documents/GitHub/ps2-nosmasa/"+"rawdata/"+"experiemtn_data_"+ room +".csv"
    tmp = sp.loadtxt(newpath,delimiter=',')
    data = np.vstack([data,tmp])


#%%
# calculate overall average accuracy and average median RT

subject_number = data [:,0]
stimuli = data [:,1]
pairing = data [:,2]
accuracy = data [:,3]
median = data [:,4]

acc_avg = np.mean(accuracy*100)
mrt_avg = np.mean(median)# 477.3ms
Example #47
for quantity in quantities:

    ##Start figure + subplot
    plt.figure(figsize=(7, 5))

    ## Load data from multiple files
    print "Got %d files to plot %s..." % (len(sys.argv), quantity)
    x, y, z, z2 = [np.array([]) for _ in range(4)]  ## three empty arrays

    filenames = sys.argv[1:]
    #filenames.sort(key=lambda name: float(name.split('radius=')[1].split('_')[0]))     # sort (optional)

    for datafile_name in filenames:
        ## Getting 1D data
        (freq, s11_ampli, s11p, s12_ampli, s12p, Nre, Nim, Zre, Zim, eps_r, eps_i, mu_r, mu_i) = \
                np.loadtxt(datafile_name, usecols=range(13), unpack=True)
        if quantity == 'reflection': znew = s11_ampli
        elif quantity == 'transmission': znew = s12_ampli
        elif quantity == 'loss': znew = np.log(1 - s11_ampli**2 - s12_ampli**2)
        elif quantity == 'absNimag':
            znew = np.clip(abs(Nim), 0, 10)
            #znew = np.log10(np.clip(abs(Nim), 0, 300 ))
        elif quantity == 'absNre':
            znew = abs(
                np.arcsin(np.sin(np.real(Nre * freq * 100e-6 / c) * np.pi)) /
                np.pi)
            znew2 = np.clip(abs(Nim), 0, 10)
        elif quantity == 'Nre':
            znew = Nre  #np.real(Nre*freq*100e-6/c)
        elif quantity == 'eps':
            znew = eps_r
Example #48
0
def load_reference_terms_A_1loop():
	dtype = [('k','f8'),('pk',('f8',5))]
	ref = scipy.loadtxt('self_terms_A_1loop.dat',dtype=dtype)
	return ref
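
# Hedged usage sketch (assumes 'self_terms_A_1loop.dat' is present and scipy is
# imported): with the structured dtype above, columns are read by field name.
ref = load_reference_terms_A_1loop()
k = ref['k']          # shape (N,)
pk_terms = ref['pk']  # shape (N, 5)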
Example #49
0
import sys
import h5py
import scipy as sp
import cPickle
import re

sys.path.append(
    '/cluster/home/akahles/git/projects/2013/PanCancerTCGA/rerun2017')
import utils.utils as utils
from utils.paths import paths

if len(sys.argv) < 2:
    print >> sys.stderr, 'Usage: %s <counts.hdf5>' % sys.argv[0]
    sys.exit(1)
infname = sys.argv[1]

whitelist = sp.loadtxt(paths.whitelist, delimiter='\t', dtype='str')
whitelist = sp.array([x.split('.')[1] for x in whitelist])

d_psi_t = [0.0, 0.1, 0.3, 0.5]
mr_t = [5, 20, 50]
nan_t = 10

IN = h5py.File(infname, 'r')

### get strain / CT information
(ct_dict, tumor_dict) = utils.get_ct_dict_metatable(paths.metadata,
                                                    style='pancan_rerun18')
strains = sp.array([x.split('.')[1] for x in IN['strains'][:]])
ctypes = sp.array([ct_dict[x] if x in ct_dict else 'NA' for x in strains])
istumor = sp.array(
    [tumor_dict[x] if x in tumor_dict else 'NA' for x in strains])
Example #50
0
def load_reference_terms_bias_1loop():
	dtype = [(key,'f8') for key in Bias1Loop.FIELDS]
	ref = scipy.loadtxt('self_terms_bias_1loop.dat',dtype=dtype)
	return ref
Example #51
0
import numpy as np
from scipy import loadtxt, optimize
import matplotlib.pyplot as plt

# Here we define our fit function and residual functions
def fitfunc(p, x):
    return p[0]*x + p[1]
def residual(p, x, y, dy):
    return (fitfunc(p, x)-y)/dy

# Read in the data from file
t, ch,dt, dch= loadtxt('pha_calib.txt', unpack=True, skiprows=1)

##############################################################################
# Fit
##############################################################################
p01 = [10.,60]
pf1, cov1, info1, mesg1, success1 = optimize.leastsq(residual, p01,
                                     args = (ch, t, dt), full_output=1)

if cov1 is None:
    print('Fit did not converge')
    print('Success code:', success1)
    print(mesg1)
else:
    print('Fit Converged')
    chisq1 = sum(info1['fvec']*info1['fvec'])
    dof1 = len(t)-len(pf1)
    pferr1 = [np.sqrt(cov1[i,i]) for i in range(len(pf1))]
    print('Converged with chi-squared', chisq1)
    print('Number of degrees of freedom, dof =',dof1)
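
    # Hedged follow-up sketch: reduced chi-squared and a quick overlay of the
    # fitted line on the calibration data (pf1 = [slope, intercept]).
    print('Reduced chi-squared:', chisq1 / dof1)
    plt.errorbar(ch, t, yerr=dt, fmt='k.', label='data')
    plt.plot(ch, fitfunc(pf1, ch), 'r-', label='linear fit')
    plt.xlabel('channel')
    plt.ylabel('time')
    plt.legend()
    plt.show()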
Example #52
0
def main():

    description = 'Spectroscopic Surface & Atmosphere Fitting'
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file')
    parser.add_argument('--row_column', default='')
    parser.add_argument('--profile', default='')
    args = parser.parse_args()

    # Setup
    config = json.load(open(args.config_file, 'r'))
    configdir, f = split(abspath(args.config_file))
    config = expand_all_paths(config, configdir)

    # Forward Model
    fm = ForwardModel(config['forward_model'])

    # Inversion method
    if 'mcmc_inversion' in config:
        iv = MCMCInversion(config['mcmc_inversion'], fm)
    else:
        iv = Inversion(config['inversion'], fm)

    # Output object
    out = Output(config, iv)

    # Simulation mode? Binary or text mode?
    simulation_mode = (not ('input' in config)) or \
        (not ('measured_radiance_file' in config['input']))
    text_mode = simulation_mode or \
        config['input']['measured_radiance_file'].endswith('txt')

    # Do we apply a radiance correction?
    if (not simulation_mode) and \
            'radiometry_correction_file' in config['input']:
        radiance_correction_file = config['input'][
            'radiometry_correction_file']
        radiance_correction, wl = spectrumLoad(radiance_correction_file)
    else:
        radiance_correction = None

    if text_mode:

        # ------------------------------- Text mode

        # build geometry object
        if 'input' in config:
            obs, loc, glt = None, None, None
            if 'glt_file' in config['input']:
                glt = s.loadtxt(config['input']['glt_file'])
            if 'obs_file' in config['input']:
                obs = s.loadtxt(config['input']['obs_file'])
            if 'loc_file' in config['input']:
                loc = s.loadtxt(config['input']['loc_file'])
            geom = Geometry(obs=obs, glt=glt, loc=loc)
        else:
            geom = None

        if simulation_mode:

            # Get our measurement from instrument data or the initial state vector
            state_est = fm.init_val.copy()
            rdn_est = fm.calc_rdn(state_est, geom)
            rdn_meas = rdn_est.copy()
            rdn_sim = fm.instrument.simulate_measurement(rdn_meas, geom)
            rfl_est, rdn_est, path_est, S_hat, K, G =\
                iv.forward_uncertainty(state_est, rdn_meas, geom)

        else:

            # Get radiance
            rdn_meas, wl = spectrumLoad(
                config['input']['measured_radiance_file'])
            if radiance_correction is not None:
                rdn_meas = rdn_meas * radiance_correction
            rdn_sim = None

            if len(args.profile) > 0:
                cProfile.runctx('iv.invert(rdn_meas, geom, None)', globals(),
                                locals())
                sys.exit(0)

            elif 'mcmc_inversion' in config:

                # MCMC Sampler
                samples = iv.invert(rdn_meas, geom, out=out)
                if 'mcmc_samples_file' in config['output']:
                    D = {'samples': samples}
                    savemat(config['output']['mcmc_samples_file'], D)
                state_est = samples.mean(axis=0)
                rfl_est, rdn_est, path_est, S_hat, K, G =\
                    iv.forward_uncertainty(state_est, rdn_meas, geom)

            else:

                # Conjugate Gradient
                state_est = iv.invert(rdn_meas, geom, out=out)
                rfl_est, rdn_est, path_est, S_hat, K, G =\
                    iv.forward_uncertainty(state_est, rdn_meas, geom)

        out.write_spectrum(state_est, rfl_est, rdn_est, path_est, rdn_meas,
                           rdn_sim, geom)

    else:

        # ------------------------------ Binary mode

        meas_file = config['input']['measured_radiance_file']
        meas_hdr = meas_file + '.hdr'
        meas = envi.open(meas_hdr, meas_file)
        nl, nb, ns = [
            int(meas.metadata[n]) for n in ('lines', 'bands', 'samples')
        ]

        # Do we apply a flatfield correction?
        if 'flatfield_correction_file' in config['input']:
            ffile = config['input']['flatfield_correction_file']
            fcor = envi.open(ffile + '.hdr', ffile)
            fcor_mm = fcor.open_memmap(interleave='source', writable=False)
            flatfield = s.array(fcor_mm[0, :, :])
        else:
            flatfield = None

        if 'obs_file' in config['input']:
            obs_file = config['input']['obs_file']
            obs_hdr = obs_file + '.hdr'
            obs = envi.open(obs_hdr, obs_file)
            if int(obs.metadata['bands']) != 11 and \
               int(obs.metadata['bands']) != 10:
                raise ValueError('Expected 10 or 11 bands in OBS file')
            if int(obs.metadata['lines']) != nl or \
               int(obs.metadata['samples']) != ns:
                raise ValueError('obs file dimensions do not match radiance')
        else:
            obs_file, obs_hdr = None, None

        if 'glt_file' in config['input']:
            glt_file = config['input']['glt_file']
            glt_hdr = glt_file + '.hdr'
            glt = envi.open(glt_hdr, glt_file)
            if int(glt.metadata['bands']) != 2:
                raise ValueError('Expected two bands in GLT file')
            if int(glt.metadata['lines']) != nl or \
               int(glt.metadata['samples']) != ns:
                raise ValueError('GLT file dimensions do not match radiance')
        else:
            glt_file, glt_hdr = None, None

        if 'loc_file' in config['input']:
            loc_file = config['input']['loc_file']
            loc_hdr = loc_file + '.hdr'
            loc = envi.open(loc_hdr, loc_file)
            if int(loc.metadata['bands']) != 3:
                raise ValueError('Expected three bands in LOC file')
            if int(loc.metadata['lines']) != nl or \
               int(loc.metadata['samples']) != ns:
                raise ValueError('loc file dimensions do not match radiance')
        else:
            loc_file, loc_hdr = None, None

        rfl_file = config['output']['estimated_reflectance_file']
        rfl_hdr = rfl_file + '.hdr'
        rfl_meta = meas.metadata.copy()
        if not os.path.exists(rfl_hdr):
            rfl = envi.create_image(rfl_hdr, rfl_meta, ext='', force=True)
            del rfl
        rfl = envi.open(rfl_hdr, rfl_file)

        state_file = config['output']['estimated_state_file']
        state_hdr = state_file + '.hdr'
        state_meta = meas.metadata.copy()
        state_meta['bands'] = len(fm.statevec)
        state_meta['band names'] = fm.statevec[:]
        if not os.path.exists(state_hdr):
            state = envi.create_image(state_hdr,
                                      state_meta,
                                      ext='',
                                      force=True)
            del state
        state = envi.open(state_hdr, state_file)

        mdl_file = config['output']['modeled_radiance_file']
        mdl_hdr = mdl_file + '.hdr'
        mdl_meta = meas.metadata.copy()
        if not os.path.exists(mdl_hdr):
            mdl = envi.create_image(mdl_hdr, mdl_meta, ext='', force=True)
            del mdl
        mdl = envi.open(mdl_hdr, mdl_file)

        path_file = config['output']['path_radiance_file']
        path_hdr = path_file + '.hdr'
        path_meta = meas.metadata.copy()
        if not os.path.exists(path_hdr):
            path = envi.create_image(path_hdr, path_meta, ext='', force=True)
            del path
        path = envi.open(path_hdr, path_file)

        post_file = config['output']['posterior_uncertainty_file']
        post_hdr = post_file + '.hdr'
        post_meta = state_meta.copy()
        if not os.path.exists(post_hdr):
            post = envi.create_image(post_hdr, post_meta, ext='', force=True)
            del post
        post = envi.open(post_hdr, post_file)

        if 'component_file' in config['output']:
            comp_file = config['output']['component_file']
            comp_hdr = comp_file + '.hdr'
            comp_meta = state_meta.copy()
            comp_meta['bands'] = 1
            comp_meta['band names'] = '{Surface Model Component}'
            if not os.path.exists(comp_hdr):
                comp = envi.create_image(comp_hdr,
                                         comp_meta,
                                         ext='',
                                         force=True)
                del comp
            comp = envi.open(comp_hdr, comp_file)
        else:
            comp = None

        meas_mm, obs_mm, state_mm, rfl_mm, path_mm, mdl_mm = \
            None, None, None, None, None, None

        # Did the user specify a row,column tuple, or a row?
        if len(args.row_column) < 1:
            lines, samps = range(nl), range(ns)
        else:
            # Restrict the range of the retrieval if overridden.
            ranges = args.row_column.split(',')
            if len(ranges) == 1:
                lines, samps = [int(ranges[0])], range(ns)
            elif len(ranges) == 2:
                line_start, line_end = ranges
                lines, samps = range(int(line_start), int(line_end)), range(ns)
            elif len(ranges) == 4:
                line_start, line_end, samp_start, samp_end = ranges
                lines = range(int(line_start), int(line_end))
                samps = range(int(samp_start), int(samp_end))

        # analyze the image one frame at a time
        for i in lines:

            # Flush cache every once in a while
            print('line %i/%i' % (i, nl))
            if meas_mm is None or i % 100 == 0:

                # Refresh Radiance buffer
                del meas
                meas = envi.open(meas_hdr, meas_file)
                meas_mm = meas.open_memmap(interleave='source', writable=False)

                # Refresh OBS buffer
                if obs_hdr is not None:
                    del obs
                    obs = envi.open(obs_hdr, obs_file)
                    obs_mm = obs.open_memmap(interleave='source',
                                             writable=False)

                # Refresh GLT buffer
                if glt_hdr is not None:
                    del glt
                    glt = envi.open(glt_hdr, glt_file)
                    glt_mm = glt.open_memmap(interleave='source',
                                             writable=False)

                # Refresh LOC buffer
                if loc_hdr is not None:
                    del loc
                    loc = envi.open(loc_hdr, loc_file)
                    loc_mm = loc.open_memmap(interleave='source',
                                             writable=False)

                # Refresh Output buffers
                del rfl
                rfl = envi.open(rfl_hdr, rfl_file)
                rfl_mm = rfl.open_memmap(interleave='source', writable=True)

                del state
                state = envi.open(state_hdr, state_file)
                state_mm = state.open_memmap(interleave='source',
                                             writable=True)

                del path
                path = envi.open(path_hdr, path_file)
                path_mm = path.open_memmap(interleave='source', writable=True)

                del mdl
                mdl = envi.open(mdl_hdr, mdl_file)
                mdl_mm = mdl.open_memmap(interleave='source', writable=True)

                del post
                post = envi.open(post_hdr, post_file)
                post_mm = post.open_memmap(interleave='source', writable=True)

                if comp is not None:
                    del comp
                    comp = envi.open(comp_hdr, comp_file)
                    comp_mm = comp.open_memmap(interleave='source',
                                               writable=True)

            # translate to BIP
            meas_frame = s.array(meas_mm[i, :, :]).T

            if obs_hdr is not None:
                obs_frame = s.array(obs_mm[i, :, :])

            if glt_hdr is not None:
                glt_frame = s.array(glt_mm[i, :, :])

            if loc_hdr is not None:
                loc_frame = s.array(loc_mm[i, :, :])

            init = None

            nl = int(rfl_meta['lines'])
            ns = int(rfl_meta['samples'])
            nb = int(rfl_meta['bands'])
            nsv = int(state_meta['bands'])

            if comp is not None:
                comp_frame = s.zeros((1, ns))
            post_frame = s.zeros((nsv, ns), dtype=s.float32)
            rfl_frame = s.zeros((nb, ns), dtype=s.float32)
            state_frame = s.zeros((nsv, ns), dtype=s.float32)
            path_frame = s.zeros((nb, ns), dtype=s.float32)
            mdl_frame = s.zeros((nb, ns), dtype=s.float32)

            for j in samps:
                try:

                    # Use AVIRIS-C convention?
                    if loc_hdr is not None and "t01p00r" in loc_file:
                        loc_frame[:, 2] = loc_frame[:, 2] / \
                            1000.0  # translate to km

                    rdn_meas = meas_frame[j, :]

                    # Bad data test
                    if all(rdn_meas < -49.0):
                        raise OOBError()

                    if obs_hdr is not None:
                        obs_spectrum = obs_frame[j, :]
                    else:
                        obs_spectrum = None

                    if glt_hdr is not None:
                        pc = abs(glt_frame[j, 0]) - 1
                        if pc < 0:
                            raise OOBError()
                        glt_spectrum = glt_frame[j, :]
                    else:
                        pc = j
                        glt_spectrum = None

                    if loc_hdr is not None:
                        loc_spectrum = loc_frame[j, :]
                    else:
                        loc_spectrum = None

                    if flatfield is not None:
                        rdn_meas = rdn_meas * flatfield[:, pc]
                    if radiance_correction is not None:
                        rdn_meas = rdn_meas * radiance_correction
                    geom = Geometry(obs_spectrum,
                                    glt_spectrum,
                                    loc_spectrum,
                                    pushbroom_column=pc)

                    # Inversion
                    if len(args.profile) > 0:
                        cProfile.runctx('iv.invert(rdn_meas, geom, None)',
                                        globals(), locals())
                        sys.exit(0)
                    else:
                        state_est = iv.invert(rdn_meas, geom, None, init=init)
                        rfl_est, rdn_est, path_est, S_hat, K, G =\
                            iv.forward_uncertainty(state_est, rdn_meas, geom)

                    # write spectrum
                    state_surf = state_est[iv.fm.surface_inds]
                    post_frame[:, j] = s.sqrt(s.diag(S_hat))
                    rfl_frame[:, j] = rfl_est
                    state_frame[:, j] = state_est
                    path_frame[:, j] = path_est
                    mdl_frame[:, j] = rdn_est
                    if comp is not None:
                        comp_frame[:, j] = iv.fm.surface.component(
                            state_surf, geom)

                except OOBError:
                    post_frame[:, j] = -9999 * s.ones((nsv))
                    rfl_frame[:, j] = -9999 * s.ones((nb))
                    state_frame[:, j] = -9999 * s.ones((nsv))
                    path_frame[:, j] = -9999 * s.ones((nb))
                    mdl_frame[:, j] = -9999 * s.ones((nb))
                    if comp is not None:
                        comp_frame[:, j] = -9999

            post_mm[i, :, :] = post_frame.copy()
            rfl_mm[i, :, :] = rfl_frame.copy()
            state_mm[i, :, :] = state_frame.copy()
            path_mm[i, :, :] = path_frame.copy()
            mdl_mm[i, :, :] = mdl_frame.copy()
            if comp is not None:
                comp_mm[i, :, :] = comp_frame.copy()

        del post_mm
        del rfl_mm
        del state_mm
        del path_mm
        del mdl_mm
        if comp is not None:
            del comp_mm
Example #53
0
def load_pklin():
	return scipy.loadtxt('ref_pk_lin.dat',unpack=True)
Example #54
0
    Order = 1
    #computing first order stuff
    Qgrav = Mp * (c)
    scale = 1.0 / scale
    k1 = 1.0 / (Qgrav * scale)
    D1 = 1.37738149628 * 10**23  #Dn(redshift,Order)
    #print D1
    #file=open('distance.txt','w')
    #file.write(str(D1))
    #file.close()
    pl = 1.0 / (k1 * D1)

    return deltat * pl


EEn, tti, ty, = sp.loadtxt('100MEV10DEGEVENTS.txt', unpack=True, skiprows=3)
stopwatch = time.time()  ################## START STOPWATCH

ttti = []
EEEn = []
ti = []
En = []
Emin = 1000.0
Emax = 1000000.0

for i in range(len(tti)):
    if (Emin < EEn[i] < Emax):
        ttti.append(tti[i])
        EEEn.append(EEn[i])

starttime = tti[
Example #55
0
def load_reference_spectrum_1loop(a='delta',b='theta'):
	dtype = ['k','pk_nowiggle','pk_lin','pk','error','Dgrowth']
	dtype = [(key,'f8') for key in dtype]
	ref = scipy.loadtxt('ref_spectrum_1loop_{}_{}.dat'.format(a,b),dtype=dtype)
	return ref
Example #56
0
    return recta


def modelo1(x, A, B, C, mu, sigma):
    modelo1 = recta(x, A, B) - gaussiana(x, C, mu, sigma)
    return modelo1


def modelo2(x, A, B, C, mu, sigma):
    modelo2 = recta(x, A, B) - lorentz(x, C, mu, sigma)
    return modelo2


# Main Setup

x = sp.loadtxt('espectro.dat')[:, 0]  # wavelength
y = sp.loadtxt('espectro.dat')[:, 1]  # flux
sigma = sp.std(x)
mu = sp.mean(x)
A, B = sp.polyfit(x, y, 1)
C = 1 * 10**(-16)  # smallest y value
a1, b1 = cv(modelo1, x, y, [A, B, C, mu, sigma])
a2, b2 = cv(modelo2, x, y, [A, B, C, mu, sigma])
modelo1v = modelo1(x, *a1)
modelo2v = modelo2(x, *a2)

# Plot of model 1

fig1 = plt.figure(1)
fig1.clf()
plt.plot(x, y, 'b-')
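
# Hedged reconstruction: recta, gaussiana and lorentz are cut off in this
# fragment; presumably they are a straight line, a Gaussian and a Lorentzian
# with amplitude C, centre mu and width sigma, and cv is presumably
# scipy.optimize.curve_fit. Standard forms, shown only as commented-out assumptions:
# def recta(x, A, B):
#     return A * x + B
# def gaussiana(x, C, mu, sigma):
#     return C * sp.exp(-(x - mu)**2 / (2. * sigma**2))
# def lorentz(x, C, mu, sigma):
#     return C * sigma**2 / ((x - mu)**2 + sigma**2)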
Example #57
0
def load_reference_terms_B_2loop():
	dtype = [('k','f8'),('pk',('f8',9))]
	ref = scipy.loadtxt('ref_terms_B_2loop.dat',dtype=dtype)
	return ref
Example #58
0
import rasterTools as rt
import scipy as sp
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

# Load data set
im, GeoT, Proj = rt.open_data('../Data/university.tif')
[h, w, b] = im.shape
im.shape = (h * w, b)
wave = sp.loadtxt('../Data/waves.csv', delimiter=',')

# Do PCA
pca = PCA()
pca.fit(im)

# Save Eigenvectors
D = sp.concatenate((wave[:, sp.newaxis], pca.components_[:3, :].T), axis=1)
sp.savetxt('../FeatureExtraction/figures/pca_pcs.csv', D, delimiter=',')

# Plot explained variance
l = pca.explained_variance_
print l[:5]
print(l.cumsum() / l.sum())[:5]
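
# Hedged extra: a quick scree plot of the cumulative explained variance
# printed above, to judge how many components are worth keeping.
plt.figure()
plt.plot(l.cumsum() / l.sum(), 'o-')
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.show()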

# Projection of the first PCs
imp = sp.dot(im, pca.components_[:3, :].T)
imp.shape = (h, w, 3)

# Save image
rt.write_data('../Data/pca_university.tif', imp, GeoT, Proj)
Example #59
0
print "-" * 50
print "= - Parse Commandline "
# Import command line option parser:
from optparse import OptionParser
# Setup of the command line options:
usage = "usage: %prog [options] filename.dat"
parser = OptionParser(usage)
options, filename = parser.parse_args(sys.argv[1:])
print "-" * 50

print "-" * 50

print "= - Load data"
print "Checking test results from file:", filename[0]
print "by comparing with data in file:", filename[1]
myTestData = scipy.loadtxt(filename[0], skiprows=1)
myExpectedResults = scipy.loadtxt(filename[1], skiprows=1)
print "-" * 50

print "-" * 50
print "Error between the two files:"
"""diff=(myTestData[:,2]-myExpectedResults[:,2])**2;
diff2 = []
for d in diff:
	diff2.append(sqrt(d))

diff = diff2
diff=diff[:]/-myExpectedResults[:,2];
print diff"""

if len(myTestData) != len(myExpectedResults):
Example #60
0
def main():

    parser = argparse.ArgumentParser(description="Create a surface model")
    parser.add_argument('config', type=str, metavar='INPUT')
    args = parser.parse_args()
    config = json_load_ascii(args.config, shell_replace=True)
    configdir, configfile = split(abspath(args.config))

    # Determine top level parameters
    for q in ['output_model_file', 'sources', 'normalize', 'wavelength_file']:
        if q not in config:
            raise ValueError('Missing parameter: %s' % q)

    wavelength_file = expand_path(configdir, config['wavelength_file'])
    normalize = config['normalize']
    reference_windows = config['reference_windows']
    outfile = expand_path(configdir, config['output_model_file'])

    # load wavelengths file
    q = s.loadtxt(wavelength_file)
    if q.shape[1] > 2:
        q = q[:, 1:]
    if q[0, 0] < 100:
        q = q * 1000.0
    wl = q[:, 0]
    nchan = len(wl)

    # build global reference windows
    refwl = []
    for wi, window in enumerate(reference_windows):
        active_wl = aand(wl >= window[0], wl < window[1])
        refwl.extend(wl[active_wl])
    normind = s.array([s.argmin(abs(wl - w)) for w in refwl])
    refwl = s.array(refwl, dtype=float)

    # create basic model template
    model = {
        'normalize': normalize,
        'wl': wl,
        'means': [],
        'covs': [],
        'refwl': refwl
    }

    for si, source_config in enumerate(config['sources']):

        # Determine source parameters
        for q in ['input_spectrum_files', 'windows', 'n_components']:
            if q not in source_config:
                raise ValueError('Source %i is missing a parameter: %s' %
                                 (si, q))

        infiles = [
            expand_path(configdir, fi)
            for fi in source_config['input_spectrum_files']
        ]
        ncomp = int(source_config['n_components'])
        windows = source_config['windows']

        # load spectra
        spectra = []
        for infile in infiles:
            hdrfile = infile + '.hdr'
            rfl = envi.open(hdrfile, infile)
            nl, nb, ns = [
                int(rfl.metadata[n]) for n in ('lines', 'bands', 'samples')
            ]
            swl = s.array([float(f) for f in rfl.metadata['wavelength']])
            rfl_mm = rfl.open_memmap(interleave='source', writable=True)
            if rfl.metadata['interleave'] == 'bip':
                x = s.array(rfl_mm[:, :, :])
            if rfl.metadata['interleave'] == 'bil':
                x = s.array(rfl_mm[:, :, :]).transpose((0, 2, 1))
            x = x.reshape(nl * ns, nb)

            # import spectra and resample
            spectra.extend(([
                interp1d(swl,
                         x1,
                         kind='linear',
                         bounds_error=False,
                         fill_value='extrapolate')(wl) for x1 in x
            ]))

        spectra = s.array(spectra)
        use = s.all(s.isfinite(spectra), axis=1)
        spectra = spectra[use, :]

        # accumulate total list of window indices
        window_idx = -s.ones((nchan), dtype=int)
        for wi, win in enumerate(windows):
            active_wl = aand(wl >= win['interval'][0], wl < win['interval'][1])
            window_idx[active_wl] = wi

        # Two step model.  First step is k-means initialization
        kmeans = KMeans(init='k-means++', n_clusters=ncomp, n_init=10)
        kmeans.fit(spectra)
        Z = kmeans.predict(spectra)

        for ci in range(ncomp):

            m = s.mean(spectra[Z == ci, :], axis=0)
            C = s.cov(spectra[Z == ci, :], rowvar=False)

            for i in range(nchan):
                window = windows[window_idx[i]]
                if window['correlation'] == 'EM':
                    C[i, i] = C[i, i] + float(window['regularizer'])
                elif window['correlation'] == 'decorrelated':
                    var_ii = C[i, i]  # renamed to avoid shadowing the component index ci
                    C[:, i] = 0
                    C[i, :] = 0
                    C[i, i] = var_ii + float(window['regularizer'])
                else:
                    raise ValueError('I do not recognize the source ' +
                                     window['correlation'])

            # Normalize the component spectrum if desired
            if normalize == 'Euclidean':
                z = s.sqrt(s.sum(pow(m[normind], 2)))
            elif normalize == 'RMS':
                z = s.sqrt(s.mean(pow(m[normind], 2)))
            elif normalize == 'None':
                z = 1.0
            else:
                raise ValueError('Unrecognized normalization: %s\n' %
                                 normalize)
            m = m / z
            C = C / (z**2)

            model['means'].append(m)
            model['covs'].append(C)

    model['means'] = s.array(model['means'])
    model['covs'] = s.array(model['covs'])

    savemat(outfile, model)
    print('saving results to ' + outfile)
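
    # Hedged usage note: the saved model can presumably be read back with
    # scipy.io.loadmat, e.g.
    #   from scipy.io import loadmat
    #   model = loadmat(outfile)
    #   means, covs = model['means'], model['covs']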