Example #1
def merge_csvs(in_list):
    for idx, in_file in enumerate(in_list):
        try:
            in_array = np.loadtxt(in_file, delimiter=',')
        except ValueError as ex:
            try:
                in_array = np.loadtxt(in_file, delimiter=',', skiprows=1)
            except ValueError as ex:
                with open(in_file, 'r') as first:
                    header_line = first.readline()

                header_list = header_line.split(',')
                n_cols = len(header_list)
                try:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1,
                        usecols=list(range(1, n_cols))
                    )
                except ValueError as ex:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1, usecols=list(range(1, n_cols - 1)))
        if idx == 0:
            out_array = in_array
        else:
            out_array = np.dstack((out_array, in_array))
    out_array = np.squeeze(out_array)
    iflogger.info('Final output array shape:')
    iflogger.info(np.shape(out_array))
    return out_array
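The function above leans on a retry pattern: call np.loadtxt plainly, then fall back to skiprows and usecols when a header row or a label column breaks the parse. A minimal, self-contained sketch of that idiom (the helper name and file layout are illustrative, not from the original code):

import numpy as np

def load_numeric_csv(path):
    """Plain read first, then skip a header row, then also drop a leading label column."""
    try:
        return np.loadtxt(path, delimiter=',')                  # purely numeric file
    except ValueError:
        try:
            return np.loadtxt(path, delimiter=',', skiprows=1)  # header row present
        except ValueError:
            with open(path) as fh:
                n_cols = len(fh.readline().split(','))
            # header row plus a non-numeric first column (e.g. row labels)
            return np.loadtxt(path, delimiter=',', skiprows=1,
                              usecols=list(range(1, n_cols)))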
Example #2
def cv_eval_models(cv_dir, model_type, load_model):
    X = np.loadtxt(os.path.join(cv_dir, "X.txt"))
    y = np.loadtxt(os.path.join(cv_dir, "y.txt"))

    residuals = []

    ii = 0
    while os.path.exists(os.path.join(cv_dir, "fold_%02d_train.txt" % ii)):
        train = [int(x) for x in np.loadtxt(os.path.join(cv_dir, "fold_%02d_train.txt" % ii))]
        test = [int(x) for x in np.loadtxt(os.path.join(cv_dir, "fold_%02d_test.txt" % ii))]

        testX = X[test, :]
        testy = y[test]

        fname = ".".join(["fold_%02d" % ii, model_type])
        model = load_model(os.path.join(cv_dir, fname))
        residuals += [model.predict(testX[i:i+1]) - testy[i] for i in range(len(testy))]
        ii += 1

    mean_abs_error = np.mean(np.abs(residuals))
    median_abs_error = np.median(np.abs(residuals))

    f_results = open(os.path.join(cv_dir, "%s_results.txt" % model_type), 'w')
    f_results.write('mean_abs_error %f\n' % mean_abs_error)
    f_results.write('median_abs_error %f\n' % median_abs_error)
    f_results.close()

    return mean_abs_error, median_abs_error
Example #3
def check_that_nr_fit_runs():
    from jds_image_proc.clouds import voxel_downsample
    #from brett2.ros_utils import RvizWrapper    
    #import lfd.registration as lr
    ##import lfd.warping as lw    
    #if rospy.get_name() == "/unnamed":
        #rospy.init_node("test_rigidity", disable_signals=True)
        #from time import sleep
        #sleep(1)
    #rviz = RvizWrapper.create()
    
    pts0 = np.loadtxt("../test/rope_control_points.txt")
    pts1 = np.loadtxt("../test/bad_rope.txt")    
    pts_rigid = voxel_downsample(pts0[:10], .02)
    #lr.Globals.setup()
    np.seterr(all='ignore')
    np.set_printoptions(suppress=True)

    lin_ag, trans_g, w_eg, x_ea = tps.tps_nr_fit_enhanced(pts0, pts1, 0.01, pts_rigid, 0.001, method="newton",plotting=1)
    #lin_ag2, trans_g2, w_ng2 = tps_fit(pts0, pts1, .01, .01)
    #assert np.allclose(w_ng, w_ng2)
    def eval_partial(x_ma):
        return tps_eval(x_ma, lin_ag, trans_g, w_eg, x_ea) 
    #lr.plot_orig_and_warped_clouds(eval_partial, pts0, pts1, res=.008)
    #handles = lw.draw_grid(rviz, eval_partial, pts0.min(axis=0), pts0.max(axis=0), 'base_footprint')

    grads = tps.tps_grad(pts_rigid, lin_ag, trans_g, w_eg, x_ea)
Example #4
def plot_igraph_matrix(mod_cor_mat_file,mod_average_coords_file):

    import os
    #import igraph as ig
    import numpy as np
    
    from dmgraphanalysis.plot_igraph import plot_3D_igraph_weighted_signed_matrix
    
    print('loading module (node) coordinates')
    
    #mod_average_coords = np.loadtxt(mod_average_coords_file)
    

    print('load coords')
    
    mod_average_coords = np.loadtxt(mod_average_coords_file)
    
    
    #with open(mod_average_coords_file, 'Ur') as f:
        #mod_average_coords_list = list(tuple(map(float,rec))[0:2] for rec in csv.reader(f, delimiter=' '))
    
    #print mod_average_coords
    
    print "loading mod cor mat"
    
    mod_cor_mat = np.loadtxt(mod_cor_mat_file)
    
    i_graph_file = plot_3D_igraph_weighted_signed_matrix(mod_cor_mat,mod_average_coords)
    
    return i_graph_file
    
Example #5
def get_dates(file):
    """
    Read the first file in the input directory and create a ordinal based
    timeseries.
    Also find the indicies to split the time series into months and years
    """
    hours = (0, 1, 2, 3)
    days = (0, 1, 2)
    try:
        data = np.loadtxt(file, usecols=hours, dtype=int)
        datelist = [datetime(*d) for d in data]
    except (ValueError, TypeError):
        data = np.loadtxt(file, usecols=days, dtype=int)
        datelist = [datetime(*d) for d in data]

    # check to make sure we haven't used daily by mistake
    # (creating a bunch of duplicate times)
    newlist = []
    for i in datelist:
        if i not in newlist:
            newlist.append(i)
        else:
            raise ValueError('Found duplicate datetimes in datelist')

    print('VIC startdate: {0}'.format(datelist[0]))
    print('VIC enddate: {0}'.format(datelist[-1]))

    return datelist
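The docstring above also promises an ordinal time axis and month/year split indices that the body never computes; a small sketch of how those could be derived from the returned datelist (the helper name and return layout are assumptions for illustration):

import numpy as np

def split_indices(datelist):
    """Ordinal time axis plus the indices at which a new month or year begins."""
    ordinals = np.array([d.toordinal() for d in datelist])
    month_starts = [i for i in range(1, len(datelist))
                    if datelist[i].month != datelist[i - 1].month]
    year_starts = [i for i in range(1, len(datelist))
                   if datelist[i].year != datelist[i - 1].year]
    return ordinals, month_starts, year_starts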
def loadFilter(fname):
    #load filter transmission
    try:
        fdata = np.loadtxt(fname,dtype=float)
    except ValueError:
        fdata = np.loadtxt(fname,dtype=float,delimiter = ',')
    filtx = np.array(fdata[:,0])
    filty = np.array(fdata[:,1])
    filty[filty<0.0] = 0.0
    if max(filty)>1.0:
        filty/=100.0
    if max(filtx<2000):
        filtx*=10.0
    print (filty)

    if fname == 'Rfilter.txt':
        filty = filty/max(filty)*0.8218 #normalize to our Johnson R filter max Transmission
    if fname == 'Vfilter.txt':
        filty = filty/max(filty)*0.8623 #normalize to our Johnson V filter max transmission

    if (filtx[-1] < filtx[0]): #if array goes high to low, reverse the order to the integration does not get a negative
        filtx = filtx[::-1]
        filty = filty[::-1]

    filtWidth = filtx[-1]-filtx[0]
    print "filter width = ",filtWidth
    filtCorrection = integrate.simps(filty,x=filtx)/filtWidth #calculate correction factor for filter width
    print "filter correction = ", filtCorrection
    return filtx, filty, filtWidth, filtCorrection
Example #7
def loadlitlist(band):        
    try:
        f=TEMPLDIR+"/"+"templatelist"+band+".txt"
        templates=np.loadtxt(f,usecols=(0,1,2,3,4,5,7,8,9,10,13,14,15,16),dtype={'names': ('sn','dist','e_dist','incl','ebv','type','mjdmax','e_mjdmax','peak_ap','e_peak_ap','ebmvhost','vmr','peak','e_peak'),'formats': ('S6','f','f','f','f','S5','f','f','f','f','f','f','f','f')},skiprows=1, comments='#')

        print "reading files list ",f," worked"
    except:
        print "reading files list ",f," failed"


    sne=[]
    print "the templates are: ",templates['sn']

    for i,sn in enumerate(templates['sn']):         
        sne.append(templatesn(sn,dist=templates[i]['dist'],e_dist=templates[i]['e_dist'],incl=templates[i]['incl'],ebv=templates[i]['ebv'],sntype=templates[i]['type'],mjdmax=templates[i]['mjdmax'],e_mjdmax=templates[i]['e_mjdmax'],ebmvhost=templates[i]['ebmvhost'],vmr=templates[i]['vmr'],peak=templates[i]['peak'],e_peak=templates[i]['e_peak'],peak_ap=templates[i]['peak_ap'],e_peak_ap=templates[i]['e_peak_ap']))

#    for i,sn in  enumerate(templates['sn']):
        sne[i].phot=(np.loadtxt(TEMPLDIR+"/"+"sn"+sn+".dat",usecols=(0,bandscols[band], bandscols[band]+1), skiprows=1, unpack=1))
        #dereddening
        sne[i].phot[1]+=3.2*sne[i].ebv

        sne[i].absphot=sne[i].phot[1]-(sne[i].peak_ap-sne[i].peak)
        sne[i].normphot=sne[i].absphot-sne[i].peak
        #sne[i]['phot'][1]-2.5*np.log10((sne[i]['dist']/1e6/10.0)**2)
#    mysn=np.where(templates['sn']=='1994I')[0]
#    print sne[mysn]
##        Mv = m - 2.5 log[ (d/10)2 ].
##        flux = 10**(-lc['mag']/2.5)*5e10
##        dflux = flux*lc['dmag']/LN10x2p5
#        print sn
#    
    print "loaded sne"
    return sne, templates
Example #8
def plot_igraph_modules_conf_cor_mat_rada(rada_lol_file,Pajek_net_file,coords_file,net_List_file,gm_mask_coords_file):

    import numpy as np
    import nibabel as nib
    import os
    import csv
        
    from dmgraphanalysis.utils_cor import return_mod_mask_corres,read_lol_file,read_Pajek_corres_nodes,read_List_net_file
    
    from dmgraphanalysis.plot_igraph import plot_3D_igraph_modules_Z_list

    print('Loading node_corres')
    
    node_corres = read_Pajek_corres_nodes(Pajek_net_file)
    
    print(node_corres)
    print(node_corres.shape)
    
    print('Loading coords')
    
    
    #with open(coords_file, 'Ur') as f:
        #coords_list = list(tuple(map(float,rec))[0:2] for rec in csv.reader(f, delimiter=' '))
    
    coords = np.array(np.loadtxt(coords_file),dtype = 'int64')
    
    print(coords.shape)
    
    print('Loading gm mask coords')
    
    gm_mask_coords = np.array(np.loadtxt(gm_mask_coords_file),dtype = 'int64')
    
    print(gm_mask_coords.shape)
    
    
    print("Loading community belonging file " + rada_lol_file)

    community_vect = read_lol_file(rada_lol_file)
    
    #print community_vect
    print(community_vect.shape)
    
    print("loading net_List_net as list")
    
    Z_list = read_List_net_file(net_List_file)
    
    print(Z_list)
    
    print('extracting node coords')
    
    node_coords = coords[node_corres,:]
    
    print(node_coords.shape)
    
    print("plotting conf_cor_mat_modules_file with igraph")
    
    Z_list_all_modules_file = plot_3D_igraph_modules_Z_list(community_vect,node_coords,Z_list,gm_mask_coords)
    
    
    return Z_list_all_modules_file
Example #9
  def __init__(self,fname=None,mindx=None,mlist=[],logopt = 0, Lbox=2750., massfxnfname=None):
    """
    Read in a list of masses.  If logopt==1, then input masses are understood to be logarithmic.
    """
    self.Lbox = Lbox
    if mlist != []:
      if logopt == 0:
        self.m = np.array(mlist)
        self.lg10m = np.log10(self.m)
      else:
        self.lg10m = np.array(mlist)
        self.m = 10**(self.lg10m)
      self.lg10mcen, self.Nofm = None, None ## set these later with massfxn.
    elif massfxnfname is not None:
      self.lg10mcen, self.Nofm = np.loadtxt(massfxnfname,unpack=True,usecols=[0,1])


    else:
      if 0==0:
#      try:
        if logopt == 0:
          self.m = np.loadtxt(fname,usecols=[mindx],unpack=True)
          self.lg10m = np.log10(self.m)
        else:
          self.lg10m = np.loadtxt(fname,usecols=[mindx],unpack=True)
          self.m = 10**(self.lg10m)
        self.lg10mcen, self.Nofm = None, None ## set these later with massfxn.
      else:
#      except:
        print('file read did not work.')
        self.m = None
        self.lg10m = None
Example #10
 def testEpsilon_MOEA_NegativeDTLZ2(self):
     random = pyotl.utility.Random(1)
     problemGen = lambda: pyotl.problem.real.NegativeDTLZ2(3)
     problem = problemGen()
     pathProblem = os.path.join(self.pathData, type(problem).__name__.replace('Negative', ''), str(problem.GetNumberOfObjectives()))
     crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
     mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
     epsilon = pyotl.utility.PyList2Vector_Real([0.06] * problem.GetNumberOfObjectives())
     pfList = []
     for _ in range(self.repeat):
         problem = problemGen()
         initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
         optimizer = pyotl.optimizer.couple_couple.real.Epsilon_MOEA(random, problem, initial, crossover, mutation, epsilon)
         while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
             optimizer()
         pf = pyotl.utility.PyListList2VectorVector_Real(
             [list(solution.objective_) for solution in optimizer.GetSolutionSet()])
         for objective in pf:
             problem.Fix(objective)
         pfList.append(pf)
     pathCrossover = os.path.join(pathProblem, type(crossover).__name__)
     pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
     pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
     # GD
     indicator = pyotl.indicator.real.DTLZ2GD()
     metricList = [indicator(pf) for pf in pfList]
     rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
     self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
     # IGD
     indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
     metricList = [indicator(pf) for pf in pfList]
     rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
     self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
Example #11
def ascii_specfem2d_obspy(**kwargs):
    """ Reads seismic traces from text files
    """
    from obspy.core.stream import Stream
    from obspy.core.trace import Trace

    filenames = glob(solver='specfem2d', **kwargs)

    t = _np.loadtxt(filenames[0])[:, 0]
    nt = len(t)
    nr = len(filenames)

    # template trace carrying the time-axis metadata
    trace = Trace(data=_np.zeros(nt, dtype='float32'))
    trace.stats.starttime = t[0]
    trace.stats.delta = _np.mean(_np.diff(t))
    trace.stats.nt = len(t)

    # read data: one trace per file
    stream = Stream([trace.copy() for _ in range(nr)])
    for i, filename in enumerate(filenames):
        stream[i].data = _np.loadtxt(filename)[:, 1]

    return stream
Example #12
def ascii_specfem2d(**kwargs):
    """ Reads seismic traces from text files
    """
    files = glob(solver='specfem2d', **kwargs)
    t = _np.loadtxt(files[0])[:,0]
    h = Struct()
    h['t0'] = t[0]
    h['nr'] = len(files)
    h['ns'] = 1
    h['dt'] = _np.mean(_np.diff(t))
    h['nt'] = len(t)

    # read data
    s = _np.zeros((h['nt'], h['nr']))
    i = 0
    for file in files:
        s[:, i] = _np.loadtxt(file)[:, 1]
        i += 1

    # keep track of file names
    h.files = []
    for file in files:
        file = basename(file)
        h.files.append(file)

    return s, h
    def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
        """
        Core routine for determining stimulus correlation

        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        g_in = np.loadtxt(intensityfile)
        g_in.shape = g_in.shape[0], 1
        dcol = designmatrix.shape[1]
        mccol = mc_in.shape[1]
        concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
        cm = np.corrcoef(concat_matrix, rowvar=0)
        corrfile = self._get_output_filenames(motionfile, cwd)
        # write output to outputfile
        file = open(corrfile, 'w')
        file.write("Stats for:\n")
        file.write("Stimulus correlated motion:\n%s\n" % motionfile)
        for i in range(dcol):
            file.write("SCM.%d:" % i)
            for v in cm[i, dcol + np.arange(mccol)]:
                file.write(" %.2f" % v)
            file.write('\n')
        file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
        for i in range(dcol):
            file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
        file.close()
Example #14
def prune_layer(layer_name, model, netspec, original_activations_dir, current_activations_dir, args):
	log(args, "Starting to prune Layer %s\n" % layer_name)

	layer = get_layer(layer_name, model)
	log(args, "Old Weight Shape: %s" % str(layer.blobs[0].data.shape))
	log(args, "Old Bias Shape: %s" % str(layer.blobs[1].data.shape))

	layer_param = get_layer_param(layer_name, netspec)
	if layer_param is None:
		raise Exception("Layer %s does not exist in file %s" % (layer_name, args.network_file))
	bottom_blob_name = layer_param.bottom[0]
	bottom_activations_file = os.path.join(current_activations_dir, "%s.txt" % bottom_blob_name)
	bottom_activations = np.loadtxt(bottom_activations_file)
	log(args, "Bottom shape: %s" % str(bottom_activations.shape))

	top_blob_name = layer_param.top[0]
	top_activations_file = os.path.join(original_activations_dir, "%s.txt" % top_blob_name)
	top_activations = np.loadtxt(top_activations_file)
	log(args, "Top shape: %s" % str(top_activations.shape))

	# row = instance, col = neuron, so to get neuron similarity, we transpose
	gram_matrix = gram(bottom_activations.transpose(), args)
	log(args, "Gram Matrix shape: %s" % str(gram_matrix.shape))
	neuron_indices_to_keep = sample_neurons(gram_matrix, args)

	weights, bias = update_weights(bottom_activations, neuron_indices_to_keep, top_activations)
	log(args, "New Weight shape: %s" % str(weights.shape))
	log(args, "New Bias shape: %s" % str(bias.shape))

	layer.blobs[1].data[:] = bias[:]
	layer.blobs[0].reshape(weights.shape[0], weights.shape[1])
	layer.blobs[0].data[:] = weights[:]

	prune_prev_layer(layer_name, neuron_indices_to_keep, model, netspec, args)
def single_eval(pred_filepath, true_filepath):
    '''
    @brief: do evaluation on a single prediction
    '''
    # read and flatten
    y_pred_raw = np.loadtxt(open(pred_filepath,"rb"),delimiter=",").flatten()
    y_true_raw = np.loadtxt(open(true_filepath,"rb"),delimiter=",").flatten()
    assert len(y_pred_raw)==len(y_true_raw), 'len(y_pred_raw)!=len(y_true_raw)'

    # remove void
    void_num = 255
    nonvoid_idxes = [i for i in range(len(y_true_raw)) if y_true_raw[i]!=void_num]

    y_pred = [y_pred_raw[i] for i in nonvoid_idxes]
    y_true = [y_true_raw[i] for i in nonvoid_idxes]
    assert len(y_pred)==len(y_true), 'len(y_pred)!=len(y_true)'

    # get confusion mat
    cm = confusion_matrix(y_true, y_pred)

    # get present classes in an ascending order!, map to their accuracy
    present_classes = sorted( list(set(y_pred + y_true)) )

    class_perf_map = {}
    for i in range(len(present_classes)):
        class_perf_map[i] = get_perf(cm, i)

    return class_perf_map 
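single_eval calls a get_perf helper that is not shown in this snippet; one plausible reading, per-class accuracy taken from the corresponding confusion-matrix row, might look like this (an assumption, not the original implementation):

import numpy as np

def get_perf(cm, class_idx):
    """Per-class accuracy (recall): correct predictions over true instances of that class."""
    cm = np.asarray(cm, dtype=float)
    row_total = cm[class_idx, :].sum()
    return cm[class_idx, class_idx] / row_total if row_total > 0 else 0.0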
def get_target_feature(model):
    """ Get target features """
    name = model.subdir
    iteration = model.Mut_iteration
    cwd = os.getcwd()
    sub = "%s/%s/Mut_%d" % (cwd,name,iteration)
    
    ## To Do:
    ## - Check if a target set of contact probabilities is given
    ##   else construct target as <Q_i^TS> = Q^TS (uniform TS).

    ## ---- For future
    ## Format for target_Qi.dat
    ## - three columns:
    ##  <res_a>  <res_b>  <Q_ab^TS>
    ## computes contact as within native contact distance.
    ## ---- 
    
    ## ---- For now 
    ## Just a column with the desired contact probability in the TS

    if os.path.exists("%s/target_Qi.dat" % name):
        target =  np.loadtxt("%s/target_Qi.dat" % name)
        target_err =  np.loadtxt("%s/target_Qi_err.dat" % name)
    else:
        ## Compute the average Q of the TS: Average of the endpoints.
        os.chdir("%s" % sub)
        bounds, state_labels = get_state_bounds()
        Q_TS = 0.5*(bounds[2] + bounds[3])/float(model.n_contacts)
        target = Q_TS*np.ones(model.n_contacts,float)
        target_err = 0.05*np.ones(model.n_contacts,float)
        os.chdir(cwd)

    return target, target_err
Example #17
def make_lick_individual(targetSN, w1, w2):
    """ Make maps for the kinematics. """
    filename = "lick_corr_sn{0}.tsv".format(targetSN)
    binimg = pf.getdata("voronoi_sn{0}_w{1}_{2}.fits".format(targetSN, w1, w2))
    intens = "collapsed_w{0}_{1}.fits".format(w1, w2)
    extent = calc_extent(intens)
    bins = np.loadtxt(filename, usecols=(0,), dtype=str).tolist()
    bins = np.array([x.split("bin")[1] for x in bins]).astype(int)
    data = np.loadtxt(filename, usecols=np.arange(25)+1).T
    labels = [r'Hd$_A$', r'Hd$_F$', r'CN$_1$', r'CN$_2$', r'Ca4227', r'G4300',
             r'Hg$_A$', r'Hg$_F$', r'Fe4383', r'Ca4455', r'Fe4531', r'C4668',
             r'H$_\beta$', r'Fe5015', r'Mg$_1$', r'Mg$_2$', r'Mg$_b$', r'Fe5270',
             r'Fe5335', r'Fe5406', r'Fe5709', r'Fe5782', r'Na$_D$', r'TiO$_1$',
             r'TiO$_2$']
    mag = "[mag]"
    ang = "[\AA]"
    units = [ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, ang,
             ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, mag,
             mag]
    lims = [[None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None],
            [None, None], [None, None], [None, None], [None, None]]
    pdf = PdfPages("figs/lick_sn{0}.pdf".format(targetSN))
    fig = plt.figure(1, figsize=(6.25,5))
    plt.subplots_adjust(bottom=0.12, right=0.97, left=0.09, top=0.96)
    plt.minorticks_on()
    ax = plt.subplot(111)
    ax.minorticks_on()
    plot_indices = np.arange(12,22)
    for i, vector in enumerate(data):
        if i not in plot_indices:
            continue
        print "Making plot for {0}...".format(labels[i])
        kmap = np.zeros_like(binimg)
        kmap[:] = np.nan
        for bin,v in zip(bins, vector):
            idx = np.where(binimg == bin)
            kmap[idx] = v
        vmin = lims[i][0] if lims[i][0] else np.median(vector) - 2 * vector.std()
        vmax = lims[i][1] if lims[i][1] else np.median(vector) + 2 * vector.std()
        m = plt.imshow(kmap, cmap="inferno", origin="lower", vmin=vmin,
                   vmax=vmax, extent=extent, aspect="equal")
        make_contours()
        plt.minorticks_on()
        plt.xlabel("X [kpc]")
        plt.ylabel("Y [kpc]")
        plt.xlim(extent[0], extent[1])
        plt.ylim(extent[2], extent[3])
        cbar = plt.colorbar(m)
        cbar.set_label("{0} {1}".format(labels[i], units[i]))
        pdf.savefig()
        plt.clf()
    pdf.close()
    return
Example #18
	def Visualize(self,path=None,filename=None,viz_type='difference'):
		if path is None:
			path = self.result_path
		if filename is None:
			filename = '/results'
		im = []
		if self.n<=1:
			fig = mpl.figure()
			x = np.linspace(0,1,self.m)
			counter = 1
			for step in sorted(glob.glob(path+filename+'*.txt')):
				tmp = np.loadtxt(step)
				if viz_type=='difference':
					im.append(mpl.plot(x,(self.exact(x,np.zeros(self.m),counter*self.dt)-tmp),'b-'))
				else:
					im.append(mpl.plot(x,tmp,'b-'))
				counter += 1
			ani = animation.ArtistAnimation(fig,im)
			mpl.show()
		else:
			X,Y = np.meshgrid(np.linspace(0,1,self.m),np.linspace(0,1,self.n))
			mpl.ion()
			fig = mpl.figure()
			ax = fig.add_subplot(111,projection='3d')
			counter = 1
			for step in sorted(glob.glob(path+filename+'*.txt')):
				tmp = np.loadtxt(step)
				wframe = ax.plot_wireframe(X,Y,(self.exact(X,Y,(counter*self.dt))-tmp))
				mpl.draw()
				if counter==1:
					pass
					# ax.set_autoscaley_on(False)
				ax.collections.remove(wframe)
				counter +=1
Example #19
def test_psd_matlab():

    """ Test the results of mlab csd/psd against saved results from Matlab"""

    from matplotlib import mlab

    test_dir_path = os.path.join(nitime.__path__[0],'tests')
    
    ts = np.loadtxt(os.path.join(test_dir_path,'tseries12.txt'))
    
    #Complex signal! 
    ts0 = ts[1] + ts[0]*1j

    NFFT = 256
    Fs = 1.0
    noverlap = NFFT // 2

    fxx, f = mlab.psd(ts0,NFFT=NFFT,Fs=Fs,noverlap=noverlap,
                      scale_by_freq=True)

    fxx_mlab = np.fft.fftshift(fxx).squeeze()

    fxx_matlab = np.loadtxt(os.path.join(test_dir_path,'fxx_matlab.txt'))

    npt.assert_almost_equal(fxx_mlab,fxx_matlab,decimal=5)
Example #20
    def train(self, sf_pickle = ''):
        # load precomputed descriptors and target values
        self.train_descs = np.loadtxt(dirname(__file__) + '/NNScore/train_descs.csv', delimiter=',', dtype=float)
        self.train_target = np.loadtxt(dirname(__file__) + '/NNScore/train_target.csv', delimiter=',', dtype=float)
        self.test_descs = np.loadtxt(dirname(__file__) + '/NNScore/test_descs.csv', delimiter=',', dtype=float)
        self.test_target = np.loadtxt(dirname(__file__) + '/NNScore/test_target.csv', delimiter=',', dtype=float)

        n_dim = (~((self.train_descs == 0).all(axis=0) | (self.train_descs.min(axis=0) == self.train_descs.max(axis=0)))).sum()

        # number of networks to sample; the original implementation did 1000, but 100 gives results that are good enough.
        n = 1000
        trained_nets = Parallel(n_jobs=self.n_jobs)(delayed(_parallel_helper)(neuralnetwork([n_dim,5,1]), 'fit', self.train_descs, self.train_target, train_alg='tnc', maxfun=1000) for i in range(n))
        # get 20 best
        best_idx = np.array([net.score(self.test_descs, self.test_target.flatten()) for net in trained_nets]).argsort()[::-1][:20]
        self.model = ensemble_model([trained_nets[i] for i in best_idx])

        r2 = self.model.score(self.test_descs, self.test_target)
        r = np.sqrt(r2)
        print('Test set: R**2:', r2, ' R:', r)

        r2 = self.model.score(self.train_descs, self.train_target)
        r = np.sqrt(r2)
        print('Train set: R**2:', r2, ' R:', r)

        if sf_pickle:
            return self.save(sf_pickle)
        else:
            return self.save('NNScore.pickle')
Example #21
def CoAddFinal(frames, mode='mean', display=True):
    # co-add FINISHED, reduced spectra
    # only trick: resample on to wavelength grid of 1st frame
    files = np.loadtxt(frames, dtype=str, unpack=True)

    # read in first file
    wave_0, flux_0 = np.loadtxt(files[0],dtype='float',skiprows=1,
                                unpack=True,delimiter=',')

    for i in range(1,len(files)):
        wave_i, flux_i = np.loadtxt(files[i],dtype='float',skiprows=1,
                                    unpack=True,delimiter=',')

        # linear interp on to wavelength grid of 1st frame
        flux_i0 = np.interp(wave_0, wave_i, flux_i)

        flux_0 = np.dstack( (flux_0, flux_i0))

    if mode == 'mean':
        flux_out = np.squeeze(flux_0.sum(axis=2) / len(files))
    if mode == 'median':
        flux_out = np.squeeze(np.median(flux_0, axis=2))

    if display is True:
        plt.figure()
        plt.plot(wave_0, flux_out)
        plt.xlabel('Wavelength')
        plt.ylabel('Co-Added Flux')
        plt.show()

    return wave_0, flux_out
Example #22
def read_hist(path):
    """Read a histogram from a pair of files.

    This method accepts the location of the raw histogram file, e.g.,
    ehist.dat and parses the appropriate limits file (here, elim.dat)
    in the same directory.

    Args:
        path: The location of the raw histogram data.

    Returns:
        A Histogram object.
    """
    raw_hist = np.transpose(np.loadtxt(path))[1:]
    limits = np.loadtxt(_get_limits_path(path))

    def create_subhist(line):
        sub, size, lower, upper, step = line
        sub, size = int(sub), int(size)
        bins = np.linspace(lower, upper, size)
        if 'nhist' in path:
            bins = bins.astype('int')

        if len(raw_hist.shape) == 1:
            counts = np.array([raw_hist[sub]])
        else:
            counts = raw_hist[sub][0:size]

        return SubHistogram(bins=bins, counts=counts.astype('int'))

    return Histogram([create_subhist(line) for line in limits])
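read_hist relies on a _get_limits_path helper that is not included in this snippet; going only by the docstring (ehist.dat pairs with elim.dat in the same directory), a plausible sketch could be (hypothetical, not the package's actual code):

import os

def _get_limits_path(hist_path):
    """Map a raw histogram file such as .../ehist.dat to its limits file .../elim.dat."""
    directory, name = os.path.split(hist_path)
    prefix = name.replace('hist.dat', '')   # 'e' for ehist.dat, 'n' for nhist.dat, ...
    return os.path.join(directory, prefix + 'lim.dat')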
def main():
    args = _args()

    tests = {
        'squared_diff': test_squared_diff,
        'chi_square': chi_square,
        'chi_square_shape': chi_square_shape,
        'ks': kolmogorov_smirnov
    }
    if args.test_type not in tests:
        print('--test_type not found, available: {}'.format(tests.keys()))
        return

    # Create histograms with the same bins.
    if args.raw_data:
        print('Reading {}'.format(args.data_1))
        data_1 = np.loadtxt(args.data_1)
        print('Reading {}'.format(args.data_2))
        data_2 = np.loadtxt(args.data_2)
        print('Data read')
    
        if args.frames and args.raw_data:
            data_1 = data_1[:args.frames]
            data_2 = data_2[:args.frames]

        min_bins, max_bins = map(float, args.min_max.split(':'))
        bins = np.arange(min_bins, max_bins, (max_bins-min_bins)/args.bins)
        histogram_1, _ = np.histogram(data_1, bins=bins, density=False)
        histogram_2, _ = np.histogram(data_2, bins=bins, density=False)
    else:
        histogram_1 = np.loadtxt(args.data_1, usecols=(0, 1))
        histogram_2 = np.loadtxt(args.data_2, usecols=(0, 1))

    print('Running test {}'.format(args.test_type))
    tests[args.test_type](histogram_1, histogram_2)
Example #24
def create_class_vec(new_name):
    cfa_dir=PLS_code_dir+"data/cfaspec_snIa/"
    SNe_data=np.loadtxt(cfa_dir+'/cfasnIa_param_mod.dat', dtype={'names': ('SN_name', 'zhel', 'tMaxB', 'err_tMaxB', 'ref', 'Dm15', 'err_Dm15', 'ref2', 'M_B', 'err_M_B', "BmV", "err_BmV", "BmmVm", "err_BmmVm", "Phot_ref"),'formats': ('S15', "f8", "f8","f8", "S15", "f8", "f8","S15","f8" , "f8","f8", "f8","f8", "f8","S15")})
    spectra_data=np.loadtxt(cfa_dir+'/cfasnIa_mjdspec.dat', dtype={'names': ('spectrum_name', 'time'),'formats': ('S40', "f8")})
    SNe_BranchWang_class=np.loadtxt(cfa_dir+'/branchwangclass_mod.dat', dtype={'names': ('SN_name', 'pEW5972', 'pEW6355', 'vabs6355', 'phase', 'Branch', 'Wang'),'formats': ('S15', "f8", "f8","f8",  "f8","S15","S15")})
    name_regex = re.compile(r'(.+)\-\d+\.\d+')
    name_vector=[]
    for spectrum_name in enumerate(spectra_data["spectrum_name"]):
        name_vector.append(name_regex.search(spectrum_name[1]).group(1))
    #It creates the vectors of the classification of Branch and Wang
    #SN_name_vec=[]
    pEW5972_vec=[]
    pEW6355_vec=[]
    vabs6355_vec=[]
    Branch_vec=[]
    Wang_vec=[]
    for i, supernova in enumerate(new_name):
        pEW5972_tmp=np.nan
        pEW6355_tmp=np.nan
        vabs6355_tmp=np.nan
        Branch_tmp=np.nan
        Wang_tmp=np.nan
        for name_sn in enumerate(SNe_BranchWang_class["SN_name"]):
            if name_sn[1] ==  supernova:
                SN_name_tmp, pEW5972_tmp, pEW6355_tmp, vabs6355_tmp, phase_tmp, Branch_tmp, Wang_tmp= SNe_BranchWang_class[name_sn[0]]
        #SN_name_vec.append(SN_name_tmp)
        pEW5972_vec.append(pEW5972_tmp)
        pEW6355_vec.append(pEW6355_tmp)
        vabs6355_vec.append(vabs6355_tmp)
        Branch_vec.append(Branch_tmp)
        Wang_vec.append(Wang_tmp)
    #color plot for Branch 
    color_plot_Branch=[]
    for i in  range(0,np.size(new_name)):
        if Branch_vec[i]=="CN":
            color_plot_Branch.append('r')
        elif  Branch_vec[i]=="SS":
            color_plot_Branch.append('g')
        elif  Branch_vec[i]=="BL":
            color_plot_Branch.append('b')
        elif  Branch_vec[i]=="CL":
            color_plot_Branch.append('y')
        else:
            color_plot_Branch.append('w')
    #color plot for Wang 
    color_plot_Wang=[]
    for i in  range(0,np.size(new_name)):
        if Wang_vec[i]=="91T":
            color_plot_Wang.append('r')
        elif  Wang_vec[i]=="N" :
            color_plot_Wang.append('g')
        elif  Wang_vec[i]=="pec":
            color_plot_Wang.append('b')
        elif  Wang_vec[i]=="HV":
            color_plot_Wang.append('y')
        elif  Wang_vec[i]=="91bg":
            color_plot_Wang.append('c')
        else:
            color_plot_Wang.append('w')
    return color_plot_Wang, color_plot_Branch
Example #25
def datos():
    '''
    Load the data from a file and return them as columns
    '''
    lambda1 = np.loadtxt('espectro.dat', usecols=(-2,))
    flujo = np.loadtxt('espectro.dat', usecols=(-1,))
    return lambda1, flujo
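The two passes over espectro.dat can be collapsed into one call; an equivalent single-read sketch (same negative usecols as above, just combined and unpacked):

import numpy as np

def datos_single_read():
    '''Same result as datos(), reading the file only once.'''
    lambda1, flujo = np.loadtxt('espectro.dat', usecols=(-2, -1), unpack=True)
    return lambda1, flujo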
Example #26
 def testISNPS_DTLZ2(self):
     random = pyotl.utility.Random(1)
     problemGen = lambda: pyotl.problem.real.DTLZ2(3)
     problem = problemGen()
     pathProblem = os.path.join(self.pathData, type(problem).__name__, str(problem.GetNumberOfObjectives()))
     _crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
     crossover = pyotl.crossover.real.CoupleCoupleCrossoverAdapter(_crossover, random)
     mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
     angle1 = 2.3 * math.pi / 180
     angle2 = 45 * math.pi / 180
     amplification = 3
     pfList = []
     for _ in range(self.repeat):
         problem = problemGen()
         initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
         convergenceDirection = pyotl.utility.PyList2BlasVector_Real([1] * problem.GetNumberOfObjectives())
         optimizer = pyotl.optimizer.real.ISNPS(random, problem, initial, crossover, mutation, convergenceDirection, angle1, angle2, amplification)
         while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
             optimizer()
         pf = pyotl.utility.PyListList2VectorVector_Real(
             [list(solution.objective_) for solution in optimizer.GetSolutionSet()])
         pfList.append(pf)
     pathCrossover = os.path.join(pathProblem, type(crossover.GetCrossover()).__name__)
     pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
     pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
     # GD
     indicator = pyotl.indicator.real.DTLZ2GD()
     metricList = [indicator(pf) for pf in pfList]
     rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
     self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
     # IGD
     indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
     metricList = [indicator(pf) for pf in pfList]
     rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
     self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
Example #27
def run_test(name):
    basepath = os.path.join('results', name)
    if not os.path.exists(basepath):
        os.makedirs(basepath)

    ctrl = LBSimulationController(TestLDCSim)
    ctrl.run(ignore_cmdline=True)
    horiz = np.loadtxt('ldc_golden/re400_horiz', skiprows=1)
    vert = np.loadtxt('ldc_golden/re400_vert', skiprows=1)

    plt.plot(2 * (horiz[:,0] - 0.5), -2 * (horiz[:,1] - 0.5), '.', label='Sheu, Tsai paper')
    plt.plot(2 * (vert[:,0] - 0.5), -2 * (vert[:,1] - 0.5), '.', label='Sheu, Tsai paper')
    save_output(basepath, MAX_ITERS)
    plt.legend(loc='lower right')
    plt.gca().yaxis.grid(True)
    plt.gca().xaxis.grid(True)
    plt.gca().xaxis.grid(True, which='minor')
    plt.gca().yaxis.grid(True, which='minor')

    plt.title('Lid Driven Cavity, Re = 400')
    print(os.path.join(basepath, 're400.pdf'))
    plt.savefig(os.path.join(basepath, 're400.pdf' ), format='pdf')

    plt.clf()
    plt.cla()
    plt.show()
    shutil.rmtree(tmpdir)
Example #28
 def _readVICOutputFromFile(self, lat, lon, depths, filespath):
     """Read DSSAT inputs from VIC output files for a specific pixel."""
     startdate = date(self.startyear, self.startmonth, self.startday)
     enddate = date(self.endyear, self.endmonth, self.endday)
     filename = "{0}/output/eb_{1:.{3}f}_{2:.{3}f}".format(
         filespath, lat, lon, self.grid_decimal)
     viceb = np.loadtxt(filename)
     filename = "{0}/output/sub_{1:.{3}f}_{2:.{3}f}".format(
         filespath, lat, lon, self.grid_decimal)
     vicsm = np.loadtxt(filename)
     filename = "{0}/output/sur_{1:.{3}f}_{2:.{3}f}".format(
         filespath, lat, lon, self.grid_decimal)
     vicsr = np.loadtxt(filename)
     filename = "{0}/forcings/data_{1:.{3}f}_{2:.{3}f}".format(
         filespath, lat, lon, self.grid_decimal)
     met = np.loadtxt(filename)
     sm = vicsm[:, 3:len(depths) + 3]
     weather = np.vstack(
         (viceb[:, 3] + viceb[:, 4], met[:, 1], met[:, 2], met[:, 0])).T
     year = vicsm[:, 0].astype(int)
     month = vicsm[:, 1].astype(int)
     day = vicsm[:, 2].astype(int)
     tidx = [i for i in range(len(year)) if date(year[i], month[i], day[
         i]) >= startdate and date(year[i], month[i], day[i]) <= enddate]
     lai = dict(zip([date(year[i], month[i], day[i])
                     for i in range(len(year)) if i in tidx], vicsr[:, 12]))
     return year[tidx], month[tidx], day[tidx], weather[tidx, :], sm[tidx, :], lai
Example #29
    def test_usecols(self):
        a = np.array([[1, 2], [3, 4]], float)
        c = StringIO.StringIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1,))
        assert_array_equal(x, a[:,1])

        a =np.array([[1, 2, 3], [3, 4, 5]], float)
        c = StringIO.StringIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1, 2))
        assert_array_equal(x, a[:, 1:])

        # Testing with arrays instead of tuples.
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
        assert_array_equal(x, a[:, 1:])

        # Checking with dtypes defined converters.
        data = '''JOE 70.1 25.3
                BOB 60.5 27.9
                '''
        c = StringIO.StringIO(data)
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes))
        assert_equal(arr['stid'], ["JOE",  "BOB"])
        assert_equal(arr['temp'], [25.3,  27.9])
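The test above targets the Python 2 StringIO and zip APIs; a sketch of the same usecols checks under Python 3 (not part of the original test suite) would be:

from io import StringIO

import numpy as np

a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert (x == a[:, 1:]).all()

# structured dtype: zip() must be materialised into a list on Python 3
c = StringIO('JOE 70.1 25.3\nBOB 60.5 27.9\n')
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(['stid', 'temp'], ['U4', 'f8'])))
assert list(arr['temp']) == [25.3, 27.9]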
Example #30
 def __init__(self, beta=0.99, sigma=2.0, gamma=1, aH=5.0, aL=0.0, y=-1, dti = 0.5,
     aN=101, psi=0.03, tol=0.01, neg=-1e10, W=45, R=30, a0 = 0, tcost = 0.0, ltv=0.7):
     self.beta, self.sigma, self.gamma, self.psi = beta, sigma, gamma, psi
     self.R, self.W, self.y = R, W, y
     self.tcost, self.ltv, self.dti = tcost, ltv, dti
     self.T = T = (y+1 if (y >= 0) and (y <= W+R-2) else W+R)
     self.aH, self.aL, self.aN, self.aa = aH, aL, aN, aL+(aH-aL)*linspace(0,1,aN)
     self.tol, self.neg = tol, neg
     """ house sizes and number of feasible feasible house sizes """
     self.hh = [0.0, 0.2, 0.5, 0.7, 1.0]
     # self.hh = loadtxt('hh.txt', delimiter='\n')
     self.sp = loadtxt('sp.txt', delimiter='\n')  # survival probability
     self.hN = len(self.hh)
     """ age-specific productivity """
     self.ef = loadtxt('ef.txt', delimiter='\n')
     """ value function and its interpolation """
     self.v = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
     self.vtilde = [[[] for h in range(self.hN)] for y in range(T)]
     """ policy functions used in value function method """
     self.na = [[[0 for i in range(2)] for h in range(self.hN)] for y in range(T)]
     self.ao = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
     self.ho = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
     self.co = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
     self.ro = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
     """ the following paths for a, c, n and u are used in direct and value function methods
     In direct method, those paths are directly calculated, while in the value function
     method the paths are calculated from value and policy functions """
     self.apath = array([a0 for y in range(T)], dtype=float)
     self.hpath = array([0 for y in range(T)], dtype=float)
     self.cpath = array([0 for y in range(T)], dtype=float)
     self.rpath = array([0 for y in range(T)], dtype=float)
     self.spath = array([0 for y in range(T)], dtype=float)
     self.epath = array([0 for y in range(T)], dtype=float) # labor supply in efficiency unit
     self.upath = array([0 for y in range(T)], dtype=float)
import numpy as np
import sys

o, h, l, c = np.loadtxt('BHP.csv', delimiter=',', usecols=(3, 4, 5, 6), unpack=True)

def calc_profit(open, high, low, close):
   #buy just below the open
   buy = open * float(sys.argv[1])

   # daily range
   if low <  buy < high:
      return (close - buy)/buy
   else:
      return 0

func = np.vectorize(calc_profit)
profits = func(o, h, l, c)
print "Profits", profits

real_trades = profits[profits != 0]
print "Number of trades", len(real_trades), round(100.0 * len(real_trades)/len(c), 2), "%"
print "Average profit/loss %", round(np.mean(real_trades) * 100, 2)

winning_trades = profits[profits > 0]
print "Number of winning trades", len(winning_trades), round(100.0 * len(winning_trades)/len(c), 2), "%"
print "Average profit %", round(np.mean(winning_trades) * 100, 2)

losing_trades = profits[profits < 0]
print "Number of losing trades", len(losing_trades), round(100.0 * len(losing_trades)/len(c), 2), "%"
print "Average loss %", round(np.mean(losing_trades) * 100, 2)
import os
import random

import numpy as np
from sklearn import preprocessing, tree
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC

#training = 'training_data.csv'
#training = 'training_data_threshold70.csv' #70%
#training = 'LIWC2015_results_training_data.csv' #7 features from LIWC
#training = 'training_data_allfeatures.csv' #all features from LIWC
training = 'WordNet_15_realtraining.csv'
training_depression_values = 'training_data.csv'

os.chdir(r'E:\NLP Research/sentiment labelled sentences/sentiment labelled sentences/TextFiles')

#dataX = np.loadtxt(training, delimiter=',', usecols=range(1,8)) #70%
#dataX = np.loadtxt(training, delimiter=',', usecols=range(0,7)) #7 features from LIWC
dataX = np.loadtxt(training, delimiter=',') #all features from LIWC
dataY = np.loadtxt(training_depression_values, delimiter=',', usecols=(8,))

X = dataX
y = dataY


clf = tree.DecisionTreeClassifier( random_state= 1 )
#clf = svm.SVC()

#scaler = preprocessing.StandardScaler().fit(X)
#X = scaler.transform(X)
X = preprocessing.scale(X)

clf.fit(X, y) 
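accuracy_score is imported above but never used; a hedged sketch of how the fitted classifier might be scored on a held-out split (the split itself is illustrative, not part of the original script):

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
clf.fit(X_train, y_train)
print('held-out accuracy:', accuracy_score(y_test, clf.predict(X_test)))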
 # create a memory initialization file
 fp = open("./sim_source/sp-%d-label-%d.list"%(i, int(sample_label[i])), "w")
 fp_a = open("./sim_source/sp-%d-label-%d_da.list"%(i, int(sample_label[i])), "w")
 fp_b = open("./sim_source/sp-%d-label-%d_db.list"%(i, int(sample_label[i])), "w")
 fp_c = open("./sim_source/sp-%d-label-%d_dc.list"%(i, int(sample_label[i])), "w")
 fp_d = open("./sim_source/sp-%d-label-%d_dd.list"%(i, int(sample_label[i])), "w")
 # store the parameters first
 for para in para_dict:
     # address where this parameter is stored
     fp.write("@%08X\n"%(para_dict[para]))
     fp_a.write("@%08X\n"%(para_dict[para]))
     fp_b.write("@%08X\n"%(para_dict[para]))
     fp_c.write("@%08X\n"%(para_dict[para]))
     fp_d.write("@%08X\n"%(para_dict[para]))
     # load the parameter values
     para_val = np.loadtxt("./para/"+para+".csv", delimiter=",")
     # print(para_val.shape, len(para_val.shape))
     # if the parameter is a matrix
     if len(para_val.shape)==2:
         for m in range(0, para_val.shape[0]):
             for n in range(0, para_val.shape[1]):
                 DAT = int(para_val[m][n]*65536)
                 fp.write("%08X\n"%(DAT&0xFFFFFFFF))
                 fp_a.write("%03X\n"%((DAT>>24)&0x1FF))
                 fp_b.write("%03X\n"%((DAT>>16)&0x1FF))
                 fp_c.write("%03X\n"%((DAT>>8)&0x1FF))
                 fp_d.write("%03X\n"%((DAT>>0)&0x1FF))
     # if the parameter is a vector
     elif len(para_val.shape)==1:
         for m in range(0, para_val.shape[0]):
             DAT = int(para_val[m]*65536)
from pylab import *
import matplotlib
import numpy
import os
import json

font = {'family' : 'Arial',
        'weight' : 'normal',
        'size'   : 30}
matplotlib.rc('font', **font)
axis_font = {'fontname':'Arial', 'size':'35'}

fig, ax = plt.subplots()
fig.set_figheight(8)
fig.set_figwidth(10)

with open('speedup.txt') as infile:
    trend = numpy.loadtxt(infile)
    # print trend

# with open('speedup_1.txt') as infile:
#     trend_1 = numpy.loadtxt(infile)
    # print trend


# y_max = max(trend[:,1])
#plt.ylim([0, 2])

#plt.plot(trend[:,0], trend[:,1], 'b^-', label='Varying graph size', ms=15, lw=5)
#plt.plot(trend_1[:,0], trend_1[:,1], 'ro-', label='Varying # of object', ms=15, lw=5)
#serial = 2449436.627
# plt.plot(trend[:,0], trend[:,1], 'ro-', label='ExactAlg-baseline', ms=18, lw=5)
# plt.plot(trend[:,0], trend[:,1], 'ro-', label='ExactAlg-improved', ms=18, lw=4)
Example #35
#!/usr/bin/python3

import numpy
from matplotlib import pylab

data = numpy.loadtxt('/tmp/dump3.csv',
                     delimiter=',',
                     skiprows=1)
x = range(len(data))

pylab.subplot(1, 1, 1)
pylab.plot(x, [d[0] for d in data], 'ro', label='ia')
pylab.plot(x, [d[1] for d in data], 'go', label='ib')
pylab.plot(x, [d[2] for d in data], 'bo', label='ic')
pylab.plot(x, [d[3] for d in data], 'r--', label='ia_goal')
pylab.plot(x, [d[4] for d in data], 'g--', label='ib_goal')
pylab.plot(x, [d[5] for d in data], 'b--', label='ic_goal')
pylab.plot(x, [d[6] for d in data], 'rx', label='i_overall')
pylab.plot(x, [d[7] for d in data], 'gx', label='omega')
pylab.plot(x, [d[8] for d in data], 'r', label='van')
pylab.plot(x, [d[9] for d in data], 'g', label='vbn')
pylab.plot(x, [d[10] for d in data], 'b', label='vcn')
pylab.legend()

pylab.show()
def calc_yield(lamb,ks,kt,kstd,temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J):


    # Define variables, initial frame
    rad_fram_aniso_g1 = np.array([[0.0006,0.0,0.0],[0.0,0.0001,0.0],[0.0,0.0,-0.0009]])
    rad_fram_aniso_g2 = np.array([[0.0010,0.0,0.0],[0.0,0.0007,0.0],[0.0,0.0,-0.0020]])
    
    rad_fram_aniso_hyperfine_1 = np.zeros([19,3,3])
    rad_fram_aniso_hyperfine_1[0] = array_construct(0.018394,0.00575,-0.024144,0.119167,-0.090257,-0.105530)
    rad_fram_aniso_hyperfine_1[1] = array_construct(-0.030255,0.134767,-0.104512,0.111178,0.03952,0.065691)
    rad_fram_aniso_hyperfine_1[2] = array_construct(0.041327,-0.039294,0.002033,0.017961,0.78922,0.025615)
    rad_fram_aniso_hyperfine_1[3] = array_construct(0.065617,-0.016154,-0.049462,0.036655,0.014217,0.004047)
    rad_fram_aniso_hyperfine_1[4] = array_construct(0.069089,-0.054902,-0.014187,0.013749,-0.075976,-0.006477)
    rad_fram_aniso_hyperfine_1[5] = array_construct(0.098308,-0.041108,-0.0572,-0.024641,0.013959,0.002803)
    rad_fram_aniso_hyperfine_1[6] = array_construct(0.017844,0.006183,-0.024028,-00.119099,-0.090068,0.105661)
    rad_fram_aniso_hyperfine_1[7] = array_construct(-0.030775,0.135406,-0.104631,-0.110876,0.039322,-0.065607)
    rad_fram_aniso_hyperfine_1[8] = array_construct(0.041235,-0.039174,-0.002061,-0.018150,0.078901,-0.025838)
    rad_fram_aniso_hyperfine_1[9] = array_construct(0.065415,-0.015957,-0.049358,-0.036874,0.014222,-0.004080)
    rad_fram_aniso_hyperfine_1[10] = array_construct(0.069102,-0.054901,-0.014201,-0.014035,-0.075981,0.006618)
    rad_fram_aniso_hyperfine_1[11] = array_construct(0.098464,-0.041245,-0.0571219,0.024346,0.014054,-0.002814)
    rad_fram_aniso_hyperfine_1[12] = array_construct(0.036159,-0.00026,-0.035899,0.038259,-0.007026,-0.004047)
    rad_fram_aniso_hyperfine_1[13] = array_construct(0.036159,-0.00026,-0.035899,0.038259,-0.007026,-0.004047)
    rad_fram_aniso_hyperfine_1[14] = array_construct(0.036159,-0.00026,-0.035899,0.038259,-0.007026,-0.004047)
    rad_fram_aniso_hyperfine_1[15] = array_construct(0.035983,-0.000104,-0.035879,-0.038338,-0.007021,0.004066)
    rad_fram_aniso_hyperfine_1[16] = array_construct(0.035983,-0.000104,-0.035879,-0.038338,-0.007021,0.004066)
    rad_fram_aniso_hyperfine_1[17] = array_construct(0.035983,-0.000104,-0.035879,-0.038338,-0.007021,0.004066)
    rad_fram_aniso_hyperfine_1[18] = array_construct(-0.772676,-0.7811,1.553776,0.000000,-0.061480,0.000443)

    rad_fram_aniso_hyperfine_2 = np.zeros([6,3,3])
    rad_fram_aniso_hyperfine_2[0] = array_construct(0.011586,0.032114,-0.0437,-0.101834,-0.000008,0.000014)
    rad_fram_aniso_hyperfine_2[1] = array_construct(0.011586,0.032114,-0.0437,-0.101834,0.000014,0.000008)
    rad_fram_aniso_hyperfine_2[2] = array_construct(0.011586,0.032114,-0.0437,-0.101834,0.000014,0.000008)
    rad_fram_aniso_hyperfine_2[3] = array_construct(0.011586,0.032114,-0.0437,-0.101834,-0.000008,0.000014)
    rad_fram_aniso_hyperfine_2[4] = array_construct(0.0352,0.034,-0.0692,0.0,0.0,0.0)
    rad_fram_aniso_hyperfine_2[5] = array_construct(0.0352,0.034,-0.0692,0.0,0.0,0.0)

    # axis frames
    data_xyz = np.loadtxt('dmj-an-fn1-ndi-opt.txt',delimiter=',')
    transform_mol = inertia_tensor(data_xyz)
    
    dmj_xyz = np.loadtxt('dmj_in_fn1.txt',delimiter=',')
    transform_dmj = inertia_tensor(dmj_xyz)
    
    ndi_xyz = np.loadtxt('NDI_in_fn1.txt',delimiter=',')
    transform_ndi = inertia_tensor(ndi_xyz)
    
    # Convert to molecular frame
    aniso_g1 = rad_tensor_mol_axis(transform_mol,transform_dmj,rad_fram_aniso_g1)
    aniso_g2 = rad_tensor_mol_axis(transform_mol,transform_ndi,rad_fram_aniso_g2)

    aniso_hyperfine_1 = rad_tensor_mol_axis(transform_mol,transform_dmj,rad_fram_aniso_hyperfine_1)
    aniso_hyperfine_2 = rad_tensor_mol_axis(transform_mol,transform_ndi,rad_fram_aniso_hyperfine_2)
    
    
    # for n=1 
    radius =  20.986e-10 
    
    cnst = (1.0e3*1.25663706e-6*1.054e-34*1.766086e11)/(4.0*np.pi*radius**3)
    aniso_dipolar = np.array([[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,-2.0]])*cnst
    
    # Isotropic components
    g1_iso = 2.0031
    g2_iso = 2.0040
    
    # ISO h1 for the anti conformation
    iso_h1 = np.array([[2.308839,0.903770,-0.034042,-0.077575,1.071863,0.258828,2.308288,0.0902293,-0.034202,0.077648,1.073569,0.259878,-0.166563,-0.166563,-0.166563,-0.166487,-0.166487,-0.166487,0.831260]])
    
    iso_h2 = np.array([[-0.1927,-0.1927,-0.1927,-0.1927,-0.0963,-0.0963]])
    
    spin_numbers_1 = np.array([[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,1.0]])
    spin_numbers_2 = np.array([[0.5,0.5,0.5,0.5,1.0,1.0]])        

    field = np.reshape(temp_dat[:,0],(len(temp_dat[:,0])))
    data_y = np.reshape(temp_dat[:,1],(len(temp_dat[:,1])))
    
    sampled_field = np.linspace(0.0,120.0,40)
    triplet_yield = np.zeros_like(sampled_field)
    standard_error = np.zeros_like(sampled_field)     
    compound_error = np.zeros_like(sampled_field)  


    num_samples = 200
    samples = np.arange(1.0, float(num_samples))
    trip = np.zeros_like(samples)
    
#--------------------------------------------------------------------------------------------------------------------------------------
#zero field lifetime
    
    
    lifetime_zero = 0.0
    zero = np.zeros_like(samples)
    # zero field lifetime
    for index, item in enumerate(samples):
            relaxation_0 = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,0.0,J,ks,kt,lamb,temp,kstd)
            zero[index] = relaxation_0.lifetime()
            lifetime_zero += zero[index]
    zero_error = sts.sem(zero) 
    lifetime_zero = float(lifetime_zero)/float(num_samples)
    lifetime_dif_zero = lifetime_zero - lifetime_exp_zero
    
    
#--------------------------------------------------------------------------------------------------------------------------------------
#resonance field lifetime (B=2J)
    
    lifetime_res = 0.0
    res = np.zeros_like(samples)
    # zero field lifetime
    for index, item in enumerate(samples):
            relaxation_0 = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,2.0*J,J,ks,kt,lamb,temp,kstd)
            res[index] = relaxation_0.lifetime()
            lifetime_res += res[index]
    res_error = sts.sem(res) 
    lifetime_res = float(lifetime_res)/float(num_samples)
    lifetime_dif_res = lifetime_res - lifetime_exp_res
    
#--------------------------------------------------------------------------------------------------------------------------------------
# High field lifetime 
    
    lifetime_high = 0.0
    high = np.zeros_like(samples)
    # zero field lifetime
    for index, item in enumerate(samples):
            relaxation_0 = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,120.0,J,ks,kt,lamb,temp,kstd)
            high[index] = relaxation_0.lifetime()
            lifetime_high += high[index]
    high_error = sts.sem(high) 
    lifetime_high = float(lifetime_high)/float(num_samples)
    lifetime_dif_high = lifetime_high - lifetime_exp_high 
    
    

#--------------------------------------------------------------------------------------------------------------------------------------
  
    
    for index_field,item_field in enumerate(sampled_field):
        total_t = 0.0
        #print("%",100.0*(np.float(index_field))/(np.float(len(sampled_field))))
        for index, item in enumerate(samples):
            np.random.seed(index)
            # Define class       
            relaxation = rotational_relaxation(aniso_dipolar,g1_iso,g2_iso,aniso_g1,aniso_g2,iso_h1,iso_h2,aniso_hyperfine_1,aniso_hyperfine_2,spin_numbers_1,spin_numbers_2,item_field,J,ks,kt,lamb,temp,kstd)
            # Calculate triplet yield
            trip[index] = relaxation.triplet_yield()
            total_t += trip[index]
            
        triplet_yield[index_field] = total_t
        standard_error[index_field] = sts.sem(trip)
        compound_error[index_field] = np.sqrt(standard_error[0]*standard_error[0]*((1.0/triplet_yield[0])**2 + (standard_error[index_field]*standard_error[index_field]*(triplet_yield[index_field]/triplet_yield[0])**2)))
        
    compound_error[0] = 0.0
    triplet_yield = triplet_yield/(triplet_yield[0])
    


    tck = interpolate.splrep(sampled_field, triplet_yield, s=0)
    xnew = field
    ynew = interpolate.splev(xnew, tck, der=0)

    mary = ((ynew)-(data_y-data_y[0]+1.0))*((ynew)-(data_y-data_y[0]+1.0))
    mean_mary = (np.sum(ynew))/float(len(ynew))
    mary_var = (mean_mary - ynew)*(mean_mary - ynew)
    
    lt = np.array([lifetime_zero,lifetime_res,lifetime_high])
    sq_lt_diff = np.array([(lifetime_dif_zero)*(lifetime_dif_zero),(lifetime_dif_res)*(lifetime_dif_res),(lifetime_dif_high)*(lifetime_dif_high)])
    mean_lt = (np.sum(lt))/float(len(lt))
    lt_var = (mean_lt - lt)*(mean_lt - lt)
    
    val = 0.5*float(((len(sampled_field)-1.0)/(len(sampled_field)+1.0))*(np.sum(mary)/np.sum(mary_var)) + (2.0/(len(sampled_field)+1.0))*(np.sum(sq_lt_diff)/np.sum(lt_var)))
    
    print('lamb,ks,kt,kstd',lamb,ks,kt,kstd)
    print(val)

    plt.clf()
    #plt.plot(field,ynew,'o')
    plt.plot(sampled_field,triplet_yield,'o--')
    plt.plot(field,(data_y-data_y[0]+1.0),'o')
    plt.fill_between(sampled_field, triplet_yield - 2.0*compound_error, triplet_yield + 2.0*compound_error,
                 color='salmon', alpha=0.4)
    plt.ylabel('Relative Triplet Yield')
    plt.title('FN1 at (K) '+str(temp))
    plt.xlabel('field (mT)')
    #plt.savefig("fn1"+str(temp)+".pdf") 
    plt.show()
    
    plt.clf()
    plt.plot(np.array([0.0,2.0*J,120.0]),np.array([lifetime_zero,lifetime_res,lifetime_high]), label = 'Calculated')
    plt.plot(np.array([0.0,2.0*J,120.0]),np.array([lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high]),label = 'Experimental')
    plt.fill_between([0.0,2.0*J,120.0], np.array([lifetime_zero,lifetime_res,lifetime_high]) - 2.0*np.array([zero_error,res_error,high_error]), np.array([lifetime_zero,lifetime_res,lifetime_high]) + 2.0*np.array([zero_error,res_error,high_error]),color='g', alpha=0.4)
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=2,
           ncol=2, mode="expand", borderaxespad=-1.)
    plt.xlabel('Field (mT)')
    plt.ylabel('Lifetime')
    plt.title('FN1 extreme narrowing limit lifetime at (K) '+str(temp))
    #plt.savefig("FN1_lifetimes_"+str(temp)+".pdf")"""
    plt.show() 
    

    
    return val
        lifetime_exp_high = 8.078488090136942
        x = [0.3, 0.030867040869067, 27.857193577474373]
        x1 = [0.3759389320459983, 0.0261220075271366, 20.369181086758637]
        J = 35.41
        kstd = 0.0
        #res = minimize(lambda x1,x2,x3,x4,x5,x6,x7,x8: calc_yield(*x1,x2,x3,x4,x5,x6,x7,x8),x0=x,bounds=bnds,args=(kstd,temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J))
        res = differential_evolution(lambda x1,x2,x3,x4,x5,x6,x7,x8: calc_yield(*x1,x2,x3,x4,x5,x6,x7,x8),bounds=bnds,args=(kstd,temp,temp_dat,lifetime_exp_zero,lifetime_exp_res,lifetime_exp_high,J),maxiter=10)
       
        f.write("\n")
        f.write("x0 for T=273k\n")
        f.write(str(res)+"\n")
        for i in range(0,len(res.x)):
            p.write(str(res.x[i])+",")
        p.write(str(temp)+"\n")
"""
temp_dat = np.loadtxt('t_353_new.txt',delimiter=',')

lifetime_exp_zero = 7.049588177788923
lifetime_exp_res = 1.4796659908006953
lifetime_exp_high = 8.078488090136942

J = 35.41

# No initital triplet pop
lamb = 0.0
ks = 0.05314714309198708
kt = 7.9861117182848975
kstd = 8.017707448438157

# Experimental rate
#ks = 0.124363
Exemple #38
0
        result = 0

    return result


# Assume nshocks is not equal for each model

pc_to_cm = 3.086e18
Msun = 1.98E33
dist = 10  #kpc

for idx, path in enumerate(path_files):

    #20 profiles, regardless of nshocks:
    # num_div_print = 20    !Number of profile printouts
    data_vs_rad = np.loadtxt(path + 'data_vs_rad.dat')
    data_vs_rad_age = np.unique(data_vs_rad[:, 19])  # time_yr_run

    if data_vs_rad_age[0] == 0:
        data_vs_rad_age = data_vs_rad_age[1:]

    data_vs_rad_iRS = np.zeros(len(data_vs_rad_age), dtype=int)
    data_vs_rad_iCD = np.zeros(len(data_vs_rad_age), dtype=int)
    data_vs_rad_iFS = np.zeros(len(data_vs_rad_age), dtype=int)

    r = [[] for _ in range(len(data_vs_rad_age))]
    dm = [[] for _ in range(len(data_vs_rad_age))]
    lagm = [[] for _ in range(len(data_vs_rad_age))]

    r_amb = [[] for _ in range(len(data_vs_rad_age))]
    dm_amb = [[] for _ in range(len(data_vs_rad_age))]
Exemple #39
0
import numpy as np
import csv

methods_name = ['188-bit', 'AAC', 'ASDC', 'CKSAAP', 'CTD']
for it in range(5):
    name = methods_name[it]
    print(name + ':')

    f1 = np.loadtxt('D:/study/Bioinformatics/QSP/200p_200n/10_fold/' + name +
                    '/train_' + name + '.csv',
                    delimiter=',',
                    skiprows=1)
    m = 0
    n = 20
    k = 0
    for k in range(10):
        index = 0
        temp = np.zeros((40, np.shape(f1)[1]))
        for i in range(m, n):
            temp[index] = f1[i]
            index = index + 1
        for j in range(m, n):
            temp[index] = f1[j + 200]
            index = index + 1
        m = m + 20
        n = n + 20
        with open('D:/study/Bioinformatics/QSP/200p_200n/10_fold/' + name +
                  '/test/test_' + name + '_' + str(k) + '.csv',
                  'w',
                  newline='') as csvfile:
            writer = csv.writer(csvfile)
            # The source snippet ends here; writing the assembled fold out is
            # the evident intent:
            writer.writerows(temp)
Exemple #40
def process_singlecmds(singlecmds, multicmds, G):
    """Checks the validity of command parameters and creates instances of classes of parameters.
        
    Args:
        singlecmds (dict): Commands that can only occur once in the model.
        multicmds (dict): Commands that can have multiple instances in the model (required to pass to process_materials_file function).
        G (class): Grid class instance - holds essential parameters describing the model.
    """

    # Check validity of command parameters in order needed
    # messages
    cmd = '#messages'
    if singlecmds[cmd] != 'None':
        tmp = singlecmds[cmd].split()
        if len(tmp) != 1:
            raise CmdInputError(cmd + ' requires exactly one parameter')
        if singlecmds[cmd].lower() == 'y':
            G.messages = True
        elif singlecmds[cmd].lower() == 'n':
            G.messages = False
        else:
            raise CmdInputError(cmd +
                                ' requires input values of either y or n')

    # Title
    cmd = '#title'
    if singlecmds[cmd] != 'None':
        G.title = singlecmds[cmd]
        if G.messages:
            print('Model title: {}'.format(G.title))

    # Number of processors to run on (OpenMP)
    cmd = '#num_threads'
    ompthreads = os.environ.get('OMP_NUM_THREADS')
    if singlecmds[cmd] != 'None':
        tmp = tuple(int(x) for x in singlecmds[cmd].split())
        if len(tmp) != 1:
            raise CmdInputError(
                cmd +
                ' requires exactly one parameter to specify the number of threads to use'
            )
        if tmp[0] < 1:
            raise CmdInputError(
                cmd + ' requires the value to be an integer not less than one')
        G.nthreads = tmp[0]
    elif ompthreads:
        G.nthreads = int(ompthreads)
    else:
        # Set number of threads to number of physical CPU cores, i.e. avoid hyperthreading with OpenMP
        G.nthreads = psutil.cpu_count(logical=False)
    if G.messages:
        print('Number of threads: {}'.format(G.nthreads))

    # Spatial discretisation
    cmd = '#dx_dy_dz'
    tmp = [float(x) for x in singlecmds[cmd].split()]
    if len(tmp) != 3:
        raise CmdInputError(cmd + ' requires exactly three parameters')
    if tmp[0] <= 0:
        raise CmdInputError(
            cmd +
            ' requires the x-direction spatial step to be greater than zero')
    if tmp[1] <= 0:
        raise CmdInputError(
            cmd +
            ' requires the y-direction spatial step to be greater than zero')
    if tmp[2] <= 0:
        raise CmdInputError(
            cmd +
            ' requires the z-direction spatial step to be greater than zero')
    G.dx = tmp[0]
    G.dy = tmp[1]
    G.dz = tmp[2]
    if G.messages:
        print('Spatial discretisation: {:g} x {:g} x {:g}m'.format(
            G.dx, G.dy, G.dz))

    # Domain
    cmd = '#domain'
    tmp = [float(x) for x in singlecmds[cmd].split()]
    if len(tmp) != 3:
        raise CmdInputError(cmd + ' requires exactly three parameters')
    G.nx = round_value(tmp[0] / G.dx)
    G.ny = round_value(tmp[1] / G.dy)
    G.nz = round_value(tmp[2] / G.dz)
    if G.nx == 0 or G.ny == 0 or G.nz == 0:
        raise CmdInputError(cmd +
                            ' requires at least one cell in every dimension')
    if G.messages:
        print(
            'Domain size: {:g} x {:g} x {:g}m ({:d} x {:d} x {:d} = {:g} cells)'
            .format(tmp[0], tmp[1], tmp[2], G.nx, G.ny, G.nz,
                    (G.nx * G.ny * G.nz)))
        # Guesstimate at memory usage
        mem = (((G.nx + 1) * (G.ny + 1) *
                (G.nz + 1) * 13 * np.dtype(floattype).itemsize + (G.nx + 1) *
                (G.ny + 1) * (G.nz + 1) * 18) * 1.1) + 30e6
        print('Memory (RAM) usage: ~{} required, {} available'.format(
            human_size(mem), human_size(psutil.virtual_memory().total)))

    # Time step CFL limit - use either 2D or 3D (default)
    cmd = '#time_step_limit_type'
    if singlecmds[cmd] != 'None':
        tmp = singlecmds[cmd].split()
        if len(tmp) != 1:
            raise CmdInputError(cmd + ' requires exactly one parameter')
        if singlecmds[cmd].lower() == '2d':
            if G.nx == 1:
                G.dt = 1 / (c * np.sqrt((1 / G.dy) * (1 / G.dy) + (1 / G.dz) *
                                        (1 / G.dz)))
            elif G.ny == 1:
                G.dt = 1 / (c * np.sqrt((1 / G.dx) * (1 / G.dx) + (1 / G.dz) *
                                        (1 / G.dz)))
            elif G.nz == 1:
                G.dt = 1 / (c * np.sqrt((1 / G.dx) * (1 / G.dx) + (1 / G.dy) *
                                        (1 / G.dy)))
            else:
                raise CmdInputError(
                    cmd +
                    ' 2D CFL limit can only be used when one dimension of the domain is one cell'
                )
        elif singlecmds[cmd].lower() == '3d':
            G.dt = 1 / (c * np.sqrt((1 / G.dx) * (1 / G.dx) + (1 / G.dy) *
                                    (1 / G.dy) + (1 / G.dz) * (1 / G.dz)))
        else:
            raise CmdInputError(cmd +
                                ' requires input values of either 2D or 3D')
    else:
        G.dt = 1 / (c * np.sqrt((1 / G.dx) * (1 / G.dx) + (1 / G.dy) *
                                (1 / G.dy) + (1 / G.dz) * (1 / G.dz)))

    # Round down time step to nearest float with precision one less than hardware maximum. Avoids inadvertently exceeding the CFL due to binary representation of floating point number.
    G.dt = round_value(G.dt, decimalplaces=d.getcontext().prec - 1)

    if G.messages:
        print('Time step: {:g} secs'.format(G.dt))

    # Time step stability factor
    cmd = '#time_step_stability_factor'
    if singlecmds[cmd] != 'None':
        tmp = tuple(float(x) for x in singlecmds[cmd].split())
        if len(tmp) != 1:
            raise CmdInputError(cmd + ' requires exactly one parameter')
        if tmp[0] <= 0 or tmp[0] > 1:
            raise CmdInputError(
                cmd +
                ' requires the value of the time step stability factor to be between zero and one'
            )
        G.dt = G.dt * tmp[0]
        if G.messages:
            print('Time step (modified): {:g} secs'.format(G.dt))

    # Time window
    cmd = '#time_window'
    tmp = singlecmds[cmd].split()
    if len(tmp) != 1:
        raise CmdInputError(
            cmd +
            ' requires exactly one parameter to specify the time window. Either in seconds or number of iterations.'
        )
    tmp = tmp[0].lower()
    # If real floating point value given
    if '.' in tmp or 'e' in tmp:
        if float(tmp) > 0:
            G.timewindow = float(tmp)
            G.iterations = round_value((float(tmp) / G.dt)) + 1
        else:
            raise CmdInputError(cmd + ' must have a value greater than zero')
    # If number of iterations given
    else:
        G.timewindow = (int(tmp) - 1) * G.dt
        G.iterations = int(tmp)
    if G.messages:
        print('Time window: {:g} secs ({} iterations)'.format(
            G.timewindow, G.iterations))

    # PML
    cmd = '#pml_cells'
    if singlecmds[cmd] != 'None':
        tmp = singlecmds[cmd].split()
        if len(tmp) != 1 and len(tmp) != 6:
            raise CmdInputError(cmd + ' requires either one or six parameters')
        if len(tmp) == 1:
            G.pmlthickness = (int(tmp[0]), int(tmp[0]), int(tmp[0]),
                              int(tmp[0]), int(tmp[0]), int(tmp[0]))
        else:
            G.pmlthickness = (int(tmp[0]), int(tmp[1]), int(tmp[2]),
                              int(tmp[3]), int(tmp[4]), int(tmp[5]))
    if 2 * G.pmlthickness[0] >= G.nx or 2 * G.pmlthickness[
            1] >= G.ny or 2 * G.pmlthickness[2] >= G.nz or 2 * G.pmlthickness[
                3] >= G.nx or 2 * G.pmlthickness[
                    4] >= G.ny or 2 * G.pmlthickness[5] >= G.nz:
        raise CmdInputError(cmd + ' has too many cells for the domain size')

    # src_steps
    cmd = '#src_steps'
    if singlecmds[cmd] != 'None':
        tmp = singlecmds[cmd].split()
        if len(tmp) != 3:
            raise CmdInputError(cmd + ' requires exactly three parameters')
        G.srcstepx = round_value(float(tmp[0]) / G.dx)
        G.srcstepy = round_value(float(tmp[1]) / G.dy)
        G.srcstepz = round_value(float(tmp[2]) / G.dz)
        if G.messages:
            print(
                'All sources will step {:g}m, {:g}m, {:g}m for each model run.'
                .format(G.srcstepx * G.dx, G.srcstepy * G.dy,
                        G.srcstepz * G.dz))

    # rx_steps
    cmd = '#rx_steps'
    if singlecmds[cmd] != 'None':
        tmp = singlecmds[cmd].split()
        if len(tmp) != 3:
            raise CmdInputError(cmd + ' requires exactly three parameters')
        G.rxstepx = round_value(float(tmp[0]) / G.dx)
        G.rxstepy = round_value(float(tmp[1]) / G.dy)
        G.rxstepz = round_value(float(tmp[2]) / G.dz)
        if G.messages:
            print(
                'All receivers will step {:g}m, {:g}m, {:g}m for each model run.'
                .format(G.rxstepx * G.dx, G.rxstepy * G.dy, G.rxstepz * G.dz))

    # Excitation file for user-defined source waveforms
    cmd = '#excitation_file'
    if singlecmds[cmd] != 'None':
        tmp = singlecmds[cmd].split()
        if len(tmp) != 1:
            raise CmdInputError(cmd + ' requires exactly one parameter')
        excitationfile = tmp[0]

        # See if file exists at specified path and if not try input file directory
        if not os.path.isfile(excitationfile):
            excitationfile = os.path.join(G.inputdirectory, excitationfile)

        # Get waveform names
        with open(excitationfile, 'r') as f:
            waveformIDs = f.readline().split()

        # Read all waveform values into an array
        waveformvalues = np.loadtxt(excitationfile,
                                    skiprows=1,
                                    dtype=floattype)

        for waveform in range(len(waveformIDs)):
            if any(x.ID == waveformIDs[waveform] for x in G.waveforms):
                raise CmdInputError(
                    'Waveform with ID {} already exists'.format(
                        waveformIDs[waveform]))
            w = Waveform()
            w.ID = waveformIDs[waveform]
            w.type = 'user'
            if len(waveformvalues.shape) == 1:
                w.uservalues = waveformvalues[:]
            else:
                w.uservalues = waveformvalues[:, waveform]

            if G.messages:
                print('User waveform {} created.'.format(w.ID))

            G.waveforms.append(w)
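
# A small stand-alone sketch (separate from the function above) of how the
# '#dx_dy_dz', '#domain' and 3D CFL-limit calculations combine; values made up.
import math

dx, dy, dz = 0.002, 0.002, 0.002                     # '#dx_dy_dz: 0.002 0.002 0.002'
domain = (0.2, 0.2, 0.1)                             # '#domain: 0.2 0.2 0.1'
nx, ny, nz = (round(s / d) for s, d in zip(domain, (dx, dy, dz)))

c = 299792458.0                                      # speed of light (m/s)
dt = 1.0 / (c * math.sqrt((1 / dx) ** 2 + (1 / dy) ** 2 + (1 / dz) ** 2))
print(nx, ny, nz, dt)                                # 100 100 50, dt ~ 3.85e-12 s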
Exemple #41
0
import numpy as np
import matplotlib.pyplot as plt

aosep = np.loadtxt('aosep.txt')
aodjmag = np.loadtxt('aodjmag.txt')

Jp = 13.379
Kp = 17.131

J = Jp + aodjmag
K = J + 0.1918 + J * 0.08156

aodkmag = K - Kp
aosep[0] = aosep[2]
aosep[1] = aosep[2]

plt.plot(aosep, aodjmag, 'b--', label='J mag')
plt.plot(aosep, aodkmag, 'g', label='Kep mag')
plt.ylim(12, 0)  # inverted y-axis (magnitudes)
plt.xlim(0, 3)
plt.show()
Exemple #42
0
                'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
                'mixup': (1, 0.0, 1.0)}  # image mixup (probability)

        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
        opt.notest, opt.nosave = True, True  # only test/save final epoch
        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
        if opt.bucket:
            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists

        for _ in range(300):  # generations to evolve
            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
                # Select parent(s)
                parent = 'single'  # parent selection method: 'single' or 'weighted'
                x = np.loadtxt('evolve.txt', ndmin=2)
                n = min(5, len(x))  # number of previous results to consider
                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
                w = fitness(x) - fitness(x).min()  # weights
                if parent == 'single' or len(x) == 1:
                    # x = x[random.randint(0, n - 1)]  # random selection
                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
                elif parent == 'weighted':
                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                # Mutate
                mp, s = 0.8, 0.2  # mutation probability, sigma
                npr = np.random
                npr.seed(int(time.time()))
                g = np.array([x[0] for x in meta.values()])  # gains 0-1
                ng = len(meta)
Exemple #43
import numpy as np
import pandas as pd
import os, glob

feats = np.load('../flickr/feats.npy')
feat_lookup = np.loadtxt('../BoW_int.dat')

urls = pd.read_fwf('../NUS-WIDE-urls.txt')

left_images = open('left_images.txt', 'w')

curr, prob = len(glob.glob('*')), 0
for img_f in feats:
    for idx, f in enumerate(feat_lookup):
        if np.array_equal(img_f, f):
            # web scrape
            #link = urls.iloc[idx, 0].split()[2]
            #os.system('wget ' + link)

            # local move
            link = urls.iloc[idx, 0].split()[0]
            os.system('mv ../' + '/'.join(link.split('/')[2:]) + ' .')

            num_files = len(glob.glob('*'))
            if num_files == curr + 1:
                curr += 1
            elif num_files == curr:
                prob += 1
                left_images.write(link + '\n')
            else:
                raise
Exemple #44
#!/usr/bin/env python3

import matplotlib.pyplot as plt
import numpy as np



plt.style.use( 'seaborn-paper' )
fig = plt.figure()
ax = fig.add_axes( [0.22,0.22,0.72,0.73] )

ax.set_xlabel( '$k_b T$' , fontsize=25 )
ax.set_ylabel( '$S^2$' , fontsize=25 )


data = np.loadtxt( 'custom' )
t = data[:,0]
sdsd = data[:,2] 
ss = 3 * np.loadtxt( 'td_diff.dat' )[:,1]

ax.plot( t , sdsd , color='black' , label=r'$<S^2_{\mathrm{imp}}>$' , marker='d' , markersize=5 , markevery=3 )
ax.plot( t , ss , color='black' , label=r'$<S^2_{\mathrm{tot}}>$' , marker='^' , markersize=6 , markevery=3  )


lims = [ -0.05 , 1.2 ]
ticks = np.arange( 0 , 1.0 , 0.25 )

ax.set_ylim( lims )
ax.set_yticks( ticks )
ax.set_xlim([ 10**(-15) , 1 ])
ax.set_xticks([ 10**(-15) , 10**(-10) , 10**(-5) , 1 ])
Exemple #45
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D


# Load the data

data = np.loadtxt("datos.dat")
data = data.T
# Work out the format
x_t = []  # probability density values
z_t = []  # position values
for i in range(len(data) // 2):
    x_t.append(data[2*i+1])
    z_t.append(data[2*i][0])


x_t = np.array(x_t).T
z_t = np.array(z_t).T
print(len(z_t), len(x_t[0]))

########################
# Plot the animation   #
########################

fig, ax = plt.subplots()

# Set the plot limits
lim = 80
Exemple #46
0
def read_numpy_array(filename):
    # read numpy array from a textfile
    lines = np.loadtxt(filename, delimiter=",", unpack=False)
    return lines
Exemple #47
import numpy as np
# Imports used below but missing from the snippet (assuming a Keras model saved
# as 'diabetes_model.h5'):
from io import StringIO
from keras.models import load_model

data = ""
data += input('Number of times pregnant: ') + ","
data += input('Plasma glucose concentration: ') + ","
data += input('blood pressure in mm Hg: ') + ","
data += input('Tricep skinfold thickness: ') + ","
data += input('2-hour serum insulin: ') + ","
data += input('body mass index: ') + ","
data += input('diabetes pedigree function: ') + ","
data += input('age: ') + " "

data = StringIO(data)

# load pima indians dataset
dataset = np.loadtxt(data, delimiter=",")
dataset = np.reshape(dataset, (-1, 8))

X = dataset[:, 0:8]
print(X)

# returns a compiled model
# identical to the previous one
model = load_model('diabetes_model.h5')

# calculate predictions
prediction = model.predict(dataset)
# round predictions
rounded = round(prediction[0][0])

# Print predictions out in understandable terms
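# The snippet stops before the final output step; a plausible ending (an
# assumption, not from the source) would be:
print('Diabetic' if rounded == 1.0 else 'Not diabetic')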
Exemple #48
# Imports used below but missing from the snippet (get_image_size and id2color
# are project-specific helpers that are not shown):
import os
import numpy as np
from PIL import Image, ImageDraw


def to_generate_annotation_labels(directory):
    print(
        '#########################to_generate_annotation_labels ######################'
    )

    parent_path = os.path.dirname(directory)
    if len(parent_path) == (len(directory) - 1):
        parent_path = os.path.dirname(parent_path)
    output_path = parent_path + '/labels/'
    isExists = os.path.exists(output_path)
    if not isExists:
        os.makedirs(output_path)

    class_info = np.loadtxt(parent_path + '/count_class.txt', delimiter=";")
    # for filename in os.listdir(ARGS.input_dir):
    num = 0
    for root, dirs, filenames in os.walk(directory):

        for filename in filenames:

            if filename.endswith(".txt"):

                num = num + 1
                filepath = os.path.join(root, filename)
                filepath_2 = filepath + ' ' + str(num)
                print(filepath_2 + '---> ' +
                      'step_2: to_make_annotation_labels')

                # Removes .txt extension
                basename = os.path.splitext(filename)[0]
                basepath = os.path.splitext(filepath)[0]

                # Set image size
                metapath = basepath + '.meta'
                if os.path.isfile(metapath):
                    width, height = get_image_size(metapath)
                else:
                    print('Error: *.meta file does not exist')
                    exit()

                # Create a single-channel 8-bit image
                im = Image.new(mode='L', size=(width, height), color=0)
                draw = ImageDraw.Draw(im)

                with open(filepath, "r") as file:
                    for line in file:
                        nums = line.split(';')
                        vertices = np.asarray(nums[1:-2], dtype=int)
                        nvertices = len(vertices) / 2
                        assert np.mod(nvertices, 1) == 0
                        vertices = vertices.reshape(int(nvertices), 2)
                        # print('len_nums:',len(nums))
                        if (len(nums) >= 2):
                            classID = np.asarray(int(nums[-2]))
                            col = id2color(classID, class_info)
                            draw.polygon([(x, y) for x, y in vertices],
                                         fill=col,
                                         outline=None)

                # Create path to the figure we are going to save
                if output_path:
                    figPath = output_path + basename
                else:
                    figPath = basepath

                im.save(figPath + '.png')
Exemple #49
0
import os
import features
import gengraph  # project-specific module called by gensif() below
import numpy as np
from numpy import loadtxt
from collections import defaultdict


def gensif(temp, f):
    infile2 = os.path.dirname(os.path.abspath(__file__)) + '/csvFiles/pred.csv'
    fh1 = open(infile2, 'w')
    for k in temp:
        k = int(k)
        print(str(k), file=fh1)
    fh1.close()
    gengraph.main(f)

features.main()
tex=loadtxt('/home/aditya/Project/csvFiles/trainNLP.csv',delimiter=',')
tex_dev=loadtxt('/home/aditya/Project/csvFiles/devNLP.csv',delimiter=',')
tex_test=loadtxt('/home/aditya/Project/csvFiles/testNLP.csv',delimiter=',')
vals= tex.shape
vals_dev= tex_dev.shape
vals_test= tex_test.shape
ty=tex[:,vals[1]-1]
tx=tex[:,0:vals[1]-2]
ty_dev=tex_dev[:,vals_dev[1]-1]
tx_dev=tex_dev[:,0:vals_dev[1]-2]
tx_test=tex_test[:,0:vals_test[1]-1]
#clf=svm.SVC()
#clf.fit(tx,ty)/home/aditya/Project/dev
newtext_train='./GRN.py --a1-dir /home/aditya/Project/train --a2-dir /home/aditya/Project/train --pred-sif /home/aditya/Project/output_train.sif /home/aditya/Project/train/PMID-*.txt'
newtext_dev='./GRN.py --a1-dir /home/aditya/Project/dev --a2-dir /home/aditya/Project/dev --pred-sif /home/aditya/Project/output_dev.sif /home/aditya/Project/dev/PMID-*.txt'
print('....')
Exemple #50
def main(mkt, str_buy_indicator, str_boll_threshold, str_sell_offeset,
         stop_loss):

    global analysisPath
    analysisPath = prjPath + mkt + "/analysis/"

    os.chdir(prjPath)

    with open(prjPath + 'symbols' + mkt + '.txt') as f:
        ls_symbols = f.read().splitlines()

    dt_timeofday = dt.timedelta(hours=16)
    dt_start = dt.datetime(2017, 2, 15) + dt_timeofday
    dt_end = dt.datetime(2017, 12, 29) + dt_timeofday
    ldt_timestamps = qdu.getNYSEdays(dt_start, dt_end, dt_timeofday)
    #c_dataobj = da.DataAccess(mkt + 'Yahoo')
    #sls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    #ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    #d_data = dict(zip(ls_keys, ldf_data))
    #df_close = d_data['close']
    c_dataobj = da.DataAccess(mkt + 'Yahoo', cachestalltime=0)
    #global df_close
    #df_close = c_dataobj.get_data(ldt_timestamps, ls_symbols, "close")
    #df_close = df_close.fillna(method='ffill')
    #df_close = df_close.fillna(method='bfill')

    #global df_open
    #df_open = c_dataobj.get_data(ldt_timestamps, ls_symbols, "open")
    #df_open = df_open.fillna(method='ffill')
    #df_open = df_open.fillna(method='bfill')

    buy_indicator = np.float32(str_buy_indicator)
    boll_threshold = np.float32(str_boll_threshold)
    sell_offeset = int(str_sell_offeset)
    '''Main Function'''
    ls_files = os.listdir(analysisPath)

    orders = []
    new_order = []

    for file_name in ls_files:
        if not file_name.startswith("analysis"):
            continue
        str_date = file_name[9:17]
        dt_date = dt.datetime.strptime(str_date, "%Y%m%d")
        analysis_row = np.loadtxt(analysisPath + file_name,
                                  dtype='S8,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4',
                                  delimiter=',',
                                  comments="#",
                                  skiprows=1)

        # Sorting the portfolio by BOLLINGER value
        analysis_row = sorted(analysis_row, key=lambda x: x[10])

        # Create two list for symbol names and allocation
        #ls_order_syms = [] lf_order_smas = [] lf_order_macd = [] lf_order_bolls = []

        ldt_timestamp_idx = ldt_timestamps.index(dt_date + dt_timeofday) + 1

        for row in analysis_row:
            if row[4] >= buy_indicator and row[7] <= boll_threshold:
                #ls_order_syms.append(row[0]) #lf_order_bolls.append(row[7]) #lf_order_macd.append(row[10]) #lf_order_smas.append(row[4])

                #new_order =[ldt_timestamps[ldt_timestamp_idx].year, ldt_timestamps[ldt_timestamp_idx].month, ldt_timestamps[ldt_timestamp_idx].day, row[0], 'Buy', int(1000/df_open.iloc[ldt_timestamp_idx, ls_symbols.index(row[0])])]
                new_order = [
                    ldt_timestamps[ldt_timestamp_idx].year,
                    ldt_timestamps[ldt_timestamp_idx].month,
                    ldt_timestamps[ldt_timestamp_idx].day, row[0], 'Buy', 1000
                ]
                orders.append(new_order)

                #if(stop_loss == 0):
                new_order = [
                    ldt_timestamps[ldt_timestamp_idx + sell_offeset].year,
                    ldt_timestamps[ldt_timestamp_idx + sell_offeset].month,
                    ldt_timestamps[ldt_timestamp_idx + sell_offeset].day,
                    row[0], 'Sell', 1000
                ]
                #new_order =[ldt_timestamps[ldt_timestamp_idx+sell_offeset].year, ldt_timestamps[ldt_timestamp_idx+sell_offeset].month, ldt_timestamps[ldt_timestamp_idx+sell_offeset].day, row[0], 'Sell', int(1000/df_open.iloc[ldt_timestamp_idx, ls_symbols.index(row[0])])]
                #else:
                #    new_order = stop_loss_order(stop_loss, ldt_timestamps, ldt_timestamp_idx, sell_offeset, ls_symbols.index(row[0]), row[0])

                orders.append(new_order)

    write_order(orders)
    #write_denorm_order(orders, ls_symbols)

    #if(len(ls_order_syms)>0):
    #print str_date
    #for i in range(0, len(ls_order_syms)):
    #print ls_order_syms[i]+ ",{},{}".format(lf_order_smas[i],lf_order_bolls[i])
    print('order generated!')
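
# Illustrative only (dates and symbol are made up): each order row appended
# above has this shape, with the matching sell placed sell_offeset trading
# days later.
example_buy = [2017, 3, 1, 'XYZ', 'Buy', 1000]
example_sell = [2017, 3, 8, 'XYZ', 'Sell', 1000]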
Exemple #51
0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig = plt.figure()

data = np.loadtxt('position_movie.res')
print(data.shape)
domain_x = 20
domain_y = 50
nframes = 100

frames = np.zeros((nframes, domain_y, domain_x))
ims = []
for k in range(nframes):
    istart = k * domain_y
    iend = istart + domain_y
    frames[k, :, :] = data[istart:iend, :]
    im = plt.imshow(frames[k, :, :], animated=True)
    ims.append([im])

interval = 50
repeat_delay = interval * nframes
ani = animation.ArtistAnimation(fig,
                                ims,
                                interval=interval,
                                blit=True,
                                repeat_delay=repeat_delay)
plt.show()
Exemple #52
N = len(mps)

#target milestone (bound state) (index starts from 1)
Nend = 12 #r = 6.0 A


#---------- construct K matrix ----------------------------------------
K = np.zeros((N,N))

t = np.zeros(N)

Nhit = np.zeros((N,N))

for i in range(N-1):
    l = np.loadtxt('milestone_%dA/milestone-data.dat'%(mps[i]))
    K[i,i+1] = l[4]
    Nhit[i,i+1] = l[6]
    if i!=0:
        K[i,i-1] = l[3]
        Nhit[i,i-1] = l[5]
    t[i] = l[2]
K[N-1,0] = 1.0    #cyclic or periodic boundary condition to get steady state


#define function to calculate free energy profile and Mean first passage time from a given matrix
def MFPT(Q,t,N):
    K = Q.copy()
    #multiply the t[i]'s to get back the actual K
    for i in range(N):
        for j in range(N):
Exemple #53
# This Python file uses the following encoding: utf-8
# Author: Aliaksei Antonau ([email protected])

# import the necessary packages

import numpy as np
import matplotlib.pyplot as plt
#import plotly.plotly as py

fileName = "/home/alant/python/nirs/image_classif/dict_results.txt"
file = open(fileName, 'r')

dict_results = np.loadtxt(file)

prob = []
idf = []
basis = []



for elem in dict_results:
	if (elem[3]>0.8) and (elem[4]>0.2):
		prob.append(elem[0])
		idf.append(elem[1])
		basis.append(elem[2])


print(prob, '\n')
print(idf, '\n')
print(basis, '\n')
Exemple #54
0
def get_redshift_efficiency(simtype, targets, truth, targets_in_tile, obsconditions, params, ignore_obscondition=False):
    """
    Simple model to get the redshift efficiency from the observational conditions or observed magnitudes+redshift

    Args:
        simtype: ELG, LRG, QSO, MWS, BGS
        targets: target catalog table; currently used only for TARGETID
        truth: truth table with OIIFLUX, TRUEZ
        targets_in_tile: dictionary. Keys correspond to tileids, its values are the
            arrays of targetids observed in that tile.
        obsconditions: table observing conditions with columns
           'TILEID': array of tile IDs
           'AIRMASS': array of airmass values on a tile
           'EBMV': array of E(B-V) values on a tile
           'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
           'MOONFRAC': array of moonfraction values on a tile.
           'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
        params: dictionary of quickcat parameters (read from the yaml parameter file)
        ignore_obscondition: if True, no variation of efficiency with obs. conditions (adjustment of exposure time should correct for mean change of S/N)
    Returns:
        tuple of arrays (observed, p) both with same length as targets

        observed: boolean array of whether the target was observed in these tiles

        p: probability to get this redshift right
    """
    targetid = targets['TARGETID']
    n = len(targetid)

    try:
        if 'DECAM_FLUX' in targets.dtype.names :
            true_gflux = targets['DECAM_FLUX'][:, 1]
            true_rflux = targets['DECAM_FLUX'][:, 2]
        else:
            true_gflux = targets['FLUX_G']
            true_rflux = targets['FLUX_R']
    except Exception:
        raise Exception('Missing photometry needed to estimate redshift efficiency!')

    a_small_flux=1e-40
    true_gflux[true_gflux<a_small_flux]=a_small_flux
    true_rflux[true_rflux<a_small_flux]=a_small_flux
    

    
    if (obsconditions is None) or ('OIIFLUX' not in truth.dtype.names):
        raise Exception('Missing obsconditions and flux information to estimate redshift efficiency')

    
    
    if (simtype == 'ELG'):
        # Read the model OII flux threshold (FDR fig 7.12 modified to fit redmonster efficiency on OAK)
        # filename = resource_filename('desisim', 'data/quickcat_elg_oii_flux_threshold.txt')
        
        # Read the model OII flux threshold (FDR fig 7.12)
        filename = resource_filename('desisim', 'data/elg_oii_flux_threshold_fdr.txt')
        fdr_z, modified_fdr_oii_flux_threshold = np.loadtxt(filename, unpack=True)
        
        # Compute OII flux thresholds for truez
        oii_flux_limit = np.interp(truth['TRUEZ'],fdr_z,modified_fdr_oii_flux_threshold)
        oii_flux_limit[oii_flux_limit<1e-20]=1e-20
        
        # efficiency is modeled as a function of flux_OII/f_OII_threshold(z) and an arbitrary sigma_fudge
        
        snr_in_lines       = params["ELG"]["EFFICIENCY"]["SNR_LINES_SCALE"]*7*truth['OIIFLUX']/oii_flux_limit
        snr_in_continuum   = params["ELG"]["EFFICIENCY"]["SNR_CONTINUUM_SCALE"]*true_rflux
        snr_tot            = np.sqrt(snr_in_lines**2+snr_in_continuum**2)
        sigma_fudge        = params["ELG"]["EFFICIENCY"]["SIGMA_FUDGE"]
        nsigma             = 3.
        simulated_eff = eff_model(snr_tot,nsigma,sigma_fudge)

    elif(simtype == 'LRG'):
        
       
        r_mag = 22.5 - 2.5*np.log10(true_rflux)
        
        sigmoid_cutoff = params["LRG"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
        sigmoid_fudge  = params["LRG"]["EFFICIENCY"]["SIGMOID_FUDGE"]
        simulated_eff = 1./(1.+np.exp((r_mag-sigmoid_cutoff)/sigmoid_fudge))

        log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format(simtype,sigmoid_cutoff,sigmoid_fudge))
    
    elif(simtype == 'QSO'):
        
        zsplit = params['QSO_ZSPLIT']
        r_mag = 22.5 - 2.5*np.log10(true_rflux) 
        simulated_eff = np.ones(r_mag.shape)

        # lowz tracer qsos
        sigmoid_cutoff = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
        sigmoid_fudge  = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
        ii=(truth['TRUEZ']<=zsplit)
        simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
        log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LOWZ QSO",sigmoid_cutoff,sigmoid_fudge))
                
        # highz lya qsos
        sigmoid_cutoff = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
        sigmoid_fudge  = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
        ii=(truth['TRUEZ']>zsplit)
        simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))

        log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LYA QSO",sigmoid_cutoff,sigmoid_fudge))
        
    elif simtype == 'BGS':
        simulated_eff = 0.98 * np.ones(n)

    elif simtype == 'MWS':
        simulated_eff = 0.98 * np.ones(n)

    else:
        default_zeff = 0.98
        log.warning('using default redshift efficiency of {} for {}'.format(default_zeff, simtype))
        simulated_eff = default_zeff * np.ones(n)

    #- Get the corrections for observing conditions per tile, then
    #- correct targets on those tiles.  Parameterize in terms of failure
    #- rate instead of success rate to handle bookkeeping of targets that
    #- are observed on more than one tile.
    #- NOTE: this still isn't quite right since multiple observations will
    #- be simultaneously fit instead of just taking whichever individual one
    #- succeeds.

    if ignore_obscondition :
        ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
        zeff_obs = np.ones(ncond)
    else :
        zeff_obs = get_zeff_obs(simtype, obsconditions)
    pfail = np.ones(n)
    observed = np.zeros(n, dtype=bool)

    # More efficient alternative for large numbers of tiles + large target
    # list, but requires pre-computing the sort order of targetids.
    # Assume targets['TARGETID'] is unique, so not checking this.
    sort_targetid = np.argsort(targetid)

    # Extract the targets-per-tile lists into one huge list.
    concat_targets_in_tile  = np.concatenate([targets_in_tile[tileid] for tileid in obsconditions['TILEID']])
    ntargets_per_tile       = np.array([len(targets_in_tile[tileid])  for tileid in obsconditions['TILEID']])

    # Match entries in each tile list against sorted target list.
    target_idx    = targetid[sort_targetid].searchsorted(concat_targets_in_tile,side='left')
    target_idx_r  = targetid[sort_targetid].searchsorted(concat_targets_in_tile,side='right')
    del(concat_targets_in_tile)

    # Flag targets in tiles that do not appear in the target list (sky,
    # standards).
    not_matched = target_idx_r - target_idx == 0
    target_idx[not_matched] = -1
    del(target_idx_r,not_matched)

    # Not every tile has 5000 targets, so use individual counts to
    # construct offset of each tile in target_idx.
    offset  = np.concatenate([[0],np.cumsum(ntargets_per_tile[:-1])])

    # For each tile, process targets.
    for i, tileid in enumerate(obsconditions['TILEID']):
        if ntargets_per_tile[i] > 0:
            # Quickly get all the matched targets on this tile.
            targets_this_tile  = target_idx[offset[i]:offset[i]+ntargets_per_tile[i]]
            targets_this_tile  = targets_this_tile[targets_this_tile > 0]
            # List of indices into sorted target list for each observed
            # source.
            ii  = sort_targetid[targets_this_tile]
            tmp = (simulated_eff[ii]*zeff_obs[i]).clip(0, 1)
            pfail[ii] *= (1-tmp)
            observed[ii] = True

    simulated_eff = (1-pfail)

    return observed, simulated_eff
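
# A hedged mini-example (separate from the function above) of the failure-rate
# bookkeeping used in the tile loop: a target observed on several tiles gets a
# redshift if any single observation succeeds, so per-tile probabilities
# combine as 1 - prod(1 - p_i). Values are illustrative only.
import numpy as np

per_tile_p = np.array([0.6, 0.7])            # assumed per-tile efficiencies
combined = 1.0 - np.prod(1.0 - per_tile_p)   # 0.88, higher than either tile alone
print(combined)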
Exemple #55
0
def read_weight_csv(fname):
    return np.loadtxt(fname, delimiter=',')
Exemple #56
0
# -*- coding: utf-8 -*-
# List 7-3: Relationship between population and number of retail stores by prefecture
import numpy as np
import matplotlib.pyplot as plt

x = np.loadtxt('jinkou-kouriten.csv', delimiter=",")  # load the data from the CSV file
# Sources:
#
# Population: 2015 Population Census, basic complete tabulation on population and
# households (sex, age, marital status, household composition, housing conditions, etc.),
# national results
# http://www.e-stat.go.jp/SG1/estat/GL08020103.do?_csvDownload_&fileId=000007809735&releaseCount=2
#
# Number of retail stores: 2014 Census of Commerce (final report), number of retail
# establishments by prefecture from Volume 2, Table 2
# http://www.meti.go.jp/statistics/tyo/syougyo/result-2/h26/xlsx/kaku2.xlsx

jinkou = [u[0]/1000 for u in x]   # population (thousands)
kouriten = [u[1] for u in x]      # number of retail stores
print('correlation coefficient', np.corrcoef(jinkou, kouriten)[0,1].round(4))
plt.scatter(jinkou, kouriten, marker='x')
plt.title('Population vs. number of retail stores by prefecture')
plt.xlabel('Population (thousands)')
plt.ylabel('Number of retail stores')
plt.show()
# Output:
# correlation coefficient 0.9826

Exemple #57
0
'''This module makes a plot of some stars from the Hipparcos mission.'''

import matplotlib.pyplot as plt
import numpy as np

# load information about a bunch of stars
try:
    data = np.loadtxt('hipparcos.txt')
except IOError:
    data = np.loadtxt('/home/zabe0091/astr2600/hipparcos.txt')

# the Right Ascension is the third column in the table
ra = data[:, 2]

# the Declination is the fourth column in the table
dec = data[:, 3]

# make a plot of the RA and Dec of the stars
plt.scatter(ra, dec, marker='.', s=1)
plt.xlabel('RA (degrees)')
plt.ylabel('Dec (degrees)')

# make the plot display to the screen
plt.show()
Exemple #58
0
import sys
sys.path.append('..')
import numpy as np
import scipy.optimize as op
import matplotlib
from matplotlib import pyplot as plt
# use the common functions relative with linear regression implemented in file ex1.linear.py
from ex1.linear import *

if __name__ == '__main__':
    fig, ax = plt.subplots()
    x = np.loadtxt('x.txt')
    y = np.loadtxt('y.txt')
    m = x.size
    ex = x.reshape((m, 1))
    ex = np.hstack((np.ones((m, 1)), ex))
    theta = np.ones(2)

    ax.scatter(x, y, c='r', marker='x', label='Original')
    ax.set_xlabel('change in water level')
    ax.set_ylabel('water flowing out of the dam')
    # the function gradient is in file ex1.linear.py
    print(gradient(theta, ex, y, reg=True, lamda=1.0))
    # the function costFunc is in file ex1.linear.py
    print(costFunc(theta, ex, y, reg=True, lamda=1.0))
    # the function optimSolve is in file ex1.linear.py
    status, res = optimSolve(theta, ex, y)
    print(res)
    # the function predict is in file ex1.linear.py
    ax.plot(x, predict(res, x), label='Predicted')
    ax.legend(loc='best')
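
# Hedged sketch only: ex1.linear is not shown in this listing, so this is a
# plausible stand-in for the regularized cost used above (names are assumed).
import numpy as np

def cost_func(theta, X, y, reg=False, lamda=0.0):
    """Mean squared error for linear regression with optional L2 regularization
    (the bias term theta[0] is conventionally left unregularized)."""
    m = y.size
    err = X.dot(theta) - y
    cost = (err @ err) / (2 * m)
    if reg:
        cost += lamda * (theta[1:] @ theta[1:]) / (2 * m)
    return cost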
Exemple #59
import tensorflow as tf
import numpy as np
import pandas as pd

data_path = './190319tensordata/'
log_path = '/Iline_short/'
summaries_dir = './logs/' + log_path + '/20MAR19/' # for tensorboard summary
model_dir = './model/' + log_path + '/20MAR19/'# for model saver

input_protocol = '_short' # change X place holder and layer shapes
output_class = 'I'      # change Y place holder and layer shapes
result_path = './190319_hyperparameter_test/10_2_Iline_short_fine.csv'
HP_df = pd.read_csv(result_path)
HP_np = np.array(HP_df.sort_values('test_cost').head(10))

trainX = np.loadtxt(data_path + output_class + 'train' + input_protocol + 'X.csv', delimiter = ',')
trainY = np.loadtxt(data_path + output_class + 'train' + input_protocol + 'Y.csv', delimiter = ',')

testX = np.loadtxt(data_path + output_class + 'test' + input_protocol + 'X.csv', delimiter = ',')
testY = np.loadtxt(data_path + output_class + 'test' + input_protocol + 'Y.csv', delimiter = ',')

X = tf.placeholder(tf.float32, [None, 11]) 
Y = tf.placeholder(tf.float32, [None, 9]) 
keep_prob = tf.placeholder(tf.float32)
is_training_holder = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32)
L2beta = tf.placeholder(tf.float32)
epsilon = 1e-3 # for Batch normalization
layer1_shape = [11, 10]
layer2_shape = [10, 10]
output_shape = [10, 9]  
Exemple #60
0
import numpy as np

def read_csv(f, c, t=np.float64):
    return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=t)
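
# Usage sketch: np.loadtxt accepts file-like objects, so an in-memory CSV is
# enough for a quick check (data below is purely illustrative).
from io import StringIO

print(read_csv(StringIO("1,2,3\n4,5,6"), c=(0, 2)))   # -> [[1. 3.] [4. 6.]]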