Example #1
    def train(self, nIter, burgers_data_loc, base_plt_dir):

        tf_dict = {self.x0_tf: self.x0, self.t0_tf: self.t0,
                   self.u0_tf: self.u0,
                   self.x_lb_tf: self.x_lb, self.t_lb_tf: self.t_lb,
                   self.x_ub_tf: self.x_ub, self.t_ub_tf: self.t_ub,
                   self.x_f_tf: self.x_f, self.t_f_tf: self.t_f}

        start_time = time.time()
        losses = {}
        for it in range(nIter):
            self.sess.run(self.train_op_Adam, tf_dict)

            # Report the loss every 10 iterations
            if it % 10 == 0:
                elapsed = time.time() - start_time
                loss_value = self.sess.run(self.loss, tf_dict)
                losses[it] = loss_value
                print('It: %d, Loss: %.3e, Time: %.2f' %
                      (it, loss_value, elapsed))
                start_time = time.time()

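            # `it & (it - 1) == 0` is true when `it` is 0 or a power of two, so
            # intermediate plots are generated at exponentially spaced iterations.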
            if (it & (it - 1)) == 0:
                inputs = prepare_nn_inputs(burgers_data_loc, random_seed=1234, debugging=False)
                u_pred, f_pred = self.predict(self.X_star)
                plotting.plotting(inputs, u_pred, base_plt_dir, "{}".format(it))

        self.optimizer.minimize(self.sess,
                                feed_dict=tf_dict,
                                fetches=[self.loss],
                                loss_callback=self.callback)
        return losses
Example #2
    def test_2d_1cluster(self):
        #create simple data:
        a = list(range(0, 11))
        print(a)
        d = []
        for i in a:
            for j in a:
                d.append([i*1.0, j*1.0])

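        # Presumably Dbscan takes (data, min_neighbors, epsilon), as in the later
        # examples: here at least 3 neighbours within a radius of 2.0.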
        dbscanner = Dbscan(np.array(d), 3, 2.0)
        dbscanner.run()
        plotting.plotting(dbscanner.getClusterList(), dbscanner.getNoise())
Example #3
    def test_2d_1cluster(self):
        #create simple data:
        a = list(range(0, 11))
        print(a)
        d = []
        for i in a:
            for j in a:
                d.append([i * 1.0, j * 1.0])

        dbscanner = Dbscan(np.array(d), 3, 2.0)
        dbscanner.run()
        plotting.plotting(dbscanner.getClusterList(), dbscanner.getNoise())
Example #4
 def test_plotting(self):
     #some dummy data
     number, x_coordinate, y_coordinate = loadtxt('testdata/eps3-minpts5-cluster5-noise20.dat', unpack=True)
     D = [None] * len(x_coordinate)
     for ii in range(len(x_coordinate)):
         D[ii] = [x_coordinate[ii], y_coordinate[ii]]
     #put in the data we want to use
     minNeighbors = 5
     epsilon = 3.
     data = np.array(D, dtype=np.float64)
     # use dbscan
     dbscanner = Dbscan(data, minNeighbors, epsilon)
     dbscanner.run()
     # use plotting
     plotting.plotting(dbscanner.getClusterList(), dbscanner.getNoise())
Example #5
 def test_plotting(self):
     #some dummy data
     number, x_coordinate, y_coordinate = loadtxt('testdata/eps3-minpts5-cluster5-noise20.dat', unpack=True)
     D = [None] * len(x_coordinate)
     for ii in range(len(x_coordinate)):
         D[ii] = [x_coordinate[ii], y_coordinate[ii]]
     #put in the data we want to use
     minNeighbors = 5
     #epsilon = 2
     data = array(D)
     # use dbscan
     dbscanner = Dbscan(data, minNeighbors)
     result = dbscanner.run()
     # use plotting
     plotting.plotting(result)
Example #6
 def test_plotting(self):
     #some dummy data
     number, x_coordinate, y_coordinate = loadtxt(
         'testdata/eps3-minpts5-cluster5-noise20.dat', unpack=True)
     D = [None] * len(x_coordinate)
     for ii in range(len(x_coordinate)):
         D[ii] = [x_coordinate[ii], y_coordinate[ii]]
     #put in the data we want to use
     minNeighbors = 5
     epsilon = 3.
     data = np.array(D, dtype=np.float64)
     # use dbscan
     dbscanner = Dbscan(data, minNeighbors, epsilon)
     dbscanner.run()
     # use plotting
     plotting.plotting(dbscanner.getClusterList(), dbscanner.getNoise())
Example #7
def copy_task():
    n_train = 1000
    for i in range(0,n_train):
        seq_size = 5  # int(9 * r()) + 1
        seq = np.random.binomial(1, 0.5, size=(seq_size, y_size-1)).astype(theano.config.floatX)   
        x_ = np.zeros((seq_size*2+1, y_size)).astype(theano.config.floatX)
        x_[:seq_size,:y_size-1] = seq
        x_[seq_size, y_size-1] = 1.0
        y = np.zeros((seq_size*2+1, y_size)).astype(np.int32)
        y[seq_size+1:,:y_size-1] = seq
        cost, ypred = train(x_, y)
        print("cost =", cost)
        # pic.dump(ypred, open("output.txt", "w+"))
        # print f(x_,y)
        # sys.exit()
    n_test = 10   
    p = plotting()
    for i in range(0,n_test):
        seq_size = 5  # int(9 * r()) + 1
        seq = np.random.binomial(1, 0.5, size=(seq_size, y_size-1)).astype(theano.config.floatX)   
        x_ = np.zeros((seq_size*2+1, y_size)).astype(theano.config.floatX)
        x_[:seq_size,:y_size-1] = seq
        x_[seq_size, y_size-1] = 1.0
        y = np.zeros((seq_size*2+1, y_size)).astype(np.int32)
        y[seq_size+1:,:y_size-1] = seq
        y_pred = test(x_)
        print("cost =", cost)  # note: 'cost' is still the value from the last training iteration
        p.draw([np.transpose(x_), np.transpose(y), np.transpose(y_pred)])
Example #8

def main():
    fpath = "/Users/greg/Google Drive/Spring 19/CS6241/fivethirtyeight-topic-modeling/extract-data/"
    fname = "five38_data.csv"
    with open(fpath + fname, newline='') as f:
        reader = csv.reader(f)
        raw_dat = list(reader)

    raw_text = raw_dat[0]
    titles = raw_dat[1]
    classes = raw_dat[2]

    fname = "five38_tfidf.npy"
    X = np.load(fpath + fname)

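    # HALS presumably computes a rank-3 nonnegative matrix factorization of the
    # tf-idf matrix, X ~= W @ H, with `errs` holding the per-iteration
    # reconstruction error.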
    W, H, errs = HALS(X, n_iters=100, rank=3, seed=21)
    plotting(W, classes=classes)
Example #9
File: run.py  Project: Sibu08/larkai
def main():
    path_ecg = "C:/Users/Administrator/Desktop/larkai/ecg_sibam.csv"
    path_pcg = "C:/Users/Administrator/Desktop/larkai/pcg_sibam.csv"

    #ECG
    time_axis_ecg, normalised_valueInt, time_one_sample = Axis.create_axis(
        path_ecg)
    r_peaks, r_peaks_time, all_peaks, index_of_r_values, show_r = R_Peaks.r_peaks_detection(
        normalised_valueInt, time_one_sample)
    t_time, t_values, show_t, index_of_t_values = T_Peaks.t_peaks_detection(
        normalised_valueInt, time_one_sample, index_of_r_values, all_peaks)
    p_time, p_values, show_p = P_Peaks.p_peaks_detection(
        normalised_valueInt, time_one_sample, index_of_r_values, all_peaks)
    t_end_time, t_end_values, show_t_end = T_Peaks.t_end_peaks_detection(
        normalised_valueInt, time_one_sample, all_peaks, t_values,
        index_of_t_values)
    Q_time, Q_values, show_q = Q_Peaks.q_peaks_detection(
        normalised_valueInt, time_one_sample, p_values, r_peaks)
    S_time, S_values, show_s = S_Peaks.s_peaks_detection(
        normalised_valueInt, time_one_sample, t_values, r_peaks)
    t_start_time, t_start_values, show_t_start = T_Peaks.t_start_peaks_detection(
        normalised_valueInt, time_one_sample, all_peaks, t_values,
        index_of_t_values, r_peaks, S_values)
    p_start_time, p_start_values, show_p_start = P_Peaks.p_start_peaks_detection(
        normalised_valueInt, time_one_sample, all_peaks, p_values, t_values)
    #PCG
    normalised, time_axis_pcg, time_one_sample = S_D.create_wavelet(path_pcg)
    envelope_filtered, allpeaks, peak_time_s1, peak_s1, peak_time_s2, peak_s2 = S_D.s_d_detection(
        normalised, time_one_sample)
    s1_width_time, s2_width_time = S_D.s1_s2_widths(envelope_filtered,
                                                    time_one_sample, allpeaks,
                                                    peak_s1, peak_s2)

    # Information.information(normalised_valueInt, r_peaks_time, p_time, Q_time, S_time, t_time, t_start_time, t_end_time, p_start_time, r_peaks, p_values, Q_values, S_values, t_values, peak_time_s1, peak_time_s2, s1_width_time, s2_width_time)

    # plotting.plotting(time_axis_ecg, normalised_valueInt, normalised, r_peaks_time, r_peaks, p_time, p_values, t_time, t_values,
    #                   t_end_time, t_end_values, t_start_time, t_start_values, Q_time, Q_values, S_time, S_values,
    #                   show_r, show_p, show_q, show_t, show_s, time_axis_pcg, envelope_filtered, peak_time_s1,
    #                   peak_s1, peak_time_s2, peak_s2, s1_width_time, s2_width_time)

    # Run the measurement report and the plotting concurrently. `target` must
    # be the callable itself and its arguments must go in `args`; calling the
    # function inside Thread(...) would execute it immediately in the main thread.
    information_thread = threading.Thread(
        target=Information.information,
        args=(normalised_valueInt, r_peaks_time, p_time, Q_time, S_time,
              t_time, t_start_time, t_end_time, p_start_time, r_peaks,
              p_values, Q_values, S_values, t_values, peak_time_s1,
              peak_time_s2, s1_width_time, s2_width_time))
    plotting_thread = threading.Thread(
        target=plotting.plotting,
        args=(time_axis_ecg, normalised_valueInt, normalised, r_peaks_time,
              r_peaks, p_time, p_values, t_time, t_values, t_end_time,
              t_end_values, t_start_time, t_start_values, Q_time, Q_values,
              S_time, S_values, show_r, show_p, show_q, show_t, show_s,
              time_axis_pcg, envelope_filtered, peak_time_s1, peak_s1,
              peak_time_s2, peak_s2, s1_width_time, s2_width_time))

    information_thread.start()
    plotting_thread.start()
    information_thread.join()
    plotting_thread.join()

    pdf_gen.generate_pdf()
Example #10

def main(inargs):
    """
    Runs the main program
    
    Parameters
    ----------
    inargs : argparse object
      Argparse object with all input arguments
    """

    # Check arguments
    assert inargs.plot in ['weather_ts', 'prec_stamps', 'prec_hist'], \
        'Plot not supported.'

    # Check if pre-processed file exists
    if not pp_exists(inargs) or inargs.recompute:
        print('Compute preprocessed file: ' + get_pp_fn(inargs))
        # Call preprocessing routine with arguments
        preprocess(inargs)
    else:
        print('Found pre-processed file: ' + get_pp_fn(inargs))

    # Call analyzing and plotting routine
    plotting(inargs)
Example #11
 def makeGraphs(self, index):  # call to make graphs
     # print('DetectorRun: makeGraphs')
     y = plotting.plotting()
     x = 0
     # index = column of the processed array (0 = total, 1 = total coin,
     # 2 = total anti, 3 = ch0 tot, etc.)
     if index > 2:
         x = 3  # set x = 3 for the ch0/ch1 columns of processed data

     if not self.resCalib:  # if the resolution calibration is empty
         y.graphRes(self.processed[:, index])  # call the plotting.py module to plot the data

     else:  # if a calibration exists, use the resCalib data
         # uses resCalib[0] for index 0-2 (total ch) and 3-5 (CH0),
         # and resCalib[1] for index 6-8 (CH1)
         y.graphRes(self.processed[:, index], self.resCalib[math.floor((index - x) / 3)])

         # testdata = self.resCalib  ##### debugging ###############

     self.resGraphs.append(y)  # append the produced graph to the resGraphs list
Example #12
def main():
    """
    Execute the vs_plot_enrich script
    """

    title, vsLegends, vsPaths, \
        libraryIDstr, truePosIDstr, ref, zoom, gui, showAUC = parseArgs()

    # Define mode
    mode = "enrich"
    # Define log
    log = True

    # Creating a plotting instance for access to all methods
    p = plotting.plotting(title)

    # Get the truePosID range in list format
    truePosIDlist = p.makeIDlist(truePosIDstr,
                                 "True positive ID list",
                                 printOut=True)
    libraryIDlist = p.makeIDlist(libraryIDstr,
                                 "Library IDs (not displayed)",
                                 printOut=False)

    # Generate a dictionary containing the refinement ligands, if any
    # refinement ligand was submitted
    if ref:
        refDict = p.makeRefDict(ref)
    else:
        refDict = {}

    # Make zoom a float if it was passed as an argument, otherwise set it to
    # 0.0 so that no zoomed window is drawn on the plot
    if zoom:
        zoom = float(zoom)
    elif zoom is None:
        zoom = 0.0

    # Read the results of each VS and keep only the ligIDs that are common
    # to all of them (create an intersect result list)
    vsIntersects, ligIDintersectSet = p.intersectResults(
        vsPaths, libraryIDlist)

    # Get updated true positive, true negative and library counts given the
    # intersect results
    truePosCount = p.updatedLigCounts(ligIDintersectSet, truePosIDlist,
                                      "true positives")
    #trueNegCount = p.updatedLigCounts(ligIDintersectSet,
    #                                  trueNegIDlist,
    #                                  "true negatives")
    libraryCount = p.updatedLigCounts(ligIDintersectSet, libraryIDlist,
                                      "whole library")

    # Calculate % of total curves for each of these (write file + return data)
    vsPockets = []
    for vsPath, vsIntersect in zip(vsPaths, vsIntersects):
        #vsDir = os.path.dirname(vsPath)
        vsPocket = p.writePercFile(vsIntersect, vsPath, mode, refDict,
                                   "library", libraryIDstr, libraryIDlist,
                                   libraryCount, "true_pos", truePosIDstr,
                                   truePosIDlist, truePosCount)

        vsPockets.append(vsPocket)

    # Extract the data from the vs percent data (in both enrichment curves and
    # ROC curves, the truePositive count would be used to draw the perfect curve)
    plotData, xLim, yLim = p.extractPlotData(vsPockets, vsLegends, zoom)

    # FIX AND COMPUTE ON ONE CURVE AT A TIME, on percent vs data?
    # p.getAUC_NSQ(plotData, perfect)

    # Define title and axis names based on mode
    xAxisName = "% of ranked database (total=" + str(libraryCount) + ")"
    yAxisName = "% of known ligands found (total=" + str(truePosCount) + ")"

    # Plot the data calculated by writePercFile, and read in by extractPlotData
    p.plot(title,
           plotData,
           libraryCount,
           truePosCount,
           xLim,
           yLim,
           xAxisName,
           yAxisName,
           gui,
           log,
           zoom,
           mode,
           showAUC,
           scatterData=False)

    # Write the command used to execute this script into a txt file
    p.writeCommand(title)

    print("\n")
Example #13
import plotting as plt
import wfdb
import wfdbi

plt.plotting('/home/manuel/Documents/Minip/Dataset/training2017/A00002', 1,
             './')
Example #14

        self.rotation_list = [0]
        self.prediction = 0
        self.initial_adjust = False




if __name__ == '__main__':
    source_distance = 1  # source distance in meters
    source_azimuth = 10  # source azimuth in degrees

    # Instantiation of the simulation class
    sim = Simulation(directory="simulation_test",
                     source_azimuth=source_azimuth,
                     source_distance=source_distance,
                     model="raw_resnet")

    prediction = sim.simulate()  # Run simulation and get prediction

    plot = plotting(room_dim=constants.room_dim,
                    source_distance=source_distance,
                    source_azimuth=source_azimuth,
                    mic_centre=sim.mic_centre,
                    rotation_list=sim.rotation_list,
                    prediction_list=sim.predictions,
                    prediction=sim.prediction)

    plot.plot_room()

    print("Final Prediction: {prediction}".format(prediction=prediction))
Example #15
        br_lat = bl_lat
        br_long = tr_long

        tl_grib_values = grib_data[(tl_lat, tl_long)]
        bl_grib_values = grib_data[(bl_lat, bl_long)]
        tr_grib_values = grib_data[(tr_lat, tr_long)]
        br_grib_values = grib_data[(br_lat, br_long)]
        at_waypoint.append(entry)
        res_values = []
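        # For every parameter on every level, interpolate between the values at
        # the four surrounding grid points (tl, tr, bl, br) to estimate the value
        # at the waypoint's latitude/longitude.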
        for level in tl_grib_values.values():
            for tl_param in level.parameters.values():
                bl_param = bl_grib_values[level.level].parameters[
                    tl_param.name]
                tr_param = tr_grib_values[level.level].parameters[
                    tl_param.name]
                br_param = br_grib_values[level.level].parameters[
                    tl_param.name]
                try:
                    ip = util.get_interpolated_value(
                        tl_lat, tl_long, tl_param.data, tr_lat, tr_long,
                        tr_param.data, bl_lat, bl_long, bl_param.data, br_lat,
                        br_long, br_param.data, entry.latitude,
                        entry.longitude)
                    res_values.append((level.level, level.name, tl_param.name,
                                       tl_param.unit, ip))
                except:
                    pass
        res.append(res_values)
    # plot data
    plotting(NUM_POINTS, res, at_waypoint, False)
Example #16

def main():
	max_ccg_score = 0

	for _ in range(0, N_ITTER):
		
		
		'''Randomly pick K centroid videos: this is our initial dict, whose keys
		are chosen at random (K_CLUSTERS of them) from the video names.'''
		slcted_cntr_video_indx_lst = random.sample(range(1, N_VIDEOS), K_CLUSTERS)
		slcted_cntr_video_indx_lst.sort()
		slcted_cntr_video_lst = []
		# [*dct] expands to the list of the dict's keys
		video_name_lst = [*videos_with_their_tags_dct]

		for i in slcted_cntr_video_indx_lst:
			slcted_cntr_video_lst.append(video_name_lst[i])
		slcted_cntr_video_dct = {key:[] for key in slcted_cntr_video_lst}

		
		'''Step 1: grouping by centroid video. Given the selected centroid videos,
		go through every video, compare its tags with the tags of the centroid
		videos (i.e. the group names), and append the video under consideration
		to the group it matches best.'''
		max_matched_tags = -1
		for i in video_name_lst:

			for j in slcted_cntr_video_lst:
				common_tags = videos_with_their_tags_dct[i].intersection(videos_with_their_tags_dct[j])
				if len(common_tags) > max_matched_tags:
					max_matched_tags = len(common_tags)
					video_belongs_to = j
			
			slcted_cntr_video_dct[video_belongs_to].append(i)
			# reset max_matched_tags for the next iteration
			max_matched_tags = -1
		

		'''Step 2: building the first tag conglomerates. Now that the groups are
		defined, collect the tags of each group: build a dictionary whose keys
		are the centroid video names (i.e. the group names) and whose values are
		the lists of tags from the videos assigned to that group.'''
		# central_group_tags_dct = {'ct1':[], 'ct2': [],'ct3': [],'ct4': [],'ct5': [],'ct6': [], 'ct7': [],'ct8': [], 'ct9': [],'ct10': [],'ct11': [],'ct12': []}
		cntr_name_lst = [0]*K_CLUSTERS
		for j in range (1, K_CLUSTERS+1):
			cntr_name_lst[j-1] = "ct"+str(j)
		central_groups_dct  = {key:[] for key in cntr_name_lst}

		central_group_tags_dct = {key:[] for key in cntr_name_lst}
		central_group_tags_dct = tagger(slcted_cntr_video_dct, central_group_tags_dct)


		'''Step 3: iterating over the conglomerates.
		Known issue: each video is itself part of its group, so its own tags add
		to its score, while videos coming in from outside do not distort the
		picture the way they would with Euclidean k-NN.'''
		converged = False
		central_groups_dct_coppy = {key:[] for key in cntr_name_lst}
		counter = 0
		central_groups_dct_init = {}
		
		while converged == False:
			central_groups_dct_2_itr_ago = central_groups_dct_init
			if counter == 0:
				for i, j in zip(list(central_groups_dct_coppy.keys()), list(slcted_cntr_video_dct.keys()) ):
					central_groups_dct_coppy[i] = slcted_cntr_video_dct[j]
			else:
				central_groups_dct_coppy = central_groups_dct
			counter = counter + 1

			central_groups_dct_init = central_groups_dct
			central_groups_dct  =  {key:[] for key in cntr_name_lst}
			
			'''Go through every video, look at its tags and compare them with the
			tags in central_group_tags_dct, i.e. with each group and all of its
			tags (a dict whose keys are the group names, e.g. ct1, and whose
			values list every tag belonging to that group). The video is added to
			the group where it scores highest (most tag matches).'''
			video_score = 0
			max_score = -1
			for i in video_name_lst:
				for j in [*central_group_tags_dct]:
					for k in videos_with_their_tags_dct[i]:

						N_VIDEOS_in_group = len(central_groups_dct_coppy[j])\
							if len(central_groups_dct_coppy[j]) > 0 else 1
						video_score = video_score + float(central_group_tags_dct[j].get(k,0))\
							/float(N_VIDEOS_in_group)
					
					if video_score > max_score:
						max_score = video_score
						video_belongs_to_conglrmt = j
				
					# reset video_score and max_score for the next
					# iteration (i.e. the next video)
					video_score = 0
				max_score = 0
				central_groups_dct[video_belongs_to_conglrmt].append(i)

			# if central_groups_dct did not change after this pass, stop:
			# no further progress will be made
			if central_groups_dct == central_groups_dct_init or\
				central_groups_dct == central_groups_dct_2_itr_ago:
				converged = True
		

		'''Now choose the best result from the N_ITTER iterations.'''
		video_score = 0
		for group_name in [*central_groups_dct]:
			for video in central_groups_dct[group_name]:
				for tag in videos_with_their_tags_dct[video]:

					video_score = video_score + \
						float(central_group_tags_dct[group_name].get(tag, 0))/ \
						float(len(central_groups_dct[group_name]))
					
		if video_score > max_ccg_score:
			max_ccg_score = video_score
			central_groups_dct_best = central_groups_dct

		# max_ccg_score = 0

	print ("max score:", max_ccg_score, "\n")
	print ("best group with this score:", "\n", central_groups_dct_best, "\n")

	plotting(central_groups_dct_best, videos_with_their_tags_dct)
Example #17
def main():
    """
    Run script
    """

    title, vsLegends, vsPaths, vsColors, \
        truePosIDstr, falsePosIDstr, ligLibsJson, \
        ref, gui, labelBars, customEFs = parseArgs()

    # Define mode
    mode = "EF"
    # Define zoom
    zoom = 0.0
    # Define log
    log = True
    # Define ef_cutoffs
    ef_cutoffs = define_ef_cutoffs(customEFs)

    # Creating a plotting instance for access to all methods
    p = plotting.plotting(title)

    # Create a library ID string, combining true positive and true negative
    # strings
    libraryIDstr = truePosIDstr + "," + falsePosIDstr

    # Get the truePosID range in list format
    truePosIDlist = p.makeIDlist(truePosIDstr, "True positive ID list: ",
                                 printOut=True)
    falsePosIDlist = p.makeIDlist(falsePosIDstr, "False positive ID list: ",
                                  printOut=True)
    libraryIDlist = truePosIDlist + falsePosIDlist

    # print(len(truePosIDlist), len(falsePosIDlist), len(libraryIDlist))

    # Generate a dictionary containing the refinement ligands, if any
    # refinement ligand was submitted
    if ref:
        refDict = p.makeRefDict(ref)
    else:
        refDict = {}

    # Get ligand ID list from sdf file(s)
    # Each of the SDF file represents a ligand "type" can be chemotype,
    # pharmacology, molecularWeight, or interaction pattern.
    # The information of this ligand "type" is stored in the dictionary key,
    # which points to a .sdf path containing ligands of that "type"
    lig_types = p.getLigandListFromJson(ligLibsJson)

    # Read the results of each VS and keep only the ligIDs that are common
    # to all of them
    vsIntersects, ligIDintersectSet = p.intersectResults(vsPaths,
                                                         libraryIDlist)

    # Get updated true positive, true negative and library counts given the
    # intersect results
    truePosCount = p.updatedLigCounts(ligIDintersectSet,
                                      truePosIDlist,
                                      "true positives")
    falsePosCount = p.updatedLigCounts(ligIDintersectSet,
                                       falsePosIDlist,
                                       "false positives")
    libraryCount = p.updatedLigCounts(ligIDintersectSet,
                                      libraryIDlist,
                                      "full library")

    # Calculate % of total curves for each of these (write file + return data)
    vsPockets = []
    for vsPath, vsIntersect in zip(vsPaths, vsIntersects):
        vsPocket = p.writePercFile(vsIntersect, vsPath, mode, refDict,
                                   "full_lib", libraryIDstr,
                                   libraryIDlist, libraryCount,
                                   "true_pos", truePosIDstr,
                                   truePosIDlist, truePosCount)
        vsPockets.append(vsPocket)

    # Extract the data from the vs percent data (in both enrichment curves and
    # ROC curves, the truePositive count would be used to draw a perfect curve)
    # plotData, xLim, yLim = p.extractPlotData(vsPockets, vsLegends, zoom)

    # Extract data related to ligand type (plotting and barplot data)
    enrichFactorData = p.extractLigTypeData(vsPockets,
                                            vsLegends,
                                            lig_types,
                                            libraryCount,
                                            ef_cutoffs)

    # import pprint
    # pprint.pprint(enrichFactorData)
    # pprint.pprint(lig_types)

    # Plot the barplot representing the enrichment factors (EFs) in known
    # ligands at ef_cutoffs of the screened library
    p.barPlot(title, enrichFactorData, vsLegends, ef_cutoffs,
              vsColors, lig_types, gui, labelBars)

    # Write the command used to execute this script into a log file
    p.writeCommand(title)

    print("\n")
Example #18
 def make2DGraphs(self):  # called when the user selects 'make 2d graph' from the operations tab of the gamalyzer window
     print('DetectorRun: make2DGraph (initializing 2D graph...)')
     y = plotting.plotting()
     y.graph2DRes(self.matCoin)  # calls graph2DRes from plotting.py with the matCoin array data
     self.resGraphs.append(y)  # append to the resultant graphs
Example #19
    name = f"matmul{i}.txt"
    fid = open(name, "w")

    for N in Ns:

        print(f"N={N}")
        A = rand(N, N)
        B = rand(N, N)

        t1 = perf_counter()
        C = A @ B
        t2 = perf_counter()

        dt = t2 - t1
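        # rough memory footprint: three N x N float64 matrices (A, B and C),
        # 8 bytes per element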
        size = 3 * (N**2) * 8

        dts.append(dt)
        mem.append(size)

        fid.write(f"{N} {dt} {size} \n")

        print(f"Elapsed time = {dt} s")
        print(f"Memory used = {size} bytes")

        fid.flush()
    fid.close()

from plotting import plotting

plotting(Ncorridas)
Example #20
    elapsed = time.time() - start_time
    print('Training time: %.4f' % (elapsed))

    u_pred, f_pred = model.predict(inputs.X_star)
    u_star = inputs.exact.flatten()[:, None]

    error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2)
    print('Error u: %e' % (error_u))

    t = inputs.t
    x = inputs.x
    X, T = np.meshgrid(x, t)

    U_pred = griddata(inputs.X_star, u_pred.flatten(), (X, T), method='cubic')
    Error = np.abs(inputs.exact - U_pred)

    plt.close()
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    pd.Series(losses).plot(logy=True, ax=ax)
    lp_loc = '/tmp/loss_plot.eps'
    plt.savefig(lp_loc)
    print("saved loss plot to {}".format(lp_loc))

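    # Note: '~' is not expanded by os.path.join; os.path.expanduser() would be
    # needed if save_weights_and_biases hands the path straight to open()/savez.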
    save_base_dir = '~/junk/eg_model'
    model.save_weights_and_biases(
        os.path.join(save_base_dir, 'weights_and_biases_2.npz'))

    u_pred, f_pred = model.predict(
        inputs.X_star)  # X_star = tf.convert_to_tensor(X_star) ?
    plotting.plotting(inputs, u_pred, '~/plots')
Example #21
# Number of feedforward and feedback weights for IIR testing
numWIIRForward = 10  # feedforward
numWIIRBack = 10  # feedback
# Plant feedforward coefficients
plantA = [1, 1.3, -0.6]
# Plant feedback coefficients
plantB = [1, -0.9, 0.4]
# Generate the desired output from plant
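# sig.lfilter(b, a, x) filters x through B(z)/A(z), so plantA acts as the
# numerator (feedforward) and plantB as the denominator (feedback) here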
desiredOutput = sig.lfilter(plantA, plantB, noise)
# Calculate the output power
plantPower = (desiredOutput * desiredOutput).sum() / desiredOutput.size
# Run the recursiveFilter function to find the convergence filter coefficients & error
FIR_a, FIR_b, FIR_e = recFilter.recursiveFilter(numWFIRForward, numWFIRBack,
                                                noise, desiredOutput, mu)
# Plot the result of the convergence FIR filter compare to the plant
percentMSE = plting.plotting(FIR_a, FIR_b, plantA, plantB, FIR_e, plantPower,
                             'FIR1')
print("percentMSE_FIR = ", percentMSE)
# Run the recursiveFilter function to find the convergence filter coefficients & error
IIR_a, IIR_b, IIR_e = recFilter.recursiveFilter(numWIIRForward, numWIIRBack,
                                                noise, desiredOutput, mu)
# Plot the result of the convergence IIR filter compare to the plant
percentMSE = plting.plotting(IIR_a, IIR_b, plantA, plantB, IIR_e, plantPower,
                             'IIR1')
print("percentMSE_IIR = ", percentMSE)

#----------------------------------------------------------------------------------#

##--Question 2--##----------------------------------------------------------------##

# Number of feedforward weights for FIR testing
numWFIR_1 = 100
Example #22
    def train(self, nIter, burgers_data_loc, N_u, N_f, base_plt_dir):

        tf_dict = {
            self.x_u_tf: self.x_u,
            self.t_u_tf: self.t_u,
            self.u_tf: self.u,
            self.x_f_tf: self.x_f,
            self.t_f_tf: self.t_f
        }

        start_time = time.time()
        losses = {}
        for it in range(nIter):
            self.sess.run(self.train_op_Adam, tf_dict)

            # Report the loss every 10 iterations
            if it % 10 == 0:
                elapsed = time.time() - start_time
                loss_value = self.sess.run(self.loss, tf_dict)
                losses[it] = loss_value
                print('It: %d, Loss: %.3e, Time: %.2f' %
                      (it, loss_value, elapsed))
                start_time = time.time()

                # All of the commented-out code below records the weights and biases every 10 iterations during training and saves them in .p pickle files.

                #lossy_a, lossy_b = self.sess.run([self.loss_a, self.loss_b], feed_dict = tf_dict) #add self.loss_a/b
                #with open('epochs_v_error_hp.p', 'rb') as fp:
                #    d = pickle.load(fp)
                #inputs = interiorburgershp.prepare_nn_inputs_burgers(burgers_data_loc, N_u, N_f, random_seed=1234, debugging=False)
                #data = scipy.io.loadmat('burgers_shock.mat')
                #u = np.real(data['usol']).T.flatten()[:,None]
                #u_pred, f_pred = self.predict(self.X_star)
                #error = np.linalg.norm(u-u_pred, 2)/25000
                #d[it] = {'MSE' : lossy_a, 'PDE' : lossy_b, 'error' : error}
                #with open('epochs_v_error_hp.p', 'wb') as fp:
                #    pickle.dump(d, fp, protocol=2)
                #weights = {}
                #biases={}
                #for i, w in enumerate(self.weights):
                #    weights[i] = w.eval(self.sess)
                #for i, b in enumerate(self.biases):
                #    biases[i] = b.eval(self.sess)
                #with open('wab_dict_hp.p', 'rb') as fp:
                #    wabdict = pickle.load(fp)
                #with open('b_dict_hp.p', 'rb') as fp:
                #    bdict = pickle.load(fp)
                #wabdict[it] = weights
                #bdict[it] = biases
                #with open('wab_dict_hp.p', 'wb') as fp:
                #    pickle.dump(wabdict, fp, protocol=2)
                #with open('b_dict_hp.p', 'wb') as fp:
                #    pickle.dump(bdict, fp, protocol=2)

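            # As above, `it & (it - 1) == 0` holds when `it` is 0 or a power of
            # two, so plots are saved at exponentially spaced iterations.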
            if (it & (it - 1)) == 0:
                inputs = interiorburgershp.prepare_nn_inputs_burgers(
                    burgers_data_loc,
                    N_u,
                    N_f,
                    random_seed=1234,
                    debugging=False)
                u_pred, f_pred = self.predict(self.X_star)

                plotting.plotting(inputs, u_pred, base_plt_dir,
                                  "{}".format(it))

        self.optimizer.minimize(self.sess,
                                feed_dict=tf_dict,
                                fetches=[self.loss],
                                loss_callback=self.callback)

        return losses
Example #23
def main():
    """
    Run script
    """

    title, vsLegends, vsPaths, vsColors, vsLines, \
        truePosIDstr, falsePosIDstr, xAxisName, yAxisName, \
        ref, gui, log, showAUC = parseArgs()

    # Define mode
    mode = "ROC"
    # Define zoom
    zoom = 0.0
    # Define scatterData
    scatterData = False

    # Creating a plotting instance for access to all methods
    p = plotting.plotting(title)

    # Get the truePosID range in list format
    truePosIDlist = p.makeIDlist(truePosIDstr,
                                 "True positive ID list: ",
                                 True)
    falsePosIDlist = p.makeIDlist(falsePosIDstr,
                                  "False positive ID list: ",
                                  True)
    libraryIDlist = truePosIDlist + falsePosIDlist

    # Generate a dictionary containing the refinement ligands, if any
    # refinement ligand was submitted
    if ref:
        refDict = p.makeRefDict(ref)
    else:
        refDict = {}

    # Read the results of each VS and keep only the ligIDs that are common
    # to all of them
    vsIntersects, ligIDintersectSet = p.intersectResults(vsPaths,
                                                         libraryIDlist)

    # Get updated true positive, true negative and library counts given the
    # intersect results
    truePosCount = p.updatedLigCounts(ligIDintersectSet,
                                      truePosIDlist,
                                      "true positives")
    falsePosCount = p.updatedLigCounts(ligIDintersectSet,
                                       falsePosIDlist,
                                       "false positives")
    # This value is actually not used, but it complies with the plot() function
    # libraryCount = truePosCount + falsePosCount
    libraryCount = p.updatedLigCounts(ligIDintersectSet,
                                      libraryIDlist,
                                      "whole library")

    # Calculate % of total curves for each of these (write file + return data)
    vsPockets = []
    for vsPath, vsIntersect in zip(vsPaths, vsIntersects):
        vsPocket = p.writePercFile(vsIntersect, vsPath, mode, refDict,
                                   "true_neg", falsePosIDstr,
                                   falsePosIDlist, falsePosCount,
                                   "true_pos", truePosIDstr,
                                   truePosIDlist, truePosCount)

        vsPockets.append(vsPocket)

    # Extract the data from the vs percent data (in both enrichment curves and
    # ROC curves)
    plotData, xLim, yLim = p.extractPlotData(vsPockets, vsLegends, zoom)

    # Calculate NSQ_AUC values for each curve and append data to plotData list
    p.getAUC_NSQ(plotData)

    # Define title and axis names based on mode
    yAxisName = yAxisName + " (total=" + str(truePosCount) + ")"
    xAxisName = xAxisName + " (total=" + str(falsePosCount) + ")"

    # Plot the data calculated by writePercFile, and read in by extractPlotData
    p.plotROC(title, plotData, vsColors, vsLines, libraryCount, truePosCount,
              xLim, yLim, xAxisName, yAxisName, gui, log,
              zoom, mode, showAUC)

    # Write the command used to execute this script into a log file
    p.writeCommand(title)

    print("\n")