Example #1
def hist(fname, data, bins, xlabel, ylabel, title, facecolor='green', alpha=0.5, transparent=True, **kwargs):
    plt.clf()
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.hist(x=data, bins=bins, facecolor=facecolor, alpha=alpha, **kwargs)
    plt.savefig(fname, transparent=transparent)
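
A minimal usage sketch for the hist wrapper above (assuming matplotlib.pyplot is imported as plt at module level, as the snippet requires; the filename and data are made up):

import numpy as np
import matplotlib.pyplot as plt

data = np.random.randn(1000)  # hypothetical sample
hist('hist_demo.png', data, bins=30, xlabel='value', ylabel='count',
     title='Normal sample')   # writes hist_demo.png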
Example #2
def plot_scatter_matrix(df, plotdir):
    "Plot scatter matrix."
    print('plotting scatter matrix, this may take a while')
    plt.clf()
    pd_scatter_matrix(df, figsize=(16,16))
    plt.suptitle("Scatter Matrix", fontsize=14)
    plt.savefig(plotdir + 'scatter_matrix.png')
Example #3
def plot_figs(fig_num, elev, azim, a):
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    ax.scatter(body_mass, work_level, heat_output, c='k', marker='+')
   
    X = np.arange(55, 85, 0.5)
    Y = np.arange(90, 180, 0.5)
    X, Y = np.meshgrid(X, Y)
    Z = a[0] + a[1]*X + Y/(a[2] + a[3]*X)
    ax.plot_surface(X, Y, Z, alpha=.5, antialiased=True, rstride=200, cstride=100, cmap=plt.cm.coolwarm)
             
    ax.set_xlabel('BODY_MASS', color='b')
    ax.set_ylabel('WORK_LEVEL', color='b')
    ax.set_zlabel('HEAT_OUTPUT', color='b')
    
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    
    ax.zaxis.set_major_locator(plt.LinearLocator(10))  
    ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
    ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
    ax.zaxis.set_major_formatter(plt.FormatStrFormatter('%.f'))
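
plot_figs above reads body_mass, work_level, and heat_output from module scope and needs Axes3D; a hypothetical setup to exercise it (data and the coefficients a are fabricated, and Axes3D(fig, ...) is the older mplot3d constructor the snippet relies on):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

body_mass = np.random.uniform(55, 85, 30)    # fabricated observations
work_level = np.random.uniform(90, 180, 30)
a = [1.0, 0.1, 10.0, 0.05]                   # made-up model coefficients
heat_output = a[0] + a[1]*body_mass + work_level/(a[2] + a[3]*body_mass)
plot_figs(1, elev=20, azim=30, a=a)
plt.savefig('heat_surface.png')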
Example #4
def plot_by_groups(df, plot_dir, af_key, config):
    """Plot allele frequencies of grouped/paired samples.
    """
    out_file = os.path.join(plot_dir, "cohort-group-af-comparison.pdf")
    df["sample_label"] = df.apply(lambda row: "%s\n%s" % (row["group_class"], row["sample"]), axis=1)
    sns.despine()
    sns.set(style="white")
    with PdfPages(out_file) as pdf_out:
        for (cohort, group), cur_df in df.groupby(["cohort", "group"]):
            labels = sorted(list(cur_df["sample_label"].unique()))
            labels.reverse()
            cur_df["sample_label"].categories = labels
            g = sns.violinplot(x=af_key, y="sample_label", data=cur_df, inner=None, bw=.1)
            #sns.swarmplot(x=af_key, y="sample_label", data=cur_df, color="w", alpha=.5)
            try:
                group = int(group)
            except ValueError:
                pass
            g.set_title("%s: %s" % (cohort, group))
            g = _af_violinplot_shared(g)
            pdf_out.savefig(g.figure)
            if config and (cohort, group) in config.group_detailed:
                out_dir = utils.safe_makedir(os.path.join(plot_dir, "detailed"))
                out_file = os.path.join(out_dir, "group-%s-%s.png" % (cohort, group))
                g.figure.savefig(out_file)
            plt.clf()
    return out_file
Example #5
def do_plot(mode, content, wide):
	global style
	style.apply(mode, content, wide)

	data = np.load("data/prr_AsAu_%s%s.npz"%(content, wide))

	AU, TAU = np.meshgrid(-data["Au_range_dB"], data["tau_range"])
	Zu = data["PRR_U"]
	Zs = data["PRR_S"]

	assert TAU.shape == AU.shape == Zu.shape, "The inputs TAU, AU, PRR_U must have the same shape for plotting!"

	plt.clf()

	if mode in ("sync",):
		# Plot the inverse power ratio; the sync signal is stronger for positive ratios
		CSf = plt.contourf(TAU, AU, Zs, levels=(0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 1.0), colors=("1.0", "0.75", "0.5", "0.25", "0.15", "0.0"), origin="lower")
		CS2 = plt.contour(CSf, colors=("r",)*5 + ("w",), linewidths=(0.75,)*5 + (1.0,), origin="lower")
	else:
		CSf  = plt.contourf(TAU, AU, Zs, levels=(0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 1.0), colors=("1.0", "0.75", "0.5", "0.25", "0.15", "0.0"), origin="lower")
		CS2f = plt.contour(CSf, levels=(0.0, 0.2, 0.4, 0.6, 0.8, 1.0), colors=4*("r",) + ("w",), linewidths=(0.75,)*4 + (1.0,), origin="lower")
		#CS2f = plt.contour(TAU, -AU, Zu, levels=(0.9, 1.0), colors=("0.0",), linewidths=(1.0,), origin="lower")
		if content in ("unif",):
			CSu  = plt.contourf(TAU, AU, Zu, levels=(0.2, 1.0), hatches=("////",), colors=("0.75",), origin="lower")
			CS2  = plt.contour(CSu, levels=(0.2,), colors=("r",), linewidths=(1.0,), origin="lower")

	style.annotate(mode, content, wide)

	plt.axis([data["tau_range"][0], data["tau_range"][-1], -data["Au_range_dB"][-1], -data["Au_range_dB"][0]])

	plt.ylabel(r"Signal power ratio ($\mathrm{SIR}$)", labelpad=2)
	plt.xlabel(r"Time offset $\tau$ ($/T$)", labelpad=2)

	plt.savefig("pdf/prrc2_%s_%s%s_z.pdf"%(mode, content, wide))
Example #6
0
def compare_chebhist(dname, mylambda, c, Nbin=25):
    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, cannot display comparison histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda),  1)

        # print c
        cheb_file_content = '\n'.join([str(st) for st in c])
        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v = y[1:] - y[:-1]

        plt.clf()
        plt.hist(mylambda, Nbin)
        plt.plot(u, v, "r.", markersize=10)
        filename = 'data/' + dname + '.png'
        plt.savefig(filename)  # save before show() so the saved figure is not blank
        plt.show()

        cheb_filename = 'data/' + dname + '.cheb'
        with open(cheb_filename, 'w+') as f:
            f.write(cheb_file_content)
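
The midpoint/difference step above recovers a per-bin mass from the integrated Chebyshev expansion: y holds cumulative values at the Nbin + 1 edges x, so v = y[1:] - y[:-1] is the mass in each bin, plotted at the bin centers u. A standalone sketch of just that step, with a stand-in cumulative function in place of plot_chebint:

import numpy as np

x = np.linspace(-1, 1, 26)   # Nbin + 1 = 26 bin edges
y = 0.5 * (x + 1)            # stand-in cumulative values at the edges
u = (x[1:] + x[:-1]) / 2     # bin centers
v = y[1:] - y[:-1]           # per-bin mass, comparable to a histogram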
Example #7
def run_test(fld, seeds, plot2d=True, plot3d=True, add_title="",
             view_kwargs=None, show=False, scatter_mpl=False, mesh_mvi=True):
    interpolated_fld = viscid.interp_trilin(fld, seeds)
    seed_name = seeds.__class__.__name__
    if add_title:
        seed_name += " " + add_title

    try:
        if not plot2d:
            raise ImportError
        from viscid.plot import vpyplot as vlt
        from matplotlib import pyplot as plt
        plt.clf()
        # plt.plot(seeds.get_points()[2, :], fld)
        mpl_plot_kwargs = dict()
        if interpolated_fld.is_spherical():
            mpl_plot_kwargs['hemisphere'] = 'north'
        vlt.plot(interpolated_fld, **mpl_plot_kwargs)
        plt.title(seed_name)

        plt.savefig(next_plot_fname(__file__, series='2d'))
        if show:
            plt.show()

        if scatter_mpl:
            plt.clf()
            vlt.plot2d_line(seeds.get_points(), fld, symdir='z', marker='o')
            plt.savefig(next_plot_fname(__file__, series='2d'))
            if show:
                plt.show()
    except ImportError:
        pass

    try:
        if not plot3d:
            raise ImportError
        from viscid.plot import vlab

        _ = get_mvi_fig(offscreen=not show)

        try:
            if mesh_mvi:
                mesh = vlab.mesh_from_seeds(seeds, scalars=interpolated_fld)
                mesh.actor.property.backface_culling = True
        except RuntimeError:
            pass

        pts = seeds.get_points()
        p = vlab.points3d(pts[0], pts[1], pts[2], interpolated_fld.flat_data,
                          scale_mode='none', scale_factor=0.02)
        vlab.axes(p)
        vlab.title(seed_name)
        if view_kwargs:
            vlab.view(**view_kwargs)

        vlab.savefig(next_plot_fname(__file__, series='3d'))
        if show:
            vlab.show(stop=True)
    except ImportError:
        pass
Example #8
 def _plot_histogram(self, data, number_of_devices=1, 
         preamp_timeout=1253):
     if number_of_devices == 0:
         return
     data = np.array(data)
     plt.figure(3)
     plt.ioff()
     plt.get_current_fig_manager().window.wm_geometry("800x550+700+25")
     plt.clf()
     if number_of_devices == 1: 
         plt.hist(data[0,:], bins=preamp_timeout, range=(1, preamp_timeout-1),
             color='b')
     elif number_of_devices == 2:
         plt.hist(data[0,:], bins=preamp_timeout, range=(1, preamp_timeout-1),
             color='r', label='JPM A')
         plt.hist(data[1,:], bins=preamp_timeout, range=(1, preamp_timeout-1),
             color='b', label='JPM B')
         plt.legend()
     elif number_of_devices > 2:
         raise Exception('Histogram plotting for more than two ' +
         'devices is not implemented.')
     plt.xlabel('Timing Information [Preamp Time Counts]')
     plt.ylabel('Counts')
     plt.xlim(0, preamp_timeout)
     plt.draw()
     plt.pause(0.05)
Example #9
def plot_precision_recall_n(y_true, y_scores, model_name):
    '''
    Plot precision and recall against the fraction of the population scored at
    or above each threshold.
    '''

    precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_scores)
    precision_curve = precision_curve[:-1]
    recall_curve = recall_curve[:-1]
    pct_above_per_thresh = []
    number_scored = len(y_scores)

    for value in pr_thresholds:
        num_above_thresh = len(y_scores[y_scores >= value])
        pct_above_thresh = num_above_thresh / float(number_scored)
        pct_above_per_thresh.append(pct_above_thresh)

    pct_above_per_thresh = np.array(pct_above_per_thresh)
    plt.clf()
    fig, ax1 = plt.subplots()
    ax1.plot(pct_above_per_thresh, precision_curve, 'b')
    ax1.set_xlabel('percent of population')
    ax1.set_ylabel('precision', color='b')
    ax2 = ax1.twinx()
    ax2.plot(pct_above_per_thresh, recall_curve, 'r')
    ax2.set_ylabel('recall', color='r')
    name = model_name
    plt.title(name)
    plt.savefig("Eval/{}.png".format(name))
Example #10
def Fig_plot(names,title=None,style='',when=0,showLegend=True,what=[],mpcLog=None,mheLog=None,simLog=None):
    assert isinstance(what,list)
    
    fig = plt.figure()
    
    if title is None:
        if isinstance(names,str):
            title = names
        else:
            assert isinstance(names,list)
            if len(names) == 1:
                title = names[0]
            else:
                title = str(names)
    fig.canvas.set_window_title(str(title))

    plt.clf()
    
    if 'mpc' in what:
        if mpcLog is None: raise Exception('you must provide an mpc log to plot its variables')
        mpcLog._plot(names,None,'k',when='all',showLegend=True)
    if 'sim' in what:
        if simLog is None: raise Exception('you must provide a sim log to plot its variables')
        simLog._plot(names,None,'',when=0,showLegend=True)
    if 'mhe' in what:
        if mheLog is None: raise Exception('you must provide an mhe log to plot its variables')
        N = mheLog._log['x'][0].shape[0]
        if not isinstance(names,list):
            names = [names]
        if names[0] in mheLog.xNames:
            mheLog._plot(names,None,'o',when=N-1,showLegend=True)
        elif names[0] in mheLog.uNames:
            mheLog._plot(names,None,'o',when=N-2,showLegend=True)
Example #11
def LinRegTest(XTrain, YTrain, close, filename):
	'''
	Use a linear regression learner to predict how much the price will change in 5 days.
	@filename: the file's true name is ML4T-filename
	@XTrain: the training data for the features
	@YTrain: the training data for the actual price after 5 days
	@close: the actual close prices of the test data set
	'''
	
	XTest, YTest = TestGenerator(close)

	# plot the features
	plt.clf()
	fig = plt.figure()
	fig.suptitle('The value of features')
	plt.plot(range(100), XTest[0:100, 0], 'b', label = 'One day price change')
	plt.plot(range(100), XTest[0:100, 1], 'r', label = 'difference between two day price change')
	plt.legend(loc = 4)
	plt.ylabel('Price')
	filename4 = 'feature' + filename + '.pdf'
	fig.savefig(filename4, format = 'pdf')

	LRL = LinRegLearner()
	cof = LRL.addEvidence(XTrain, YTrain)
	YLearn = LRL.query(XTest, cof)
	return YLearn
Example #12
def make_entity_plot(filename, title, fixed_noip, fixed_ip, dynamic_noip, dynamic_ip):
    plt.figure(figsize=(12,5))

    plt.title("Settings comparison - " + title)
    
    plt.xlabel('Time (ms)', fontsize=12)
    plt.xlim([0,62000])

    x = 0
    barwidth = 0.5
    bargroupspacing = 1.5

    fixed_noip_mean,fixed_noip_conf = conf_stats(fixed_noip)
    fixed_ip_mean,fixed_ip_conf = conf_stats(fixed_ip)
    dynamic_noip_mean,dynamic_noip_conf = conf_stats(dynamic_noip)
    dynamic_ip_mean,dynamic_ip_conf = conf_stats(dynamic_ip)

    values = [fixed_noip_mean,fixed_ip_mean,dynamic_noip_mean, dynamic_ip_mean]
    errs = [fixed_noip_conf,fixed_ip_conf,dynamic_noip_conf, dynamic_ip_conf]

    y_pos = numpy.arange(len(values))
    plt.barh(y_pos, values, xerr=errs, align='center', color=['r', 'b', 'r', 'b'],  ecolor='black', alpha=0.7)
    plt.yticks(y_pos, ["Fixed | no I.P.", "Fixed | I.P.", "Dynamic | no I.P.", "Dynamic | I.P."])
    plt.savefig(output_file(filename))
    plt.clf()
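
conf_stats and output_file are project helpers not shown here; a plausible stand-in for conf_stats, returning the sample mean and a ~95% normal-approximation confidence half-width, might look like:

import numpy as np

def conf_stats(samples, z=1.96):
    # hypothetical stand-in: mean and ~95% confidence half-width
    a = np.asarray(samples, dtype=float)
    return a.mean(), z * a.std(ddof=1) / np.sqrt(len(a))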
Example #13
def plot_wav_fft(wav_filename, desc=None):
    plt.clf()
    plt.figure(num=None, figsize=(6, 4))
    sample_rate, X = scipy.io.wavfile.read(wav_filename)
    spectrum = np.fft.fft(X)
    freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)

    plt.subplot(211)
    num_samples = 200
    plt.xlim(0, num_samples / float(sample_rate))
    plt.xlabel("time [s]")
    plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / float(sample_rate), X[:num_samples])
    plt.grid(True)

    plt.subplot(212)
    plt.xlim(0, 5000)
    plt.xlabel("frequency [Hz]")
    plt.xticks(np.arange(5) * 1000)
    if desc:
        desc = desc.strip()
        fft_desc = desc[0].lower() + desc[1:]
    else:
        fft_desc = wav_filename
    plt.title("FFT of %s" % fft_desc)
    plt.plot(freq, abs(spectrum), linewidth=5)
    plt.grid(True)

    plt.tight_layout()

    rel_filename = os.path.split(wav_filename)[1]
    plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
                bbox_inches='tight')
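
A minimal usage sketch for plot_wav_fft: synthesize a one-second 440 Hz tone with scipy, then plot its waveform and spectrum (the filename and tone parameters are made up):

import numpy as np
import scipy.io.wavfile

sample_rate = 8000
t = np.arange(sample_rate) / float(sample_rate)  # one second of samples
tone = (0.5 * np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)
scipy.io.wavfile.write('tone440.wav', sample_rate, tone)
plot_wav_fft('tone440.wav', desc='440 Hz test tone')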
Example #14
 def plot(self):
     if self.pos is None:
         self.pos = nx.graphviz_layout(self)
     NODE_SIZE = 500
     plt.clf()
     nx.draw_networkx_nodes(self, pos=self.pos,
                            nodelist=self.normal,
                            node_color=NORMAL_COLOR,
                            node_size=NODE_SIZE)
     nx.draw_networkx_nodes(self, pos=self.pos,
                            nodelist=self.contam,
                            node_color=CONTAM_COLOR,
                            node_size=NODE_SIZE)
     nx.draw_networkx_nodes(self, pos=self.pos,
                            nodelist=self.immune,
                            node_color=IMMUNE_COLOR,
                            node_size=NODE_SIZE)
     nx.draw_networkx_nodes(self, pos=self.pos,
                            nodelist=self.dead,
                            node_color=DEAD_COLOR,
                            node_size=NODE_SIZE)
     nx.draw_networkx_edges(self, pos=self.pos,
                            edgelist=self.nondead_edges(),
                            width=2,
                            edge_color='0.2')
     nx.draw_networkx_labels(self, pos=self.pos,
                             font_color='0.95', font_size=11)
     plt.gca().get_xaxis().set_visible(False)
     plt.gca().get_yaxis().set_visible(False)
     plt.draw()
Example #15
def make_overview_plot(filename, title, noip_arrs, ip_arrs):
    plt.title("Inner parallelism - " + title)

    
    plt.ylabel('Time (ms)', fontsize=12)

    x = 0
    barwidth = 0.5
    bargroupspacing = 1.5

    for z in zip(noip_arrs, ip_arrs):
        noip,ip = z
        noip_mean,noip_conf = conf_stats(noip)
        ip_mean,ip_conf = conf_stats(ip)

        b_noip = plt.bar(x, noip_mean, barwidth, color='r', yerr=noip_conf, ecolor='black', alpha=0.7)
        x += barwidth

        b_ip = plt.bar(x, ip_mean, barwidth, color='b', yerr=ip_conf, ecolor='black', alpha=0.7)
        x += bargroupspacing

    plt.xticks([0.5, 2.5, 4.5], ['50k', '100k', '200k'], rotation='horizontal')

    fontP = FontProperties()
    fontP.set_size('small')

    plt.legend([b_noip, b_ip], \
        ('no inner parallelism', 'inner parallelism'), \
        prop=fontP, loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=2)
   
    plt.ylim([0,62000])
    plt.savefig(output_file(filename))
    plt.clf()
Example #16
def delta():     
    beta = 0.99
    N = 1000
    u = lambda c: np.sqrt(c)
    W = np.linspace(0,1,N)
    X, Y = np.meshgrid(W,W)
    Wdiff = (X-Y).T
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    
    Vprime = np.zeros((N,1))
    delta = np.ones(1)
    tol = 10**-9
    it = 0
    max_iter = 500
    
    while (delta[-1] >= tol) and (it < max_iter):
        V = Vprime
        it += 1
        val = util_grid + beta*V.T
        Vprime = np.amax(val, axis=1)
        Vprime = Vprime.reshape((N,1))
        delta = np.append(delta, np.dot((Vprime-V).T, Vprime-V))
        
    plt.figure()
    plt.plot(delta[1:])
    plt.ylabel(r'$\delta_k$')
    plt.xlabel('iteration')
    plt.savefig('convergence.pdf')
    plt.clf()
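
The loop above is standard value-function iteration; the quantity appended to delta is the squared Euclidean norm ||Vprime - V||^2, so the tolerance applies to the squared change per sweep. A standalone sketch of the same convergence bookkeeping on a toy contraction map:

import numpy as np

V = np.zeros(5)
delta = np.ones(1)
while delta[-1] >= 1e-9:
    V_new = 0.5 * V + 1.0  # stand-in contraction; fixed point is 2.0
    delta = np.append(delta, np.dot(V_new - V, V_new - V))
    V = V_new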
Example #17
def disc_norm():
    x = np.linspace(-3,3,100)
    y = st.norm.pdf(x,0,1)
    fig, ax = plt.subplots()
    fig.canvas.draw()
    
    ax.plot(x,y)
    
    fill1_x = np.linspace(-2,-1.5,100)
    fill1_y = st.norm.pdf(fill1_x,0,1)
    fill2_x = np.linspace(-1.5,-1,100)
    fill2_y = st.norm.pdf(fill2_x,0,1)
    ax.fill_between(fill1_x,0,fill1_y,facecolor = 'blue', edgecolor = 'k',alpha = 0.75)
    ax.fill_between(fill2_x,0,fill2_y,facecolor = 'blue', edgecolor = 'k',alpha = 0.75)
    for label in ax.get_yticklabels():
        label.set_visible(False)
    for tick in ax.get_xticklines():
        tick.set_visible(False)
    for tick in ax.get_yticklines():
        tick.set_visible(False)
    
    plt.rc("font", size = 16)
    plt.xticks([-2,-1.5,-1])
    labels = [item.get_text() for item in ax.get_xticklabels()]
    labels[0] = r"$v_k$"
    labels[1] = r"$\varepsilon_k$"
    labels[2] = r"$v_{k+1}$"
    ax.set_xticklabels(labels)
    plt.ylim([0, .45])

    
    plt.savefig('discnorm.pdf')
    plt.clf()
Example #18
def build_plot(profilerResults):
    # Calculate each value.
    x = []
    mean = []
    std = []
    for t in range(profilerResults.getLookBack()*-1, profilerResults.getLookForward()+1):
        x.append(t)
        values = np.asarray(profilerResults.getValues(t))
        mean.append(values.mean())
        std.append(values.std())

    # Cleanup
    plt.clf()
    # Plot a line with the mean cumulative returns.
    plt.plot(x, mean, color='#0000FF')

    # Error bars starting on the first lookforward period.
    lookBack = profilerResults.getLookBack()
    firstLookForward = lookBack+1
    plt.errorbar(
        x=x[firstLookForward:], y=mean[firstLookForward:], yerr=std[firstLookForward:],
        capsize=3,
        ecolor='#AAAAFF', alpha=0.5
    )

    # Horizontal line at the level of the first cumulative return.
    plt.axhline(
        y=mean[lookBack],
        xmin=-1*profilerResults.getLookBack(), xmax=profilerResults.getLookForward(),
        color='#000000'
    )

    plt.xlim(profilerResults.getLookBack()*-1-0.5, profilerResults.getLookForward()+0.5)
    plt.xlabel('Time')
    plt.ylabel('Cumulative returns')
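
profilerResults above must provide getLookBack, getLookForward, and getValues(t); a hypothetical stand-in object to exercise build_plot:

import matplotlib.pyplot as plt

class FakeProfilerResults:
    # hypothetical stand-in for the interface build_plot expects
    def __init__(self, look_back, look_forward):
        self._lb, self._lf = look_back, look_forward
    def getLookBack(self):
        return self._lb
    def getLookForward(self):
        return self._lf
    def getValues(self, t):
        return [0.01 * t + 0.001 * i for i in range(10)]  # fabricated returns

build_plot(FakeProfilerResults(5, 10))
plt.savefig('profile_demo.png')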
Example #19
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
    import matplotlib.pyplot as plt

    fig = plt.gcf()
    plt.clf()
    ax = fig.add_subplot(211)
    orig_ax = kwargs.pop('ax', plt.gca())
    orig_axfreq = getattr(orig_ax, 'freq', None)

    ret = f(*args, **kwargs)
    assert(ret is not None)  # do something more intelligent

    ax = kwargs.pop('ax', plt.gca())
    if series is not None:
        dfreq = series.index.freq
        if isinstance(dfreq, DateOffset):
            dfreq = dfreq.rule_code
        if orig_axfreq is None:
            assert(ax.freq == dfreq)

    if freq is not None and orig_axfreq is None:
        assert(ax.freq == freq)

    ax = fig.add_subplot(212)
    try:
        kwargs['ax'] = ax
        ret = f(*args, **kwargs)
        assert(ret is not None)  # do something more intelligent
    except Exception:
        pass

    with ensure_clean() as path:
        plt.savefig(path)
Example #20
def plotGenomicregions(GPcount, name):
    """
    :param GPcount: is a list of tuples [(region, size),....()]
    :return:
    """
    """ Now we produce some pie charts """

    gr = ['tss', 'intergenic', 'intron', 'exon', 'upstream']
    size = [0, 0, 0, 0, 0]
    for a, b in GPcount:
        if a == 'tss':
            size[0] = b
        if a == 'intergenic':
            size[1] = b
        if a == 'intron':
            size[2] = b
        if a == 'exon':
            size[3] = b
        if a == 'upstream':
            size[4] = b
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral', 'cyan']
    explode = (0.1, 0, 0, 0, 0)  # only "explode" the 2nd slice
    plt.pie(size, explode=explode, labels=gr, colors=colors,
            autopct='%1.1f%%', shadow=True, startangle=90)
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    #plt.legend(['tss', 'intergenic', 'intron', 'exon', 'upstream'], loc='upper left')
    plt.axis('equal')
    plt.savefig('/ps/imt/e/20141009_AG_Bauer_peeyush_re_analysis/further_analysis/plots/' + name + '.svg')
    plt.clf()
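
A minimal call sketch for plotGenomicregions with fabricated counts (note the savefig path is hardcoded in the source and would need adjusting elsewhere):

GPcount = [('tss', 120), ('intergenic', 300), ('intron', 250),
           ('exon', 90), ('upstream', 60)]  # fabricated region sizes
plotGenomicregions(GPcount, 'region_pie')   # writes region_pie.svg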
Example #21
def test_contourf_transform_path_counting():
    ax = plt.axes(projection=ccrs.Robinson())
    plt.draw()

    # Capture the size of the cache before our test.
    gc.collect()
    initial_cache_size = len(cgeoaxes._PATH_TRANSFORM_CACHE)

    path_to_geos_counter = CallCounter(cartopy.mpl.patch, 'path_to_geos')
    with path_to_geos_counter:
        x, y, z = sample_data((30, 60))
        cs = plt.contourf(x, y, z, 5, transform=ccrs.PlateCarree())
        n_geom = sum([len(c.get_paths()) for c in cs.collections])
        del cs
        if not six.PY3:
            del c
        plt.draw()

    # Before the performance enhancement, the count would have been 2 * n_geom,
    # but should now be just n_geom.
    msg = ('The given geometry was transformed too many times (expected: %s; '
           'got %s) - the caching is not working.'
           '' % (n_geom, path_to_geos_counter.count))
    assert path_to_geos_counter.count == n_geom, msg

    # Check the cache has an entry for each geometry.
    assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == initial_cache_size + n_geom

    # Check that the cache is empty again once we've dropped all references
    # to the source paths.
    plt.clf()
    gc.collect()
    assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == initial_cache_size

    plt.close()
Example #22
def test_solve_poisson_becke_sa():
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 1e2, 500)
    r = rtf.get_radii()
    rhoy = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5*(-r/sigma)/sigma
    rho = CubicSpline(rhoy, rhod, rtf)
    v = solve_poisson_becke([rho])[0]

    s2s = np.sqrt(2)*sigma
    soly = erf(r/s2s)/r
    sold = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2

    if False:
        import matplotlib.pyplot as pt
        n = 10
        pt.clf()
        pt.plot(r[:n], soly[:n], label='exact')
        pt.plot(r[:n], v.y[:n], label='spline')
        pt.legend(loc=0)
        pt.savefig('denu.png')

    assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
    # Test the boundary condition at zero and infinity
    assert v.extrapolation.l == 0
    np.testing.assert_allclose(v.extrapolation.amp_left, np.sqrt(2/np.pi)/sigma)
    np.testing.assert_allclose(v.extrapolation.amp_right, 1.0)
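
The reference solution in this test is the Coulomb potential of a unit Gaussian charge, soly = erf(r/(sqrt(2)*sigma))/r, and sold is its analytic radial derivative. A quick standalone check that sold agrees with a numerical derivative of soly:

import numpy as np
from scipy.special import erf

sigma = 8.0
r = np.linspace(0.1, 50.0, 2000)
s2s = np.sqrt(2) * sigma
soly = erf(r / s2s) / r
sold = np.exp(-(r / s2s)**2) * 2 / np.sqrt(np.pi) / s2s / r - erf(r / s2s) / r**2
assert np.allclose(np.gradient(soly, r), sold, atol=1e-4)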
Example #23
    def display_graph_by_specific_mac(self, mac_address):

        G = nx.Graph()

        count = 0
        edges = set()
        edges_list = []


        for pkt in self.pcap_file:

            src = pkt[Dot11].addr1
            dst = pkt[Dot11].addr2

            if mac_address in [src, dst]:
                edges_list.append((src, dst))
                edges.add(src)
                edges.add(dst)

        plt.clf()
        plt.suptitle('Communicating with ' + str(mac_address), fontsize=14, fontweight='bold')
        plt.title("\n Number of Communicating Users: " + str(int(len(edges))))
        plt.rcParams.update({'font.size': 10})
        G.add_edges_from(edges_list)
        nx.draw(G, with_labels=True, node_color=MY_COLORS)
        plt.show()
Example #24
    def display_channel_efficiency(self):

        size = 0

        start_time = self.pcap_file[0].time
        end_time = self.pcap_file[len(self.pcap_file) - 1].time

        duration = (end_time - start_time)/1000

        for i in range(len(self.pcap_file) - 1):
            size += len(self.pcap_file[i])
        ans = (((size * 8) / duration) / BW_STANDARD_WIFI) * 100
        ans = float("%.2f" % ans)
        labels = ['utilized', 'unutilized']
        sizes = [ans, 100.0 - ans]
        colors = ['g', 'r']

        # Make a pie graph
        plt.clf()
        plt.figure(num=1, figsize=(8, 6))
        plt.axes(aspect=1)
        plt.suptitle('Channel efficiency', fontsize=14, fontweight='bold')
        plt.title("Bits/s: " + str(float("%.2f" % ((size*8)/duration))),fontsize = 12)
        plt.rcParams.update({'font.size': 17})
        plt.pie(sizes, labels=labels, autopct='%.2f%%', startangle=60, colors=colors, pctdistance=0.7, labeldistance=1.2)

        plt.show()
Example #25
    def display_PER(self):

        number_of_pkts = len(self.pcap_file)
        retransmission_pkts = 0

        for pkt in self.pcap_file:

            if (pkt[Dot11].FCfield & 0x8) != 0:  # 802.11 Retry bit set: retransmitted frame
                retransmission_pkts += 1

        ans = (retransmission_pkts / number_of_pkts)*100
        ans = float("%.2f" % ans)
        labels = ['Standard packets', 'Retransmitted packets']
        sizes = [100.0 - ans,ans]


        colors = ['g', 'firebrick']

        # Make a pie graph
        plt.clf()
        plt.figure(num=1, figsize=(8, 6))
        plt.axes(aspect=1)
        plt.suptitle('Retransmitted packet', fontsize=14, fontweight='bold')
        plt.rcParams.update({'font.size': 13})
        plt.pie(sizes, labels=labels, autopct='%.2f%%', startangle=60, colors=colors, pctdistance=0.7, labeldistance=1.2)

        plt.show()
Example #26
    def ensemble_pca(self, ref_ensemble=None, ref_first=True):
        data = prepare_pca_input(self._cgs)
        pca = PCA(n_components=2)
        if ref_ensemble:
            ref_data = prepare_pca_input(ref_ensemble)
            if ref_first:
                pca.fit(ref_data)
        if not ref_ensemble or not ref_first:
            pca.fit(data)
        reduced_data = pca.transform(data)
        if ref_ensemble:
            reduced_ref = pca.transform(ref_data)
            plt.scatter(reduced_ref[:, 0], reduced_ref[:, 1],
                        color="green", label="background")
        plt.scatter(reduced_data[:, 0], reduced_data[:,
                                                     1], color="blue", label="sampling")
        if self._reference_cg:
            data_true = prepare_pca_input([self._reference_cg])
            reduced_true = pca.transform(data_true)
            plt.scatter(reduced_true[:, 0], reduced_true[:,
                                                         1], color="red", label="reference")

        plt.xlabel("First principal component")
        plt.ylabel("Second principal component")
        figname = "pca_{}_rf{}.svg".format(self._cgs[0].name, ref_first)
        plt.savefig(figname)
        log.info("Figure {} created".format(figname))
        plt.clf()
        plt.close()
Example #27
    def display_graph(self):

        G = nx.Graph()

        count = 0
        edges = set()
        edges_list = []

        for pkt in self.pcap_file:
            if pkt.haslayer(Dot11Elt):
                src = pkt[Dot11].addr1
                dst = pkt[Dot11].addr2

                edges_list.append((src, dst))
                edges.add(src)
                edges.add(dst)


        plt.clf()
        filepath = os.path.splitext(self.path)[0]
        filename = basename(filepath)
        plt.suptitle('Connection Map of: '+ str(filename), fontsize=14, fontweight='bold')
        plt.title("\n Number of Users: " + str(int(len(edges))))
        plt.rcParams.update({'font.size': 10})
        G.add_edges_from(edges_list)
        nx.draw(G, with_labels=True, node_color=MY_COLORS)
        plt.show()
Example #28
    def graph_by_sender(self):

        mac_adresses = {}  # maps sender MAC address -> packet count
        for pkt in self.pcap_file:
            mac_adresses.update({pkt[Dot11].addr2: 0})
        for pkt in self.pcap_file:
            mac_adresses[pkt[Dot11].addr2] += 1

        MA = []
        for ma in mac_adresses:
            MA.append(mac_adresses[ma])

        plt.clf()
        plt.suptitle('Number of packets of every sender', fontsize=14, fontweight='bold')
        plt.bar(range(len(mac_adresses)), sorted(MA), align='center', color=MY_COLORS)

        plt.xticks(range(len(mac_adresses)), sorted(mac_adresses.keys()))

        plt.rcParams.update({'font.size': 10})

        plt.xlabel('Sender MAC addresses')
        plt.ylabel('Number of packets')

        # Set tick colors:
        ax = plt.gca()
        ax.tick_params(axis='x', colors='k')
        ax.tick_params(axis='y', colors='r')
        ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45)

        plt.show()
Example #29
    def view_delta_rmsd_vs_steps(self):
        self._calculate_complete_rmsd_matrix()
        fig, axes = plt.subplots(2)
        a_rmsd = np_nans(len(self._cg_sequence) // 2)
        min_rmsd = np_nans(len(self._cg_sequence) // 2)
        max_rmsd = np_nans(len(self._cg_sequence) // 2)
        for d in range(len(a_rmsd)):
            l = [self._rmsd[self._cg_sequence[i], self._cg_sequence[i + d]]
                 for i in range(len(self._cg_sequence) - d)]
            a_rmsd[d] = sum(l) / len(l)
            min_rmsd[d] = min(l)
            max_rmsd[d] = max(l)
        for ax in axes:
            ax.set_xlabel("Steps apart")
            ax.set_ylabel("Average RMSD")
            ax.plot(list(range(len(a_rmsd))), a_rmsd, label="Average RMSD")
            ax.plot(list(range(len(min_rmsd))), min_rmsd, label="Minimal RMSD")
            ax.plot(list(range(len(max_rmsd))), max_rmsd, label="Maximal RMSD")
            ax.plot([0, len(max_rmsd)], [np.max(self._rmsd), np.max(
                self._rmsd)], "-.", label="Maximal RMSD in whole simulation")
            ax.plot([0, len(max_rmsd)], [np.mean(self._rmsd), np.mean(
                self._rmsd)], "-.", label="Average RMSD in whole simulation")
            ax.legend(prop={'size': 6})
        axes[1].set_xlim([0, 50])

        plt.savefig("rmsd_steps_apart_{}.svg".format(self._cgs[0].name))

        plt.clf()
        plt.close()
Example #30
def cplot(data, limits=[None,None], CM = 'jet', fname='', ext='png'):
    """Make a color contour plot of data

    Usage: cplot(data, limits=[None,None], fname='')
    If no filename is specified a plot is displayed
    File format is ext (default is png)
    """

    SIZE = 12
    DPI  = 100

    nx, ny = data.shape[0], data.shape[1]
    data = data.reshape(nx,ny)
    scale  = SIZE/float(max(nx,ny))
    plt.figure(figsize=(scale*nx, scale*ny+1.0))
    plt.clf()
    c = plt.imshow(np.transpose(data), cmap=CM)
    plt.clim(limits)
    plt.axis([0,nx,0,ny])
    #cbar = plt.colorbar(c, ticks=np.arange(0.831,0.835,0.001), aspect = 20, orientation='vertical', shrink=0.72, extend='neither', spacing='proportional')
    #cbar = plt.colorbar(c, aspect = 40, orientation='vertical', shrink=0.72, extend='neither', spacing='proportional')
    #cbar = plt.colorbar(c, orientation='horizontal', shrink=1.0)
    cbar = plt.colorbar(c, orientation='vertical', shrink=0.72, extend='neither', spacing='proportional')
    
    cbar.ax.tick_params(labelsize=21,size=10)
    #cbar.ax.yaxis.set_ticks_position('left')
    #c.cmap.set_under(color='black')
    if len(fname) == 0:
        plt.show()
    else:
        plt.savefig(fname+'.'+ ext, format=ext, dpi=DPI, bbox_inches='tight', pad_inches=0.1)
        plt.close()
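
A minimal usage sketch for cplot (it assumes matplotlib.pyplot as plt and numpy as np are imported at module level; the field is fabricated):

import numpy as np

field = np.random.rand(64, 48)  # fabricated 2-D field
cplot(field, limits=[0.0, 1.0], fname='demo_field')  # writes demo_field.png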
Example #31
def real_time_cost(n_max, cost):
    plt.clf()
    plt.plot(cost[:, 0], cost[:, 1], color='orange')
    plt.xlim(-10, n_max)
    plt.pause(0.000000000001)
    plt.show()
Example #32
'''
Swap-based district simulation with a percentage tracker and an
optional animated bar plot.
'''

# iterations: a user can manipulate n for any amount of swaps.
n = 100
t = 0
p = n * .01
enumX = list(range(0, amountOfDist + 1, 1))
while t<n:
    #Optional percentage tracker/animated bar plot
    if t%p==0:
        per = (t/n)*(100)
        sys.stdout.write("\r%f%%" %per)
        sys.stdout.flush()
        if t%10 ==0:
            plt.clf()
            seatWins = gdt.getSeatWins()
            x_pos = [i for i, _ in enumerate(enumX)]
            plt.bar(x_pos, seatWins, color='green')
            display.display(plt.gcf())
            display.clear_output(wait=True)
    #Part of the code that handles swaps:
    proposal = list(rd.choice(swaps))
    x = proposal[0]
    y = proposal[1]
    proposal_G = copy.deepcopy(g)
    x_dist = proposal_G.nodes[x]['district']
    y_dist = proposal_G.nodes[y]['district']
    proposal_G.nodes[x]['district'] = y_dist
    proposal_G.nodes[y]['district'] = x_dist
    error = updateSwaps(proposal_G,x,y)
Example #33
def main():

    # Parse arguments
    parser = argparse.ArgumentParser(description="built luts for emulation.")
    parser.add_argument('munged_file', type=str, default='munged/combined_training_data.npz')
    parser.add_argument('-plot_figs', type=int, default=1)
    parser.add_argument('-fig_dir', type=str, default='figs')
    parser.add_argument('-save_dir', type=str, default='trained_models')
    parser.add_argument('-holdout_dim', type=int, default=-1)
    parser.add_argument('-holdout_slice', type=int, default=0)
    args = parser.parse_args()

    np.random.seed(13)


    npzf = np.load(args.munged_file)

    modtran_results = npzf['modtran_results']
    sixs_results = npzf['sixs_results']
    points = npzf['points']
    keys = npzf['keys']
    point_names = npzf['point_names']
    print(points.shape)
    simulator_wavelengths = npzf['sixs_wavelengths']
    emulator_wavelengths = npzf['modtran_wavelengths']
    solar_irr = npzf['sol_irr']
    print(keys)
    print(point_names)
    n_bands_modtran = int(modtran_results.shape[-1]/len(keys))
    n_bands_sixs = int(sixs_results.shape[-1]/len(keys))


    if args.holdout_dim == -1:
        perm = np.random.permutation(points.shape[0])
        test = perm[:int(0.1*len(perm))]
        train = perm[int(0.1*len(perm)):]
    elif args.holdout_dim == -2:
        perm = np.random.permutation(points.shape[0])
        train = perm.copy()
        test = train
        #test = perm[:int(0.1*len(perm))]
    else:
        if args.holdout_dim >= points.shape[1]:
            print('Holdout dim {} exceeds point dimension {}'.format(args.holdout_dim, points.shape[1]))
            quit()
        un_dim = np.unique(points[:,args.holdout_dim])
        if (args.holdout_slice >= len(un_dim)):
            print('Holdout slice {} > dim {} length: {}'.format(args.holdout_slice, args.holdout_dim, len(un_dim)))
            quit()
        test = np.where(points[:,args.holdout_dim] == un_dim[args.holdout_slice])[0]
        train = np.where(points[:,args.holdout_dim] != un_dim[args.holdout_slice])[0]


    print(test)
    print(train)

    band_range = np.arange(sixs_results.shape[-1])
    train_sixs = sixs_results[:,band_range]

    sixs_results_match_modtran = np.zeros(modtran_results.shape)
    for key_ind, key in enumerate(keys):
        band_range_m = np.arange(n_bands_modtran * key_ind, n_bands_modtran * (key_ind + 1))
        band_range_s = np.arange(n_bands_sixs * key_ind, n_bands_sixs * (key_ind + 1))

        x = simulator_wavelengths
        y = sixs_results[:,band_range_s]
        finterp = interpolate.interp1d(x,y)
        sixs_results_match_modtran[:,band_range_m] = finterp(emulator_wavelengths)


    train_modtran = modtran_results-sixs_results_match_modtran

    print(train_modtran.shape)
    base_save_name = os.path.join(args.save_dir,'emulator')
    if args.holdout_dim == -1:
        base_save_name += '_random'
    elif args.holdout_dim == -2:
        base_save_name += '_full'
    else:
        base_save_name += '_dim_{}_slice_{}'.format(args.holdout_dim, args.holdout_slice)

    monitor='val_loss'
        
    es = keras.callbacks.EarlyStopping(monitor=monitor, mode='min', verbose=1, patience=20, restore_best_weights=True)
    model = nn_model(train_sixs.shape, modtran_results.shape)

    simple_response_scaler = np.ones(train_modtran.shape[1])*100
    train_modtran *= simple_response_scaler
    model.fit(train_sixs[train,:], train_modtran[train,:], batch_size=1000, epochs=400,
              validation_data=(train_sixs[test,:], train_modtran[test,:]),callbacks=[es])
    train_modtran /= simple_response_scaler

    full_pred = model.predict(train_sixs)/simple_response_scaler
    full_pred = full_pred + sixs_results_match_modtran
   
    pred = full_pred[test, :]

    model.save(base_save_name)
    np.savez(base_save_name + '_aux.npz', lut_names=point_names, 
             rt_quantities=keys, 
             feature_scaler_mean=feature_scaler.mean_,response_scaler_mean=response_scaler.mean_,
             feature_scaler_var=feature_scaler.var_,response_scaler_var=response_scaler.var_,
             feature_scaler_scale=feature_scaler.scale_,response_scaler_scale=response_scaler.scale_,
             solar_irr=solar_irr, emulator_wavelengths=emulator_wavelengths,
             response_scaler=simple_response_scaler,
             simulator_wavelengths=simulator_wavelengths)

    np.savez(base_save_name + '_pred_results.npz', predicted_modtran=full_pred)

    if args.plot_figs == 0:
        quit()

    rdn, rdn_atm = beckman_rdn(full_pred,emulator_wavelengths)
    print(rdn.shape)


    

    fig = plt.figure(figsize=(10, 10 * 2 / (0.5 + 2)))
    gs = gridspec.GridSpec(ncols=len(keys), nrows=2, wspace=0.3, hspace=0.4)

    ref_rdn = np.genfromtxt('../isofit/examples/20171108_Pasadena/remote/ang20171108t184227_rdn_v2p11_BeckmanLawn_424.txt')[:,1]
    rdn_modtran, rdn_modtran_atm = beckman_rdn(modtran_results,emulator_wavelengths)



    cf = 100
    varset = np.where(np.logical_and.reduce((points[:,3] == 2.55, points[:,-1] == 0, points[:,-2] == 180, points[:,1] == 0, points[:,2] == 4, points[:,0] == 0.01)))[0]
    best_modtran = np.argmin(np.sum(np.power(rdn_modtran[:,:cf] - ref_rdn[:cf],2),axis=1))
    best_emu = np.argmin(np.sum(np.power(rdn[:,:cf] - ref_rdn[:cf],2),axis=1))
    plt.plot(emulator_wavelengths, ref_rdn, c='black', linewidth=0.8)

    #plt.fill_between(simulated_wavelengths, np.min(rdn_modtran[varset,:],axis=0), np.max(rdn_modtran[varset,:],axis=0),facecolor='red',alpha=0.5)
    #plt.fill_between(simulated_wavelengths, np.min(rdn[varset,:],axis=0), np.max(rdn[varset,:],axis=0),facecolor='green',alpha=0.5)

    plt.plot(emulator_wavelengths, rdn_modtran[varset,:].flatten(), c='red', linewidth=0.8, ls='--')
    plt.plot(emulator_wavelengths, rdn[varset,:].flatten(), c='green', linewidth=0.8, ls='--')
    
    plt.plot(emulator_wavelengths, rdn_modtran[best_modtran,:], c='red', linewidth=0.8)
    plt.plot(emulator_wavelengths, rdn[best_emu,:], c='green', linewidth=0.8)
    pointstr_modtran = ''
    pointstr_emu = ''
    for point_ind, pn in enumerate(point_names):
        pointstr_modtran += pn + ': {}\n'.format(points[best_modtran,point_ind])
        pointstr_emu += pn + ': {}\n'.format(points[best_emu,point_ind])

    plt.text(1000, 10 , pointstr_modtran, verticalalignment='top')
    plt.text(2000, 10 , pointstr_emu, verticalalignment='top')

    plt.savefig('rdn_plots/best_matches.png',dpi=200,bbox_inches='tight')



    cmap = plt.get_cmap('coolwarm')
    for dim in range(points.shape[-1]):
        #slice = np.take(points,np.arange(0,points.shape[0]),axis=dim)
        slice = points[:,dim]
        un_vals = np.unique(slice)
        print(point_names[dim])

        for _val, val in enumerate(un_vals):
            loc_rdn = np.mean(rdn[slice == val, :], axis=0)
            loc_rdn_var = np.std(rdn[slice == val, :], axis=0)
            loc_rdn_atm = np.mean(rdn_atm[slice == val, :], axis=0)

            plt.plot(emulator_wavelengths, ref_rdn, c='black', linewidth=0.8)
            plt.plot(emulator_wavelengths, loc_rdn, c=cmap(float(_val)/len(un_vals)), linewidth=0.8)
            plt.fill_between(emulator_wavelengths, np.min(rdn[slice==val,:],axis=0), np.max(rdn[slice==val,:],axis=0), alpha=0.5, facecolor=cmap(float(_val)/len(un_vals)))
            #plt.plot(simulated_wavelengths, loc_rdn_atm, c=cmap(float(_val)/len(un_vals)), ls='--', linewidth=0.8)

        plt.ylim([0,11])

        pointstr = '{}: {} - {}'.format(point_names[dim], un_vals[0],un_vals[-1])
        plt.text(2000, 8 , pointstr, verticalalignment='top')

        plt.savefig('{}/dim_{}.png'.format('rdn_plots', dim), dpi=200, bbox_inches='tight')
        plt.clf()


    for dim in range(points.shape[-1]):
        #slice = np.take(points,np.arange(0,points.shape[0]),axis=dim)
        slice = points[:,dim]
        un_vals = np.unique(slice)
        print(point_names[dim])

        for _val, val in enumerate(un_vals):
            loc_rdn = np.mean(rdn_modtran[slice == val, :], axis=0)
            loc_rdn_var = np.std(rdn_modtran[slice == val, :], axis=0)

            plt.plot(emulator_wavelengths, ref_rdn, c='black', linewidth=0.8)
            plt.plot(emulator_wavelengths, loc_rdn, c=cmap(float(_val)/len(un_vals)), linewidth=0.8)
            plt.fill_between(emulator_wavelengths, np.min(rdn_modtran[slice==val,:],axis=0), np.max(rdn_modtran[slice==val,:],axis=0), alpha=0.5, facecolor=cmap(float(_val)/len(un_vals)))
            #plt.plot(simulated_wavelengths, loc_rdn_atm, c=cmap(float(_val)/len(un_vals)), ls='--', linewidth=0.8)

        plt.ylim([0,11])

        pointstr = '{}: {} - {}'.format(point_names[dim], un_vals[0],un_vals[-1])
        plt.text(2000, 8 , pointstr, verticalalignment='top')

        plt.savefig('{}/modtran_dim_{}.png'.format('rdn_plots', dim), dpi=200, bbox_inches='tight')
        plt.clf()


    fig = plt.figure(figsize=(10, 10 * 3 / (0.5 + 3)))
    gs = gridspec.GridSpec(ncols=len(keys), nrows=3, wspace=0.3, hspace=0.4)

    print_keys = ['Total\nTransmittance', 'Atmospheric Path\nReflectance', 'Spherical Albedo']
    n_bands = int(modtran_results.shape[-1]/len(keys))
    for key_ind, key in enumerate(keys):

        ax = fig.add_subplot(gs[0, key_ind])
        band_range = np.arange(n_bands * key_ind, n_bands * (key_ind + 1))
        indices = [test,band_range]

        plt.plot(emulator_wavelengths,np.median(d2_subset(modtran_results,indices),axis=0),c='red')
        #plt.plot(np.mean(d2_subset(sixs_results,indices),axis=0),c='green')

        plt.plot(emulator_wavelengths, np.median(pred[:,band_range], axis=0), c='blue', ls='--')

        if key_ind == 1:
            plt.legend(['Modtran','Emulator'])
        
        plt.title(print_keys[key_ind])
        point_names = npzf['point_names']
        #plt.xlabel('Wavelength [nm]')
        if key_ind == 0:
            plt.ylabel('Modeled Output')


    for key_ind, key in enumerate(keys):

        ax = fig.add_subplot(gs[1, key_ind])
        band_range = np.arange(n_bands * key_ind, n_bands * (key_ind + 1))
        indices = [test, band_range]

        mean_rel_error = np.median(np.abs(d2_subset(modtran_results,indices) - pred[:,band_range]) / d2_subset(modtran_results,indices), axis=0)
        #mean_rel_error = np.mean(np.abs(d2_subset(modtran_results,indices) - pred[:,band_range]), axis=0)
        #mean_rel_sixs_error = np.mean(np.abs(d2_subset(modtran_results,indices) - d2_subset(sixs_results,indices)), axis=0)
        mean_rel_sixs_error = np.median(np.abs(d2_subset(modtran_results,indices) - d2_subset(sixs_results,indices)) / d2_subset(modtran_results,indices), axis=0)

        #plt.title(print_keys[key_ind])
        plt.plot(emulator_wavelengths, mean_rel_error, c='blue')
        plt.plot(emulator_wavelengths, mean_rel_sixs_error, c='green', ls='--')

        if key_ind == 1:
            plt.legend(['Emulator\nResidual','6s Residual'])
        lims = list(ax.get_ylim())
        lims[1] = min(lims[1],1)
        plt.ylim([0,lims[1]])
        #plt.xlabel('Wavelength [nm]')
        if key_ind == 0:
            plt.ylabel('Median Relative Residual')

    for key_ind, key in enumerate(keys):

        ax = fig.add_subplot(gs[2, key_ind])
        band_range = np.arange(n_bands * key_ind, n_bands * (key_ind + 1))
        indices = [test, band_range]

        #mean_rel_error = np.median(np.abs(d2_subset(modtran_results,indices) - pred[:,band_range]) / d2_subset(modtran_results,indices), axis=0)
        mean_rel_error = np.median(np.abs(d2_subset(modtran_results,indices) - pred[:,band_range]), axis=0)
        mean_rel_sixs_error = np.median(np.abs(d2_subset(modtran_results,indices) - d2_subset(sixs_results,indices)), axis=0)
        #mean_rel_sixs_error = np.median(np.abs(d2_subset(modtran_results,indices) - d2_subset(sixs_results,indices)) / d2_subset(modtran_results,indices), axis=0)

        #plt.title(print_keys[key_ind])
        plt.plot(emulator_wavelengths, mean_rel_error, c='blue')
        plt.plot(emulator_wavelengths, mean_rel_sixs_error, c='green', ls='--')

        if key_ind == 1:
            plt.legend(['Emulator\nResidual','6s Residual'])
        lims = list(ax.get_ylim())
        lims[1] = min(lims[1],1)
        plt.ylim([0,lims[1]])
        plt.xlabel('Wavelength [nm]')
        if key_ind == 0:
            plt.ylabel('Median Absolute Residual')



    plt.savefig('{}/mean_test_set.png'.format(args.fig_dir), bbox_inches='tight')
    plt.clf()

    fig = plt.figure(figsize=(10, 10 * 2 / (0.5 + 2)))
    gs = gridspec.GridSpec(ncols=len(keys), nrows=2, wspace=0.3, hspace=0.4)

    error_mean = np.zeros(len(test))
    for key_ind, key in enumerate(keys):
        band_range = np.arange(n_bands * key_ind, n_bands * (key_ind + 1))
        indices = [test, band_range]
        loc_error = np.nansum(np.abs(d2_subset(modtran_results,indices) - pred[:,band_range]),axis=1)
        error_mean += loc_error / np.nanmax(loc_error)
    order = np.argsort(error_mean)
    #order = np.argsort(points[test,0])

    lims_main = [[0, 1], [0, 0.25], [0, 0.35],[0,1]]
    lims_diff = [[0, 0.25], [0, 0.1], [0, 0.1],[0,1]]

    #for row in range(np.sum(test)):
    for row_ind, row in enumerate(order[np.linspace(0,len(order)-1,20,dtype=int)].tolist()):
        n_bands = int(modtran_results.shape[-1]/len(keys))

        for key_ind, key in enumerate(keys):

            ax = fig.add_subplot(gs[0, key_ind])
            band_range = np.arange(n_bands * key_ind, n_bands * (key_ind + 1))
            indices = [test,band_range]

            plt.plot(emulator_wavelengths, d2_subset(modtran_results,indices)[row,:],c='red')
            plt.plot(emulator_wavelengths, d2_subset(sixs_results,indices)[row,:],c='green')

            plt.plot(emulator_wavelengths, pred[row,band_range], c='blue', ls='--')
            plt.title(print_keys[key_ind])
            plt.ylim(lims_main[key_ind])

        for key_ind, key in enumerate(keys):

            ax = fig.add_subplot(gs[1, key_ind])
            band_range = np.arange(n_bands * key_ind, n_bands * (key_ind + 1))
            indices = [test,band_range]

            #mean_rel_error = np.abs(d2_subset(modtran_results,indices)[row,:] - pred[row,band_range]) / d2_subset(modtran_results,indices)[row,:]
            mean_rel_error = np.abs(d2_subset(modtran_results,indices)[row,:] - pred[row,band_range])

            mean_rel_sixs_error = np.abs(d2_subset(modtran_results,indices)[row,:] - d2_subset(sixs_results,indices)[row,:])


            plt.title(print_keys[key_ind])
            plt.plot(emulator_wavelengths, mean_rel_error, c='blue')
            plt.plot(emulator_wavelengths, mean_rel_sixs_error, c='green', ls='--')

            lims = lims_diff[key_ind]
            #lims = list(ax.get_ylim())
            #lims[1] = min(lims[1],1)
            if key_ind == 1:
                pointstr = ''
                for point_ind, pn in enumerate(point_names):
                    pointstr += pn + ': {}\n'.format(points[test,:][row, point_ind])

                plt.text(200, 0.9*(lims[1]-lims[0])+lims[0], pointstr, verticalalignment='top')

            plt.ylim([0,lims[1]])

        plt.savefig('{}/test_set_{}.png'.format(args.fig_dir, row_ind), bbox_inches='tight')
        plt.clf()
Example #34
    def plot(self, x_scaling=1, y_scaling=1, y_shift=0, title=None, x_label='x', y_label=None,
            file_name=None, same_axis=True, dpi=300):
        '''
        This supports a few possibilities for data storage in the self.__value
        member.

        Pandas Series: if self.__value is a Pandas Series, plot against the index.
        However, the type stored in the Series matters. Suppose it is a Series
        of numpy arrays; they must all have the same rank. This plot method
        assumes an iterable type of the same length for every entry in the
        Series, and plots all elements of each entry against the Series index,
        either all in one axis or each element in its own axis.
        '''

        plt.clf()
        plt.cla()
        plt.close()

        if not isinstance(self.__value, pandas.core.series.Series):
            return
        if len(self.__value) == 1:
            return

        if not title:
            title = self.info
        if not y_label:
            if self.latex_name != 'null-quantity-latex-name':
                y_label = self.latex_name
            elif self.formal_name != 'null-quantity-formal-name':
                y_label = self.formal_name
            elif self.name != 'null-quantity-name':
                y_label = self.name
            else:
                assert False

        if isinstance(self.__value[0], (float, int, bool)):
            n_dim = 1
            # Turn a series of scalars into a series of one-element lists to
            # allow the uniform indexing below
            for i in range(len(self.__value[:])):
                self.__value.iat[i] = [self.__value.iat[i]]  # list of one element
        else:
            n_dim = len(self.__value[0])

        x = [i*x_scaling for i in self.__value.index]
        #x = self.__value.index # potential bug in matplotlib

        if same_axis:
            fig = plt.figure(self.__formal_name)
        for i in range(n_dim):
            if not same_axis:
                fig = plt.figure(self.__formal_name+str(i))
            y = list()
            for j in range(len(x)):
                y.append( self.__value.iat[j][i] ) # must use iat()

            y = [(k-y_shift)*y_scaling for k in y]

            plt.xlabel(x_label)
            plt.ylabel(y_label)

            plt.title(title)

            plt.plot(x, y)

            if not same_axis and file_name:
                plt.savefig(file_name+str(i)+'.png', dpi=dpi)

        if same_axis and file_name:
            plt.savefig(file_name+'.png',dpi=dpi)

        return
Example #35
 def test_plot_loss(self, net_fitted):
     from nolearn.lasagne.visualize import plot_loss
     plot_loss(net_fitted)
     plt.clf()
     plt.cla()
Example #36
 def test_plot_conv_weights(self, net_fitted):
     from nolearn.lasagne.visualize import plot_conv_weights
     plot_conv_weights(net_fitted.layers_['conv1'])
     plot_conv_weights(net_fitted.layers_['conv2'], figsize=(1, 2))
     plt.clf()
     plt.cla()
Example #37
def trace2xml(traces,parser,outfolder,netsource,doPlot=False,seedresp=None):
    """
    Calibrate accelerometer data, derive peak ground motion values, and write a ShakeMap-compatible data file.

    Takes a sequence of ObsPy Trace objects and an ObsPy Parser (such as from a dataless SEED file) and
    calibrates the data in the Traces, derives peak ground motions for each (pga,pgv,psa) and then 
    writes those data to a ShakeMap-compatible XML data file.
    
    @param traces - Sequence of ObsPy Trace objects, containing acceleration data in units of m/s^2.
    @param parser - ObsPy Parser object.  Can also be None, in which case calibration step is NOT performed, and station coordinates will have to be present in the input traces.
    @param outfolder - Path (string) where output data XML files and QA plots should be written.
    @param netsource - Name of data source (knet, geonet, etc.)
    """
    if parser is not None:
        vdict = parser.getInventory()
    else:
        vdict = None
    #Make the top level tag - stationlist
    stationlist_tag = Tag('stationlist',attributes={'created':datetime.utcnow().strftime('%s')})
    first_station = 1
    current_tag = ''
    plotfiles = []
    hfmt = dates.DateFormatter('%H:%M:%S') #used for formatting dates in plots
    for trace in traces:
        net = trace.stats['network']
        station = trace.stats['station']
        location = trace.stats['location']
        channel = trace.stats['channel']
        channel_id = '%s.%s.%s.%s' % (net,station,location,channel)
        if parser is not None:
            paz = parser.getPAZ(channel_id)
            coordinates = parser.getCoordinates(channel_id)
        else:
            try:
                coordinates = {'latitude':trace.stats['lat'],
                               'longitude':trace.stats['lon'],
                               'elevation':trace.stats['height']}
            except:
                try:
                    coordinates = {'latitude':trace.stats['coordinates']['latitude'],
                                   'longitude':trace.stats['coordinates']['longitude'],
                                   'elevation':trace.stats['coordinates']['elevation']}
                except:
                    sys.stderr.write('Could not get station coordinates from trace object of station %s\n' % station)
                    continue

        #If we have separate calibration data, apply it here
        if parser is not None:
            trace.simulate(paz_remove=paz,remove_sensitivity=True,simulate_sensitivity=False)
            trace.stats['units'] = 'acc' #ASSUMING THAT ANY SAC DATA IS ACCELERATION!
        else:
            if trace.stats['units'] != 'acc':
                if seedresp is None:
                    raise Exception('Must have a PolesAndZeros data structure (i.e., from dataless SEED) or a RESP file.')
                else:
                    pre_filt = (0.01, 0.02, 20, 30)
                    try:
                        trace.simulate(paz_remove=None, pre_filt=pre_filt, seedresp=seedresp)
                    except Exception as error:
                        pass

        #make the component tag to hold the measurements
        comptag = Tag('comp',attributes={'name':channel})
        if trace.stats['units'] == 'acc':
            delta = trace.stats['sampling_rate']
            trace.detrend('linear')
            trace.detrend('demean')
            trace.taper(max_percentage=0.05, type='cosine')
            
            
            trace.filter('highpass',freq=FILTER_FREQ,zerophase=True,corners=CORNERS)
            
            trace.detrend('linear')
            trace.detrend('demean')

            # Get the Peak Ground Acceleration
            pga = abs(trace.max())

            (psa03, psa10, psa30) = smPSA(trace, delta)

            #convert accelerations to %g
            psa03 = psa03/0.0981
            psa10 = psa10/0.0981
            psa30 = psa30/0.0981
            pga = pga/0.0981

            #make the tags for the individual measurements, add them to comp tag
            psa03tag = Tag('psa03',attributes={'value':psa03})
            psa10tag = Tag('psa10',attributes={'value':psa10})
            psa30tag = Tag('psa30',attributes={'value':psa30})
            acctag = Tag('acc',attributes={'value':pga})

            comptag.addChild(acctag)
            comptag.addChild(psa03tag)
            comptag.addChild(psa10tag)
            comptag.addChild(psa30tag)
            
            #plot the acceleration (top) and velocity
            if doPlot:
                plt.clf()
                ax1 = plt.subplot(2,1,1)
                atimes = trace.times()
                atimes = [(trace.stats['starttime'] + t).datetime for t in atimes]
                matimes = dates.date2num(atimes)
                plt.plot(matimes,trace.data)
                ax1.xaxis.set_major_locator(dates.MinuteLocator())
                ax1.xaxis.set_major_formatter(hfmt)
                plt.title('Acceleration %s' % channel_id)
                plt.ylabel('$m/s^2$')
                plt.xticks([])
                #labels = ax1.get_xticklabels()
                #ax1.set_xticklabels( labels, rotation=45 ) ;

        if trace.stats['units'] == 'vel': #don't integrate the broadband
            vtimes = trace.times()
            vtimes = [(trace.stats['starttime'] + t).datetime for t in vtimes]
            mvtimes = dates.date2num(vtimes)
            vtrace = trace.copy()
        else:
            vtrace = trace.copy()
            vtrace.integrate() # vtrace now has velocity
            vtimes = vtrace.times()
            vtimes = [(vtrace.stats['starttime'] + t).datetime for t in vtimes]
            mvtimes = dates.date2num(vtimes)
        if doPlot:
            if trace.stats['units'] == 'acc':
                ax2 = plt.subplot(2,1,2)
            else:
                ax2 = plt.subplot(1,1,1)
            plt.plot(mvtimes,vtrace.data)
            ax2.xaxis.set_major_locator(dates.MinuteLocator())
            ax2.xaxis.set_major_formatter(hfmt)
            plt.title('Velocity %s' % channel_id)
            plt.ylabel('$m/s$')
            plt.xticks(rotation=30)
            pngfile = os.path.join(outfolder,'%s.png' % channel_id)
            plt.savefig(pngfile)
            plotfiles.append(pngfile)
            plt.close()

        # Get the Peak Ground Velocity
        pgv = abs(vtrace.max())

        #convert velocity to cm/s
        pgv = pgv * 100

        #make the tags for the individual measurements
        veltag = Tag('vel',attributes={'value':pgv})
        comptag.addChild(veltag)

        code = '%s.%s' % (net,station)
        if current_tag == code:		# Same station: just add the comp tag
            stationtag.addChild(comptag)
        else:				# New station: start a new station tag
            if not first_station:	# Close out the previous station
                stationlist_tag.addChild(stationtag)
            station_name = 'UNK'
            if vdict is not None:
                for sta in vdict['stations']:
                    if sta['station_id'] == '%s.%s' % (net,station):
                        station_name = sta['station_name']
                        break
                instrument = 'UNK'
                for cha in vdict['channels']:
                    if cha['channel_id'] == channel_id:
                        instrument = cha['instrument']
                        break
                source = ''
                for netw in vdict['networks']:
                    if netw['network_code'] == net:
                        source = netw['network_name']
                        break
            else:
                station_name = trace.stats['station']
                instrument = ''
                source = ''
            lat = coordinates['latitude']
            lon = coordinates['longitude']
            stationtag = Tag('station',attributes={'code':code,'name':station_name,
                                                   'insttype':instrument,'source':source,
                                                   'netid':net,'commtype':'DIG',
                                                   'lat':lat,'lon':lon,
                                                   'loc':station_name})
            stationtag.addChild(comptag)
            current_tag = code
            first_station = 0
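A hedged aside on the unit conversions above: dividing by 0.0981 folds two steps into one constant, dividing by g = 9.81 m/s^2 and then multiplying by 100 to express the motion as percent of g. A minimal sketch (the helper name is illustrative, not from the original source):
def ms2_to_percent_g(value_ms2, g=9.81):
    # value [m/s^2] / 9.81 [m/s^2 per g] * 100 [%]  ==  value / 0.0981
    return value_ms2 / g * 100.0

assert abs(ms2_to_percent_g(9.81) - 100.0) < 1e-9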
    def w_CB(change):
        from scipy.signal import convolve2d
        from cv2 import resize, INTER_CUBIC, cvtColor, COLOR_RGB2GRAY

        data = change['new']
        if len(data[0]) > 2:
            # Get strokes information
            x = np.array(data[0])
            y = np.array(data[1])
            t = np.array(data[2])

            # assuming there is at least 200ms between each stroke
            line_breaks = np.where(np.diff(t) > 200)[0]
            # adding end of array
            line_breaks = np.append(line_breaks, t.shape[0])

            # Plot to canvas
            from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
            fig = plt.figure()
            canvas = FigureCanvas(fig)
            ax = fig.gca()

            # plot all strokes
            plt.plot(x[:line_breaks[0]],
                     y[:line_breaks[0]],
                     color='black',
                     linewidth=4)
            for i in range(1, len(line_breaks)):
                plt.plot(x[line_breaks[i - 1] + 1:line_breaks[i]],
                         y[line_breaks[i - 1] + 1:line_breaks[i]],
                         color='black',
                         linewidth=4)

            plt.xlim(0, 460)
            plt.ylim(0, 250)
            plt.axis("off")

            canvas.draw()  # draw the canvas, cache the renderer

            # convert to numpy array
            imageflat = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
            # reshape to (height, width, 3); these dimensions must match the
            # rendered canvas size (figure size in inches times the canvas dpi)
            image = np.reshape(imageflat, (288, 432, 3))

            # Crop to the part containing the writing
            ind = np.where(image < 255)

            D0 = ind[0].max() - ind[0].min()
            D1 = ind[1].max() - ind[1].min()

            C0 = int(0.5 * (ind[0].max() + ind[0].min()))
            C1 = int(0.5 * (ind[1].max() + ind[1].min()))

            if D0 > D1:
                D = D0
            else:
                D = D1

            L = int(D / 2.0) + 20
            image = image[C0 - L:C0 + L, C1 - L:C1 + L, :]

            # Convert to gray
            image = 255 - cvtColor(image, COLOR_RGB2GRAY)

            # Low pass filter and resize
            k = 12
            I = convolve2d(image, np.ones((k, k)) / k**2.0, mode="same")

            # Resize with opencv
            I = resize(I, dsize=(28, 28), interpolation=INTER_CUBIC)

            # Normalize to [0, 1], boost contrast by a factor of 3, then clip back to [0, 1]
            I = I / I.max()
            I = I * 3.0
            I = I.clip(0, 1)

            # Get a feature vector
            if image_input:
                X = I[np.newaxis, :, :, np.newaxis]

            else:
                X = I.reshape((1, 28 * 28)).astype(np.float64)

                # Standardization
                if scaler is not None:
                    X = scaler.transform(X)

            # Apply the classifier
            y_prediction = apply_classifier(X)

            #title = "Prediction: {} ({:.02f})".format(y_prediction, v)
            title = "Prediction: {}".format(y_prediction)

            # draw the converted image
            plt.clf()
            plt.imshow(I,
                       aspect='equal',
                       cmap=mpl.cm.binary,
                       interpolation='none')
            plt.title(title)
            plt.axis("off")
            plt.show()

            # To erase after tracing
            #change['owner'].data = [[], [], []]

            # Schedule for clearing
            out.clear_output(wait=True)
        else:
            pass
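A self-contained sketch of the render-to-array step used in w_CB above; it queries the canvas for its pixel size instead of hard-coding (288, 432). tostring_rgb matches the older matplotlib API used above, while newer releases expose buffer_rgba instead. The helper name is illustrative:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

def render_strokes_to_array(x, y):
    # Draw the strokes off-screen and return the pixels as an (H, W, 3) uint8 array.
    fig = plt.figure()
    canvas = FigureCanvas(fig)
    plt.plot(x, y, color='black', linewidth=4)
    plt.axis('off')
    canvas.draw()
    w, h = canvas.get_width_height()  # avoids a hard-coded reshape
    buf = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
    plt.close(fig)
    return buf.reshape((h, w, 3))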
def telluric_remove(bstarwave, bstar, bairmass, wave, object, airmass,
                    variance):
    import numpy as np
    import matplotlib.pyplot as plt
    from tmath.wombat.inputter import inputter
    from tmath.wombat.yesno import yesno
    from tmath.wombat.womscipyrebin import womscipyrebin
    from tmath.wombat.womget_element import womget_element
    from tmath.pydux.xcor import xcor
    from tmath.pydux.finalscaler import finalscaler
    bstartmp = womscipyrebin(bstarwave, bstar, wave)
    #    plt.cla()
    #    plt.plot(bstarwave,bstartmp)
    #    plt.pause(0.01)
    #    answer=yesno('y')
    print('\nThe ratio of airmasses (object/B-star) is {}'.format(airmass /
                                                                  bairmass))
    if (airmass / bairmass > 3.0) or (airmass / bairmass < 0.33):
        print('\nWARNING: OBJECT AND B-STAR HAVE WILDLY DIFFERENT')
        print('AIRMASSES: ATMOSPHERIC BAND DIVISION MAY BE LOUSY\n')

    wmin = wave[0]
    wmax = wave[-1]
    npix = len(object)
    wdelt = wave[1] - wave[0]
    print('wdelt', wdelt)
    lag = np.zeros(3)
    lagflag = [False] * 3
    xfactor = 10
    maxlag = 200
    print('\nCross-correlating object with B-star spectrum\n')
    if (wmin < 6200) and (wmax > 6400) and (wmax < 6900):
        indblue = womget_element(wave, 6200)
        indred = womget_element(wave, 6400)
        lag[0] = xcor(object[indblue:indred + 1], bstartmp[indblue:indred + 1],
                      xfactor, maxlag)
        lagflag[0] = True
        print('The shift at the 6250A band is {} angstroms'.format(lag[0] *
                                                                   wdelt))
    if (wmin < 6800) and (wmax > 6500):
        indblue = womget_element(wave, 6800)
        indred = womget_element(wave, 6950)
        obb = object[indblue:indred + 1]
        bb = bstartmp[indblue:indred + 1]
        lag[1] = xcor(obb, bb, xfactor, maxlag)
        lagflag[1] = True
        print('The shift at the B band is {} angstroms'.format(lag[1] * wdelt))
    if (wmin < 7500) and (wmax > 8000):
        indblue = womget_element(wave, 7500)
        indred = womget_element(wave, 8000)
        lag[2] = xcor(object[indblue:indred + 1], bstartmp[indblue:indred + 1],
                      xfactor, maxlag)
        print('The shift at the A band is {} angstroms'.format(lag[2] * wdelt))
        lagflag[2] = True
    if (sum(lagflag) > 0):
        avglag = np.sum(lag) / sum(lagflag)
        angshift = avglag * wdelt
        print('The mean shift is {} Angstroms'.format(angshift))
    else:
        angshift = 0.0
    bstartmpcopy = bstartmp.copy()
    telluric_done = False
    plt.clf()
    while (not telluric_done):
        bstartmp = bstartmpcopy.copy()
        tmp = womscipyrebin(wave + angshift, bstartmp, wave)
        bstartmp = tmp.copy()
        bstartmp = bstartmp**((airmass / bairmass)**0.55)
        newobject = object / bstartmp
        bvar = variance / bstartmp
        print('\nPlotting before and after atmospheric band correction\n')
        plt.cla()
        ymin, ymax = finalscaler(object)
        plt.plot(wave, object, drawstyle='steps-mid', color='r')
        plt.plot(wave, newobject, drawstyle='steps-mid', color='k')
        plt.pause(0.01)
        print('Is this OK?')
        answer = yesno('y')
        if (answer == 'n'):
            angshift = inputter('Enter B-star shift in Angstroms: ', 'float',
                                False)
        else:
            telluric_done = True
    return newobject, bvar, angshift
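xcor itself is not shown in this excerpt; below is a hedged, NumPy-only sketch of the kind of lag estimate it returns for each telluric band (the helper name is illustrative). As in the code above, the pixel lag is converted to Angstroms by multiplying by wdelt:
import numpy as np

def simple_xcor_lag(obj, ref, maxlag=200):
    """Integer pixel shift of obj relative to ref that maximizes the cross-correlation."""
    obj = obj - obj.mean()
    ref = ref - ref.mean()
    lags = np.arange(-maxlag, maxlag + 1)
    corr = [np.sum(obj[max(0, l):len(obj) + min(0, l)] *
                   ref[max(0, -l):len(ref) + min(0, -l)]) for l in lags]
    return lags[int(np.argmax(corr))]

ref = np.exp(-0.5 * ((np.arange(1000) - 500) / 20.0) ** 2)   # synthetic absorption band
obj = np.roll(ref, 7)                                        # shifted copy
assert simple_xcor_lag(obj, ref) == 7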
Example #40
0
def main():
    args = argparser()
    #use the translation table to establish neutral baselines for random selection of mutations from among synonymous and nonsynonymous mutations
    alldata = parse_custom(args.input)
    translate = {
        'TTT': 'F',
        'TTC': 'F',
        'TTA': 'L',
        'TTG': 'L',
        'TCT': 'S',
        'TCC': 'S',
        'TCA': 'S',
        'TCG': 'S',
        'TAT': 'Y',
        'TAC': 'Y',
        'TAA': 'Stop',
        'TAG': 'Stop',
        'TGT': 'C',
        'TGC': 'C',
        'TGA': 'Stop',
        'TGG': 'W',
        'CTT': 'L',
        'CTC': 'L',
        'CTA': 'L',
        'CTG': 'L',
        'CCT': 'P',
        'CCC': 'P',
        'CCA': 'P',
        'CCG': 'P',
        'CAT': 'H',
        'CAC': 'H',
        'CAA': 'Q',
        'CAG': 'Q',
        'CGT': 'R',
        'CGC': 'R',
        'CGA': 'R',
        'CGG': 'R',
        'ATT': 'I',
        'ATC': 'I',
        'ATA': 'I',
        'ATG': 'M',
        'ACT': 'T',
        'ACC': 'T',
        'ACA': 'T',
        'ACG': 'T',
        'AAT': 'N',
        'AAC': 'N',
        'AAA': 'K',
        'AAG': 'K',
        'AGT': 'S',
        'AGC': 'S',
        'AGA': 'R',
        'AGG': 'R',
        'GTT': 'V',
        'GTC': 'V',
        'GTA': 'V',
        'GTG': 'V',
        'GCT': 'A',
        'GCC': 'A',
        'GCA': 'A',
        'GCG': 'A',
        'GAT': 'D',
        'GAC': 'D',
        'GAA': 'E',
        'GAG': 'E',
        'GGT': 'G',
        'GGC': 'G',
        'GGA': 'G',
        'GGG': 'G'
    }
    ratios = get_ratios(translate)
    #collect ontology data into a frame.
    ontdf = construct_ont_table(alldata, ratios)
    ontdf = ontdf.dropna()
    for col in ontdf.columns:
        if col != 'Term':
            ontdf['Log' + col] = np.log10(ontdf[col].astype(float))
    genedf = construct_gene_table(alldata, ratios)
    genedf = genedf.dropna()

    for col in genedf.columns:
        if col != 'GeneID' and col != 'Terms':
            genedf['Log' + col] = np.log10(genedf[col].astype(float))

    #insert seaborn calls on the dataframes below.
    sns.scatterplot(x='LogSyn3Rate', y='LogNon12Rate', hue='DnDs', data=ontdf)
    plt.savefig(args.prefix + 'OntologyMutationRates.png')
    plt.clf()
    sns.scatterplot(x='LogSyn3Rate', y='LogNon12Rate', hue='DnDs', data=genedf)
    plt.savefig(args.prefix + "GeneMutationRates.png")
    plt.clf()
    #the above two graphs indicate that synonymous and nonsynonymous rates for each gene, and each ontology, are correlated. They further show what proportion of genes fall below or above the DnDs threshold for functional conservation.
    sns.scatterplot(x='LogExonLen', y='LogDnDs', data=ontdf)
    plt.savefig(args.prefix + 'OntologyDnDsVariability.png')
    plt.clf()
    sns.scatterplot(x='LogExonLen', y='LogDnDs', data=genedf)
    plt.savefig(args.prefix + 'GeneDnDsVariability.png')
    plt.clf()
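get_ratios is not shown here; the following is a hedged sketch of the neutral baseline it plausibly computes from the translate table: for every codon, classify each of its nine possible single-base substitutions as synonymous (same amino acid, or both Stop) or nonsynonymous. The function name is illustrative:
def count_syn_nonsyn(translate):
    """Count synonymous vs nonsynonymous single-base changes over all 64 codons."""
    syn = nonsyn = 0
    for codon, aa in translate.items():
        for pos in range(3):
            for base in 'ACGT':
                if base == codon[pos]:
                    continue
                mutant = codon[:pos] + base + codon[pos + 1:]
                if translate[mutant] == aa:
                    syn += 1
                else:
                    nonsyn += 1
    return syn, nonsyn
Dividing these two counts gives the expected synonymous:nonsynonymous ratio under neutrality, against which the observed per-gene and per-ontology rates can be compared.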
Example #41
0
def create_multiple_timelines_plot(data_dict, procranks_dict, parallelization, path=None, figsize=(12, 8), unordered=False,
                                   cpu=False, title=None, ylabels=None, yaxis=None, add_border=False, show_ranks=True,
                                   label_fontsize=18, title_fontsize=20, ticks_fontsize=12):

    """
    This function plots multiple timelines (one per entry in data_dict) as stacked subplots sharing the time axis.
    :param data_dict:
    :param procranks_dict:
    :param path:
    :param figsize:
    :param unordered:
    :param cpu:
    :param title:
    :param ylabels:
    :param yaxis:
    :param parallelization:
    :param add_border:
    :param show_ranks:
    :param label_fontsize:
    :param title_fontsize:
    :param ticks_fontsize:
    :return:
    """

    # Initialize figure
    fig = plt.figure(figsize=figsize)
    plt.clf()

    ax = plt.gca()
    fig.subplots_adjust(hspace=0.2)

    #   subplot(211)
    #produces a subaxes in a figure which represents the top plot (i.e. the
    #first) in a 2 row by 1 column notional grid

    ntimelines = len(data_dict)

    legend_entries = []
    legend_names = []
    unique_phases = []  # A LIST OF THE UNIQUE PHASE NAMES

    shared_axis = None
    last_axis = None
    for index in range(ntimelines):

        output_path = list(data_dict.keys())[index]  # list() needed under Python 3
        data = data_dict[output_path]

        # Add subplot
        subplot_spec = ntimelines * 100 + 10 + index + 1  # RCP integer: ntimelines rows, 1 column, panel index+1
        ax = plt.subplot(subplot_spec, sharex=shared_axis)

        # Set x axis grid
        ax.xaxis.grid(linestyle="dotted", linewidth=2.0)

        # Determine the number of processes
        procranks = procranks_dict[output_path]
        nprocs = len(procranks)

        durations_list = []
        totaldurations = np.zeros(nprocs)
        patch_handles = []

        # Get the ordering
        if unordered: yticks = np.array(procranks).argsort().argsort()
        else: yticks = procranks

        # Make the timeline plot, consisting of a set of bars of the same color for each simulation phase
        for phase, starttimes, endtimes in data:

            durations = np.array(endtimes) - np.array(starttimes)
            durations_list.append(durations)

            totaldurations += durations

            patch_handle = ax.barh(yticks, durations, color=colors[phase], align='center', left=starttimes, alpha=0.8, lw=0)
            patch_handles.append(patch_handle)

            if index == 0:
                if phase not in unique_phases and not (phase == "comm" and nprocs == 1):
                    unique_phases.append(phase)
                    legend_entries.append(patch_handle)
                    legend_names.append(phase_label_names[phase])

        #plt.plot(t, s1)

        # Set axis limits
        #ax.set_xlim([xmin, xmax])
        ax.set_ylim([-0.5, nprocs-0.5])

        # Hide process ranks
        if not show_ranks:
            ax.set_yticks([])
            ax.set_yticklabels([])

        # Set x label
        if ax.is_last_row(): ax.set_xlabel('Time (s)', fontsize=label_fontsize)

        # Set y label
        ax.set_ylabel("Processes")

        # Set title
        if parallelization[output_path].nprocesses > 1:
            if parallelization[output_path].nthreads > 1:
                subplot_title = "Hybrid task+data parallelization" if parallelization[output_path].data_parallel else "Hybrid task parallelization"
            else:
                subplot_title = "Multiprocessing task+data parallelization" if parallelization[output_path].data_parallel else "Multiprocessing task parallelization"
        else: subplot_title = None
        ax.set_title(subplot_title)

        # Set axes tick formatter
        ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
        ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))

        # Remove border if requested
        if not add_border:

            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.spines['bottom'].set_visible(False)
            ax.spines['left'].set_visible(False)
            ax.tick_params(axis=u'both', which=u'both', length=0)

        if ax.is_last_row():
            # Set ticks fontsize
            plt.setp(ax.get_xticklabels(), rotation='horizontal', fontsize=ticks_fontsize)
            plt.setp(ax.get_yticklabels(), rotation='horizontal', fontsize=ticks_fontsize)
            last_axis = ax
        else:
            # make these tick labels invisible
            plt.setp(ax.get_xticklabels(), visible=False)

        if index == 0: shared_axis = ax

    # Set the plot title
    if title is not None: plt.suptitle(title, fontsize=title_fontsize)

    # Put a legend below current axis
    legend = last_axis.legend(legend_entries, legend_names, loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=False, shadow=False, ncol=4, prop={'size': 12})

    # Change legend properties
    frame = legend.get_frame()
    frame.set_linewidth(0)
    frame.set_facecolor('0.85')
    legend.legendPatch.set_alpha(0.75)

    # Save the figure
    if path is not None: plt.savefig(path, bbox_inches="tight", pad_inches=0.40)
    else: plt.show()
    plt.close()
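An aside on the subplot_spec arithmetic above: plt.subplot accepts a three-digit integer RCP (rows, columns, 1-based panel index), so ntimelines * 100 + 10 + index + 1 encodes an ntimelines-row, one-column grid; the encoding only works for grids of up to nine rows. A minimal standalone sketch with made-up bars:
import matplotlib.pyplot as plt

ntimelines = 3
shared_axis = None
for index in range(ntimelines):
    # 311, 312, 313 for a 3-row, 1-column grid
    ax = plt.subplot(ntimelines * 100 + 10 + index + 1, sharex=shared_axis)
    ax.barh([0, 1], [2.0, 3.0], left=[index, index], align='center', alpha=0.8)
    if index == 0:
        shared_axis = ax
plt.savefig('timeline_grid_sketch.png', bbox_inches='tight')
plt.close()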
Example #42
0
def main():
    """Create the network and start the training."""
    model_urls = {'CoarseSN': 'models/DR_CoarseSN/CoarseSN.pth', 'MaskCN': 'models/MaskCN/MaskCN.pth'}

    writer = SummaryWriter('models/' + NAME)

    cudnn.enabled = True

    ############# Create mask-guided classification network.
    MaskCN = Xception_dilation(num_classes=NUM_CLASSES_CLS, input_channel=INPUT_CHANNEL)
    MaskCN.cuda()
    if FP16 is True:
        MaskCN = amp.initialize(MaskCN, opt_level="O1")

    ############# Load pretrained weights
    pretrained_dict = torch.load(model_urls['MaskCN'])
    MaskCN.load_state_dict(pretrained_dict)
    MaskCN.eval()

    ############# Create enhanced segmentation network.
    EnhanceSN = deeplabv3plus_en(num_classes=NUM_CLASSES_SEG)
    optimizer = torch.optim.Adam(EnhanceSN.parameters(), lr=LEARNING_RATE)
    EnhanceSN.cuda()
    if FP16 is True:
        EnhanceSN, optimizer = amp.initialize(EnhanceSN, optimizer, opt_level="O1")
    EnhanceSN = torch.nn.DataParallel(EnhanceSN)

    ############# Load pretrained weights
    pretrained_dict = torch.load(model_urls['CoarseSN'])
    net_dict = EnhanceSN.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if (k in net_dict) and (v.shape == net_dict[k].shape)}
    net_dict.update(pretrained_dict)
    EnhanceSN.load_state_dict(net_dict)
    EnhanceSN.train()
    EnhanceSN.float()

    print(len(net_dict))
    print(len(pretrained_dict))

    DR_loss = loss.Fusin_Dice_rank()

    cudnn.benchmark = True

    ############# Load training and validation data
    data_train_root = 'dataset/seg_data/Training_resize_seg/'
    data_train_root_mask = 'Coarse_masks/Training_EnhancedSN/'
    data_train_list = 'dataset/ISIC/Training_seg.txt'
    trainloader = data.DataLoader(MyDataSet_seg(data_train_root, data_train_list, root_path_coarsemask=data_train_root_mask, crop_size=(w, h)),
                                  batch_size=BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True)

    data_val_root = 'dataset/seg_data/ISIC-2017_Validation_Data/'
    data_val_root_mask = 'Coarse_masks/Validation_EnhancedSN/'
    data_val_list = 'dataset/ISIC/Validation_seg.txt'
    valloader = data.DataLoader(MyValDataSet_seg(data_val_root, data_val_list, root_path_coarsemask=data_val_root_mask), batch_size=1, shuffle=False,
                                num_workers=8,
                                pin_memory=True)

    ############# Generate CAM for validation data
    val_cams = val_mode_cam(valloader, MaskCN)

    path = 'models/' + NAME
    if not os.path.isdir(path):
        os.mkdir(path)
    f_path = os.path.join(path, 'outputxx.txt')

    val_jac = []

    ############# Start the training
    for epoch in range(EPOCH):

        train_loss_D = []
        train_loss_R = []
        train_loss_total = []
        train_jac = []

        for i_iter, batch in tqdm(enumerate(trainloader)):

            # if i_iter > 50:
            #     continue

            step = (TRAIN_NUM // BATCH_SIZE) * epoch + i_iter

            images, coarsemask, labels, name = batch
            images = images.cuda()
            coarsemask = coarsemask.unsqueeze(1).cuda()
            labels = labels.cuda().squeeze(1)

            with torch.no_grad():
                input_cla = torch.cat((images, coarsemask), dim=1)
                cla_cam = cam(MaskCN, input_cla)

            cla_cam = torch.from_numpy(np.stack(cla_cam)).unsqueeze(1).cuda()

            optimizer.zero_grad()
            lr = adjust_learning_rate(optimizer, step)

            EnhanceSN.train()
            preds = EnhanceSN(images, cla_cam)

            loss_D, loss_R = DR_loss(preds, labels)
            term = loss_D + 0.05 * loss_R

            if FP16 is True:
                with amp.scale_loss(term, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                term.backward()
            optimizer.step()

            writer.add_scalar('learning_rate', lr, step)
            writer.add_scalar('loss', term.cpu().data.numpy(), step)

            train_loss_D.append(loss_D.cpu().data.numpy())
            train_loss_R.append(loss_R.cpu().data.numpy())
            train_loss_total.append(term.cpu().data.numpy())
            train_jac.append(Jaccard(preds, labels))


        print("train_epoch%d: lossTotal=%f, lossDice=%f, lossRank=%f, Jaccard=%f \n" % (
        epoch, np.nanmean(train_loss_total), np.nanmean(train_loss_D), np.nanmean(train_loss_R), np.nanmean(train_jac)))


        ############# Start the validation
        [vacc, vdice, vsen, vspe, vjac_score] = val_mode_seg(valloader, val_cams, EnhanceSN, path, epoch)
        line_val = "val%d: vacc=%f, vdice=%f, vsensitivity=%f, vspecifity=%f, vjac=%f \n" % \
                   (epoch, np.nanmean(vacc), np.nanmean(vdice), np.nanmean(vsen), np.nanmean(vspe),
                    np.nanmean(vjac_score))

        print(line_val)
        with open(f_path, "a") as f:
            f.write(line_val)

        val_jac.append(np.nanmean(vjac_score))

        ############# Plot val curve
        plt.figure()
        plt.plot(val_jac, label='val jaccard', color='blue', linestyle='--')
        plt.legend(loc='best')

        plt.savefig(os.path.join(path, 'jaccard.png'))
        plt.clf()
        plt.close('all')

        writer.add_scalar('val_Jaccard', np.nanmean(vjac_score), epoch)

        ############# Save network
        torch.save(EnhanceSN.state_dict(), os.path.join(path, 'CoarseSN_e' + str(epoch) + '.pth'))
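The shape-filtered weight loading above (keep only checkpoint tensors whose name and shape match the target network) is a common PyTorch pattern; a hedged, self-contained restatement with an illustrative helper name:
import torch

def load_matching_weights(model, checkpoint_path):
    """Copy into model only those checkpoint tensors whose key and shape both match."""
    pretrained = torch.load(checkpoint_path, map_location='cpu')
    own = model.state_dict()
    compatible = {k: v for k, v in pretrained.items()
                  if k in own and v.shape == own[k].shape}
    own.update(compatible)
    model.load_state_dict(own)
    return len(compatible), len(own)  # mirrors the two diagnostic prints above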
Example #43
0
def create_plots(gdf, output_directory, breaks):
    # def find_next(array, value):
    #     array = np.asarray(array)
    #     idx = np.where((array - value) > 0 )[0][0]
    #     return array[idx]

    print('aggregating at the county level')
    t0 = time()
    # handles topology errors
    try:
        county_gdf = gdf[[
            'CountyFips', 'Population', 'CountyName', 'State', 'geometry'
        ]].dissolve(by='CountyFips')
        population_sum = gdf.groupby(by=['CountyFips']).agg(
            {'Population': 'sum'})['Population']
        county_gdf['Population'] = population_sum
        county_gdf['coords'] = county_gdf['geometry'].apply(
            lambda x: x.representative_point().coords[:][0])
    except Exception:
        gdf['geometry'] = gdf.buffer(0)
        county_gdf = gdf[[
            'CountyFips', 'Population', 'CountyName', 'State', 'geometry'
        ]].dissolve(by='CountyFips')
        population_sum = gdf.groupby(by=['CountyFips']).agg(
            {'Population': 'sum'})['Population']
        county_gdf['Population'] = population_sum
        county_gdf['coords'] = county_gdf['geometry'].apply(
            lambda x: x.representative_point().coords[:][0])
    print(time() - t0)

    # print('simplifying geometry')
    # t0 = time()
    # gdf = gdf.drop('Shape', axis=1)
    # gdf['geometry'] = gdf['geometry'].simplify(tolerance=0.00001,preserve_topology=True)
    # print(time() - t0)

    print('patching geometry for faster plotting')
    t0 = time()
    patches, idx = polygons_to_patchcollection(gdf.geometry)
    county_patches, idx_county = polygons_to_patchcollection(
        county_gdf.geometry)
    print(time() - t0)
    t0 = time()

    print('sorting...')
    sorted_gdf = county_gdf.sort_values(by=['Population'], ascending=False)
    poly = copy(patches)
    boundaries = copy(county_patches)
    print(time() - t0)

    print('plotting econloss')
    t0 = time()
    fig = plt.figure(figsize=(2.74, 2.46), dpi=600)
    ax = fig.gca()
    color_vals = [gdf.iloc[[x]]['EconLoss'][0] for x in idx]
    color_array = pd.cut(color_vals,
                         bins=(list(breaks)),
                         labels=[x[0] + 1
                                 for x in enumerate(list(breaks))][0:-1])
    color_array = pd.Series(pd.to_numeric(color_array)).fillna(0)
    poly.set(array=color_array, cmap='Reds')
    ax.add_collection(poly)
    boundaries.set(facecolor='None',
                   edgecolor='#303030',
                   linewidth=0.3,
                   alpha=0.5)
    ax.add_collection(boundaries)
    ax.margins(x=0, y=0.1)
    ax.axis('off')
    ax.axis('scaled')
    annotated = []
    for row in range(len(sorted_gdf)):
        name = sorted_gdf.iloc[[row]]['CountyName'][0]
        if (name not in annotated) and (len(annotated) < 5):
            coords = sorted_gdf.iloc[[row]]['coords'][0]
            plt.annotate(name,
                         xy=coords,
                         horizontalalignment='center',
                         size=4,
                         color='white',
                         path_effects=[
                             pe.withStroke(linewidth=1, foreground='#404040')
                         ])
            annotated.append(name)
    fig.tight_layout(pad=0, h_pad=None, w_pad=None, rect=None)
    fig.savefig(output_directory + '/' + 'econloss.png', pad_inches=0)
    print(time() - t0)

    # clearing the figure
    fig.clf()
    plt.clf()
    poly = copy(patches)
    boundaries = copy(county_patches)

    print('plotting PGA')
    t0 = time()

    hazard_colors = {
        '0': {
            'lowValue': 0.0,
            'highValue': 0,
            'color': '#ffffff'
        },
        '1': {
            'lowValue': 0.0,
            'highValue': 0.0017,
            'color': '#dfe6fe'
        },
        '2': {
            'lowValue': 0.0017,
            'highValue': 0.0078,
            'color': '#dfe6fe'
        },
        '3': {
            'lowValue': 0.0078,
            'highValue': 0.014,
            'color': '#82f9fb'
        },
        '4': {
            'lowValue': 0.014,
            'highValue': 0.039,
            'color': '#7efbdf'
        },
        '5': {
            'lowValue': 0.039,
            'highValue': 0.092,
            'color': '#95f879'
        },
        '6': {
            'lowValue': 0.092,
            'highValue': 0.18,
            'color': '#f7f835'
        },
        '7': {
            'lowValue': 0.18,
            'highValue': 0.34,
            'color': '#fdca2c'
        },
        '8': {
            'lowValue': 0.34,
            'highValue': 0.65,
            'color': '#ff701f'
        },
        '9': {
            'lowValue': 0.65,
            'highValue': 1.24,
            'color': '#ec2516'
        },
        '10': {
            'lowValue': 1.24,
            'highValue': 2,
            'color': '#c81e11'
        }
    }

    breaks = [hazard_colors[x]['highValue'] for x in hazard_colors][1:]
    color_vals = [gdf.iloc[[x]]['PGA'][0] for x in idx]
    color_indices = pd.cut(color_vals,
                           bins=([0] + list(breaks)),
                           labels=[x[0] + 1 for x in enumerate(list(breaks))])
    color_indices = pd.Series(
        pd.to_numeric(color_indices)).fillna(0).astype(int)
    hex_array = [hazard_colors[str(x)]['color'] for x in color_indices]

    fig = plt.figure(figsize=(2.74, 2.46), dpi=600)
    ax = fig.gca()
    ax.add_collection(poly)
    poly.set_color(hex_array)
    boundaries.set(facecolor='None',
                   edgecolor='#303030',
                   linewidth=0.3,
                   alpha=0.5)
    ax.add_collection(boundaries)
    ax.margins(x=0, y=0.1)
    ax.axis('off')
    ax.axis('scaled')
    annotated = []
    for row in range(len(sorted_gdf)):
        name = sorted_gdf.iloc[[row]]['CountyName'][0]
        if (name not in annotated) and (len(annotated) < 5):
            coords = sorted_gdf.iloc[[row]]['coords'][0]
            plt.annotate(name,
                         xy=coords,
                         horizontalalignment='center',
                         size=4,
                         color='white',
                         path_effects=[
                             pe.withStroke(linewidth=1, foreground='#404040')
                         ])
            annotated.append(name)
    fig.tight_layout(pad=0, h_pad=None, w_pad=None, rect=None)
    fig.savefig(output_directory + '/' + 'extent.png', pad_inches=0)
    print(time() - t0)
Example #44
0
    def animate(self):
        # show or save a sequence of images demonstrating how the plan would unfold
        from matplotlib.patches import Polygon

        fig = plt.figure()
        ax  = fig.add_subplot(111)

        GREEN     = ( 0.0 , 1.0 , 0.0 , 0.3)
        BLUE      = ( 0.0 , 0.0 , 1.0 , 0.3)
        RED       = ( 1.0 , 0.0 , 0.0 , 0.5)
        INVISIBLE = ( 0.0 , 0.0 , 0.0 , 0.0 )

        portals = np.array([self.a.node[i]['xy'] for i in self.a.nodes_iter()]).T
        
        # Plot all edges lightly
        def dashAllEdges():
            for p,q in self.a.edges_iter():
                plt.plot(portals[0,[p,q]],portals[1,[p,q]],'k:')

        aptotal = 0

        edges   = []
        patches = []

        plt.plot(portals[0],portals[1],'go')
#        plt.plot(portals[0],portals[1],'bo')

        dashAllEdges()

        plt.title('AP:\n%s'%commaGroup(aptotal),ha='center')
        plt.axis('off')
        plt.savefig(self.outputDir+'frame_-1.png')
        plt.clf()

        for i in xrange(self.m):
            p,q = self.orderedEdges[i]
#            print p,q,self.a.edge[p][q]['fields']

            plt.plot(portals[0],portals[1],'go')
#            plt.plot(portals[0],portals[1],'bo')

            # Plot all edges lightly
            dashAllEdges()

            for edge in edges:
                plt.plot(edge[0],edge[1],'g-')
#                plt.plot(edge[0],edge[1],'b-')

            # We'll display the new fields in red
            newPatches = []
            for tri in self.a.edge[p][q]['fields']:
#                print 'edge has a field'
                coords = np.array([ self.a.node[v]['xy'] for v in tri ])
                newPatches.append(Polygon(shrink(coords.T).T,facecolor=RED,\
                                                 edgecolor=INVISIBLE))
#                newPatches.append(Polygon(shrink(coords.T).T,facecolor=GREEN,\
#                                                 edgecolor=INVISIBLE))
#            print '%s new patches'%len(newPatches)
            
            aptotal += 313+1250*len(newPatches)

            newEdge = np.array([self.a.node[p]['xy'],self.a.node[q]['xy']]).T

            patches += newPatches
            edges.append(newEdge)

           # plt.arrow( x, y, dx, dy, **kwargs )
#            plt.arrow(              newEdge[0,0],\
#                                    newEdge[1,0],\
#                       newEdge[0,1]-newEdge[0,0],\
#                       newEdge[1,1]-newEdge[1,0],\
#                       fc="k", ec="k")#,head_width=0.0005,head_length=0.001 )
            
            plt.plot(newEdge[0],newEdge[1],'k-',lw=2)
#            plt.plot(newEdge[0],newEdge[1],'g-')

            ax = plt.gca()
#            print 'adding %s patches'%len(patches)
            for patch in patches:
                ax.add_patch(patch)

            ax.set_title('AP:\n%s'%commaGroup(aptotal),ha='center')
            ax.axis('off')
            plt.savefig(self.outputDir+'frame_{0:02d}.png'.format(i))
            ax.cla()

            for patch in newPatches:
                patch.set_facecolor(GREEN)
#                patch.set_facecolor(BLUE)

        plt.plot(portals[0],portals[1],'go')
#        plt.plot(portals[0],portals[1],'bo')
        for edge in edges:
            plt.plot(edge[0],edge[1],'g-')
#            plt.plot(edge[0],edge[1],'b-')
        for patch in patches:
            ax.add_patch(patch)

        ax.set_title('AP:\n%s'%commaGroup(aptotal),ha='center')
        ax.axis('off')
        plt.savefig(self.outputDir+'frame_%s.png'%self.m)
        ax.cla()
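One note on the frame filenames above: the per-iteration frames are zero-padded ('frame_{0:02d}.png') but the opening frame ('frame_-1.png') and the closing frame ('frame_%s.png' % self.m) are not, so a lexical sort of the output directory will not give playback order. A hedged sketch of a consistent scheme (helper name is illustrative):
import os

def frame_path(out_dir, index, width=4):
    # frame_0000.png, frame_0001.png, ... sort lexically in playback order
    return os.path.join(out_dir, 'frame_{0:0{1}d}.png'.format(index, width))

assert frame_path('frames', 7) == os.path.join('frames', 'frame_0007.png')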
def plotdecodeimages():

    dataset = FLAGS.train_file.split('/')[-1]
    dataset = dataset.split('.')[0]

    s = FLAGS.init_checkpoint.split('/')[-1]
    name = (s.split('_')[1]).split('.ckpt')[0]

    name = '{}_{}_{}_{}'.format(FLAGS.model, dataset, 'AcousticMapJet', name)
    data_dir = str.join('/', FLAGS.init_checkpoint.split('/')[:-1] + [name])

    random_pick = True
    build_spectrogram = True
    normalize = False

    # Create data loaders according to the received program arguments
    print('{} - Creating data loaders'.format(datetime.now()))
    modalities = [0, 1, 2]

    with tf.device('/cpu:0'):
        if FLAGS.datatype == 'old':
            train_data = ActionsDataLoader(FLAGS.train_file, 'testing', batch_size=FLAGS.batch_size, num_epochs=1, sample_length=1,
                                          datakind=FLAGS.datatype, buffer_size=10, shuffle=False, embedding=1,
                                           normalize=normalize, build_spectrogram=build_spectrogram, correspondence=0,
                                           random_pick=random_pick, modalities=modalities, nr_frames=1)
        elif FLAGS.datatype == 'outdoor':
            train_data = SoundDataLoader(FLAGS.train_file, 'testing', batch_size=FLAGS.batch_size, num_epochs=1, sample_length=1,
                                          datakind=FLAGS.datatype, buffer_size=10, shuffle=False, embedding=1,
                                           normalize=normalize, build_spectrogram=build_spectrogram, correspondence=0,
                                           random_pick=random_pick, modalities=modalities, nr_frames=1)

    # Build model
    print('{} - Building model'.format(datetime.now()))

    with tf.device('/gpu:0'):

        model = UNetAc(input_shape=[36, 48, 12])
        model_video = ResNet50Model(input_shape=[224, 298, 3], num_classes=None)

    handle = tf.placeholder(tf.string, shape=())
    iterator = tf.data.Iterator.from_string_handle(handle, train_data.data.output_types,
                                                   train_data.data.output_shapes)
    train_iterat = train_data.data.make_initializable_iterator()
    next_batch = iterator.get_next()

    mfcc = tf.reshape(next_batch[1], shape=[-1, 12])
    images = tf.reshape(next_batch[2], shape=[-1, 224, 298, 3])
    acoustic = tf.reshape(next_batch[0], shape=[-1, 36, 48, 12])

    # mfcc = mfcc - tf.reduce_min(mfcc, axis=[1], keep_dims=True)
    # mfcc = mfcc / tf.reduce_max(mfcc, axis=[1], keep_dims=True)

    mfccmap = tf.reshape(mfcc, (-1, 1, 12))
    mfccmap = tf.tile(mfccmap, (1, 36 * 48, 1))
    mfccmap = tf.reshape(mfccmap, (-1, 36, 48, 12))
    model_video._build_model(images)
    model._build_model(mfccmap, model_video.output)

    output = model.output
    var_list1 = slim.get_variables(model_video.scope + '/')
    var_list2 = slim.get_variables(model.scope + '/')
    var_list = var_list2 + var_list1

    if os.path.exists(data_dir):
        print("Features already computed!")
    else:
        os.makedirs(data_dir)  # mkdir creates one directory, makedirs all intermediate directories

    total_size = 0
    batch_count = 0
    num = 0
    print('{} - Starting'.format(datetime.now()))

    namesimage = ['Acoustic image', 'Reconstructed']

    with tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True))) as session:
        train_handle = session.run(train_iterat.string_handle())
        # Initialize student model
        if FLAGS.init_checkpoint is None:
            print('{} - Initializing student model'.format(datetime.now()))
            model.init_model(session, FLAGS.init_checkpoint)
            print('{} - Done'.format(datetime.now()))
        else:
            print('{} - Restoring student model'.format(datetime.now()))
            saver = tf.train.Saver(var_list=var_list)
            saver.restore(session, FLAGS.init_checkpoint)
            print('{} - Done'.format(datetime.now()))
            #variables_in_checkpoint = tf.train.list_variables('path.ckpt')
        session.run(train_iterat.initializer)
        while True:
            try:
                data, reconstructed, im = session.run(
                    [acoustic, output, images],
                    feed_dict={handle: train_handle,
                               model.network['keep_prob']: 1.0,
                               model.network['is_training']: 0,
                               model_video.network['keep_prob']: 1.0,
                               model_video.network['is_training']: 0
                               })
                total_size += reconstructed.shape[0]

                for h in range(np.shape(reconstructed)[0]):
                    # original and reconstructed
                    fig, axs = plt.subplots(1, 2, figsize=(6, 2.9))
                    plt.tight_layout(pad=1.0)
                    imagesvideo = np.stack((data, reconstructed), 0)
                    for i in range(2):
                        x = 0
                        y = i
                        imgray = cv2.cvtColor(im[h], cv2.COLOR_BGR2GRAY)
                        axs[y].imshow(imgray, cmap=plt.cm.gray)
                        map = find_logen(imagesvideo[i, h])
                        map = cv2.resize(map, (298, 224))
                        axs[y].imshow(map, cmap=plt.cm.jet, alpha=0.7)
                        axs[y].axis('off')
                        axs[y].set_title('{}'.format(namesimage[i]))
                    outImage_path = '{}/{}_images_{}.png'.format(data_dir, dataset, num)
                    plt.savefig(outImage_path)
                    plt.close(fig)
                    num = num + 1
                print(total_size)
            except tf.errors.OutOfRangeError:
                break
            batch_count += 1
        print('{} - Completed, got {} samples'.format(datetime.now(), total_size))
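A hedged NumPy analogue of the mfccmap construction above: each 12-dimensional MFCC vector is broadcast to every cell of the 36x48 acoustic grid so it can be fed to the network alongside spatial features. The data here is random:
import numpy as np

mfcc = np.random.rand(5, 12)                    # batch of 5 feature vectors
mfccmap = np.reshape(mfcc, (-1, 1, 12))
mfccmap = np.tile(mfccmap, (1, 36 * 48, 1))
mfccmap = np.reshape(mfccmap, (-1, 36, 48, 12))
assert mfccmap.shape == (5, 36, 48, 12)
assert np.all(mfccmap[0, 0, 0] == mfccmap[0, 35, 47])  # same vector at every cell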
Example #46
0
def create_timeline_plot(data, procranks, path=None, figsize=(12, 8), percentages=False, totals=False, unordered=False,
                         cpu=False, title=None, ylabels=None, yaxis=None, rpc="r", add_border=False, show_ranks=True,
                         label_fontsize=18, title_fontsize=20, ticks_fontsize=12):

    """
    This function actually plots the timeline based on a data structure containing the starttimes and endtimes
    for the different simulation phases
    :param data:
    :param path:
    :param procranks:
    :param figsize:
    :param percentages:
    :param totals:
    :param unordered:
    :param cpu:
    :param title:
    :param ylabels:
    :param yaxis:
    :param rpc: 'rank', 'processes' or 'cores'
    :param add_border:
    :param show_ranks:
    :param label_fontsize:
    :param title_fontsize:
    :param ticks_fontsize:
    :return:
    """

    # Initialize figure
    plt.figure(figsize=figsize)
    plt.clf()

    ax = plt.gca()

    # Set x axis grid
    ax.xaxis.grid(linestyle="dotted", linewidth=2.0)

    legend_entries = []
    legend_names = []
    unique_phases = []   # A LIST OF THE UNIQUE PHASE NAMES

    # Determine the number of processes
    nprocs = len(procranks)

    # Get the ordering
    if unordered: yticks = np.array(procranks).argsort().argsort()
    else: yticks = procranks

    durations_list = []
    totaldurations = np.zeros(nprocs)
    patch_handles = []

    # Make the timeline plot, consisting of a set of bars of the same color for each simulation phase
    for phase, starttimes, endtimes in data:

        durations = np.array(endtimes) - np.array(starttimes)
        durations_list.append(durations)

        totaldurations += durations

        patch_handle = ax.barh(yticks, durations, color=colors[phase], align='center', left=starttimes, alpha=0.8, lw=0)
        patch_handles.append(patch_handle)

        if phase not in unique_phases and not (phase == "comm" and nprocs == 1):

            unique_phases.append(phase)
            legend_entries.append(patch_handle)
            legend_names.append(phase_label_names[phase])

    if percentages:

        # For the different phases
        for phase, patch_handle in enumerate(patch_handles):

            durations = durations_list[phase]

            for sorting_number, rectangle in enumerate(patch_handle.get_children()):

                duration = durations[sorting_number]
                percentage = float(duration) / float(totaldurations[sorting_number]) * 100.0

                x = 0.5 * rectangle.get_width() + rectangle.get_x()
                y = 0.5 * rectangle.get_height() + rectangle.get_y()

                if rectangle.get_width() > 2000:

                    plt.text(x, y, "%d%%" % percentage, ha='center', va='center', fontsize=10)

    if totals:

        for sorting_number, rectangle in enumerate(patch_handles[-1].get_children()):

            width = rectangle.get_width()
            label_text = str(int(totaldurations[sorting_number]))
            plt.text(rectangle.get_x() + width + 0.02*rectangle.get_x(), rectangle.get_y() + rectangle.get_height() / 2., label_text, ha="left", va="center", fontsize=10)

    if unordered:

        #print("YTICKS", yticks)
        #print("PROCRANKS", procranks)
        #plt.yticks(yticks, procranks)
        if rpc == 'r':
            if show_ranks:
                ax.set_yticks(yticks)
                ax.set_yticklabels(procranks)
            else:
                ax.set_yticks([])
                ax.set_yticklabels([])
        else:
            ax.set_yticks(yticks)
            ax.set_yticklabels(procranks)
    else:

        if rpc == 'r':
            if show_ranks:
                ax.set_yticks(procranks)
                ax.set_yticklabels(procranks)
            else:
                ax.set_yticks([])
                ax.set_yticklabels([])
        else:
            ax.set_yticks(procranks)
            ax.set_yticklabels(procranks)

    ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))

    if not add_border:
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.tick_params(axis=u'both', which=u'both', length=0)

    # Format the axis ticks and labels
    if cpu: ax.set_xlabel('CPU time (s)', fontsize=label_fontsize)
    else: ax.set_xlabel('Time (s)', fontsize=label_fontsize)

    # Set y label
    if rpc == 'r':
        if show_ranks: ax.set_ylabel('Process rank', fontsize=label_fontsize)
        else: ax.set_ylabel("Processes", fontsize=label_fontsize)
    elif rpc == 'p': ax.set_ylabel('Number of processes', fontsize=label_fontsize)
    elif rpc == 'c': ax.set_ylabel('Number of cores', fontsize=label_fontsize)

    #ax.yaxis.grid(True)

    # Custom y labels
    if ylabels is not None:
        plt.yticks(yticks, ylabels)
        ax.set_ylabel("")

    # Custom y axis label
    if yaxis is not None: ax.set_ylabel(yaxis)

    # Set ticks fontsize
    plt.setp(ax.get_xticklabels(), rotation='horizontal', fontsize=ticks_fontsize)
    plt.setp(ax.get_yticklabels(), rotation='horizontal', fontsize=ticks_fontsize)

    if nprocs == 1:

        ax.set_frame_on(False)
        fig = plt.gcf()
        fig.set_size_inches(10,2)
        ax.xaxis.tick_bottom()
        ax.yaxis.set_visible(False)

    # Shrink current axis's height by 20% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])

    # Set the plot title
    #if title is None: plt.title("Timeline of the different simulation phases")
    #else: plt.title(title)
    if title is not None: plt.suptitle(title, fontsize=title_fontsize)

    # Put a legend below current axis
    legend = ax.legend(legend_entries, legend_names, loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=False, shadow=False, ncol=4, prop={'size': 12})

    # Change legend properties
    frame = legend.get_frame()
    frame.set_linewidth(0)
    frame.set_facecolor('0.85')
    legend.legendPatch.set_alpha(0.75)

    # Save the figure
    if path is not None: plt.savefig(path, bbox_inches="tight", pad_inches=0.40)
    else: plt.show()
    plt.close()
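A hedged standalone version of the legend placement used above: shrink the axis height by 20% at the bottom, then anchor the legend in the freed space with bbox_to_anchor. The data is made up:
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
h1, = ax.plot([0, 1], [0, 1])
h2, = ax.plot([0, 1], [1, 0])

# Shrink the axis height by 20% on the bottom, as in the function above
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])

legend = ax.legend([h1, h2], ['phase A', 'phase B'], loc='upper center',
                   bbox_to_anchor=(0.5, -0.2), fancybox=False, shadow=False, ncol=2)
legend.get_frame().set_linewidth(0)
legend.get_frame().set_facecolor('0.85')
plt.savefig('legend_below_sketch.png', bbox_inches='tight')
plt.close()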
Example #47
0
def demonsReg(source,
              target,
              sigma_elastic=1,
              sigma_fluid=1,
              num_lev=3,
              use_composition=False,
              use_target_grad=False,
              max_it=1000,
              check_MSD=True,
              disp_freq=5,
              disp_spacing=2,
              scale_update_for_display=10,
              disp_method_df='grid',
              disp_method_up='arrows'):
    """
  Perform a registration between the 2D source image and the 2D target
  image using the demons algorithm. The source image is warped (resampled)
  into the space of the target image.
  
  The final warped image and deformation field can be returned as outputs
  from the function.
    
  The values of sigma_elastic and sigma_fluid can optionally be provided
  to specify the amount of elastic and fluid regularistion to apply. these
  values specify the standard deviation of the Gaussian used to smooth the
  update (fluid) or displacement field (elastic). a value of 0 means no
  smoothing is applied. default values are:
  sigma_elastic = 1
  sigma_fluid = 1
    The registration uses a multi-resolution scheme. The num_lev parameter
  can be used to specify the number of resolution levels to use. The
  default number of levels to use is 3.
   The demons registration can be performed by either adding (classical
  demons) or composing (diffeomorphic demons) the updates at each
  iteration. Set the use_composition parameter to true to compose the
  updates at each iteration. The default value for use_composition is
  false, i.e. the updates will be added by default.
    
   There are a number of other parameters affecting the registration or
  how the results are displayed, which are explained below. These can be
  speficied using name-value pair arguments, e.g.:
  demonsReg(..., 'max_it', 500)
  The default values are given after the parameter name
    use_target_grad = false
        logical (true/false) value indicating whether the target image
        gradient or source image gradient is used when calculating the
        demons forces.
    max_it = 1000
        the maximum number of iterations to perform.
    check_MSD = true
        logical value indicating if the Mean Squared Difference (MSD)
        should be checked for improvement at each iteration. If true, the
        MSD will be evaluated at each iteration, and if there is no
        improvement since the previous iteration the registration will move
        to the next resolution level or finish if it is on the final level.
    disp_freq = 5
        the frequency with which to update the displayed images. the images
        will be updated every disp_freq iterations. If disp_freq is set to
        0 the images will not be displayed during the registration
    disp_spacing = 2
        the spacing between the grid lines or arrows when displaying the
        deformation field and update.
    scale_update_for_display = 10
        the factor used to scale the update field for displaying
    disp_method_df = 'grid'
        the display method for the deformation field.
        can be 'grid' or 'arrows'
    disp_method_up = 'arrows'
        the display method for the update. can be 'grid' or 'arrows'
  """

    # make copies of full resolution images
    source_full = source
    target_full = target

    # loop over resolution levels
    for lev in range(1, num_lev + 1):

        # resample images if not final level
        if lev != num_lev:
            resamp_factor = np.power(2, num_lev - lev)
            target = rescale(target_full,
                             1.0 / resamp_factor,
                             mode='edge',
                             order=3,
                             anti_aliasing=True)
            source = rescale(source_full,
                             1.0 / resamp_factor,
                             mode='edge',
                             order=3,
                             anti_aliasing=True)
        else:
            target = target_full
            source = source_full

        # if first level initialise def_field and disp_field
        if lev == 1:
            [X, Y] = np.mgrid[0:target.shape[0], 0:target.shape[1]]
            def_field = np.zeros((X.shape[0], X.shape[1], 2))
            def_field[:, :, 0] = X
            def_field[:, :, 1] = Y
            disp_field_x = np.zeros(target.shape)
            disp_field_y = np.zeros(target.shape)
        else:
            # otherwise upsample disp_field from previous level
            disp_field_x = 2 * resize(
                disp_field_x, target.shape, mode='edge', order=3)
            disp_field_y = 2 * resize(
                disp_field_y, target.shape, mode='edge', order=3)
            # recalculate def_field for this level from disp_field
            X, Y = np.mgrid[0:target.shape[0], 0:target.shape[1]]
            def_field = np.zeros((X.shape[0], X.shape[1],
                                  2))  # clear def_field from previous level
            def_field[:, :, 0] = X + disp_field_x
            def_field[:, :, 1] = Y + disp_field_y

        #initialise updates
        update_x = np.zeros(target.shape)
        update_y = np.zeros(target.shape)
        update_def_field = np.zeros(def_field.shape)

        # calculate the transformed image at the start of this level
        warped_image = resampImageWithDefField(source, def_field)

        # store the current def_field and MSD value to check for improvements at
        # end of iteration
        def_field_prev = def_field.copy()
        prev_MSD = calcMSD(target, warped_image)

        # pre-calculate the image gradients. only one of source or target
        # gradients needs to be calculated, as indicated by use_target_grad
        if use_target_grad:
            [target_gradient_x, target_gradient_y] = np.gradient(target)
        else:
            [source_gradient_x, source_gradient_y] = np.gradient(source)

        if disp_freq > 0:
            # DISPLAY RESULTS
            # figure 1 - source image (does not change during registration)
            # figure 2 - target image (does not change during registration)
            # figure 3 - source image transformed by current deformation field
            # figure 4 - deformation field
            # figure 5 - update
            plt.figure(1)
            plt.clf()
            dispImage(source)
            plt.pause(0.05)
            plt.figure(2)
            plt.clf()
            dispImage(target)
            plt.pause(0.05)
            plt.figure(3)
            plt.clf()
            dispImage(warped_image)
            x_lims = plt.xlim()
            y_lims = plt.ylim()
            plt.pause(0.05)
            plt.figure(4)
            plt.clf()
            dispDefField(def_field,
                         spacing=disp_spacing,
                         plot_type=disp_method_df)
            plt.xlim(x_lims)
            plt.ylim(y_lims)
            plt.pause(0.05)
            plt.figure(5)
            plt.clf()
            up_field_to_display = scale_update_for_display * np.dstack(
                (update_x, update_y))
            up_field_to_display += np.dstack((X, Y))
            dispDefField(up_field_to_display,
                         spacing=disp_spacing,
                         plot_type=disp_method_up)
            plt.xlim(x_lims)
            plt.ylim(y_lims)
            plt.pause(0.05)

            # if first level pause so user can position figure
            if lev == 1:
                input(
                    'position the figures as desired and then push enter to run the registration'
                )

        # main iterative loop - repeat until max number of iterations reached
        for it in range(max_it):

            # calculate update from demons forces
            #
            # if using the target image gradient, use it as is
            if use_target_grad:
                img_grad_x = target_gradient_x
                img_grad_y = target_gradient_y
            else:
                # but if using source image gradient need to transform with
                # current deformation field
                img_grad_x = resampImageWithDefField(source_gradient_x,
                                                     def_field)
                img_grad_y = resampImageWithDefField(source_gradient_y,
                                                     def_field)

            # calculate difference image
            diff = target - warped_image
            # calculate denominator of demons forces
            denom = np.power(img_grad_x, 2) + np.power(img_grad_y,
                                                       2) + np.power(diff, 2)
            # calculate x and y components of numerator of demons forces
            numer_x = diff * img_grad_x
            numer_y = diff * img_grad_y
            # calculate the x and y components of the update
            update_x = numer_x / denom
            update_y = numer_y / denom
            # set nan values to 0
            update_x[np.isnan(update_x)] = 0
            update_y[np.isnan(update_y)] = 0

            # if fluid like regularisation used smooth the update
            if sigma_fluid > 0:
                update_x = gaussian_filter(update_x,
                                           sigma_fluid,
                                           mode='nearest')
                update_y = gaussian_filter(update_y,
                                           sigma_fluid,
                                           mode='nearest')

            # update displacement field using addition (original demons) or
            # composition (diffeomorphic demons)
            if use_composition:
                # compose update with current transformation - this is done by
                # transforming (resampling) the current transformation using the
                # update. we can use the same function as used for resampling
                # images, and treat each component of the current deformation
                # field as an image
                # the update is a displacement field, but to resample an image
                # we need a deformation field, so need to calculate deformation
                # field corresponding to update.
                update_def_field[:, :, 0] = update_x + X
                update_def_field[:, :, 1] = update_y + Y
                # use this to resample the current deformation field, storing
                # the result in the same variable, i.e. we overwrite/update the
                # current deformation field with the composed transformation
                # use scipy's interpn function rather than resampImageWithDefField
                # so that values outside the def field are extrapolated rather
                # than being set to NaN
                x_coords = np.arange(def_field.shape[0], dtype='float')
                y_coords = np.arange(def_field.shape[1], dtype='float')
                def_field[:, :, 0] = interpn((x_coords, y_coords),
                                             def_field[:, :, 0],
                                             update_def_field,
                                             fill_value=None,
                                             bounds_error=False,
                                             method='linear')
                def_field[:, :, 1] = interpn((x_coords, y_coords),
                                             def_field[:, :, 1],
                                             update_def_field,
                                             fill_value=None,
                                             bounds_error=False,
                                             method='linear')
                # calculate the displacement field from the composed deformation field
                disp_field_x = def_field[:, :, 0] - X
                disp_field_y = def_field[:, :, 1] - Y
            else:
                # add the update to the current displacement field
                disp_field_x = disp_field_x + update_x
                disp_field_y = disp_field_y + update_y

            # if elastic like regularisation used smooth the displacement field
            if sigma_elastic > 0:
                disp_field_x = gaussian_filter(disp_field_x,
                                               sigma_elastic,
                                               mode='nearest')
                disp_field_y = gaussian_filter(disp_field_y,
                                               sigma_elastic,
                                               mode='nearest')

            # update deformation field from disp field
            def_field[:, :, 0] = disp_field_x + X
            def_field[:, :, 1] = disp_field_y + Y

            # transform the image using the updated deformation field
            warped_image = resampImageWithDefField(source, def_field)

            # update images if required for this iteration
            if disp_freq > 0 and it % disp_freq == 0:
                plt.figure(3)
                dispImage(warped_image)
                plt.pause(0.05)
                plt.figure(4)
                plt.clf()
                dispDefField(def_field,
                             spacing=disp_spacing,
                             plot_type=disp_method_df)
                plt.xlim(x_lims)
                plt.ylim(y_lims)
                plt.pause(0.05)
                plt.figure(5)
                plt.clf()
                up_field_to_display = scale_update_for_display * np.dstack(
                    (update_x, update_y))
                up_field_to_display += np.dstack((X, Y))
                dispDefField(up_field_to_display,
                             spacing=disp_spacing,
                             plot_type=disp_method_up)
                plt.xlim(x_lims)
                plt.ylim(y_lims)
                plt.pause(0.05)

            # calculate MSD between target and warped image
            MSD = calcMSD(target, warped_image)

            # display numerical results
            print('Level {0:d}, Iteration {1:d}: MSD = {2:f}\n'.format(
                lev, it, MSD))

            # check for improvement in MSD if required
            if check_MSD and MSD >= prev_MSD:
                # restore previous results and finish level
                def_field = def_field_prev
                warped_image = resampImageWithDefField(source, def_field)
                print('No improvement in MSD')
                break

            # update previous values of def_field and MSD
            def_field_prev = def_field.copy()
            prev_MSD = MSD.copy()

    # display the final results
    if disp_freq > 0:
        plt.figure(3)
        dispImage(warped_image)
        plt.figure(4)
        plt.clf()
        dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
        plt.xlim(x_lims)
        plt.ylim(y_lims)
        plt.figure(5)
        plt.clf()
        up_field_to_display = scale_update_for_display * np.dstack(
            (update_x, update_y))
        up_field_to_display += np.dstack((X, Y))
        dispDefField(up_field_to_display,
                     spacing=disp_spacing,
                     plot_type=disp_method_up)
        plt.xlim(x_lims)
        plt.ylim(y_lims)

    # return the transformed image and the deformation field
    return warped_image, def_field
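The demons force computed in the inner loop above can be exercised in isolation. A minimal sketch with toy arrays, assuming only numpy and scipy (the image and gradient names are placeholders, not the original data):

import numpy as np
from scipy.ndimage import gaussian_filter

# toy target/warped images and target gradients (placeholders)
rng = np.random.default_rng(0)
target = rng.random((64, 64))
warped = rng.random((64, 64))
grad_y, grad_x = np.gradient(target)

diff = target - warped
denom = grad_x ** 2 + grad_y ** 2 + diff ** 2
with np.errstate(divide='ignore', invalid='ignore'):
    update_x = diff * grad_x / denom
    update_y = diff * grad_y / denom
update_x[np.isnan(update_x)] = 0  # zero out 0/0 pixels, as in the loop above
update_y[np.isnan(update_y)] = 0

# fluid-like regularisation: smooth the update field
update_x = gaussian_filter(update_x, sigma=1.0, mode='nearest')
update_y = gaussian_filter(update_y, sigma=1.0, mode='nearest')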
Example #48
0
for idx_algoname, algoname in enumerate(all_algoname):
    print("Algo name: " + str(algoname))
    
    all_nbparticles = numpy.sort(res[(res.algoname == algoname)].nbparticles.unique())
    
    for idx_nbparticles, nbparticles in enumerate(all_nbparticles):
        print("Nb particles: " + str(nbparticles))
        
        reference = res[(res.algoname == algoname) & (res.nbparticles == nbparticles) & (res.nbthreads == 1)].exectime.unique()
        print("reference: " + str(reference))
        
        all_nbthreads = res[(res.algoname == algoname) & (res.nbparticles == nbparticles)].nbthreads
        all_timings = res[(res.algoname == algoname) & (res.nbparticles == nbparticles)].exectime
        all_eff = reference/(all_timings*all_nbthreads)
        
        ax.plot(all_nbthreads, all_eff, marker=markers[idx_nbparticles], color=colors[idx_algoname], label=str(algoname).replace('TbfSmSpetabaruAlgorithm','SPETABARU').replace('TbfOpenmpAlgorithm','OpenMP 4.5') + ' - N = ' + str(nbparticles).replace('10000000','10M').replace('1000000','1M'))

ax.set_ylabel('Parallel efficiency')
ax.set_xlabel('Number of threads')
pyplot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
              ncol=2, mode="expand", borderaxespad=0.)

for fex in file_extension:
    fig.savefig(csv_file + fex, bbox_inches='tight')
pyplot.clf()
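The efficiency plotted above is the standard parallel efficiency T(1) / (p * T(p)). A minimal check with made-up timings (illustrative numbers only):

import numpy as np

t1 = 100.0                                      # single-thread execution time
threads = np.array([1, 2, 4, 8])                # thread counts
timings = np.array([100.0, 55.0, 30.0, 18.0])   # measured times (made up)
efficiency = t1 / (timings * threads)
print(efficiency)  # 1.0 means perfect scaling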




Example #49
0
def Check_Interpolant():
    a = 0.9999
    EpsFun_approximate = Extrapolate(a)
    Eps_97,r_97,Eps_998,r_998,Eps_999999999,r_999999999,r_a4,Eps_a4,r_a09995 ,Eps_a09995, r_a5,Eps_a5 = EpsRelationship()  
    
    EpsFun_exact = interpolate.interp1d(r_998,Eps_998,kind = 'cubic')
    
    r = np.arange(r_998[0],2,0.0001)
    EpsFun_approx_val = EpsFun_approximate(r)
    
    EpsFun_exact_val = EpsFun_exact(r)
    
    plt.plot(r,EpsFun_approx_val,'r--',label = 'approx interpolant')
    plt.plot(r,EpsFun_exact_val,'b',label = 'exact interpolant for a = 0.998')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'$\dot{\mathcal{E}}$')
    plt.legend()
    plt.show()
    plt.clf()
    
    error = EpsFun_exact_val/EpsFun_approx_val
    
    plt.plot(r,error)
    plt.show()
    plt.clf()
    
    # =============================================================================
    #     Now check with a larger spin of a = 0.9999
    # =============================================================================
    
    r_a4[0] = risco(0.9999)
    EpsFun_a4 = interpolate.interp1d(r_a4,Eps_a4,kind = 'cubic')
    
    
    r = np.arange(r_a4[0],2,0.0001)
    EpsFun_a4_val = EpsFun_a4(r)
    
    plt.plot(r,EpsFun_a4_val,'k--',label = 'exact a = 0.9999')
    plt.plot(r,EpsFun_approximate(r),'darkviolet',label = 'approximate')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'$\dot{\mathcal{E}}$')
    plt.legend()
    plt.show()
    plt.clf()
    
    error = EpsFun_a4_val / EpsFun_approximate(r)
    
    plt.plot(r,error)
    plt.title('Error in a = 0.9999')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'Fractional Error')
    plt.show()
    plt.clf()
    
    # =============================================================================
    #     Now check with a spin of a = 0.9995 - pray
    # =============================================================================
    
    r_a09995[0] = risco(0.9995)
    EpsFun_a09995 = interpolate.interp1d(r_a09995,Eps_a09995,kind = 'cubic')
    
    
    r = np.arange(r_a09995[0],2,0.0001)
    EpsFun_a09995_val = EpsFun_a09995(r)
    
    plt.plot(r,EpsFun_a09995_val,'k--',label = 'exact a = 0.9995')
    plt.plot(r,EpsFun_approximate(r),'darkviolet',label = 'approximate')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'$\dot{\mathcal{E}}$')
    plt.legend()
    plt.show()
    plt.clf()
    
    error = EpsFun_a09995_val / EpsFun_approximate(r)
    
    plt.plot(r,error)
    plt.title('Error in a = 0.9995')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'Fractional Error')
    plt.show()
    plt.clf()
    # =============================================================================
    #     Now check with a spin of a = 0.99
    # =============================================================================
    Eps99 = [0.4148,0.4154,0.4160,0.4177,0.4207,0.4263,0.4434,0.4701,0.5182,
             0.5587,0.5930,0.6665,0.7117,0.7556,0.7813,0.8121,0.8320,0.8469,
             0.8589,0.8689,0.8774,0.8847]
        
    R =[1.000,1.001,1.002,1.005,1.01,1.02,1.05,1.1,1.2,1.3,
         1.4,1.7,2.0,2.5,3,4,5,6,7,8,9,10] 
    
    r_isco99 = risco(0.99)
    
    r_99 = r_isco99 * np.array(R)
    
    EpsFun_99 = interpolate.interp1d(r_99,Eps99,kind = 'cubic')  
    r = np.arange(r_isco99,2,0.0001)
    
    plt.plot(r,EpsFun_99(r),'k--',label = r'$a = 0.99$')
    plt.plot(r,EpsFun_approximate(r),'darkviolet',label = r'approximate')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'$\dot{\mathcal{E}}$')
    plt.legend()
    plt.show()
    plt.clf()
    
    error = EpsFun_99(r) / EpsFun_approximate(r)
    
    plt.plot(r,error)
    plt.title('Error in a = 0.99')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'Fractional Error')
    plt.show()
    plt.clf()
    # =============================================================================
    #   and with a spin of a = 0.999
    # =============================================================================
    Eps999 = [0.2022,0.2032,0.2041,0.2069,0.2116,0.2208,0.2473,0.2881,0.3581,
       0.4160,0.4648,0.5723,0.6411,0.7089,0.7469,0.7882,0.8118,0.8286,
       0.8416,0.8524,0.8616,0.8695]
        
        
    R =[1.000,1.001,1.002,1.005,1.01,1.02,1.05,1.1,1.2,1.3,
         1.4,1.7,2.0,2.5,3,4,5,6,7,8,9,10] 
    
    r_isco999 = risco(0.999)
    
    r_999 = r_isco999 * np.array(R)
    
    EpsFun_999 = interpolate.interp1d(r_999,Eps999,kind = 'cubic')  
    r = np.arange(r_isco999,2,0.0001)
    
    plt.plot(r,EpsFun_999(r),'k--',label = r'$a = 0.999$')
    plt.plot(r,EpsFun_approximate(r),'darkviolet',label = r'approximate')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'$\dot{\mathcal{E}}$')
    plt.legend()
    plt.show()
    plt.clf()
    
    error = EpsFun_999(r) / EpsFun_approximate(r)
    
    plt.plot(r,error)
    plt.title('Error in a = 0.999')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'Fractional Error')
    plt.show()
    plt.clf()
    
    # =============================================================================
    #   what about a spin of a = 0.99999
    # =============================================================================
    
    r_a5[0] = risco(0.99999)
    Eps_a5.pop(2)
    Eps_a5.pop(5)
    r_a5.pop(2)
    r_a5.pop(5)
    
    
    EpsFun_a5 = interpolate.interp1d(r_a5,Eps_a5,kind = 'cubic')
    
    
    r = np.arange(r_a5[0],2,0.0001)
    EpsFun_a5_val = EpsFun_a5(r)
    
    plt.plot(r,EpsFun_a5_val,'k--',label = 'exact a = 0.99999')
    plt.plot(r,EpsFun_approximate(r),'darkviolet',label = 'approximate')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'$\dot{\mathcal{E}}$')
    plt.legend()
    plt.show()
    plt.clf()
    
    error = EpsFun_a5_val / EpsFun_approximate(r)
    
    plt.plot(r,error)
    plt.title('Error in a = 0.99999')
    plt.xlabel(r'$\tilde{r}$')
    plt.ylabel(r'Fractional Error')
    plt.show()
    plt.clf()
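Every check in this function follows the same pattern: build a cubic interpolant from tabulated (r, Eps) pairs and compare it against the approximate interpolant as a ratio. A minimal sketch of that pattern, with made-up tables standing in for the real data:

import numpy as np
from scipy import interpolate

r_tab = np.linspace(1.0, 10.0, 20)    # tabulated radii (made up)
eps_tab = 0.4 + 0.05 * r_tab          # tabulated values (made up)

exact = interpolate.interp1d(r_tab, eps_tab, kind='cubic')

def approx(r):
    # stand-in for the approximate interpolant being validated
    return (0.4 + 0.05 * r) * 1.01

r = np.arange(r_tab[0], 2, 0.0001)
fractional_error = exact(r) / approx(r)
print(fractional_error.min(), fractional_error.max())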
Example #50
0
def _decision_tree_classification_train(
        table,
        feature_cols,
        label_col,  # fig_size=np.array([6.4, 4.8]),
        criterion='gini',
        splitter='best',
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=None,
        random_state=None,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        min_impurity_split=None,
        class_weight=None,
        presort=False,
        sample_weight=None,
        check_input=True,
        X_idx_sorted=None):

    feature_names, features = check_col_type(table, feature_cols)
    y_train = table[label_col]

    if (sklearn_utils.multiclass.type_of_target(y_train) == 'continuous'):
        raise_error('0718', 'label_col')

    class_labels = sorted(set(y_train))
    if class_weight is not None:
        if len(class_weight) != len(class_labels):
            raise ValueError(
                "Number of class weights should match number of labels.")
        else:
            class_weight = {
                class_labels[i]: class_weight[i]
                for i in range(len(class_labels))
            }

    classifier = DecisionTreeClassifier(
        criterion, splitter, max_depth, min_samples_split, min_samples_leaf,
        min_weight_fraction_leaf, max_features, random_state, max_leaf_nodes,
        min_impurity_decrease, min_impurity_split, class_weight, presort)
    classifier.fit(features, table[label_col], sample_weight, check_input,
                   X_idx_sorted)

    try:
        from sklearn.externals.six import StringIO
        from sklearn.tree import export_graphviz
        import pydotplus
        dot_data = StringIO()
        export_graphviz(classifier,
                        out_file=dot_data,
                        feature_names=feature_names,
                        class_names=classifier.classes_.astype(str),
                        filled=True,
                        rounded=True,
                        special_characters=True)
        graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
        from brightics.common.repr import png2MD
        fig_tree = png2MD(graph.create_png())
    except Exception:
        fig_tree = "Graphviz is needed to draw a Decision Tree graph. Please download it from http://graphviz.org/download/ and install it to your computer."

    # json
    model = _model_dict('decision_tree_classification_model')
    model['feature_cols'] = feature_cols
    model['label_col'] = label_col
    model['classes'] = classifier.classes_
    feature_importance = classifier.feature_importances_
    model['feature_importance'] = feature_importance
    model['max_features'] = classifier.max_features_
    model['n_classes'] = classifier.n_classes_
    model['n_features'] = classifier.n_features_
    model['n_outputs'] = classifier.n_outputs_
    model['tree'] = classifier.tree_
    get_param = classifier.get_params()
    model['parameters'] = get_param
    model['classifier'] = classifier

    # report
    indices = np.argsort(feature_importance)
    sorted_feature_cols = np.array(feature_names)[indices]

    plt.title('Feature Importances')
    plt.barh(range(len(indices)),
             feature_importance[indices],
             color='b',
             align='center')
    for i, v in enumerate(feature_importance[indices]):
        plt.text(v,
                 i,
                 " {:.2f}".format(v),
                 color='b',
                 va='center',
                 fontweight='bold')
    plt.yticks(range(len(indices)), sorted_feature_cols)
    plt.xlabel('Relative Importance')
    plt.xlim(0, 1.1)
    plt.tight_layout()
    fig_feature_importances = plt2MD(plt)
    plt.clf()

    params = dict2MD(get_param)

    # Add tree plot
    rb = BrtcReprBuilder()
    rb.addMD(
        strip_margin("""
    | ## Decision Tree Classification Train Result
    | ### Decision Tree
    | {fig_tree}
    |
    | ### Feature Importance
    | {fig_feature_importances}
    |
    | ### Parameters
    | {list_parameters}
    |
    """.format(fig_tree=fig_tree,
               fig_feature_importances=fig_feature_importances,
               list_parameters=params)))
    model['_repr_brtc_'] = rb.get()
    feature_importance_table = pd.DataFrame(
        [[feature_cols[i], feature_importance[i]]
         for i in range(len(feature_cols))],
        columns=['feature_name', 'importance'])
    model['feature_importance_table'] = feature_importance_table
    return {'model': model}
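The feature-importance chart above is independent of the report machinery. A minimal standalone sketch of the same barh pattern, using the iris dataset as a stand-in:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
names = np.array(['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid'])
clf = DecisionTreeClassifier(random_state=0).fit(X, y)

# sort features by importance so the most important sits on top
indices = np.argsort(clf.feature_importances_)
plt.barh(range(len(indices)), clf.feature_importances_[indices], align='center')
plt.yticks(range(len(indices)), names[indices])
plt.xlabel('Relative Importance')
plt.tight_layout()
plt.show()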
Example #51
0
def main(args):
    def train_model(model, criterion, optimizer, scheduler, num_epochs):
        since = time()
        best_model_wts = copy.deepcopy(model.state_dict())

        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            print('-' * 10)

            running_loss = 0.0
            running_corrects = []

            if args.matrix == "yes":
                true = [[] for _ in range(11)]
                pred = [[] for _ in range(11)]
            # true_p = 0
            # true_n = 0
            # false_p = 0
            # false_n = 0

            for j, data in enumerate(train_loader):
                inputs, labels = data

                if torch.cuda.is_available():
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    if args.matrix == "yes":
                        # mirror the CPU branch below: collect per-label ground truth
                        for row in labels.cpu():
                            for l in range(11):
                                true[l].append(row.numpy()[l])

                if not gpu and args.matrix == "yes":
                    for row in labels:
                        for l in range(11):
                            true[l].append(row.numpy()[l])

                optimizer.zero_grad()
                outputs = model(inputs)

                preds = outputs > 0.5

                if args.matrix == "yes":
                    for row in preds:
                        for l in range(11):
                            pred[l].append(row.float().numpy()[l])

                loss = criterion(
                    outputs.view(-1).float(),
                    labels.view(-1).float())

                loss.backward()
                optimizer.step()

                # for q in range(preds.shape[0]):
                # 	for w in range(preds[0].shape[0]):
                # 		if preds[q][w].float().item() == 1:
                # 			if labels[q][w].float().item() == 1:
                # 				true_p += 1
                # 			elif labels[q][w].float().item() == 0:
                # 				false_p += 1
                # 		elif preds[q][w].float().item() == 0:
                # 			if labels[q][w].float().item() == 1:
                # 				false_n += 1
                # 			elif labels[q][w].float().item() == 0:
                # 				true_n += 1

                running_loss += loss.item()
                running_corrects.append(
                    torch.sum((preds.float() == labels.float()) *
                              (labels.float() > 0)).item() /
                    (1e-5 + (preds > 0).sum().item()))

            scheduler.step()

            epoch_loss = running_loss / len(running_corrects)
            epoch_acc = sum(running_corrects) / len(running_corrects)

            model.eval()
            # val_loss, val_acc = evaluate(model, valid_loader)
            model.train()

            plot_train_acc.append(epoch_acc)
            # plot_valid_acc.append(val_acc)
            plot_train_loss.append(epoch_loss)
            # plot_valid_loss.append(val_loss)
            nRec.append(epoch)

            # precision = true_p / (true_p + false_p)
            # recall = true_p / (true_p + false_n)

            print('Train Loss: {:.4f} Train Acc: {:.4f}'.format(
                epoch_loss, epoch_acc))
            # print('Train Loss: {:.4f} Train Acc: {:.4f} Val Loss: {:.4f} Val Acc: {:.4f}'.format(epoch_loss, epoch_acc, val_loss, val_acc))
            # print('TP: %d TN: %d FP: %d FN: %d' % (true_p,true_n,false_p,false_n))
            # print('Precision:  {:.4f} Recall  {:.4f}'.format(precision, recall))
            print()

        test_loss, test_acc = evaluate(model, test_loader)
        print('test Loss: {:.4f} test Acc: {:.4f}'.format(test_loss, test_acc))
        time_elapsed = time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        # print('Best val Acc: {:4f}'.format(max(plot_valid_acc)))

        model.load_state_dict(best_model_wts)
        return model

    data = pd.read_pickle('%s.pkl' % args.pkl_file)  # note: immediately overwritten by the hard-coded test pickles below

    data = pd.read_pickle("string_test.pkl")
    str_data = pd.read_pickle("string_test_pt2.pkl")

    labels = data["instruments"].values
    music_data = data["normalized"].values

    str_labels = str_data["instruments"].values
    str_music_data = str_data["normalized"].values

    music_data = np.stack(music_data).reshape(-1, 128 * 65)  #65*128, 1025 * 65
    str_music_data = np.stack(str_music_data).reshape(-1, 128 *
                                                      65)  #65*128, 1025 * 65

    train_data, valid_data, train_labels, valid_labels = train_test_split(
        music_data, labels, test_size=0.1, random_state=1)
    # train_data, valid_data, train_labels, valid_labels = train_data[0:100], valid_data[0:100], train_labels[0:100], valid_labels[0:100]

    train_set = MusicDataset(train_data, train_labels)
    valid_set = MusicDataset(valid_data, valid_labels)
    test_set = MusicDataset(str_music_data, str_labels)
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True)
    valid_loader = DataLoader(valid_set,
                              batch_size=args.batch_size,
                              shuffle=True)
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size,
                             shuffle=True)

    model_ft = MultiInstrumClass(128 * 65, 11, args.emb_dim, args.hidden_dim,
                                 args.model)
    # model_ft = MultiLP(128*64)

    if torch.cuda.is_available():
        model_ft.cuda()

    plot_train_acc, plot_valid_acc, plot_train_loss, plot_valid_loss, nRec = [], [], [], [], []

    # criterion is not defined in the original snippet; a BCE loss is assumed
    # here since the model outputs are thresholded at 0.5 like probabilities
    criterion = torch.nn.BCELoss()

    optimizer_ft = torch.optim.Adam(model_ft.parameters(),
                                    lr=args.lr,
                                    weight_decay=.04)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=7,
                                           gamma=0.1)
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                           args.epochs)

    fig = plt.figure()
    ax = plt.subplot(1, 2, 1)
    plt.plot(nRec, plot_train_acc, label='Training')
    if plot_valid_acc:  # validation tracking is commented out above
        plt.plot(nRec, plot_valid_acc, label='Validation')
    plt.title('Accuracy vs. Epoch')
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    ax.legend()

    bx = plt.subplot(1, 2, 2)
    bx.plot(nRec, plot_train_loss, label='Training')
    if plot_valid_loss:
        bx.plot(nRec, plot_valid_loss, label='Validation')
    plt.title('Loss vs. Epoch')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    bx.legend()
    plt.savefig("%s.png" % args.model)  # save before show so the figure is not blank
    plt.show()
    plt.clf()
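The running_corrects accumulator above computes, per batch, the number of correctly predicted positives divided by the number of predicted positives (a precision-like score). A minimal sketch of that metric on toy tensors:

import torch

preds = torch.tensor([[1., 0., 1.], [0., 1., 1.]])   # thresholded outputs
labels = torch.tensor([[1., 0., 0.], [0., 1., 1.]])  # ground truth

hits = torch.sum((preds == labels) * (labels > 0)).item()  # correct positives
metric = hits / (1e-5 + (preds > 0).sum().item())          # / predicted positives
print(metric)  # 3 correct of 4 predicted positives -> ~0.75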
Example #52
0
	def parity_plot(true, pred, set_label):
		if len(true) == 0:
			print('skipping parity plot for empty dataset')
			return

		try:
			# Trim it to recorded values (not NaN)
			true = np.array(true).flatten()
			print(true)
			print(true.shape)
			pred = np.array(pred).flatten()
			print(pred)
			print(pred.shape)

			pred = pred[~np.isnan(true)]
			true = true[~np.isnan(true)]

			# For TOX21
			AUC = 'N/A'
			if len(set(list(true))) <= 2:
				from sklearn.metrics import roc_auc_score, roc_curve, auc
				roc_x, roc_y, _ = roc_curve(true, pred)
				AUC = roc_auc_score(true, pred)
				plt.figure()
				lw = 2
				plt.plot(roc_x, roc_y, color='darkorange',
					lw = lw, label = 'ROC curve (area = %0.3f)' % AUC)
				plt.plot([0, 1], [0, 1], color='navy', lw = lw, linestyle = '--')
				plt.xlim([0.0, 1.0])
				plt.ylim([0.0, 1.05])
				plt.xlabel('False Positive Rate')
				plt.ylabel('True Positive Rate')
				plt.title('ROC for {}'.format(set_label))
				plt.legend(loc = "lower right")
				plt.savefig(test_fpath + ' {} ROC.png'.format(set_label), bbox_inches = 'tight')
				plt.clf()

			min_y = np.min((true, pred))
			max_y = np.max((true, pred))
			mse = stats.mse(true, pred)
			mae = stats.mae(true, pred)
			q = stats.q(true, pred)
			(r2, a) = stats.linreg(true, pred) # predicted v observed
			(r2p, ap) = stats.linreg(pred, true) # observed v predicted

			# Print
			print('{}:'.format(set_label))
			print('  mse = {}, mae = {}'.format(mse, mae))
			print('  q = {}'.format(q))
			print('  r2 through origin = {} (pred v. true), {} (true v. pred)'.format(r2, r2p))
			print('  slope through origin = {} (pred v. true), {} (true v. pred)'.format(a[0], ap[0]))

			# Create parity plot
			plt.scatter(true, pred, alpha = 0.5)
			plt.xlabel('Actual')
			plt.ylabel('Predicted')
			plt.title('Parity plot for {} ({} set, N = {})'.format(y_label, set_label, len(true)) + 
				'\nMSE = {}, MAE = {}, q = {}, AUC = {}'.format(round3(mse), round3(mae), round3(q), AUC) + 
				'\na = {}, r^2 = {}'.format(round3(a), round3(r2)) + 
				'\na` = {}, r^2` = {}'.format(round3(ap), round3(r2p)))
			plt.grid(True)
			plt.plot(true, true * a, 'r--')
			plt.axis([min_y, max_y, min_y, max_y])	
			plt.savefig(test_fpath + ' {}.png'.format(set_label), bbox_inches = 'tight')
			plt.clf()

		except Exception as e:
			print(e)
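The quantities printed above ('slope through origin', 'r2 through origin') come from the project helper stats.linreg, but a least-squares fit constrained through the origin can be computed directly. A sketch under that assumption (not the project's actual helper):

import numpy as np

def linreg_origin(x, y):
    """Least-squares fit y = a*x through the origin, with an uncentered r^2."""
    x, y = np.asarray(x, float), np.asarray(y, float)
    a = np.dot(x, y) / np.dot(x, x)
    ss_res = np.sum((y - a * x) ** 2)
    ss_tot = np.sum(y ** 2)  # total SS about zero for a through-origin fit
    return 1 - ss_res / ss_tot, a

r2, a = linreg_origin([1, 2, 3], [1.1, 1.9, 3.2])
print(r2, a)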
Example #53
0
def main(args):
  createDirectoryIfNotExist(args.odir)

  userList = os.listdir(args.dir)
  for user in userList:
    createDirectoryIfNotExist(args.odir + '/' + user)
    sites = os.listdir(args.dir + '/' + user)
    for site in sites:
      relName = user + '/' + site
      createDirectoryIfNotExist(args.odir + '/' + relName)
      stream = util.filterKeystrokes(util.openStream(args.dir + '/' + relName))
      sessions = util.segmentStream(stream)

      # We want histograms of keystroke usage per user
      allLengths = list(util.getAllKeystrokeLengths(stream))
      if len(allLengths) > 0:
        plt.clf()
        plt.hist(list(allLengths), 200)
        plt.savefig(args.odir + '/' + user + '/all-kl-' + site + '.png')

      for i in range(len(keyCombos)):
        lengths = list(util.getKeystrokeLengths(stream, keyCombos[i]))
        if len(lengths) > 0:
          plt.clf()
          plt.hist(list(lengths), 200)
          plt.savefig(args.odir + '/' + user + '/' + names[i] + '-kl-' + site + '.png')

      # Also want histograms of word data, per user
      # materialise the zip so it supports len() and indexing under Python 3
      wordData = list(zip(*chain(*[util.getWordData(s) for s in sessions])))
      if len(wordData) > 0 and len(wordData[0]) > 0:
        plt.clf()
        plt.hist(list(d for d in wordData[0] if abs(d) < 10), 200)
        plt.savefig(args.odir + '/' + user + '/word-dur-' + site + '.png')
        plt.clf()
        plt.hist(list(wordData[1]), 200)
        plt.savefig(args.odir + '/' + user + '/word-len-' + site + '.png')

      # Key overlaps
      #keyOverlaps = list(chain.from_iterable(util.getKeyOverlaps(s) for s in sessions))
      keyOverlaps = list(x for x in util.getKeyOverlaps(stream) if abs(x) < 5)
      if len(keyOverlaps) > 0:
        plt.clf()
        plt.hist(keyOverlaps, 100)
        plt.savefig(args.odir + '/' + user + '/overlap-' + site + '.png')

      # Word pauses
      wordPauses = list(x for x in util.getWordPauses(stream) if abs(x) < 60)
      if len(wordPauses) > 0:
        plt.clf()
        plt.hist(wordPauses, 100)
        plt.savefig(args.odir + '/' + user + '/word-pause-' + site + '.png')

      # Time between shift key and modified key
      shiftTime = list(x for x in util.getModifierDelays(stream, util._SHIFT) if abs(x) < 5)
      if len(shiftTime) > 0:
        plt.clf()
        plt.hist(shiftTime, 100)
        plt.savefig(args.odir + '/' + user + '/shift-delay-' + site + '.png')

      # Time between shift-to-shift
      shiftShift= list(x for x in util.getModifierDelays(stream, util._SHIFT) if abs(x) < 1200)
      if len(shiftShift) > 0:
        plt.clf()
        plt.hist(shiftShift, 100)
        plt.savefig(args.odir + '/' + user + '/shift-shift-' + site + '.png')
Example #54
0
  def log_metrics(self):
    '''
    Creates all plots based on self.METRICS.
    '''
    # Plot Usage rate
    self.S.plot_usage(directory=self.plot_name)
    self.S.plot_customized_usage_rate(directory=self.plot_name)
    self.plot_sharing()

    # Plot basic modules
    if self.T.MTRAIN[0].TrainInput.shape[-1] > 1: return
    print_plot = 0
    input_range = np.linspace(-1,1).reshape((-1,1))
    norm_input_range = self.T.normalize_input(input_range)
    norm_input_torch = torch.FloatTensor(norm_input_range).to(
        device=self.nn_device)
    if self.perm_sample_modules is None:
      self.perm_sample_modules=np.random.choice(len(self.L),
          min(len(self.L), 9), replace=False)
    fig, ax = plt.subplots(nrows=3, ncols=3)
    for i in range(min(len(self.L), 9)):
      net = self.L[self.perm_sample_modules[i]]
      color = 'b'
      if net.inp == 1 and net.out == 1: #plotable function
        ax[i//3, i%3].plot(input_range,
            2.*self.T.denormalize_output(
              net(norm_input_torch).data.cpu().numpy()), c=color)
        ax[i//3, i%3].set_ylim([-1.5,1.5])
        ax[i//3, i%3].set_xlim([-1,1])
        ax[i//3, i%3].set_xticks(np.array([-1,0,1]))
        ax[i//3, i%3].set_yticks(np.array([-1,0,1]))
        print_plot += 1
    if print_plot > 0:
      plt.savefig(os.path.join(self.plot_name, 'sample-modules'))
      if self.store_video:
        plt.savefig(os.path.join(self.plot_name,
          'video/modules_'+str(self.step)))
      plt.cla()
    # Plot basic comparisons
    fig, ax = plt.subplots(nrows=3, ncols=3)
    if self.perm_sample_fns is None:
      self.perm_sample_fns=np.random.choice(len(self.T.MTRAIN),
          min(len(self.T.MTRAIN), 9), replace=False)
    for i in range(min(len(self.T.MTRAIN), 9)):
      dataset = self.T.MTRAIN[self.perm_sample_fns[i]]
      ax[i//3, i%3].scatter(dataset.UValInput,
          dataset.UValOutput, c='g', label='val')
      ax[i//3, i%3].scatter(dataset.UTrainInput,
          dataset.UTrainOutput, c='r', label='train',
          s=18)
      if self.MAML:
        ax[i//3, i%3].scatter(self.T.MTRAIN[self.perm_sample_fns[i]].UValInput,
            self.T.denormalize_output(
              self.MTrainAnswers[self.perm_sample_fns[i]][1]))
      else:
        structure = self.S.TrainStructures[self.perm_sample_fns[i]]
        structure_output = self.T.denormalize_output(self.run_model(structure,
            norm_input_torch).data.cpu().numpy())
        ax[i//3, i%3].plot(input_range, structure_output)
      ax[i//3, i%3].set_xlim([
        np.floor(np.min(self.T.MTRAIN[self.perm_sample_fns[i]].UValInput)),
        np.ceil( np.max(self.T.MTRAIN[self.perm_sample_fns[i]].UValInput))])
      ax[i//3, i%3].set_ylim([
        np.floor(np.min(self.T.MTRAIN[self.perm_sample_fns[i]].UValOutput))-.5,
        np.ceil( np.max(self.T.MTRAIN[self.perm_sample_fns[i]].UValOutput))+.5])
      ax[i//3, i%3].set_xticks(np.array([-1,0,1]))
      ax[i//3, i%3].set_yticks(np.array([-1,0,1]))
    plt.savefig(os.path.join(self.plot_name, 'comparisons'))
    if self.store_video:
      plt.savefig(os.path.join(self.plot_name,
        'video/comparisons_'+str(self.step)))
    plt.clf()
Example #55
0
def genetic(population, crossover, mutation, iterations):
    # print('Running GA with p={}, itt={}, c%={}, m%={}'.format(population,
    #                                                           iterations,
    #                                                           crossover,
    #                                                           mutation))

    range_Kp = [x / 100. for x in range(200, 1800)]
    range_Ti = [x / 100. for x in range(105, 942)]
    range_Td = [x / 100. for x in range(26, 237)]

    pop_Kp = random.sample(range_Kp, population)
    pop_Ti = random.sample(range_Ti, population)
    pop_Td = random.sample(range_Td, population)

    best_matches = []

    current = {k: 0 for k in zip(pop_Kp, pop_Ti, pop_Td)}
    for Kp, Ti, Td in current.keys():
        current[(Kp, Ti, Td)] = fitness(Kp, Ti, Td)
    elite = sorted(current, key=current.get)
    cost = sum(current.values())

    for itt in range(iterations):
        # print('> iteration {} of {}... '.format(itt + 1, iterations), end='',
        #       flush=True)
        children = []

        # parent-selection
        while len(children) < population - 2:
            parent = weighted_choice(current, cost)

            # crossover
            if random.random() < crossover:
                mate = weighted_choice(current, cost)

                child0 = []
                child1 = []
                for (gene0, gene1) in zip(parent, mate):
                    if random.random() < 0.5:
                        child0.append(gene0)
                        child1.append(gene1)
                    else:
                        child0.append(gene1)
                        child1.append(gene0)
                child0 = tuple(child0)  # pylint: disable=R0204
                child1 = tuple(child1)  # pylint: disable=R0204

                children.append(child0)
                children.append(child1)
            else:
                # TODO: this allows duplicate children... is that OK?
                children.append(parent)

        # mutation
        current = {}
        for c in children:
            if random.random() < mutation:
                idx = random.random()
                if idx < 0.33:
                    c = tuple([c[0], c[1], random.choice(range_Td)])
                elif idx < 0.66:
                    c = tuple([c[0], random.choice(range_Ti), c[2]])
                elif idx < 0.99:
                    c = tuple([random.choice(range_Kp), c[1], c[2]])

            current[c] = fitness(c[0], c[1], c[2])

        # elite
        for item in elite[:2]:
            current[item] = fitness(item[0], item[1], item[2])

        # print(current)
        elite = sorted(current, key=current.get)
        cost = sum(current.values())

        best_matches.append(current[elite[0]])
        # print('done!')

    # graph
    plot.clf()
    plot.plot(range(iterations), best_matches)

    plot.xlabel('iteration')
    plot.ylabel('best fitness')
    plot.title('Genetic Algorithm')
    plot.grid(True)
    plot.savefig('q1-i{}-p{}-c{}-m{}.png'.format(iterations, population,
                                                 crossover, mutation))
    plot.show()
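weighted_choice and fitness are defined elsewhere. Since this GA minimises fitness (the elite is sorted ascending), a roulette-wheel helper that favours lower costs might look like the following sketch; this is an assumption about the missing helper, not the original code:

import random

def weighted_choice(current, cost):
    """Pick a key from current, favouring lower (better) fitness values."""
    # invert each fitness so that a lower cost gets a larger share of the wheel
    weights = {k: cost - v for k, v in current.items()}
    total = sum(weights.values())
    pick = random.uniform(0, total)
    acc = 0.0
    for k, w in weights.items():
        acc += w
        if acc >= pick:
            return k
    return k  # fallback for floating-point round-off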
Example #56
0
def hw_histogram_from_file(keys, fname, fig_path, fig_name, x_lable, y_label, vert_line=0, remove_points=None, y_points=None,logscale=False,hide_legend=False, ind_multiplier=1,legend_cols=4,err=None,label_replacer=None):
    labels = []
    key_to_list = {}
    with open(fname,'r') as f:
      lines = f.readlines()
      labels = lines[0].strip().split(" ")[1:]
      for i in range(len(labels)):
          if label_replacer != None and labels[i] in label_replacer:
              labels[i] = label_replacer[labels[i]]
      print(labels)
      lines = lines[1:]
      for line in lines:
          if not line.strip(): continue
          line = line.strip().split(" ")
        #   if line[0] not in keys: continue
          key_to_list[line[0]] = [float(v) for v in line[1:] if v]

    fig, ax = plt.subplots(figsize=(14,10))
    #fig, ax = plt.subplots()

    if remove_points:
        remove_points.sort(reverse=True)
        for point in remove_points:
            #print labels
            labels.pop(point)
            for key in key_to_list:
                print(key, key_to_list[key])
                # try:
                key_to_list[key].pop(point)
                #     if err:
                #         err[key][0].pop(point)
                #         err[key][1].pop(point)
                # except:
                    # import pdb; pdb.set_trace()

    width = 0.35
    ind = np.arange( len(labels) )*(len(key_to_list)/2.0) * ind_multiplier #*( len(key_to_list))*width
    for i, stype in enumerate(keys):
        # try:
        # if err:
        #     ax.bar(ind + i*width, key_to_list[stype[1]] ,width, yerr=err[stype[1]], color='white',edgecolor='black', hatch=stype[0], label=stype[1])
        # else:
        lab = stype[1]
        if label_replacer and lab in label_replacer:
            lab = label_replacer[lab]
        ax.bar(ind + i*width, key_to_list[stype[1]], width, color='white', edgecolor='black', hatch=stype[0], label=lab)
        # except:
            # import pdb; pdb.set_trace()
    # ax.bar(ind + i*width, key_to_list['Simulation'] ,width, color='white',edgecolor='black', hatch='o', label="Simulation")
    # ax.bar(ind+width, shoal50 ,width, color='white', edgecolor='black', hatch='+',label="Shoal (50G)")
    # ax.bar(ind+width, key_to_list['Prototype'] ,width, color='white', edgecolor='black', hatch='+',label="Prototype")
    # ax.bar(ind+2*width, fattree ,width, color='white', edgecolor='black', hatch='x',label="Ideal")



    ax.set_xticks(ind + width*(len(key_to_list)/2.0)) # ((len(key_to_list))/2.0)*
    ax.set_xticklabels([l.strip() for l in labels], rotation=65)

    if y_points:
        ax.set_yticks(y_points)
    # elif logscale:

    plt.rcdefaults()
    ax.set_ylabel(y_label.strip(), fontsize=font['size'])
    ax.set_xlabel(x_lable.strip(), fontsize=font['size'], labelpad=10)

    plt.rc('font', **font)
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])

    if vert_line:
        plt.axhline(vert_line, color="k", linestyle="--")
        ax.text((box.x0+box.width)/1.8, vert_line+0.1, 'Maximum Achievable Throughput',size=20)
        # plt.text(vert_line+0.2,0,'')

    if not hide_legend:
        # Put a legend below current axis
        lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.55),
                  fancybox=False, shadow=True, ncol=legend_cols, fontsize=font['size']-3)
        #lgd = ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.),
        #          fancybox=False, shadow=True, ncol=legend_cols, fontsize=font['size']-3)

        # fig.tight_layout()
        fig.savefig(fig_path+fig_name, bbox_extra_artists=(lgd,), bbox_inches='tight')
    else:
        fig.savefig(fig_path+fig_name, bbox_inches='tight')
    fig.clf()
    plt.clf()
    plt.close()
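The ind/width arithmetic above is the standard grouped-bar layout: series i is offset by i*width from a base position per label, and the ticks are re-centred under each group. A minimal sketch of the same layout with made-up data:

import numpy as np
import matplotlib.pyplot as plt

labels = ['A', 'B', 'C']
series = {'one': [3, 5, 2], 'two': [4, 1, 6]}

width = 0.35
ind = np.arange(len(labels))  # one base position per label
fig, ax = plt.subplots()
for i, (name, values) in enumerate(series.items()):
    ax.bar(ind + i * width, values, width, label=name)
ax.set_xticks(ind + width * (len(series) - 1) / 2.0)  # centre ticks under groups
ax.set_xticklabels(labels)
ax.legend()
plt.show()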
Example #57
0
def plot_perf_data(output_dir, dat, measure):
    hosp_name = dat['Reporting Entity'].values[0]
    hosp_name = hosp_name.replace('Hospital ', '')  # Shorten hospital titles for the label
    clean_hosp_name = ''.join(c for c in hosp_name if c.isalnum())  # Remove special chars

    out_filename = ''.join(clean_hosp_name + '_' + measure + ".png")
    out_filepath = os.path.join(output_dir, out_filename)

    baseline = dat[dat['Timeframe'] == 'Baseline']['Measure Rate'].values[0]

    measure_token = measure.split(sep='-')[1]

    rdx_target = baseline * proj_target_dict.get(measure_token)
    sft_rdx_target = baseline * soft_target_dict.get(measure_token)

    axislbl = 'Rate'
    width_inches = 5
    height_inches = 2
    measure_type = dat['Measure Type'].values[0]

    fig_size = (width_inches, height_inches)
    params = {
        'axes.labelsize': 4,
        'font.size': 4,
        'legend.fontsize': 4,
        'text.usetex': False,  # True causes issues with multiproc.
        'figure.figsize': fig_size,
        'font.family': 'serif',
        'axes.linewidth': 0.5,
        'xtick.labelsize': 3,
        'ytick.labelsize': 4}
    plt.rcParams.update(params)

    perfchart = plt.figure()
    perfchart.clf()

    # Baseline rate plot
    # Create empty dataframe to fill to eval period
    fill_list = []
    for y in range(2016, 2017 + 1):
        for m in range(1, 12 + 1):
            fill_list.append((date(y, m, 1), np.nan))

    fill_dat = pd.DataFrame(data=fill_list, columns=['Start Date', 'Rate'])
    fill_dat.set_index('Start Date', drop=False, inplace=True)
    dat.sort_values(by='Start Date', inplace=True)
    dat.set_index('Start Date', drop=False, inplace=True)
    dat = dat.combine_first(fill_dat)
    try:
        dat.sort_values(by='Start Date', inplace=True)
    except TypeError:
        # Issue where one facility-measure has a datetime, and the rest have dates.
        dat['Start Date'] = pd.to_datetime(dat['Start Date'])
        dat.sort_values(by='Start Date', inplace=True)

    datecond = dat['Start Date'] >= date(2016, 10, 1)
    measure_cond = dat['HRET_MeasureID'] == measure
    dat = dat[datecond & measure_cond]

    plt.plot(dat[dat['Start Date'] >= date(2016, 10, 1)]['Start Date'].values,
             dat[dat['Start Date'] >= date(2016, 10, 1)]['Measure Rate'],
             color='#FFA500',
             linewidth=2,
             marker='o',
             markersize=4,
             markeredgewidth=1,
             markerfacecolor='#FFA500',
             markeredgecolor='w',
             clip_on=False,
             label=hosp_name)

    try:
        plt.axhline(y=baseline, color='black', linestyle=':', linewidth=.5, label='Facility Baseline')
    except TypeError:
        print('Baseline error? Not required.')
        print(baseline)

    ax = plt.gca()
    ax.axes.xaxis.set_major_formatter(mdates.DateFormatter('%b-%Y'))
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    perfchart.autofmt_xdate()
    plt.ylim(bottom=0)
    plt.ylabel(axislbl.replace('%', '\\%'))
    yax = ax.get_ylim()
    ymin = yax[0]
    ymax = (yax[1] * 1.05)
    ax.set_ylim(ymin, ymax)
    xmin = date(2016, 10, 1)
    xmax = (datetime.now().date().replace(day=1) - timedelta(1)).replace(day=1)
    ax.set_xlim(xmin, xmax)

    if measure_type == 'Outcome' or measure in ("HIIN-CAUTI-3a", "HIIN-CLABSI-3a", "HIIN-CAUTI-3b", "HIIN-CLABSI-3b"):
        try:
            datebool = (dat['Start Date'] >= xmin) & (dat['Start Date'] <= xmax)
            x = dat['Start Date'].values
            ax.fill_between(x, 0, rdx_target, where=datebool, facecolor='green', alpha=0.2, label='Target')
            ax.fill_between(x, 0, sft_rdx_target, where=datebool, facecolor='green', alpha=0.1)
        except TypeError:
            print('Target zone shading error.')
            print('Target value causing error was: ' + str(rdx_target))

    print("\tMaking plot " + str(out_filename))

    legend = plt.legend(loc='upper center', bbox_to_anchor=(0., 1.22, 1., .0122),
                        mode="expand", ncol=3, handlelength=4)
    legend.get_frame().set_edgecolor('white')
    plt.savefig(out_filepath, dpi=300, bbox_extra_artists=[legend], bbox_inches='tight')
    # Clear figure so next plot is empty.
    plt.clf()
    plt.close(perfchart)

    return out_filename, out_filepath
Example #58
0
def rna_draw(g, title="", node_ids=False,
             highlight_edges=None,
             node_labels=None,
             node_colors=None,
             num_clusters=None,
             pos=None,
             pos_offset=(0, 0),
             scale=1,
             ax=None,
             show=False,
             alpha=1,
             save=False,
             node_size=250,
             fontsize=12,
             format='pdf',
             seed=None):
    """
    Draw an RNA with the edge labels used by Leontis Westhof
    :param g: the graph to draw
    :param title:
    :param highlight_edges:
    :param node_colors:
    :param num_clusters:
    :return:
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    if pos is None:
        pos = circular_layout(g)
    # if pos is None:
    # pos = nx.spring_layout(nx_g, seed=seed)
    # new_pos = {}
    # for n, p in pos.items():
    #     new_pos[n] = scale * pos[n] + np.array(pos_offset)

    # pos = new_pos
    #
    # if node_colors is None:
    #     nodes = nx.draw_networkx_nodes(nx_g, pos,
    #                                    node_size=node_size,
    #                                    node_color='white',
    #                                    linewidths=2,
    #                                    ax=ax,
    #                                    alpha=alpha)
    # else:
    #     nodes = nx.draw_networkx_nodes(nx_g, pos,
    #                                    node_size=node_size,
    #                                    node_color=node_colors,
    #                                    linewidths=2,
    #                                    ax=ax,
    #                                    alpha=alpha)
    #
    # nodes.set_edgecolor('black')
    # if node_ids:
    #     node_labels = {n: str(n).replace("_", "-") for n in nx_g.nodes()}
    #     nx.draw_networkx_labels(nx_g, pos, node_labels, font_size=fontsize, font_color='black', ax=ax)
    # if not node_labels is None:
    #     nx.draw_networkx_labels(nx_g, pos, node_labels, font_size=fontsize, font_color='black', ax=ax)
    #
    # plt.title(r"{0}".format(title))
    # edge_labels = {}
    # for n1, n2, d in nx_g.edges(data=True):
    #     try:
    #         symbol = make_label(d['label'])
    #         edge_labels[(n1, n2)] = symbol
    #     except:
    #         if d['label'] in ['B53', 'pool']:
    #             edge_labels[(n1, n2)] = ''
    #         else:
    #             edge_labels[(n1, n2)] = r"{0}".format(d['label'])
    #         continue
    #
    # non_bb_edges = [(n1, n2) for n1, n2, d in nx_g.edges(data=True) if d['label'] not in ['B53', 'pool']]
    # bb_edges = [(n1, n2) for n1, n2, d in nx_g.edges(data=True) if d['label'] == 'B53']
    # pool_edges = [(n1, n2) for n1, n2, d in nx_g.edges(data=True) if d['label'] == 'pool']
    #
    # nx.draw_networkx_edges(nx_g, pos, edgelist=non_bb_edges, ax=ax)
    # nx.draw_networkx_edges(nx_g, pos, edgelist=bb_edges, width=2, ax=ax)
    # nx.draw_networkx_edges(nx_g, pos, edgelist=pool_edges, width=1, edge_color='grey', style='dashed', ax=ax)
    #
    # if not highlight_edges is None:
    #     nx.draw_networkx_edges(nx_g, pos,
    #                            edgelist=highlight_edges,
    #                            edge_color='y',
    #                            width=8,
    #                            alpha=0.3,
    #                            ax=ax)
    #
    # nx.draw_networkx_edge_labels(nx_g, pos, font_size=fontsize,
    #                              edge_labels=edge_labels, ax=ax)
    # ax.axis('off')

    process_axis(ax,
                 g,
                 subtitle=title,
                 highlight_edges=highlight_edges,
                 node_color=node_colors,
                 node_labels=node_labels)

    if save:
        plt.savefig(save, format=format)
        plt.clf()
    if show:
        plt.show()
    return ax
Example #59
0
def plot_comp_data(output_dir, dat, measure):
    hosp_name = dat['Reporting Entity'].values[0]
    hosp_name = hosp_name.replace('Hospital ', '')  # Shorten hospital titles for the label
    clean_hosp_name = ''.join(c for c in hosp_name if c.isalnum())  # Remove special chars

    datecond = dat['Start Date'] >= date(2016, 10, 1)
    measure_cond = dat['HRET_MeasureID'] == measure
    dat = dat[datecond & measure_cond]

    plot_title = dat['Measure'].values[0]
    plot_title = ''.join(c for c in plot_title if c.isalnum())

    out_filename = ''.join(clean_hosp_name + '_comp_' + measure + plot_title + ".png")
    out_filepath = os.path.join(output_dir, out_filename)


    axislbl = 'Rate'
    width_inches = 5
    height_inches = 2

    fig_size = (width_inches, height_inches)
    params = {
        'axes.labelsize': 4,
        'font.size': 4,
        'legend.fontsize': 4,
        'text.usetex': False,  # True causes issues with multiproc.
        'figure.figsize': fig_size,
        'font.family': 'serif',
        'axes.linewidth': 0.5,
        'xtick.labelsize': 3,
        'ytick.labelsize': 4}
    plt.rcParams.update(params)

    compchart = plt.figure()
    compchart.clf()

    dat['fac_rate_isfinite'] = np.isfinite(dat['Rate'].astype(np.double))
    dat['state_rate_isfinite'] = np.isfinite(dat['All State Organizations Rate'].astype(np.double))
    dat['HRET_rate_isfinite'] = np.isfinite(dat['All Project Organizations Rate'].astype(np.double))

    plt.plot(dat[(dat['HRET_rate_isfinite']) & datecond]['Start Date'].values,
             dat[(dat['HRET_rate_isfinite']) & datecond]['All Project Organizations Rate'],
             marker='s', markersize=1, markerfacecolor='#078F33', markeredgecolor='w',
             color='#078F33',
             linewidth=1,
             label='CDC Strive')
    plt.plot(dat[dat['state_rate_isfinite'] & datecond]['Start Date'].values,
             dat[dat['state_rate_isfinite'] & datecond]['All State Organizations Rate'],
             color='#A5998C',
             linewidth=1,
             label='Kansas Strive')

    datecond = dat['Start Date'] >= date(2016, 10, 1)
    dat = dat[datecond]

    ax = plt.gca()  # gets current axes.
    ax.axes.xaxis.set_major_formatter(mdates.DateFormatter('%b-%Y'))
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    compchart.autofmt_xdate()
    plt.ylim(bottom=0)
    plt.ylabel(axislbl.replace('%', '\\%'))
    yax = ax.get_ylim()
    ymin = yax[0]
    ymax = (yax[1] * 1.05)
    ax.set_ylim(ymin, ymax)
    xmin = date(2016, 10, 1)
    if date(2017, 8, 10) > datetime.now().date():
        xmax = date(2017, 6, 1)
    else:
        xmax = datetime.now().date()
    ax.set_xlim(xmin, xmax)

    legend = plt.legend(loc='lower center', bbox_to_anchor=(0., -0.25, 1., .0122),
                        mode="expand", ncol=4, handlelength=5)
    legend.get_frame().set_edgecolor('white')
    plt.savefig(out_filepath, dpi=300, bbox_extra_artists=[legend], bbox_inches='tight')
    # Clear figure so that the next plot is clean
    plt.clf()
    plt.close(compchart)

    return out_filename, out_filepath
Example #60
0
    def __on_key_press__(self, event):
        # disconnect any previously registered event handler, otherwise it would still fire
        if self.cid is not None:
            self.fig.canvas.mpl_disconnect(self.cid)
        if event.key == 'b':  # redraw from scratch
            plt.clf()
            self.points_list_1 = []
            self.points_list_2 = []
            self.__init_subplots__()
        if event.key == 'l':  # draw points in the left subplot
            self.cid = self.fig.canvas.mpl_connect('button_press_event',
                                                   self.__on_mouse_press_1__)
        if event.key == 'r':  # draw points in the right subplot
            self.cid = self.fig.canvas.mpl_connect('button_press_event',
                                                   self.__on_mouse_press_2__)
        if event.key == 'c':
            # compute and draw the centre points
            central_point_1 = self.__cal_centre__(self.points_list_1)
            central_point_2 = self.__cal_centre__(self.points_list_2)
            self.__draw_point__(central_point_1[0],
                                central_point_1[1],
                                color='black',
                                marker='o',
                                axe=self.axe_1)
            self.__draw_point__(central_point_2[0],
                                central_point_2[1],
                                color='black',
                                marker='o',
                                axe=self.axe_2)
            # construct and draw the vectors (the positive x half-axis)
            vec_1 = np.array(self.points_list_1)[0]
            vec_2 = np.array(self.points_list_2)[0]
            self.__draw_vector__(central_point_1,
                                 vec_1,
                                 self.axe_1,
                                 color='black')
            self.__draw_vector__(central_point_2,
                                 vec_2,
                                 self.axe_2,
                                 color='black')
            central_point_1_n = central_point_1 + (np.array(
                self.points_list_1)[0] - np.array(central_point_1)) * 100
            central_point_2_n = central_point_2 + (np.array(
                self.points_list_2)[0] - np.array(central_point_2)) * 100
            self.__draw_vector__(central_point_1,
                                 central_point_1_n,
                                 self.axe_1,
                                 color='black')
            self.__draw_vector__(central_point_2,
                                 central_point_2_n,
                                 self.axe_2,
                                 color='black')

            # use vec_sum to compute the angle between the old and new coordinate
            # systems, with the vector [1, 0] representing the old axis
            # note: vector_sum must take the center coordinate as its origin!
            # compute the sum vectors
            # vector_sum_1 = self.__cal_vector_sum__(self.center_p_1, self.points_list_1)
            # vector_sum_2 = self.__cal_vector_sum__(self.center_p_2, self.points_list_2)
            # self.__draw_point__(vector_sum_1[0], vector_sum_1[1], color='green', marker='o', axe=self.axe_1)
            # self.__draw_point__(vector_sum_2[0], vector_sum_2[1], color='green', marker='o', axe=self.axe_2)
            # draw the sum vectors
            # self.__draw_vector__(self.center_p_1, vector_sum_1, self.axe_1, color='green')
            # self.__draw_vector__(self.center_p_2, vector_sum_2, self.axe_2, color='green')
            # compute the centres
            # note: points_list likewise must take central as its origin!
            # note: central here means the centre of the figure, while center is the point structure to be matched
            vector_sum_1_resize = np.array(vec_1) - np.array(central_point_1)
            vector_sum_2_resize = np.array(vec_2) - np.array(central_point_2)
            points_list_1_resize = np.array(
                self.points_list_1) - np.array(central_point_1)
            points_list_2_resize = np.array(
                self.points_list_2) - np.array(central_point_2)
            changed_points_list_1, direction_1 = self.change_point_coor(
                vector_sum_1_resize, points_list_1_resize)
            changed_points_list_2, direction_2 = self.change_point_coor(
                vector_sum_2_resize, points_list_2_resize)
            center_point_1_resize = np.array(
                self.center_p_1) - np.array(central_point_1)
            center_point_2_resize = np.array(
                self.center_p_2) - np.array(central_point_2)
            changed_center_point_1, _ = self.change_point_coor(
                vector_sum_1_resize, center_point_1_resize)
            changed_center_point_2, _ = self.change_point_coor(
                vector_sum_2_resize, center_point_2_resize)
            # vector_sum_1_n_resize = np.array(vector_sum_1) - np.array(central_point_1)
            # vector_sum_2_n_resize = np.array(vector_sum_2) - np.array(central_point_2)
            # changed_vector_sum_n_resize_1, _ = self.change_point_coor(vector_sum_1_resize, vector_sum_1_n_resize)
            # changed_vector_sum_n_resize_2, _ = self.change_point_coor(vector_sum_2_resize, vector_sum_2_n_resize)
            vector_sum_1_r = self.__clockwise_rotation__(
                np.array(vec_1) - np.array(central_point_1), np.pi / 2)
            vector_sum_2_r = self.__clockwise_rotation__(
                np.array(vec_2) - np.array(central_point_2), np.pi / 2)
            self.__draw_vector__(central_point_1,
                                 100 * vector_sum_1_r +
                                 np.array(central_point_1),
                                 self.axe_1,
                                 color='pink')
            self.__draw_vector__(central_point_2,
                                 100 * vector_sum_2_r +
                                 np.array(central_point_2),
                                 self.axe_2,
                                 color='pink')

            # normalise changed_points_list_1 / 2
            changed_points_list_1, changed_center_point_1 = \
                self.__become_unit_vecs_for_this__(changed_points_list_1, changed_center_point_1)
            changed_points_list_2, changed_center_point_2 = \
                self.__become_unit_vecs_for_this__(changed_points_list_2, changed_center_point_2)
            # draw the transformed points and centres
            self.__draw_points__(changed_points_list_1, 'r', 'o', self.axe_3)
            self.__draw_points__(changed_points_list_2, 'r', 'o', self.axe_4)
            self.__draw_point__(changed_center_point_1[0],
                                changed_center_point_1[1], 'b', 'o',
                                self.axe_3)
            self.__draw_point__(changed_center_point_2[0],
                                changed_center_point_2[1], 'b', 'o',
                                self.axe_4)
            # self.__draw_point__(changed_vector_sum_n_resize_1[0], changed_vector_sum_n_resize_1[1], 'green', 'o', self.axe_3)
            # self.__draw_point__(changed_vector_sum_n_resize_2[0], changed_vector_sum_n_resize_2[1], 'green', 'o', self.axe_4)

            # in the new coordinate system, compute the sum vectors formed by the
            # candidate correspondence point and its neighbourhood points
            new_neighbor_points_1 = self.__use_vector_sum_update_neighborhood__(
                changed_center_point_1, changed_points_list_1)
            new_neighbor_points_2 = self.__use_vector_sum_update_neighborhood__(
                changed_center_point_2, changed_points_list_2)
            self.__draw_points__(np.array(new_neighbor_points_1), 'y', 'o',
                                 self.axe_3)
            self.__draw_points__(np.array(new_neighbor_points_2), 'y', 'o',
                                 self.axe_4)
            new_neighbor_points_1 = np.array(new_neighbor_points_1)
            new_neighbor_points_2 = np.array(new_neighbor_points_2)

            # compute the per-component similarity
            x_sim, y_sim = self.__cal_similarity__(new_neighbor_points_1,
                                                   new_neighbor_points_2)
            # report the per-component similarity
            self.__show_result__(new_neighbor_points_1, '1')
            self.__show_result__(new_neighbor_points_2, '2')
            self.__draw_similarity__(x_sim, self.axe_5,
                                     'x components similarity')
            self.__draw_similarity__(y_sim, self.axe_6,
                                     'y components similarity')
            print("x_similarity:", x_sim)
            print("y_similarity:", y_sim)
        self.fig.canvas.draw_idle()
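__clockwise_rotation__ is defined elsewhere in this class; rotating a 2-D vector clockwise by theta is multiplication by [[cos t, sin t], [-sin t, cos t]]. A minimal sketch of such a helper (an assumption about the missing method, not the original):

import numpy as np

def clockwise_rotation(vec, theta):
    """Rotate a 2-D vector clockwise by theta radians."""
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, s],
                    [-s, c]])
    return rot @ np.asarray(vec)

print(clockwise_rotation([1.0, 0.0], np.pi / 2))  # ~[0, -1]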