Example #1
def make_lick_individual(targetSN, w1, w2):
    """ Make maps for the kinematics. """
    filename = "lick_corr_sn{0}.tsv".format(targetSN)
    binimg = pf.getdata("voronoi_sn{0}_w{1}_{2}.fits".format(targetSN, w1, w2))
    intens = "collapsed_w{0}_{1}.fits".format(w1, w2)
    extent = calc_extent(intens)
    bins = np.loadtxt(filename, usecols=(0,), dtype=str).tolist()
    bins = np.array([x.split("bin")[1] for x in bins]).astype(int)
    data = np.loadtxt(filename, usecols=np.arange(25)+1).T
    labels = [r'Hd$_A$', r'Hd$_F$', r'CN$_1$', r'CN$_2$', r'Ca4227', r'G4300',
             r'Hg$_A$', r'Hg$_F$', r'Fe4383', r'Ca4455', r'Fe4531', r'C4668',
             r'H$_\beta$', r'Fe5015', r'Mg$_1$', r'Mg$_2$', r'Mg$_b$', r'Fe5270',
             r'Fe5335', r'Fe5406', r'Fe5709', r'Fe5782', r'Na$_D$', r'TiO$_1$',
             r'TiO$_2$']
    mag = "[mag]"
    ang = "[\AA]"
    units = [ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, ang,
             ang, ang, mag, mag, ang, ang,
             ang, ang, ang, ang, ang, mag,
             mag]
    lims = [[None, None] for _ in range(25)]  # one (vmin, vmax) pair per Lick index
    pdf = PdfPages("figs/lick_sn{0}.pdf".format(targetSN))
    fig = plt.figure(1, figsize=(6.25,5))
    plt.subplots_adjust(bottom=0.12, right=0.97, left=0.09, top=0.96)
    plt.minorticks_on()
    ax = plt.subplot(111)
    ax.minorticks_on()
    plot_indices = np.arange(12,22)
    for i, vector in enumerate(data):
        if i not in plot_indices:
            continue
        print "Making plot for {0}...".format(labels[i])
        kmap = np.zeros_like(binimg)
        kmap[:] = np.nan
        for bin,v in zip(bins, vector):
            idx = np.where(binimg == bin)
            kmap[idx] = v
        vmin = lims[i][0] if lims[i][0] is not None else np.median(vector) - 2 * vector.std()
        vmax = lims[i][1] if lims[i][1] is not None else np.median(vector) + 2 * vector.std()
        m = plt.imshow(kmap, cmap="inferno", origin="lower", vmin=vmin,
                       vmax=vmax, extent=extent, aspect="equal")
        make_contours()
        plt.minorticks_on()
        plt.xlabel("X [kpc]")
        plt.ylabel("Y [kpc]")
        plt.xlim(extent[0], extent[1])
        plt.ylim(extent[2], extent[3])
        cbar = plt.colorbar(m)
        cbar.set_label("{0} {1}".format(labels[i], units[i]))
        pdf.savefig()
        plt.clf()
    pdf.close()
    return
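Both this example and Example #4 call a calc_extent helper that is not shown here. The sketch below is a minimal guess at what it does, assuming pf is pyfits / astropy.io.fits and that the FITS header carries a linear world coordinate system; the header keywords are assumptions:

def calc_extent(image):
    """Hypothetical helper: build the [x0, x1, y0, y1] extent for plt.imshow
    from a linear WCS in the FITS header."""
    header = pf.getheader(image)
    nx, ny = header["NAXIS1"], header["NAXIS2"]
    # linear WCS: world = CRVAL + (pixel - CRPIX) * CDELT
    x0 = header["CRVAL1"] + (1 - header["CRPIX1"]) * header["CDELT1"]
    x1 = header["CRVAL1"] + (nx - header["CRPIX1"]) * header["CDELT1"]
    y0 = header["CRVAL2"] + (1 - header["CRPIX2"]) * header["CDELT2"]
    y1 = header["CRVAL2"] + (ny - header["CRPIX2"]) * header["CDELT2"]
    return [x0, x1, y0, y1]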
Example #2
def show_plot(X, y, n_neighbors=10, h=0.2):
    # Create color maps
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF', '#FFAAAA', '#AAFFAA',
                                 '#AAAAFF', '#FFAAAA', '#AAFFAA', '#AAAAFF', '#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF', '#FF0000', '#FF0000',
                                '#FF0000', '#FF0000', '#FF0000', '#FF0000', '#FF0000'])

    for weights in ['uniform', 'distance']:
        # we create an instance of Neighbours Classifier and fit the data.
        clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)
        clf.fit(X, y)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (n_neighbors, weights))

    plt.show()
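A usage sketch for show_plot; the iris data and the imports are illustrative assumptions, not part of the original example:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets, neighbors

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target  # two features, so the decision mesh is 2-D
show_plot(X, y, n_neighbors=15, h=0.2)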
Example #3
    def statistics_charts(self):
        if plt is None:
            return

        for chart in self.stats_charts:
            if chart["type"] == "plot":
                fig = plt.figure(figsize=(8, 2))
                for xdata, ydata, label in chart["data"]:
                    plt.plot(xdata, ydata, "-", label=label)
                plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
            elif chart["type"] == "timeline":
                fig = plt.figure(figsize=(16, 2))
                for i, (starts, stops, label) in enumerate(chart["data"]):
                    plt.hlines([i] * len(starts), starts, stops, label=label)
                plt.ylim(-1, len(chart["data"]))
            elif chart["type"] == "bars":
                fig = plt.figure(figsize=(16, 4))
                plt.bar(range(len(chart["data"])), chart["data"])
            elif chart["type"] == "boxplot":
                fig = plt.figure(figsize=(16, 4))
                plt.boxplot(chart["data"])
            else:
                raise ValueError("Unknown chart type: {0}".format(chart["type"]))
            png = serialize_fig(fig)
            yield chart["name"], html_embed_img(png)
Example #4
def make_intens_all(w1, w2):
    fig = plt.figure(figsize=(6., 6.))
    gs = gridspec.GridSpec(1,1)
    gs.update(left=0.13, right=0.985, bottom = 0.13, top=0.988)
    ax = plt.subplot(gs[0])
    plt.minorticks_on()
    make_contours()
    labels = ["A", "B", "C", "D"]
    for i, field in enumerate(fields):
        os.chdir(os.path.join(data_dir, "combined_{0}".format(field)))
        image = "collapsed_w{0}_{1}.fits".format(w1, w2)
        intens = pf.getdata(image, verify=False)
        extent = calc_extent(image)
        extent = offset_extent(extent, field)
        plt.imshow(intens, cmap="bone", origin="bottom", extent=extent,
                   vmin=-20, vmax=80)
        verts = calc_verts(intens, extent)
        path = Path(verts, [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
                    Path.CLOSEPOLY,])
        patch = patches.PathPatch(path, facecolor='none', lw=2, edgecolor="r")
        ax.add_patch(patch)
        xtext, ytext = np.mean(verts[:-1], axis=0)
        plt.text(xtext-8, ytext+8, labels[i], color="r",
                fontsize=35, fontweight='bold', va='top')
    plt.xlim(26, -38)
    plt.ylim(-32, 32)
    plt.xlabel("X [kpc]")
    plt.ylabel("Y [kpc]")
    # plt.show()
    plt.savefig(os.path.join(plots_dir, "muse_fields.eps"), dpi=60,
                format="eps")
    plt.savefig(os.path.join(plots_dir, "muse_fields.png"), dpi=200)
    return
Example #5
def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_ 
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
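A usage sketch for scree_plot; the random feature matrix is a placeholder:

import numpy as np
from sklearn.decomposition import PCA

X = np.random.rand(200, 10)           # placeholder data
pca_obj = PCA(n_components=10).fit(X)
scree_plot(pca_obj, fname="scree.png")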
Example #6
File: LVQ.py Project: jayshonzs/ESL
def draw(data, classes, model, resolution=100):
    mycm = mpl.cm.get_cmap('Paired')
    
    one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
    two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
    xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
                     np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(predict(model, inputs[i])[0])
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    
    # prototype classes: five prototypes per class (0, 1, 2)
    t = np.repeat([0, 1, 2], 5)
    plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
    
    plt.xlim([0, 10])
    plt.ylim([0, 10])
    
    plt.show()
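draw() calls a predict function defined elsewhere in LVQ.py. A minimal nearest-prototype sketch consistent with how it is used above, where the 15 prototypes fall into three groups of five; the grouping rule and the return shape are assumptions:

def predict(model, x):
    """Hypothetical LVQ predict: classify x by its nearest prototype.
    Returns (class, prototype_index); classes follow the five-per-class layout."""
    dists = np.sum((model[:, :2] - x) ** 2, axis=1)
    nearest = int(np.argmin(dists))
    return nearest // 5, nearest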
Example #7
def plotErrorBars(dict_to_plot, x_lim, y_lim, xlabel, y_label, title, out_file, margin=[0.05, 0.05], loc=2):

    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(y_label)

    if y_lim is None:
        y_lim = [1 * float("Inf"), -1 * float("Inf")]

    max_val_seen_y = y_lim[1] - margin[1]
    min_val_seen_y = y_lim[0] + margin[1]
    print(min_val_seen_y, max_val_seen_y)
    max_val_seen_x = x_lim[1] - margin[0]
    min_val_seen_x = x_lim[0] + margin[0]
    handles = []
    for k in dict_to_plot:
        means, stds, x_vals = dict_to_plot[k]

        min_val_seen_y = min(min(np.array(means) - np.array(stds)), min_val_seen_y)
        max_val_seen_y = max(max(np.array(means) + np.array(stds)), max_val_seen_y)

        min_val_seen_x = min(min(x_vals), min_val_seen_x)
        max_val_seen_x = max(max(x_vals), max_val_seen_x)

        handle = plt.errorbar(x_vals, means, yerr=stds)
        handles.append(handle)
        print(max_val_seen_y)
    plt.xlim([min_val_seen_x - margin[0], max_val_seen_x + margin[0]])
    plt.ylim([min_val_seen_y - margin[1], max_val_seen_y + margin[1]])
    plt.legend(handles, list(dict_to_plot.keys()), loc=loc)
    plt.savefig(out_file)
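A usage sketch showing the data structure plotErrorBars expects (each value is a (means, stds, x_vals) triple; the numbers are illustrative):

x_vals = [1, 2, 3, 4]
curves = {
    "method A": ([0.10, 0.20, 0.25, 0.30], [0.02, 0.02, 0.03, 0.02], x_vals),
    "method B": ([0.15, 0.22, 0.30, 0.40], [0.01, 0.03, 0.02, 0.05], x_vals),
}
plotErrorBars(curves, x_lim=[1, 4], y_lim=None, xlabel="epoch",
              y_label="error", title="Validation error", out_file="errbars.png")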
Example #8
def scatter(x, y, equal=False, xlabel=None, ylabel=None, xinvert=False, yinvert=False):
    """
    Plot a scatter with simple formatting options
    """
    plt.scatter(x, y, 200, color=[0.3, 0.3, 0.3], edgecolors="white", linewidth=1, zorder=2)
    sns.despine()
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if equal:
        plt.gca().set_aspect("equal")
        plt.plot([0, max([x.max(), y.max()])], [0, max([x.max(), y.max()])], color=[0.6, 0.6, 0.6], zorder=1)
        bmin = min([x.min(), y.min()])
        bmax = max([x.max(), y.max()])
        rng = abs(bmax - bmin)
        plt.xlim([bmin - rng * 0.05, bmax + rng * 0.05])
        plt.ylim([bmin - rng * 0.05, bmax + rng * 0.05])
    else:
        xrng = abs(x.max() - x.min())
        yrng = abs(y.max() - y.min())
        plt.xlim([x.min() - xrng * 0.05, x.max() + xrng * 0.05])
        plt.ylim([y.min() - yrng * 0.05, y.max() + yrng * 0.05])
    if xinvert:
        plt.gca().invert_xaxis()
    if yinvert:
        plt.gca().invert_yaxis()
Example #9
def draw_stat(actual_price, action):
	price_list = []
	x_list = []
	# idx = np.where(actual_price == 0)[0]
	# print idx
	# print actual_price[np.where(actual_price < 2000)]
	# idx = [0] + idx.tolist()
	# print idx
	# for i in range(len(idx)-1):
	# 	price_list.append(actual_price[idx[i]+1:idx[i+1]-1])
	# 	x_list.append(range(idx[i]+i+1, idx[i+1]+i-1))
	# for i in range(len(idx)-1):
	# 	print x_list[i]
	# 	print price_list[i]
	# 	plt.plot(x_list[i], price_list[i], 'r')
	x_list = range(1,50)
	price_list = actual_price[1:50]
	plt.plot(x_list, price_list, 'k')
	for i in range(1, 50):
		style = 'go'
		if action[i] == 1:
			style = 'ro'
		plt.plot(i, actual_price[i], style)
	plt.ylim(2140, 2144.2)
	# plt.show()
	plt.savefig("action.png")
Example #10
def roc_plot(y_true, y_pred):
    """Plots a receiver operating characteristic.

    Parameters
    ----------
    y_true : array_like
        Observed labels, either 0 or 1.
    y_pred : array_like
        Predicted probabilities, floats on [0, 1].

    Notes
    -----
    .. plot:: pyplots/roc_plot.py

    References
    ----------
    .. [1] Pedregosa, F. et al. "Scikit-learn: Machine Learning in Python."
       *Journal of Machine Learning Research* 12 (2011): 2825–2830.
    .. [2] scikit-learn developers. "Receiver operating characteristic (ROC)."
       Last modified August 2013.
       http://scikit-learn.org/stable/auto_examples/plot_roc.html.
    """
    fpr, tpr, __ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)

    plt.plot(fpr, tpr, label='ROC curve (area = {:0.2f})'.format(roc_auc))
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc='lower right')
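A usage sketch for roc_plot; the synthetic dataset and classifier are placeholders (roc_curve and auc are assumed to come from sklearn.metrics in this module):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
probs = LogisticRegression().fit(X_tr, y_tr).predict_proba(X_te)[:, 1]
roc_plot(y_te, probs)
plt.show()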
Example #11
def tuning(x, y, err=None, smooth=None, ylabel=None, pal=None):
    """
    Plot a tuning curve
    """
    if smooth is not None:
        xs, ys = smoothfit(x, y, smooth)
        plt.plot(xs, ys, linewidth=4, color="black", zorder=1)
    else:
        ys = asarray([0])
    if pal is None:
        pal = sns.color_palette("husl", n_colors=len(x) + 6)
        pal = pal[2 : 2 + len(x)][::-1]
    plt.scatter(x, y, s=300, linewidth=0, color=pal, zorder=2)
    if err is not None:
        plt.errorbar(x, y, yerr=err, linestyle="None", ecolor="black", zorder=1)
    plt.xlabel("Wall distance (mm)")
    plt.ylabel(ylabel)
    plt.xlim([-2.5, 32.5])
    if err is not None:
        errTmp = err.copy()
        errTmp[isnan(err)] = 0
        rng = max([nanmax(ys), nanmax(y + errTmp)])
    else:
        rng = max([nanmax(ys), nanmax(y)])
    plt.ylim([0 - rng * 0.1, rng + rng * 0.1])
    plt.yticks(linspace(0, rng, 3))
    plt.xticks(range(0, 40, 10))
    sns.despine()
    return rng
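tuning() calls smoothfit, defined elsewhere. A plausible sketch using a smoothing spline; the use of scipy and the meaning of the smooth parameter are assumptions:

from numpy import linspace
from scipy.interpolate import UnivariateSpline

def smoothfit(x, y, smooth):
    """Hypothetical helper: densely sampled smoothing-spline fit of y(x).
    Assumes x is sorted in increasing order."""
    spline = UnivariateSpline(x, y, s=smooth)
    xs = linspace(min(x), max(x), 200)
    return xs, spline(xs)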
Example #12
def make_overview_plot(filename, title, noip_arrs, ip_arrs):
    plt.title("Inner parallelism - " + title)

    
    plt.ylabel('Time (ms)', fontsize=12)

    x = 0
    barwidth = 0.5
    bargroupspacing = 1.5

    for z in zip(noip_arrs, ip_arrs):
        noip,ip = z
        noip_mean,noip_conf = conf_stats(noip)
        ip_mean,ip_conf = conf_stats(ip)

        b_noip = plt.bar(x, noip_mean, barwidth, color='r', yerr=noip_conf, ecolor='black', alpha=0.7)
        x += barwidth

        b_ip = plt.bar(x, ip_mean, barwidth, color='b', yerr=ip_conf, ecolor='black', alpha=0.7)
        x += bargroupspacing

    plt.xticks([0.5, 2.5, 4.5], ['50k', '100k', '200k'], rotation='horizontal')

    fontP = FontProperties()
    fontP.set_size('small')

    plt.legend([b_noip, b_ip], \
        ('no inner parallelism', 'inner parallelism'), \
        prop=fontP, loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=2)
   
    plt.ylim([0,62000])
    plt.savefig(output_file(filename))
    plt.clf()
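make_overview_plot depends on conf_stats, not shown. A sketch that returns the sample mean and the half-width of a 95% confidence interval; the confidence level is an assumption:

import numpy as np
from scipy import stats

def conf_stats(samples):
    """Hypothetical helper: (mean, 95% CI half-width) of a sample."""
    samples = np.asarray(samples, dtype=float)
    mean = samples.mean()
    half_width = stats.sem(samples) * stats.t.ppf(0.975, len(samples) - 1)
    return mean, half_width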
Example #13
def entries_histogram(turnstile_weather):
    '''
    Before we perform any analysis, it might be useful to take a
    look at the data we're hoping to analyze. More specifically, lets 
    examine the hourly entries in our NYC subway data and determine what
    distribution the data follows. This data is stored in a dataframe
    called turnstile_weather under the ['ENTRIESn_hourly'] column.
    
    Why don't you plot two histograms on the same axes, showing hourly
    entries when raining vs. when not raining. Here's an example on how
    to plot histograms with pandas and matplotlib:
    turnstile_weather['column_to_graph'].hist()
    
    Your histogram may look similar to the following graph:
    http://i.imgur.com/9TrkKal.png
    
    You can read a bit about using matplotlib and pandas to plot
    histograms:
    http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms
    
    You can look at the information contained within the turnstile weather data at the link below:
    https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
    '''
    plt.figure()
    (turnstile_weather[turnstile_weather.rain==0].ENTRIESn_hourly).hist(bins=175) # plot a histogram for hourly entries when it is not raining
    (turnstile_weather[turnstile_weather.rain==1].ENTRIESn_hourly).hist(bins=175) # plot a histogram for hourly entries when it is raining
    plt.ylim(ymax = 45000, ymin = 0)
    plt.xlim(xmax = 6000, xmin = 0)
    return plt
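A usage sketch, assuming the CSV linked in the docstring has been downloaded next to the script:

import pandas as pd

turnstile_weather = pd.read_csv("turnstile_data_master_with_weather.csv")
entries_histogram(turnstile_weather).show()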
Example #14
def disc_norm():
    x = np.linspace(-3,3,100)
    y = st.norm.pdf(x,0,1)
    fig, ax = plt.subplots()
    fig.canvas.draw()
    
    ax.plot(x,y)
    
    fill1_x = np.linspace(-2,-1.5,100)
    fill1_y = st.norm.pdf(fill1_x,0,1)
    fill2_x = np.linspace(-1.5,-1,100)
    fill2_y = st.norm.pdf(fill2_x,0,1)
    ax.fill_between(fill1_x,0,fill1_y,facecolor = 'blue', edgecolor = 'k',alpha = 0.75)
    ax.fill_between(fill2_x,0,fill2_y,facecolor = 'blue', edgecolor = 'k',alpha = 0.75)
    for label in ax.get_yticklabels():
        label.set_visible(False)
    for tick in ax.get_xticklines():
        tick.set_visible(False)
    for tick in ax.get_yticklines():
        tick.set_visible(False)
    
    plt.rc("font", size = 16)
    plt.xticks([-2,-1.5,-1])
    labels = [item.get_text() for item in ax.get_xticklabels()]
    labels[0] = r"$v_k$"
    labels[1] = r"$\varepsilon_k$"
    labels[2] = r"$v_{k+1}$"
    ax.set_xticklabels(labels)
    plt.ylim([0, .45])

    
    plt.savefig('discnorm.pdf')
    plt.clf()
Example #15
def make_fish(zoom=False):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))
    plt.plot(plot_limits['pitch'], plot_limits['rolldev'], '-g', lw=3)
    plt.plot(plot_limits['pitch'], -plot_limits['rolldev'], '-g', lw=3)
    plt.plot(pitch.midvals, roll.midvals, '.b', ms=1, alpha=0.7)

    p, r = make_ellipse()  # pitch, off nominal roll
    plt.plot(p, r, '-c', lw=2)

    gf = -0.08  # Fudge on pitch value for illustrative purposes
    plt.plot(greta['pitch'] + gf, -greta['roll'], '.r', ms=1, alpha=0.7)
    plt.plot(greta['pitch'][-1] + gf, -greta['roll'][-1], 'xr', ms=10, mew=2)

    if zoom:
        plt.xlim(46.3, 56.1)
        plt.ylim(4.1, 7.3)
    else:
        plt.ylim(-22, 22)
        plt.xlim(40, 180)
    plt.xlabel('Sun pitch angle (deg)')
    plt.ylabel('Sun off-nominal roll angle (deg)')
    plt.title('Mission off-nominal roll vs. pitch (5 minute samples)')
    plt.grid()
    plt.tight_layout()
    plt.savefig('fish{}.png'.format('_zoom' if zoom else ''))
Example #16
def plot_gen(ping, now, t, nans, host, interactive=False, size="1280x640"):
    ''' Generates ping vs time plot '''
    if not interactive:
        import matplotlib
        matplotlib.use("Agg") # no need to load gui toolkit, can run headless
    import matplotlib.pyplot as plt
    
    size      = [int(dim) for dim in size.split('x')]
    datestr   = now[0].ctime().split()
    datestr   = datestr[0] + " " + datestr[1] + " " + datestr[2] + " " + datestr[-1]
    plt.figure(figsize=(size[0]/80.,size[1]/80.)) # dpi is 80
    plt.plot(now[~nans], ping[~nans], drawstyle='steps', marker='+')
    plt.title("Ping Results for {0}".format(host))
    plt.ylabel("Latency [ms]")
    plt.xlabel("Time, {0} [GMT -{1} hrs]".format(datestr, time.timezone/3600))
    plt.xticks(size=10)
    plt.yticks(size=10)
    plt.ylim(ping[~nans].min()-5, ping[~nans].max()+5)
    
    # plot packet losses: find contiguous runs of NaNs (dropped pings)
    start  = []
    finish = []
    for i in range(len(nans)):
        if nans[i]:
            if i == 0 or not nans[i-1]:
                start.append(i)
            if i == len(nans) - 1 or not nans[i+1]:
                finish.append(i)

    # add the red bars for bad pings
    for i in range(len(start)):
        plt.axvspan(now[start[i]], now[min(finish[i] + 1, len(now) - 1)], color='red')
    return plt
Example #17
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)

    # Highlight test samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    c='none', edgecolor='black',
                    alpha=1.0,
                    linewidths=1,
                    marker='o',
                    s=55, label='test set')
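A usage sketch for plot_decision_regions; the blob data and classifier are placeholders:

from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

X, y = make_blobs(n_samples=150, centers=3, random_state=1)
clf = LogisticRegression().fit(X, y)
plot_decision_regions(X, y, classifier=clf, resolution=0.02)
plt.legend(loc='upper left')
plt.show()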
Example #18
	def plotFFT(self):
		# Generates a plot of the FFT output. To view, run plotFFT.py in a separate terminal.
		figure1 = plt.figure(num= None, figsize=(12,12), dpi=80, facecolor='w', edgecolor='w')
		plot1 = figure1.add_subplot(111)
		line1, = plot1.plot( np.arange(0,512,0.5), np.zeros(1024), 'g-')
		plt.xlabel('freq (MHz)',fontsize = 12)
		plt.ylabel('Amplitude',fontsize = 12)
		plt.title('Pre-mixer FFT',fontsize = 12)
		plt.xticks(np.arange(0,512,50))
		plt.xlim((0,512))
		plt.grid()
		plt.show(block = False)
		count = 0 
		stop = 1.0e6
		while(count < stop):
			overflow = np.frombuffer(self.fpga.read('overflow', 4), dtype = '>B')
			print(overflow)
			self.fpga.write_int('fft_snap_ctrl',0)
			self.fpga.write_int('fft_snap_ctrl',1)
			fft_snap = (np.frombuffer(self.fpga.read('fft_snap_bram',(2**9)*8),dtype='>i2')).astype('float')
			I0 = fft_snap[0::4]
			Q0 = fft_snap[1::4]
			I1 = fft_snap[2::4]
			Q1 = fft_snap[3::4]
			mag0 = np.sqrt(I0**2 + Q0**2)
			mag1 = np.sqrt(I1**2 + Q1**2)
			fft_mags = np.column_stack((mag0, mag1)).ravel()  # interleave the two channels
			plt.ylim((0,np.max(fft_mags) + 300.))
			line1.set_ydata((fft_mags))
			plt.draw()
			count += 1
Example #19
def plot_fidelity_lorentzian(constants):
	"""
		Plots the Fidelity vs FSS curve with and without decoherence.
	"""

	qd = QuantumDot(constants.xtau, constants.xxtau, constants.ptau, constants.FSS, constants.crosstau)

	fss = np.linspace(-10., 10., 500)*1e-6

	qd.crosstau = 0.
	no_decoherence = np.array([qd.ideal_fidelity_lorentzian(f)[0] for f in fss])

	qd.crosstau = 1.
	with_decoherence = np.array([qd.ideal_fidelity_lorentzian(f)[0] for f in fss])

	fss = fss/1e-6
	decoherence = qd.ideal_fidelity_lorentzian(1e-6)[1]

	plt.figure(figsize = (16./1.3, 9./1.3))
	plt.plot(fss, no_decoherence, 'r--', fss, with_decoherence, 'b--')

	plt.xlim([-10, 10]) ; plt.ylim([0.45, 1])
	plt.xlabel('Fine structure splitting $eV$') ; plt.ylabel('Fidelity')
	plt.xticks(np.linspace(-10, 10, 11))
	plt.legend(['No decoherence', 'With $1^{st}$ coherence: ' + '{0:.3g}'.format(decoherence)])
	plt.show()
Example #20
def plot_obs_expc_new(obs, expc, expc_upper, expc_lower, analysis, log, ax = None):
    """Modified version of obs-expc plot suggested by R2. The points are separated by whether their CIs are above, below, 
    
    or overlapping the empirical value
    Input: 
    obs - list of observed values
    expc_mean - list of mean simulated values for the corresponding observed values
    expc_upper - list of the 97.5% quantile of the simulated vlaues
    expc_lower - list of the 2.5% quantile of the simulated values
    analysis - whether it is patitions or compositions
    log - whether the y axis is to be transformed. If True, expc/obs is plotted. If Flase, expc - obs is plotted.
    ax - whether the plot is generated on a given figure, or a new plot object is to be created
    
    """
    obs, expc, expc_upper, expc_lower = list(obs), list(expc), list(expc_upper), list(expc_lower)
    if not ax:
        fig = plt.figure(figsize = (3.5, 3.5))
        ax = plt.subplot(111)
    
    ind_above = [i for i in range(len(obs)) if expc_lower[i] > obs[i]]
    ind_below = [i for i in range(len(obs)) if expc_upper[i] < obs[i]]
    ind_overlap = [i for i in range(len(obs)) if expc_lower[i] <= obs[i] <= expc_upper[i]]
    
    if log:
        expc_standardize = [expc[i] / obs[i] for i in range(len(obs))]
        expc_upper_standardize = [expc_upper[i] / obs[i] for i in range(len(obs))]
        expc_lower_standardize = [expc_lower[i] / obs[i] for i in range(len(obs))]
        axis_min = 0.9 * min([expc_lower_standardize[i] for i in range(len(expc_lower_standardize)) if expc_lower_standardize[i] != 0])
        axis_max = 1.5 * max(expc_upper_standardize)
    else:
        expc_standardize = [expc[i] - obs[i] for i in range(len(obs))]
        expc_upper_standardize = [expc_upper[i] - obs[i] for i in range(len(obs))]
        expc_lower_standardize = [expc_lower[i] - obs[i] for i in range(len(obs))]
        axis_min = 1.1 * min(expc_lower_standardize)
        axis_max = 1.1 * max(expc_upper_standardize)
   
    if analysis == 'partition': col = '#228B22'
    else: col = '#CD69C9'
    ind_full = [] 
    for index in [ind_below, ind_overlap, ind_above]:
        expc_standardize_ind = [expc_standardize[i] for i in index]
        sort_ind_ind = sorted(range(len(expc_standardize_ind)), key = lambda i: expc_standardize_ind[i])
        sorted_index = [index[i] for i in sort_ind_ind]
        ind_full.extend(sorted_index)

    xaxis_max = len(ind_full)
    for i, ind in enumerate(ind_full):
        plt.plot([i, i],[expc_lower_standardize[ind], expc_upper_standardize[ind]], '-', c = col, linewidth = 0.4)
    plt.scatter(range(len(ind_full)), [expc_standardize[i] for i in ind_full], c = col,  edgecolors='none', s = 8)    
    if log: 
        plt.plot([0, xaxis_max + 1], [1, 1], 'k-', linewidth = 1.5)
        ax.set_yscale('log')
    else: plt.plot([0, xaxis_max + 1], [0, 0], 'k-', linewidth = 1.5)
    plt.plot([len(ind_below) - 0.5, len(ind_below) - 0.5], [axis_min, axis_max], 'k--')
    plt.plot([len(ind_below) + len(ind_overlap) - 0.5, len(ind_below) + len(ind_overlap) - 0.5], [axis_min, axis_max], 'k--')
    plt.xlim(0, xaxis_max)
    plt.ylim(axis_min, axis_max)
    plt.tick_params(axis = 'y', which = 'major', labelsize = 8, labelleft = True)
    plt.tick_params(axis = 'x', which = 'major', top = False, bottom = False, labelbottom = False)
    return ax
Example #21
def plot_stack_candidates(tweets, cands, interval, start = 0, \
  end = MAX_TIME // 60, tic_inc = 120, save_to = None): 
  '''
  Plots stackplot for the candidates in list cands over the time interval
  ''' 

  period = range(start, end, interval)
  percent_dict = tweets.mention_minute_percent(cands, interval, period)

  y = [] 
  fig = plt.figure(figsize = (FIGWIDTH, FIGHEIGHT))
  legends = [] 
  for candidate in percent_dict:
    y.append(percent_dict[candidate]) 
    legends.append(CANDIDATE_NAMES[candidate])
  plt.stackplot(period, y, labels=legends)

  plt.title("Percentage of Mentions per {} minutes before, during, \
    and after debate".format(interval))
  plt.xlabel("Time")
  plt.ylabel("Number of Tweets")
  plt.legend(y, legends)

  ticks_range = range(start, end, tic_inc)
  labels = list(map(lambda x: str(x - start) + " min", ticks_range))
  plt.xticks(ticks_range, labels, rotation = 'vertical')
  plt.xlim( (start, end) )
  plt.ylim( (0.0, 1.0))
  
  if save_to: 
    fig.savefig(save_to)
  plt.show()
Example #22
def plt_data():
    t = [[0,1], [1,0], [1, 1], [0, 0]]
    t2 = [1, 1, 1, 0]
    X = np.array(t)
    Y = np.array(t2)

    h = .02  # step size in the mesh

    logreg = linear_model.LogisticRegression(C=1e5)

    # we create an instance of a logistic regression classifier and fit the data.
    logreg.fit(X, Y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(4, 3))
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())

    plt.show()
Example #23
def main( args ):

  hash = get_genes_with_features(args['file'])
  for key, featurearray in hash.items():
    cluster, branch = key.split()
    length = int(featurearray[0][0])
    import matplotlib.pyplot as P
    x = [e+1 for e in range(length+1)]
    y1 = [0] * (length+1)
    y2 = [0] * (length+1)
    for feature in featurearray:
      length, pos, aa, prob = feature[0:4]
      if prob > 0.95: y1[pos] = prob
      else: y2[pos] = prob
    
    P.bar(x, y1, color='#000000', edgecolor='#000000')
    P.bar(x, y2, color='#bbbbbb', edgecolor='#bbbbbb')
    P.ylim(ymin=0, ymax=1)
    P.xlim(xmin=0, xmax=length)
    P.xlabel("position in the ungapped alignment [aa]")
    P.ylabel(r'$P (\omega > 1)$')
    P.title(cluster + " (branch " + branch + ")")

    P.axhline(y=.95, xmin=0, xmax=length, linestyle=":", color="k")
    P.savefig(cluster + "." + branch + ".png", format="png")
    P.close()
Example #24
def plot_1D_pmf(coord,title):
    ''' Plot a 1D pmf for a coordinate.'''

    x = get_data(coord)

    path = os.getcwd()
    savedir = path+"/pmfs"
    if not os.path.exists(savedir):
        os.mkdir(savedir)

    if coord in ["Rg","rmsd"]:
        skip = 80
    else:
        skip = 4 
    vals = np.unique(list(x))[::skip]

    n,bins = np.histogram(x,bins=vals,density=True)
    np.savetxt(savedir+"/"+coord+"_n.dat",n,delimiter=" ",fmt="%.4f")
    np.savetxt(savedir+"/"+coord+"_bins.dat",bins,delimiter=" ",fmt="%.4f")

    pmf = -np.log(n)
    pmf -= min(pmf)

    plt.figure()
    plt.plot(bins[1:]/max(bins),pmf)
    plt.xlabel(coord,fontsize="xx-large")
    plt.ylabel("F("+coord+") / kT",fontsize="xx-large")
    plt.title("F("+coord+") "+title,fontsize="xx-large")
    plt.ylim(0,6)
    plt.xlim(0,1)
    plt.savefig(savedir+"/"+coord+"_pmf.pdf")
Example #25
def test_draw_residual_blh_norm():
    np.random.seed(0)
    data = np.random.randn(1000)
    blh = BinnedLH(gaussian, data)
    blh.draw_residual(args=(0., 1.), norm=True)
    plt.ylim(-4., 3.)
    plt.xlim(-4., 3.)
Example #26
def example(show=True, save=False):

    # Settings:
    t0 = 0.
    dt = .0001
    dv = .0001
    tf = .1
    verbose = True
    update_method = 'approx'
    approx_order = 1
    tol = 1e-14
    
    # Run simulation:
    simulation = get_simulation(dv=dv, verbose=verbose, update_method=update_method, approx_order=approx_order, tol=tol)
    simulation.run(dt=dt, tf=tf, t0=t0)
    
    # Visualize:
    i1 = simulation.population_list[1]
    plt.figure(figsize=(3,3))
    plt.plot(i1.t_record, i1.firing_rate_record)
    plt.xlim([0,tf])
    plt.ylim(ymin=0)
    plt.xlabel('Time (s)')
    plt.ylabel('Firing Rate (Hz)')
    plt.tight_layout()
    if save: plt.savefig('./excitatory_inhibitory.png')
    if show: plt.show()
    
    return i1.t_record, i1.firing_rate_record
Example #27
def visualizeEigenvalues(eVal, verboseLevel):
	real = []
	imag = []

	for z in eVal:
		rp = z.real
		im = z.imag
		if not (rp == np.inf or rp == - np.inf) \
				and not (im == np.inf or im == - np.inf):
			real.append(rp)
			imag.append(im)

	if verboseLevel>=1:
		print("length of regular real values=" + str(len(real)))
		print("length of regular imag values=" + str(len(imag)))
		print("minimal real part=" + str(min(real)), "& maximal real part=" + str(max(real)))
		print("minimal imag part=" + str(min(imag)), "& maximal imag part=" + str(max(imag)))
	if verboseLevel==2:
		print("all real values:", str(real))
		print("all imag values:", str(imag))


	# plt.scatter(real[4:],img[4:])
	plt.scatter(real, imag)
	plt.grid(True)
	plt.xlabel("realpart")
	plt.ylabel("imagpart")
	plt.xlim(-10, 10)
	plt.ylim(-10, 10)
	plt.show()
Example #28
def exec_transmissions():
    IP,IP_AP,files=parser_reduce()
    plt.figure("GRAPHE_D'EVOLUTION_DES_TRANSMISSIONS")
    ENS_TEMPS_, TRANSMISSION_ = transmissions(files)
    plt.plot(ENS_TEMPS_, TRANSMISSION_,"r.", label="Transmissions: ")

    iplist1 = sorted(IP, key=inet_aton)

    for i in iplist1: # annotate each machine; check whether the IP string has length 9 or 8 to pick the plot window size
        if len(i)==9:
            maxim_=i[-2:] # used below for the plot window size
            plt.annotate('   Machine: '+ i ,horizontalalignment='left', xy=(1, float(i[-2:])), xytext=(1, float(i[-2:])-0.4),arrowprops=dict(facecolor='black', shrink=0.05),)
        else:
            maxim_=i[-1:] # used below for the plot window size
            plt.annotate('   Machine: '+ i ,horizontalalignment='left', xy=(1, float(i[7])), xytext=(1, float(i[7])-0.4),arrowprops=dict(facecolor='black', shrink=0.05),)
    for i in IP_AP: # access point (special case)
        if i[-2:]:
            plt.annotate('   access point: '+ i , xy=(1, float(i[7])), xytext=(1, float(i[7])-0.4),arrowprops=dict(facecolor='black', shrink=0.05),)

    plt.ylim(0, (float(maxim_))+1) # this is what the sort above is for
    plt.xlim(1, 1.1)
    plt.legend(loc='best',prop={'size':10})
    plt.xlabel('Time (s)')
    plt.ylabel('IP of transmitting machines')
    plt.grid(True)
    plt.title("GRAPHE_D'EVOLUTION_DES_TRANSMISSIONS")
    plt.show()
Example #29
def draw(ord_l, gaps):

    axScatter = plt.subplot(3, 1, 1)

    number_samples=0  # 0 means keep everything: [-0:] slices the whole list
    # axScatter.scatter([i['seq'] for i in ord_l[-number_samples:]], [i['a'] for i in ord_l[-number_samples:]], s=2, color='r', label='ch1')
    axScatter.scatter([i['seq'] % 24 for i in ord_l[-number_samples:]], [i['d'] for i in ord_l[-number_samples:]], s=2, color='r', label='ch1')
    # axScatter.scatter(time_l[-number_samples:], b_l[-number_samples:], s=2, color='c', label='ch2')
    # axScatter.scatter(time_l[-number_samples:], c_l[-number_samples:], s=2, color='y', label='ch3')
    # axScatter.scatter(time_l[-number_samples:], d_l[-number_samples:], s=2, color='g', label='ch4')
    plt.ylim(-9000000, 9000000)
    plt.legend()
    axScatter.set_xlabel("Sequence Packet")
    axScatter.set_ylabel("Voltage")
    plt.title("Channels Values")


    # time_plot = plt.subplot(3, 1, 2)
    # time_plot.scatter([i['seq'] for i in ord_l[-number_samples:]], [i['delta'] for i in ord_l[-number_samples:]], s=1, color='r', label='delta')
    # time_plot.set_xlabel("Sequence Packet")
    # time_plot.set_ylabel("Delta to referencial")
    # ax2 = time_plot.twinx()
    # ax2.scatter([i['seq'] for i in ord_l[-number_samples:]], [i['ts'] for i in ord_l[-number_samples:]], s=2, color='g', label='Timestamp')
    # ax2.set_ylabel("Kernel time")
    # plt.title("Timestamp deltas")

    gaps_draw = plt.subplot(3, 1, 3)
    gaps_draw.plot([i[0] for i in gaps[-number_samples:]], [i[1] for i in gaps[-number_samples:]], color='b', marker='.', label='gaps')
    gaps_draw.set_ylim(-0.5, 1.5)

    plt.draw()
    # plt.savefig("res.png")
    plt.show()
Example #30
def plot_twoscales(name, dict_array, xlabel='', ylabel='', title='', linetypes=['b','r','g','k'], labels=[], xlog=None, ylim=None):
  plt.clf()
  ax1 = plt.figure().add_subplot(111)  # create the figure before applying axis settings
  if len(xlabel) > 0:
    plt.xlabel(xlabel)
  if len(ylabel) > 0:
    plt.ylabel(ylabel)
  if len(title) > 0:
    plt.title(title)
  if xlog:
    plt.xscale('log', base=xlog)
  if ylim:
    plt.ylim(ylim)

  dicty1 = list(zip(*sorted(dict_array[0].items())))
  dicty2 = list(zip(*sorted(dict_array[1].items())))
  ax1.plot(dicty1[0], dicty1[1], linetypes[0], label=labels[0])
  for tl in ax1.get_yticklabels():
    tl.set_color(linetypes[0])
  ax1.set_ylabel(labels[0], color=linetypes[0])
  ax2 = ax1.twinx() 
  ax2.plot(dicty2[0], dicty2[1], linetypes[1], label=labels[1])
  for tl in ax2.get_yticklabels():
    tl.set_color(linetypes[1])
  ax2.set_ylabel(labels[1], color=linetypes[1])
  plt.savefig('%s.eps' % name)
Example #31
for label, clf, color, linestyle in zip(clf_labels, classifiers, colors, linestyles):
    clf.fit(X_train, y_train)
    # treat class index 1 as the positive class
    y_pred_proba = clf.predict_proba(X_test)
    fpr, tpr, thresholds = roc_curve(y_true=y_test, 
                                     y_score=y_pred_proba[:, 1], 
                                     pos_label=1)
    roc_auc = auc(fpr,tpr)
    plt.plot(fpr, 
             tpr, 
             color=color, 
             linestyle=linestyle, label="%s (auc=%0.2f)" % (label, roc_auc))
plt.legend(loc="lower right")
plt.plot([0, 1], [0, 1], linestyle="--", color="gray", linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()   

# Plot the decision regions; to compare them on the same scale, standardize the full feature set
from itertools import product
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
# build the decision mesh grid
x_min = X_train_std[:, 0].min() - 1
x_max = X_train_std[:, 0].max() + 1
y_min = X_train_std[:, 1].min() - 1
y_max = X_train_std[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
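The snippet stops after building the mesh. A hedged continuation that finishes the decision-region panels, reusing classifiers, clf_labels, X_train_std, y_train, xx, yy and the product import from above; the 2x2 layout and the assumption of two classes labeled 0 and 1 are illustrative:

fig, axes = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(7, 5))
for idx, clf, label in zip(product([0, 1], [0, 1]), classifiers, clf_labels):
    clf.fit(X_train_std, y_train)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    axes[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.3)
    axes[idx[0], idx[1]].scatter(X_train_std[y_train == 0, 0], X_train_std[y_train == 0, 1],
                                 c='blue', marker='^', s=50)
    axes[idx[0], idx[1]].scatter(X_train_std[y_train == 1, 0], X_train_std[y_train == 1, 1],
                                 c='green', marker='o', s=50)
    axes[idx[0], idx[1]].set_title(label)
plt.show()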
Example #32
    def test_packet_loss_fixed(self):
        """
        We use visual benchmark by plotting the values...
        If more reliable benchmark values available, use assertAlmostEqual
        :return:
        """

        params = {
            "save_to": "",
            "sim_duration":
            100000,  # long simulations needed to capture packet loss
            "num_resources": 200,
            "traffic_type": "bernoulli",
            "max_iter": 20
        }

        load_range = [0.1 * x for x in range(1, 11)]

        plt.figure()

        degree_distrs = [
            [0, 1],  # slotted aloha
            [0, 0, 1],  # 2-regular CRDSA
            [0, 0, 0, 0, 1],  # 4-regular CRDSA
            [0, 0, 0.5, 0.28, 0, 0, 0, 0, 0.22],
            [0, 0, 0.25, 0.6, 0, 0, 0, 0, 0.15]
        ]

        degree_distr_labels = [
            "s-aloha", "2-CRDSA", "4-CRDSA", r"$\Lambda_3$", r"$\Lambda_4$"
        ]

        results_to_store = {}

        for label, degree_distr in zip(degree_distr_labels, degree_distrs):

            color = next(IrsaTestFixed.COLORS)
            marker = next(IrsaTestFixed.MARKERS)

            params["degree_distr"] = degree_distr
            pktl = []

            for load_idx, load in enumerate(load_range):
                params["num_ues"] = int(params["num_resources"] * load)
                params["act_prob"] = 1
                res = irsa.irsa_run(**params)
                mean_pktl = np.mean(res.packet_loss)
                pktl.append(mean_pktl)

                # FIXME it will be certainly valuable to check whether confidence interval is not too high
                # self.assertAlmostEqual(values[m][load_idx], t, delta=tc)
            results_to_store[label] = pktl
            plt.plot(load_range,
                     pktl,
                     "-" + color + marker,
                     markeredgecolor=color,
                     markerfacecolor="None",
                     label=label)

        with open("../tests/pkt_loss.json", "w") as f:
            json.dump(results_to_store, f)

        plt.ylabel("Packet loss")
        plt.xlabel("Offered Load")
        plt.yscale("log")
        plt.ylim((1e-4, 1e0))
        plt.legend(loc=0)
        plt.grid(True)
        plt.savefig("../tests/packet_loss_fixed.pdf")
tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
fig = plt.figure()
for method in methods:
    fpr, tpr, _ = roc_curve(label, scores[method])
    roc_auc = auc(fpr, tpr)
    fpr = np.flipud(fpr)
    tpr = np.flipud(tpr)  # select largest tpr at same fpr
    plt.plot(fpr, tpr, color=colours[method], lw=1,
             # label=('[%s (AUC = %0.4f %%)]' % (method.split('-')[-1], roc_auc * 100))
             label = method)
    tpr_fpr_row = []
    tpr_fpr_row.append("%s-%s" % (method, target))
    for fpr_iter in np.arange(len(x_labels)):
        _, min_index = min(list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
        # tpr_fpr_row.append('%.4f' % tpr[min_index])
        tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
    tpr_fpr_table.add_row(tpr_fpr_row)
plt.xlim([10 ** -6, 0.1])
plt.ylim([0.30, 1.0])
plt.grid(linestyle='--', linewidth=1)
plt.xticks(x_labels)
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.xscale('log')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC on {}'.format(title))
plt.legend(loc="lower right")
# plt.show()
fig.savefig(os.path.join(save_path, '%s.pdf' % job))
print(tpr_fpr_table)
Example #34
      print "WARNING: Fine method is not the exact integrator..."    

    y_start = np.exp(1j*k*x)
    y_ex    = stab_ex*y_start

    y_coarse= stab_coarse[0,0]*y_start
    y_fine  = stab_fine[0,0]*y_start

    for n in range(0,np.size(niter_show)):
      stab_para = para.get_parareal_stab_function(niter_show[n])
      para_show[n,:] = (stab_para[0,0]*y_start).real

    fs = 8
    rcParams['figure.figsize'] = 2.5, 2.5
    fig = plt.figure()
    plt.plot(x, y_ex.real,      'g-', label='Fine')
    #plt.plot(x, y_coarse.real,  'b--', label='Coarse')
    plt.plot(x, para_show[0,:], 'r-+', label='Parareal k='+str(niter_show[0]), markevery=(5, 20), markersize=fs/2, mew=1)
    #plt.plot(x, para_show[1,:], 'r-s', label='Parareal k='+str(niter_show[1]), markevery=(10,20),  markersize=fs/2, mew=1)
    plt.plot(x, para_show[2,:], 'r-o', label='Parareal k='+str(niter_show[2]), markevery=(15,20),  markersize=fs/2, mew=1)
    plt.legend(loc='lower left', fontsize=fs, prop={'size':fs-2}, handlelength=3)
    plt.title((r'$\kappa$ = %4.2f' % k), fontsize=fs)
    plt.ylim([-1.5, 1.5])
    plt.xlim([x[0], x[-1]])
    plt.xlabel('x', fontsize=fs)
    plt.ylabel('y', fontsize=fs)
    filename = 'parareal-sine-'+str(k_ind)+'.pdf'
    plt.gcf().savefig(filename, bbox_inches='tight')
    call(["pdfcrop", filename, filename])
#    plt.show()
Example #35
                    c=colors[l],
                    label=str(l))

plt.title('Linear regression fit')
plt.xlabel('$x_1$ value')
plt.ylabel('$x_2$ value')

# regression line
# x_0 = -1
y_0 = (w[1] - w[0]) /w[2]
#x_1 = 1
y_1 = -( w[1]+w[0]) /w[2]

plt.plot([-1, 1], [y_0, y_1], 'k-', label=('SGD regression'))
plt.xlim([-1,1])
plt.ylim([-1,1])
plt.show()

STOP_EXECUTION_TO_SEE_RESULT()

## d
print('\n EXPERIMENT (d), lineal regression\n')

        
def experiment(featureVector,
               number_of_repetitions = 1000,
               size_training_example = 1000,
               percent_noisy_data = 10.0
               ):
        '''
        INPUT
Example #36
def makeaplot(events,
              sensitivities,
              hrf_estimates,
              roi_pair,
              fn=True):
    """
    This produces a time series plot for the roi class comparison specified in
    roi_pair such as roi_pair = ['left FFA', 'left PPA']
    """
    import matplotlib.pyplot as plt

    # take the mean and transpose the sensitivities
    sensitivities_stacked = mv.vstack(sensitivities)

    if bilateral:
        sensitivities_stacked.sa['bilat_ROIs_str'] = list(map(lambda p: '_'.join(p),
                                                              sensitivities_stacked.sa.bilat_ROIs))
        mean_sens = mv.mean_group_sample(['bilat_ROIs_str'])(sensitivities_stacked)
    else:
        sensitivities_stacked.sa['all_ROIs_str'] = list(map(lambda p: '_'.join(p),
                                                            sensitivities_stacked.sa.all_ROIs))
        mean_sens = mv.mean_group_sample(['all_ROIs_str'])(sensitivities_stacked)

    mean_sens_transposed = mean_sens.get_mapped(mv.TransposeMapper())

    # some parameters
    # get the conditions
    block_design = sorted(np.unique(events['trial_type']))
    reorder = [0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11]
    block_design = [block_design[i] for i in reorder]
    # end indices to chunk timeseries into runs
    run_startidx = np.array([0, 157, 313, 469])
    run_endidx = np.array([156, 312, 468, 624])

    runs = np.unique(mean_sens_transposed.sa.chunks)

    for j in range(len(hrf_estimates.fa.bilat_ROIs_str)):
        comparison = hrf_estimates.fa.bilat_ROIs[j][0]
        if (roi_pair[0] in comparison) and (roi_pair[1] in comparison):
            roi_pair_idx = j
    roi_betas_ds = hrf_estimates[:, roi_pair_idx]
    roi_sens_ds = mean_sens_transposed[:, roi_pair_idx]

    for run in runs:
        fig, ax = plt.subplots(1, 1, figsize=[18, 10])
        colors = ['#7b241c', '#e74c3c', '#154360', '#3498db', '#145a32', '#27ae60',
                  '#9a7d0a', '#f4d03f', '#5b2c6f', '#a569bd', '#616a6b', '#ccd1d1']
        plt.suptitle('Timecourse of sensitivities, {} versus {}, run {}'.format(roi_pair[0],
                                                                                roi_pair[1],
                                                                                run + 1),
                     fontsize='large')
        plt.xlim([0, max(mean_sens_transposed.sa.time_coords)])
        plt.ylim([-5, 7])
        plt.xlabel('Time in sec')
        plt.legend(loc=1)
        plt.grid(True)
        # for each stimulus, plot a color band on top of the plot
        for stimulus in block_design:
            onsets = events[events['trial_type'] == stimulus]['onset'].values
            durations = events[events['trial_type'] == stimulus]['duration'].values
            stimulation_end = np.sum([onsets, durations], axis=0)
            r_height = 1
            color = colors[0]
            y = 6

            # get the beta corresponding to the stimulus to later use in label
            beta = roi_betas_ds.samples[hrf_estimates.sa.condition == stimulus.replace(" ", ""), 0]

            for i in range(len(onsets)):
                r_width = durations[i]
                x = stimulation_end[i]
                rectangle = plt.Rectangle((x, y),
                                          r_width,
                                          r_height,
                                          fc=color,
                                          alpha=0.5,
                                          label='_'*i + stimulus.replace(" ", "") + '(' + str('%.2f' % beta) + ')')
                plt.gca().add_patch(rectangle)
                plt.legend(loc=1)
            del colors[0]

        times = roi_sens_ds.sa.time_coords[run_startidx[run]:run_endidx[run]]

        ax.plot(times, roi_sens_ds.samples[run_startidx[run]:run_endidx[run]], '-', color='black', lw=1.0)
        glm_model = hrf_estimates.a.model.results_[0.0].predicted[run_startidx[run]:run_endidx[run], roi_pair_idx]
        ax.plot(times, glm_model, '-', color='#7b241c', lw=1.0)
        model_fit = hrf_estimates.a.model.results_[0.0].R2[roi_pair_idx]
        plt.title('R squared: %.2f' % model_fit)
        if fn:
            plt.savefig(results_dir + 'timecourse_localizer_glm_sens_{}_vs_{}_run-{}.svg'.format(roi_pair[0], roi_pair[1], run + 1))
Example #37
        'ytick.labelsize': 16,
        'legend.fontsize': 20,
        'axes.linewidth': 2,
        "pgf.texsystem": "pdflatex",  # change this if using xetex or lautex
        "text.usetex": True,  # use LaTeX to write all text
    })

    plt.figure(1, figsize=(6, 6))
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.plot(y_train[:, 0], y_train[:, 1], 'ro', label="Training data")
    plt.plot(y_true[:, 0], y_true[:, 1], 'b-', label="Exact Trajectory")
    plt.xlabel('$x_1$', fontsize=18)
    plt.ylabel('$x_2$', fontsize=18)
    plt.xlim((-2.3, 2.3))
    plt.ylim((-5., 6.5))
    plt.legend(loc='upper right', frameon=False, prop={'size': 14})
    plt.savefig('./Training_data.png', dpi=300)

    plt.figure(4, figsize=(12, 6.5))
    plt.xticks(fontsize=22)
    plt.yticks(fontsize=22)
    plt.plot(t_grid_train,
             y_train[:, 0],
             'ro',
             label="Training data of $x_1(t)$")
    plt.plot(t_grid_true,
             y_true[:, 0],
             'r-',
             label="True Trajectory of $x_1(t)$")
    plt.plot(t_grid_true,
Example #38
import numpy as np
import matplotlib.pyplot as plt 
import scipy as sc
import soundfile as sf

file=open('shelvingConfig.txt','r').read().splitlines()     #reading from the .txt files
gain=int(file[1])
fc=int(file[2])                                             #cutting frequency
 
data,samplerate=sf.read('P_9_1.wav')                        #reading data and sample from the soundfile
sample_fft=np.fft.fft(data)                                   #finding the fft of the original sample data

N= len(sample_fft)                                              #number of FFT samples
plt.subplot(2,1,1)
plt.plot(abs(sample_fft[0:int(N/4)]))
plt.ylim(0,2600)
plt.title("Original FFT Signal.")

#Creating the shelving-filtered signal.
u=10**(gain/20)
theta=(2*np.pi*fc)/samplerate
y=(1-(4/(1+u))*np.tan(theta/2))/(1+(4/(1+u))*np.tan(theta/2))
a=(1-y)/2

selving_filter=[]                                       #new signal with the shelving filter applied
prev_input=0                                            #the previous input sample, initially 0
prev_output=0                                           #the previous allpass output, initially 0
for n in range(0,len(data)):
    u_n=a*(data[n]+prev_input)+y*prev_output
    y_n=data[n]+u_n*(u-1)
    selving_filter.append(y_n)                          #collect the filtered sample
    prev_input=data[n]
    prev_output=u_n                                     #state update (missing in the original snippet; assumed recursion)
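The loop above only computes the filtered samples. A hedged completion that collects the output and plots its spectrum in the second subplot (the layout mirrors the first subplot; the styling is an assumption):

selving_filter = np.array(selving_filter)
filtered_fft = np.fft.fft(selving_filter)      #spectrum of the filtered signal
plt.subplot(2,1,2)
plt.plot(abs(filtered_fft[0:int(N/4)]))
plt.ylim(0,2600)
plt.title("Shelving-Filtered FFT Signal.")
plt.tight_layout()
plt.show()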
Example #39
def main():

    datasetFiles = ['dataset_100_0_75_0_15_0_45_0_40.csv']
    # datasetFiles = ['dataset_40_0_85_0_15_0_85.csv']

    pst = predictStoppingTime()

    for dataFile in datasetFiles:
        # get data
        fileContent = readDataset(dataFile)
        dataFile = dataFile.replace(".csv","")
        numofAttributes = len(fileContent[0][0])

        # Initializations  
        LambdaError,BetaError,weightError = {},{},{}
        trueLambda = float(dataFile.split("_")[1])
        trueBeta = float(dataFile.split("_")[2]+"."+dataFile.split("_")[3])
        trueweightVector = []
        for i in range(4,4+(numofAttributes*2 - 1),2):
            print(i)
            weight = float(dataFile.split("_")[i] + "."+dataFile.split("_")[i+1])
            trueweightVector.append(weight)
        print(trueweightVector)
        trueweightVector = np.array(trueweightVector)
        weightVector = np.array([float(1/numofAttributes) for i in range(numofAttributes)])
        Lambda,Beta = 0,0
        #prepare data for limits method
        preparedData = pst.prepareData(fileContent,weightVector)
        # print(preparedData)

        for dataPoints in range(len(preparedData)):
            print("----------------------\nDatapoint-{}\n".format(dataPoints))
            partofPreparedData = preparedData[dataPoints]
            partofActualData = fileContent[dataPoints]

            # Prediction
            Lambda,Beta = pst.predictModelParameters(partofPreparedData)
            print("Lambda,Beta - {} and {}".format(Lambda,Beta))
            weightVector = pst.predictWeightVector(partofActualData,Lambda,Beta,numofAttributes,weightVector)
            print("Weight Matrix - {} ".format(weightVector))
            print("----------------------")

            LambdaError[dataPoints] = (trueLambda - Lambda)**2
            BetaError[dataPoints] = (trueBeta - Beta)**2
            weightError[dataPoints] = norm((trueweightVector - weightVector),2)

        
        print("True Lambda - {} and True Beta - {} and True Weights - {}".format(trueLambda,trueBeta,trueweightVector))
        print("Predicted Lambda - {} and predicted Beta - {} and predicted weights - {}".format(Lambda,Beta,weightVector))

        plt.plot(list(LambdaError.keys()),list(LambdaError.values()),'r-*',label='Lambda')
        plt.plot(list(BetaError.keys()),list(BetaError.values()),'b--^',label='Beta')
        plt.plot(list(weightError.keys()),list(weightError.values()),'g-.',label='Weight')
        plt.ylim(0,1)

        plt.legend()
        plt.grid()
        plt.show()
Example #40
file_wn = load_file('../outs/white_noise/0_white_noise.wav')
folder_wn, labels_wn = load_folder('../outs/white_noise/')

# Silence
file_sil = load_file('../outs/silence/0_silence.wav')
folder_sil, labels_sil = load_folder('../outs/silence')


print('loading model')
model = load_model('models/model.h5')
x = []
cmp = compare(file1, folder1)
for i in range(len(labels1)):
        x.append(int(labels1[i][:-8][-3:]))
plt.scatter(x, cmp, marker='x')
plt.ylim(0, 2500000)

plt.savefig('pitch_variation.png')

plt.clf()
plt.cla()
plt.close()

cmp = compare(file2, folder2)
plt.scatter(range(len(cmp)),cmp, marker='x')
plt.ylim(0, 2500000)
for i in range(len(labels2)):
        labels2[i] = labels2[i][:-12]
        
plt.xticks(range(len(cmp)), labels2, rotation='vertical')
plt.subplots_adjust(bottom=0.40)
Example #41
plt.tight_layout()
fig.savefig('./figure/figure3a_Jmeig.png', dpi=600)


eig_Cm_in = eig_Cm[1:-1]
fig = plt.figure(figsize=(8,4))
plt.hist(eig_Cm_in, 40, density=True, label='N='+str(N));
plt.xlim(x_lim)
plt.plot(x,px, linewidth=1.5, label='g='+str(g))
plt.xlabel('cov eigenvalues')
plt.ylabel('probability')
plt.legend()
# plt.title('Bulk spectrum with diverging motifs')
plt.title(r'Bulk cov spectrum with $J+ e b^T$')
plt.tight_layout()
fig.savefig('./figure/figure3a_2_cov_Jm_bulk.png', dpi=600)

fig = plt.figure(figsize=(10,4))
plt.hist(eig_Cm_in, 40, density=True);
# plt.scatter(eig_Cm[0], 0, 100, facecolors='none', edgecolors='m')
plt.scatter(eig_Cm[0], 0, 20, marker=2, color='b')
# plt.scatter(eig_Cm[-1], 0, 100, facecolors='none', edgecolors='m')
plt.scatter(eig_Cm[-1], 0, 20, marker=2, color='b')
plt.plot(x,px, linewidth=1.5)
plt.ylim(bottom=0)
plt.xlabel('cov eigenvalues')
plt.ylabel('probability')
# plt.title(r'Cov spectrum with $J+ e b^T$')
plt.tight_layout()
fig.savefig('./figure/figure3a_3_cov_Jm_all.png', dpi=600)
Example #42
def main(crowdastro_h5_path,
         training_h5_path,
         results_npy_path,
         overwrite=False,
         plot=False,
         n_trials=25):
    with h5py.File(crowdastro_h5_path, 'r') as crowdastro_h5:
        with h5py.File(training_h5_path, 'r') as training_h5:
            n_instances, = training_h5['labels'].shape
            n_splits, n_test_instances = crowdastro_h5[
                '/wise/cdfs/test_sets'].shape
            n_train_indices = n_instances - n_test_instances
            n_methods = 3  # QBC, Passive, Stratified Passive
            instance_counts = [
                int(i)
                for i in numpy.logspace(numpy.log10(100),
                                        numpy.log10(n_train_indices), n_trials)
            ]
            results = numpy.zeros((n_methods, n_splits, n_trials))
            features = training_h5['features'][...]
            labels = training_h5['labels'][...]
            norris = crowdastro_h5['/wise/cdfs/norris_labels'][...]

            if os.path.exists(results_npy_path):
                with open(results_npy_path, 'rb') as f:
                    logging.info('Loading from NPY file.')
                    results = numpy.load(f, allow_pickle=False)
            else:
                for method_index, Sampler in enumerate([
                        qbc_sampler.QBCSampler, random_sampler.RandomSampler,
                        random_sampler.BalancedSampler
                ]):
                    logging.debug(str(Sampler))
                    for split_id, split in enumerate(
                            crowdastro_h5['/wise/cdfs/test_sets'][...]):
                        logging.info('Running split {}/{}'.format(
                            split_id + 1, n_splits))
                        # Set of indices that are not the testing set. This is where
                        # it's valid to query from.
                        train_indices = set(numpy.arange(n_instances))
                        for i in split:
                            train_indices.remove(i)
                        train_indices = sorted(train_indices)

                        # The masked set of labels we can query.
                        queryable_labels = numpy.ma.MaskedArray(
                            training_h5['labels'][train_indices],
                            mask=numpy.ones(n_train_indices))

                        # Initialise by selecting instance_counts[0] random labels,
                        # stratified.
                        _, init_indices = sklearn.model_selection.train_test_split(
                            numpy.arange(n_train_indices),
                            test_size=instance_counts[0],
                            stratify=queryable_labels.data)
                        logging.info('% positive: {}'.format(
                            queryable_labels.data[init_indices].mean()))
                        queryable_labels.mask[init_indices] = 0
                        sampler = Sampler(
                            features[train_indices],
                            queryable_labels,
                            sklearn.linear_model.LogisticRegression,
                            classifier_params={'class_weight': 'balanced'})

                        results[method_index, split_id,
                                0] = sampler.ba(features[split], norris[split])
                        for count_index, count in enumerate(
                                instance_counts[1:]):
                            # Query until we have seen count labels.
                            n_required_queries = count - (
                                ~sampler.labels.mask).sum()
                            assert n_required_queries >= 0
                            # Make that many queries.
                            logging.debug('Making {} queries.'.format(
                                n_required_queries))
                            queries = sampler.sample_indices(
                                n_required_queries)
                            queried_labels = queryable_labels.data[queries]
                            sampler.add_labels(queries, queried_labels)
                            logging.debug('Total labels known: {}'.format(
                                (~sampler.labels.mask).sum()))
                            results[method_index, split_id,
                                    count_index + 1] = sampler.ba(
                                        features[split], norris[split])

                        with open(results_npy_path, 'wb') as f:
                            numpy.save(f, results, allow_pickle=False)

                        # TODO(MatthewJA): Implement overwrite parameter.
                        # TODO(MatthewJA): Implement loading .npy file.

            matplotlib.rcParams['font.family'] = 'serif'
            matplotlib.rcParams['font.serif'] = ['Palatino Linotype']
            plt.figure(figsize=(6, 6))
            results *= 100
            fillbetween(instance_counts,
                        list(zip(*results[0, :])),
                        facecolour='green',
                        edgecolour='green',
                        facealpha=0.1,
                        linestyle='-',
                        marker='.',
                        facekwargs={
                            'linestyle': '-',
                        })
            fillbetween(instance_counts,
                        list(zip(*results[1, :])),
                        facecolour='blue',
                        edgecolour='blue',
                        facealpha=0.1,
                        linestyle='-.',
                        marker='+',
                        facekwargs={
                            'linestyle': '-.',
                        })
            fillbetween(instance_counts,
                        list(zip(*results[2, :])),
                        facecolour='red',
                        edgecolour='red',
                        facealpha=0.1,
                        linestyle='--',
                        marker='x',
                        facekwargs={
                            'linestyle': '--',
                        })
            plt.grid(visible=True,
                     which='both',
                     axis='y',
                     color='grey',
                     linestyle='-',
                     alpha=0.5)
            plt.ylim((70, 100))
            plt.xlim((10**2, 10**4))
            plt.xlabel('Number of training instances')
            plt.ylabel('Balanced accuracy (%)')
            plt.legend(['QBC', 'Passive', 'Balanced Passive'])
            plt.xscale('log')
            plt.show()
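# fillbetween is a helper defined elsewhere; a minimal sketch consistent with
# how it is called above (a mean line plus a shaded +/- one-s.d. band; the
# exact semantics of the original are an assumption). It relies on the numpy
# and plt imports already used in this snippet:
def fillbetween(x, series, facecolour='blue', edgecolour='blue', facealpha=0.1,
                linestyle='-', marker='.', facekwargs=None):
    series = numpy.asarray(series, dtype=float)   # shape (len(x), n_repeats)
    mean, sd = series.mean(axis=1), series.std(axis=1)
    plt.fill_between(x, mean - sd, mean + sd, facecolor=facecolour,
                     alpha=facealpha, **(facekwargs or {}))
    plt.plot(x, mean, color=edgecolour, linestyle=linestyle, marker=marker)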
Example #43
0
# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('K-NN Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
Example #44
0
cmap = KScatteringMap(k,xf,xb)
seed = [[],[]]
xy = [np.array([])]*2
origins = np.array([])
#draw(p,cmap) # use this when you want to draw the Mset; in that case turn off the loadtxt below
#v(p,cmap)
stat = 1  # state of the mouse event
period = 6
seed = np.loadtxt('tree.txt')  # put the Mset file you want to load here
theta0 = np.loadtxt('crosspoint.txt')
theta0 = theta0.T
point = np.loadtxt("ctheta_section_period{}.txt".format(period),dtype = np.complex128)
axis = np.log10(np.abs(point[:,6]))
fig = plt.figure(figsize = (6,6))
ax = fig.add_subplot(1,1,1)
ax.plot(seed[:,0],seed[:,1],',k')
ax.plot(theta0[0],theta0[1],'.',color = 'purple')
plt.scatter(point[:,2],point[:,3], s = 100, c = axis,alpha = 1,cmap="jet",marker ="o")
plt.xlim(0.,3.14)

plt.ylim(2.76,5.02)
x1=0
y1=0
x_pres = 0  # saved copy, kept so the value does not change on click
y_pres = 0

fig.canvas.mpl_connect('button_press_event',onclick)  # (why can't we use onkey...?)

plt.show()
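# onclick is defined elsewhere; a minimal sketch matching how it is wired up
# above (records the clicked point; the body is an assumption):
def onclick(event):
    global x1, y1, x_pres, y_pres
    if event.xdata is None or event.ydata is None:
        return                       # click landed outside the axes
    x_pres, y_pres = x1, y1          # keep the previous point
    x1, y1 = event.xdata, event.ydata
    print("clicked at ({:.3f}, {:.3f})".format(x1, y1))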
Example #45
0
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())

# To getter a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0],
           X_reduced[:, 1],
           X_reduced[:, 2],
           c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
Example #46
0
def Roc_curve(y_test, y_score, n_classes):
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    from itertools import cycle
    from sklearn.metrics import roc_curve, auc
    from numpy import interp  # scipy.interp was removed; numpy.interp behaves the same here

    fpr = dict()
    tpr = dict()
    roc_auc = dict()

    l = len(set(y_score))

    for i in range(l):
        fpr[i], tpr[i], _ = roc_curve(
            np.array(pd.get_dummies(y_test))[:, i],
            np.array(pd.get_dummies(y_score))[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(l)]))

    mean_tpr = np.zeros_like(all_fpr)
    for i in range(l):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])

    mean_tpr = mean_tpr / l  # average over the l per-class curves that were summed

    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    lw = 2
    plt.figure(figsize=(8, 5))
    plt.plot(fpr["macro"],
             tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
             ''.format(roc_auc["macro"]),
             color='green',
             linestyle=':',
             linewidth=4)

    colors = cycle([
        'chocolate', 'aqua', 'darkorange', 'cornflowerblue', 'cadetblue',
        'sienna', 'cornflowerblue'
    ])
    for i, color in zip(range(l), colors):
        plt.plot(fpr[i],
                 tpr[i],
                 color=color,
                 lw=lw,
                 label='ROC curve of class {0} (area = {1:0.2f})'
                 ''.format(i, roc_auc[i]))

    plt.plot([0, 1], [0, 1], '--', color='red', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.annotate('Random Guess', (.5, .48), color='red')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic for Multi Layered Perceptron')
    plt.legend(loc="lower right")
    plt.show()
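# A hypothetical invocation with synthetic predictions, just to exercise the
# function (the labels, noise level and class count are made up):
import numpy as np
rng = np.random.RandomState(0)
y_true = rng.randint(0, 3, size=200)
y_hat = np.where(rng.rand(200) < 0.7, y_true, rng.randint(0, 3, size=200))
Roc_curve(y_true, y_hat, n_classes=3)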
Example #47
0
    Aug5_00UTC_line = np.append(Aug5_00UTC_line,f13['speed'][date_start_index+13+anal_hours[i]])
    Aug5_00UTC_line = np.append(Aug5_00UTC_line,f14['speed'][date_start_index+14+anal_hours[i]])
    Aug5_00UTC_line = np.append(Aug5_00UTC_line,f15['speed'][date_start_index+15+anal_hours[i]])
    
    base = file_start_date
    date_list = [base + timedelta(hours=x) for x in range(0, len(Aug5_00UTC_line))]

    
    plt.plot(date_list,Aug5_00UTC_line,color=color[i],linewidth=lw[i],label='%02d UTC forecast' % (date_list[0].hour))
    plt.scatter(date_list[0],Aug5_00UTC_line[0],s=80,color=color[i],edgecolor=None,zorder=100+i)

#plt.legend(loc=4)
plt.grid()
plt.title(station)
plt.ylabel(r'10-m Wind Speed (ms$\mathregular{^{-1}}$)')
plt.ylim([0,15])
plt.xlim(MW_start,MW_end)

plt.scatter(anal_dates,anal_speed,color=anal_color,zorder=200)

hours = mpl.dates.HourLocator([0,3,6,9,12,15,18,21])
hours_each = mpl.dates.HourLocator()
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_minor_locator(hours_each)
ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%m/%d\n%H:%M'))
plt.savefig(SAVEDIR+station+'_speed_10m_forecasts_stream_all.png')


#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#### Do the same for wind speed 80-m
# plot on a graph
Example #48
0
#-------------------------- PLOTTING--------------------------------------
# Plot for number of pkts transmitted versus cache size
if PLOT_NUM_OF_PKTS:
    p, ax = plt.subplots()
    p1 = ax.errorbar(CSsizes, avgNumPkts[0,:]/1000.0, yerr=errorBar[0,:]/1000.0, fmt='rd-', linewidth=5.0, ms=20.0, markeredgewidth=2, markeredgecolor='r', markerfacecolor='r')
    p2 = ax.errorbar(CSsizes, avgNumPkts[1,:]/1000.0, yerr=errorBar[1,:]/1000.0, fmt='bo--', linewidth=5.0, ms=20.0, markeredgewidth=2, markeredgecolor='b', markerfacecolor='None')
    p3 = ax.errorbar(CSsizes, avgNumPkts[2,:]/1000.0, yerr=errorBar[2,:]/1000.0, fmt='g*-.', linewidth=5.0, ms=20.0, markeredgewidth=2, markeredgecolor='g', markerfacecolor='g')

    plt.rcParams.update({'font.size': 30})
    plt.xlabel('Cache size (number of content objects)')
    plt.ylabel('Number of messages transmitted (X1000)')
    plt.title('Random caching \n LRU')
    plt.legend([p1, p2, p3], ['Cache prob. 100%', 'Cache prob. 80%', 'Cache prob. 60%'], loc='best')
    plt.xlim([0, 30])
    plt.ylim([0,170])
    plt.grid()

# Plot cache diversity versus cache probability for different CS sizes
if PLOT_CACHE_DIVERSITY:
    off = len(cacheProbs)
    legendStr1 = 'Max. cache size 2'
    legendStr2 = 'Max. cache size 5'
    legendStr3 = 'Max. cache size 15'
    legendStr4 = 'Max. cache size 25'
    legendStr5 = 'Max. cache size 50'
#    f, (axq, axs) = plt.subplots(1, 2, sharey=True)
    f, axs = plt.subplots()
#    q1 = axq.errorbar(cacheProbs, avgCD[0,0:off], yerr=CDerrorBar[0,0:off], fmt='kD-', linewidth=2.0, ms=6.0)
#    q2 = axq.errorbar(cacheProbs, avgCD[1,0:off], yerr=CDerrorBar[1,0:off], fmt='ro-', linewidth=2.0, ms=6.0)
#    q3 = axq.errorbar(cacheProbs, avgCD[2,0:off], yerr=CDerrorBar[2,0:off], fmt='bs-', linewidth=2.0, ms=6.0)
Example #49
0
def main_clf(metric_,
             clf_,
             grid_,
             range_=(2, 7),
             cv_=5,
             verb_=False,
             graphs=False):
    pipe = Pipeline(steps=[('sc', StandardScaler()), ('clf', clf_)])
    max_scoring = 0
    for k in range(*range_):
        denue_wide = pd.read_csv(f"summary/Count/denue_wide_{k}.csv")  ###
        rezago = pd.read_csv("rezago_social/rezago_social.csv")
        rezago_social = rezago[[
            "lgc00_15cl3_2", "Key", "POB_TOTAL", "LAT", "LON"
        ]]
        df = pd.merge(rezago_social, denue_wide, on=['Key'])
        y = rezago_social['lgc00_15cl3_2']
        df.drop(["lgc00_15cl3_2", "Key", "LAT", "LON"], axis=1, inplace=True)
        X = df.div(df.POB_TOTAL, axis=0) * 1000
        X.drop(["POB_TOTAL"], axis=1, inplace=True)
        X["LAT"] = rezago_social["LAT"]
        X["LON"] = rezago_social["LON"]
        print(f'# CLF {k} {X.shape}')
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            stratify=y,
                                                            test_size=0.20,
                                                            random_state=0)
        clf_cv = GridSearchCV(pipe,
                              grid_,
                              cv=cv_,
                              scoring=metric_,
                              verbose=verb_)  # cv_
        clf_cv.fit(X_train, y_train)
        if np.mean(clf_cv.best_score_) > max_scoring:
            max_scoring = clf_cv.best_score_
            print(f"\t # {k} CLF {clf_cv.best_score_} {clf_cv.best_params_}")
            best_params = clf_cv.best_params_
            best_k = k
            Xtrain, ytrain = X_train, y_train
            Xtest, ytest = X_test, y_test
            X_, y_ = X, y
    best_params_ = {k[5:]: v for k, v in best_params.items()}
    best_clf = clf_.set_params(**best_params_)
    best_pipe = Pipeline(steps=[('sc', StandardScaler()), ('clf', best_clf)])
    print('#BEST', best_pipe, max_scoring)
    best_pipe.fit(Xtrain, ytrain)
    print(f"# {best_k}: Train:{best_pipe.score(Xtrain, ytrain) * 100}")
    print(f"# {best_k}: Test:{best_pipe.score(Xtest, ytest) * 100}")
    scores = cross_val_score(best_pipe,
                             X_,
                             y_,
                             cv=cv_,
                             n_jobs=-1,
                             scoring='accuracy')
    print(f"# {best_k}: Accuracy CV5:{np.mean(scores)} +/- {np.std(scores)}")
    scores_ = cross_val_score(best_pipe,
                              X_,
                              y_,
                              cv=cv_,
                              n_jobs=-1,
                              scoring=metric_)
    print(
        f"# {best_k}: {metric_} CV5:{np.mean(scores_)} +/- {np.std(scores_)}")
    y_pred = cross_val_predict(best_pipe, X_, y_, cv=cv_)
    print(classification_report(y_, y_pred, digits=3))

    print(np.unique(np.array(y_pred), return_counts=True))

    if graphs:
        # plot_multiclass_roc(best_pipe, X_, y_, n_classes=3, figsize=(16, 10))
        probas = cross_val_predict(best_pipe,
                                   X_,
                                   y_,
                                   cv=cv_,
                                   method='predict_proba')
        fig, (ax1, ax2) = plt.subplots(1, 2)
        skplt.metrics.plot_roc(y_, probas, ax=ax1, title='')
        handles, labels = ax1.get_legend_handles_labels()
        # print(labels)
        labels = [
            lb.replace(' 1 ', ' A ').replace(' 2 ',
                                             ' M ').replace(' 3 ', ' B ')
            for lb in labels
        ]
        # print(labels)
        ax1.legend(handles, labels)
        ax1.get_figure()
        ax1.set_xlabel('TFP\n(A)')
        skplt.metrics.plot_precision_recall(y_, probas, ax=ax2, title='')
        handles, labels = ax2.get_legend_handles_labels()
        # print(labels)
        labels = [
            lb.replace(' 1 ', ' A ').replace(' 2 ',
                                             ' M ').replace(' 3 ', ' B ')
            for lb in labels
        ]
        # print(labels)
        ax2.legend(handles, labels)
        ax2.get_figure()
        ax2.set_xlabel('S\n(B)')
        plt.show()

        ### 2016
        denue_2016 = pd.read_csv(
            f"summary/201610/denue_wide_{best_k}.csv")  ###
        df_2016 = pd.merge(rezago_social, denue_2016, on=['Key'])
        df_2016.drop(["lgc00_15cl3_2", "Key", "LAT", "LON"],
                     axis=1,
                     inplace=True)
        X_2016 = df_2016.div(df.POB_TOTAL, axis=0) * 1000
        X_2016.drop(["POB_TOTAL"], axis=1, inplace=True)
        X_2016["LAT"] = rezago_social["LAT"]
        X_2016["LON"] = rezago_social["LON"]
        print(X_2016.columns)
        y_pred_2016 = best_pipe.predict(X_2016)
        ### 2017
        denue_2017 = pd.read_csv(
            f"summary/201711/denue_wide_{best_k}.csv")  ###
        df_2017 = pd.merge(rezago_social, denue_2017, on=['Key'])
        df_2017.drop(["lgc00_15cl3_2", "Key", "LAT", "LON"],
                     axis=1,
                     inplace=True)
        X_2017 = df_2017.div(df.POB_TOTAL, axis=0) * 1000
        X_2017.drop(["POB_TOTAL"], axis=1, inplace=True)
        X_2017["LAT"] = rezago_social["LAT"]
        X_2017["LON"] = rezago_social["LON"]
        y_pred_2017 = best_pipe.predict(X_2017)
        # ### 2018
        # denue_2018 = pd.read_csv(f"summary/201811/denue_wide_{best_k}.csv")  ###
        # df_2018 = pd.merge(rezago_social, denue_2018, on=['Key'])
        # df_2018.drop(["lgc00_15cl3", "Key", "LAT", "LON"], axis=1, inplace=True)
        # X_2018 = df_2018.div(df.POB_TOTAL, axis=0) * 1000
        # X_2018.drop(["POB_TOTAL"], axis=1, inplace=True)
        # X_2018["LAT"] = rezago_social["LAT"]
        # X_2018["LON"] = rezago_social["LON"]
        # y_pred_2018 = best_pipe.predict(X_2018)
        # ### 2019
        # denue_2019 = pd.read_csv(f"summary/201911/denue_wide_{best_k}.csv")  ###
        # df_2019 = pd.merge(rezago_social, denue_2019, on=['Key'])
        # df_2019.drop(["lgc00_15cl3", "Key", "LAT", "LON"], axis=1, inplace=True)
        # X_2019 = df_2019.div(df.POB_TOTAL, axis=0) * 1000
        # X_2019.drop(["POB_TOTAL"], axis=1, inplace=True)
        # X_2019["LAT"] = rezago_social["LAT"]
        # X_2019["LON"] = rezago_social["LON"]
        # y_pred_2019 = best_pipe.predict(X_2019)
        # ### 2020
        # denue_2020 = pd.read_csv(f"summary/202011/denue_wide_{best_k}.csv")  ###
        # df_2020 = pd.merge(rezago_social, denue_2020, on=['Key'])
        # df_2020.drop(["lgc00_15cl3", "Key", "LAT", "LON"], axis=1, inplace=True)
        # X_2020 = df_2020.div(df.POB_TOTAL, axis=0) * 1000
        # X_2020.drop(["POB_TOTAL"], axis=1, inplace=True)
        # X_2020["LAT"] = rezago_social["LAT"]
        # X_2020["LON"] = rezago_social["LON"]
        # y_pred_2020 = best_pipe.predict(X_2020)
        # Confusion matrix
        skplt.metrics.plot_confusion_matrix(y_,
                                            y_pred,
                                            normalize=True,
                                            title=" ")
        plt.xticks([0, 1, 2], ['B', 'M', 'A'], rotation='horizontal')
        plt.yticks([0, 1, 2], ['B', 'M', 'A'], rotation='horizontal')
        plt.xlabel('Clases predichas')
        plt.ylabel('Clases verdaderas')
        plt.show()
        # Map
        rezago_social['Pred'] = y_pred
        rezago_social['Pred_2016'] = y_pred_2016
        rezago_social['Pred_2017'] = y_pred_2017
        # rezago_social['Pred_2018'] = y_pred_2018
        # rezago_social['Pred_2019'] = y_pred_2019
        # rezago_social['Pred_2020'] = y_pred_2020
        rezago_social.to_csv('predictions.csv')  ###
        rezago_social['Key_'] = rezago_social['Key'].astype(str).str.zfill(5)
        gdf = gpd.read_file('municipios/areas_geoestadisticas_municipales.shp')
        gdf['Key_'] = gdf['CVE_ENT'] + gdf['CVE_MUN']
        gdf = gdf.merge(rezago_social, on='Key_')
        legend_elements = [
            Line2D(
                [0],
                [0],
                marker='o',
                color='w',
                label='B',
                markerfacecolor='g',
                markersize=10,
            ),
            Line2D([0], [0],
                   marker='o',
                   color='w',
                   label='M',
                   markerfacecolor='yellow',
                   markersize=10),
            Line2D([0], [0],
                   marker='o',
                   color='w',
                   label='A',
                   markerfacecolor='r',
                   markersize=10)
        ]
        csfont = {'fontname': 'Times New Roman'}
        font = font_manager.FontProperties(family='Times New Roman',
                                           weight='normal',
                                           style='normal',
                                           size=12)
        colors = {3: 'green', 2: 'yellow', 1: 'red'}
        models = {
            'RandomForestClassifier': 'RF',
            'SVC': 'SVM',
            'LogisticRegression': 'LR'
        }
        ###
        # gdf.plot(color=gdf['Pred_2016'].map(colors))
        # plt.xticks([])
        # plt.yticks([])
        # txt = f"Categorías predichas por modelo {models.get(clf.__class__.__name__, 'ABC')}, para el año 201X."
        # plt.text(800000, 0.01, txt, wrap=True, horizontalalignment='left', fontsize=12, **csfont)
        # plt.legend(handles=legend_elements, prop=font)
        # plt.show()
        ### Map
        fig, (ax1, ax2) = plt.subplots(1, 2)
        gdf.plot(ax=ax1, color=gdf['Pred_2016'].map(colors))
        ax1.set_xticks([])
        ax1.set_yticks([])
        # txt = f"(A) Clases predichas con modelo {models.get(clf.__class__.__name__, 'ABC')} en 2016"
        ax1.set_xlabel("(A)", **csfont)
        # ax1.text(800000, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12, **csfont)
        ax1.legend(handles=legend_elements, prop=font)
        gdf.plot(ax=ax2, color=gdf['Pred_2017'].map(colors))
        ax2.set_xticks([])
        ax2.set_yticks([])
        # txt = f"(B) Clases predichas con modelo {models.get(clf.__class__.__name__, 'ABC')} en 2017"
        ax2.set_xlabel("(B)", **csfont)
        # ax2.text(800000, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12, **csfont)
        ax2.legend(handles=legend_elements, prop=font)
        plt.show()

        ### Map
        fig, (ax1, ax2) = plt.subplots(1, 2)
        gdf.plot(ax=ax1, color=gdf['lgc00_15cl3_2'].map(colors), legend=True)
        ax1.set_xticks([])
        ax1.set_yticks([])
        # txt = "(A) Clases de acuerdo a Valdés-Cruz y Vargas-Chanes (2017)"
        ax1.set_xlabel("(A)", **csfont)
        # ax1.text(800000, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12, **csfont)
        ax1.legend(handles=legend_elements, prop=font)
        gdf.plot(ax=ax2, color=gdf['Pred'].map(colors))
        ax2.set_xticks([])
        ax2.set_yticks([])
        # txt = f"(B) Clases predichas con modelo {models.get(clf.__class__.__name__, 'ABC')} en 2015"
        ax2.set_xlabel("(B)", **csfont)
        # ax2.text(800000, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12, **csfont)
        ax2.legend(handles=legend_elements, prop=font)
        plt.show()
        # ROC curve
        y_bin = label_binarize(y, classes=[1, 2, 3])
        n_classes = y_bin.shape[1]
        y_score = cross_val_predict(best_pipe,
                                    X_,
                                    y_,
                                    cv=cv_,
                                    method='predict_proba')
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(y_bin[:, i], y_score[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(n_classes):
            mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
        mean_tpr /= n_classes
        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
        plt.figure()
        plt.plot(fpr["macro"],
                 tpr["macro"],
                 label='ROC macro (AUC = {0:0.3f})'
                 ''.format(roc_auc["macro"]),
                 color='navy',
                 linestyle=':',
                 linewidth=4)
        rezago = {1: 'B', 2: 'M', 3: 'A'}
        colors = cycle(['green', 'yellow', 'red'])
        for i, color in zip(range(n_classes), colors):
            plt.plot(fpr[i],
                     tpr[i],
                     color=color,
                     lw=2,
                     label='Clase de rezago {0} (AUC = {1:0.3f})'
                     ''.format(rezago[i + 1], roc_auc[i]))
        plt.plot([0, 1], [0, 1], 'k--', lw=2)
        plt.xlim([-0.05, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('TFP', fontsize=12, **csfont)
        plt.ylabel('TVP', fontsize=12, **csfont)
        plt.legend(loc="lower right", prop=font)
        plt.show()
    return scores_
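# A hypothetical invocation; the classifier, parameter grid and scoring metric
# below are placeholders, and the CSV files read inside main_clf must already
# exist on disk:
from sklearn.linear_model import LogisticRegression
grid = {'clf__C': [0.01, 0.1, 1.0, 10.0]}
scores = main_clf('f1_macro', LogisticRegression(max_iter=1000), grid,
                  range_=(2, 7), cv_=5, verb_=False, graphs=False)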
Example #50
0
    for i, algo in enumerate(all_algos):
        E = dict_Es[algo]
        use_acc = algo[1]
        linestyle = 'dashed' if use_acc else 'solid'

        ax.semilogy(
            dict_coef[algo[0]] * f_gap * np.arange(len(E)), E - p_star,
            label=algo_names[algo],
            color=dict_color[algo[0]], linestyle=linestyle)

    ax.semilogy(
        np.arange(len(E_optimal)), E_optimal - p_star,
        label=label_opt, color='black', linestyle='dashdot')

    dict_dataset = {}
    dict_dataset["rcv1_train"] = "rcv1"
    dict_dataset["real-sim"] = "real_sim"  # use _ not - for latex
    dict_dataset["leukemia"] = "leukemia"

    str_info = "%s (%i first columns)" % (dataset, n_features)
    title = pb + str_info

    plt.ylabel(r"$f(x^{(k)}) - f(x^{*})$")
    plt.xlabel("nb gradient calls")
    plt.ylim((1e-10, None))
    plt.tight_layout()

    plt.legend()
    plt.title(title.replace('_', ' '))
    plt.show(block=False)
Example #51
0
def plot_histograms(holder):

    # Limit the data to the interesting interval
    holder.cut_time(2700., 3000.)

    # Extract and process the temperature data
    temp_data1 = dataholders.TempData(holder.time, holder.T1)
    temp_data1.preprocess(filters.median_filter, 21)
    temp_data1.process(heuristics.base_median_heuristic, 7)
    temp_data1.detect(.015)

    temp_data2 = dataholders.TempData(holder.time, holder.T2)
    temp_data2.preprocess(filters.median_filter, 21)
    temp_data2.process(heuristics.base_median_heuristic, 5)
    temp_data2.detect(.010)

    # Extract and process the wind data
    wind_data = dataholders.WindData(holder.v1,
                                     holder.v2,
                                     holder.v3,
                                     degrees=DEGREES)

    print("Percentage of anomalies in T1: %.2f%%" %
          (temp_data1.mask.mean() * 100))
    print("Percentage of anomalies in T2: %.2f%%" %
          (temp_data2.mask.mean() * 100))

    print(temp_data1.mask.sum())
    print(temp_data2.mask.sum())

    both_mask = np.logical_or(temp_data1.mask, temp_data2.mask)

    print(both_mask.sum())

    print("Percentage of anomalies in either: %.2f%%" %
          (both_mask.mean() * 100))

    # Select the angles with and without anomalies

    mask = temp_data1.mask

    theta_bad = wind_data.theta[mask]
    phi_bad = wind_data.phi[mask]

    theta_good = wind_data.theta[~mask]
    phi_good = wind_data.phi[~mask]

    # Compute the histograms
    hist_bad, xedg_bad, yedg_bad = np.histogram2d(theta_bad,
                                                  phi_bad,
                                                  bins=100,
                                                  density=True)
    hist_good, xedg_good, yedg_good = np.histogram2d(theta_good,
                                                     phi_good,
                                                     bins=100,
                                                     density=True)
    hist_all, xedg_all, yedg_all = np.histogram2d(wind_data.theta,
                                                  wind_data.phi,
                                                  bins=100,
                                                  density=True)

    # Build the plot
    fig = plt.figure(figsize=(22, 8))

    limits_x = (89, 98) if DEGREES else (1.56, 1.72)
    limits_y = (83, 92) if DEGREES else (1.45, 1.61)

    # Only bad points
    fig.add_subplot(141, facecolor='black')
    plt.title("Anomalous points")
    plt.xlabel(r"$\theta$", fontsize=30)
    plt.ylabel(r"$\varphi$", fontsize=30, rotation=0)
    plt.xlim(*limits_x)
    plt.ylim(*limits_y)
    im1 = plt.imshow(
        hist_bad,
        interpolation='nearest',
        origin='lower',
        cmap='gnuplot',
        alpha=1.,
        extent=[xedg_bad[0], xedg_bad[-1], yedg_bad[0], yedg_bad[-1]])
    # Only good points
    fig.add_subplot(142, facecolor='black')
    plt.title("Non-anomalous points")
    plt.xlabel(r"$\theta$", fontsize=30)
    plt.ylabel(r"$\varphi$", fontsize=30, rotation=0)
    plt.xlim(*limits_x)
    plt.ylim(*limits_y)
    im2 = plt.imshow(
        hist_good,
        interpolation='nearest',
        origin='lower',
        cmap='cubehelix',
        alpha=1.,
        extent=[xedg_good[0], xedg_good[-1], yedg_good[0], yedg_good[-1]])

    # Both good and bad points
    fig.add_subplot(143, facecolor='black')
    plt.title("Anomalous and non-anomalous points")
    plt.xlabel(r"$\theta$", fontsize=30)
    plt.ylabel(r"$\varphi$", fontsize=30, rotation=0)
    plt.xlim(*limits_x)
    plt.ylim(*limits_y)
    im1 = plt.imshow(
        hist_bad,
        interpolation='nearest',
        origin='lower',
        cmap='gnuplot',
        alpha=1.,
        extent=[xedg_bad[0], xedg_bad[-1], yedg_bad[0], yedg_bad[-1]])

    im2 = plt.imshow(
        hist_good,
        interpolation='nearest',
        origin='lower',
        cmap='cubehelix',
        alpha=0.5,
        extent=[xedg_good[0], xedg_good[-1], yedg_good[0], yedg_good[-1]])

    fig.add_subplot(144, facecolor='black')
    plt.title("All points")
    plt.xlabel(r"$\theta$", fontsize=30)
    plt.ylabel(r"$\varphi$", fontsize=30, rotation=0)
    plt.xlim(*limits_x)
    plt.ylim(*limits_y)
    im1 = plt.imshow(
        hist_all,
        interpolation='nearest',
        origin='lower',
        cmap='gnuplot',
        alpha=0.9,
        extent=[xedg_all[0], xedg_all[-1], yedg_all[0], yedg_all[-1]])

    plt.show()
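# One caveat worth noting: np.histogram2d returns H indexed as H[x_bin, y_bin],
# while imshow treats the first axis as rows (y), so transposing H is usually
# needed for the image to line up with the extent. A self-contained sketch:
import numpy as np
import matplotlib.pyplot as plt
xs, ys = np.random.randn(10000), 0.5 * np.random.randn(10000) + 1.0
H, xedges, yedges = np.histogram2d(xs, ys, bins=100, density=True)
plt.imshow(H.T, origin='lower', interpolation='nearest',
           extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
plt.show()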
Example #52
0
post_error = evaluate_posterior_error(U, EnKF_values, Time)
post_error_AI = evaluate_posterior_error(U, EnKF_AI_values, Time)
xaxis = np.arange(0.0, T, h)

plt.figure(figsize=(8, 2.5))

ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)

ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()

plt.ylim(0, ymax)

plt.plot(xaxis, post_error, color=(31 / 255, 119 / 255, 180 / 255), lw=0.5)
plt.plot(xaxis, post_error_AI, color=(255 / 255, 127 / 255, 14 / 255), lw=0.5)
plt.plot(xaxis, Benchmark_RMSE * np.ones(Time), '--', color='black', lw=0.3)

plt.gca().legend(('EnKF', 'EnKF-AI'))

plt.show()

post_error_CI = evaluate_posterior_error(U, EnKF_CI_values, Time)
post_error_CAI = evaluate_posterior_error(U, EnKF_CAI_values, Time)

plt.figure(figsize=(8, 2.5))

ax = plt.subplot(111)
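# evaluate_posterior_error is defined elsewhere; a plausible minimal sketch is
# the time-resolved RMSE between the truth U and the ensemble mean (the exact
# definition in the original is an assumption):
import numpy as np
def evaluate_posterior_error(U, ensemble_values, Time):
    # U: (Time, dim) truth; ensemble_values: (Time, n_ens, dim) analyses
    err = np.empty(Time)
    for t in range(Time):
        err[t] = np.sqrt(np.mean((U[t] - ensemble_values[t].mean(axis=0)) ** 2))
    return err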
Example #53
0
def process_results(args, result_type, plot=False, stream_sig=""):
    result_names = get_result_names(result_type, stream_sig=stream_sig)
    result_lists, result_map = get_result_defs(args)
    num_seen = 0
    num_anoms = 0
    n_batches = 0
    all_results = list()
    for i, r_name in enumerate(result_names):
        parent_folder = "./temp/aad/%s" % args.dataset
        rs = result_map[r_name]
        r_avg, r_sd, r_n = rs.get_results(parent_folder)
        # logger.debug("[%s]\navg:\n%s\nsd:\n%s" % (rs.name, str(list(r_avg)), str(list(r_sd))))
        num_seen = max(num_seen, len(r_avg))
        num_anoms = max(num_anoms, rs.num_anoms)
        orig_labels = rs.get_original_labels()
        # logger.debug("original labels:\n%s" % str(list(orig_labels)))
        queried = rs.get_queried(parent_folder)
        # logger.debug("queried:\n%s" % str(list(queried)))
        window_indexes = rs.get_window_indexes(parent_folder)
        # logger.debug("window_indexes:\n%s" % str(list(window_indexes)))
        discovered = get_num_discovered_classes_per_batch(
            queried, orig_labels, batch_size=3, window_indexes=window_indexes)
        avg_classes = np.mean(discovered, axis=0)
        n_batches = max(n_batches, len(avg_classes))
        # logger.debug("[%s] discovered:\n%s" % (r_name, str(list(avg_classes))))
        all_results.append([rs.name, r_avg, r_sd, r_n, avg_classes])

    c_mean = np.cumsum(all_results[1][4] - all_results[0][4]) / np.arange(
        1, len(all_results[1][4]) + 1, dtype=np.float32)
    logger.debug("c_mean:\n%s" % str(list(c_mean)))

    if plot:
        plot_results(all_results,
                     "./temp/aad_plots/class_diff/results_class_%s%s_%s.pdf" %
                     (args.dataset, stream_sig, result_type),
                     num_seen=num_seen,
                     num_anoms=num_anoms)

        plot_class_discovery(
            all_results,
            "./temp/aad_plots/class_diff/results_classes_per_batch_%s%s_%s.pdf"
            % (args.dataset, stream_sig, result_type),
            batch_size=3,
            n_batches=n_batches)

        test_mean = np.mean(all_results[1][4] - all_results[0][4])
        test_sd = np.std(all_results[1][4] - all_results[0][4]) / np.sqrt(
            len(all_results[1][4]))
        logger.debug("[%s] mean diff: %f (%f)" %
                     (args.dataset, test_mean, test_sd))

        dp = DataPlotter(
            pdfpath=
            "./temp/aad_plots/class_diff/results_diff_classes_%s%s_%s.pdf" %
            (args.dataset, stream_sig, result_type),
            rows=1,
            cols=1)
        pl = dp.get_next_plot()
        plt.xlabel('number of batches from start (batch size=3)', fontsize=16)
        plt.ylabel('avg. classes per batch', fontsize=16)
        plt.xlim([0, len(c_mean)])
        plt.ylim([np.min(c_mean), np.max(c_mean)])
        pl.plot(np.arange(len(c_mean)),
                c_mean,
                '--',
                color="red",
                linewidth=1,
                label="diff in num classes")
        pl.axhline(0., color="black", linewidth=1)
        pl.legend(loc='lower right', prop={'size': 16})
        dp.close()

    return result_type, stream_sig, c_mean
Example #54
0
    W = W - learning_rate * dw
    b = b - learning_rate * db
    
    if epoch%100==0:
        print("Loss after",epoch,"iterations is",loss)

print()
print("Variables W and b are ",W,b)#----I want to delete this too!

plt.scatter(x_train[:,0], x_train[:, 1], c=y_train.ravel())
ax = plt.gca()
xvals = np.array(ax.get_xlim()).reshape(-1,1)
yvals = -(xvals * W[0][0] + b)/ W[1][0]

plt.plot(xvals,yvals)
plt.ylim(0,100)

######################################################################
# Code to send W and b goes here!
# or, you may consider making the above a function/class
# and calling it from the code that actually sends the variables.
# In such a case, The above function will return W,b
######################################################################

#Prediction on Training set
preds = []
for i in sigmoid(Z):
    if i > 0.5:
        preds.append(1)
    else:
        preds.append(0)
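# sigmoid and Z are not defined in this snippet; presumably something like the
# following, with W and b the trained parameters from the loop above:
import numpy as np
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
Z = x_train @ W + b   # training-set logits, shape (n_samples, 1)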
Example #55
0
if __name__ == '__main__':
    ip_address = '127.0.0.1'
    port = 1865

    integration_time = 15  # [seconds]

    spectrometer = Spectrometer(ip_address, port)
    print('Version:', spectrometer.get_version())
    print('Serial:', spectrometer.get_serial())
    spectrometer.set_integration(integration_time * 1e6)
    print('Integration time: %s µs' % spectrometer.get_integration())

    wavelengths = spectrometer.get_wavelengths()
    wavelengths = [float(v) for v in wavelengths.split()]

    print('Getting spectrum...')
    print('Spectrum:')
    spectrum = spectrometer.get_spectrum()
    spectrum = [int(v) for v in spectrum.split()]

    print('done.\nCurrent status:', spectrometer.get_current_status())

    import matplotlib.pyplot as plt
    plt.plot(wavelengths, spectrum)
    plt.xlim(wavelengths[0], wavelengths[-1])
    plt.ylim(1000, 16500)
    plt.ylabel('Intensity')
    plt.xlabel('Wavelength')
    #plt.savefig('foo.png', bbox_inches='tight')
    plt.show()
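# Spectrometer is an external class; a minimal stand-in with the same interface
# (canned data, no networking) for trying the plotting code above. The name
# FakeSpectrometer and all return values are made up:
class FakeSpectrometer:
    def __init__(self, ip, port): pass
    def get_version(self): return '0.0-stub'
    def get_serial(self): return 'STUB-0001'
    def set_integration(self, us): self._us = us
    def get_integration(self): return getattr(self, '_us', 0)
    def get_wavelengths(self): return ' '.join(str(300 + 0.5 * i) for i in range(2048))
    def get_spectrum(self): return ' '.join(str(1000 + i % 5000) for i in range(2048))
    def get_current_status(self): return 'idle'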
Example #56
0
    plt.figure(figsize=(10, 8), facecolor='w')
    for i, clf in enumerate(clfs):
        clf.fit(x, y)

        y_hat = clf.predict(x)
        # show_accuracy(y_hat, y) # accuracy
        # show_recall(y, y_hat)   # recall
        print('Run', i + 1, ':')
        print('accuracy:\t', accuracy_score(y, y_hat))
        print('precision:\t', precision_score(y, y_hat, pos_label=1))
        print('recall:\t', recall_score(y, y_hat, pos_label=1))
        print('F1-score:\t', f1_score(y, y_hat, pos_label=1))
        print()

        # plotting
        plt.subplot(2, 2, i + 1)
        grid_hat = clf.predict(grid_test)  # predicted class for each grid point
        grid_hat = grid_hat.reshape(x1.shape)  # reshape to match the input grid
        plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light, alpha=0.8)
        plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', s=s,
                    cmap=cm_dark)  # show the samples
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)
        plt.title(titles[i])
        plt.grid(visible=True, ls=':')
    plt.suptitle('Handling imbalanced data', fontsize=18)
    plt.tight_layout(pad=1.5)
    plt.subplots_adjust(top=0.92)
    plt.savefig('2.png')
    plt.show()
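# x1, x2 and grid_test come from earlier in the script; they are typically
# built as a dense mesh over the feature range (variable names here match the
# usage above, and x is assumed to hold the 2-D training features):
import numpy as np
x1_min, x1_max = x[:, 0].min(), x[:, 0].max()
x2_min, x2_max = x[:, 1].min(), x[:, 1].max()
x1, x2 = np.meshgrid(np.linspace(x1_min, x1_max, 500),
                     np.linspace(x2_min, x2_max, 500))
grid_test = np.stack((x1.ravel(), x2.ravel()), axis=1)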
Example #57
0
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(121)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_linewidth(0.5)
ax.spines['bottom'].set_color('black')
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
#ax.set_facecolor('white')
ax.set_facecolor("white")

# Ensure that the axis ticks only show up on the bottom and left of the plot.    
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()

# Limit the range of the plot to only where the data is.    
plt.ylim(min_completion, 105)
max_x = 0
for k in data:
    if len(data[k]) > max_x: max_x = len(data[k])
plt.xlim(0, max_x)

# Make sure your axis ticks are large enough to be easily read.
plt.yticks(range(min_completion, 105, 10), [str(x) + "%" for x in range(min_completion, 105, 10)], fontsize=14)
plt.xticks(fontsize=14)

# Provide tick lines across the plot to help your viewers trace along
for y in range(min_completion, 105, 10):
    plt.plot(range(0, max_x), [y] * len(range(0, max_x)), "--", lw=0.5, color="black", alpha=0.3)

# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom=False, top=False, labelbottom=True, left=False, right=False, labelleft=True)
Example #58
0
            min_diff = min(min_diff, np.min(class_diff))
            max_diff = max(max_diff, np.max(class_diff))
            x_lim = max(x_lim, len(class_diff))

    if len(class_diffs) > 0:
        dataset_legend_only = False
        dp = DataPlotter(
            pdfpath="./temp/aad_plots/class_diff/results_diff_classes%s_all.pdf"
            % stream_sig,
            rows=1,
            cols=1)
        pl = dp.get_next_plot()
        plt.xlabel('number of batches from start (batch size=3)', fontsize=14)
        plt.ylabel('avg. difference in #unique classes per batch', fontsize=14)
        plt.xlim([0, min(100, x_lim)])
        plt.ylim([min_diff, max_diff])
        pl.axhline(0., color="black", linewidth=1)
        legend_handles = list()
        result_idx = 0
        for dataset, result_type, stream_sig, class_diff in class_diffs:
            dataset_name = dataset_configs[dataset][4]
            if dataset_legend_only:
                label = "%s" % (dataset_name, )
            else:
                label = "%s (%s)" % (dataset, result_type)
            ln, = pl.plot(np.arange(len(class_diff)),
                          class_diff,
                          line_types[result_type],
                          color=dataset_colors[dataset],
                          linewidth=1,
                          label=label)
Example #59
0
def test(img_dir, split_test, split_name, model, batch_size, img_size, crop_size, gpu_id):

	# -------------------- SETTINGS: CXR DATA TRANSFORMS -------------------
	normalizer = [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]
	data_transforms = {split_name: transforms.Compose([
		transforms.Resize(img_size),
		# transforms.RandomResizedCrop(crop_size),
		transforms.CenterCrop(crop_size),
		transforms.ToTensor(),
		transforms.Normalize(normalizer[0], normalizer[1])])}

	# -------------------- SETTINGS: DATASET BUILDERS -------------------
	datasetTest = DataGenerator(img_dir=img_dir, split_file=split_test,
								transform=data_transforms[split_name])
	dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=batch_size,
								shuffle=False, num_workers=32, pin_memory=True)

	dataloaders = {}
	dataloaders[split_name] = dataLoaderTest

	print('Number of testing CXR images: {}'.format(len(datasetTest)))
	dataset_sizes = {split_name: len(datasetTest)}
 
	# -------------------- TESTING -------------------
	model.eval()
	running_corrects = 0
	output_list = []
	label_list = []
	preds_list = []

	with torch.no_grad():
		# Iterate over data.
		for data in dataloaders[split_name]:
			inputs, labels, img_names = data

			labels_auc = labels
			labels_print = labels
			labels_auc = labels_auc.type(torch.FloatTensor)
			labels = labels.type(torch.LongTensor) #add for BCE loss
			
			# wrap them in Variable
			inputs = inputs.cuda(gpu_id, non_blocking=True)
			labels = labels.cuda(gpu_id, non_blocking=True)
			labels_auc = labels_auc.cuda(gpu_id, non_blocking=True)

			labels = labels.view(labels.size()[0],-1) #add for BCE loss
			labels_auc = labels_auc.view(labels_auc.size()[0],-1) #add for BCE loss
			# forward
			outputs = model(inputs)
			# _, preds = torch.max(outputs.data, 1)
			score = torch.sigmoid(outputs)
			score_np = score.data.cpu().numpy()
			preds = score>0.5
			preds_np = preds.data.cpu().numpy()
			preds = preds.type(torch.cuda.LongTensor)

			labels_auc = labels_auc.data.cpu().numpy()
			outputs = outputs.data.cpu().numpy()

			for j in range(len(img_names)):
				print(str(img_names[j]) + ': ' + str(score_np[j]) + ' GT: ' + str(labels_print[j]))

			for i in range(outputs.shape[0]):
				output_list.append(outputs[i].tolist())
				label_list.append(labels_auc[i].tolist())
				preds_list.append(preds_np[i].tolist())

			# running_corrects += torch.sum(preds == labels.data)
			# labels = labels.type(torch.cuda.FloatTensor)
			running_corrects += torch.sum(preds.data == labels.data) #add for BCE loss

	acc = float(running_corrects) / dataset_sizes[split_name]  # np.float was removed in NumPy 1.24
	auc = metrics.roc_auc_score(np.array(label_list), np.array(output_list), average=None)
	# print(auc)
	fpr, tpr, _ = metrics.roc_curve(np.array(label_list), np.array(output_list))
	roc_auc = metrics.auc(fpr, tpr)

	ap = metrics.average_precision_score(np.array(label_list), np.array(output_list))
	
	tn, fp, fn, tp = metrics.confusion_matrix(label_list, preds_list).ravel()

	recall = tp/(tp+fn)
	precision = tp/(tp+fp)
	f1 = 2*precision*recall/(precision+recall)
	sensitivity = recall
	specificity = tn/(tn+fp)
	PPV = tp/(tp+fp)
	NPV = tn/(tn+fn)
	print('Test Accuracy: {0:.4f}  Test AUC: {1:.4f}  Test_AP: {2:.4f}'.format(acc, auc, ap))
	print('TP: {0:}  FP: {1:}  TN: {2:}  FN: {3:}'.format(tp, fp, tn, fn))
	print('Sensitivity: {0:.4f}  Specificity: {1:.4f}'.format(sensitivity, specificity))
	print('Precision: {0:.2f}%  Recall: {1:.2f}%  F1: {2:.4f}'.format(precision*100, recall*100, f1))
	print('PPV: {0:.4f}  NPV: {1:.4f}'.format(PPV, NPV))
	# Plot all ROC curves
	plt.figure()
	plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.4f)' % roc_auc)
	plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
	plt.xlim([0.0, 1.0])
	plt.ylim([0.0, 1.0])
	plt.xlabel('False Positive Rate')
	plt.ylabel('True Positive Rate')
	plt.title('ROC curve of abnormal/normal classification: '+args.arch)
	plt.legend(loc="lower right")
	plt.savefig('ROC_abnormal_normal_cls_'+args.arch+'_'+args.test_labels+'.pdf', bbox_inches='tight')
	plt.show()
Example #60
0
matplotlib.rcParams['figure.figsize'] = [7, 7] # for square canvas
matplotlib.rcParams['figure.subplot.left'] = 0
matplotlib.rcParams['figure.subplot.bottom'] = 0
matplotlib.rcParams['figure.subplot.right'] = 1
matplotlib.rcParams['figure.subplot.top'] = 1
plt.scatter(fpr,tpr)
plt.plot(fpr,tpr,label='AF intersectional bias',linewidth=2,color='black',markeredgecolor='black',marker='o')
plt.plot([0,1],[0,1],ls='--',linewidth=2,color='black')
plt.xlabel('False positive rate',fontsize=40,weight='bold')
plt.ylabel('True positive rate',fontsize=40,weight='bold')
plt.legend(loc='lower right',fontsize=32,prop={'weight':'bold','size':30})
plt.xticks(fontsize= 26)
plt.yticks(fontsize= 26)
plt.scatter(0.10714285714285714, 0.35714285714285715,s=200,color='black')
plt.xlim(-0.01,1.01)
plt.ylim(-0.01,1.01)
plt.savefig('roc/af_inter.pdf',bbox_inches='tight')
plt.show()

tpr_lf_use = []
for i in list(zip(tpr_lf,fpr_lf,ac_lst_1)):
    if i not in tpr_lf_use:
        tpr_lf_use.append(i)

fpr = [i[1] for i in tpr_lf_use]
tpr = [i[0] for i in tpr_lf_use]

import matplotlib
matplotlib.rcParams['figure.figsize'] = [7, 7] # for square canvas
matplotlib.rcParams['figure.subplot.left'] = 0
matplotlib.rcParams['figure.subplot.bottom'] = 0