Example #1
def histo_datalink(mode,
                   lambda_val,
                   attribute,
                   ci=95,
                   datalinks=range(0, NUM_DATA_LINK),
                   save=False):
    stats = scalar_stats(scalar_parse(mode, lambda_val))

    for u in datalinks:
        attr = attribute + '-' + str(u)
        bar = stats['mean'][attr]
        error = np.array([
            bar - stats['ci' + str(ci) + '_l'][attr],
            stats['ci' + str(ci) + '_h'][attr] - bar
        ]).reshape(2, 1)
        plt.bar('User ' + str(u),
                bar,
                yerr=error,
                align='center',
                alpha=0.95,
                ecolor='k',
                capsize=7)

    # Show graphic
    plt.title(attribute + ": " + MODE_DESCRIPTION[mode])
    if save:
        plt.savefig("histousers_" + attribute + "_" + mode + "_" + lambda_val +
                    ".pdf",
                    bbox_inches="tight")
        plt.clf()
    else:
        plt.show()
    return
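# Note on the yerr above: matplotlib's bar() accepts a (2, N) array for
# asymmetric error bars (row 0 = distances to the lower ends, row 1 = distances
# to the upper ends), which is what the reshape(2, 1) builds per user. A
# standalone illustration with made-up numbers (not taken from the data above):
import numpy as np
import matplotlib.pyplot as plt

means = [3.0, 5.0]
errs = np.array([[0.5, 0.2],   # lower errors
                 [0.3, 0.6]])  # upper errors
plt.bar(['User 0', 'User 1'], means, yerr=errs, capsize=7, ecolor='k')
plt.show()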
Example #2
def correlationPGN(dataset):
    allColsAsFeatures = dataset.columns.values[:-1]
    X = dataset[allColsAsFeatures]
    corr_matrix = X.corr()
    # Generate a mask for the upper triangle
    mask = np.zeros_like(corr_matrix, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(11, 9))
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio
    sns_corrplot = sns.heatmap(corr_matrix,
                               mask=mask,
                               cmap=cmap,
                               vmax=1,
                               vmin=-1,
                               center=0,
                               square=True,
                               linewidths=.5,
                               cbar_kws={"shrink": .8})
    sns_corrplot.get_figure()
    path = WORKING_PATH + "/___corrrr.png"
    plt.savefig(path)
    plt.clf()
    return path
Example #3
def all_lorenz(mode,
               lambda_val,
               attribute,
               iterations=range(0, NUM_ITERATIONS),
               save=False):
    data = scalar_parse(mode, lambda_val)

    # Plot the mean lorenz
    sel = data[data.name.str.startswith(attribute + '-')].copy()  # copy to avoid SettingWithCopyWarning
    sel['user'] = sel.name.str.split('-', expand=True)[1].astype(int)
    sorted_data = pd.DataFrame()

    for r in iterations:
        tmp = sel[sel.run == r]
        sorted_data['run-' + str(r)] = np.sort(tmp.value.values)
        plot_lorenz_curve(sorted_data['run-' + str(r)],
                          color='grey',
                          alpha=0.25)

    # return sorted_data
    plot_lorenz_curve(sorted_data.mean(axis=1))

    plt.plot([0, 1], [0, 1], 'k', alpha=0.85)
    plt.title(attribute + ": " + MODE_DESCRIPTION[mode] + ' - Mean Gini: ' +
              str(gini(sorted_data.mean(axis=1))))

    if save:
        plt.savefig("lorenz_responseTime_" + mode + "_" + lambda_val + ".pdf")
        plt.clf()
    else:
        plt.show()

    return
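# plot_lorenz_curve() and gini() are helpers not shown on this page. A minimal
# sketch of what they might look like for non-negative values (assumptions,
# not the original implementations):
import numpy as np
import matplotlib.pyplot as plt

def plot_lorenz_curve(values, **kwargs):
    # Cumulative share of the total plotted against cumulative population share.
    v = np.sort(np.asarray(values, dtype=float))
    lorenz = np.insert(np.cumsum(v) / v.sum(), 0, 0.0)
    plt.plot(np.linspace(0.0, 1.0, len(lorenz)), lorenz, **kwargs)

def gini(values):
    # Standard closed form on sorted values:
    # G = 2 * sum(i * x_i) / (n * sum(x)) - (n + 1) / n
    v = np.sort(np.asarray(values, dtype=float))
    n = len(v)
    idx = np.arange(1, n + 1)
    return 2.0 * np.sum(idx * v) / (n * np.sum(v)) - (n + 1.0) / n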
Example #4
def plotFeatImportance(pathOut,
                       imp,
                       oob,
                       oos,
                       method,
                       tag=0,
                       simNum=0,
                       **kargs):
    # plot mean imp bars with std
    mpl.figure(figsize=(10, imp.shape[0] / 5.))
    imp = imp.sort_values('mean', ascending=True)
    ax = imp['mean'].plot(kind='barh',
                          color='b',
                          alpha=0.25,
                          xerr=imp['std'],
                          error_kw={'ecolor': 'r'})
    if method == 'MDI':
        mpl.xlim([0, imp.sum(axis=1).max()])
        mpl.axvline(1. / imp.shape[0], lw=1., color='r', ls='dotted')
    ax.get_yaxis().set_visible(False)
    for i, j in zip(ax.patches, imp.index):
        ax.text(i.get_width() / 2,
                i.get_y() + i.get_height() / 2,
                j,
                ha='center',
                va='center',
                color='k')
    mpl.title('tag=' + str(tag) + ' | simNum=' + str(simNum) + ' | oob=' +
              str(round(oob, 4)) + ' | oos=' + str(round(oos, 4)))
    mpl.savefig(pathOut + 'featImportance_' + str(simNum) + '.png', dpi=100)
    mpl.clf()
    mpl.close()
    return
Example #5
    def plot_stacked_barchart(self, dataframe, sort, title, xlable, ylable,
                              kurs):
        x = []
        tutor = []
        y = []
        for i in dataframe['tutor']:
            if i not in tutor:
                tutor.append(i)
                y.append([])

        for i, elem in enumerate(dataframe[sort]):
            if elem in x:
                y[tutor.index(dataframe['tutor'][i])][x.index(elem)] += 1
            else:
                x.append(elem)
                for j, elem2 in enumerate(tutor):
                    y[j].append(0)
                y[tutor.index(dataframe['tutor'][i])][x.index(elem)] += 1

        bottoms = [0] * len(x)
        for i, elem in enumerate(y):
            # stack each tutor's bars on top of the previous ones
            plt.bar(range(len(elem)), elem, bottom=bottoms, label=tutor[i])
            bottoms = [b + v for b, v in zip(bottoms, elem)]
        plt.xlabel(xlable)
        plt.ylabel(ylable)
        plt.legend(loc="best")
        plt.savefig('./PDFcreater/Plots/{}/{}.png'.format(kurs, title))
        #plt.show()
        # clear the figure before the next plot
        plt.clf()
        plt.cla()
        plt.close()
Example #6
def histo(column):
    mpg = pandas.read_csv("mpg.csv")
    plt.clf()
    if column in list(mpg.columns):
        plt.hist(mpg[column])
        plt.title(column)
        plt.savefig("static/histo.png")
    else:
        print("There is no such an attribute in the given data.")
    return app.send_static_file("histo.png")  # path is relative to the app's static folder
Example #7
 def plot_boring_barchart(self,dataframe,x,y,title,xlable,ylable, kurs):
     plt.bar(x, y, color='blue')
     #plt.title(title)
     plt.xlabel(xlable)
     plt.ylabel(ylable)
     plt.savefig('./PDFcreater/Plots/{}/{}.png'.format(kurs,title))
     # clear the figure before the next plot
     plt.clf()
     plt.cla()
     plt.close()
Example #8
def save_fidelity(fname, fidelity):
    steps = [
        i * environment_configs.get_interval_width()
        for i in range(len(fidelity))
    ]
    plt.clf()
    plt.plot(steps, fidelity)
    plt.ylabel('fidelity')
    plt.xlabel('step')
    plt.savefig(fname + 'fidelity' + '_')
Example #9
 def plot_pie(self, nx_dataframe, topic, se_title, kurs):
     gender = self.sort_column(nx_dataframe[topic])
     labels = [gender[0][i] for i,elem in enumerate(gender[0])]
     fracs = [gender[1][i] for i,elem in enumerate(gender[1])]
     explode = [0.05 for i,elem in enumerate(gender[1])]
     plt.pie(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=True)
     plt.savefig('./PDFcreater/Plots/{}/1{}.png'.format(kurs,se_title))
     plt.clf()
     plt.cla()
     plt.close()
Example #10
def save_controls(control_name, fname, action):
    steps = [
        i * environment_configs.get_interval_width()
        for i in range(action.shape[0])
    ]
    plt.clf()
    plt.step(steps, action)
    plt.ylabel(control_name)
    plt.xlabel('t')
    plt.savefig(fname + '_' + control_name)
Example #11
 def plot_training_val_accuracy(history, epochs):
     plt.clf()
     acc = history.history['acc']
     val_acc = history.history['val_acc']
     plt.plot(epochs, acc, 'g', label='Training acc')
     plt.plot(epochs, val_acc, 'y', label='Validation acc')
     plt.title('Training and validation accuracy')
     plt.xlabel('Epochs')
     plt.ylabel('Accuracy')
     plt.legend()
     plt.show()
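# `history` here is the object returned by Keras model.fit(); its .history dict
# holds one value per epoch for each tracked metric. A hypothetical way to
# produce one with a toy model and random data (assumption, for illustration only):
import numpy as np
from tensorflow import keras

x = np.random.rand(200, 10)
y = np.random.randint(0, 2, size=(200,))
model = keras.Sequential([
    keras.Input(shape=(10,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid'),
])
# metrics=['acc'] so the history keys match the snippet ('acc' / 'val_acc')
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x, y, epochs=5, validation_split=0.2, verbose=0)
epochs = range(1, len(history.history['acc']) + 1)
plot_training_val_accuracy(history, epochs)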
Example #12
def goplot(a, b):

    amax = max(np.max(a), np.max(b))
    amin = min(np.min(a), np.min(b))

    plt.clf()
    plt.plot([amin, amax], [amin, amax], 'k')
    plt.plot(t[:, 0], t[:, 1], '.', color='green', markersize=5)
    plt.plot(t[:, 0], t[:, 1], '.', color='orange', markersize=8)

    plt.show()
Example #13
def draw_heatmap(name, heatmap_data, x_labels, y_labels, cmap, center=0, path_to_dir=r""):
	#maximum = 0
	#minimum = 0
	#for i in heatmap_data:
	#	for j in i:
	#		if j > maximum: maximum = j
	#		if j < minimum: minimum = j
	#if center is None: plot = sns.heatmap(heatmap_data, xticklabels=x_labels, yticklabels=y_labels, robust=True, cmap=cmap, vmax=maximum, vmin=minimum)
	plot = sns.heatmap(heatmap_data, xticklabels=x_labels, yticklabels=y_labels, center=center, vmin=-1, vmax=1, robust=True, cmap=cmap)
	plt.savefig(Path(Path(path_to_dir) / Path("{}.png".format(name))))
	plt.clf()
Example #14
 def plot(self, output):
     plt.figure(figsize=output.fsize, dpi=output.dpi)
     for ii in range(0, len(self.v)):
         imsize = [self.t[0], self.t[-1], self.x[ii][-1], self.x[ii][0]]
         lim = amax(absolute(self.v[ii])) / output.scale_sat
         plt.imshow(self.v[ii], extent=imsize, vmin=-lim, vmax=lim, cmap=cm.gray, origin='upper', aspect='auto')
         plt.title("%s-Velocity for Trace #%i" % (self.comp.upper(), ii))
         plt.xlabel('Time (s)')
         plt.ylabel('Offset (km)')
         #plt.colorbar()
         plt.savefig("Trace_%i_v%s.pdf" % (ii, self.comp))
         plt.clf()
Example #15
 def plot_training_validation_loss(history):
     plt.clf()
     loss = history.history['loss']
     val_loss = history.history['val_loss']
     epochs = range(1, len(loss) + 1)
     plt.plot(epochs, loss, 'g', label='Training loss')
     plt.plot(epochs, val_loss, 'y', label='Validation loss')
     plt.title('Training and validation loss')
     plt.xlabel('Epochs')
     plt.ylabel('Loss')
     plt.legend()
     plt.show()
Example #16
def plotCentroidFitDiagnostic(img, hdr, ccdMod, ccdOut, res, prfObj):
    """Some diagnostic plots showing the performance of fitPrfCentroid()

    Inputs:
    -------------
    img
        (np 2d array) Image of star to be fit. Image is in the
        format img[row, col]. img should not contain Nans

    hdr
        (Fits header object) header associated with the TPF file the
        image was drawn from

    ccdMod, ccdOut
        (int) CCD module and output of image. Needed to
        create the correct PRF model

    prfObj
        An object of the class prf.KeplerPrf()


    Returns:
    -------------
    **None**

    Output:
    ----------
    A three panel subplot is created
    """
    mp.figure(1)
    mp.clf()
    mp.subplot(131)
    plotTpf.plotCadence(img, hdr)
    mp.colorbar()
    mp.title("Input Image")

    mp.subplot(132)
    c, r = res.x[0], res.x[1]
    bbox = getBoundingBoxForImage(img, hdr)
    model = prfObj.getPrfForBbox(ccdMod, ccdOut, c, r, bbox)
    model *= res.x[2]
    plotTpf.plotCadence(model, hdr)
    mp.colorbar()
    mp.title("Best fit model")

    mp.subplot(133)
    diff = img - model
    plotTpf.plotCadence(diff, hdr)
    mp.colorbar()
    mp.title("Residuals")

    print "Performance %.3f" % (np.max(np.abs(diff)) / np.max(img))
Example #18
def main():

    #DEFINITON OF THE PATHS TO THE FILES WITH THE CONTENT
    path_to_train_accuracy = 'accuracy_train_data.csv'
    path_to_train_loss = 'loss_train_data.csv'
    path_to_validation_accuracy = 'accuracy_validation_data.csv'
    path_to_validation_loss = 'loss_validation_data.csv'

    # CREATE LIST OF NUMBER OF EPOCHS COMPUTED
    eval_indices = range(1, EPOCHS + 1)

    #LOADS THE DATA FROM THE FILES
    accuracy_train, loss_train, accuracy_validation, loss_validation = read_data(path_to_train_accuracy,path_to_train_loss,
                                                                                 path_to_validation_accuracy,path_to_validation_loss)

    #SHOW THE INFORMATION FOR CONTROL OF QUALITY
    print(eval_indices)
    print("Accuracy Train: ",accuracy_train)
    print("Loss Train: " ,loss_train)
    print("Accuracy Validation: ", accuracy_validation)
    print("Loss validation: ", loss_validation)

    # DRAW THE ACCURACY GRAPH FOR VALIDATION AND TRAIN
    plt.clf()
    plt.subplot(211)
    plt.plot(eval_indices, accuracy_train, 'k--', label='TREINO')
    plt.plot(eval_indices, accuracy_validation, 'g-x', label='VALIDAÇÃO')
    plt.legend(loc='upper right')
    plt.xlabel('Épocas')
    plt.ylabel('ACERTO')
    plt.grid(which='major', axis='both')

    # DRAW THE LOSS GRAPH FOR VALIDATION AND TRAIN
    plt.subplot(212)
    # plt.plot(eval_indices, train, 'g-x', label='Train Set Accuracy')
    plt.plot(eval_indices, loss_train, 'r-x', label='TREINO')
    # plt.plot(eval_indices, np.ones(len(eval_indices))/TOT_CLASSES, 'k--')
    plt.plot(eval_indices, loss_validation, 'k--', label='VALIDAÇÃO')
    plt.legend(loc="upper right")
    plt.xlabel('Épocas')
    plt.ylabel('ERRO')
    plt.ylim(0, 1)
    plt.grid(which='both', axis='y')

    plt.subplots_adjust(left=0.2, wspace=0.2, hspace=0.3)

    #SAVES BOTH OF THE GRAPHICS IN ONE FILE NAMED "Learning.png"
    #(SAVE BEFORE plt.show(), OTHERWISE THE FIGURE MAY ALREADY BE CLEARED)
    plt.savefig('Learning.png')

    plt.show()
    plt.pause(0.01)
Example #19
def show_graph(lr_lists, epochs, steps, out_name='test'):
    import matplotlib.pyplot as plt
    plt.clf()
    plt.rcParams['figure.figsize'] = [20, 5]
    x = list(range(epochs * steps))
    plt.plot(x, lr_lists, label="line L")
    plt.plot()
    plt.ylim(10e-5, 1)
    plt.yscale("log")
    plt.xlabel("iterations")
    plt.ylabel("learning rate")
    plt.title("Check Cosine Annealing Learing Rate with {}".format(out_name))
    plt.legend()
    plt.show()
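# `lr_lists` is assumed to hold one learning-rate value per iteration. A
# hypothetical way to fill it with a plain cosine-annealing schedule,
# lr(t) = eta_min + 0.5 * (eta_max - eta_min) * (1 + cos(pi * t / T)),
# before handing it to show_graph():
import math

epochs, steps = 10, 100
eta_max, eta_min = 0.1, 1e-4
T = epochs * steps
lr_lists = [eta_min + 0.5 * (eta_max - eta_min) * (1 + math.cos(math.pi * t / T))
            for t in range(T)]
show_graph(lr_lists, epochs, steps, out_name='cosine')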
Example #20
def plot_feature_importances_cancer(scores):
    names, val_scores = [name for name, _, _, _, _, _, _ in scores
                         ], [score for _, score, _, _, _, _, _ in scores]

    plt.rcParams["figure.figsize"] = [15, 9]
    n_features = len(names)
    plt.barh(range(n_features), val_scores, align='center')
    plt.yticks(np.arange(n_features), names)
    plt.xlabel("Accuracy")
    plt.ylabel("Model")
    path = WORKING_PATH + "/___comparison_cancer_model.png"
    plt.savefig(path)
    plt.clf()
    return path
Example #21
def show_acc(history):
    """ 绘制精度曲线 """
    plt.clf()
    history_dict = history.history
    acc = history_dict['binary_accuracy']
    val_acc = history_dict['val_binary_accuracy']

    epochs = range(1, len(val_acc) + 1)

    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.xlabel('Epochs')
    plt.ylabel('Acc')
    plt.legend()

    plt.show()
Example #22
def graph(x,y,xLabel,yLabel,title,figname):
    plt.clf()
    plt.hist(x,color="c",edgecolor="k",alpha=0.5)
    plt.axvline(np.array(x).mean(),color="k",linestyle="dashed",linewidth=3,label="average")
    plt.xlabel(xLabel)
    plt.ylabel(yLabel)
    plt.title(title)
    
    yAxis = np.arange(0,10,1)
    acRes = [y]
    z = np.array(acRes*10)
    plt.plot(z,yAxis,label="model accuracy")
    p_value = ttest_ind(x,[y])[1]
    plt.plot([],[],label=f"p-value: {np.round(p_value,4)}",color="w")
    plt.legend()
    plt.savefig(figname)
Example #23
def make_mean_carrier(dm):
    # Create the statistics on a data set sorted also by sector.
    mean_count = dm['Carrier'].mean()
    if mean_count.dtype == float and np.isnan(mean_count):
        mean_count = 0
    std_count = dm['Carrier'].std()
    if std_count.dtype == float and np.isnan(std_count):
        std_count = 0
    max_count = dm['Carrier'].max()
    min_count = dm['Carrier'].min()
    top_count_sector = mean_count + std_count

    # Sectors whose carrier count exceeds mean + 1 sigma
    bad_sectors = dm.loc[dm['Carrier'] > top_count_sector]

    sector_count = float(len(dm))
    try:
        bad_sectors = float(len(bad_sectors))  # number of offending sectors
    except TypeError:
        bad_sectors = 0
    # Dropped Call Failure reason
    # Call Final Class qualifier

    print('++++++++++++++++++++\n', 'Total Sector Count = %4d' % sector_count)
    print('\nNumber of Top Offending Sectors = %4d ' % bad_sectors)
    print('%4.2f percent' % (100 * (bad_sectors / sector_count)))
    # Find the cutoff statistics
    print('\nMean =%4d' % mean_count, 'Standard deviation ', std_count,
          'Mean + 1 sigma = %4d' % top_count_sector)
    if False:
        fig = plt.figure()
        plt.clf()
        bin_tick = np.arange(top_count_sector, max_count, 50, dtype=int)

        #dm2.hist()
        #dm2.plot(x=['ECP','Cell'], y='Carrier', kind='line')
        #dm.plot(x=['ECP','Cell'], y='Carrier')
        #plt.subplots(2,2)
        ax1 = fig.add_subplot(2, 2, 1)

        _ = ax1.plot(dm['Carrier'].values, drawstyle='steps-post', label='steps-post')
        ax2 = fig.add_subplot(2, 2, 3)

        bin_tick = np.arange(min_count, max_count, 50, dtype=int)

        _ = ax2.hist(dm['Carrier'], bins=bin_tick)
        ax3 = fig.add_subplot(2, 2, 4)
        norm_cdf(68, std_count, min_count, max_count)
    return top_count_sector
Example #24
def plot_facet(name: str, X, cluster_predictions):
    temp = X.copy().reset_index(drop=True)
    temp[f"{name}_cluster"] = cluster_predictions
    temp = temp.melt(id_vars=f"{name}_cluster")

    means = temp.groupby([f"{name}_cluster", "variable"]).mean().reset_index()

    g = sns.FacetGrid(means,
                      col="variable",
                      hue=f"{name}_cluster",
                      col_wrap=5,
                      height=2,
                      sharey=False)
    g = g.map(plt.bar, f"{name}_cluster", "value").set_titles("{col_name}")
    g.savefig(f"outputs/{name}_facetgrid.png")
    plt.clf()

    return
Example #25
 def plot(self, output):
     plt.figure(figsize=output.fsize, dpi=output.dpi)
     for ii in range(0, len(self.v)):
         imsize = [self.t[0], self.t[-1], self.x[ii][-1], self.x[ii][0]]
         lim = amax(absolute(self.v[ii])) / output.scale_sat
         plt.imshow(self.v[ii],
                    extent=imsize,
                    vmin=-lim,
                    vmax=lim,
                    cmap=cm.gray,
                    origin='upper',
                    aspect='auto')
         plt.title("%s-Velocity for Trace #%i" % (self.comp.upper(), ii))
         plt.xlabel('Time (s)')
         plt.ylabel('Offset (km)')
         #plt.colorbar()
         plt.savefig("Trace_%i_v%s.pdf" % (ii, self.comp))
         plt.clf()
Example #26
 def plot_rewards(self):
     plt.figure(1)
     plt.clf()
     plt.title('Training...')
     plt.xlabel('Time')
     plt.ylabel('Reward')
     cumulative = []
     for i in range(len(self.rewards[self.n_training])):
         if i == 0:
             cumulative.append(self.rewards[self.n_training][i])
         else:
             cumulative.append(self.rewards[self.n_training][i] +
                               cumulative[-1])
     x = np.linspace(2.0,
                     len(self.rewards[self.n_training]),
                     num=len(self.rewards[self.n_training]))
     plt.plot(x, cumulative)
     plt.show()
     plt.pause(0.001)  # pause a bit so that plots are updated
Example #27
def getHistogram2PGN(df):
    y = df.iloc[:, -1]
    c = Counter(y)
    numDiffClasses = len(y.unique())
    target_names = y.unique()
    colors = ['lightcoral', 'gold', 'yellowgreen',
              'cyan'][:numDiffClasses]
    plt.rcParams["figure.figsize"] = [10, 5]
    plt.pie([c[i] / len(y) * 100.0 for i in c],
            labels=target_names,
            colors=colors,
            autopct='%1.1f%%',
            shadow=True,
            startangle=90)
    plt.axis('equal')
    plt.title(df.columns.values[-1])
    path = WORKING_PATH + "/___circle_labels.png"
    plt.savefig(path)
    plt.clf()
    return path
Example #28
 def plotLoss(self):
     """ """
     loss = self.loss_log
     y0, y1 = [], []
     for row in loss:
         y0.append(float(row[0]))
         y1.append(float(row[1]))
     if len(y0) == 0:
         return
     window_size = 100
     window = np.ones(int(window_size))/float(window_size)
     y_av0 = np.convolve(y0, window, 'same')
     y_av1 = np.convolve(y1, window, 'same')
     arr = np.array(y_av0)
     plt.clf()  # Clear.
     plt.title("loss")
     plt.plot(y_av0[:-50])
     #plt.plot(y_av1[:-50])
     plt.ylabel('Smoothed Loss')
     plt.show()
Example #29
def checkCircle(data):
    import matplotlib.pyplot as plot
    # Transform data by rotation so that all data appears in the first and
    # second quadrants (so that positive square root solution is valid)
    rot = np.arctan2(data[-1, 1] - data[0, 1], data[-1, 0] - data[0, 0])
    R = getRotMat(rot + np.pi)
    trans = np.matmul(data, R)
    trans[:, 1] *= -1

    # Display transformed data used for the actual fit
    plot.figure(2)
    plot.clf()
    plot.plot(trans[:, 0], trans[:, 1], '*')
    plot.axis("equal")

    thresh = 0.01
    E = 10
    count = 0
    # Perform gradient descent up to four times with slightly different guesses
    # until a low E solution is found.
    while E > thresh and count < 4:
        count += 1
        xBar = np.mean(trans[:, 0]) * (0.95 + np.random.rand(1) * 0.1)[0]
        yBar = np.min(trans[:, 1]) * (0.95 + np.random.rand(1) * 0.1)[0]
        rBar = np.abs((trans[0, 0] - trans[-1, 0]) /
                      2) * (0.95 + np.random.rand(1) * 0.1)[0]
        x0, y0, r, E = descent(trans, xBar, yBar, rBar)
        print(E)

    # More plotting of transformed data
    circle = plot.Circle((x0, y0), r, color='r', fill=False)
    plot.gca().add_artist(circle)

    # Rotate back to original coordinates
    y0 *= -1
    center = np.matmul([x0, y0], np.linalg.inv(R))
    if E < thresh:
        return center, r
    else:
        return None, None
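# getRotMat() and descent() are defined elsewhere; getRotMat is presumably the
# standard 2-D rotation matrix, sketched here as an assumption:
import numpy as np

def getRotMat(theta):
    # Counter-clockwise rotation by theta radians.
    return np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta),  np.cos(theta)]])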
Example #30
def exampleDiffImgCentroiding():
    k2id =  206103150
    campaign = 3

    ar = mastio.K2Archive()
    fits, hdr = ar.getLongTpf(k2id, campaign, header=True)
    hdr0 = ar.getLongTpf(k2id, campaign, ext=0)
    cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
    idx = np.isfinite(cube)
    cube[~idx] = 0  #Remove Nans

    flags = fits['QUALITY']
    ccdMod = hdr0['module']
    ccdOut = hdr0['output']

    #Compute roll phase
    llc = ar.getLongCadence(k2id, campaign)
    time= llc['TIME']
    cent1 = llc['MOM_CENTR1']
    cent2 = llc['MOM_CENTR2']
    centColRow = np.vstack((cent1, cent2)).transpose()
    rot = arclen.computeArcLength(centColRow, flags>0)
    rollPhase = rot[:,0]
    rollPhase[flags>0] = -9999    #A bad value

    prfObj = prf.KeplerPrf("/home/fergal/data/keplerprf")
    bbox = getBoundingBoxForImage(cube[0], hdr)

    period = 4.1591409
    epoch = fits['time'][491]
    dur = 3.0

    out, log = measureDiffOffset(period, epoch, dur, time, prfObj, \
        ccdMod, ccdOut, cube, bbox, rollPhase, flags)

    idx = out[:,1] > 0
    mp.clf()
    mp.plot(out[:,3]-out[:,1], out[:,4]- out[:,2], 'ro')
    return out
Example #31
def ecdf_sca(data,
             attribute,
             aggregate=False,
             datalinks=range(0, NUM_DATA_LINK),
             save=False):
    if aggregate:
        selected_ds = data[data.name.isin([
            attribute + '-' + str(i) for i in datalinks
        ])].groupby('run').mean()
    else:
        selected_ds = data[data.name == attribute]

    plot_ecdf(selected_ds.value.to_numpy())
    plt.title("ECDF for " + attribute +
              (" (aggregated mean)" if aggregate else ""))

    if save:
        plt.savefig("ecdf_" + attribute + ".pdf", bbox_inches="tight")
        plt.clf()
    else:
        plt.show()
    return
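# plot_ecdf() is not shown on this page; a minimal sketch of such a helper
# (an assumption, not the original):
import numpy as np
import matplotlib.pyplot as plt

def plot_ecdf(values, **kwargs):
    # Empirical CDF: sorted values against their cumulative fraction.
    x = np.sort(np.asarray(values))
    y = np.arange(1, len(x) + 1) / len(x)
    plt.step(x, y, where='post', **kwargs)
    plt.xlabel('value')
    plt.ylabel('ECDF')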
Example #32
def exampleDiffImgCentroiding():
    k2id = 206103150
    campaign = 3

    ar = mastio.K2Archive()
    fits, hdr = ar.getLongTpf(k2id, campaign, header=True)
    hdr0 = ar.getLongTpf(k2id, campaign, ext=0)
    cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
    idx = np.isfinite(cube)
    cube[~idx] = 0  #Remove Nans

    flags = fits['QUALITY']
    ccdMod = hdr0['module']
    ccdOut = hdr0['output']

    #Compute roll phase
    llc = ar.getLongCadence(k2id, campaign)
    time = llc['TIME']
    cent1 = llc['MOM_CENTR1']
    cent2 = llc['MOM_CENTR2']
    centColRow = np.vstack((cent1, cent2)).transpose()
    rot = arclen.computeArcLength(centColRow, flags > 0)
    rollPhase = rot[:, 0]
    rollPhase[flags > 0] = -9999  #A bad value

    prfObj = prf.KeplerPrf("/home/fergal/data/keplerprf")
    bbox = getBoundingBoxForImage(cube[0], hdr)

    period = 4.1591409
    epoch = fits['time'][491]
    dur = 3.0

    out, log = measureDiffOffset(period, epoch, dur, time, prfObj, \
        ccdMod, ccdOut, cube, bbox, rollPhase, flags)

    idx = out[:, 1] > 0
    mp.clf()
    mp.plot(out[:, 3] - out[:, 1], out[:, 4] - out[:, 2], 'ro')
    return out
Example #33
def plot_scores(scores):
    plt.figure(figsize=(15, 6))
    names, val_scores = [name for name, _, _, _, _, _, _ in scores
                         ], [score for _, score, _, _, _, _, _ in scores]
    ax = sns.barplot(x=names, y=val_scores)

    for p, score in zip(ax.patches, val_scores):
        height = p.get_height()
        ax.text(p.get_x() + p.get_width() / 2.,
                height + 0.005,
                '{:1.3f}'.format(score),
                ha="center",
                fontsize=14)

    plt.xlabel('method', fontsize=18)
    plt.ylabel('Mean Val. Accuracy', fontsize=18)
    plt.xticks(rotation=90, fontsize=16)
    plt.yticks(fontsize=16)
    plt.ylim(0.6, 1)
    path = WORKING_PATH + "/___comparison.png"
    plt.savefig(path)
    plt.clf()
    return path
Example #34
def plotdatatree(treeID, scale1, mass1):
	plot_title="Mass Accretion History Tree " + str(treeID)   #Can code the number in with treemax
	x_axis="scale time"
	y_axis="total mass"
	figure_name=os.path.expanduser('~/figureTree' + str(treeID))
	#Choose which type of plot you would like: Commented out.
	plt.plot(scale1, mass1, linestyle="-", marker="o")
	#plt.scatter(scale1, mass1, label="first tree")

	plt.title(plot_title)
	plt.xlabel(x_axis)
	plt.ylabel(y_axis)
	#plt.yscale("log")

	plt.savefig(figure_name)

	#In order to Plot only a single tree on a plot must clear lists before loop. 
	#Comment out to over plot curves.			
	plt.clf()

	clearmass = []
	clearscale = []

	return clearmass, clearscale
Example #35
def train_dcgan_labeled(gen, dis, epoch0=0):
    print('CHAINER begin training');    sys.stdout.flush()

    o_gen = optimizers.Adam(alpha=0.0002, beta1=0.5)
    o_dis = optimizers.Adam(alpha=0.0002, beta1=0.5)
    print('CHAINER begin gen');    sys.stdout.flush()
    o_gen.setup(gen)
    o_dis.setup(dis)
    print('CHAINER begin add');    sys.stdout.flush()
    o_gen.add_hook(chainer.optimizer.WeightDecay(0.00001))
    o_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))
    print('CHAINER begin zvis');    sys.stdout.flush()
    zvis = (xp.random.uniform(-1, 1, (100, nz), dtype=np.float32))  # used below when visualising samples
    print('CHAINER begin for');    sys.stdout.flush()
    for epoch in range(epoch0,n_epoch):
        print("epoch:",epoch)
        sys.stdout.flush()
        perm = np.random.permutation(n_train)
        sum_l_dis = np.float32(0)
        sum_l_gen = np.float32(0)
        
        for i in range(0, n_train, batchsize):
            # discriminator
            # 0: from dataset
            # 1: from noise

            #print "load image start ", i
            x2 = np.zeros((batchsize, 3, 96, 96), dtype=np.float32)
            for j in range(batchsize):
                #try:
                    rnd = np.random.randint(len(dataset))
                    rnd2 = np.random.randint(2)

                    img = np.asarray(Image.open(io.BytesIO(dataset[rnd])).convert('RGB')).astype(np.float32).transpose(2, 0, 1)
                    x2[j,:,:,:] = (img[:,0:96,0:96]-128.0)/128.0
                #except:
                #    print('read image error occured', fs[rnd])
            #print "load image done"
            
            # train generator
            z = Variable(xp.random.uniform(-1, 1, (batchsize, nz), dtype=np.float32))
            x = gen(z)
            yl = dis(x)
            L_gen = F.softmax_cross_entropy(yl, Variable(xp.zeros(batchsize, dtype=np.int32)))
            L_dis = F.softmax_cross_entropy(yl, Variable(xp.ones(batchsize, dtype=np.int32)))
            
            # train discriminator
                    
            x2 = Variable(cuda.to_gpu(x2))
            yl2 = dis(x2)
            L_dis += F.softmax_cross_entropy(yl2, Variable(xp.zeros(batchsize, dtype=np.int32)))
            
            #print "forward done"

            o_gen.zero_grads()
            L_gen.backward()
            o_gen.update()
            
            o_dis.zero_grads()
            L_dis.backward()
            o_dis.update()
            
            sum_l_gen += L_gen.data.get()
            sum_l_dis += L_dis.data.get()
            
            #print "backward done"

            if i%image_save_interval==0:
                pylab.rcParams['figure.figsize'] = (16.0,16.0)
                pylab.clf()
                vissize = 100
                z = zvis
                z[50:,:] = (xp.random.uniform(-1, 1, (50, nz), dtype=np.float32))
                z = Variable(z)
                x = gen(z, test=True)
                x = x.data.get()
                for i_ in range(100):
                    tmp = ((np.vectorize(clip_img)(x[i_,:,:,:])+1)/2).transpose(1,2,0)
                    pylab.subplot(10,10,i_+1)
                    pylab.imshow(tmp)
                    pylab.axis('off')
                pylab.savefig('%s/vis_%d_%d.png'%(out_image_dir, epoch,i))
                
        serializers.save_hdf5("%s/dcgan_model_dis_%d.h5"%(out_model_dir, epoch),dis)
        serializers.save_hdf5("%s/dcgan_model_gen_%d.h5"%(out_model_dir, epoch),gen)
        serializers.save_hdf5("%s/dcgan_state_dis_%d.h5"%(out_model_dir, epoch),o_dis)
        serializers.save_hdf5("%s/dcgan_state_gen_%d.h5"%(out_model_dir, epoch),o_gen)
        print('epoch end', epoch, sum_l_gen/n_train, sum_l_dis/n_train)
Example #36
#find CIs, using ssm

ci_upper=np.zeros( (1,100))
ci_lower=np.zeros( (1,100))
m_boot=np.zeros( (1,100))
 
for i in range(100):
    ci_upper[0,i]=ssm.scoreatpercentile(bootdata[i,:],97.5)
    ci_lower[0,i]=ssm.scoreatpercentile(bootdata[i,:],2.5)
    m_boot[0,i]=np.mean(bootdata[i,:])

print "PLOTTING"

# plot -------------------------------------------
# thank you tomas http://www.staff.ncl.ac.uk/tom.holderness/software/pythonlinearfit
plt.clf()
    
# plot sample data
plot(plot_timespread,'ro',label='Sample observations')
 
# plot line of best fit
plot(m_boot[(0,)],'b-',label='bootstrap_mean')

# plot confidence limits
plot(ci_lower[(0,)],'b--',label='confidence limits (95%)')
plot(ci_upper[(0,)],'b--')

# configure legend
legend(loc=4) #lower left http://matplotlib.org/users/legend_guide.html
leg = gca().get_legend()
ltext = leg.get_texts()
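# `bootdata` is assumed to be a (100, n_boot) array in which row i holds the
# bootstrap replicates (e.g. resampled means) for time point i. A hypothetical
# construction from per-point raw observations (stand-in data, for illustration):
import numpy as np

raw = np.random.randn(100, 50)            # 100 time points x 50 observations each
n_boot = 1000
bootdata = np.empty((100, n_boot))
for i in range(100):
    for b in range(n_boot):
        resample = np.random.choice(raw[i, :], size=raw.shape[1], replace=True)
        bootdata[i, b] = resample.mean()  # bootstrap replicate of the mean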
Example #37
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test  = diabetes.target[-20:]


regr = linear_model.LinearRegression()
regr.fit(diabetes_X_train, diabetes_y_train)

# >> LinearRegression(copy_X=True, fit_intercept=True, normalize=False)
print(regr.coef_)

# The mean square error
np.mean((regr.predict(diabetes_X_test)-diabetes_y_test)**2)

# Explained variance score: 1 is perfect prediction
# and 0 means that there is no linear relationship
# between X and Y.
regr.score(diabetes_X_test, diabetes_y_test) 

# Plot results

pl.clf()          # Clear plots

pl.plot(diabetes_X_test, regr.predict(diabetes_X_test))

pl.title('Linear regression of sample diabetes data')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
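# The snippet above assumes the usual scikit-learn diabetes setup; a hedged
# sketch of the missing preamble (one feature so the 2-D plot makes sense;
# the x/y limits are assumptions):
import numpy as np
import matplotlib.pyplot as pl
from sklearn import datasets, linear_model

diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]   # keep a single feature
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
x_min, x_max = diabetes_X_test.min(), diabetes_X_test.max()
y_min, y_max = diabetes.target.min(), diabetes.target.max()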