Example no. 1
 def plot(self, mispts=None, vec=None, save=False):
     fig = plt.figure(figsize=(5, 5))
     plt.xlim(-1, 1)
     plt.ylim(-1, 1)
     V = self.V
     a, b = -V[1] / V[2], -V[0] / V[2]
     l = np.linspace(-1, 1)
     plt.plot(l, a * l + b, 'k-')
     cols = {1: 'r', -1: 'b'}
     for x, s in self.X:
         plt.plot(x[1], x[2], cols[s] + 'o')
     if mispts:
         for x, s in mispts:
             plt.plot(x[1], x[2], cols[s] + '.')
     if vec is not None:
         aa, bb = -vec[1] / vec[2], -vec[0] / vec[2]
         plt.plot(l, aa * l + bb, 'g-', lw=2)
     if save:
         if not mispts:
             plt.title('N = %s' % (str(len(self.X))))
         else:
             plt.title('N = %s with %s test points' \
                       % (str(len(self.X)), str(len(mispts))))
         plt.savefig('p_N%s' % (str(len(self.X))), \
                     dpi=200, bbox_inches='tight')
Example no. 2
def cobweb(f, x0, n, xmin, xmax, ymin, ymax):
    x = x0
    ynext = f(x)
    X = []
    Y = []
    for i in range(0, n, 2):
        xnew = ynext
        xold = x
        x = xnew
        ynext = f(x)
        X.append(xold)
        X.append(x)
        X.append(x)
        Y.append(xnew)
        Y.append(xnew)
        Y.append(ynext)

    k = np.linspace(0, xmax, n + 1)
    y = f(k)
    i = np.linspace(0, xmax, n + 1)
    plt.figure()
    plt.plot(X, Y, label='cobweb')
    plt.plot(i, i, label='y=x')
    plt.plot(k, y, label='f(x)')
    plt.ylim([0, ymax])
    plt.legend()
    plt.show()
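
A minimal usage sketch for cobweb (my own illustrative values; the logistic map is just one convenient choice of f, and the numpy/matplotlib imports assumed by the function are added explicitly):

import numpy as np
import matplotlib.pyplot as plt

# iterate f(x) = r*x*(1-x) from x0 = 0.2 and draw 40 cobweb steps on [0, 1]
cobweb(lambda x: 3.2 * x * (1 - x), x0=0.2, n=40, xmin=0, xmax=1, ymin=0, ymax=1)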
Example no. 3
def plot_feature_importances_cancer(model, cancer):
    n_features = cancer.data.shape[1]
    plt.barh(np.arange(n_features), model.feature_importances_, align='center')
    plt.yticks(np.arange(n_features), cancer.feature_names)
    plt.xlabel("Feature importance")
    plt.ylabel("Feature")
    plt.ylim(-1, n_features)
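
plot_feature_importances_cancer only needs an object with .data and .feature_names plus a fitted model exposing feature_importances_; a hedged sketch using scikit-learn's breast-cancer dataset, with a random forest chosen purely for illustration:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier

cancer = load_breast_cancer()                          # provides .data and .feature_names
forest = RandomForestClassifier(n_estimators=100, random_state=0)
forest.fit(cancer.data, cancer.target)                 # any estimator with feature_importances_ works
plot_feature_importances_cancer(forest, cancer)
plt.show()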
def check_iid(samples, attribute, mode, aggregate=False, save=False):

    pd.plotting.lag_plot(samples)
    plt.title("Lag-Plot for " + attribute + (" (mean) " if aggregate else ""))

    if aggregate:
        plt.ylim(samples.min().value - samples.std().value,
                 samples.max().value + samples.std().value)
        plt.xlim(samples.min().value - samples.std().value,
                 samples.max().value + samples.std().value)

    plt.savefig(
        f"C:\\Users\\Leonardo Poggiani\\Documents\\GitHub\\PECSNproject\\analysis\\lagPlot\\responseTime\\{mode}.png"
    )
    plt.show()

    pd.plotting.autocorrelation_plot(samples)
    plt.title("Autocorrelation plot for " + attribute +
              (" (mean) " if aggregate else ""))

    plt.savefig(
        f"C:\\Users\\Leonardo Poggiani\\Documents\\GitHub\\PECSNproject\\analysis\\autocorrelation\\responseTime\\{mode}.png"
    )
    plt.show()

    return
Example no. 5
def plot_boundary(model, x, y, **kwargs):
    assert (x.shape[-1] == 2)
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
    if 'h' in kwargs:
        h = kwargs['h']
    else:
        h = 0.1

    x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    x_grid, y_grid = np.meshgrid(np.arange(x_min, x_max, h),
                                 np.arange(y_min, y_max, h))
    Z = model.predict(np.c_[x_grid.ravel(), y_grid.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(x_grid.shape)
    plt.figure()
    plt.pcolormesh(x_grid, y_grid, Z, cmap=cmap_light)

    # Plot also the training points
    plt.scatter(x[:, 0], x[:, 1], c=y, cmap=cmap_bold, edgecolor='k', s=20)
    plt.xlim(x_grid.min(), x_grid.max())
    plt.ylim(y_grid.min(), y_grid.max())

    if 'title' in kwargs:
        plt.suptitle(kwargs['title'])
    if 'accuracy' in kwargs:
        plt.title("Accuracy: %.1f%%" % (kwargs['accuracy'] * 100), fontsize=10)
    plt.show()
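
A possible driver for plot_boundary, assuming scikit-learn is available; k-nearest neighbours on two iris features is only an example of a classifier with two-dimensional input:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()
X2 = iris.data[:, :2]                                  # keep two features so the grid is 2-D
knn = KNeighborsClassifier(n_neighbors=15).fit(X2, iris.target)
plot_boundary(knn, X2, iris.target, h=0.05,
              title='k-NN on two iris features',
              accuracy=knn.score(X2, iris.target))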
Example no. 6
        def GraficarFuncion(self, entrada_tiempo):

            # generate the plot for the entered time
            time = np.arange(0, entrada_tiempo, 0.01)
            x = cord_x(self, time)
            y = cord_y(self, time)

            # complete plot of the launch
            time_complete = np.arange(0, time_impact(self) + 4, 0.01)
            x2 = cord_x(self, time_complete)
            y2 = cord_y(self, time_complete)

            # generate the position point to measure
            x3 = cord_x(self, entrada_tiempo)
            y3 = cord_y(self, entrada_tiempo)

            # plot aesthetics
            mpl.title("Aceleracion")
            mpl.xlim(0, alcance_max(self) + self.x0)
            mpl.ylim(0, altura_max(self) + self.y0)
            mpl.xlabel("-Distancia-")
            mpl.ylabel("-Altura-")

            # draw the curves
            mpl.plot(self.x0, self.y0, "k-o")  # initial position point
            mpl.plot(x, y, "y-")  # user's curve
            mpl.plot(x2, y2, "k--")  # complete launch
            mpl.plot(x3, y3, "r-o")  # user's point
            mpl.grid()  # grid

            # draw the vector with origin at the position point
            mpl.plot(x3, y3 - time_impact(self), "g-o")

            mpl.show()
            return 0
Example no. 7
def plot_decision_regions(X, y, classifier, resolution=0.02):
    # prepare marker and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot decision regions
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # generate grid point
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    # translate features into array and predict
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    # translate result to grid point
    Z = Z.reshape(xx1.shape)
    # plot contour line of grid point
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)

    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot sample by each class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
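
plot_decision_regions likewise only needs a fitted classifier with a predict method and a two-column X; a sketch with logistic regression on two iris features (illustrative choices, not the snippet author's setup):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X2, y = iris.data[:, [0, 2]], iris.target              # sepal length vs. petal length
clf = LogisticRegression(max_iter=200).fit(X2, y)
plot_decision_regions(X2, y, classifier=clf, resolution=0.02)
plt.legend(loc='upper left')
plt.show()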
Example no. 8
def pre_plot():
    labels = [chr(x) for x in range(ord('A'), ord('H')+1)]
    fig, ax = plt.subplots()
    colors = list(reversed(plt.cm.hsv(np.linspace(0, 1, 10))))
    ax.set_prop_cycle(color=colors)  # set_color_cycle is gone; set_prop_cycle is the current API
    plt.ylim(-1,6.5)
    return labels
def cmp_overall_qoe_per_region(regions, regionName):
    google_QoE_folder = geographical_data_folder + "google/dataQoE/"
    azure_QoE_folder = geographical_data_folder + "azure/dataQoE/"
    amazon_QoE_folder = geographical_data_folder + "amazon/dataQoE/"

    google_regional_qoes = load_all_session_qoes_per_region(google_QoE_folder)
    azure_regional_qoes = load_all_session_qoes_per_region(azure_QoE_folder)
    amazon_regional_qoes = load_all_session_qoes_per_region(amazon_QoE_folder)

    google_to_draw = []
    azure_to_draw = []
    amazon_to_draw = []
    for r in regions:
        google_to_draw.extend(google_regional_qoes[r])
        azure_to_draw.extend(azure_regional_qoes[r])
        amazon_to_draw.extend(amazon_regional_qoes[r])

    fig, ax = plt.subplots()

    draw_cdf(google_to_draw, styles[0], "Google Cloud CDN")
    draw_cdf(azure_to_draw, styles[1], "Azure CDN (Verizon)")
    draw_cdf(amazon_to_draw, styles[2], "Amazon CloudFront")

    ax.set_xlabel(r'Session QoE (0-5)', fontsize=18)
    ax.set_ylabel(r'Percentage of PlanetLab users', fontsize=18)
    plt.xlim([0, 5])
    plt.ylim([0, 1])
    plt.legend(loc=2)

    imgName = img_folder + "compare_cloud_cdns_QoE_region_" + regionName
    plt.savefig(imgName + ".jpg")
    plt.savefig(imgName + ".pdf")
    plt.savefig(imgName + ".png")
    plt.show()
Example no. 10
def live_plotter(x_vec, y1_data, line1, identifier='', pause_time=0.1):
    if line1 == []:
        # this is the call to matplotlib that allows dynamic plotting
        plt.ion()
        fig = plt.figure(figsize=(13, 6))
        ax = fig.add_subplot(111)
        # create a variable for the line so we can later update it
        line1, = ax.plot(x_vec, y1_data, '-o', alpha=0.8)
        #update plot label/title
        plt.ylabel('Y Label')
        plt.title('Title: {}'.format(identifier))
        plt.show()

    # after the figure, axis, and line are created, we only need to update the y-data
    line1.set_ydata(y1_data)
    # adjust limits if new data goes beyond bounds
    if np.min(y1_data) <= line1.axes.get_ylim()[0] or np.max(
            y1_data) >= line1.axes.get_ylim()[1]:
        plt.ylim([
            np.min(y1_data) - np.std(y1_data),
            np.max(y1_data) + np.std(y1_data)
        ])
    # this pauses the data so the figure/axis can catch up - the amount of pause can be altered above
    plt.pause(pause_time)

    # return line so we can update it again in the next iteration
    return line1
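
A driver loop for live_plotter, sketched with synthetic streaming data (the sliding random buffer is invented for illustration):

import numpy as np
import matplotlib.pyplot as plt

size = 100
x_vec = np.linspace(0, 1, size + 1)[:-1]
y_vec = np.random.randn(len(x_vec))
line1 = []                                              # empty list triggers figure creation on the first call
for _ in range(50):
    y_vec[-1] = np.random.randn()
    line1 = live_plotter(x_vec, y_vec, line1, identifier='random stream')
    y_vec = np.append(y_vec[1:], 0.0)                   # slide the window for the next sample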
def cmp_overall_qoe_cps():
    google_QoE_folder = geographical_data_folder + "google/dataQoE/"
    azure_QoE_folder = geographical_data_folder + "azure/dataQoE/"
    amazon_QoE_folder = geographical_data_folder + "amazon/dataQoE/"

    google_session_qoes = load_all_session_qoes(google_QoE_folder)
    azure_session_qoes = load_all_session_qoes(azure_QoE_folder)
    amazon_session_qoes = load_all_session_qoes(amazon_QoE_folder)

    fig, ax = plt.subplots()

    draw_cdf(google_session_qoes, styles[0], "Google Cloud CDN")
    draw_cdf(azure_session_qoes, styles[1], "Azure CDN (Verizon)")
    draw_cdf(amazon_session_qoes, styles[2], "Amazon CloudFront")

    ax.set_xlabel(r'Session QoE (0-5)', fontsize=18)
    ax.set_ylabel(r'Percentage of PlanetLab users', fontsize=18)
    plt.xlim([0, 5])
    plt.ylim([0, 1])
    plt.legend(loc=2)

    imgName = img_folder + "compare_cloud_cdns_QoE_overall"
    plt.savefig(imgName + ".jpg")
    plt.savefig(imgName + ".pdf")
    plt.savefig(imgName + ".png")
    plt.show()
Example no. 12
def print_path_prob_figure(filename,
                           bins,
                           histo,
                           dx,
                           path_prob,
                           smooth_path_prob,
                           cutoff=200):
    assert isinstance(filename, str), 'filename must be a string'
    filename = os.path.splitext(filename)[0] + '.png'

    matplotlib = try_import_matplotlib()
    if matplotlib is None:
        return
    else:
        from matplotlib import pyplot as plt

    figure = plt.figure(figsize=(7, 7))
    s = np.sum(histo, axis=0)
    v1 = np.where(s >= cutoff, path_prob, 0)
    v2 = np.where(s < cutoff, path_prob, 0)
    v3 = np.where(s >= cutoff, smooth_path_prob, 0.)
    plt.bar(bins[:-1], v1, width=dx, align='edge', color='red', alpha=1)
    plt.bar(bins[:-1], v2, width=dx, align='edge', color='red', alpha=0.7)
    plt.plot(bins[:-1] + dx / 2, v3, color='orange')
    plt.ylabel('pathogenicity prob.')
    plt.xlabel('predicted score')
    plt.ylim((0, 1))
    figure.savefig(filename, format='png', bbox_inches='tight')
    plt.close()
    plt.rcParams.update(plt.rcParamsDefault)
    LOGGER.info(f'Pathogenicity plot saved to {filename}')
Example no. 13
def graph():
    #Sets some values.
    x1 = datlo['ligand_rms_no_super_X']
    y1 = datlo['interface_delta_X']
    x2 = dathi['ligand_rms_no_super_X']
    y2 = dathi['interface_delta_X']
    #Calls actual max values for ligand_rms_no_super_X and interface_delta_X
    maxrmsd = data['ligand_rms_no_super_X'].max()
    minrmsd = data['ligand_rms_no_super_X'].min()
    maxint = data['interface_delta_X'].max()
    minint = data['interface_delta_X'].min()
    #Following lines define everything about the actual figure
    plt.figure(figsize=[16,9])
    plt.xlim(xmin = minrmsd, xmax = maxrmsd)
    plt.ylim(ymin = minint, ymax = maxint)
    plot1 = plt.scatter(x1,y1, s=4, c='Blue', marker='o')
    plot2 = plt.scatter(x2,y2, s=4, c='Red', marker='o')
    plt.tick_params(axis='both',direction='inout',width=1,length=6,labelsize=13,pad=4)
    plt.title('interface_delta_x vs ligand_rms_no_super_X', size=16)
    plt.xlabel("ligand_rms_no_super_X", fontsize=13)
    plt.ylabel("interface_delta_X", fontsize=13)
    plt.legend(['total_score <= average', 'total_score > average'], markerscale=5, fontsize=12)
    #Prompts user to decide on whether to export png file
    printfile()
    #Displays plot
    plt.show()
Example no. 14
def scatter_plot(P, L, pcIdx1, pcIdx2, letterList, rev):
    fig = plt.figure()
    # following the convention in lecture note ScatterPlot.html
    colors = ["r", "lime", "b", "y", "c", "m", "k", "tan", "pink", "darkred"]
    for i, letter in enumerate(letterList):
        plt.scatter(P[L == letter, pcIdx2],
                    P[L == letter, pcIdx1],
                    s=0.1,
                    c=colors[i],
                    label=letter)
    plt.gca().set_aspect('equal')
    #plt.gca().set_aspect('equal', 'datalim')
    plt.xlabel("Principle Component {}".format(pcIdx2))
    plt.ylabel("Principle Component {}".format(pcIdx1))
    plt.axhline(0, color='grey')
    plt.axvline(0, color='grey')
    plt.ylim([-5000, 5000])
    plt.xlim([-5000, 5000])
    plt.legend()
    plt.gca().invert_yaxis()
    fig.set_size_inches(8, 8)
    fName = os.path.join(
        pDir, 'scatter_PC{}_PC{}_{}_{}.png'.format(pcIdx1, pcIdx2,
                                                   "".join(letterList), rev))
    plt.savefig(fName, bbox_inches='tight')
    plt.show()
Example no. 15
def print_ROC_figure(filename, fpr, tpr, auc_stat):
    assert isinstance(filename, str), 'filename must be a string'
    filename = os.path.splitext(filename)[0] + '.png'

    matplotlib = _try_import_matplotlib()
    if matplotlib is None:
        return
    else:
        from matplotlib import pyplot as plt

    fig = plt.figure(figsize=(7, 7))
    plt.plot([0, 1], [0, 1], linestyle='--', lw=1, color='k')
    plt.plot(fpr,
             tpr,
             linestyle='-',
             lw=2,
             color='r',
             label='AUROC = {:.3f} +/- {:.3f}'.format(*auc_stat))
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('mean ROC curve from cross-validation')
    plt.legend(loc="lower right")
    fig.savefig(filename, format='png', bbox_inches='tight')
    plt.close()
    plt.rcParams.update(plt.rcParamsDefault)
    LOGGER.info(f'ROC plot saved to {filename}')
Example no. 16
def makeFig():
    plt.ylim(0, 40)  # Set y min and max values
    plt.title('Reading Sensor Data')  # Plot title
    plt.grid(True)  # Turn grid on
    plt.ylabel('Temp C')  # Set y-label
    plt.xlabel('Reading count')
    plt.plot(temp_c, 'ro-', label='C')  # plot temperature
    plt.legend(loc='upper left')  # plot legend

    plt.autoscale()
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    plt.plot(thresholds,
             precisions[:-1],
             "b--",
             label="Precision",
             linewidth=2)
    plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
    plt.xlabel("Threshold", fontsize=16)
    plt.legend(loc="upper left", fontsize=16)
    plt.ylim([0, 1])
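
plot_precision_recall_vs_threshold is typically fed by sklearn.metrics.precision_recall_curve; a small self-contained sketch with synthetic labels and deliberately noisy scores:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=500)
y_scores = y_true + rng.normal(scale=0.8, size=500)    # imperfect scores so the curves are non-trivial
precisions, recalls, thresholds = precision_recall_curve(y_true, y_scores)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()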
Example no. 18
def createPlot():
    plt.title("Robot Speed")
    plt.ylim(0, 200)
    plt.ylabel("Speed (cm/s)")
    plt.grid(True)
    plt.plot(Left_Speed, 'ro-', label="Left Speed")
    plt.legend(loc='upper left')
    plt2 = plt.twinx()
    plt.ylim(0, 200)
    plt2.plot(Right_Speed, 'bo-', label="Right Speed")
    plt2.legend(loc='upper right')
Example no. 19
    def plot_various_trial_analyses(self,neuron_ind, var_level):
        plt.figure(figsize=(16, 5))

        #the first thing we want to do is just plot the data average
        #so first get the data for all trials
        neuron_i_data_by_trial = self.by_trial_IT_Neural_Data_objmeans_sorted_by_category[var_level][:, :, neuron_ind]
        #now take the mean over the second dimension -- the trial dimension
        neuron_i_data_trial_mean = neuron_i_data_by_trial.mean(1)
        #for convenience, let's compute the min and max values of the neural response
        minval = neuron_i_data_trial_mean.min()
        maxval = neuron_i_data_trial_mean.max()
        #now let's plot the responses across objects
        plt.plot(neuron_i_data_trial_mean)
        #and block stuff to make the categories easier to see
        plt.fill_between(np.arange(64), minval, maxval,
                         where=(np.arange(64) // 8) % 2, color='k', alpha=0.2)
        plt.xticks(np.arange(0, 64, 8) + 4, self.unique_categories, rotation=30);
        plt.ylabel('Neural Response of neuron %d' % neuron_ind)
        plt.ylim(minval, maxval)
        plt.xlabel('Responses for Variation %s images' % var_level)

        #now let's look at two trials -- the first and 6th ones, for example 
        t1 = 0; t2 = 5
        t1_data = neuron_i_data_by_trial[:, t1]
        t2_data = neuron_i_data_by_trial[:, t2]
        plt.figure(figsize=(12, 5))
        plt.subplot(1, 2, 1)
        plt.plot(t1_data)
        plt.xticks(np.arange(0, 64, 8), self.unique_categories, rotation=30);
        plt.title('Neuron %d, trial %d, var %s' % (neuron_ind, t1, var_level))
        plt.subplot(1, 2, 2)
        plt.plot(t2_data)
        plt.xticks(np.arange(0, 64, 8), self.unique_categories, rotation=30);
        plt.title('Neuron %d, trial %d, var %s' % (neuron_ind, t2, var_level))

        #let's do a scatter plot of the responses to one trial vs the other
        plt.figure()
        plt.scatter(t1_data, t2_data)
        plt.xlabel('responses of neuron %d, trial %d, %s'% (neuron_ind, t1, var_level))
        plt.ylabel('responses of neuron %d, trial %d, %s'% (neuron_ind, t2, var_level))

        #how correlated are they exactly between trials? let's use pearson correlation
        rval = stats.pearsonr(t1_data, t2_data)[0]
        plt.title('Correlation for varlevel %s images = %.3f' % (var_level, rval))

        #in fact, let's have a look at the correlation for all pairs of trials 
        fig = plt.figure(figsize = (7, 7))
        #the numpy corrcoef function basically gets the pairwise pearson correlation efficiently
        corrs = np.corrcoef(neuron_i_data_by_trial.T)
        #now let's plot the matrix of correlations using the matshow function
        plt.colorbar(fig.gca().matshow(corrs))
        plt.xlabel('trials of neuron %d' % neuron_ind)
        plt.ylabel('trials of neuron %d' % neuron_ind)
        plt.title('Between-trial correlations for varlevel %s' % var_level)
Example no. 20
def Plot_Circle(c_list):
    plt.figure()
    plt.gca().set_aspect('equal')
    plt.xlim([-1, 1])
    plt.ylim([-1, 1])
    theta = np.linspace(0, 2 * np.pi, 90)
    for c in c_list:
        x = c.x
        y = c.y
        r = c.radius
        plt.plot(x + r * np.cos(theta), y + r * np.sin(theta), 'r')
    plt.show()
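
Plot_Circle only reads the x, y and radius attributes of each element of c_list, so a small namedtuple works as a stand-in for the caller's circle type (hypothetical example data):

import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple

Circle = namedtuple('Circle', ['x', 'y', 'radius'])
Plot_Circle([Circle(0.0, 0.0, 0.5), Circle(0.3, -0.2, 0.25), Circle(-0.5, 0.5, 0.3)])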
    def make_spider(self, df, row, title, color):
        """
            Function which draw the radar charts of the clusters
            obtained with K-Means
        """
        # number of variable
        categories = list(df)[1:]
        N = len(categories)

        # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
        angles = [n / float(N) * 2 * pi for n in range(N)]
        angles += angles[:1]

        # Initialise the spider plot
        # fig, ax = plt.subplots(2, 2, row + 1, polar=True)
        fig = plt.figure()
        ax = fig.add_subplot(2, 2, row + 1, polar=True)

        # If you want the first axis to be on top:
        ax.set_theta_offset(pi / 2)
        ax.set_theta_direction(-1)

        # Draw one axe per variable + add labels labels yet
        plt.xticks(angles[:-1], categories, color='black', size=8)

        # Draw ylabels
        ax.set_rlabel_position(0)
        plt.yticks([10, 20, 30], ["10", "20", "30"], color="black", size=7)
        plt.ylim(0, 40)

        # Ind1
        values = df.loc[row].drop('group').values.flatten().tolist()
        values += values[:1]
        ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')
        ax.fill(angles, values, color=color, alpha=0.4)

        # Add a title
        # plt.title(title, size=11, color=color, y=1.1)

        # ------- PART 2: Apply to all individuals
        # initialize the figure
        my_dpi = 96
        plt.figure(figsize=(1000 / my_dpi, 1000 / my_dpi), dpi=my_dpi)

        # Create a color palette:
        my_palette = plt.cm.get_cmap("Set2", len(df.index))

        # Loop to plot
        for row in range(0, len(df.index)):
            self.make_spider(df, row=row,
                             title='group ' + df['group'][row],
                             color=my_palette(row))
        plt.show()
def histogram_alignment():
    img = cv2.imread("put.png")
    cv2.imshow('img', img)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    equalized = cv2.equalizeHist(img)
    cv2.imshow('equalized img', equalized)
    hist = cv2.calcHist([equalized], [0], None, [256], [0, 256])
    plt.plot(hist)
    plt.ylim(0, 256)
    plt.show()
    key = cv2.waitKey(0)
Example no. 23
    def plot_points(self):
        points = self.points
        x_pts = [pt[0] for pt in points]
        y_pts = [pt[1] for pt in points]
        col = [pt[3] for pt in points]

        plt.figure()
        plt.scatter(x_pts, y_pts, c=col)
        # plt.axes([0, 10, 0, 10])
        plt.ylim(-15, 15)
        plt.xlim(0, 15)
        # plt.axes(xlim=(-5, 5), ylim=(0, 3.5))
        plt.show()
Example no. 24
def makeFig():
    #plt.ylim(-30,50)
    plt.xticks(numpy.arange(-30,50,1.0))
    plt.grid(True)
    plt.ylabel("Temp")
    plt.plot(temp,'ro-',label= "Celsiusta")
    plt.legend(loc= 'upper left')
    plt2 = plt.twinx()
    plt.ylim(0,100)
    plt2.plot(hum,"b^-",label="Ilmankosteus %")
    plt2.set_ylabel("Ilmankosteus %")
    plt2.ticklabel_format(useOffset= False)
    plt2.legend(loc = 'upper right')
Example no. 25
def plotLoss(train, test):
    import matplotlib.pyplot as plt

    plt.plot(train)
    plt.plot(test)
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.ylim(10**-1.5, 10**3)
    plt.yscale('log')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.savefig('data/loss.png')  # Change later to Files
    plt.show()
Example no. 26
def test_results():
    # testing and plotting curtailment results to compare to aviva's plots

    (plantType, hr, fuelAndCoalType, coolType, fgdType, state,
     capac) = getKeyCurtailParams(gen, genFleet)
    coeffs = getCoeffsForGenOrTech(plantType, coolType, genparam.ptCurtailed,
                                   regCoeffs, genparam.coolDesignT)

    x = np.arange(start=70, stop=110.1, step=0.1)
    y = np.arange(start=20, stop=110.1, step=0.1)

    xy = np.array(np.meshgrid(x, y)).reshape(2, x.shape[0] * y.shape[0]).T

    metAndWaterData = pd.DataFrame({
        'airF': xy[:, 0],
        'rh': xy[:, 1],
        'airF:rh': xy[:, 0] * xy[:, 1]
    })

    hourlyCurtailments = runCurtailRegression(metAndWaterData, coeffs,
                                              genparam.incCurtailments,
                                              plantType, coolType,
                                              genparam.ptCurtailed)

    xx, yy = np.meshgrid(x, y)
    zz = np.array(hourlyCurtailments).reshape(xx.shape)

    fig, ax = plt.subplots(1)

    N = 10
    base = plt.get_cmap('YlGn')
    color_list = base(np.linspace(0, 1, N))
    cmap_name = base.name + str(N)
    my_map = base.from_list(cmap_name, color_list, N)

    bins = np.concatenate(([-np.inf], np.arange(start=0.1, stop=1,
                                                step=0.1), [np.inf]))
    values = np.digitize(zz, bins)

    im = ax.pcolormesh(xx, yy, values, cmap=my_map)

    plt.xlim(70, 110)
    plt.ylim(20, 100)

    cbar = fig.colorbar(im, orientation='vertical')
    cbar.set_ticks([])
    for j in np.arange(N + 1, step=2):
        cbar.ax.text(1, j / N, j / N, ha='left', va='center')

    plt.savefig('./example.png')
    plt.close(fig)
def main():

    #DEFINITON OF THE PATHS TO THE FILES WITH THE CONTENT
    path_to_train_accuracy = 'accuracy_train_data.csv'
    path_to_train_loss = 'loss_train_data.csv'
    path_to_validation_accuracy = 'accuracy_validation_data.csv'
    path_to_validation_loss = 'loss_validation_data.csv'

    # CREATE LIST OF NUMBER OF EPOCHS COMPUTED
    eval_indices = range(1, EPOCHS + 1)

    #LOADS THE DATA FROM THE FILES
    accuracy_train, loss_train, accuracy_validation, loss_validation = read_data(path_to_train_accuracy,path_to_train_loss,
                                                                                 path_to_validation_accuracy,path_to_validation_loss)

    #SHOW THE INFORMATION FOR CONTROL OF QUALITY
    print(eval_indices)
    print("Accuracy Train: ",accuracy_train)
    print("Loss Train: " ,loss_train)
    print("Accuracy Validation: ", accuracy_validation)
    print("Loss validation: ", loss_validation)

    # DRAW THE ACCURACY GRAPH FOR VALIDATION AND TRAIN
    plt.clf()
    plt.subplot(211)
    plt.plot(eval_indices, accuracy_train, 'k--', label='TREINO')
    plt.plot(eval_indices, accuracy_validation, 'g-x', label='VALIDAÇÃO')
    plt.legend(loc='upper right')
    plt.xlabel('Épocas')
    plt.ylabel('ACERTO')
    plt.grid(which='major', axis='both')

    # DRAW THE LOSS GRAPH FOR VALIDATION AND TRAIN
    plt.subplot(212)
    # plt.plot(eval_indices, train, 'g-x', label='Train Set Accuracy')
    plt.plot(eval_indices, loss_train, 'r-x', label='TREINO')
    # plt.plot(eval_indices, np.ones(len(eval_indices))/TOT_CLASSES, 'k--')
    plt.plot(eval_indices, loss_validation, 'k--', label='VALIDAÇÃO')
    plt.legend(loc="upper right")
    plt.xlabel('Épocas')
    plt.ylabel('ERRO')
    plt.ylim(0, 1)
    plt.grid(which='both', axis='y')

    plt.subplots_adjust(left=0.2, wspace=0.2, hspace=0.3)

    #SAVES BOTH OF THE GRAPHICS IN ONE FILE NAMED "Learning.png"
    #(SAVE BEFORE plt.show(), OTHERWISE THE FIGURE MAY ALREADY BE CLEARED)
    plt.savefig('Learning.png')

    plt.show()
    plt.pause(0.01)
def plotHistogram(clustaArray=[]):
    if len(clustaArray) < 1:
        print "Nothing to plot!"
    else:
        # create list of times that maps to each spike
        p_sptimes = []
        for a in clustaArray:
            for b in a.spike_samples:
                p_sptimes.append(b)
        sptimes = np.array(p_sptimes)

        p_clusters = []
        for c in clustaArray:
            for d in c.id_of_spike:
                p_clusters.append(c.id_of_clusta)
        clusters = np.array(p_clusters)

        # dynamically generate cluster list
        clusterList = []
        for a in clustaArray:
            clusterList.append(a.id_of_clusta)

        # plot raster for all clusters
        # nclusters = 20

        # #for n in range(nclusters):
        timesList = []
        for n in clusterList:
            # if n<>9:
            ctimes = sptimes[clusters == n]
            timesList.append(ctimes)
            # plt.plot(ctimes, np.ones(len(ctimes))*n, '|')
        # plt.show()

        # plot frequency in Hz over time
        dt = 1 / 30000.0  # in seconds
        binSize = 1  # in seconds
        binSizeSamples = round(binSize / dt)
        recLen = np.max(sptimes)
        nbins = round(recLen / binSizeSamples)

        binCount = []
        cluster = 3
        for b in np.arange(0, nbins - 1):
            n = np.sum((timesList[cluster] > b * binSizeSamples) & (timesList[cluster] < (b + 1) * binSizeSamples))
            binCount.append(n / binSize)  # makes Hz

        plt.plot(binCount)
        plt.ylim([0, 20])
        plt.show()
Example no. 29
def plotAccuracy(train, test):
    import matplotlib.pyplot as plt

    minplot = min(min(train), min(test))

    plt.plot(train)
    plt.plot(test)
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.ylim(minplot, 1.0)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.savefig('/accuracy.png')  # change later to use Files
    plt.show()
Example no. 30
def show_graph(lr_lists, epochs, steps, out_name='test'):
    import matplotlib.pyplot as plt
    plt.clf()
    plt.rcParams['figure.figsize'] = [20, 5]
    x = list(range(epochs * steps))
    plt.plot(x, lr_lists, label="line L")
    plt.plot()
    plt.ylim(10e-5, 1)
    plt.yscale("log")
    plt.xlabel("iterations")
    plt.ylabel("learning rate")
    plt.title("Check Cosine Annealing Learing Rate with {}".format(out_name))
    plt.legend()
    plt.show()
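
show_graph expects one learning-rate value per iteration (epochs * steps entries); a cosine-annealed schedule is generated below purely as sample input:

import math

epochs, steps = 10, 100
base_lr, min_lr = 0.1, 1e-4
lr_lists = [min_lr + 0.5 * (base_lr - min_lr) * (1 + math.cos(math.pi * i / (epochs * steps)))
            for i in range(epochs * steps)]
show_graph(lr_lists, epochs, steps, out_name='cosine annealing')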
Example no. 31
def plot_value_array(i, predictions_array, true_label, number_of_classes=3):
    predictions_array, true_label = predictions_array, true_label[i]
    plt.style.use(['classic'])
    plt.grid(False)
    plt.xticks(range(number_of_classes))
    plt.yticks([])
    thisplot = plt.bar(range(number_of_classes), 1, color="#FFFFFF")
    plt.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    #print(true_label[0])
    #print(predicted_label)

    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
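
plot_value_array takes the index i, a score vector for that sample, and the full label array; the values below are invented only to show the call shape with number_of_classes=3:

import numpy as np
import matplotlib.pyplot as plt

predictions = np.array([0.1, 0.7, 0.2])   # softmax-like scores for sample i
true_labels = np.array([1, 0, 2])         # labels for the whole batch; only true_labels[i] is used
plot_value_array(0, predictions, true_labels, number_of_classes=3)
plt.show()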
Example no. 32
 def makeFig(self):  # Create a function that makes our desired plot
     plt.ylim(90, 1024)  # Set y min and max values
     plt.title('')  # Plot the title
     plt.grid(True)  # Turn the grid on
     plt.ylabel('p1')  # Set ylabels
     plt.plot(self.d1, 'ro-', label='Potentiometr_1')
     plt.legend(loc='upper left')  # plot the legend
     plt2 = plt.twinx()  # Create a second y axis
     plt.ylim(
         90, 1024
     )  # Set limits of second y axis- adjust to readings you are getting
     plt2.plot(self.d2, 'b^-', label='Potentiometr_2')
     plt2.set_ylabel('p2')  # label second y axis
     plt2.ticklabel_format(
         useOffset=False)  # Force matplotlib to NOT autoscale y axis
     plt2.legend(loc='upper right')  # plot the legend
Example no. 33
def blca(datafile,numprocs): 
	#run c++ blocking routine, saves txt data file with blocking data
	os.system("make --silent")
	os.system("mpirun -n %i blocking.out 100 3000 2 %i %s"%(nprocs,numprocs,datafile))#VMC
#os.system("mpirun -n %i blocking.out 5000 100 20000 %i %s"%(nprocs,numprocs,datafile))#DMC
	#read txt file and save plot
	data = np.genfromtxt(fname=datafile+'.txt')
	fig=plt.figure()
	plt.plot(data[:,0],data[:,2],'k+')
	plt.xlabel(r'$\tau_{trial}$', size=20)
	plt.ylabel(r'$\epsilon$', size=20)
	plt.xlim(np.min(data[:,0]),np.max(data[:,0]))
	plt.ylim(np.min(data[:,2]),np.max(data[:,2]))
	fig.savefig(datafile+'.eps',format='eps')
	#open plot if -p in argv
	if plot_res:
		os.system('evince %s%s '%(datafile+'.eps','&'))
	print("plot saved : %s"%(datafile+'.eps'))
Example no. 34
def create_figure_surface(figid, aspect, xx, yy, cmin, cmax, levels, slices, v3d):
    ''' creates a plot of the surface of a tracer data
    Parameters
    ----------
    figid : int  
            id of figure
    aspect: float
            aspect ratio of figure
    xx    : array 
            scale of x axis
    yy    : array 
            scale of y axis     
    cmin,cmax : array
                minimum and maximum of the color range
    levels : array
            range of contourlines
    slices : array
            location of slices
    v3d    : vector data in geometry format
    Returns
    -------
    plot :  of surface data
    '''
    # prepare matplotlib
    import matplotlib
    matplotlib.rc("font",**{"family":"sans-serif"})
    matplotlib.rc("text", usetex=True)
    #matplotlib.use("PDF")
    import matplotlib.pyplot as plt
    # basemap
    from mpl_toolkits.basemap import Basemap
    # numpy
    import numpy as np

    # data
    vv = v3d[0,:,:,0]
    # shift
    vv = np.roll(vv, 64, axis=1)

    # plot surface
    plt.figure(figid)
    # colormap
    cmap = plt.cm.bone_r
    # contour fill
    p1 = plt.contourf(xx, yy, vv, cmap=cmap, levels=levels, origin="lower")#, hold="on")
    plt.clim(cmin, cmax)
    # contour lines
    p2 = plt.contour(xx, yy, vv, levels=levels, linewidths = (1,), colors="k")#, hold="on")
    plt.clabel(p2, fmt = "%2.1f", colors = "k", fontsize = 14)
    #plt.colorbar(p2,shrink=0.8, extend='both')
    # slices
    #s1 = xx[np.mod(slices[0]+64, 128)]
    #s2 = xx[np.mod(slices[1]+64, 128)]
    #s3 = xx[np.mod(slices[2]+64, 128)]
#    print s1, s2, s3
    #plt.vlines([s1, s2, s3], -90, 90, color='k', linestyles='--')
    # set aspect ratio of axes
    plt.gca().set_aspect(aspect)

    # basemap
    m = Basemap(projection="cyl")
    m.drawcoastlines(linewidth = 0.5)

    # xticks
    plt.xticks(range(-180, 181, 45), range(-180, 181, 45))
    plt.xlim([-180, 180])
    plt.xlabel("Longitude [degrees]", labelpad=8)
    # yticks
    plt.yticks(range(-90, 91, 30), range(-90, 91, 30))
    plt.ylim([-90, 90])
    plt.ylabel("Latitude [degrees]")


    # write to file
    plt.savefig("solution-surface", bbox_inches="tight")
    plt.show()
Example no. 35
		kwarg={'size':6 }
		
		plt.legend(loc='upper right', prop=kwarg)
	
		if log2gene1:
			plt.xscale('log', basex=2)
			plt.xlabel('Expression (log2) %s'%in_gene)
			plt.xlim(xmax=plt.xlim()[1]*1.1)
		else:
			plt.xlabel('%s'%in_gene)
			plt.xlim(xmax=plt.xlim()[1]*1.05) #make room for the legend
		
		if log2gene2:
			plt.yscale('log', basey=2)
			plt.ylabel('Expression (log2) %s'%in_gene2)
			plt.ylim(ymax=plt.ylim()[1]*1.1) #make room for the legend
		else:
			
			plt.ylabel('%s'%in_gene2)
			plt.ylim(ymax=plt.ylim()[1]*1.05) #make room for the legend
		
		
		
		if organism == 'mouse':
			genename1=in_gene.capitalize()
			genename2=in_gene2.capitalize()
		else: #it is human
			genename1= in_gene.upper()
			genename2= in_gene2.upper()
		
		if not fold_change:
    
    gfapLevel = data_genes.loc[data_genes['gene_id']==geneToSearch,sampleName]
    pairs[0,sampleId] = sampleNumCells#gfapLevel#sampleNumCells#spearmanCorrToMeanRep#gfapLevel#sampleNumCells
    pairs[1,sampleId] = spearmanCorrToMeanRep #spearmanCorrToMeanRep
    #print(sampleName)
    #print(gfapLevel)
matplotlib.rcParams.update({'font.size': 18})
#print(sampleNumCells)
plt.scatter(pairs[0,:],pairs[1,:])
#plt.xlabel('Spearman correlation to bio replicate', fontsize=18)
plt.xlabel('Num cells in sample', fontsize=18)
#textToPlot = geneToSearch + ' level, log(1+TPM)'
textToPlot = 'Spearman correlation to mean thalamic value'#'Trp53 level (log(1+TPM))'
plt.ylabel(textToPlot, fontsize=18)
fig = plt.gcf()
plt.ylim((0.85,1))
#plt.xlim((0.85,1))
fig.set_size_inches(10,10)
    
x = pairs[0,:]
y =pairs[1,:]
plt.plot(x, numpy.poly1d(numpy.polyfit(x, y, 1))(x))





stats.spearmanr(pairs[0,:],pairs[1,:])

Example no. 37
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test  = diabetes.target[-20:]


regr = linear_model.LinearRegression()
regr.fit(diabetes_X_train, diabetes_y_train)

# >> LinearRegression(copy_X=True, fit_intercept=True, normalize=False)
print(regr.coef_)

# The mean square error
np.mean((regr.predict(diabetes_X_test)-diabetes_y_test)**2)

# Explained variance score: 1 is perfect prediction
# and 0 means that there is no linear relationship
# between X and Y.
regr.score(diabetes_X_test, diabetes_y_test) 

# Plot results

pl.clf()          # Clear plots

pl.plot(diabetes_X_test, regr.predict(diabetes_X_test))

pl.title('Linear regression of sample diabetes data\n'
         'Centroids are marked with white cross')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
Example no. 38
# Cmu_err_low = list(data['Cmu_err_low'])
# # EE_err_low = list(data['EE_err_low'])
#
# hmu_err_high = list(data['hmu_err_high'])
# Cmu_err_high = list(data['Cmu_err_high'])
# # EE_err_high = list(data['EE_err_high'])
#
#
#
# plt.plot(T, hmu, markersize = 3, lw = 1.5, color = 'k')
# plt.plot(T, S, markersize = 6, lw = 3, alpha = 0.8, marker = 'o', label = '2.5D')



plt.xlim(left = 0, right = 5)
plt.ylim(bottom = -0.05, top = 1.05)

plt.xlabel('$T$', fontsize = 20)
plt.ylabel('Entropy', fontsize = 20, labelpad = 20)

plt.tick_params(axis = 'both', which = 'major', labelsize = 20)

plt.subplots_adjust(left = 0.15, right = 0.92, top = 0.92, bottom = 0.15)
# plt.errorbar(T, EE)
# plt.xlabel('T')
# plt.ylabel('Excess entropy')


# plt.xlim(left = 1.5, right = 3.1)

plt.legend(loc = 4, fontsize = 18)
Example no. 39
import matplotlib.pyplot as plt
import numpy as np

sT = np.arange(0, 40, 5)
k = 15
s0 = 10
c = 2
y0 = np.zeros(len(sT))
y1 = sT - s0  # stock only
y2 = (abs(sT - k) + sT - k) / 2 - c  # long a call
y3 = y1 - y2  # covered call
plt.ylim(-10, 30)
plt.plot(sT, y1)
plt.plot(sT, y2)
plt.plot(sT, y3, "red")
plt.plot(sT, y0, "b-.")
plt.plot([k, k], [-10, 10], "black")
plt.title("Covered call (long one share and short one call)")
plt.xlabel("Stock price")
plt.ylabel("Profit (loss)")
plt.annotate(
    "Stock only (long one share)", xy=(24, 15), xytext=(15, 20), arrowprops=dict(facecolor="blue", shrink=0.01)
)
plt.annotate("Long one share, short a call", xy=(10, 4), xytext=(9, 25), arrowprops=dict(facecolor="red", shrink=0.01))
plt.annotate("Exercise price= " + str(k), xy=(k + 0.2, -10 + 0.5))

plt.show()
Example no. 40
im = plt.imread('chicago.png')
implot = plt.imshow(im)

x = (df['west'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
y = 798-(df['north'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
s = df['currentspeed'] / df['currentspeed'].max()
plt.scatter(x,y,c=s,linewidth=0,s=1000,alpha=0.1)

#x0 = (df.ix[0]['west'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
#y0 = 798-(df.ix[0]['north'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
#plt.scatter(x0,y0,c='r',s=2000)
#x0 = (df.ix[0]['east'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
#y0 = 798-(df.ix[0]['south'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
#plt.scatter(x0,y0,c='r',s=2000)
plt.xlim(0,477)
plt.ylim(798,0)
plt.xticks([])
plt.yticks([])
#plt.plot([df['west'],df['west'],df['east'],df['east'],df['west']],[df['south'],df['north'],df['north'],df['south'],df['south']],linewidth=20,alpha=0.2)


plt.figure(figsize=(15,15))

patches = []
verts = [(df['west'],df['south']),
    (df['west'],df['north']),
    (df['east'],df['north']),
    (df['east'],df['south']),
    (df['west'],df['south'])]
Example no. 41
    Y = list(data_subset[to_plot])
    Y_err = [0] * len(Y)

#    Y_err = list(data_subset[to_plot + '_std'])
#    plt.errorbar(T[:], Y[:], yerr = Y_err, markersize = 5, marker = 'o', label = type)

    plt.plot(T[3:], Y[3:], markersize = 5, lw = 3, marker = 'o', label = type[4:-8] + ' spins')

    # if i == 0:
    #      plt.plot(T[:], Y[:], markersize = 5, lw = 3, marker = 'o', label = '1D')
    # elif i == 1 :
    #      plt.plot(T[1:], Y[1:], markersize = 5, lw = 3, marker = 'o', label = '1.5D')
    # elif i == 2 :
    #      plt.plot(T[1:], Y[1:], markersize = 5, lw = 3, marker = 'o', label = '2D')
    # else:
    #      plot(T[7:], Y[7:], markersize = 5, lw = 3, marker = 'o', label = '2.5D')

plt.xlabel('$T$', fontsize = 20)
plt.ylabel('$E$', fontsize = 20, rotation = 'horizontal', labelpad = 25)

#plt.axvline(x = 2.2, lw = 5, color = 'k', alpha = 0.2)

plt.subplots_adjust(left = 0.15, right = 0.92, top = 0.92, bottom = 0.15)
plt.tick_params(axis = 'both', which = 'major', labelsize = 20)


plt.xlim(left = 0, right = 5)
plt.ylim(bottom = -2.1, top = 0)
legend = plt.legend(fontsize = 18, loc = 2)
plt.show()
            totalhist += hist
        f.close()
        print(totalhist)

        ax.plot(10 ** binmids, totalhist, color=colors[i], linewidth=1.5, alpha=0.8)
    ax.set_yscale("log")
    ax.set_xscale("log")

    ax.set_xlim(5.0e18, 1.0e24)
    ax.set_ylim(1, 1.0e5)

    set_ticks(ax, "0.6")
    ax.xaxis.grid(False, which="minor")
    ax.yaxis.grid(False, which="minor")

    plotlim = mpl.xlim() + mpl.ylim()
    print(plotlim)
    ax.imshow([[0, 0], [1, 1]], cmap=mpl.cm.Greys, interpolation="bicubic", extent=plotlim)

    ax.set_xlabel(r"surface density / $\mathdefault{cm^{-2}}$", fontproperties=tfm, size=15)
    # ax.set_ylabel('d', fontproperties = tfm, size = 15)
    ax.set_ylabel(r"volume weighted PDF", fontproperties=tfm, size=15)

    (time, unit_t) = get_time(infoname)
    timeMyr = time * unit_t / 31557600.0 / 1.0e6
    horiz = 5.0e22
    vert = 2.0e4
    ax.text(
        horiz,
        vert,
        r"%.1f" % timeMyr,