Example #1
def plot_results(predicted_data, true_data):
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()
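
A minimal usage sketch for plot_results, assuming matplotlib.pyplot is imported as plt and both inputs are 1-D sequences:

import numpy as np
import matplotlib.pyplot as plt

true = np.sin(np.linspace(0, 10, 100))
pred = true + np.random.normal(scale=0.1, size=true.shape)   # synthetic "prediction"
plot_results(pred, true)   # overlays both series on one axis and shows the figure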
Example #2
def graph():
    #Sets some values.
    x1 = datlo['ligand_rms_no_super_X']
    y1 = datlo['interface_delta_X']
    x2 = dathi['ligand_rms_no_super_X']
    y2 = dathi['interface_delta_X']
    #Look up the overall max/min values of ligand_rms_no_super_X and interface_delta_X
    maxrmsd = data['ligand_rms_no_super_X'].max()
    minrmsd = data['ligand_rms_no_super_X'].min()
    maxint = data['interface_delta_X'].max()
    minint = data['interface_delta_X'].min()
    #Following lines define everything about the actual figure
    plt.figure(figsize=[16,9])
    plt.xlim(xmin = minrmsd, xmax = maxrmsd)
    plt.ylim(ymin = minint, ymax = maxint)
    plot1 = plt.scatter(x1,y1, s=4, c='Blue', marker='o')
    plot2 = plt.scatter(x2,y2, s=4, c='Red', marker='o')
    plt.tick_params(axis='both',direction='inout',width=1,length=6,labelsize=13,pad=4)
    plt.title('interface_delta_X vs ligand_rms_no_super_X', size=16)
    plt.xlabel("ligand_rms_no_super_X", fontsize=13)
    plt.ylabel("interface_delta_X", fontsize=13)
    plt.legend(['total_score <= average', 'total_score > average'], markerscale=5, fontsize=12)
    #Prompts user to decide on whether to export png file
    printfile()
    #Displays plot
    plt.show()
Example #3
def plot_array(value_array, label="line1"):
    length = len(list(value_array))
    x = range(0, length)
    y = list(value_array)
    plt.plot(x, y, label=label)
    plt.legend()
    plt.show()
Example #4
def polyfit(x_raw):
    raw_array = np.add.accumulate(x_raw) + 10000
    raw_array = raw_array * 100 / float(raw_array[0])

    resultmax = np.max(np.where(raw_array == np.max(raw_array)))
    resultmin = np.max(np.where(raw_array == np.min(raw_array)))
    start_idx = min(resultmin, resultmax)
    length = len(raw_array) - start_idx
    x = list(range(0, length))
    y = raw_array[-length:]
    weights = np.polyfit(x, y, 2)
    result = 2 * weights[0] * x[-1] + weights[1]
    result = result * 100
    # print(result)
    if False:  # flip to True to visualize the quadratic fit
        print(len(raw_array))
        model = np.poly1d(weights)
        x_output = x
        y1 = list(y)
        y2 = model(x_output)
        plt.plot(x_output, y1, label="line 1")
        plt.plot(x_output, y2, label="line 2")
        plt.legend()
        plt.show()
    return ["polyslope"], [round(result * 100, 0)]
def cmp_overall_qoe_per_region(regions, regionName):
    google_QoE_folder = geographical_data_folder + "google/dataQoE/"
    azure_QoE_folder = geographical_data_folder + "azure/dataQoE/"
    amazon_QoE_folder = geographical_data_folder + "amazon/dataQoE/"

    google_regional_qoes = load_all_session_qoes_per_region(google_QoE_folder)
    azure_regional_qoes = load_all_session_qoes_per_region(azure_QoE_folder)
    amazon_regional_qoes = load_all_session_qoes_per_region(amazon_QoE_folder)

    google_to_draw = []
    azure_to_draw = []
    amazon_to_draw = []
    for r in regions:
        google_to_draw.extend(google_regional_qoes[r])
        azure_to_draw.extend(azure_regional_qoes[r])
        amazon_to_draw.extend(amazon_regional_qoes[r])

    fig, ax = plt.subplots()

    draw_cdf(google_to_draw, styles[0], "Google Cloud CDN")
    draw_cdf(azure_to_draw, styles[1], "Azure CDN (Verizon)")
    draw_cdf(amazon_to_draw, styles[2], "Amazon CloudFront")

    ax.set_xlabel(r'Session QoE (0-5)', fontsize=18)
    ax.set_ylabel(r'Percentage of PlanetLab users', fontsize=18)
    plt.xlim([0, 5])
    plt.ylim([0, 1])
    plt.legend(loc=2)

    imgName = img_folder + "compare_cloud_cdns_QoE_region_" + regionName
    plt.savefig(imgName + ".jpg")
    plt.savefig(imgName + ".pdf")
    plt.savefig(imgName + ".png")
    plt.show()
def write_pc_hist(df, p, args):
    # Pivot, and then drop the "cycles" level of the multi-index.
    df = pd.pivot_table(df, index=["pc"], columns="operation")
    df = df.fillna(0)
    df = df.droplevel(level=0, axis="columns")

    # Use the colors key order above to stack the bars,
    # but first we have to pick stalls that are actually IN the CSV (not all are printed)
    cols = [k for k in colors.keys() if k in df.columns]
    df = df[cols]

    # Remove all PCs that were specified using the without flag
    filts = {int(pc, 16) for pc in args.without}
    fi = [pc for pc in df.index if int(pc, 16) not in filts]
    removed = [pc for pc in df.index if int(pc, 16) in filts]

    df = df.loc[fi]

    print(f"Removed PCs: {removed}")

    height = df.shape[0] * (labelsize + 4) / 72
    ax = df.plot.barh(stacked=True, figsize=(11, height), color=colors)
    ax.set_ylabel("Program Counter")
    ax.set_xlabel(f"Cycles * 10^{math.floor(math.log10(ax.get_xlim()[1]))}")
    ax.set_title("HammerBlade Program Counter Cycles Histogram")
    ax.tick_params(labelsize=labelsize)
    fig = ax.get_figure()
    plt.gca().invert_yaxis()
    plt.legend(loc="upper left")
    plt.tight_layout()
    fig.savefig(p / "pc_hist.pdf")
    plt.close(fig)
def draw_learning_curves(X, y, estimator, num_trainings):
    train_sizes, train_scores, test_scores = learning_curve(
        estimator,
        X,
        y,
        cv=None,
        n_jobs=1,
        train_sizes=np.linspace(.1, 1.0, num_trainings))

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.grid()

    plt.title("Learning Curves")
    plt.xlabel("Training examples")
    plt.ylabel("Score")

    plt.plot(train_sizes, train_scores_mean, 'o-', color="g", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="y", label="Cross-validation score")

    plt.legend(loc="best")

    plt.show()
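
A hedged usage sketch for draw_learning_curves, assuming scikit-learn is installed, learning_curve comes from sklearn.model_selection, and the estimator follows the usual fit/predict API:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import learning_curve

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 2))
y = (X[:, 0] + X[:, 1] > 0).astype(int)   # a linearly separable toy problem
draw_learning_curves(X, y, LogisticRegression(), num_trainings=5)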
Example #8
def animate_plotting(subdir_path,):
    average_filename = 'averaged_out.txt'  
    if os.path.exists( os.path.join(subdir_path,average_filename) ):
            print(os.path.join(subdir_path, average_filename) + ' already exists; please use hotPlot.py')
            #import existing data for average at the end           
#            data_out = numpy.genfromtxt(os.path.join(subdir_path,average_filename))
#            averaged_data = numpy.array(data_out[:,1])
#            angles = data_out[:,0]
            #os.remove( os.path.join(subdir_path,average_filename))
    else:
        files = os.listdir(subdir_path)
        #files = [d for d in os.listdir(subdir_path) if os.path.isdir(os.path.join(subdir_path, d))]
        onlyfiles_path = [os.path.join(subdir_path,f) for f in files if os.path.isfile(os.path.join(subdir_path,f))]
        onlyfiles_path = natsort.natsorted(onlyfiles_path)
        averaged_data = []
        angles = []
        for f in onlyfiles_path:
            data = numpy.genfromtxt(f,delimiter = ',')       
            #data = pandas.read_csv(f)
            averaged_data.append(numpy.mean(data))
            angle = os.path.basename(f).split('_')[0]
            angles.append(float(angle))
        plt.plot(angles, averaged_data, 'o', label='averaged data')
        plt.yscale('log')
        plt.xscale('log')
        plt.legend(loc='upper right')
        plt.title(base_path)
        plt.grid(True)
        plt.xlabel(r'$\theta$ $[deg.]$')
        #plt.xlabel(r'$\mathrm{xlabel\;with\;\LaTeX\;font}$')
        plt.ylabel(r'I($\theta$) $[a.u.]$')
Example #9
def test_random_forest(T=1000):
    """
    Method for testing the random forest algorithm with bank dataset.
    Problem #2.2d in HW2 for CS3505
    T = number of times algorithm will be run
    """
    import matplotlib.pyplot as plt

    S_train = read_file('train.csv', "bank")
    S_train, medians, majority = process_bank_data(S_train, "train")
    S_test = read_file('test.csv', "bank")
    S_test, medians, _ = process_bank_data(S_test, "test", medians)
    master_list = create_attribute_dictionary("bank")


    for s in (2,4,6):
        training_errors = []
        testing_errors = []
        for i in range(T):
            ensemble = random_forest(S_train, master_list, i, s)
            training_errors.append(100 - test_ensemble(ensemble, S_train)*100)
            testing_errors.append(100 - test_ensemble(ensemble, S_test)*100)

        #plot the error rates versus the no of trees
        print("Sample size: " + str(s))
        plt.title('Error rates per no of trees\nRandom Forest')
        plt.xlabel('No. of Trees')
        plt.ylabel('Percentage Incorrect')
        plt.plot(training_errors, label="train")
        plt.plot(testing_errors, label="test")
        plt.legend(loc='lower right')
        plt.yticks(np.arange(0, 20, 5))
        plt.show()
Example #10
def vol_tasa(accion='MSFT',tasa_libre='^IRX'):
  ticker = str(accion)
  accion = yf.Ticker(ticker)
  hist = accion.history(period="365d")
  df_movimientos=hist['Close']
  df_movimientos=pd.DataFrame(df_movimientos)
  a=df_movimientos.diff()

  a=a.iloc[1:]
  ultimo_precio=df_movimientos.iloc[-1:]
  volatilidad=np.var(a)**(1/2)

  TBILL = yf.Ticker(str(tasa_libre))
  tasa_libre = TBILL.history(period="1d") 
  r=tasa_libre['Close']


  ultimo_precio=ultimo_precio.values
  volatilidad=volatilidad.values
  r=np.log(1+r.values)

  sns.set()
  hist['Close'].plot(figsize=(16, 9),label='Historia')
  plt.title(ticker)
  plt.legend()
  return ultimo_precio[0][0],volatilidad[0],r[0]
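
A hedged usage sketch; it relies on the yfinance, pandas, numpy, and seaborn imports from the surrounding module plus network access to Yahoo Finance:

ultimo_precio, volatilidad, tasa = vol_tasa('MSFT', '^IRX')
print(ultimo_precio, volatilidad, tasa)   # last price, daily volatility, log of the risk-free rate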
Example #11
def cobweb(f, x0, n, xmin, xmax, ymin, ymax):
	x = x0
	ynext = f(x)
	X = []
	Y = []
	for i in range(0, n, 2):
		xnew = ynext
		xold = x 
		x = xnew
		ynext = f(x)
		X.append(xold)
		X.append(x) 
		X.append(x)
		Y.append(xnew) 
		Y.append(xnew) 
		Y.append(ynext)

	k = np.linspace(xmin, xmax, n+1)
	y = f(k)
	i = np.linspace(xmin, xmax, n+1)
	plt.figure()
	plt.plot(X, Y, label='cobweb')
	plt.plot(i, i, label='y=x')
	plt.plot(k, y, label='cos(x)')
	plt.xlabel('x')
	plt.ylabel('y')
	plt.ylim([ymin, ymax])
	plt.legend()
	plt.show()
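
A hedged usage sketch for cobweb; f must accept numpy arrays as well as scalars (f(k) is evaluated on a linspace), so a ufunc such as np.cos works directly and matches the 'cos(x)' legend label:

import numpy as np
import matplotlib.pyplot as plt

cobweb(np.cos, x0=0.5, n=100, xmin=0.0, xmax=1.5, ymin=0.0, ymax=1.5)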
Example #12
def plot_df(
    df1, df2, plot_title, x_axis, y_axis, plot, save
):  # function for plotting high-dimension and low-dimension eigenvalues
    y1 = df1[df1.columns[1]]
    x1 = df1['M']
    y2 = df2[df2.columns[1]]
    x2 = df2['M']

    plt.figure(figsize=(8, 8))
    plt.plot(x1, y1, color='red', label=str(df1.columns[1]))
    plt.plot(x2, y2, color='blue', label=str(df2.columns[1]))
    plt.grid(color='black', linestyle='-',
             linewidth=0.1)  # parameters for plot grid
    plt.xticks(np.arange(0,
                         max(x1) * 1.1,
                         int(max(x1) / 10)))  # tick interval = max(x1) / 10
    plt.yticks(np.arange(0, max(y1) * 1.1, int(max(y1) / 10)))
    plt.title(plot_title).set_position([0.5, 1.05])
    plt.xlabel(x_axis)
    plt.ylabel(y_axis)
    plt.legend(loc='best')  # let matplotlib pick the best legend position
    if save == 'yes':
        plt.savefig(plot_title)
    if plot == 'yes':
        plt.show()
    plt.close()
def main():
    x_data, y_label = produceData(10, 6, -4, 1000)
    ### Training
    for i in range(2000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_label})

    ### Generate random points in the plane
    X_NUM = produce_random_data(10, 6, -4, 5000)

    ### Sample points near the decision boundary
    X_b = collect_boundary_data(X_NUM)

    ### Plot the data
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    ### Set the axis labels
    plt.xlabel('x1')
    plt.ylabel('x2')
    ax.scatter(x_data[:, 0], x_data[:, 1], marker='x')

    ### Fit the boundary curve to the sampled boundary points; a degree-7 polynomial works best
    z1 = np.polyfit(X_b[:, 0], X_b[:, 1], 7)
    p1 = np.poly1d(z1)
    x = X_b[:, 0]
    x.sort()
    yvals = p1(x)
    plt.plot(x, yvals, 'r', label='boundary line')
    plt.legend(loc=4)

    #plt.ion()
    plt.show()
    print('DONE!')
Example #14
def plotPC(PC1, PC2, labelList):
    """Plots a scatter plot of the any 2 specified dimensions after running PCA."""
    pc1 = [[], [], [], [], [], [], [], [], [], []]
    pc2 = [[], [], [], [], [], [], [], [], [], []]
    for l in range(len(labelList)):
        # l returns a number within a numpy array
        actualNum = labelList[l][0]
        pc1[actualNum].append(PC1[l])
        pc2[actualNum].append(PC2[l])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    colorList = [
        "red", "green", "blue", "black", "gray", "yellow", "cyan", "magenta",
        "burlywood", "purple"
    ]
    for count in range(10):
        plt.scatter(pc1[count],
                    pc2[count],
                    c=colorList[count],
                    lw=0,
                    label=str(count))
    plt.legend(scatterpoints=1)
    ax.set_xlabel("PC1")
    ax.set_ylabel("PC2")
    fig.savefig("2D_10MNistGraph.png")
    plt.close()
Example #15
def print_ROC_figure(filename, fpr, tpr, auc_stat):
    assert isinstance(filename, str), 'filename must be a string'
    filename = os.path.splitext(filename)[0] + '.png'

    matplotlib = _try_import_matplotlib()
    if matplotlib is None:
        return
    else:
        from matplotlib import pyplot as plt

    fig = plt.figure(figsize=(7, 7))
    plt.plot([0, 1], [0, 1], linestyle='--', lw=1, color='k')
    plt.plot(fpr,
             tpr,
             linestyle='-',
             lw=2,
             color='r',
             label='AUROC = {:.3f} +/- {:.3f}'.format(*auc_stat))
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('mean ROC curve from cross-validation')
    plt.legend(loc="lower right")
    fig.savefig(filename, format='png', bbox_inches='tight')
    plt.close()
    plt.rcParams.update(plt.rcParamsDefault)
    LOGGER.info(f'ROC plot saved to {filename}')
Example #16
def print_pred_distrib_figure(filename, bins, histo, dx, J_opt):
    assert isinstance(filename, str), 'filename must be a string'
    filename = os.path.splitext(filename)[0] + '.png'

    matplotlib = _try_import_matplotlib()
    if matplotlib is None:
        return
    else:
        from matplotlib import pyplot as plt

    figure = plt.figure(figsize=(7, 7))
    plt.bar(bins[:-1],
            histo[0],
            width=dx,
            align='edge',
            color='blue',
            alpha=0.7,
            label='neutral')
    plt.bar(bins[:-1],
            histo[1],
            width=dx,
            align='edge',
            color='red',
            alpha=0.7,
            label='deleterious')
    plt.axvline(x=J_opt, color='k', ls='--', lw=1)
    plt.ylabel('distribution')
    plt.xlabel('predicted score')
    plt.legend()
    figure.savefig(filename, format='png', bbox_inches='tight')
    plt.close()
    plt.rcParams.update(plt.rcParamsDefault)
    LOGGER.info(f'Predictions distribution saved to {filename}')
Example #17
def plot_response_time_various_X():
    dataframe = pd.DataFrame()
    index = []
    meanResponseTime = []

    for X in malus:
        index.append(f"X={X}s")

    dataframe['index'] = index
    plt.xticks([k for k in range(len(index))], [k for k in index])

    for X in malus:
        df = scalar_df_parse(f"./csv/pool_classico_vario_X/{modes[1]}{X}.csv")
        response = df[df.name == "queueLength"]
        meanResponseTime.append(response.value.mean())

    plt.plot(index, meanResponseTime, label="mean queue length")

    # plt.xticks(x_pos, index)
    # plt.xticks([k for k in range(len(index))], [k for k in index])
    plt.xlabel("Value of X")
    plt.ylabel("Queue length")
    plt.title("Comparison of various values of X")
    plt.legend(loc='best')
    # plt.savefig(f"./analysis/Experiment2/Queue Length Xvario-mfisso exponential_m{m}.png")
    plt.show()
Example #18
    def plot_stacked_barchart(self, dataframe, sort, title, xlable, ylable, kurs):
        x = []
        tutor=[]
        y = []
        for i in dataframe['tutor']:
            if i not in tutor:
                tutor.append(i)
                y.append([])

        for i,elem in enumerate(dataframe[sort]):
            print(y, elem)
            if elem in x:
                y[tutor.index(dataframe['tutor'][i])][x.index(elem)] += 1
            else:
                x.append(elem)
                for j,elem2 in enumerate(tutor):
                    y[j].append(0)
                y[tutor.index(dataframe['tutor'][i])][x.index(elem)] += 1

        bottoms = [0] * len(x)
        for i, elem in enumerate(y):
            # stack each tutor's bars on top of the previous ones
            plt.bar(range(len(elem)), elem, bottom=bottoms, label=tutor[i])
            bottoms = [b + v for b, v in zip(bottoms, elem)]
        plt.xlabel(xlable)
        plt.ylabel(ylable)
        plt.legend(loc="best")
        plt.savefig('./PDFcreater/Plots/{}/{}.png'.format(kurs,title))
        #plt.show()
        # clear the figure for the next plot
        plt.clf()
        plt.cla()
        plt.close()
Example #19
def plot_response_time_various_X_k():
    dataframe = pd.DataFrame()
    index = []
    error = []

    for k in interarrival_time:
        index.append(f"k={k}s")

    dataframe['index'] = index
    plt.xticks([k for k in range(len(index))], [k for k in index])

    for m in monitoring_time:
        for X in malus:
            meanResponseTime = []

            for k in interarrival_time:
                df = scalar_df_parse(
                    f"./csv/pool_classico_variano_X_k/m={m}s/{modes[0]}{k},{X}.csv"
                )
                response = df[df.name == "responseTime"]
                meanResponseTime.append(response.value.mean())

            plt.plot(index, meanResponseTime, label=f"X={X}s")

        # plt.xticks(x_pos, index)
        # plt.xticks([k for k in range(len(index))], [k for k in index])
        plt.xlabel("Value of k")
        plt.ylabel("Response time")
        plt.title(f"Comparison of various values of X, m={m}s")
        plt.legend(loc='best')
        # plt.savefig(f"./analysis/Experiment2/Queue Length Xvario-mfisso exponential_m{m}.png")
        plt.show()
Example #20
def plot_response_time_variousNDL():
    dataframe = pd.DataFrame()
    index = []

    for k in number_datalink:
        index.append(f"nA={k}")

    dataframe['index'] = index

    for mode in modes:
        meanResponseTime = []
        var = mode.split("-")[0]
        var2 = mode.split("-")[1]

        for i in number_datalink:
            df = scalar_df_parse(f"csv/pool_classico_varia_NA/{mode}{i}.csv")
            response = df[df.name == "responseTime"]
            meanResponseTime.append(response.value.mean())

        dataframe[f'responseTime{mode}{i}'] = meanResponseTime

        plt.plot(meanResponseTime, ":o", label=f"{var} {var2}")

    plt.xticks([k for k in range(len(index))], [k for k in index])
    plt.xticks(rotation=25)
    plt.xlabel("Value of nA")
    plt.ylabel("Response time")
    plt.title("Comparison of various values of nA")
    plt.legend(loc='best')
    plt.savefig(
        "./analysis/variandoNDLeNA/responseTimeAlVariareDinAT=2sk=20msm=3sx=0.05s.png"
    )
    plt.show()
Example #21
    def plot_convergence(self, i, fig_conv):
        """
        Plots the convergence curve
        
        This method plots the convergence of the best fitness and average 
        fitness of the population over each generation. 
        
        Parameters: 
            i: current iteration
            fig_conv: figure that refers to the convergence plot
            
        """

        if i == 1:
            plt.semilogy([i - 1, i],
                         [self.fitness_best[i - 1], self.fitness_best[i]],
                         'b',
                         label='Best merit function')
            plt.semilogy([i - 1, i],
                         [self.fitness_avg[i - 1], self.fitness_avg[i]],
                         'r--',
                         label='Average merit function')
            plt.legend(loc='upper right')
            fig_conv.show()
            plt.pause(0.05)

        elif i > 1:
            plt.semilogy([i - 1, i],
                         [self.fitness_best[i - 1], self.fitness_best[i]], 'b')
            plt.semilogy([i - 1, i],
                         [self.fitness_avg[i - 1], self.fitness_avg[i]], 'r--')
            fig_conv.show()
            plt.pause(0.05)

        return
Example #22
def president():
   for president, texts in speeches()[-7:]:
      growth = list(vocab_growth(texts))[:10000]
      plt.plot(growth, label=president, linewidth=2)
   plt.title('Vocabulary Growth in State-of-the-Union Addresses')
   plt.legend(loc='lower right')
   plt.show()
Example #23
def plot_3df(
    df, plot_title, x_axis, y_axis, plot, save
):  # function for plotting high-dimension and low-dimension eigenvalues
    x1 = df[df.columns[0]]
    y1 = df[df.columns[1]]
    y2 = df[df.columns[2]]
    y3 = df[df.columns[3]]
    min1 = df[df.columns[1]].min()
    min2 = df[df.columns[2]].min()
    min3 = df[df.columns[3]].min()
    df_min = min(min1, min2, min3)
    plt.figure(figsize=(8, 8))
    plt.plot(x1, y1, color='red', label=str(df.columns[1]))
    plt.plot(x1, y2, color='blue', label=str(df.columns[2]))
    plt.plot(x1, y3, color='green', label=str(df.columns[3]))
    plt.grid(color='black', linestyle='-',
             linewidth=0.1)  # parameters for plot grid
    plt.xticks(np.arange(0,
                         max(x1) * 1.1,
                         int(max(x1) / 10)))  # tick interval = max(x1) / 10
    plt.yticks(np.arange(df_min * 0.9, 100, int(max(y1) / 10)))
    plt.title(plot_title).set_position([0.5, 1.05])
    plt.xlabel(x_axis)
    plt.ylabel(y_axis)
    plt.legend(loc='best')  # let matplotlib pick the best legend position
    if save == 'yes':
        plt.savefig(plot_title)
    if plot == 'yes':
        plt.show()
    plt.close()
Example #24
def check_stationarity(df):
    # Determine rolling statistics
    rolling_mean = df.rolling(window=52, center=False).mean()
    rolling_std = df.rolling(window=52, center=False).std()

    # Plot rolling statistics:
    plt.plot(df.index, df.values, color='blue', label='Original')
    plt.plot(rolling_mean.index,
             rolling_mean.values,
             color='red',
             label='Rolling Mean')
    plt.plot(rolling_std.index,
             rolling_std.values,
             color='black',
             label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)

    # Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
    dickey_fuller_test = adfuller(df, autolag='AIC')
    dfresults = pd.Series(dickey_fuller_test[0:4],
                          index=[
                              'Test Statistic', 'p-value', '#Lags Used',
                              'Number of Observations Used'
                          ])
    for key, value in dickey_fuller_test[4].items():
        dfresults['Critical Value (%s)' % key] = value
    print(dfresults)
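
A hedged usage sketch for check_stationarity; it assumes pandas, matplotlib.pyplot as plt, and adfuller from statsmodels.tsa.stattools are imported, and that df is a univariate Series (the window of 52 suggests weekly observations):

import numpy as np
import pandas as pd

idx = pd.date_range("2015-01-04", periods=260, freq="W")
series = pd.Series(np.cumsum(np.random.normal(size=260)), index=idx)   # random walk, non-stationary
check_stationarity(series)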
Example #25
def evaluation(classifier, X_test, y_test, y_pred, ROC=False):
    from sklearn import metrics
    import matplotlib.pyplot as plt
    print ("accuracy score: ", metrics.accuracy_score(y_test, y_pred))
    print ("confusion matrix: ", "\n", metrics.confusion_matrix(y_test, y_pred))
    print ("precision score: ",  metrics.precision_score(y_test, y_pred))
    print ("recall score: ",  metrics.recall_score(y_test, y_pred))
    print ("F1 score: ",  metrics.f1_score(y_test, y_pred))
    print ("AUC: ", metrics.roc_auc_score(y_test, y_pred))

    score = {"accuracy score": metrics.accuracy_score(y_test, y_pred),\
    "confusion matrix": metrics.confusion_matrix(y_test, y_pred), \
    "precision score": metrics.precision_score(y_test, y_pred), \
    "recall score": metrics.recall_score(y_test, y_pred), \
    "F1 score": metrics.f1_score(y_test, y_pred), \
    "AUC" : metrics.roc_auc_score(y_test, y_pred)} 
    if ROC:
        fpr_lr, tpr_lr, _ = metrics.roc_curve(y_test, y_pred)  # or use classifier.predict_proba(X_test)[:,1]

        plt.figure(figsize=(10,6))
        plt.plot(fpr_lr, tpr_lr, color='darkorange', label='Classifier (area = %0.2f)' % metrics.auc(fpr_lr, tpr_lr))
        plt.plot([0, 1], [0, 1], color='black', linestyle='--')
        # plt.xlim([0.0, 1.0])
        # plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig("ROC.eps")
        plt.show()

    return score
def cobweb(f, x0, n, xmin, xmax, ymin, ymax):
	x = x0
	ynext = f(x)
	X = []
	Y = []
	for i in range(0, n, 2):
		xnew = ynext
		xold = x 
		x = xnew
		ynext = f(x)
		X.append(xold)
		X.append(x) 
		X.append(x)
		Y.append(xnew) 
		Y.append(xnew) 
		Y.append(ynext)
	plt.figure()
	plt.plot(X, Y, label='cobweb')

	x1 = np.linspace(0, 1, 200)
	y1 = np.cos(x1)
	plt.plot(x1, y1, label='cos(x)')

	# diagonal y = x line
	diagonal = [0, 1]
	plt.plot(diagonal, diagonal, label='y=x')
	plt.legend()
	plt.show()
def cmp_overall_qoe_cps():
    google_QoE_folder = geographical_data_folder + "google/dataQoE/"
    azure_QoE_folder = geographical_data_folder + "azure/dataQoE/"
    amazon_QoE_folder = geographical_data_folder + "amazon/dataQoE/"

    google_session_qoes = load_all_session_qoes(google_QoE_folder)
    azure_session_qoes = load_all_session_qoes(azure_QoE_folder)
    amazon_session_qoes = load_all_session_qoes(amazon_QoE_folder)

    fig, ax = plt.subplots()

    draw_cdf(google_session_qoes, styles[0], "Google Cloud CDN")
    draw_cdf(azure_session_qoes, styles[1], "Azure CDN (Verizon)")
    draw_cdf(amazon_session_qoes, styles[2], "Amazon CloudFront")

    ax.set_xlabel(r'Session QoE (0-5)', fontsize=18)
    ax.set_ylabel(r'Percentage of PlanetLab users', fontsize=18)
    plt.xlim([0, 5])
    plt.ylim([0, 1])
    plt.legend(loc=2)

    imgName = img_folder + "compare_cloud_cdns_QoE_overall"
    plt.savefig(imgName + ".jpg")
    plt.savefig(imgName + ".pdf")
    plt.savefig(imgName + ".png")
    plt.show()
Example #28
def cobweb(f, x0=0.1, n=200, xmin=0.5, xmax=2.5, ymin=0, ymax=2.5):
    x = x0
    ynext = f(x)
    X = []
    Y = []
    for i in range(0, n, 2):
        xnew = ynext
        xold = x 
        x = xnew
        ynext = f(x)
        X.append(xold)
        X.append(x) 
        X.append(x)
        Y.append(xnew) 
        Y.append(xnew) 
        Y.append(ynext)

    t = np.arange(0, 1.1, 0.1)

    y = []
    for i in range(len(t)):
        y.append(g(t[i]))

    diagonal = range(0, 2)

    plt.figure()
    plt.plot(X, Y, label='cobweb')
    plt.plot(t, y, label='tent map') # tent map
    plt.plot(diagonal, diagonal, label='y=x')  # diagonal line
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.savefig(fname='ex6', dpi=70)
    plt.show()
Example #29
 def draw_ride(self, lx, ly):
     plt.plot(lx, ly, label='ride')
     plt.xlabel('x in meters')
     plt.ylabel('y in meters')
     plt.title("Robot ride")
     plt.legend()
     plt.show()
Example #30
def scatter_plot(P, L, pcIdx1, pcIdx2, letterList, rev):
    fig = plt.figure()
    # following the convention in lecture note ScatterPlot.html
    colors = ["r", "lime", "b", "y", "c", "m", "k", "tan", "pink", "darkred"]
    for i, letter in enumerate(letterList):
        plt.scatter(P[L == letter, pcIdx2],
                    P[L == letter, pcIdx1],
                    s=0.1,
                    c=colors[i],
                    label=letter)
    plt.gca().set_aspect('equal')
    #plt.gca().set_aspect('equal', 'datalim')
    plt.xlabel("Principal Component {}".format(pcIdx2))
    plt.ylabel("Principal Component {}".format(pcIdx1))
    plt.axhline(0, color='grey')
    plt.axvline(0, color='grey')
    plt.ylim([-5000, 5000])
    plt.xlim([-5000, 5000])
    plt.legend()
    plt.gca().invert_yaxis()
    fig.set_size_inches(8, 8)
    fName = os.path.join(
        pDir, 'scatter_PC{}_PC{}_{}_{}.png'.format(pcIdx1, pcIdx2,
                                                   "".join(letterList), rev))
    plt.savefig(fName, bbox_inches='tight')
    plt.show()
Example #31
def train_lstm(dir_train_file, dir_model_save):   
    t = time.time()
    nn = model_lstm
    step_sample = 0.05    # 50 ms overlap between consecutive samples
    train_files = file_name(dir_train_file)
    for len_sample in [0.25]:  # sample duration of 0.25 s
        for f in [16000]:
            for hop_len in [256.0,512.0,1024.0]:
                print('FREQ:', f, hop_len, len_sample)                
                t = time.time()
             
                len_mfcc = get_len_mfcc(len_sample, hop_len, f)    
                step_mfcc = get_step_mfcc(step_sample, hop_len, f)     
                print('len_mfcc',len_mfcc)
                print('step_mfcc',step_mfcc)
                X, Y = generateDatasets(train_files, True, len_mfcc, step_mfcc, hop_len=hop_len, freq=f, dir = dir_train_file, model = "RNN")
                
                rand = np.random.permutation(np.arange(len(Y)))
                X = X[rand]
                Y = Y[rand]
                X = np.array([ np.rot90(val) for val in X ])
                X = X - np.mean(X, axis=0)
            
                print('\nTrain data shape: ', X.shape,'\nNumber of label 0: ', len(Y[Y==0]),'\nNumber of label 1:', len(Y[Y==1]), float(len(Y[Y==0]))/len(Y[Y==1]))
                
                if X.shape[1] == 0:
                    print("NEXT\n")
                    continue
                input_shape = (X.shape[1], X.shape[2]) 
                model = nn(input_shape)            
                earlyStopping = EarlyStopping(monitor='val_loss', min_delta=0.00001, verbose=0, mode='min', patience=5)
                filename = dir_model_save + '/model_RNN_' + str(f) + '_' + str(len_mfcc) + '_' + str(step_mfcc) + '_' + str(hop_len) + '.hdf5'
                checkpoint = ModelCheckpoint(filepath=filename, monitor='val_loss', verbose=0, save_best_only=True)
                callbacks_list = [earlyStopping, checkpoint]
                model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])
                hist = model.fit(X, Y, epochs=100, batch_size=32, shuffle=True, validation_split = 0.2, verbose=1, callbacks=callbacks_list)
                model.summary()    
                print('accuracy:', max(hist.history['accuracy']))
                print('val_accuracy:', max(hist.history['val_accuracy']))
                plt.plot(hist.history['accuracy'])
                plt.plot(hist.history['val_accuracy'])
                plt.title('model accuracy')
                plt.ylabel('accuracy')
                plt.xlabel('epoch')
                plt.legend(['train', 'val'], loc='upper left')
                plt.show()
                print('loss:', min(hist.history['loss']))
                print('val_loss:', min(hist.history['val_loss']))
                plt.plot(hist.history['loss'])
                plt.plot(hist.history['val_loss'])
                plt.title('model loss')
                plt.ylabel('loss')
                plt.xlabel('epoch')
                plt.legend(['train', 'val'], loc='upper left')
                plt.show()
                print("Total training time:", (time.time()-t)/60)
                print("-----------------------------")
                print("-----------------------------")
                print("-----------------------------")
                print("-----------------------------\n\n\n")
Example #32
def viz_losses(filename, losses):
  if '.' not in filename: filename += '.png'

  x = history['epoch']
  legend = list(losses.keys())

  for v in losses.values(): plt.plot(np.arange(len(v)) + 1, v, marker='.')

  plt.title('Loss over epochs')
  plt.xlabel('Epochs')
  plt.xticks(history['epoch'], history['epoch'])
  plt.legend(legend, loc = 'upper right')
  plt.savefig(filename)
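
A hedged usage sketch for viz_losses; note the function also reads a module-level history dict with an 'epoch' key, which is defined here as an assumption:

import numpy as np
import matplotlib.pyplot as plt

history = {'epoch': list(range(1, 11))}                  # global the function reads for the x ticks
losses = {'train': np.linspace(1.0, 0.2, 10),
          'val': np.linspace(1.1, 0.35, 10)}
viz_losses('losses_over_epochs', losses)                 # '.png' is appended automatically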
Example #33
 def __save(self,n,plot,sfile):
     p.figure(figsize=sfile)
     p.xlabel(plot.xlabel)
     p.ylabel(plot.ylabel)
     p.xscale(plot.xscale)
     p.yscale(plot.yscale)
     p.grid()
     for curve in plot.curves: 
         if curve[1] is None: p.plot(curve[0],curve[2], label=curve[3])
         else: p.plot(curve[0],  curve[1], curve[2], label=curve[3])
     p.rc('legend', fontsize='small')
     p.legend(shadow=0, loc='best')
     p.gca().set_aspect(plot.aspect)
     if not plot.dir: plot.dir = './plots/'
     if not plot.name: plot.name = self.__global_name+'_%0*i'%(2,n)
     if not os.path.isdir(plot.dir): os.mkdir(plot.dir)
     if plot.pgf: p.savefig(plot.dir+plot.name+'.pgf')
     else: p.savefig(plot.dir+plot.name+'.pdf', bbox_inches='tight')
     p.close()
Example #34
def fit_background(q,I):
    
    ## Working on background calculation
    ## mkak 2016.09.28
    
    x = q
    y = I
    pfit = np.polyfit(x,y,4)
    yfit = np.polyval(pfit,x)
    #panel.plot(xrd_spectra[0], xrd_spectra[1]-yfit, label='no bkg')
    #panel.plot(xrd_spectra[0], yfit, color='blue', label='bkg')
    
    ### calculation works, but plotting here wipes previous plots - only shows last
    import matplotlib.pyplot as plt
    plt.figure()
    plt.plot(x,y,label='raw data')
    plt.plot(x,yfit,label='background')
    plt.plot(x,y-yfit,label='background subtracted')
    plt.legend()
    plt.show()
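
A hedged usage sketch for fit_background with synthetic data: a narrow Gaussian peak on a smooth polynomial background, assuming numpy is imported as np:

import numpy as np

q = np.linspace(0.5, 6.0, 500)
background = 5.0 + 2.0 * q - 0.3 * q**2
peak = 10.0 * np.exp(-((q - 3.0) ** 2) / (2 * 0.05**2))
fit_background(q, background + peak)   # plots the raw data, fitted background, and their difference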
Example #35
    def display(self, data, candidates, fname, display):
        
        finallist=[]
        for c in candidates:
            finallist.append(c[0])
        #print finallist
        part1 = finallist[:len(finallist)//2]
        part2 = finallist[len(finallist)//2:]
        
        meandiff=int(np.sqrt(np.power(np.mean(part2),2)-np.power(np.mean(part1),2)))
        rangeA = max(part1)-min(part1)
        rangeB = max(part2)-min(part2)
        span = int((rangeA+rangeB)/2)
        dspan = int(meandiff/span)
        theta = float(meandiff/(rangeA+rangeB))
        oneortwo=""
        if dspan >3 and meandiff > 20 or meandiff>36:
            oneortwo = "Two distributions \n\n MD: %d \n Span: %d \n Dspan: %d \n theta: %d" % (meandiff, span, dspan, theta) 
        else:
            oneortwo = "One distribution \n\n MD: %d \n Span: %d \n Dspan: %d \n theta: %d" % (meandiff, span, dspan, theta)

        cans = np.array(candidates)
        plt.plot(cans[:,0],cans[:,1],'ro')
        plt.axhline(max(cans[:,1])/4, color='r')
        plt.axhline(max(cans[:,1]/2), color='r')
        plt.axhline(int(max(cans[:,1]))*0.75, color='r')
        red_patch = mpatches.Patch(color='red', label='75%, 50% and 25% \nof maximum frequency')
        plt.legend(handles=[red_patch])
        plt.ylabel('Frequency of occurence')
        plt.xlabel('separate items')
        plt.title('Frequency distribution estimation graph: %s' %(fname))
        plt.text(max(data)*1.1, max(cans[:,1])*0.62, oneortwo, fontsize = 11, color = 'r')
        plt.hist(data,range(int(min(data)),int(max(data)),1))
        ofile = fname[0:-3]+"png"
        print ("Writing outfile: %s") % (ofile)
        plt.savefig(ofile, bbox_inches='tight')
        if display == True: 
            plt.show()
        return;
        options_data.loc[option]['PRICE'],
        sigma_est=2.,   #Estimate for implied volatility
        it=100)
options_data['IMP_VOL'].loc[option] = imp_vol

futures_data['MATURITY']
# Select the column named MATURITY
options_data.loc[46170]
# Select the data row for index 46170
options_data.loc[46170]['STRIKE']
# Select only the value in column STRIKE
plot_data = options_data[options_data['IMP_VOL'] > 0]
maturities = sorted(set(options_data['MATURITY']))

maturities

#Reiterate over all maturities and plot
import matplotlib.pyplot as plt
#%matplotlib inline
plt.figure(figsize=(8,6))
for maturity in maturities:
    data = plot_data[options_data.MATURITY == maturity]
    #Select data for this maturity
    plt.plot(data['STRIKE'], data['IMP_VOL'], label=maturity.date(), lw=1.5)
    plt.plot(data['STRIKE'], data['IMP_VOL'], 'r.')
plt.grid(True)
plt.xlabel('strike')
plt.ylabel('implied volatility of volatility')
plt.legend()
plt.show()
Example #37
#The fit ellipse equations
#b_f is the minor axis of the fit ellipse
b_f=a*(np.cos(out_x[0][0]))

#equation of fit ellipse only considering inclination
x2=a*(np.cos(t/T))
y2=b_f*(np.sin(t/T))

#equation of the fit ellipse with error values and considering longitude of 
#ascending node
x_f=(x2*np.cos(out_y[0][1]))-(y2*np.sin(out_y[0][1]))
y_f=(x2*np.sin(out_y[0][1]))-(y2*np.cos(out_y[0][1]))
print(out_x, out_y)

#plotting the fit ellipse
plt.plot(x_f,y_f, color='b')


#plot customization   
plt.xlabel("Declination(deg)")
plt.ylabel("Right Ascention(deg)")
plt.title("Position of the pulsar")
#chose these values ti show declination and right ascension
plt.axis([-90, 90, -180, 180])
plt.legend(('Actual ellipse','fit ellipse') , loc='upper right')
plt.locator_params(nbins=10)
#plt.text(i, w, fontsize=20, fontname='Times New Roman')
print "i=", (i), "w=", (w)
plt.show()
Example #38
File: Nch.py  Project: viratupadhyay/ida
xg1000,Ng1000 = ida.Nch_logscale("../Nch/gamma/gam1000.dat")
plt.loglog(xr05/10,Nr05,'d',markersize=8,color=c1,label=r'$\sigma=0.05h_0$')
plt.loglog(xr10/10,Nr10,'o',markersize=8,color=c1,label=r'$\sigma=0.10h_0$')
plt.loglog(xr20/10,Nr20,'s',markersize=8,color=c1,label=r'$\sigma=0.20h_0$')
plt.loglog(xr25/10,Nr25,'^',markersize=8,color=c1,label=r'$\sigma=0.25h_0$')
plt.loglog(xr30/10,Nr30,'v',markersize=8,color=c1,label=r'$\sigma=0.30h_0$')
plt.loglog(xr40/10,Nr40,'<',markersize=8,color=c1,label=r'$\sigma=0.40h_0$')
plt.loglog(xr50/10,Nr50,'>',markersize=8,color=c1,label=r'$\sigma=0.50h_0$')
plt.loglog(xg2/10,Ng2,'d',markersize=8,color=c2,label=r'$\gamma=0.2l_p$')
plt.loglog(xg4/10,Ng4,'o',markersize=8,color=c2,label=r'$\gamma=0.4l_p$')
plt.loglog(xg10/10,Ng10,'s',markersize=8,color=c2,label=r'$\gamma=l_p$')
plt.loglog(xg20/10,Ng20,'^',markersize=8,color=c2,label=r'$\gamma=2l_p$')
plt.loglog(xg40/10,Ng40,'v',markersize=8,color=c2,label=r'$\gamma=4l_p$')
plt.loglog(xg100/10,Ng100,'<',markersize=8,color=c2,label=r'$\gamma=10l_p$')
plt.loglog(xg1000/10,Ng1000,'>',markersize=8,color=c2,label=r'$\gamma=100l_p$')
plt.legend(loc='upper right',prop={'size':17})
plt.xticks((10**0, 10**1, 10**2), (r'$10^{0}$', r'$10^{1}$', r'$10^{2}$'), fontsize=24)
plt.yticks((10**0, 10**1, 10**2, 10**3), (r'$10^{0}$', r'$10^{1}$', r'$10^{2}$', r'$10^{3}$'), fontsize=24)
ax = plt.gca()
ax.tick_params(axis='both',reset=False,which='both',length=10,width=2,direction='in')
ax.yaxis.set_tick_params(length=20,width=2,direction='in')
ax.xaxis.set_tick_params(length=20,width=2,direction='in')
ax.tick_params(axis='both',reset=False,which='minor',length=10,width=1,direction='in')
ax.xaxis.set_tick_params(length=20,width=1,direction='in')
ax.yaxis.set_tick_params(length=20,width=1,direction='in')
plt.annotate(r'$N_{LSA}=173$', fontsize=24, xy=(6, 173), xytext=(9,173), horizontalalignment='left', verticalalignment='center', arrowprops=dict(facecolor='black', shrink=0.05))
plt.xlabel(r'$\mathit{L_w/l_p}$',fontsize=24)
plt.ylabel(r'$\mathit{N(L_{w})}$',fontsize=24)
l1 = [3,80]
l2 = [80,3]
plt.scatter(l1, l2)
Example #39
File: JARID2.py  Project: nrapin/JARID2
	#	print all_classes


		#compute corr coef line.
		lin_fit = numpy.polyfit(numpy.array(fc),numpy.array(fc2),1) # this returns the coef of the polynomial fit
		corr_coef = numpy.corrcoef(numpy.array(fc),numpy.array(fc2))[0][1] # this is R
		line_x = numpy.linspace(numpy.array(fc).min(),numpy.array(fc).max()) # this is to have some points to actually draw the line. 

		plots=[]
		for i,classe in enumerate(all_classes):
			a=plt.plot(numpy.array(fc)[all_classes[classe]],numpy.array(fc2)[all_classes[classe]],'o',alpha=.5, color=colors[i], marker=markers[i], label=classe)
			plots.append(a)
		plots.append( plt.plot(line_x, line_x*lin_fit[0] + lin_fit[1] , '--b', label='$R^2$ = %.2f'%(corr_coef*corr_coef) )) #we append the plot of the line here
		kwarg={'size':6 }
		
		plt.legend(loc='upper right', prop=kwarg)
	
		if log2gene1:
			plt.xscale('log', basex=2)
			plt.xlabel('log2 expression of %s'%in_gene)
			plt.xlim(xmax=plt.xlim()[1] + 2**10)  # make room for the legend
		else:
			plt.xlabel('%s'%in_gene)
			plt.xlim(xmax=plt.xlim()[1]+1000) #make room for the legend
		
		if log2gene2:
			plt.yscale('log', basey=2)
			plt.ylabel('log2 expression of %s'%in_gene2)
			plt.xlim(xmax=plt.xlim()[1] + 2**10)  # make room for the legend
		else:
			
Example #40
    Y = list(data_subset[to_plot])
    Y_err = [0] * len(Y)

#    Y_err = list(data_subset[to_plot + '_std'])
#    plt.errorbar(T[:], Y[:], yerr = Y_err, markersize = 5, marker = 'o', label = type)

    plt.plot(T[3:], Y[3:], markersize = 5, lw = 3, marker = 'o', label = type[4:-8] + ' spins')

    # if i == 0:
    #      plt.plot(T[:], Y[:], markersize = 5, lw = 3, marker = 'o', label = '1D')
    # elif i == 1 :
    #      plt.plot(T[1:], Y[1:], markersize = 5, lw = 3, marker = 'o', label = '1.5D')
    # elif i == 2 :
    #      plt.plot(T[1:], Y[1:], markersize = 5, lw = 3, marker = 'o', label = '2D')
    # else:
    #      plot(T[7:], Y[7:], markersize = 5, lw = 3, marker = 'o', label = '2.5D')

plt.xlabel('$T$', fontsize = 20)
plt.ylabel('$E$', fontsize = 20, rotation = 'horizontal', labelpad = 25)

#plt.axvline(x = 2.2, lw = 5, color = 'k', alpha = 0.2)

plt.subplots_adjust(left = 0.15, right = 0.92, top = 0.92, bottom = 0.15)
plt.tick_params(axis = 'both', which = 'major', labelsize = 20)


plt.xlim(left = 0, right = 5)
plt.ylim(bottom = -2.1, top = 0)
legend = plt.legend(fontsize = 18, loc = 2)
plt.show()
Example #41
#
# hmu_err_high = list(data['hmu_err_high'])
# Cmu_err_high = list(data['Cmu_err_high'])
# # EE_err_high = list(data['EE_err_high'])
#
#
#
# plt.plot(T, hmu, markersize = 3, lw = 1.5, color = 'k')
# plt.plot(T, S, markersize = 6, lw = 3, alpha = 0.8, marker = 'o', label = '2.5D')



plt.xlim(left = 0, right = 5)
plt.ylim(bottom = -0.05, top = 1.05)

plt.xlabel('$T$', fontsize = 20)
plt.ylabel('Entropy', fontsize = 20, labelpad = 20)

plt.tick_params(axis = 'both', which = 'major', labelsize = 20)

plt.subplots_adjust(left = 0.15, right = 0.92, top = 0.92, bottom = 0.15)
# plt.errorbar(T, EE)
# plt.xlabel('T')
# plt.ylabel('Excess entropy')


# plt.xlim(left = 1.5, right = 3.1)

plt.legend(loc = 4, fontsize = 18)

plt.show()
Example #42
tyd = data[['title', 'year']]
tyd['decade'] = decade

tyd.head()

decade_mean = data.groupby(decade).score.mean()
decade_mean.name = 'Decade Mean'
print(decade_mean)

plt.plot(decade_mean.index, decade_mean.values, 'o-',
        color='r', lw=3, label='Decade Average')
plt.scatter(data.year, data.score, alpha=.04, lw=0, color='k')
plt.xlabel("Year")
plt.ylabel("Score")
plt.legend(frameon=False)
remove_border()
#Again, there were AttributeErrors when I tried to create graphs

grouped_scores = data.groupby(decade).score

mean = grouped_scores.mean()
std = grouped_scores.std()

plt.plot(decade_mean.index, decade_mean.values, 'o-',
        color='r', lw=3, label='Decade Average')
plt.fill_between(decade_mean.index, (decade_mean + std).values,
                 (decade_mean - std).values, color='r', alpha=.2)
plt.scatter(data.year, data.score, alpha=.04, lw=0, color='k')
plt.xlabel("Year")
plt.ylabel("Score")
Example #43
adc_cal.set_ogp(ogp0,0)
adc_cal.set_ogp(ogp1,1)

sfdr,sinad = adc_cal.do_sfdr_sinad_cw_sweep(freqarray=freqarray)

sinad_values,freqs= dic2arr(sinad)
sfdr_values,f = dic2arr(sfdr)

data_4=(sfdr_values,sinad_values)

plot_sinad_sfdr (label=label_4, data_x=freqs, data_y=data_4, chans=[0,1],
                     titles=['SFDR','SINAD'])



plt.legend(loc=0)

savefig('/home/sandra/wares_spec/Images/SfdrSinad031_OGPnoise.png')
savefig('/home/sandra/wares_spec/Images/SfdrSinad031_OGPnoise.eps')


"""
For channels 2 and 3
"""
adc_cal.clear_ogp()
adc_cal.clear_inl()

freqarray=[50,800,30]

sfdr,sinad=adc_cal.do_sfdr_sinad_cw_sweep(chans=[2,3], freqarray=freqarray)
    else:
        f = h5py.File('results/' + assembly + '-errors.h5', 'r')
    for j, x in enumerate(x_axis):
        value_keys = f[test][sorted_keys[j]].keys()
        for key in value_keys:
            if 'Kinf_Error' in key:
                kinf_list.append(f[test][sorted_keys[j]][key][...]*10**5)
    plt.plot(x_axis,kinf_list, colors[i] + 'o-', ms = 10, lw = 2)
    f.close()

plt.axis([max(x_axis), 0, 0, 400])
plt.title('Error in K-Infinity')
plt.xlabel('Track Spacing [cm]')
plt.ylabel('K-Infinity Error [pcm]')
plt.grid()
plt.legend(legend)
plt.show()
fig.savefig('K-Infinity-Error-TS.png')

fig = plt.figure()
for i, assembly in enumerate(assembly_list):
    mean_list = []
    filename = assembly + '-trackspacing-errors.h5'
    if os.path.isfile('results/' + filename):
        f = h5py.File('results/' + filename, 'r')
    else:
        f = h5py.File('results/' + assembly + '-errors.h5', 'r')
    for j, x in enumerate(x_axis):
        value_keys = f[test][sorted_keys[j]].keys()
        for key in value_keys:
            if 'Min' in key: