Code Example #1
File: bbvi.py  Project: wonyeol/reparam-nondiff
def plot_res(bm_fname, optz_cfg=None, plot_cfg=None):
    # load bm_fname
    bm = importlib.import_module(bm_fname.rsplit('.', 1)[0])
    e       = bm.e; decorate_stind(e)
    compare = bm.compare
    if optz_cfg is None: optz_cfg = bm.optz_cfg
    if plot_cfg is None: plot_cfg = bm.plot_cfg

    # {optz,plot}_detail
    optz_detail = get_optz_detail(bm_fname, optz_cfg)
    plot_detail = get_plot_detail(bm_fname, optz_cfg, plot_cfg)
    print('\n===== PLOT: %s =====' % plot_detail)

    # load res's from files
    thts_res_l = []
    alg_str_l  = []
    for alg_str in compare:
        thts_res = load_res(optz_detail, alg_str)
        thts_res = [(t,thts) for (t,thts,_,_,_) in thts_res]
        thts_res_l += [thts_res]
        alg_str_l  += [alg_str]
    
    # plot & save graph
    plot_fname = '%s%s' % (plot_detail, PLOT_EXT)
    text_fname = plot_fname[:-len(PLOT_EXT)] + RES_EXT
    objc_func = lambda thts, e=e: elbo_val.elbo_val(e, thts,
                                                    sample_n = plot_cfg['sample_n'])
    util.plot_graph(thts_res_l, objc_func,
                    plot_fname = plot_fname,
                    text_fname = text_fname,
                    legend_l = alg_str_l,
                    step = plot_cfg['step'])
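
Usage note: plot_res only needs the benchmark module's file name; when optz_cfg or plot_cfg is omitted it falls back to the module's own optz_cfg and plot_cfg. A minimal call sketch, assuming a hypothetical benchmark module name (not a file from the project):

# 'bm_example.py' is a hypothetical benchmark module assumed to define
# e, compare, optz_cfg and plot_cfg as used by plot_res above.
plot_res('bm_example.py')
# Override only the plot configuration; 'sample_n' and 'step' are the keys
# plot_res reads from plot_cfg.
plot_res('bm_example.py', plot_cfg={'sample_n': 1000, 'step': 10})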
Code Example #2
def registry(filename, nf, ptitle, kfstart=2, kfend=5, kstart=1, kend=5):
	''' Runs the experiment: for each fold count it computes the mean
		accuracy and standard deviation, then plots the corresponding graph.
	'''
	dataset = load_dataset(filename)
	kf_accuracy = []
	for kf in range(kfstart, kfend+1):
		kf_accuracy.append(get_Allknn_acc_for_kfold(dataset, kf, kstart, kend, nf))
	kf_mean_acc = [sum(acclist)/len(acclist) for acclist in kf_accuracy]
	sd = [numpy.std(acclist) for acclist in kf_accuracy]

	for kf, acclist in zip(range(kfstart, kfend+1), kf_accuracy):
		print(kf, "fold validation ===> accuracy of", sum(acclist)/len(acclist))
	# print(kf_mean_acc)
	mean_sd = sum(sd)/len(sd)
	mean_acc = sum(kf_mean_acc)/len(kf_mean_acc)
	print("Mean accuracy : ", mean_acc)
	print("Mean S.D : ", mean_sd)
	plot_graph(kf_accuracy, kstart, kend, sd, ptitle)
Code Example #3
def registry(filename, nf, ptitle, kfstart=2, kfend=5, kstart=1, kend=5):
    ''' Runs the experiment: for each fold count it computes the mean
        accuracy and standard deviation, then plots the corresponding graph.
    '''
    dataset = load_dataset(filename)
    kf_accuracy = []
    for kf in range(kfstart, kfend + 1):
        kf_accuracy.append(
            get_Allknn_acc_for_kfold(dataset, kf, kstart, kend, nf))
    kf_mean_acc = [sum(acclist) / len(acclist) for acclist in kf_accuracy]
    sd = [numpy.std(acclist) for acclist in kf_accuracy]

    for kf, acclist in zip(range(kfstart, kfend + 1), kf_accuracy):
        print(kf, "fold validation ===> accuracy of",
              sum(acclist) / len(acclist))
    # print(kf_mean_acc)
    mean_sd = sum(sd) / len(sd)
    mean_acc = sum(kf_mean_acc) / len(kf_mean_acc)
    print("Mean accuracy : ", mean_acc)
    print("Mean S.D : ", mean_sd)
    plot_graph(kf_accuracy, kstart, kend, sd, ptitle)
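
Neither copy above shows get_Allknn_acc_for_kfold; what matters for plot_graph is that it returns one accuracy list per fold count, with one entry per k in [kstart, kend]. A hedged sketch of such a helper using scikit-learn, assuming the dataset is a NumPy array whose first nf columns are features and whose last column is the label (the body is an assumption; only the call signature comes from the example):

from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsClassifier

def knn_acc_for_kfold(dataset, kf, kstart, kend, nf):
    # Assumed layout: first nf columns are features, last column is the label.
    X, y = dataset[:, :nf], dataset[:, -1]
    accs = []
    for k in range(kstart, kend + 1):
        clf = KNeighborsClassifier(n_neighbors=k)
        # mean accuracy over kf folds for this choice of k
        scores = cross_val_score(clf, X, y, cv=KFold(n_splits=kf, shuffle=True))
        accs.append(scores.mean())
    return accs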
Code Example #4
File: lstm.py  Project: kthoma46/masters-thesis
        X_test, y_test = convert_to_nn_input(predict_df,
                                             look_back=look_back,
                                             look_forward=look_forward)

        model = Sequential()
        model.add(LSTM(10, input_shape=(look_back * no_of_features, 1)))
        model.add(Dense(1, activation='relu'))
        model.compile(optimizer='adam', loss='mse')

        X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
        X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
        history = model.fit(X_train,
                            y_train,
                            epochs=100,
                            validation_data=(X_test, y_test),
                            shuffle=False)
        y_test_predictions = model.predict(X_test)

        # Calculate the RMSE and MAE between ground truth and predictions and
        # add to the CSV file
        rmse_val = mean_squared_error(y_test, y_test_predictions)**0.5
        mae_val = mean_absolute_error(y_test, y_test_predictions)
        csv_file.add_row(
            [company_name, regressor_name,
             str(rmse_val),
             str(mae_val)])

        # Plot the graph comparing ground truth with predictions
        plot_graph(y_test, y_test_predictions, [i for i in range(len(y_test))],
                   directory + company_name + "_" + regressor_name + ".png")
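
The reshape to (samples, timesteps, 1) above assumes convert_to_nn_input returns flattened look_back windows, one row per sample, which is not shown in the excerpt. A hedged sketch of what such a windowing helper typically does (the name, argument layout and label offset are assumptions):

import numpy as np

def make_windows(features, targets, look_back=5, look_forward=1):
    # features: array of shape (timesteps, no_of_features); targets: (timesteps,)
    X, y = [], []
    for i in range(look_back, len(features) - look_forward + 1):
        # flatten the previous look_back rows into one sample ...
        X.append(features[i - look_back:i].reshape(-1))
        # ... and pair it with the target look_forward steps ahead
        y.append(targets[i + look_forward - 1])
    return np.array(X), np.array(y)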
Code Example #5
# A large batch size of 64 reviews is used to space out weight updates.
history = model.fit(X_train,
                    y_train,
                    batch_size=64,
                    epochs=3,
                    verbose=1,
                    validation_data=(X_test, y_test))
# Evaluation of the model with training data
scores_train = model.evaluate(X_train, y_train, verbose=0)
print("Training Data: ")
print(
    "Accuracy: %.2f%%, F_1Score: %.2f%% , Precision: %.2f%%, Recall: %.2f%% " %
    (scores_train[1] * 100, scores_train[2] * 100, scores_train[3] * 100,
     scores_train[4] * 100))

# Evaluation of the model with test data
scores = model.evaluate(X_test, y_test, verbose=0)
print("Test Data:")
print(
    "Accuracy: %.2f%%, F_1Score: %.2f%% , Precision: %.2f%%, Recall: %.2f%%" %
    (scores[1] * 100, scores[2] * 100, scores[3] * 100, scores[4] * 100))

if PLOT_GRAPH:
    plot_graph(history)

if PLOT_MODEL:
    img_file = 'model_diagrams/cnn.png'
    keras.utils.plot_model(model,
                           to_file=img_file,
                           show_shapes=True,
                           show_layer_names=True)
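
Reading scores_train[1:5] and scores[1:5] as accuracy, F1, precision and recall only works if the model was compiled with those metrics in that order (index 0 is the loss). The compile step is not shown above; a hedged sketch of one common way to supply it with Keras backend helpers (f1_m, precision_m, recall_m and the loss/optimizer choices are assumptions, not part of the original project):

from keras import backend as K

def precision_m(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())

def recall_m(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())

def f1_m(y_true, y_pred):
    p, r = precision_m(y_true, y_pred), recall_m(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())

# `model` is the network built earlier in the example; the metric order here
# is what makes scores[1]..scores[4] line up with the print statements above.
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy', f1_m, precision_m, recall_m])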
Code Example #6
x_vals = np.array([1.1, 2.2, 3.3, 4.4]).astype(float)
y_vals = np.array([5.5, 6.6, 7.7, 8.8]).astype(float)

# read files
pass

# create plots
# plotting is gated behind boolean flags so interactive plots do not open by default
if False:
    util.plot_normal(0, 1)
if True:
    X_range = np.array(range(0, len(x_vals)))
    plot1 = util.getYPlotObj(x_vals, "line", "X1", "red")
    plot2 = util.getYPlotObj(y_vals, "scatter", "X2", "green")
    util.plot_graph(X_range, [plot1, plot2], showLegend=True)

# getStats
[x_mean, x_var, x_min, x_max] = util.getStats(x_vals)
[x_mean, x_var, x_min, x_max] = util.getStats(x_vals, printStats=True)

# tests
result = util.walds_test_2_population(x_vals, y_vals, thres=1.962)
result = util.paired_t_test(x_vals, y_vals, thres=1.962)
result = util.permutation_test(x_vals, y_vals, thres=1.962)

# time series
[average_error, errors, predictions] = util.make_predictions(x_vals,
                                                             method="ewma",
                                                             ewma_factor=0.5)
# [average_error, errors, predictions] = util.make_predictions(x_vals, method="seasonal", season_factor=2)
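
util.make_predictions with method="ewma" presumably performs exponentially weighted moving-average one-step-ahead forecasting. A hedged sketch of that idea, mirroring the return order used above (the exact behaviour of util is an assumption):

import numpy as np

def ewma_predictions(values, ewma_factor=0.5):
    values = np.asarray(values, dtype=float)
    preds = np.empty_like(values)
    preds[0] = values[0]  # seed the forecast with the first observation
    for t in range(1, len(values)):
        # one-step-ahead forecast: blend the last observation with the last forecast
        preds[t] = ewma_factor * values[t - 1] + (1 - ewma_factor) * preds[t - 1]
    errors = np.abs(values - preds)
    return errors.mean(), errors, preds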
Code Example #7
    spearman_connectivity,
    community_alg=c_a,
    thresh_func=t_f,
    reorder=True,
    threshold=t,
    layout='circle',
    plot_threshold=plot_t,
    print_options={'lookup': {}},
    plot_options={'inline': False})

subgraph = community_reorder(get_subgraph(G_w, 3))
print_community_members(subgraph)
subgraph_visual_style = get_visual_style(subgraph,
                                         vertex_size='eigen_centrality')
plot_graph(subgraph,
           visual_style=subgraph_visual_style,
           layout='circle',
           inline=False)

#**********************************
# Threshold and Stability Analysis
#**********************************

# plot number of communities as a function of threshold
thresholds = np.arange(.15, .35, .01)
partition_distances = []
cluster_size = []
for t in thresholds:
    # simulate this threshold 100 times
    clusters = simulate(100,
                        fun=lambda: Graph_Analysis(
                            corr_data, threshold=t, weight=True, display=False)