def test_different_training(color_channels, no_of_persons, samples_person, combining_functions):
    plot_number_of_training_samples = []
    x_min = 1
    x_max = samples_person - 1
    number_of_diff_trainings = x_max + 1 - x_min
    number_of_tests = 15
    number_of_results = 3
    plot_recognition_rate = numpy.empty((number_of_results, number_of_tests * number_of_diff_trainings))
    count = 0
    for test_no in range(number_of_tests):
        sys.stdout.write("\r%d%%" % (test_no * 100 // number_of_tests))
        sys.stdout.flush()
        for samples_training in range(x_min, x_max + 1):
            results = train_and_test(
                color_channels, no_of_persons, samples_person, samples_training, combining_functions)

            plot_number_of_training_samples.append(samples_training)
            plot_recognition_rate[:, count] = results
            count += 1

    print()

    # Plot results:
    plot_results(
        x_axis=plot_number_of_training_samples,
        y_axis=plot_recognition_rate,
        x_min=x_min,
        x_max=x_max,
        labels=[name for func, name in combining_functions]
    )
Example #2
def process_postproc(postproc_list, title, results, outfilename, remote=False):
    """Runs the post-processing operations, such as plotting.
	postproc_list: list of post processing operations as returned by main()
	title: the deck title
	results: the results to be plotted (including the ones that are not needed)
	outfilename: if the plots are saved to disk, this is the filename without extension
	remote: boolean, do not show plots if True (such as ssh without X11 forwarding)

	Returns: None
	"""
    index = 0
    if outfilename == 'stdout':
        printing.print_warning(
            "Plotting and printing the results to stdout are incompatible options. Plotting skipped."
        )
        return
    for postproc in postproc_list:
        #print postproc["analysis"], results.keys(), results.has_key(postproc["analysis"]), results[postproc["analysis"]] is None #DEBUG
        plotting.plot_results(
            title, postproc["x"], postproc["l2l1"],
            results[postproc["analysis"]],
            "%s-%d.%s" % (outfilename, index, options.plotting_outtype))
        index = index + 1
    if len(postproc_list) and not remote:
        plotting.show_plots()
    return None
def sweep_train_ratio(train_images, test_images):
    train_ratio_list = np.arange(0.1, 1, 0.1)
    loss_list = []

    # Initialise computational graph
    print("Initialising computational graph...")
    csr = ContinuousSetRegressor()
    for train_ratio in train_ratio_list:
        model_name = "CSR, TR = {:.4}".format(train_ratio)
        loss, _ = train_csr(
            csr,
            train_images,
            test_images,
            train_ratio=train_ratio,
            model_name=model_name,
        )
        loss_list.append(loss)

    print(train_ratio_list, loss_list)
    plot_results(train_ratio_list,
                 loss_list,
                 "Train ratio",
                 title="Final loss vs train ratio (max 75 eval points)",
                 filename="results/sweep_train_ratio")
    np.savetxt("results/sweep_train_ratio.txt", [train_ratio_list, loss_list],
               fmt="%10.5g")
    tf.reset_default_graph()
def sweep_noise_var(train_images, test_images):
    noise_var_list = np.arange(0.0, 0.2, 0.01)
    loss_list = []

    print("Initialising computational graph...")
    csr = ContinuousSetRegressor()
    for noise_std in noise_var_list:
        model_name = "CSR, noise var = {:.4}".format(noise_std)
        loss, _ = train_csr(csr,
                            train_images,
                            test_images,
                            noise_std=noise_std,
                            model_name=model_name,
                            save_model=True)
        loss_list.append(loss)

    print(noise_var_list, loss_list)
    plot_results(noise_var_list,
                 loss_list,
                 "Noise variance",
                 title="Final MSE vs variance of Gaussian noise",
                 filename="results/sweep_noise_var")
    np.savetxt("results/sweep_noise_prob.txt", [noise_var_list, loss_list],
               fmt="%10.5g")
    tf.reset_default_graph()
def sweep_flip_prob(train_images, test_images):
    flip_prob_list = np.arange(0.0, 0.40, 0.05)
    loss_list = []

    print("Initialising computational graph...")
    csr = ContinuousSetRegressor()
    for flip_prob in flip_prob_list:
        model_name = "CSR, flip prob = {:.4}".format(flip_prob)
        loss, _ = train_csr(
            csr,
            train_images,
            test_images,
            flip_prob=flip_prob,
            model_name=model_name,
        )
        loss_list.append(loss)

    print(flip_prob_list, loss_list)
    plot_results(flip_prob_list,
                 loss_list,
                 "Flip probability",
                 title="Final loss vs probability of pixel-flip noise",
                 filename="results/sweep_noise_prob")
    np.savetxt("results/sweep_noise_prob.txt", [flip_prob_list, loss_list],
               fmt="%10.5g")
    tf.reset_default_graph()
Example #6
def validate(expected_numbers_of_worms_per_video, cluster_threshold, greyscale_threshold):
    logging.info("Validating...")
    for video_path in expected_numbers_of_worms_per_video.keys():
        detected_worms_coord_list, num_frames_compared = video_processing.find_motion_in_video(video_path, greyscale_threshold)
        if detected_worms_coord_list:
            counted_number_of_worms, which_cluster_each_point_is_in, points_to_be_clustered = clustering.cluster_motion_detection_data(detected_worms_coord_list, cluster_threshold, num_frames_compared)
            if which_cluster_each_point_is_in is not None:
                plotting.plot_results(counted_number_of_worms, which_cluster_each_point_is_in, points_to_be_clustered, video_path, expected_number_of_worms=expected_numbers_of_worms_per_video[video_path])
    return 0  
Example #7
def count_worms_in_videos(video_paths, cluster_threshold=DEFAULT_CLUSTER_THRESHOLD, greyscale_threshold=DEFAULT_GREYSCALE_THRESHOLD):
    for video_path, expected_number_of_worms in video_paths.items():
        detected_worms_coord_list, num_frames_compared = video_processing.find_motion_in_video(video_path, greyscale_threshold)
        if len(detected_worms_coord_list) > 0:
            counted_number_of_worms, which_cluster_each_point_is_in, points_to_be_clustered = clustering.cluster_motion_detection_data(detected_worms_coord_list, cluster_threshold, num_frames_compared, algorithm="dbscan")
            if which_cluster_each_point_is_in is not None:
                plotting.plot_results(counted_number_of_worms, which_cluster_each_point_is_in, points_to_be_clustered, video_path, expected_number_of_worms=expected_number_of_worms)
            else:
                logging.info("No clusters found")
        else:
            logging.info("No moving blobs detected")
    return 0
Example #8
File: ahkab.py  Project: vovkd/ahkab
def process_postproc(postproc_list, title, results, outfilename, remote=False):
	"""Runs the post-processing operations, such as plotting.
	postproc_list: list of post processing operations as returned by main()
	title: the deck title
	results: the results to be plotted (including the ones that are not needed)
	outfilename: if the plots are saved to disk, this is the filename without extension
	remote: boolean, do not show plots if True (such as ssh without X11 forwarding)

	Returns: None
	"""
	index = 0
	if outfilename == 'stdout':
		printing.print_warning("Plotting and printing the results to stdout are incompatible options. Plotting skipped.")
		return
	for postproc in postproc_list:
		#print postproc["analysis"], results.keys(), results.has_key(postproc["analysis"]), results[postproc["analysis"]] is None #DEBUG
		plotting.plot_results(title, postproc["x"], postproc["l2l1"], results[postproc["analysis"]], "%s-%d.%s" % (outfilename, index, options.plotting_outtype))
		index = index +1
	if len(postproc_list) and not remote:
		plotting.show_plots()
	return None
Example #9
File: ahkab.py  Project: weilawei/ahkab
def process_postproc(postproc_list, title, results, outfilename):
    """Runs the post-processing operations, such as plotting.
    postproc_list: list of post processing operations as returned by main()
    title: the deck title
    results: the results to be plotted (including the ones that are not needed)
    outfilename: if the plots are saved to disk, this is the filename without extension

    Returns: None
    """
    index = 0
    if outfilename == 'stdout':
        printing.print_warning(
            "Plotting and printing the results to stdout are incompatible options. Plotting skipped.")
        return
    for postproc in postproc_list:
        plotting.plot_results(title, postproc["l2l1"], results[
                              postproc["analysis"]], "%s-%d.%s" % (outfilename, index, options.plotting_outtype))
        index = index + 1
    if len(postproc_list) and options.plotting_show_plots:
        plotting.show_plots()
    return None
Example #10
def process_postproc(postproc_list, title, results, outfilename):
    """Runs the post-processing operations, such as plotting.
    postproc_list: list of post processing operations as returned by main()
    title: the deck title
    results: the results to be plotted (including the ones that are not needed)
    outfilename: if the plots are saved to disk, this is the filename without extension

    Returns: None
    """
    index = 0
    if outfilename == 'stdout':
        printing.print_warning(
            "Plotting and printing the results to stdout are incompatible options. Plotting skipped."
        )
        return
    for postproc in postproc_list:
        plotting.plot_results(
            title, postproc["l2l1"], results[postproc["analysis"]],
            "%s-%d.%s" % (outfilename, index, options.plotting_outtype))
        index = index + 1
    if len(postproc_list) and options.plotting_show_plots:
        plotting.show_plots()
    return None
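A hedged usage sketch for process_postproc, inferred only from the docstring and the keys the function reads ("analysis" and "l2l1"); the deck title, entry values, and result object below are illustrative placeholders, not values taken from the ahkab sources.

# Illustrative call only; tran_results stands in for an analysis result
# object produced elsewhere by main().
tran_results = ...
postproc_list = [
    {"analysis": "tran", "l2l1": [("V1", "V2")]},
]
results = {"tran": tran_results}
process_postproc(postproc_list, title="example deck", results=results,
                 outfilename="example_out")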
Example #11
def _main():
    start = time.time()
    if not os.path.exists("results/"):
        os.mkdir("results/")
    if not os.path.exists("results/timings/"):
        os.mkdir("results/timings/")
    if not os.path.exists("data/"):
        os.mkdir("data/")

    if isinstance(ec.domain_name, str):
        ec.domain_name = [ec.domain_name]

    for domain_name in ec.domain_name:
        all_results = defaultdict(list)
        for curiosity_name in ac.curiosity_methods_to_run:
            if curiosity_name in ac.cached_results_to_load:
                for pkl_fname in glob.glob(
                        os.path.join("results/", domain_name, ac.learning_name,
                                     curiosity_name, "*.pkl")):
                    with open(pkl_fname, "rb") as f:
                        saved_results = pickle.load(f)
                    all_results[curiosity_name].append(saved_results)
                if curiosity_name not in all_results:
                    print("WARNING: Found no results to load for {}".format(
                        curiosity_name))
            else:
                for seed in range(gc.num_seeds):
                    seed = seed + 20
                    print("\nRunning curiosity method: {}, with seed: {}\n".
                          format(curiosity_name, seed))
                    single_seed_results = _run_single_seed(
                        seed, domain_name, curiosity_name, ac.learning_name)
                    for cur_name, results in single_seed_results.items():
                        all_results[cur_name].append(results)
                    plot_results(domain_name, ac.learning_name, all_results)
                    plot_results(domain_name,
                                 ac.learning_name,
                                 all_results,
                                 dist=True)

        plot_results(domain_name, ac.learning_name, all_results)
        plot_results(domain_name, ac.learning_name, all_results, dist=True)

    print("\n\n\n\n\nFinished in {} seconds".format(time.time() - start))
Example #12
def main():
    argv = sys.argv
    print("loading %s ..." % argv[1])
    ssvm = SaveLogger(file_name=argv[1]).load()
    if hasattr(ssvm, 'problem'):
        ssvm.model = ssvm.problem
    print(ssvm)
    if hasattr(ssvm, 'base_ssvm'):
        ssvm = ssvm.base_ssvm
    print("Iterations: %d" % len(ssvm.objective_curve_))
    print("Objective: %f" % ssvm.objective_curve_[-1])
    inference_run = None
    if hasattr(ssvm, 'cached_constraint_'):
        inference_run = ~np.array(ssvm.cached_constraint_)
        print("Gap: %f" %
              (np.array(ssvm.primal_objective_curve_)[inference_run][-1] -
               ssvm.objective_curve_[-1]))

    if len(argv) <= 2:
        argv.append("acc")

    if len(argv) <= 3:
        dataset = 'nyu'
    else:
        dataset = argv[3]

    if argv[2] == 'acc':

        ssvm.n_jobs = 1

        for data_str, title in zip(["train", "val"],
                                   ["TRAINING SET", "VALIDATION SET"]):
            print(title)
            edge_type = "pairwise"

            if dataset == 'msrc':
                ds = MSRC21Dataset()
                data = msrc_helpers.load_data(data_str, which="piecewise_new")
                #data = add_kraehenbuehl_features(data, which="train_30px")
                data = msrc_helpers.add_kraehenbuehl_features(data, which="train")
            elif dataset == 'pascal':
                ds = PascalSegmentation()
                data = pascal_helpers.load_pascal(data_str, sp_type="cpmc")
                #data = pascal_helpers.load_pascal(data_str)
            elif dataset == 'nyu':
                ds = NYUSegmentation()
                data = nyu_helpers.load_nyu(data_str, n_sp=500, sp='rgbd')
            else:
                raise ValueError("Excepted dataset to be 'nyu', 'pascal' or 'msrc',"
                                 " got %s." % dataset)

            if type(ssvm.model).__name__ == "LatentNodeCRF":
                print("making data hierarchical")
                data = pascal_helpers.make_cpmc_hierarchy(ds, data)
                #data = make_hierarchical_data(
                    #ds, data, lateral=True, latent=True, latent_lateral=False,
                    #add_edge_features=False)
            else:
                data = add_edges(data, edge_type)

            if type(ssvm.model).__name__ == 'EdgeFeatureGraphCRF':
                data = add_edge_features(ds, data, depth_diff=True, normal_angles=True)

            if type(ssvm.model).__name__ == "EdgeFeatureLatentNodeCRF":
                data = add_edge_features(ds, data)
                data = make_hierarchical_data(
                    ds, data, lateral=True, latent=True, latent_lateral=False,
                    add_edge_features=True)
            #ssvm.model.inference_method = "qpbo"
            Y_pred = ssvm.predict(data.X)

            if isinstance(ssvm.model, LatentNodeCRF):
                Y_pred = [ssvm.model.label_from_latent(h) for h in Y_pred]
            Y_flat = np.hstack(data.Y)

            print("superpixel accuracy: %.2f"
                  % (np.mean((np.hstack(Y_pred) == Y_flat)[Y_flat != ds.void_label]) * 100))

            if dataset == 'msrc':
                res = msrc_helpers.eval_on_pixels(data, Y_pred,
                                                  print_results=True)
                print("global: %.2f, average: %.2f" % (res['global'] * 100,
                                                       res['average'] * 100))
                #msrc_helpers.plot_confusion_matrix(res['confusion'])
            else:
                hamming, jaccard = eval_on_sp(ds, data, Y_pred,
                                              print_results=True)
                print("Jaccard: %.2f, Hamming: %.2f" % (jaccard.mean(),
                                                        hamming.mean()))

        plt.show()

    elif argv[2] == 'plot':
        data_str = 'val'
        if len(argv) <= 4:
            raise ValueError("Need a folder name for plotting.")
        if dataset == "msrc":
            ds = MSRC21Dataset()
            data = msrc_helpers.load_data(data_str, which="piecewise")
            data = add_edges(data, independent=False)
            data = msrc_helpers.add_kraehenbuehl_features(
                data, which="train_30px")
            data = msrc_helpers.add_kraehenbuehl_features(
                data, which="train")

        elif dataset == "pascal":
            ds = PascalSegmentation()
            data = pascal_helpers.load_pascal("val")
            data = add_edges(data)

        elif dataset == "nyu":
            ds = NYUSegmentation()
            data = nyu_helpers.load_nyu("test")
            data = add_edges(data)

        if type(ssvm.model).__name__ == 'EdgeFeatureGraphCRF':
            data = add_edge_features(ds, data, depth_diff=True, normal_angles=True)
        Y_pred = ssvm.predict(data.X)

        plot_results(ds, data, Y_pred, argv[4])
Example #13
                                # Plot training & validation accuracy/loss values
                                tr_acc = history.history['acc']
                                val_acc = history.history['val_acc']
                                tr_loss = history.history['loss']
                                val_loss = history.history['val_loss']
                                tr_prec = history.history['precision']
                                tr_rec = history.history['recall']
                                val_prec = history.history['val_precision']
                                val_rec = history.history['val_recall']

                                path = plotting.plot_results(tr_acc,
                                                             val_acc,
                                                             tr_loss,
                                                             val_loss,
                                                             epoch,
                                                             batch,
                                                             optimizer,
                                                             learning_rate,
                                                             momentum,
                                                             model,
                                                             im_color,
                                                             save_image=True)

                                # Save Model
                                if test_accuracy >= best_test_acc:
                                    modelX.save(path + 'best_model.h5')
                                    best_test_acc = test_accuracy

                                temp_metrics = pd.Series(
                                    [
                                        tr_acc[-1], val_acc[-1], tr_loss[-1],
                                        val_loss[-1], test_accuracy, test_loss,
Example #14
    Dependencies: numpy, matplotlib

    Note: Due to the random generation of the graph, it can produce targets that cannot be reached.
          Therefore, if no path is plotted, try running the simulation again (a retry sketch follows this example).
"""

from dijkstra import dijkstra
from graph import Graph
from plotting import plot_results

import numpy as np
import matplotlib.pyplot as plt

# generate random graph
graph = Graph()
graph.create_random_graph(n_nodes=10, max_edges_per_node=4, graph_size=20)

# randomly select start and end point
start_at = int(np.random.rand() * graph.get_number_of_nodes())
end_at = start_at
while (end_at == start_at):
    end_at = int(np.random.rand() * graph.get_number_of_nodes())

# generate shortest path
path = dijkstra(graph, start_at, end_at)
print(graph)

# visualize results
fig = plot_results(graph, path, start_at, end_at)
plt.show()
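The note above says the random graph can draw an unreachable target; a minimal retry sketch, under the assumption that dijkstra returns an empty (or None) path in that case:

# Retry until a reachable target is drawn (assumption: dijkstra returns an
# empty/None path when no route exists between start_at and end_at).
path = None
while not path:
    graph = Graph()
    graph.create_random_graph(n_nodes=10, max_edges_per_node=4, graph_size=20)
    start_at = int(np.random.rand() * graph.get_number_of_nodes())
    end_at = start_at
    while end_at == start_at:
        end_at = int(np.random.rand() * graph.get_number_of_nodes())
    path = dijkstra(graph, start_at, end_at)

fig = plot_results(graph, path, start_at, end_at)
plt.show()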
Example #15
"""
Main file.
Uncomment all commented lines and comment out the "frame = run()" line (14)
to only plot the results without re-running the whole experiment
(a plot-only sketch follows this example).
"""

# import pandas as pd

# from consts import RUN_DATA_FILE
from plotting import plot_results

from running import run

frame = run()

# frame = pd.read_csv(RUN_DATA_FILE)

plot_results(frame)

print()
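A minimal sketch of the plot-only variant described in the docstring above, assuming RUN_DATA_FILE (from consts) is the CSV that run() writes:

import os

import pandas as pd

from consts import RUN_DATA_FILE
from plotting import plot_results
from running import run

# Reuse cached results when they exist; otherwise run the full experiment.
if os.path.exists(RUN_DATA_FILE):
    frame = pd.read_csv(RUN_DATA_FILE)
else:
    frame = run()

plot_results(frame)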
Example #16
model_file = args.model
image_file = args.img
factor = args.factor[0]
mixcomp = args.mixcomp

import gv
import numpy as np
from PIL import Image
import matplotlib.pylab as plt

from plotting import plot_results

detector = gv.Detector.load(model_file)

img = np.array(Image.open(image_file)).astype(np.float64) / 255.0

if factor is not None:
    bbs, x, small = detector.detect_coarse_unfiltered_at_scale(
        img, factor, mixcomp)
    xx = (x - x.mean()) / x.std()
    plot_results(detector, img, xx, small, mixcomp, bbs)
    print('max response', x.max())
    print('max response (xx)', xx.max())
else:
    bbs = detector.detect_coarse(img, mixcomp)
    plot_results(detector, img, None, None, mixcomp, bbs)

print('kernel sum', np.fabs(detector.kernels[mixcomp] - 0.5).sum())

plt.show()
Example #17
def evaluateModels(**parsed_args):

    logger = logging.getLogger('EvalModel')

    # log arguments
    for argkey, argvalue in sorted(parsed_args.items()):
        if argvalue is None:
            continue
        logger.info('Argument {}: {}'.format(argkey, argvalue))

    #################
    # Variables
    #################
    observable_dict = read_dict_from_json(parsed_args['observable_config'])

    logger.info("Features used in training: {}".format(', '.join(
        parsed_args['observables'])))
    # detector level
    vars_det = [
        observable_dict[key]['branch_det']
        for key in parsed_args['observables']
    ]
    # truth level
    vars_mc = [
        observable_dict[key]['branch_mc'] for key in parsed_args['observables']
    ]

    # event weights
    wname = parsed_args['weight']

    #################
    # Load data
    #################
    logger.info("Loading data")

    fnames_d = parsed_args['data']
    logger.info("(Pseudo) data files: {}".format(' '.join(fnames_d)))
    dataHandle = DataHandler(fnames_d,
                             wname,
                             variable_names=vars_det + vars_mc)
    logger.info("Total number of pseudo data events: {}".format(
        dataHandle.get_nevents()))

    fnames_s = parsed_args['signal']
    logger.info("Simulation files: {}".format(' '.join(fnames_s)))
    simHandle = DataHandler(fnames_s, wname, variable_names=vars_det + vars_mc)
    logger.info("Total number of simulation events: {}".format(
        simHandle.get_nevents()))

    ####
    #dataHandle = DataToy(1000000, 1, 1.5)
    #simHandle = DataToy(1000000, 0, 1)
    #vars_mc = ['x_truth']
    ####

    #################
    # Event weights
    # pseudo data weights
    w_d = dataHandle.get_weights(rw_type=parsed_args['reweight_data'],
                                 vars_dict=observable_dict)

    # prior simulation weights
    w_s = simHandle.get_weights()

    # normalize simulation weights to pseudo data
    ndata = w_d.sum()
    nsim = w_s.sum()
    w_s *= ndata / nsim

    #################
    # Input datasets
    #################
    # Training arrays
    # Truth level

    # FIXME hard code input variables for pfn for now
    if parsed_args['model_name'] == 'pfn':
        vars_mc = [['th_pt_MC', 'th_y_MC', 'th_phi_MC', 'th_e_MC'],
                   ['tl_pt_MC', 'tl_y_MC', 'tl_phi_MC', 'tl_e_MC']]

    X, Y, w = get_training_inputs(vars_mc,
                                  dataHandle,
                                  simHandle,
                                  rw_type=parsed_args['reweight_data'],
                                  vars_dict=observable_dict)

    # Split into training, validation, and test sets: 75%, 15%, 10%
    X_train, X_test, Y_train, Y_test, w_train, w_test = train_test_split(
        X, Y, w, test_size=0.25)
    X_val, X_test, Y_val, Y_test, w_val, w_test = train_test_split(
        X_test, Y_test, w_test, test_size=0.4)
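    # With test_size=0.25 the first split keeps 75% of the events for training;
    # the second split takes 40% of the remaining 25% as the test set (10%
    # overall) and leaves the other 60% as validation (15% overall), matching
    # the 75% / 15% / 10% split stated above.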

    #################
    # Train model and reweight simulation
    weights_rw = []
    for i in range(parsed_args['nrun']):
        logger.info("RUN {}".format(i))

        model_dir = os.path.join(parsed_args['outputdir'],
                                 'Models_{}'.format(i))

        model = train_model((X_train, Y_train, w_train), (X_val, Y_val, w_val),
                            (X_test, Y_test, w_test),
                            model_name=parsed_args['model_name'],
                            model_dir=model_dir,
                            batch_size=parsed_args['batch_size'],
                            load_model=parsed_args['load_model'])

        # Reweight simulation to the truth in pseudo data
        # reweighting factors
        X_prior = X[np.argmax(Y, axis=1) == 0]
        lr = reweight(model, X_prior)

        logger.info("Plot distribution of reweighitng factors")
        fname_hlr = os.path.join(model_dir, 'rhist')
        plotting.plot_LR_distr(fname_hlr, [lr])

        # New weights for simulation
        weights_rw.append(w_s * lr)

    #################
    # Compare reweighted simulation prior to pseudo truth

    w_s_rw = weights_rw[0]

    for varname in parsed_args['observables']:
        logger.info(varname)
        bins = get_bins(varname, parsed_args['binning_config'])
        vname_mc = observable_dict[varname]['branch_mc']

        # pseudo truth
        hist_truth, hist_truth_err = dataHandle.get_histogram(
            vname_mc, w_d, bins)

        # simulation prior
        hist_prior, hist_prior_err = simHandle.get_histogram(
            vname_mc, w_s, bins)

        # reweighted simulation distributions
        hists_rw, hists_rw_err = simHandle.get_histogram(
            vname_mc, weights_rw, bins)

        # plot the first reweighted distribution
        assert (len(hists_rw) > 0)
        hist_rw = hists_rw[0]
        hist_rw_err = hists_rw_err[0]
        #hist_rw = np.mean(np.asarray(hists_rw), axis=0)
        #hist_rw_err = np.std(np.asarray(hists_rw), axis=0, ddof=1)

        # plot histograms and their ratio
        figname = os.path.join(parsed_args['outputdir'],
                               'Reweight_{}'.format(varname))
        logger.info("Plot reweighted distribution: {}".format(figname))

        # Compute chi2s
        text_chi2 = write_chi2(hist_truth,
                               hist_truth_err, [hist_rw, hist_prior],
                               [hist_rw_err, hist_prior_err],
                               labels=['Reweighted', 'Prior'])
        logger.info("  " + "    ".join(text_chi2))

        # Compute triangular discriminator
        text_tria = write_triangular_discriminators(
            hist_truth, [hist_rw, hist_prior], labels=['Reweighted', 'Prior'])
        logger.info("  " + "    ".join(text_tria))

        # Compute KS test statistic
        arr_truth = dataHandle.get_variable_arr(vname_mc)
        arr_sim = simHandle.get_variable_arr(vname_mc)
        text_ks = write_ks(arr_truth,
                           w_d, [arr_sim, arr_sim], [w_s_rw, w_s],
                           labels=['Reweighted', 'Prior'])

        logger.info("  " + "    ".join(text_ks))

        plotting.plot_results(bins, (hist_prior, hist_prior_err),
                              (hist_rw, hist_rw_err),
                              histogram_truth=(hist_truth, hist_truth_err),
                              figname=figname,
                              texts=text_ks,
                              **observable_dict[varname])

        ####
        # plot all trials
        if len(hists_rw) > 1:
            figname_all = os.path.join(parsed_args['outputdir'],
                                       'Reweight_{}_allruns'.format(varname))
            plotting.plot_hists_resamples(figname_all, bins, hists_rw,
                                          hist_prior, hist_truth,
                                          **observable_dict[varname])

        # plot the distribution of KS test statistic
        ks_list = []
        for rw_s in weights_rw:
            ks = ks_2samp_weighted(arr_truth, arr_sim, w_d, rw_s)[0]
            ks_list.append(ks)
        hist_ks, bins_ks = np.histogram(ks_list)
        fname_ks = os.path.join(parsed_args['outputdir'],
                                'KSDistr_{}'.format(varname))
        plotting.plot_histograms1d(fname_ks, bins_ks, [hist_ks], xlabel="KS")
Example #18
        'batch_size': 64,
        'verbose': True
    }

    plot = True
    save = True

    training_set, validation_set, test_set = get_mnist_dataset()

    all_models = {
        'normal': [AutoEncoderNetwork(784, bn_dim) for bn_dim in [2, 4]]
    }

    for label, models in all_models.items():
        for model in models:
            name = label + '_bottleneck_dim_{}'.format(model.bottleneck_dim)
            results = run_training(model, **training_params)

            if plot:
                plot_results(results, save=True, name=name)
                plot_montage(model, test_set, save=True, name=name)
                plot_decoder_outputs(model, test_set, save=True, name=name)
                if model.bottleneck_dim == 2:
                    plot_bottleneck_outputs(model,
                                            test_set,
                                            save=True,
                                            name=name)

            if save:
                torch.save(model.state_dict(), 'models/{}'.format(name))
Example #19
model_file = args.model
image_file = args.img
factor = args.factor[0]
mixcomp = args.mixcomp

import gv
import numpy as np
from PIL import Image
import matplotlib.pylab as plt

from plotting import plot_results

detector = gv.Detector.load(model_file)

img = np.array(Image.open(image_file)).astype(np.float64) / 255.0

if factor is not None:
    bbs, x, small = detector.detect_coarse_unfiltered_at_scale(img, factor, mixcomp) 
    xx = (x - x.mean()) / x.std()
    plot_results(detector, img, xx, small, mixcomp, bbs)
    print('max response', x.max())
    print('max response (xx)', xx.max())
else:
    bbs = detector.detect_coarse(img, mixcomp) 
    plot_results(detector, img, None, None, mixcomp, bbs)

print('kernel sum', np.fabs(detector.kernels[mixcomp] - 0.5).sum())

plt.show()

Example #20
    assert mixcomp is not None
    #bbs, x, small = detector.detect_coarse_unfiltered_at_scale(grayscale_img, side, mixcomp) 

    factor = side/max(detector.settings['image_size'])
    print(factor)
    bbs, x, bkgcomp, feats, img_resized = detector.detect_coarse_single_factor(grayscale_img, factor, mixcomp)
    detector.label_corrects(bbs, fileobj)
    #bbs = detector.nonmaximal_suppression(bbs)

    #print('small', small.shape)
    if bb_limit is not None:
        bbs = bbs[:bb_limit]

    for bb in bbs:
        print(bb)
    plot_results(detector, img, x, feats, mixcomp, bbs, img_resized=img_resized)
    import pylab as plt
    plt.show()
    #plt.imshow(bkgcomp, interpolation='nearest'); plt.colorbar(); plt.show()
    print('max response', x.max())
else:
    if mixcomp is None:
        import time
        start = time.time()
        bbs = detector.detect_coarse(grayscale_img, fileobj=fileobj)
        print('score', bbs[0].score)
        print("Elapsed:", (time.time() - start))
    else:
        bbs, res_map = detector.detect_coarse(grayscale_img, fileobj=fileobj, mixcomps=[mixcomp], return_resmaps=True)