Example #1
    def run(self):
        results_path = Path(str(directives.path(self.arguments[0])))
        if not results_path.is_absolute():
            # Resolve relative paths against the directory of the source
            # file that contains this directive.
            source = str(self.state_machine.input_lines.source(
                    self.lineno - self.state_machine.input_offset - 1))
            results_path = Path(source).resolve().parent / results_path
        results = load_results(results_path)
        for result in results:
            self.state.document.settings.record_dependencies.add(str(
                result["path"]))

        headers = ["Label", "Angles", "Scales", "Input", "Features", "Metric",\
                "Feature Parameters", "Mean Correlation"]
        data = [self.extract_result(r) for r in results]

        return [self.create_table([headers, ] + data), ]
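
The create_table and extract_result helpers and the directive's registration are not part of this snippet. As a rough illustration only, with the class, directive and module names below being assumptions rather than the project's real ones, such a directive is typically wired into a Sphinx build like this:

# Hypothetical registration sketch; "results-table", ResultsTableDirective and
# my_extension are made-up names for illustration.
def setup(app):
    from my_extension import ResultsTableDirective
    app.add_directive("results-table", ResultsTableDirective)
    return {"parallel_read_safe": True}

An .rst source could then embed the summary table with a single directive line whose path argument, as in run() above, is resolved relative to that source file when it is not absolute.
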
Example #2
    def __init__(self):
        self.db_indivs = load_indivs().to_numpy()
        self.digits, self.spread = load_symbol_properties()
        self.db_results = []
        quotes_time = [[int(time) for time in quotes]
                       for quotes in load_quotes_time()]
        quotes_time_string = load_quotes_time_string()

        for idx, result in enumerate(load_results()):
            # Columns 6 onwards hold the quote series; the first six columns
            # are scalar fields.
            quotes = np.array([float(quote) for quote in result[6:]])
            half = len(quotes) // 2

            self.db_results.append(
                Quote(int(result[0]), int(result[1]), int(result[2]),
                      float(result[3]), float(result[4]), bool(int(result[5])),
                      quotes[:half], quotes[half:], quotes_time[idx],
                      quotes_time_string[idx]))
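
The Quote container used above is not shown. Judging only from the constructor call (three ints, two floats, a bool, the two halves of the quote series, and the two time sequences), it could be modelled as a NamedTuple; the field names below are guesses for illustration, not the project's actual names:

from typing import NamedTuple

import numpy as np

class Quote(NamedTuple):
    # Hypothetical field names inferred from the positional arguments above.
    result_id: int
    index_a: int
    index_b: int
    value_a: float
    value_b: float
    flag: bool
    quotes_first_half: np.ndarray
    quotes_second_half: np.ndarray
    quotes_time: list
    quotes_time_string: list
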
Example #3
    def __init__(self):
        self.digits, self.spread = load_symbol_properties()
        self.db_indivs = load_indivs().to_numpy()
        self.db_results = []
        self.quotes = []
        self.params = None
        # Each time series is truncated or cyclically padded to a fixed
        # length of 400 samples so the list can later be stacked into a
        # single 2-D array.
        self.quotes_time = [
            np.resize(np.array(quotes).astype(int), 400)
            for quotes in load_quotes_time()
        ]

        # The first six columns are scalar result fields; the rest is the
        # quote series.
        for result in load_results():
            self.quotes.append(np.array(result[6:]).astype(float))
            self.db_results.append(np.array(result[:6]).astype(float))

        self.db_results = np.array(self.db_results)
        self.quotes = np.array(self.quotes)
        self.quotes_time = np.array(self.quotes_time)
Example #4
import argparse
import matplotlib.pyplot as plt

from loader  import load_results
from metrics import calculate_metrics, plot_metric

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser()
    parser.add_argument('results_path', action='store',      help='path to file containing results in csv format')
    parser.add_argument('--scalable',   action='store_true', help='switch to scalable metrics')
    parser.add_argument('--output',     action='store',      help='path to output plot file')
    args = parser.parse_args()

    # load results
    results = load_results(args.results_path, grouping_col='npoints')
    results = sorted(results.items())

    # calculate metrics
    results = calculate_metrics(results, scalable=args.scalable)

    # plot metrics
    plt.figure(figsize=(22, 16), dpi=80)

    metrics_type = 'scalable' if args.scalable else 'non-scalable'
    plot_title = 'Monte Carlo approximation of PI number - %s metrics of parallel computation' % metrics_type
    plt.suptitle(plot_title, size=24)

    plot_opts   = {'label': lambda npoints: '%s points' % npoints}
    legend_opts = {'loc': 'upper center', 'ncol': 3}
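
loader.load_results itself is not shown in these scripts. Given how it is called (a CSV path plus a grouping_col, with the result passed to sorted(results.items())), a minimal sketch of such a loader, assuming pandas and assuming each group is returned as a sub-DataFrame keyed by the grouping column's value, could look like this:

# Hypothetical sketch; the real loader.py may differ.
import pandas as pd

def load_results(results_path, grouping_col):
    """Read a CSV of measurements and group its rows by one column."""
    frame = pd.read_csv(results_path)
    # A plain dict {group_value: sub-DataFrame}, so callers can iterate over
    # sorted(results.items()) as the scripts above do.
    return {key: group for key, group in frame.groupby(grouping_col)}
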
Example #5
import argparse
import matplotlib.pyplot as plt

from loader   import load_results
from operator import itemgetter
from metrics  import plot_metric

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser()
    parser.add_argument('results_path', action='store', help='path to file containing results in csv format')
    parser.add_argument('--output',     action='store', help='path to output plot file')
    args = parser.parse_args()

    # load results
    results = load_results(args.results_path, grouping_col='ncores')
    results = sorted(results.items())

    # plot performance
    plt.figure(figsize=(12, 8), dpi=80)

    plot_opts = {
        'label': lambda ncores: '%s cores' % ncores,
        'title': 'Monte Carlo approximation of PI number - performance of parallel computation'
    }
    legend_opts = {'loc': 'upper left', 'ncol': 1}

    plot_metric('npoints', 'time', results,
        metric_arg_name='Number of points',
        metric_val_name='Walltime [s]',
        plot_opts=plot_opts,
        legend_opts=legend_opts)
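
metrics.plot_metric is only called here, never defined. From the call above it receives the x and y column names, the sorted (group_key, data) pairs, axis labels, and the plot/legend option dicts; the following is a rough sketch under those assumptions, not the project's actual implementation:

# Hypothetical sketch of plot_metric; the argument handling is inferred from
# the calls in these scripts, and 'data' is assumed to be a pandas DataFrame.
import matplotlib.pyplot as plt

def plot_metric(arg_col, val_col, results,
                metric_arg_name='', metric_val_name='',
                plot_opts=None, legend_opts=None):
    plot_opts = plot_opts or {}
    legend_opts = legend_opts or {}
    for group_key, data in results:
        # 'label' is a callable taking the group key, as in the scripts above.
        label = plot_opts.get('label', str)(group_key)
        plt.plot(data[arg_col], data[val_col], marker='o', label=label)
    if 'title' in plot_opts:
        plt.title(plot_opts['title'])
    plt.xlabel(metric_arg_name)
    plt.ylabel(metric_val_name)
    plt.legend(**legend_opts)
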
Example #6
import argparse
import matplotlib.pyplot as plt

from loader  import load_results
from metrics import calculate_metrics, plot_metric

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser()
    parser.add_argument('results_path', action='store',      help='path to file containing results in csv format')
    parser.add_argument('--scalable',   action='store_true', help='switch to scalable metrics')
    parser.add_argument('--output',     action='store',      help='path to output plot file')
    args = parser.parse_args()

    # load results
    results = load_results(args.results_path, grouping_col='size')
    results = sorted(results.items())

    # calculate metrics
    results = calculate_metrics(results, scalable=args.scalable)

    # plot metrics
    plt.figure(figsize=(22, 16), dpi=80)

    metrics_type = 'scalable' if args.scalable else 'non-scalable'
    plot_title = 'Matrix multiplication - %s metrics of parallel computation' % metrics_type
    plt.suptitle(plot_title, size=24)

    plot_opts   = {'label': lambda size: 'Size: %s' % size}
    legend_opts = {'loc': 'upper center', 'ncol': 3}
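
calculate_metrics is likewise only called. For a scaling study like this the standard quantities are speedup S(p) = T(1)/T(p) and efficiency E(p) = S(p)/p, with the --scalable switch presumably selecting the weak-scaling (scaled-speedup) reading of those ratios. The sketch below only illustrates those formulas, assuming each group's data is a pandas DataFrame with 'ncores' and 'time' columns; it is not the project's metrics module:

# Hypothetical sketch: speedup and efficiency from walltime measurements.
def calculate_metrics(results, scalable=False):
    metrics = []
    for group_key, data in results:
        data = data.sort_values('ncores')
        # Walltime of the single-core run (assumed present) is the baseline.
        t_base = data['time'].iloc[0]
        speedup = t_base / data['time']
        data = data.assign(speedup=speedup,
                           efficiency=speedup / data['ncores'])
        # Under --scalable the problem size grows with the core count, so the
        # same ratios are read as scaled speedup and efficiency.
        metrics.append((group_key, data))
    return metrics
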
Example #7
import collections
#import sys

import matplotlib.pyplot as pyplot
import numpy
from pathlib import Path

from loader import format_label, load_results, naturally_sorted

results = load_results(Path("../results").resolve())

bar_width = 0.8 / len(results)
indices = numpy.arange(len(results[0]["correlations"]))

sketch_correlations = collections.defaultdict(dict)
for result_index, result in enumerate(results):
    for sketch_index, sketch in enumerate(result["sketches"]):
        sketch_correlations[sketch][result_index] = (
            result["correlations"][sketch_index])

sketches = naturally_sorted(sketch_correlations.keys())

bar_plots = []
for result_index, result in enumerate(results):
    correlations = [sketch_correlations[s][result_index] for s in sketches]
    bar_plots.append(pyplot.bar(
        indices + bar_width * result_index,
        correlations,
        width=bar_width,
        color=result["plot_color"],
        label=format_label(result)))
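
The listing breaks off here, before the figure is finished. A typical way to complete such a grouped bar chart (tick labels centred under each group of bars, axis label, legend) is shown below purely as a plausible continuation, not as the original script:

# Plausible continuation, not the original code.
pyplot.xticks(indices + bar_width * (len(results) - 1) / 2,
              sketches, rotation=45, ha="right")
pyplot.ylabel("Mean correlation")  # assumed axis label
pyplot.legend()
pyplot.tight_layout()
pyplot.show()
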