Example #1
File: lib.py Project: mxm/yoka
 def run(self, ignore_failures=False):
     # execute benchmarks
     for benchmark in self.benchmarks:
         # in case this class got reused
         benchmark.runs = []
         for run_id in range(0, benchmark.times):
             failed = False
             try:
                 benchmark.setup()
                 benchmark.run()
             except Exception:
                 logger.exception("Exception in %s run %d" % (benchmark, run_id))
                 failed = True
             finally:
                 try:
                     benchmark.shutdown()
                 except Exception:
                     pass
             # save logs
             log_paths, failed = self.save_logs(benchmark, run_id+1, failed)
             # keep list of results (make copy!)
             benchmark.runs.append(benchmark.run_times.copy())
             # save current result immediately
             result = Result(self, benchmark, log_paths)
             result.save(failed)
             # TODO this could be re-initialized somewhere else
             # CAUTION: run_times holds the same pointer as the decorator Timer
             #          if run_times gets reassigned, this pointer is lost
             benchmark.run_times.clear()
             # raise exception if desired
             if failed and not ignore_failures:
                 raise Exception("Exception raised in %s run %d (see logs)." % (benchmark, run_id))
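
The copy-then-clear pattern at the end of the loop only works because run_times is mutated in place, as the CAUTION comment notes. A minimal sketch of that aliasing, with hypothetical names standing in for the Timer decorator:

import time

# run_times is shared between the timing helper and the benchmark loop
# (hypothetical names; this only illustrates the aliasing).
run_times = {}

def timed(label, fn):
    start = time.time()
    fn()
    run_times[label] = time.time() - start  # the timer writes into the shared dict

runs = []
timed("run_1", lambda: sum(range(10_000)))
runs.append(run_times.copy())  # snapshot this run's values...
run_times.clear()              # ...then clear in place; reassigning run_times = {}
                               # would sever the reference the timer still holds
print(runs)                    # e.g. [{'run_1': 0.0003}]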
Example #2
 def run(self, ignore_failures=False):
     # generate data
     for generator in self.generators:
         generator.setup()
         generator.run()
         generator.shutdown()
     # execute benchmarks
     for benchmark in self.benchmarks:
         for run_id in range(0, benchmark.times):
             failed = False
             try:
                 benchmark.setup()
                 benchmark.run()
             except Exception:
                 logger.exception("Exception in %s run %d" % (benchmark, run_id))
                 failed = True
             finally:
                 try:
                     benchmark.shutdown()
                 except Exception:
                     pass
             # get system logs
             log_paths = {}
             for system in self.systems:
                 unique_full_path = "logs/%s/%s/%d/%s" % (self.uid,
                                                          benchmark.id,
                                                          run_id+1,
                                                          system)
                 system.save_log(unique_full_path)
                 log_paths[system] = unique_full_path
             # save result
             result = Result(self, benchmark, log_paths)
             result.save(failed)
             if failed and not ignore_failures:
                 raise Exception("Exception raised in %s run %d (see logs)." % (benchmark, run_id))
Example #3
class Testcase(unittest.TestCase):
    """ Submodule for unittests, derives from unittest.TestCase """

    first = Result("First Quest", "Tom, Punisher", "Magneto, Cyclops", "Heroes")
    second = Result("This_Quest", "Super Mario", "Tom Riddle", "Bad guys")

    def test_attribute(self):
        """ return true if values are the same """

        self.assertEqual(self.first.quest_name, "First Quest")
        self.assertEqual(self.second.quest_name, "This_Quest")
        self.assertEqual(self.first.winner, "Heroes")
        self.assertEqual(self.second.villans, "Tom Riddle")

    def test_if_instance_notequal(self):
        """ Return true if instance is not same """
        self.assertIsNot(self.first, self.second)

    def test_if_instance_equal(self):
        """ Return true if instance is not same """
        self.assertIs(self.first, self.first)

    def test_type(self):
        """ Return true if value is correct type """
        self.assertIsInstance(self.second.villans, str)
        self.assertIsInstance(self.second.heroes, str)
        self.assertIsInstance(self.first.quest_name, str)
Example #4
def outputToResults(output, path, diff, avgTime, flags):
    # Create a Result object out of TRIMMED Tamarin output
    with open(path, 'rb') as f:
        fileHash = sha256(f.read()).hexdigest()
    if "TIMEOUT" in output:
        return Result(fileHash, diff, "TIMEOUT", 0.0, flags)
    elif len(output) == 0:
        return Result(fileHash, diff, "NOLEMMAS", 0.0, flags)
    return Result(fileHash, diff, extractLemmas(output), avgTime, flags)
Example #5
def create_and_test_image(planner_name,
                          planners,
                          benchmarks,
                          config,
                          stored_result=None,
                          force_overwrite=False):
    result = Result()
    test_params = []

    pool = Pool(int(config.getNumberProccessor()))
    image_path = os.path.join(IMAGES_DIR, "%s.img" % (planner_name))

    if file_exists(image_path, force_overwrite):
        print("Build skipped because file exists")
    else:
        # Build the image.
        print("Building %s" % image_path)
        planner_path = os.path.join(
            PLANNER_DIR, "%s" % (planners[planner_name].getFolder()))
        result.build_successful, result.build_logs = try_build_image(
            planner_path, image_path, planner_name)

        if not result.build_successful:
            print("Building %s failed" % image_path)
            return result
        print("Successfully built %s" % image_path)

    results_path = os.path.join(RESULT_OUTPUT, "%s/" % (planner_name))

    if not os.path.exists(results_path):
        os.mkdir(results_path)

    for key, values in benchmarks.items():

        for value in values:
            #print value
            benchmark = {key: [value]}
            #print benchmark
            test_params.append(
                [image_path, benchmark, results_path, config])

            benchmark_results_path = results_path + key
            if not os.path.exists(benchmark_results_path):
                os.mkdir(benchmark_results_path)

    # print('The test parameters: ', test_params[1])

    # TODO: fix KeyboardInterrupt bug - https://jreese.sh/blog/python-multiprocessing-keyboardinterrupt https://stackoverflow.com/questions/21104997/keyboard-interrupt-with-pythons-multiprocessing/21106459#21106459
    # Test the image, each domain receives a different processor.
    pool.map(test_container_multiProcessor, test_params)

    #result.benchmark_results = test_container(image_path, benchmarks, results_path)
    result.labels = try_extract_labels(image_path)

    return result
Example #6
File: lib.py Project: aljoscha/yoka
 def run(self, ignore_failures=False):
     # execute benchmarks
     for benchmark in self.benchmarks:
         # in case this class got reused
         benchmark.runs = []
         for run_id in range(0, benchmark.times):
             failed = False
             try:
                 benchmark.setup()
                 benchmark.run()
             except Exception:
                 logger.exception("Exception in %s run %d" % (benchmark, run_id))
                 failed = True
             finally:
                 try:
                     benchmark.shutdown()
                 except Exception:
                     pass
             # get system logs
             log_paths = {}
             for system in benchmark.systems:
                 unique_full_path = "results/logs/%s/%s/%d/%s" % (
                                     self.uid,
                                     benchmark.id,
                                     run_id+1,
                                     system)
                 # create directories
                 os.makedirs(unique_full_path)
                 system.save_log(unique_full_path)
                 log_paths[system] = unique_full_path
                 # check log for exceptions
                 if not failed:
                     try:
                         for filename in os.listdir(unique_full_path):
                             with open(unique_full_path + "/" + filename) as file:
                                 for number, line in enumerate(file):
                                     if re.search("(error|exception)", line, flags=re.IGNORECASE):
                                         logger.info("Error detected in line %d:\n%s" % (number, line))
                                         # for now, just fail the benchmark if an error has been detected
                                         failed = True
                                         break
                     except Exception:
                         logger.exception("Failed to scan log for errors.")
             # keep list of results (make copy!)
             benchmark.runs.append(benchmark.run_times.copy())
             # save current result immediately
             result = Result(self, benchmark, log_paths)
             result.save(failed)
             # TODO this could be re-initialized somewhere else
             # CAUTION: run_times holds the same pointer as the decorator Timer
             #          if run_times gets reassigned, this pointer is lost
             benchmark.run_times.clear()
             # raise exception if desired
             if failed and not ignore_failures:
                 raise Exception("Exception raised in %s run %d (see logs)." % (benchmark, run_id))
Example #7
def stochastic_gradient_descent(model,
                                dataset,
                                n_iters=4000,
                                eval_every=500,
                                verbose=True,
                                learning_rate=1e-3,
                                name="SGD"):
    """
    stochastic_gradient_descent: given a model and a dataset, perform simple
    stochastic gradient descent to optimise the model for the dataset, using a
    fixed learning rate.

    Required inputs:
    -   model: should be an instance of models.NeuralNetwork, and should contain
        get_parameter_vector, get_gradient_vector, and set_parameter_vector
        methods
    -   dataset: should be an instance of data.DataSet, and should contain
        x_train, y_train, x_test, and y_test attributes
    """
    # Get initial parameters and initialise the results object
    w = model.get_parameter_vector()
    result = Result(name)
    for i in range(n_iters):
        # Evaluate the model
        if i % eval_every == 0: result.update(model, dataset, i, 1)
        # Update parameters
        dEdw = model.get_gradient_vector(dataset.x_train, dataset.y_train)
        w -= learning_rate * dEdw
        model.set_parameter_vector(w)
    # Evaluate final performance
    result.update(model, dataset, n_iters, 1)
    if verbose: result.display_summary(n_iters)
    return result
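
The docstring above spells out the interface this optimiser relies on. A minimal sketch of stand-ins that satisfy it, so the function can be run end to end (QuadraticModel, ToyDataSet and the stub Result below are hypothetical, not the project's classes; the error is a simple quadratic so the gradient is easy to check):

import numpy as np

class QuadraticModel:
    """Stand-in for models.NeuralNetwork: error E(w) = 0.5 * ||w - target||^2."""
    def __init__(self, dim):
        self.w = np.zeros(dim)
    def get_parameter_vector(self):
        return self.w
    def get_gradient_vector(self, x_train, y_train):
        return self.w - y_train          # dE/dw for the quadratic error
    def set_parameter_vector(self, w):
        self.w = w

class ToyDataSet:
    """Stand-in for data.DataSet with the attributes named in the docstring."""
    def __init__(self, target):
        self.x_train = self.x_test = None
        self.y_train = self.y_test = target

class Result:
    """Hypothetical stub for the project's Result, just enough for the loop above."""
    def __init__(self, name):
        self.name, self.history = name, []
    def update(self, model, dataset, i, t):
        self.history.append((i, float(np.sum((model.w - dataset.y_train) ** 2))))
    def display_summary(self, n_iters):
        print(self.name, "final squared error:", self.history[-1][1])

model = QuadraticModel(3)
dataset = ToyDataSet(np.array([1.0, -2.0, 0.5]))
result = stochastic_gradient_descent(model, dataset, n_iters=2000,
                                     eval_every=500, learning_rate=1e-2)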
Example #8
File: lib.py Project: mxm/yoka
 def run(self, ignore_failures=False):
     # execute benchmarks
     for benchmark in self.benchmarks:
         # in case this class got reused
         benchmark.runs = []
         for run_id in range(0, benchmark.times):
             failed = False
             try:
                 benchmark.setup()
                 benchmark.run()
             except Exception:
                 logger.exception("Exception in %s run %d" %
                                  (benchmark, run_id))
                 failed = True
             finally:
                 try:
                     benchmark.shutdown()
                 except Exception:
                     pass
             # save logs
             log_paths, failed = self.save_logs(benchmark, run_id + 1,
                                                failed)
             # keep list of results (make copy!)
             benchmark.runs.append(benchmark.run_times.copy())
             # save current result immediately
             result = Result(self, benchmark, log_paths)
             result.save(failed)
             # TODO this could be re-initialized somewhere else
             # CAUTION: run_times holds the same pointer as the decorator Timer
             #          if run_times gets reassigned, this pointer is lost
             benchmark.run_times.clear()
             # raise exception if desired
             if failed and not ignore_failures:
                 raise Exception(
                     "Exception raised in %s run %d (see logs)." %
                     (benchmark, run_id))
Example #9
def sgd_2way_tracking(model,
                      dataset,
                      n_iters=5000,
                      eval_every=500,
                      verbose=True,
                      t0=1,
                      alpha=0.8,
                      beta=0.5,
                      name="SGD with line-search"):
    """
    sgd_2way_tracking: given a model and a dataset, perform stochastic gradient
    descent to optimise the model for the dataset, using a bidirectional
    line-search to find a good step size during each iteration; the step size
    which is found during each iteration persists as the initial step size
    during the next iteration.

    Inputs:
    -   model: the model which will be optimised
    -   dataset: the dataset which the model will be trained on
    -   n_iters: the number of outer loop iterations to perform
    -   eval_every: how frequently to evaluate model performance
    -   verbose: whether to print model performance to stdout every time it
        is evaluated
    -   t0: initial step size to take
    -   alpha: fraction of the theoretical approximate step size which is
        considered acceptable
    -   beta: factor with which the step size will be multiplied during each
        iteration of back-tracking; for forward-tracking, it is the inverse of
        the factor by which the step-size is multiplied
    """
    # Get initial parameters and step size, and initialise the results object
    w = model.get_parameter_vector()
    t = t0
    result = Result(name)
    for i in range(n_iters):
        # Evaluate the model
        if i % eval_every == 0:
            result.update(model, dataset, i, t)
        # Get the gradient and mean error for the current parameters
        dEdw = model.get_gradient_vector(dataset.x_train, dataset.y_train)
        E0 = model.mean_total_error(dataset.y_train)
        # Check if the current step size gives sufficient error reduction
        backtrack_params = (model, w, -dEdw, dataset, alpha, dEdw, E0)
        if old_backtrack_condition(t, *backtrack_params):
            # Reduce step size until error reduction is good enough
            t *= beta
            while old_backtrack_condition(t, *backtrack_params):
                t *= beta
        else:
            # Increase step size until error reduction is not good enough
            t /= beta
            while not old_backtrack_condition(t, *backtrack_params):
                t /= beta
            # TODO: also try forward-tracking until E starts to increase

        w -= t * dEdw
        model.set_parameter_vector(w)
    # Evaluate final performance
    result.update(model, dataset, n_iters, t)
    if verbose: result.display_summary(n_iters)
    return result
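
old_backtrack_condition is not shown in this snippet; the way it is called suggests an Armijo-style sufficient-decrease test that returns True while the step is still unacceptable. A hedged, self-contained sketch of such a test on a generic error function (not the project's model API):

import numpy as np

def sufficient_decrease_fails(E, w, delta, dEdw, E0, t, alpha):
    """Armijo-style check: True if the step t*delta does NOT reduce the error
    by at least alpha * t * <gradient, direction>, i.e. keep backtracking.
    E is any callable returning the scalar error at a parameter vector."""
    return E(w + t * delta) > E0 + alpha * t * np.dot(dEdw, delta)

# Demonstration on E(w) = 0.5 * ||w||^2, starting from w = [3, -4]:
E = lambda w: 0.5 * np.dot(w, w)
w = np.array([3.0, -4.0])
dEdw = w                  # gradient of the quadratic at w
delta = -dEdw             # descent direction, as in the loop above
E0 = E(w)
for t in (1.0, 0.5, 0.25):
    print(t, sufficient_decrease_fails(E, w, delta, dEdw, E0, t, alpha=0.8))
# prints True, True, False: only t = 0.25 satisfies the decrease condition here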
Example #10
start_time = time.time()

print("\n...pre-processing the data...\n")
(data, cl_labels) = Pprocess.preprocess(args["dataset"], HXW)
(lb, cl_labels, num_classes) = Pprocess.class_labels(cl_labels)
loss_type = Pprocess.binary_or_categorical(num_classes)
(train_X, test_X, train_Y, test_Y) = Pprocess.split(data, cl_labels,
                                                    num_classes)
aug = Pprocess.data_aug(args["aug"])

print("\n...building the model...\n")
model = Tune.build_model(args["model"], HXW, channels, kernel, num_classes)
opt = Tune.optimizer(args["opt"], num_epochs)
model.compile(loss=loss_type, optimizer=opt, metrics=["accuracy"])

print("\n...training the model...\n")
hist_obj = Tune.fit(model, aug, num_epochs, batch_size, train_X, train_Y,
                    test_X, test_Y)
Result.save_model(model, model_path, lb, HXW)

print("\n...getting results of training & testing...\n")
predictions = model.predict(test_X, batch_size=batch_size)
Result.save_info(start_time, Result.acc_score(test_Y, predictions),
                 args["model"], num_epochs, args["opt"], args["aug"], HXW,
                 batch_size, kernel, len(data), args["plot"], notes)
Result.display_metrix(test_X, test_Y, predictions, model, lb.classes_, aug)
Result.display_plot(args["plot"], num_epochs, hist_obj)

Example #11
    latency['p99'] = get_metric(metrics_values, '99thPercentileLatency',
                                client)
    return stats


def client_stats(stats, client):
    resultdoc = {'result': {}}
    resultdoc['result']['runtime'] = one(stats['OVERALL'], 'RunTime', client)
    resultdoc['result']['throughput'] = one(stats['OVERALL'], 'Throughput',
                                            client)
    resultdoc['result']['read'] = operation_stats(stats['READ'], client)
    resultdoc['result']['write'] = operation_stats(stats['UPDATE'], client)
    return resultdoc


if __name__ == '__main__':
    import pprint

    class DummyArgs:
        def __getattr__(self, item):
            if item == 'path':
                return '.'
            if item == 'time':
                return datetime.datetime.now()
            return None

    args = DummyArgs()
    result = Result(args)
    parse_results(args, result)
    pprint.pprint(result.resultdoc())
Example #12
def main(args):
    result = Result(db=parse_server_info(args.server))
    num_files = 0
    sample = None
    stats = None

    for path in args.path:
        if os.path.isdir(path):
            for root, _, files in os.walk(path):
                for f in files:
                    try:
                        filename = os.path.join(root, f)
                        if args.verbose:
                            print("Opening '{}'".format(filename))

                        sample = Sample(filename,
                                        load_libs=args.load_libs,
                                        show_cfg=args.cfg,
                                        show_cg=args.callgraph,
                                        verbose=args.verbose)

                    except Exception as err:
                        print(err)
                        continue

                    if args.statistics:
                        stats = sample.statistics()
                    else:
                        stats = sample.analyze()

                    result.add_statistics(stats)
                    if args.verbose:
                        print_statistics(stats)

                    num_files += 1

                args.depth -= 1
                if args.depth <= 0:
                    break

        else:
            try:
                sample = Sample(path, load_libs=args.load_libs,
                                show_cfg=args.cfg, show_cg=args.callgraph,
                                verbose=args.verbose)

            except Exception as err:
                print(err)
                continue

            if args.statistics:
                stats = sample.statistics()
            else:
                stats = sample.analyze()

            result.add_statistics(stats)

            num_files += 1
            if args.verbose:
                print_statistics(stats, strings=True)

    for k, v in result.collect_statistics().items():
        print("{}: {}".format(k.replace("_", " "), v))