Example #1
    def test_add_export_key_error(self):
        exporter = E.CSVExporter('TE')
        with self.assertRaises(KeyError) as e_cm:
            exporter.add_export('a', [2])

        ex = e_cm.exception
        self.assertEqual(str(ex), "'a'")
Example #2
    def test_init_None(self):
        with self.assertRaises(RuntimeError) as ex_cm:
            exporter = E.CSVExporter(None)

        exception = ex_cm.exception
        self.assertEqual(str(exception),
                         'name argument for CSVExport-ctor must not be None')
Example #3
    def test_add_new_export_key_exists(self):
        exporter = E.CSVExporter('TE')
        exporter.add_new_export('a', [1])
        with self.assertRaises(KeyError) as e_cm:
            exporter.add_new_export('a', [2])

        ex = e_cm.exception
        # KeyError is special: str() returns the repr() of its argument, so the
        # message comes back quoted; the other exceptions do not behave this way
        self.assertEqual(str(ex), "'Key already exists'")
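The quoting behavior noted in the comment is by design, not an inconsistency: KeyError.__str__ returns the repr() of its argument so that, for example, a missing key 1 and a missing key '1' stay distinguishable in the message. A minimal sketch:

try:
    {}['a']
except KeyError as err:
    print(str(err))    # 'a'  -- quoted: repr() of the key

try:
    raise RuntimeError('a')
except RuntimeError as err:
    print(str(err))    # a  -- the plain message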
Example #4
    def test_export(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)
        rre.export("test_file")
        with open("test_file", "r") as tf:
            data = tf.read()
            expected_data = '"Type of Run","Accumulated Runtime","Number of Runs","Accumulated Runtime","Number of Runs"\n"test","1","1","2","2"\n'
            self.assertEqual(data, expected_data)
        os.remove("test_file")
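One caveat in test_export above: if an assertion fails, the trailing os.remove("test_file") is never reached and the file is left behind. A variant sketch that registers cleanup up front with unittest's addCleanup, assuming the same E.RunResultExporter API as the test (os, shutil, and tempfile are standard-library imports):

    def test_export(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)

        # Write into a fresh temp directory and register its removal first,
        # so cleanup runs even when an assertion below fails.
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmp_dir)
        out_file = os.path.join(tmp_dir, "test_file")

        rre.export(out_file)
        with open(out_file, "r") as tf:
            expected_data = '"Type of Run","Accumulated Runtime","Number of Runs","Accumulated Runtime","Number of Runs"\n"test","1","1","2","2"\n'
            self.assertEqual(tf.read(), expected_data)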
Example #5
    def test_add_row(self):
        rre = E.RunResultExporter()
        rr = M.RunResult(1, 1)
        rr.add_values(2, 2)
        rre.add_row("test", rr)
        self.assertEqual(len(rre.rows), 1)
        self.assertEqual(len(rre.rows[0]), 5)
        self.assertEqual(rre.rows[0][0], "test")
        self.assertEqual(rre.rows[0][1], 1)
        self.assertEqual(rre.rows[0][2], 1)
        self.assertEqual(rre.rows[0][3], 2)
        self.assertEqual(rre.rows[0][4], 2)
        self.assertEqual(rre.width, 5)
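Examples 4 and 5 together fix the observable behavior of RunResultExporter: add_row flattens a label plus the RunResult's (runtime, count) pairs into one row and tracks the widest row, while export writes a fully quoted CSV whose header repeats the column pair once per value pair. A hedged reconstruction; the get_values() accessor on M.RunResult is an assumption, not something the tests show:

import csv

class RunResultExporter:
    # Hedged reconstruction from Examples 4 and 5, not the original source.

    def __init__(self):
        self.rows = []
        self.width = 0

    def add_row(self, type_of_run, run_result):
        # Flatten to [type, runtime_0, num_runs_0, runtime_1, num_runs_1, ...].
        # get_values() is a hypothetical accessor for the (runtime, count) pairs.
        row = [type_of_run]
        for runtime, num_runs in run_result.get_values():
            row.extend([runtime, num_runs])
        self.rows.append(row)
        self.width = max(self.width, len(row))

    def export(self, file_name, dialect='excel'):
        with open(file_name, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, dialect=dialect,
                                quoting=csv.QUOTE_ALL, lineterminator='\n')
            header = ['Type of Run']
            for _ in range((self.width - 1) // 2):
                header.extend(['Accumulated Runtime', 'Number of Runs'])
            writer.writerow(header)
            writer.writerows(self.rows)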
Example #6
    def test_add_new_export_None_arguments(self):
        exporter = E.CSVExporter('TE')
        with self.assertRaises(RuntimeError) as e_cm:
            exporter.add_new_export(None, [1, 2])

        exception = e_cm.exception
        self.assertEqual(str(exception), 'name argument needs to be not None')

        with self.assertRaises(RuntimeError) as e_cm:
            exporter.add_new_export('key', None)

        exception = e_cm.exception
        self.assertEqual(str(exception),
                         'values argument needs to be not None')
Example #7
def execute_with_config(runner: Runner, analyzer: A, pira_iters: int,
                        target_config: TargetConfiguration,
                        csv_config: CSVConfiguration) -> None:
    try:
        instrument = False
        pira_iterations = pira_iters
        hybrid_filtering = target_config.is_hybrid_filtering()
        compile_time_filtering = target_config.is_compile_time_filtering()
        hybrid_filter_iters = target_config.get_hybrid_filter_iters()

        rr_exporter = E.RunResultExporter()

        # Build without any instrumentation
        L.get_logger().log(
            'Building vanilla version for baseline measurements', level='info')
        vanilla_builder = BU(target_config, instrument)
        tracker = T.TimeTracker()
        tracker.m_track('Vanilla Build', vanilla_builder, 'build')

        # Run without instrumentation for baseline
        L.get_logger().log('Running baseline measurements', level='info')
        vanilla_rr = runner.do_baseline_run(target_config)
        L.get_logger().log('Pira::execute_with_config: RunResult: ' +
                           str(vanilla_rr) + ' | avg: ' +
                           str(vanilla_rr.get_average()),
                           level='debug')
        instr_file = ''

        if csv_config.should_export():
            rr_exporter.add_row('Vanilla', vanilla_rr)

        for x in range(0, pira_iterations):
            L.get_logger().log('Running instrumentation iteration ' + str(x),
                               level='info')

            # Only run the pgoe to get the functions name
            iteration_tracker = T.TimeTracker()

            # Analysis Phase
            instr_file = analyzer.analyze(target_config, x)
            L.get_logger().log('[WHITELIST] $' + str(x) + '$ ' +
                               str(U.lines_in_file(instr_file)),
                               level='perf')
            U.shell('stat ' + instr_file)

            # After baseline measurement is complete, do the instrumented build/run
            # This is only necessary in every iteration when run in compile-time mode.
            # For hybrid-filtering this is done after the specified amount of iterations
            if (hybrid_filtering and x % hybrid_filter_iters == 0) \
                    or x == 0 or compile_time_filtering:
                instrument = True
                instr_builder = BU(target_config, instrument, instr_file)
                tracker.m_track('Instrument Build', instr_builder, 'build')

            # Run Phase
            L.get_logger().log('Running profiling measurements', level='info')
            instr_rr = runner.do_profile_run(target_config, x)

            if csv_config.should_export():
                rr_exporter.add_row('Instrumented ' + str(x), instr_rr)

            # Compute overhead of instrumentation
            ovh_percentage = instr_rr.compute_overhead(vanilla_rr)
            L.get_logger().log('[RUNTIME] $' + str(x) + '$ ' +
                               str(instr_rr.get_average()),
                               level='perf')
            L.get_logger().log('[OVERHEAD] $' + str(x) + '$ ' +
                               str(ovh_percentage),
                               level='perf')

            iteration_tracker.stop()
            user_time, system_time = iteration_tracker.get_time()
            L.get_logger().log('[ITERTIME] $' + str(x) + '$ ' +
                               str(user_time) + ', ' + str(system_time),
                               level='perf')

        if csv_config.should_export():
            file_name = (target_config.get_target() + '_' +
                         target_config.get_flavor() + '.csv')
            csv_file = os.path.join(csv_config.get_csv_dir(), file_name)
            try:
                U.make_dir(csv_config.get_csv_dir())
                rr_exporter.export(csv_file, csv_config.get_csv_dialect())
            except Exception as e:
                L.get_logger().log(
                    'Pira::execute_with_config: Problem writing CSV file\nMessage:\n'
                    + str(e),
                    level='error')

    except Exception as e:
        L.get_logger().log(
            'Pira::execute_with_config: Problem during preparation of run.\nMessage:\n'
            + str(e),
            level='error')
        raise RuntimeError(str(e))
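One detail worth flagging in the loop guard above: the integer comparisons must use ==, not is. The is operator tests object identity and only appears to work for small numbers because CPython caches ints in roughly [-5, 256]; CPython 3.8+ even emits a SyntaxWarning for is against a literal. A minimal demonstration (int(...) defeats compile-time constant folding, which could otherwise merge equal literals):

# `==` compares values; `is` compares object identity.
small_a = int("7")
small_b = int("7")
print(small_a == small_b)   # True
print(small_a is small_b)   # True, but only because CPython caches small ints

big_a = int("1000")
big_b = int("1000")
print(big_a == big_b)       # True
print(big_a is big_b)       # False in CPython: 1000 is outside the small-int cache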
Example #8
# -*- coding: utf-8 -*-
__author__ = 'viktor'

from lib import Exporter
import os

data_path = os.path.join(os.path.dirname(__file__), 'data')
project_path = '/var/www/shelepen/drupal/sites/default/files'
exporter = Exporter('root', '', 'shelepen', project_path, data_path)
exporter.prepare_files()
exporter.prepare_articles()
exporter.prepare_pages()
Example #9
    def test_init(self):
        rre = E.RunResultExporter()
        self.assertIsNotNone(rre)
Example #10
    def test_add_export(self):
        exporter = E.CSVExporter('TE')
        exporter.add_new_export('a', [1])
        exporter.add_export('a', [2])
        self.assertEqual(exporter._exports['a'], [1, 2])
Example #11
    def test_init(self):
        exporter = E.CSVExporter('test exporter')
        self.assertIsNotNone(exporter)
        self.assertEqual(exporter.get_name(), 'test exporter')
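Taken together, Examples 1, 2, 3, 6, 10, and 11 outline the observable contract of CSVExporter: constructor and argument validation via RuntimeError, duplicate-key detection via KeyError, and list-append semantics for add_export. A hedged reconstruction inferred from those tests, not the original source:

class CSVExporter:
    # Hedged reconstruction; behavior matches the tests above.

    def __init__(self, name):
        if name is None:
            raise RuntimeError('name argument for CSVExport-ctor must not be None')
        self._name = name
        self._exports = {}

    def get_name(self):
        return self._name

    def add_new_export(self, name, values):
        if name is None:
            raise RuntimeError('name argument needs to be not None')
        if values is None:
            raise RuntimeError('values argument needs to be not None')
        if name in self._exports:
            raise KeyError('Key already exists')
        self._exports[name] = values

    def add_export(self, name, values):
        # Looking up a missing key raises KeyError, whose str() is the quoted key.
        self._exports[name].extend(values)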