def execute(self):
     """Executed requested action."""
     if self.action == 'print-config':
         json.dump(self.config, sys.stdout, indent=4, sort_keys=True)
         print('')
     elif self.action == 'build':
         self.build_plan(serialize=True)
     elif self.action == 'run':
         self.build_plan()
         logging.info("Plan was built with %d experiments", len(self.plan))
         Processor(self.param_info).compute_variables(self.plan)
         if self.validation:
             validator = Validator(self.plan)
             validator.validate()
             if not validator.plan_ok:
                 validator.report()
                 logging.warning(
                     "Plan has not been validated. See reason(s) above.")
                 logging.warning(
                     "If you believe the validator is wrong, rerun the experimenter with the `--no-validation` flag.")
             else:
                 logging.info("Plan has been validated")
         if not self.validation or validator.plan_ok:
             Launcher.run(self.plan, self.__progress_file)
     elif self.action == 'validate':
         self.build_plan()
         Processor(self.param_info).compute_variables(self.plan)
         validator = Validator(self.plan)
         validator.validate()
         validator.report()
 def test_builder_8(self):
     """dlbs  ->  TestBuilder::test_builder_8                         [Test for plan builder #8.]"""
     plan = Builder.build(
         {
             'parameters': {
                 'exp.framework': 'bvlc_caffe',
                 'exp.model': 'vgg16'
             },
             'extensions': [{
                 'condition': {
                     'exp.framework': "([^_]+)_(.+)"
                 },
                 'parameters': {
                     'exp.device_batch': 128,
                     'exp.framework_id': '${__condition.exp.framework_0}',  # bvlc_caffe
                     'exp.fork': '${__condition.exp.framework_1}',  # bvlc
                     'exp.framework': '${__condition.exp.framework_2}'  # caffe
                 }
             }]
         },
         {},
         {})
     Processor().compute_variables(plan)
     self.assertListEqual(plan, [{
         'exp.framework': "caffe",
         'exp.model': 'vgg16',
         'exp.device_batch': 128,
         'exp.framework_id': 'bvlc_caffe',
         'exp.fork': 'bvlc'
     }])
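A minimal sketch of the regex-condition convention the test above relies on: when an extension's condition value is a regular expression, the builder exposes the full match as ${__condition.<param>_0} and each captured group as ${__condition.<param>_N}. Plain re illustrates the grouping (the dlbs internals are assumed, not shown here):

import re

# Group 0 is the full match; groups 1..N are the captures.
match = re.match(r"([^_]+)_(.+)", "bvlc_caffe")
print(match.group(0))  # 'bvlc_caffe' -> ${__condition.exp.framework_0}
print(match.group(1))  # 'bvlc'       -> ${__condition.exp.framework_1}
print(match.group(2))  # 'caffe'      -> ${__condition.exp.framework_2}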
 def test_builder_7(self):
     """dlbs  ->  TestBuilder::test_builder_7                         [Test for plan builder #7.]"""
     plan = Builder.build(
         {
             'parameters': {
                 'exp.framework': 'TensorFlow',
                 'exp.model': 'vgg16'
             },
             'extensions': [{
                 'condition': {
                     'exp.framework': "TensorFlow"
                 },
                 'parameters': {
                     'exp.device_batch': 128
                 }
             }, {
                 'condition': {
                     'exp.framework': "TensorFlow"
                 },
                 'parameters': {
                     'exp.disabled': 'true'
                 }
             }]
         }, {}, {})
     Processor().compute_variables(plan)
     self.assertListEqual(plan, [{
         'exp.framework': "TensorFlow",
         'exp.model': 'vgg16',
         'exp.device_batch': 128,
         'exp.disabled': 'true'
     }])
 def test_builder_5(self):
     """dlbs  ->  TestBuilder::test_builder_5                         [Test for plan builder #5.]"""
     plan = Builder.build(
         {
             'parameters': {
                 'exp.framework': 'TensorFlow',
                 'exp.device_batch': 256
             },
             'variables': {
                 'exp.model': ['vgg16', 'text_cnn']
             },
             'extensions': [{
                 'condition': {
                     'exp.framework': "TensorFlow",
                     'exp.model': 'text_cnn'
                 },
                 'parameters': {
                     'exp.device_batch': 512
                 }
             }]
         }, {}, {})
     Processor().compute_variables(plan)
     self.assertListEqual(plan, [{
         'exp.framework': "TensorFlow",
         'exp.model': 'vgg16',
         'exp.device_batch': 256
     }, {
         'exp.framework': "TensorFlow",
         'exp.model': 'text_cnn',
         'exp.device_batch': 512
     }])
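The three tests above all exercise the same mechanism: an extension's condition selects matching experiments, and its parameters are merged into them. A simplified, self-contained sketch of that behavior (exact-match conditions only; the real Builder also supports regex conditions and variable expansion):

def apply_extension(plan, condition, parameters):
    """Merge `parameters` into every experiment satisfying `condition`."""
    for experiment in plan:
        if all(experiment.get(k) == v for k, v in condition.items()):
            experiment.update(parameters)

plan = [{'exp.framework': 'TensorFlow', 'exp.model': 'vgg16', 'exp.device_batch': 256},
        {'exp.framework': 'TensorFlow', 'exp.model': 'text_cnn', 'exp.device_batch': 256}]
apply_extension(plan, {'exp.framework': 'TensorFlow', 'exp.model': 'text_cnn'},
                {'exp.device_batch': 512})
assert plan[1]['exp.device_batch'] == 512  # matches test_builder_5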
def update_benchmarks(args):
    """Update benchmarks by overriding parameters provided by a user.

    :param argparse args: Command line arguments.

    The following command line arguments are used:
    * ``args.input_file`` A file with benchmark results.
    * ``args.params``      Specification of mandatory parameters. For the format,
                           see the docstring of the ``get_params`` function.
    * ``args.output_file`` An output file with updated benchmark results.
    """
    # Load benchmarks and parameters.
    benchmarks = load_json_file(args.input_file)['data']
    prefix = '__'
    params = {prefix + k: v for k, v in get_params(args.params).items()}
    # Add prefixed parameters to all benchmarks.
    for benchmark in benchmarks:
        benchmark.update(params)
    # Process and compute variables
    Processor().compute_variables(benchmarks)
    # Strip the prefix, overwriting existing variables in case of a conflict.
    prefixed_keys = params.keys()
    prefix_len = len(prefix)

    output_benchmarks = []
    for benchmark in benchmarks:
        for k in prefixed_keys:
            benchmark[k[prefix_len:]] = benchmark[k]
            del benchmark[k]
        if benchmark.get('exp.model', '') != '':
            output_benchmarks.append(benchmark)
    benchmarks = output_benchmarks
    # Serialize updated benchmarks.
    DictUtils.dump_json_to_file({"data": benchmarks}, args.output_file)
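The prefix round trip above is the core trick: user overrides are namespaced with '__' so they survive variable computation without colliding with existing keys, and the prefix is then stripped, overwriting originals on conflict. A self-contained sketch:

prefix = '__'
overrides = {prefix + k: v for k, v in {'exp.model': 'resnet50'}.items()}
benchmark = {'exp.model': 'vgg16', 'exp.device_batch': 128}
benchmark.update(overrides)   # holds both 'exp.model' and '__exp.model'
for key in list(overrides):
    benchmark[key[len(prefix):]] = benchmark.pop(key)  # strip prefix, overwrite on conflict
assert benchmark == {'exp.model': 'resnet50', 'exp.device_batch': 128}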
    def update(self, query, use_processor=False):
        """Update benchmarks returning updated copy.

        Args:
            query: dict or callable.
            use_processor (bool): If true, apply variable processor. Will silently produce wrong results if
                benchmarks contain values that are dicts or lists.

        Returns:
            BenchData: Updated copy of benchmarks.
        """
        update_fn = query
        if isinstance(query, dict):
            def dict_update_fn(bench):
                bench.update(query)
            update_fn = dict_update_fn
        if not callable(update_fn):
            raise ValueError("Invalid update object (type='%s'). Expecting callable." % type(update_fn))

        benchmarks = copy.deepcopy(self.__benchmarks)
        for benchmark in benchmarks:
            update_fn(benchmark)

        if use_processor:
            Processor().compute_variables(benchmarks)
        return BenchData(benchmarks, create_copy=False)
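Hypothetical usage of update (the BenchData constructor signature is taken from the snippet above; the parameter names 'exp.status' and 'model' are illustrative only, not defined by dlbs):

bench = BenchData([{'exp.model': 'vgg16'}, {'exp.model': 'alexnet'}])
tagged = bench.update({'exp.status': 'ok'})                            # dict query: merged into each benchmark
renamed = bench.update(lambda b: b.update({'model': b['exp.model']}))  # callable query: mutates each copy in place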
 def compute_vars(self, inputs, expected_outputs):
     plan = copy.deepcopy(self.plan)
     exp = plan[0]
     for input_param in inputs:
         exp[input_param[0]] = input_param[1]
     Processor(self.param_info).compute_variables(plan)
     for expected_output in expected_outputs:
         self.assertEqual(
             exp[expected_output[0]], expected_output[1],
             "Actual output %s = %s differs from expected %s." %
             (expected_output[0], str(
                 exp[expected_output[0]]), expected_output[1]))
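A hypothetical call to this helper (parameter names are illustrative, not taken from dlbs): each input pair seeds the first experiment of the plan, and each expected-output pair is asserted after variable computation:

self.compute_vars(
    inputs=[('exp.framework', 'tensorflow'), ('exp.num_gpus', 4)],
    expected_outputs=[('exp.effective_batch', 1024)]
)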
    def parse_log_files(filenames, opts=None):
        """ Parses files and returns their parameters.

        :param list filenames: List of file names to parse.
        :param dict opts:      Dictionary of options.

        :rtype:  tuple<list, list>
        :return: A tuple of two lists - succeeded and failed benchmarks
        """
        opts = {} if opts is None else opts
        for key in ('filter_params', 'filter_query', 'output_params'):
            DictUtils.ensure_exists(opts, key)
        DictUtils.ensure_exists(opts, 'failed_benchmarks', 'discard')
        DictUtils.ensure_exists(opts, '_extended_params', {})
        DictUtils.ensure_exists(opts, 'ignore_errors', False)

        succeeded_benchmarks = []
        failed_benchmarks = []
        for filename in filenames:
            # Parse log file
            params = LogParser.parse_log_file(
                filename, ignore_errors=opts['ignore_errors'])
            # Skip benchmarks that do not match the filter.
            if len(params) == 0 or \
               not DictUtils.contains(params, opts['filter_params']) or \
               not DictUtils.match(params, opts['filter_query']):
                continue
            # Add extended parameters and compute them
            if len(opts['_extended_params']) > 0:
                params.update(opts['_extended_params'])
                Processor().compute_variables([params])
            # Determine whether this benchmark succeeded or failed.
            succeeded = 'results.throughput' in params and \
                        isinstance(params['results.throughput'], (int, float)) and \
                        params['results.throughput'] > 0
            # Get only those key/values that need to be serialized
            params = DictUtils.subdict(params, opts['output_params'])
            # Append benchmark either to succeeded or failed list
            if succeeded:
                succeeded_benchmarks.append(params)
            else:
                if opts['failed_benchmarks'] == 'keep':
                    succeeded_benchmarks.append(params)
                elif opts['failed_benchmarks'] == 'keep_separately':
                    failed_benchmarks.append(params)
        return succeeded_benchmarks, failed_benchmarks
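A hypothetical opts dictionary for parse_log_files (the keys and the failed_benchmarks values are taken from the code above; filter contents and file names are illustrative):

opts = {
    'filter_params': {'exp.framework': 'tensorflow'},   # benchmark must contain these key/values
    'filter_query': {},                                 # additional match query
    'output_params': ['exp.model', 'results.throughput'],
    'failed_benchmarks': 'keep_separately',             # 'discard' | 'keep' | 'keep_separately'
    'ignore_errors': True,
}
succeeded, failed = LogParser.parse_log_files(['run1.log', 'run2.log'], opts=opts)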
    def assert_match_is_correct(experiment, condition):
        """ Checks that parameters in **condition** have constant values in **experiment**.

        :param dict experiment: Dictionary of parameters for current experiment
        :param dict condition: Dictionary of parameters constraints. Here, we are
                               interested only in parameter names

        If the match cannot be performed correctly, the program terminates. An
        incorrect match is one where a parameter in **experiment** is not constant,
        i.e. its value depends on other parameters.
        """
        for param in condition:
            # If the parameter is not present in the experiment, skip it.
            if param not in experiment:
                continue
            assert Processor.is_param_constant(experiment[param]),\
                   "Condition must not use parameter that's not a constant (%s=%s)" % (param, experiment[param])