Example #1
    def from_experiments(cls,
                         experiments: List[rv.ExperimentConfig],
                         commands_to_run: List[str],
                         splits: int = 1):
        command_definitions = []

        for experiment in experiments:
            e = deepcopy(experiment)
            log.debug(
                'Generating command definitions for experiment {}...'.format(
                    e.id))
            # Update configs for every registered command type so implicit
            # values get resolved, then append any requested commands that are
            # not in that registered list.
            commands_to_update = rv.all_commands() + \
                                 list(set(commands_to_run) - set(rv.all_commands()))
            for command_type in commands_to_update:
                log.debug(
                    'Updating config for command {}...'.format(command_type))
                e.update_for_command(command_type, e)
                if command_type in commands_to_run:
                    log.debug(
                        'Creating command configurations for {}...'.format(
                            command_type))
                    base_command_config = e.make_command_config(command_type)
                    # A command config may be split into several configs so
                    # that the work can be run in parallel.
                    for command_config in base_command_config.split(splits):
                        io_def = command_config.report_io()
                        command_def = cls(e.id, command_config, io_def)
                        command_definitions.append(command_def)

        return command_definitions
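
A minimal usage sketch for the classmethod above. `my_experiment` is an assumed
rv.ExperimentConfig built elsewhere, and rv.CHIP and rv.TRAIN are assumed to be
registered command types in this Raster Vision version.

# Hypothetical usage sketch (my_experiment and the command constants are
# assumptions, not part of the original example).
defs = CommandDefinition.from_experiments(
    [my_experiment], commands_to_run=[rv.CHIP, rv.TRAIN], splits=4)
for d in defs:
    print(d.experiment_id, d.command_config.command_type)
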
Example #2
def run_test(test, use_tf, temp_dir):
    errors = []
    experiment = get_experiment(test, use_tf, temp_dir)
    commands_to_run = rv.all_commands()

    # Check serialization
    pp_uri = os.path.join(experiment.bundle_uri, 'predict_package.zip')
    experiment.task.predict_package_uri = pp_uri
    msg = experiment.to_proto()
    experiment = rv.ExperimentConfig.from_proto(msg)

    # Check that running doesn't raise any exceptions.
    try:
        IntegrationTestExperimentRunner(os.path.join(temp_dir, test.lower())) \
            .run(experiment, rerun_commands=True, splits=2,
                 commands_to_run=commands_to_run)

    except Exception:
        errors.append(
            TestError(test, 'raised an exception while running',
                      traceback.format_exc()))
        return errors

    # Check that the eval is similar to expected eval.
    errors.extend(check_eval(test, temp_dir))

    if not errors:
        errors.extend(test_prediction_package(experiment, test, temp_dir))
        errors.extend(
            test_prediction_package(experiment,
                                    test,
                                    temp_dir,
                                    check_channel_order=True))

    return errors
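
A possible driver for run_test, sketched under assumptions: the TESTS list and
its entries are illustrative names, and collected TestError objects are simply
printed.

# Illustrative driver sketch (TESTS and the test identifiers are assumptions).
import tempfile

TESTS = ['CHIP_CLASSIFICATION']  # hypothetical test identifier
all_errors = []
with tempfile.TemporaryDirectory() as temp_dir:
    for test in TESTS:
        all_errors.extend(run_test(test, use_tf=False, temp_dir=temp_dir))

for error in all_errors:
    print(error)
if all_errors:
    raise Exception('Integration tests failed.')
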
Example #3
    def fully_resolve(self):
        """Returns a fully resolved copy of this  experiment.

        A fully resolved experiment has all implicit paths put into place,
        and is constructed by calling update_for_command for each command.
        """
        e = deepcopy(self)
        for command_type in rv.all_commands():
            e.update_for_command(command_type, e)
        return e
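
A short sketch of how fully_resolve might be used before persisting a config;
`experiment` here is assumed to be an rv.ExperimentConfig built elsewhere.

# Hypothetical usage: resolve implicit paths, then persist the config
# (mirrors the experiment.fully_resolve().save_config() call in the runner
# below).
resolved = experiment.fully_resolve()
resolved.save_config()
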
Example #4
    def run(self,
            experiments: Union[List[rv.ExperimentConfig], rv.ExperimentConfig],
            command_configs: Union[List[rv.CommandConfig],
                                   rv.CommandConfig] = None,
            commands_to_run=None,
            rerun_commands=False,
            skip_file_check=False,
            dry_run: bool = False,
            splits: int = 1):
        if not commands_to_run:
            commands_to_run = rv.all_commands()

        if not isinstance(experiments, list):
            experiments = [experiments]

        log.debug('Generating command definitions from experiments...')
        command_definitions = CommandDefinition.from_experiments(
            experiments, commands_to_run, splits)

        if command_configs:
            if not isinstance(command_configs, list):
                command_configs = [command_configs]
            log.debug('Generating command definitions from commands...')
            command_definitions.extend(
                CommandDefinition.from_command_configs(command_configs,
                                                       commands_to_run,
                                                       splits))

        # Filter out commands that don't have any output.
        log.debug('Filtering commands that do not have any output...')
        (command_definitions,
         no_output) = CommandDefinition.filter_no_output(command_definitions)

        # Print commands that have no output
        if dry_run:
            if no_output:
                print()
                click.echo(
                    click.style(
                        'Commands not run because they have no output:',
                        fg='yellow',
                        bold=True,
                        underline=True))
                for command in no_output:
                    self.print_command(command)
                print()

        # Check if there are any unsatisfied inputs.
        log.debug('Checking for missing input from configuration...')
        missing_inputs = CommandDefinition.get_missing_inputs(
            command_definitions)
        if missing_inputs:
            # TODO: Replace exception with logging?
            s = ''
            for exp_id in missing_inputs:
                s += 'In {}:\n\t{}\n'.format(
                    exp_id, '\n\t'.join(missing_inputs[exp_id]))

            raise rv.ConfigError('There were missing input URIs '
                                 'that are required, but were not '
                                 'able to be derived: \n{}'.format(s))

        # Remove duplicate commands, defining equality for a command by
        # the tuple (command_type, input_uris, output_uris)
        log.debug('Removing duplicate commands...')
        (unique_commands, skipped_duplicate_commands
         ) = CommandDefinition.remove_duplicates(command_definitions)

        if dry_run:
            if skipped_duplicate_commands:
                print()
                msg = ('Commands determined to be '
                       'duplicates based on input and output:')
                click.echo(
                    click.style(msg, fg='yellow', bold=True, underline=True))
                for command in skipped_duplicate_commands:
                    self.print_command(command)
                print()

        # Ensure that for each type of command, there are none that clobber
        # each other's output.
        log.debug("Ensuring commands do not overwrite each other's outputs...")
        clashing_commands = CommandDefinition.get_clashing_commands(
            unique_commands)

        if clashing_commands:
            clashing_msgs = []
            for (output_uri, c_defs) in clashing_commands:
                command_type = c_defs[0].command_config.command_type
                experiments = ', '.join(
                    map(lambda c: c.experiment_id or command_type, c_defs))
                clashing_msgs.append(
                    'The {} command in the following experiments outputs '
                    'to {}, but the commands are not equal: {}'.format(
                        command_type, output_uri, experiments))
            # TODO: Replace with logging?
            s = '\n\t'.join(clashing_msgs)

            raise rv.ConfigError('ERROR: Command outputs will '
                                 'overwrite each other: \n{}\n'.format(s))

        log.debug('Constructing command DAG...')
        command_dag = CommandDAG(unique_commands,
                                 rerun_commands,
                                 skip_file_check=skip_file_check)

        # Print conflicting or already fulfilled commands
        if dry_run:
            skipped_commands = command_dag.skipped_commands
            if skipped_commands:
                print()
                msg = 'Commands skipped because output already exists:'
                click.echo(
                    click.style(msg, fg='yellow', bold=True, underline=True))
                for command in skipped_commands:
                    self.print_command(command)
                print()

        # Save experiment configs
        experiments_by_id = dict(map(lambda e: (e.id, e), experiments))
        seen_ids = set([])
        for command_def in command_dag.get_command_definitions():
            if command_def.experiment_id is not None and \
               command_def.experiment_id not in seen_ids:
                seen_ids.add(command_def.experiment_id)
                experiment = experiments_by_id[command_def.experiment_id]
                if not dry_run:
                    log.debug(
                        'Saving experiment configuration for experiment {}'.
                        format(experiment.id))
                    experiment.fully_resolve().save_config()

        if dry_run:
            print()
            sorted_command_ids = command_dag.get_sorted_command_ids()
            if len(sorted_command_ids) == 0:
                click.echo(
                    click.style('No commands to run!', fg='red', bold=True))
                print()
            else:
                click.echo(
                    click.style('Commands to be run in this order:',
                                fg='green',
                                bold=True,
                                underline=True))
                for command_id in sorted_command_ids:
                    command_def = command_dag.get_command_definition(
                        command_id)
                    self.print_command(command_def, command_id, command_dag)
                    print()
            self._dry_run(command_dag)
        else:
            log.debug('Running experiment...')
            self._run_experiment(command_dag)
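
For context, a hedged sketch of calling this run method directly for a dry run.
rv.LOCAL and the command constants (rv.ANALYZE, rv.CHIP, rv.TRAIN) are assumed
to be valid keys in this Raster Vision version; my_experiment is a placeholder.

# Hypothetical invocation sketch (runner key and command constants assumed).
runner = rv.ExperimentRunner.get_runner(rv.LOCAL)
runner.run(my_experiment,
           commands_to_run=[rv.ANALYZE, rv.CHIP, rv.TRAIN],
           dry_run=True,
           splits=2)
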
Example #5
def run(runner, commands, experiment_module, dry_run, skip_file_check, arg,
        prefix, methods, path, filters, rerun, tempdir, splits):
    """Run Raster Vision commands from experiments, using the
    experiment runner named RUNNER."""

    if tempdir:
        RVConfig.set_tmp_dir(tempdir)

    # Validate runner
    valid_runners = list(
        map(lambda x: x.lower(), rv.ExperimentRunner.list_runners()))
    if runner not in valid_runners:
        print_error('Invalid experiment runner: "{}". '
                    'Must be one of: "{}"'.format(runner,
                                                  '", "'.join(valid_runners)))
        sys.exit(1)

    runner = ExperimentRunner.get_runner(runner)

    if experiment_module and path:
        print_error('Must specify only one of experiment_module or path')
        sys.exit(1)

    if not commands:
        commands = rv.all_commands()
    else:
        commands = list(map(lambda x: x.upper(), commands))

    experiment_args = {}
    for k, v in arg:
        experiment_args[k] = v

    loader = ExperimentLoader(experiment_args=experiment_args,
                              experiment_method_prefix=prefix,
                              experiment_method_patterns=methods,
                              experiment_name_patterns=filters)
    try:
        if experiment_module:
            experiments, command_configs = loader.load_from_module(
                experiment_module)
        elif path:
            experiments, command_configs = loader.load_from_file(path)
        else:
            experiments, command_configs = loader.load_from_module('__main__')
    except LoaderError as e:
        print_error(str(e))
        sys.exit(1)

    if not experiments and not command_configs:
        if experiment_module:
            print_error(
                'No experiments found in {}.'.format(experiment_module))
        elif path:
            print_error('No experiments found in {}.'.format(path))
        else:
            print_error('No experiments found.')

    runner.run(experiments,
               command_configs=command_configs,
               commands_to_run=commands,
               rerun_commands=rerun,
               skip_file_check=skip_file_check,
               dry_run=dry_run,
               splits=splits)
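
Roughly the same flow without the click layer, using only calls that appear in
the snippets above; the module name is a placeholder, and the loader keyword
values are assumed defaults rather than confirmed ones.

# Programmatic sketch of the CLI flow above (module name, loader keyword
# values, and the 'local' runner key are assumptions).
loader = ExperimentLoader(experiment_args={},
                          experiment_method_prefix='exp',
                          experiment_method_patterns=None,
                          experiment_name_patterns=None)
experiments, command_configs = loader.load_from_module(
    'my_package.my_experiments')

runner = rv.ExperimentRunner.get_runner('local')
runner.run(experiments,
           command_configs=command_configs,
           commands_to_run=rv.all_commands(),
           dry_run=True,
           splits=2)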