    def baseline(self):
        """
        Performs baselining for each environment
        """

        #
        # We want to process each environment (needs to be in a list, to avoid
        # unpacking the environment path into a list of strings)
        #
        routine_context = [[env] for env in self.env_tsts]

        # What Python routine do we want to call?
        routine = self.baseline_one_environment

        steps_per_stage = 0
        # build environment + run built-in test-case generation
        steps_per_stage += 1
        # execute once and create expecteds
        steps_per_stage += 1
        # for each time we prune the expected results
        steps_per_stage += self.baseline_iterations
        # copy .tst to Manage folder
        steps_per_stage += 1

        atg_misc.print_msg(
            "Baselining all environments ({:d} steps per environment) ...".
            format(steps_per_stage))

        # Run this routine in parallel given the provided context
        self.run_routine_parallel(routine,
                                  routine_context,
                                  steps_per_stage=steps_per_stage)

    def run_atg(self):
        """
        Runs ATG in parallel, with parallelism at the routine level
        """

        # Product of environments with routines in that environment
        routine_contexts = []
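        # envs_to_units is assumed to map each environment to a nested
        # mapping of source file -> routine names, illustratively:
        #   {"<env_path>": {"<src_file>": ["<routine_name>", ...]}}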

        # For each impacted environment ...
        for env in self.impacted_environments:

            # For each source file ...
            for src_file in self.envs_to_units[env]:

                # For each routine ...
                for routine_name in self.envs_to_units[env][src_file]:

                    # Store this combination
                    routine_contexts.append((env, src_file, routine_name))

        # What Python routine do we want to call?
        routine = self.run_atg_one_routine

        atg_misc.print_msg("Generating baseline test-cases ...")

        # Run this routine in parallel given the provided contexts
        self.run_routine_parallel(routine, routine_contexts)

    def gen_fptrs(self):
        """
        Generates function pointer mappings in parallel
        """

        # We want to process all environments
        routine_context = [[env] for env in self.impacted_environments]

        # What Python routine do we want to call?
        routine = self.gen_fptrs_one_environment

        atg_misc.print_msg("Generating function pointers...")

        # Run this routine in parallel given the provided contexts
        self.run_routine_parallel(routine, routine_context)

    def prune_and_merge(self):
        """
        Prunes and merges the test-cases for each environment
        """

        #
        # We want to process each environment (needs to be in a list, to avoid
        # unpacking the environment path into a list of strings)
        #
        routine_context = [[env] for env in self.env_tsts]

        # What Python routine do we want to call?
        routine = self.prune_and_merge_one_environment

        atg_misc.print_msg("Pruning test-cases ...")

        # Run this routine in parallel given the provided context
        self.run_routine_parallel(routine, routine_context)

    def merge_atg_routine_tst(self):
        """
        Merges the routine-level tst files into one big file, with parallelism
        at the environment level
        """

        #
        # We want to process each environment (needs to be in a list, to avoid
        # unpacking the environment path into a list of strings)
        #
        routine_context = [[env] for env in self.env_tsts]

        # What Python routine do we want to call?
        routine = self.merge_one_environment

        atg_misc.print_msg("Merging all ATG test-cases ...")

        # Run this routine in parallel given the provided context
        self.run_routine_parallel(routine, routine_context)
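
# The methods above all dispatch their work through self.run_routine_parallel,
# whose real implementation lives elsewhere in this codebase.  The standalone
# sketch below is a minimal, hypothetical stand-in that shows the assumed
# contract: every context is a list/tuple of positional arguments that gets
# unpacked into the routine, which is why single environment paths are wrapped
# as one-element lists above.  The thread pool, worker count and error
# handling are illustrative assumptions only.
from concurrent.futures import ThreadPoolExecutor


def run_routine_parallel_sketch(routine, contexts, steps_per_stage=None):
    """
    Calls routine(*context) for every context in 'contexts', in parallel
    """
    # steps_per_stage mirrors the call sites above; in the real helper it is
    # presumably used for progress reporting and is ignored in this sketch

    with ThreadPoolExecutor() as executor:
        # Submit one job per context; '*context' is the reason bare path
        # strings must be wrapped, otherwise they would be unpacked
        # character by character
        futures = [executor.submit(routine, *context) for context in contexts]

        # Wait for completion and surface any exception raised in a worker
        return [future.result() for future in futures]
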
    def process(self):
        """
        Calculates the 'interesting information' for a given set of environments
        """

        atg_misc.print_msg("Discovering environment dependencies")

        execution_context = []

        # For each environment/build directory
        for env_name, build_dir in self.environments:

            # Calculate the full path to the environment
            env_path = os.path.join(build_dir, env_name)

            # We expect that this environment has been built!
            assert os.path.exists(env_path) and os.path.isdir(env_path)

            execution_context.append([env_path])

        self.run_routine_parallel(self.process_env, execution_context)

        atg_misc.print_msg("Environment dependencies discovered")

    def process(self):
        """
        Processes the Manage project
        """
        atg_misc.print_msg("Processing Manage project")

        if not self.skip_build:
            # Get a temporary file with a Python suffix
            with tempfile.NamedTemporaryFile(suffix=".py") as temp_file:

                # Full path
                full_temp_file = temp_file.name

                # Basename
                basename = os.path.basename(full_temp_file)

                # Add the script
                self.add_script(full_temp_file, basename)

                # Populate Manage's build folder
                self.populate_build_folder()

                # Remove the script
                self.remove_script(basename)

        # Find all of the build environments (starting from our Manage project root)
        self.discover_environments()

        if not self.skip_build:
            # Build all found environments
            self.build_environments()
        else:
            # Find those that have already built
            self.check_built_environments()

        atg_misc.print_msg("Manage project processed")

    def store_envs(self):
        """
        After generating function pointers, need to migrate the changes back
        into the Manage 'environment' folder
        """

        atg_misc.print_msg("Storing updated environments ...")

        # Name of the vcm
        vcm_name = os.path.basename(self.configuration.manage_vcm_path)

        # Where does the vcm live?
        manage_parent_folder = os.path.dirname(
            self.configuration.manage_vcm_path)

        # Where are our environments?
        manage_envs_folder = os.path.join(
            os.path.splitext(self.configuration.manage_vcm_path)[0],
            "environment")

        # What are the names of our environments?
        all_envs = glob.glob(os.path.join(manage_envs_folder, "*"))

        # What files have we backed-up?
        preserved = []
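        # Each entry will be an (original, backup) pair: the artefact is
        # copied to '<artefact>.atg' before Manage runs, then moved back over
        # the original once Manage has finished (see the restore loop below)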

        # For each environment
        for env in all_envs:

            # Find all of the artefacts
            all_artefacts = glob.glob(os.path.join(env, "*"))

            # For each artefact
            for artefact in all_artefacts:

                # Get the extension
                ext = os.path.splitext(artefact)[1]

                # We don't want .env files!
                if ext == ".env":
                    continue

                # Create a back-up
                to_keep = (artefact,
                           "{artefact:s}.atg".format(artefact=artefact))
                preserved.append(to_keep)
                shutil.copyfile(*to_keep)

        # What's our command?
        cmd = os.path.expandvars(
            "{manage:s} -p {vcm_name:s} --apply-changes --force --verbose".
            format(
                manage=os.path.expandvars(
                    os.path.join("$VECTORCAST_DIR", "manage")),
                vcm_name=vcm_name,
            ))

        # Where do we want the log to go?
        manage_log_file = os.path.join(manage_parent_folder, "apply_changes")

        # Run Manage
        atg_misc.run_cmd(
            cmd,
            cwd=manage_parent_folder,
            log_file_prefix=manage_log_file,
        )

        # Restore the changed files
        for overwrite, backup in preserved:
            shutil.move(backup, overwrite)

def atg_execution(options):
    """
    Performs ATG
    """

    process_options(options)
    configuration = load_configuration(options)

    if configuration.find_unchanged_files is not None:
        atg_misc.print_warn(
            "Finding unchanged files was configured, discovering changed files"
        )
        unchanged_files = configuration.find_unchanged_files()
    else:
        atg_misc.print_warn(
            "Finding unchanged files was not configured, all files will be processed"
        )
        unchanged_files = set()

    # Create our Manage project
    manage_builder = build_manage.ManageBuilder(configuration)
    manage_builder.process()

    # Discover the environments (not necessarily tied to Manage!)
    environment_dependencies = atg_discover.DiscoverEnvironmentDependencies(
        configuration, manage_builder)
    environment_dependencies.process()

    # Our set of impacted environments
    impacted_envs = set()

    # For each environment with its dependencies ...
    for environment, dependencies in environment_dependencies.envs_to_fnames.items(
    ):

        # ... check whether it *only* uses unchanged files
        uses_only_unchanged_files = dependencies.issubset(unchanged_files)

        # If not ...
        if not uses_only_unchanged_files:
            # ... flag it as impacted!
            impacted_envs.add(environment)

    atg_misc.print_warn(
        "{impacted:d} environments need processing (total: {total:d} environments)"
        .format(impacted=len(impacted_envs),
                total=len(manage_builder.all_environments)))

    # Dry run or reporting
    if options.report:

        # Generate the report
        atg_debug_report.debug_report(
            configuration,
            unchanged_files,
            manage_builder,
            environment_dependencies,
            impacted_envs,
        )

    if options.dry_run:

        # Let the user know something has happened
        atg_misc.print_warn("Dry-run mode: analysis only, no tests generated")

        # If we're dry run, finish here
        return 0

    # Create an incremental ATG object
    ia = atg_processor.ProcessProject(
        configuration,
        impacted_envs,
        environment_dependencies,
    )

    # Process our environments
    ia.process()

    # Store files
    configuration.store_updated_tests(ia.updated_files)

    atg_misc.print_msg("Processing completed!")

    return 0
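
# Hypothetical command-line wiring, for illustration only: the real project
# presumably has its own front end, and process_options()/load_configuration()
# will expect more fields than the two flags shown here (which are the only
# options read directly by atg_execution above).
if __name__ == "__main__":
    import argparse
    import sys

    parser = argparse.ArgumentParser(description="Incremental ATG driver (sketch)")
    parser.add_argument("--report", action="store_true",
                        help="generate the debug report of the impact analysis")
    parser.add_argument("--dry-run", dest="dry_run", action="store_true",
                        help="analysis only, do not generate tests")

    sys.exit(atg_execution(parser.parse_args()))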

    def build_environments(self):
        # Build the environments in parallel
        atg_misc.print_msg("Building Manage environments ...")
        self.run_routine_parallel(self.build_env, self.all_environments)