Example #1
    def _build_resources(self):
        for name, dest, content in self.new_files:
            filename = self._get_abs_path(dest)
            tools.makedirs(os.path.dirname(filename))
            with open(filename, 'w') as file:
                logging.debug('Writing file "%s"' % filename)
                file.write(content)
                if dest == 'run':
                    # Make run script executable.
                    # TODO: Replace by adding an "executable" kwarg in add_new_file().
                    os.chmod(filename, 0o755)

        for name, source, dest, required, symlink in self.resources:
            if required and not os.path.exists(source):
                logging.critical('Required resource not found: %s' % source)
            dest = self._get_abs_path(dest)
            if not dest.startswith(self.path):
                # Only copy resources that reside in the experiment/run dir.
                continue
            if symlink:
                # Do not create a symlink if the file doesn't exist.
                if not os.path.exists(source):
                    continue
                source = self._get_rel_path(source)
                os.symlink(source, dest)
                logging.debug('Linking from %s to %s' % (source, dest))
                continue

            # Even if the directory containing a resource has already been added,
            # we copy the resource since we might want to overwrite it.
            logging.debug('Copying %s to %s' % (source, dest))
            tools.copy(source, dest, required, self.ignores)
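The tuples unpacked by _build_resources come from two registration lists. A minimal sketch of the expected shapes, with helper names that are assumptions inferred from the unpacking, not a verified API:

# Sketch only: helper names are assumed from the tuple unpacking above.
new_files = []
resources = []

def add_new_file(name, dest, content):
    new_files.append((name, dest, content))

def add_resource(name, source, dest, required=True, symlink=False):
    resources.append((name, source, dest, required, symlink))

add_new_file('run-script', 'run', '#! /bin/bash\necho hello\n')
add_resource('planner', '/path/to/planner', 'planner', required=True)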
Example #2
    def _build_resources(self):
        for name, dest, content in self.new_files:
            filename = self._get_abs_path(dest)
            tools.makedirs(os.path.dirname(filename))
            with open(filename, 'w') as file:
                logging.debug('Writing file "%s"' % filename)
                file.write(content)
                if dest == 'run':
                    # Make run script executable.
                    # TODO: Replace by adding an "executable" kwarg in add_new_file().
                    os.chmod(filename, 0o755)

        for name, source, dest, required, symlink in self.resources:
            if required and not os.path.exists(source):
                logging.critical('Required resource not found: %s' % source)
            dest = self._get_abs_path(dest)
            if not dest.startswith(self.path):
                # Only copy resources that reside in the experiment/run dir.
                continue
            if symlink:
                # Do not create a symlink if the file doesn't exist.
                if not os.path.exists(source):
                    continue
                source = self._get_rel_path(source)
                os.symlink(source, dest)
                logging.debug('Linking from %s to %s' % (source, dest))
                continue

            # Even if the directory containing a resource has already been added,
            # we copy the resource since we might want to overwrite it.
            logging.debug('Copying %s to %s' % (source, dest))
            tools.copy(source, dest, required, self.ignores)
Example #3
    def create_commulative_h_ff_to_h_table(self, domain, table):
        print "---------------------------"
        print table
        print "---------------------------"

        #calculate number of solved problems
        total_solved = len(table)

        ratio_set = list(set(table))  # remove duplicate elements
        ratio_dict = {}
        for value in sorted(ratio_set):
            smaller_than_value_counter = 0
            for compared_value in table:
                if compared_value <= value:
                    smaller_than_value_counter += 1
            ratio_dict[value] = smaller_than_value_counter * 100 / total_solved

        print(ratio_dict)
        print("---------------------------")

        # Write results into .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_h-ff_to_h-star.csv'
        file = open(domain_file, "w")

        sorted_ratio_dict_keys = sorted(ratio_dict.keys())
        for hstar in sorted_ratio_dict_keys:
            toWrite = str(hstar) + ',' + str(ratio_dict[hstar]) + '\n'
            file.write(toWrite)
        file.close()
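The nested loops above make the computation quadratic in the table size. A sketch of the same cumulative-percentage calculation with the standard library's bisect module; the floor division mirrors the integer division in the original:

import bisect

def cumulative_percentages(table):
    total_solved = len(table)
    ordered = sorted(table)
    # For each distinct value, count how many entries are <= value.
    return {
        value: bisect.bisect_right(ordered, value) * 100 // total_solved
        for value in sorted(set(table))
    }

print(cumulative_percentages([1.0, 1.5, 1.5, 2.0]))  # {1.0: 25, 1.5: 75, 2.0: 100}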
Example #4
    def build(self, overwrite=False, only_main_script=False, no_main_script=False):
        """Apply all the actions to the filesystem.

        If *overwrite* is True and the experiment directory exists, it is
        overwritten without prior confirmation.
        """
        logging.info('Exp Dir: "%s"' % self.path)

        self._set_run_dirs()

        # TODO: Currently no_main_script is always False.
        if not no_main_script:
            # This is the first part where we only write the main script.
            # We only overwrite the exp dir in the first part.
            if os.path.exists(self.path):
                runs_exist = any(path.startswith('runs')
                                 for path in os.listdir(self.path))
                logging.info('The directory "%s" contains run directories: %s' %
                             (self.path, runs_exist))
                # Overwrite if overwrite is True or if no runs exist.
                tools.overwrite_dir(self.path, overwrite or not runs_exist)
            else:
                tools.makedirs(self.path)
            self._build_main_script()
        if only_main_script:
            return

        # This is the second part where we write everything else
        self._build_resources()
        self._build_runs()
        self._build_properties_file()
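The overwrite guard in build() can be read in isolation: an existing experiment directory is clobbered without *overwrite* only when it contains no run directories yet. A standalone sketch of that check:

import os

def safe_to_overwrite(exp_path):
    # True if the experiment dir contains no "runs*" entries yet.
    return not any(entry.startswith('runs') for entry in os.listdir(exp_path))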
Example #5
    def create_commulative_h_ff_to_h_table(self, domain, table):
        print "---------------------------"
        print table
        print "---------------------------"

        #calculate number of solved problems
        total_solved = len(table)

        ratio_set = list(set(table))  # remove duplicate elements
        ratio_dict = {}
        for value in sorted(ratio_set):
            smaller_than_value_counter = 0
            for compared_value in table:
                if compared_value <= value:
                    smaller_than_value_counter += 1
            ratio_dict[value] = smaller_than_value_counter * 100 / total_solved

        print(ratio_dict)
        print("---------------------------")

        # Write results into .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_h-ff_to_h-star.csv'
        file = open(domain_file, "w")

        sorted_ratio_dict_keys = sorted(ratio_dict.keys())
        for hstar in sorted_ratio_dict_keys:
            toWrite = str(hstar) + ',' + str(ratio_dict[hstar]) + '\n'
            file.write(toWrite)

        file.close()
Example #6
    def save_stat_table_to_file(self, domain, table):

        # Write results into .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Statistics.csv'
        outfile = open(domain_file, "w")

        first_line = 'Problem,isSolved,h*,ff_h(s),h(s),h*/h(s)\n'
        outfile.write(first_line)
        for prob in table:
            row = table[prob]

            # print '----------------------'
            # print row
            # print '----------------------'

            probName = prob
            isSolved = row['WORK-lmcut']
            h_star   = row['h*']
            ff_h     = row['ff_h(s)']
            h        = row['h(s)']
            ratio    = row['h*/h(s)']

            curr_line = str(probName) + ',' + str(isSolved) + ',' + str(h_star) + ',' + str(ff_h) + ',' + str(h) + ',' + str(ratio) + '\n'
            outfile.write(curr_line)
        outfile.close()
Example #7
    def cache(self, revision_cache):
        self._path = os.path.join(revision_cache, self._hashed_name)
        if os.path.exists(self.path):
            logging.info('Revision is already cached: "%s"' % self.path)
            if not os.path.exists(self._get_sentinel_file()):
                logging.critical(
                    'The build for the cached revision at {} is corrupted '
                    'or was made with an older Lab version. Please delete '
                    'it and try again.'.format(self.path))
        else:
            tools.makedirs(self.path)

            if not os.path.exists(os.path.join(self.repo, 'export.sh')):
                logging.critical(
                    'export.sh script not found. Make sure you\'re using a recent version of the planner.'
                )
            # First export the main repo
            script = os.path.join(self.repo, "export.sh")
            retcode = tools.run_command((script, self.global_rev, self.path),
                                        cwd=self.repo)

            if retcode != 0:
                shutil.rmtree(self.path)
                logging.critical('Failed to make checkout.')
            self._compile()
            self._cleanup()
Example #8
 def _build_new_files(self):
     for dest, content, permissions in self.new_files:
         filename = self._get_abs_path(dest)
         tools.makedirs(os.path.dirname(filename))
         logging.debug(f'Writing file "{filename}"')
         tools.write_file(filename, content)
         os.chmod(filename, permissions)
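A self-contained sketch of what a single (dest, content, permissions) entry from self.new_files produces, with the tools helpers replaced by their standard-library equivalents:

import os

dest = '/tmp/demo-exp/run'
content = '#! /usr/bin/env bash\necho done\n'
permissions = 0o755

os.makedirs(os.path.dirname(dest), exist_ok=True)
with open(dest, 'w') as f:
    f.write(content)
os.chmod(dest, permissions)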
Example #9
    def cache(self, revision_cache):
        self.path = os.path.join(revision_cache, self.name)
        if os.path.exists(self.path):
            logging.info(f'Revision is already cached: "{self.path}"')
            if not os.path.exists(self._get_sentinel_file()):
                logging.critical(
                    f"The build for the cached revision at {self.path} is corrupted. "
                    f"Please delete it and try again.")
        else:
            tools.makedirs(self.path)
            tar_archive = os.path.join(self.path, "solver.tgz")
            cmd = ["git", "archive", "--format", "tar", self.global_rev]
            with open(tar_archive, "w") as f:
                retcode = tools.run_command(cmd, stdout=f, cwd=self.repo)

            if retcode == 0:
                with tarfile.open(tar_archive) as tf:
                    tf.extractall(self.path)
                tools.remove_path(tar_archive)

                for exclude_dir in self.exclude:
                    path = os.path.join(self.path, exclude_dir)
                    if os.path.exists(path):
                        tools.remove_path(path)

            if retcode != 0:
                shutil.rmtree(self.path)
                logging.critical("Failed to make checkout.")
            self._compile()
            self._cleanup()
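The caching branch above boils down to "git archive, then extract in place". A standalone sketch of that step with the standard library; compilation, the sentinel file, and error cleanup are omitted:

import os
import subprocess
import tarfile

def archive_revision(repo, rev, cache_path):
    os.makedirs(cache_path, exist_ok=True)
    tar_path = os.path.join(cache_path, 'solver.tgz')
    with open(tar_path, 'wb') as f:
        subprocess.run(['git', 'archive', '--format', 'tar', rev],
                       cwd=repo, stdout=f, check=True)
    with tarfile.open(tar_path) as tf:
        tf.extractall(cache_path)
    os.remove(tar_path)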
Example #10
    def build(self,
              overwrite=False,
              only_main_script=False,
              no_main_script=False):
        """Apply all the actions to the filesystem.

        If *overwrite* is True and the experiment directory exists, it is
        overwritten without prior confirmation.
        """
        logging.info('Exp Dir: "%s"' % self.path)

        self._set_run_dirs()

        # TODO: Currently no_main_script is always False.
        if not no_main_script:
            # This is the first part where we only write the main script.
            # We only overwrite the exp dir in the first part.
            if os.path.exists(self.path):
                runs_exist = any(
                    path.startswith('runs') for path in os.listdir(self.path))
                logging.info(
                    'The directory "%s" contains run directories: %s' %
                    (self.path, runs_exist))
                # Overwrite if overwrite is True or if no runs exist.
                tools.overwrite_dir(self.path, overwrite or not runs_exist)
            else:
                tools.makedirs(self.path)
            self._build_main_script()
        if only_main_script:
            return

        # This is the second part where we write everything else
        self._build_resources()
        self._build_runs()
        self._build_properties_file()
Example #11
    def save_stat_table_to_file(self, domain, table):
        # Write results into .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Statistics.csv'
        outfile = open(domain_file, "w")

        first_line = 'Problem,isSolved,h*,ff_h(s),h(s),h*/h(s)\n'
        outfile.write(first_line)
        for prob in table:
            row = table[prob]

            # print '----------------------'
            # print row
            # print '----------------------'

            probName = prob
            isSolved = row['WORK-lmcut']
            h_star = row['h*']
            ff_h = row['ff_h(s)']
            h = row['h(s)']
            ratio = row['h*/h(s)']

            curr_line = str(probName) + ',' + str(isSolved) + ',' + str(
                h_star) + ',' + str(ff_h) + ',' + str(h) + ',' + str(
                    ratio) + '\n'
            outfile.write(curr_line)
        outfile.close()
Example #12
 def write(cls, report, filename):
     lines = ([
         r'\documentclass[tikz]{standalone}', r'\usepackage{pgfplots}',
         r'\begin{document}', r'\begin{tikzpicture}'
     ] + cls._get_plot(report) + [r'\end{tikzpicture}', r'\end{document}'])
     tools.makedirs(os.path.dirname(filename))
     tools.write_file(filename, '\n'.join(lines))
     logging.info('Wrote file://%s' % filename)
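A stdlib-only sketch of the same wrapping step, with the plot body stubbed out and tools.makedirs/tools.write_file replaced by standard-library calls:

import os

def write_standalone_tex(plot_lines, filename):
    # Wrap the pgfplots lines in a standalone document, as write() does above.
    lines = ([r'\documentclass[tikz]{standalone}', r'\usepackage{pgfplots}',
              r'\begin{document}', r'\begin{tikzpicture}']
             + plot_lines
             + [r'\end{tikzpicture}', r'\end{document}'])
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'w') as f:
        f.write('\n'.join(lines))

write_standalone_tex([r'% plot body from _get_plot(report)'], '/tmp/plots/example.tex')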
Example #13
 def write(self):
     if not len(self.algorithms) == 2:
         logging.critical(
             f"Scatter plots need exactly 2 algorithms: {self.algorithms}")
     suffix = "." + self.output_format
     if not self.outfile.endswith(suffix):
         self.outfile += suffix
     tools.makedirs(os.path.dirname(self.outfile))
     self._write_plot(self.runs.values(), self.outfile)
Example #14
 def write(cls, report, filename, scatter=False):
     lines = []
     lines.append('\\begin{tikzpicture}')
     lines.extend(cls._get_plot(report))
     lines.append('\\end{tikzpicture}')
     tools.makedirs(os.path.dirname(filename))
     with open(filename, 'w') as f:
         f.write('\n'.join(lines))
     logging.info('Wrote file://%s' % filename)
Example #15
 def write(cls, report, filename, scatter=False):
     lines = []
     lines.append('\\begin{tikzpicture}')
     lines.extend(cls._get_plot(report))
     lines.append('\\end{tikzpicture}')
     tools.makedirs(os.path.dirname(filename))
     with open(filename, 'w') as f:
         f.write('\n'.join(lines))
     logging.info('Wrote file://%s' % filename)
Example #16
 def write(cls, report, filename):
     lines = ([
         r"\documentclass[tikz]{standalone}",
         r"\usepackage{pgfplots}",
         r"\begin{document}",
         r"\begin{tikzpicture}",
     ] + cls._get_plot(report) + [r"\end{tikzpicture}", r"\end{document}"])
     tools.makedirs(os.path.dirname(filename))
     tools.write_file(filename, "\n".join(lines))
     logging.info(f"Wrote file://{filename}")
Example #17
 def write(self):
     """
     Overwrite this method if you want to write the report directly. You
     should write the report to *self.outfile*.
     """
     content = self.get_text()
     tools.makedirs(os.path.dirname(self.outfile))
     with open(self.outfile, 'w') as file:
         file.write(content)
         logging.info('Wrote file://%s' % self.outfile)
Example #18
 def write(self):
     """
     Overwrite this method if you want to write the report directly. You
     should write the report to *self.outfile*.
     """
     content = self.get_text()
     tools.makedirs(os.path.dirname(self.outfile))
     with open(self.outfile, 'w') as file:
         file.write(content)
         logging.info('Wrote file://%s' % self.outfile)
Example #19
    def write(self):
        if not len(self.configs) == 2:
            logging.critical('Scatterplots need exactly 2 configs: %s' % self.configs)
        self.xlabel = self.xlabel or self.configs[0]
        self.ylabel = self.ylabel or self.configs[1]

        suffix = '.' + self.output_format
        if not self.outfile.endswith(suffix):
            self.outfile += suffix
        tools.makedirs(os.path.dirname(self.outfile))
        self._write_plot(self.runs.values(), self.outfile)
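The suffix handling above recurs in several of these writers; as a standalone helper it reads:

def ensure_suffix(outfile, output_format):
    # Append ".<format>" only if the filename does not already end with it.
    suffix = '.' + output_format
    return outfile if outfile.endswith(suffix) else outfile + suffix

assert ensure_suffix('plot', 'png') == 'plot.png'
assert ensure_suffix('plot.png', 'png') == 'plot.png'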
Example #20
    def write(self):
        if not len(self.algorithms) == 2:
            logging.critical('Scatter plots need exactly 2 algorithms: %s' %
                             self.algorithms)
        self.xlabel = self.xlabel or self.algorithms[0]
        self.ylabel = self.ylabel or self.algorithms[1]

        suffix = '.' + self.output_format
        if not self.outfile.endswith(suffix):
            self.outfile += suffix
        tools.makedirs(os.path.dirname(self.outfile))
        self._write_plot(self.runs.values(), self.outfile)
Example #21
    def write(self):
        if len(self.algorithms) < 1:
            logging.critical('Cactus plots need at least one algorithm: %s' %
                             self.algorithms)
        self.xlabel = self.xlabel or "Time (s)"
        self.ylabel = self.ylabel or "Count"

        suffix = '.' + self.output_format
        if not self.outfile.endswith(suffix):
            self.outfile += suffix
        tools.makedirs(os.path.dirname(self.outfile))
        self._write_plot(self.runs.values(), self.outfile)
Example #22
    def write(self):
        logging.info('Using score attribute "%s"' % self.score)
        logging.info('Adding column with best value: %s' %
                     self.best_value_column)

        self.total_scores = self._compute_total_scores()

        tools.makedirs(os.path.dirname(self.outfile))
        with open(self.outfile, 'w') as file:
            sys.stdout = file
            self.print_report()
            sys.stdout = sys.__stdout__
        logging.info('Wrote file://%s' % self.outfile)
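Swapping sys.stdout by hand, as above, leaves the redirect in place if print_report() raises. A sketch of the same trick with contextlib, which restores stdout on any exit:

import contextlib

with open('/tmp/report.txt', 'w') as f, contextlib.redirect_stdout(f):
    print('report body goes here')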
Example #23
    def write(self):
        if not (len(self.algorithms) == 1 and self.x_algo == self.algorithms[0]
                and self.y_algo == self.algorithms[0]):
            logging.critical(
                'Scatter plots need exactly 1 algorithm that must match x_algo and y_algo: %s, %s, %s'
                % (self.algorithms, self.x_algo, self.y_algo))
        self.xlabel = self.xlabel or self.x_algo + ": " + self.x_attribute
        self.ylabel = self.ylabel or self.y_algo + ": " + self.y_attribute

        suffix = '.' + self.output_format
        if not self.outfile.endswith(suffix):
            self.outfile += suffix
        tools.makedirs(os.path.dirname(self.outfile))
        self._write_plot(self.runs.values(), self.outfile)
Example #24
    def write(self):
        """
        Write the report files.

        By default this method calls :meth:`.get_text` and writes the
        obtained text to *outfile*.

        Overwrite this method if you want to write the report file(s)
        directly. You should write them to *self.outfile*.

        """
        content = self.get_text()
        tools.makedirs(os.path.dirname(self.outfile))
        tools.write_file(self.outfile, content)
        logging.info(f"Wrote file://{self.outfile}")
Example #25
    def fetch_dir(self,
                  run_dir,
                  eval_dir,
                  copy_all=False,
                  run_filter=None,
                  parsers=None):
        run_filter = run_filter or tools.RunFilter()
        parsers = parsers or []
        # Allow specifying a list of multiple parsers or a single parser.
        if not isinstance(parsers, (tuple, list)):
            parsers = [parsers]
        # Make sure parsers is a list.
        parsers = list(parsers)

        prop_file = os.path.join(run_dir, 'properties')

        # Somehow '../..' gets inserted into sys.path and more strangely the
        # system lab.tools module gets called.
        # TODO: This HACK should be removed once the source of the error is clear.
        props = tools.Properties(filename=prop_file)
        if (props.get('search_returncode') is not None
                and props.get("coverage") is None):
            logging.warning('search_parser.py exited abnormally for %s' %
                            run_dir)
            logging.info('Rerunning search_parser.py')
            parsers.append(os.path.join(run_dir, '../../search_parser.py'))

        for parser in parsers:
            rel_parser = os.path.relpath(parser, start=run_dir)
            subprocess.call([rel_parser], cwd=run_dir)

        props = tools.Properties(filename=prop_file)
        props = run_filter.apply_to_run(props)
        if not props:
            return None, None
        run_id = props.get('id')
        # Abort if an id cannot be read.
        if not run_id:
            logging.critical('id is not set in %s.' % prop_file)

        if copy_all:
            dest_dir = os.path.join(eval_dir, *run_id)
            tools.makedirs(dest_dir)
            tools.fast_updatetree(run_dir, dest_dir, symlinks=True)

        return run_id, props
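The returned run id is a list of path components; in the copy_all branch it becomes a directory tree under eval_dir. An illustrative example with made-up values:

import os

run_id = ['astar-lmcut', 'gripper', 'prob01.pddl']  # illustrative values
eval_dir = '/tmp/exp-eval'
print(os.path.join(eval_dir, *run_id))  # /tmp/exp-eval/astar-lmcut/gripper/prob01.pddl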
Example #26
 def _cache(self, compilation_options):
     path = self.get_path()
     if os.path.exists(path):
         logging.info('Revision is already cached: "%s"' % path)
         if not os.path.exists(self._sentinel_file):
             logging.critical(
                 'The build for the cached revision at "%s" is corrupted '
                 'or was made with an older lab version. Please delete '
                 'it and try again.' % path)
     else:
         tools.makedirs(path)
         retcode = tools.run_command(
             ['hg', 'archive', '-r', self.rev, '-I', 'src', path], cwd=self.repo)
         if retcode != 0:
             shutil.rmtree(path)
             logging.critical('Failed to make checkout.')
         self._compile(compilation_options)
         self._cleanup()
Example #27
 def _cache(self, compilation_options):
     path = self.get_path()
     if os.path.exists(path):
         logging.info('Revision is already cached: "%s"' % path)
         if not os.path.exists(self._sentinel_file):
             logging.critical(
                 'The build for the cached revision at "%s" is corrupted '
                 'or was made with an older lab version. Please delete '
                 'it and try again.' % path)
     else:
         tools.makedirs(path)
         retcode = tools.run_command(
             ['hg', 'archive', '-r', self.rev, '-I', 'src', path],
             cwd=self.repo)
         if retcode != 0:
             shutil.rmtree(path)
             logging.critical('Failed to make checkout.')
         self._compile(compilation_options)
         self._cleanup()
Example #28
    def run_steps(self, steps):
        """
        We can't submit jobs from within the grid, so we submit them
        all at once with dependencies. We also can't rewrite the job
        files after they have been submitted.
        """
        self.exp.build(write_to_disk=False)

        # Prepare job dir.
        job_dir = self.exp.path + "-grid-steps"
        if os.path.exists(job_dir):
            tools.confirm_or_abort(
                f'The path "{job_dir}" already exists, so the experiment has '
                f"already been submitted. Are you sure you want to "
                f"delete the grid-steps and submit it again?"
            )
            tools.remove_path(job_dir)

        # Overwrite exp dir if it exists.
        if any(is_build_step(step) for step in steps):
            self.exp._remove_experiment_dir()

        # Remove eval dir if it exists.
        if os.path.exists(self.exp.eval_dir):
            tools.confirm_or_abort(
                f'The evaluation directory "{self.exp.eval_dir}" already exists. '
                f"Do you want to remove it?"
            )
            tools.remove_path(self.exp.eval_dir)

        # Create job dir only when we need it.
        tools.makedirs(job_dir)

        prev_job_id = None
        for step in steps:
            job_name = self._get_job_name(step)
            job_file = os.path.join(job_dir, job_name)
            job_content = self._get_job(step, is_last=(step == steps[-1]))
            tools.write_file(job_file, job_content)
            prev_job_id = self._submit_job(
                job_name, job_file, job_dir, dependency=prev_job_id
            )
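A sketch of how _submit_job might chain the steps, assuming a Slurm-style scheduler; the actual scheduler interface is not shown in this snippet:

import subprocess

def submit_job(job_file, job_dir, dependency=None):
    # --parsable makes sbatch print only the job id, which the next
    # submission references via --dependency.
    cmd = ['sbatch', '--parsable']
    if dependency:
        cmd.append('--dependency=afterany:%s' % dependency)
    cmd.append(job_file)
    return subprocess.check_output(cmd, cwd=job_dir, text=True).strip()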
Example #29
    def cache(self, revision_cache):
        self.path = os.path.join(revision_cache, self.name)
        if os.path.exists(self.path):
            logging.info('Revision is already cached: "%s"' % self.path)
            if not os.path.exists(self._get_sentinel_file()):
                logging.critical(
                    "The build for the cached revision at {} is corrupted. "
                    "Please delete it and try again.".format(self.path)
                )
        else:
            tools.makedirs(self.path)
            vcs = get_version_control_system(self.repo)
            if vcs == MERCURIAL:
                retcode = tools.run_command(
                    ["hg", "archive", "-r", self.global_rev]
                    + [f"-X{d}" for d in self.exclude]
                    + [self.path],
                    cwd=self.repo,
                )
            elif vcs == GIT:
                tar_archive = os.path.join(self.path, "solver.tgz")
                cmd = ["git", "archive", "--format", "tar", self.global_rev]
                with open(tar_archive, "w") as f:
                    retcode = tools.run_command(cmd, stdout=f, cwd=self.repo)

                if retcode == 0:
                    with tarfile.open(tar_archive) as tf:
                        tf.extractall(self.path)
                    tools.remove_path(tar_archive)

                    for exclude_dir in self.exclude:
                        path = os.path.join(self.path, exclude_dir)
                        if os.path.exists(path):
                            tools.remove_path(path)
            else:
                _raise_unknown_vcs_error(vcs)

            if retcode != 0:
                shutil.rmtree(self.path)
                logging.critical("Failed to make checkout.")
            self._compile()
            self._cleanup()
Example #30
def regression_test_handler(test, rev, success):
    if not success:
        tools.makedirs(REGRESSIONS_DIR)
        tarball = os.path.join(REGRESSIONS_DIR,
                               "{test}-{rev}.tar.gz".format(**locals()))
        subprocess.check_call([
            "tar", "-czf", tarball, "-C", BASE_DIR,
            os.path.relpath(EXPERIMENTS_DIR, start=BASE_DIR)
        ])
        logging.error(
            "Regression found. To inspect the experiment data for the failed regression test, run\n"
            "sudo ./extract-regression-experiment.sh {test}-{rev}\n"
            "in the ~/infrastructure/hosts/linux-buildbot-worker directory "
            "on the Linux buildbot computer.".format(**locals()))
    exp_dir = get_exp_dir(rev, test)
    eval_dir = exp_dir + "-eval"
    shutil.rmtree(exp_dir)
    shutil.rmtree(eval_dir)
    if not success:
        sys.exit(1)
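The tarball above is built by shelling out to tar; for reference, a sketch of the same step with the tarfile module:

import os
import tarfile

def make_regression_tarball(tarball, base_dir, experiments_dir):
    rel = os.path.relpath(experiments_dir, start=base_dir)
    with tarfile.open(tarball, 'w:gz') as tf:
        tf.add(os.path.join(base_dir, rel), arcname=rel)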
Example #31
    def __init__(self, path, environment=None, cache_dir=None):
        """
        Create a new experiment that will be built at *path* using the methods
        provided by :ref:`Environment <environments>` *environment*. If
        *environment* is None, ``LocalEnvironment`` is used (default).

        Lab will use the *cache_dir* for storing temporary files.
        In case you run :py:class:`Fast Downward experiments
        <downward.experiments.DownwardExperiment>` this directory can become
        very large (tens of GB) since it is used to cache revisions and
        preprocessed tasks. By default *cache_dir* points to ``~/lab``.

        An experiment consists of multiple steps. Every experiment will need at
        least the following steps:

        * Build the experiment.
        * Run it.
        * Fetch the results.
        * Make a report.

        In the "Run it" step all runs that have been added to the experiment
        will be executed. Each run consists of one or multiple commands.
        """
        _Buildable.__init__(self)
        self.path = os.path.abspath(path)
        if any(char in self.path for char in (':', ',')):
            logging.critical('Path contains commas or colons: %s' % self.path)
        self.environment = environment or LocalEnvironment()
        self.environment.exp = self
        self.cache_dir = cache_dir or tools.DEFAULT_USER_DIR
        tools.makedirs(self.cache_dir)
        self.shard_size = SHARD_SIZE

        self.runs = []

        self.set_property('experiment_file', self._script)

        self.steps = Sequence()
        self.add_step(Step('build', self.build))
        self.add_step(Step('start', self.run))
        self.add_fetcher(name='fetch')
Example #32
    def __init__(self, path, environment=None, cache_dir=None):
        """
        Create a new experiment that will be built at *path* using the methods
        provided by :ref:`Environment <environments>` *environment*. If
        *environment* is None, ``LocalEnvironment`` is used (default).

        Lab will use the *cache_dir* for storing temporary files.
        In case you run :py:class:`Fast Downward experiments
        <downward.experiments.DownwardExperiment>` this directory can become
        very large (tens of GB) since it is used to cache revisions and
        preprocessed tasks. By default *cache_dir* points to ``~/lab``.

        An experiment consists of multiple steps. Every experiment will need at
        least the following steps:

        * Build the experiment.
        * Run it.
        * Fetch the results.
        * Make a report.

        In the "Run it" step all runs that have been added to the experiment
        will be executed. Each run consists of one or multiple commands.
        """
        _Buildable.__init__(self)
        self.path = os.path.abspath(path)
        if any(char in self.path for char in (':', ',')):
            logging.critical('Path contains commas or colons: %s' % self.path)
        self.environment = environment or LocalEnvironment()
        self.environment.exp = self
        self.cache_dir = cache_dir or tools.DEFAULT_USER_DIR
        tools.makedirs(self.cache_dir)
        self.shard_size = SHARD_SIZE

        self.runs = []

        self.set_property('experiment_file', self._script)

        self.steps = Sequence()
        self.add_step(Step('build', self.build))
        self.add_step(Step('start', self.run))
        self.add_fetcher(name='fetch')
Example #33
 def cache(self, revision_cache):
     self._path = os.path.join(revision_cache, self._hashed_name)
     if os.path.exists(self.path):
         logging.info('Revision is already cached: "%s"' % self.path)
         if not os.path.exists(self._get_sentinel_file()):
             logging.critical(
                 'The build for the cached revision at {} is corrupted '
                 'or was made with an older Lab version. Please delete '
                 'it and try again.'.format(self.path))
     else:
         tools.makedirs(self.path)
         excludes = ['-X{}'.format(d) for d in ['experiments', 'misc']]
         retcode = tools.run_command(
             ['hg', 'archive', '-r', self.global_rev] + excludes +
             [self.path],
             cwd=self.repo)
         if retcode != 0:
             shutil.rmtree(self.path)
             logging.critical('Failed to make checkout.')
         self._compile()
         self._cleanup()
Example #34
    def create_commulative_h_star_table(self, domain):
        #get relevant value from original table
        cost_attr = Attribute('cost', min_wins=False, absolute=True)
        cost_table = AbsoluteReport._get_table(self, cost_attr, domain)

        #define list of costs:
        cost_list = []

        #calculate number of solved problems
        total_solved = 0
        for row in cost_table.row_names:
            curr_val = cost_table.get(row)
            val = curr_val[self.nick]
            if val > 0:
                total_solved = total_solved + 1
                cost_list.append(val)

        cost_set = list(set(cost_list))  # remove duplicate elements
        cost_dict = {}
        for value in sorted(cost_set):
            smaller_than_value_counter = 0
            for compared_value in cost_list:
                if compared_value <= value:
                    smaller_than_value_counter += 1
            cost_dict[value] = smaller_than_value_counter * 100 / total_solved

        # Write results into .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_hstar.csv'
        file = open(domain_file, "w")

        sorted_cost_dict_keys = sorted(cost_dict.keys())
        for hstar in sorted_cost_dict_keys:
            toWrite = str(hstar) + ',' + str(cost_dict[hstar]) + '\n'
            file.write(toWrite)

        file.close()
Example #35
    def fetch_dir(self, run_dir, eval_dir, copy_all=False, run_filter=None, parsers=None):
        run_filter = run_filter or tools.RunFilter()
        parsers = parsers or []
        # Allow specifying a list of multiple parsers or a single parser.
        if not isinstance(parsers, (tuple, list)):
            parsers = [parsers]
        # Make sure parsers is a list.
        parsers = list(parsers)

        prop_file = os.path.join(run_dir, 'properties')

        # Somehow '../..' gets inserted into sys.path and more strangely the
        # system lab.tools module gets called.
        # TODO: This HACK should be removed once the source of the error is clear.
        props = tools.Properties(filename=prop_file)
        if props.get('search_returncode') is not None and props.get("coverage") is None:
            logging.warning('search_parser.py exited abnormally for %s' % run_dir)
            logging.info('Rerunning search_parser.py')
            parsers.append(os.path.join(run_dir, '../../search_parser.py'))

        for parser in parsers:
            rel_parser = os.path.relpath(parser, start=run_dir)
            subprocess.call([rel_parser], cwd=run_dir)

        props = tools.Properties(filename=prop_file)
        props = run_filter.apply_to_run(props)
        if not props:
            return None, None
        run_id = props.get('id')
        # Abort if an id cannot be read.
        if not run_id:
            logging.critical('id is not set in %s.' % prop_file)

        if copy_all:
            dest_dir = os.path.join(eval_dir, *run_id)
            tools.makedirs(dest_dir)
            tools.fast_updatetree(run_dir, dest_dir, symlinks=True)

        return run_id, props
Example #36
    def create_commulative_h_star_table(self, domain):
        #get relevant value from original table
        cost_attr = Attribute('cost', min_wins=False, absolute=True)
        cost_table = AbsoluteReport._get_table(self, cost_attr, domain)

        #define list of costs:
        cost_list = []

        #calculate number of solved problems
        total_solved = 0
        for row in cost_table.row_names:
            curr_val = cost_table.get(row)
            val = curr_val[self.nick]
            if val > 0:
                total_solved = total_solved + 1
                cost_list.append(val)

        cost_set = list(set(cost_list))  # remove duplicate elements
        cost_dict = {}
        for value in sorted(cost_set):
            smaller_than_value_counter = 0
            for compared_value in cost_list:
                if compared_value <= value:
                    smaller_than_value_counter += 1
            cost_dict[value] = smaller_than_value_counter * 100 / total_solved

        # Write results into .csv file:
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_hstar.csv'
        file = open(domain_file, "w")

        sorted_cost_dict_keys = sorted(cost_dict.keys())
        for hstar in sorted_cost_dict_keys:
            toWrite = str(hstar) + ',' + str(cost_dict[hstar]) + '\n'
            file.write(toWrite)

        file.close()
Example #37
    def build(self, write_to_disk=True):
        """
        Finalize the internal data structures, then write all files
        needed for the experiment to disk.

        If *write_to_disk* is False, only compute the internal data
        structures. This is only needed on grids for
        FastDownwardExperiments.build() which turns the added algorithms
        and benchmarks into Runs.

        """
        if not write_to_disk:
            return

        logging.info(f'Experiment path: "{self.path}"')
        self._remove_experiment_dir()
        tools.makedirs(self.path)
        self.environment.write_main_script()

        self._build_new_files()
        self._build_resources()
        self._build_runs()
        self._build_properties_file(STATIC_EXPERIMENT_PROPERTIES_FILENAME)
Example #38
    def __call__(self,
                 src_dir,
                 eval_dir=None,
                 merge=None,
                 filter=None,
                 **kwargs):
        """
        This method can be used to copy properties from an exp-dir or
        eval-dir into an eval-dir. If the destination eval-dir already
        exist, the data will be merged. This means *src_dir* can either
        be an exp-dir or an eval-dir and *eval_dir* can be a new or
        existing directory.

        We recommend using lab.Experiment.add_fetcher() to add fetchers
        to an experiment. See the method's documentation for a
        description of the parameters.

        """
        if not os.path.isdir(src_dir):
            logging.critical(
                "{} is missing or not a directory".format(src_dir))
        run_filter = tools.RunFilter(filter, **kwargs)

        eval_dir = eval_dir or src_dir.rstrip("/") + "-eval"
        logging.info("Fetching properties from {} to {}".format(
            src_dir, eval_dir))

        if merge is None:
            _check_eval_dir(eval_dir)
        elif merge:
            # No action needed, data will be merged.
            pass
        else:
            tools.remove_path(eval_dir)

        # Load properties in the eval_dir if there are any already.
        combined_props = tools.Properties(os.path.join(eval_dir, "properties"))
        fetch_from_eval_dir = not os.path.exists(
            os.path.join(src_dir, "runs-00001-00100"))
        if fetch_from_eval_dir:
            src_props = tools.Properties(
                filename=os.path.join(src_dir, "properties"))
            run_filter.apply(src_props)
            combined_props.update(src_props)
            logging.info("Fetched properties of {} runs.".format(
                len(src_props)))
        else:
            slurm_err_content = tools.get_slurm_err_content(src_dir)
            if slurm_err_content:
                logging.error("There was output to *-grid-steps/slurm.err")

            new_props = tools.Properties()
            run_dirs = sorted(glob(os.path.join(src_dir, "runs-*-*", "*")))
            total_dirs = len(run_dirs)
            logging.info(
                "Scanning properties from {:d} run directories".format(
                    total_dirs))
            for index, run_dir in enumerate(run_dirs, start=1):
                loglevel = logging.INFO if index % 100 == 0 else logging.DEBUG
                logging.log(loglevel,
                            "Scanning: {:6d}/{:d}".format(index, total_dirs))
                props = self.fetch_dir(run_dir)
                if slurm_err_content:
                    props.add_unexplained_error("output-to-slurm.err")
                id_string = "-".join(props["id"])
                new_props[id_string] = props
            run_filter.apply(new_props)
            combined_props.update(new_props)

        unexplained_errors = 0
        for props in combined_props.values():
            error_message = tools.get_unexplained_errors_message(props)
            if error_message:
                logging.error(error_message)
                unexplained_errors += 1

        tools.makedirs(eval_dir)
        combined_props.write()
        logging.info("Wrote properties file (contains {unexplained_errors} "
                     "runs with unexplained errors).".format(**locals()))
Example #39
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other things
        this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        if not repo or not os.path.isdir(repo):
            logging.critical('The path "%s" is not a local Fast Downward '
                             'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key, value in limits.items():
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        self.limits = LIMITS
        self.limits.update(limits)

        # Save if this is a compact experiment i.e. preprocessed tasks are referenced.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        # TODO: Make the source path a parameter.
        self.add_PAC_fetcher(  # New fetcher that copies preprocess results for PAC.
            src='/home/gal-d/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',
            dest=self.search_exp_path,
            name='fetch-preprocess-results',
            copy_all=True,
            write_combined_props=False)
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
Example #40
 def write(self):
     if os.path.isfile(self.outfile):
         logging.critical('outfile must be a directory for this report.')
     tools.makedirs(self.outfile)
     self._write_plots(self.outfile)
Example #41
    def _get_commulative_table(self, domain):
        #init new table
        title = 'Commulative'
        columns = {'Percentage', 'h*/h(s)'}
        min_wins = False
        colored = True
        table = reports.Table(title=title, min_wins=min_wins, colored=colored)
        table.set_column_order(columns)
        link = '#%s' % title
        formatter = reports.CellFormatter(link=link)
        table.cell_formatters[table.header_row][table.header_column] = formatter
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_ratio.csv'
        file = open(domain_file, "w")

        #get relevant value from original table
        ratio_attr = Attribute('hstar_to_h', min_wins=False, absolute=True)
        ratio_table = AbsoluteReport._get_table(self, ratio_attr, domain)
        #define arrays to work
        ratios = [
            0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3,
            1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9,
            1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5,
            2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85, 2.9, 2.95, 3.0, 3.05, 3.1,
            3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7,
            3.75, 3.80, 3.85, 3.9, 3.95, 4.0, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3,
            4.35, 4.4, 4.45, 4.5
        ]
        names = [
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
            'za', 'zb', 'zc', 'zd', 'ze', 'zf', 'zg', 'zh', 'zi', 'zj', 'zk',
            'zl', 'zm', 'zn', 'zo', 'aa', 'ab', 'ac', 'ad', 'ae', 'af', 'ag',
            'ah', 'ai', 'aj', 'ak', 'al', 'am', 'an', 'ao', 'ap', 'aq', 'ar',
            'as', 'at', 'au', 'av', 'aw', 'ax', 'ay', 'az', 'ba', 'bb', 'bc',
            'bd', 'be', 'bf', 'bg', 'bh', 'bi'
        ]
        counter = 0

        #calculate number of solved problems
        total_solved = 0
        for row in ratio_table.row_names:
            curr_val = ratio_table.get(row)
            val = curr_val[self.nick]
            if val > 0:
                total_solved = total_solved + 1

        #for each ratio (1,1.05...), find the number of problems with this ratio, calc percentage and add row
        for ratio in ratios:
            _sum = 0
            for row in ratio_table.row_names:
                curr_val = ratio_table.get(row)
                val = curr_val[self.nick]
                if val <= ratio and val > 0:
                    _sum = _sum + 1

            if total_solved == 0:
                _sum_percent = 0
            else:
                _sum_percent = _sum * 100 / total_solved

            #add new row
            row_to_add = {}
            row_to_add['Percentage'] = _sum_percent
            row_to_add['h*/h(s)'] = ratio
            table.add_row(names[counter], row_to_add)
            counter = counter + 1
            #TODO - save only one ratio per percentage
            toWrite = str(ratio) + ',' + str(_sum_percent) + '\n'
            file.write(toWrite)

        file.close()

        self.create_commulative_h_star_table(domain)

        return table
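The hand-maintained ratios list above (0.75 to 4.5 in steps of 0.05) is easy to get wrong; for reference, it can be generated in one line:

ratios = [round(0.75 + 0.05 * i, 2) for i in range(76)]  # 0.75, 0.8, ..., 4.5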
Example #42
def write_properties(eval_dir):
    tools.makedirs(eval_dir)
    with open(os.path.join(eval_dir, 'properties'), 'w') as f:
        json.dump(PROPERTIES, f)
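A self-contained variant of write_properties() with the module globals passed in as parameters, runnable as-is:

import json
import os

def write_properties(eval_dir, properties):
    os.makedirs(eval_dir, exist_ok=True)
    with open(os.path.join(eval_dir, 'properties'), 'w') as f:
        json.dump(properties, f)

write_properties('/tmp/demo-eval', {'run-1': {'coverage': 1}})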
Example #43
    def _get_commulative_table(self, domain):
        #init new table
        title = 'Commulative'
        columns = {'Percentage', 'h*/h(s)'}
        min_wins = False
        colored = True
        table = reports.Table(title=title, min_wins=min_wins, colored=colored)
        table.set_column_order(columns)
        link = '#%s' % title
        formatter = reports.CellFormatter(link=link)
        table.cell_formatters[table.header_row][
            table.header_column] = formatter
        domain_dir = self.outFile + '/' + domain
        tools.makedirs(domain_dir)
        domain_file = domain_dir + '/' + 'PAC_Commulative_ratio.csv'
        file = open(domain_file, "w")

        #get relevant value from original table
        ratio_attr = Attribute('hstar_to_h', min_wins=False, absolute=True)
        ratio_table = AbsoluteReport._get_table(self, ratio_attr, domain)
        #define arrays to work
        ratios = [
            0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3,
            1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9,
            1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5,
            2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85, 2.9, 2.95, 3.0, 3.05, 3.1,
            3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7,
            3.75, 3.80, 3.85, 3.9, 3.95, 4.0, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3,
            4.35, 4.4, 4.45, 4.5
        ]
        names = [
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
            'za', 'zb', 'zc', 'zd', 'ze', 'zf', 'zg', 'zh', 'zi', 'zj', 'zk',
            'zl', 'zm', 'zn', 'zo', 'aa', 'ab', 'ac', 'ad', 'ae', 'af', 'ag',
            'ah', 'ai', 'aj', 'ak', 'al', 'am', 'an', 'ao', 'ap', 'aq', 'ar',
            'as', 'at', 'au', 'av', 'aw', 'ax', 'ay', 'az', 'ba', 'bb', 'bc',
            'bd', 'be', 'bf', 'bg', 'bh', 'bi'
        ]
        counter = 0

        #calculate number of solved problems
        total_solved = 0
        for row in ratio_table.row_names:
            curr_val = ratio_table.get(row)
            val = curr_val[self.nick]
            if val > 0:
                total_solved = total_solved + 1

        #for each ratio (1,1.05...), find the number of problems with this ratio, calc percentage and add row
        for ratio in ratios:
            _sum = 0
            for row in ratio_table.row_names:
                curr_val = ratio_table.get(row)
                val = curr_val[self.nick]
                if val <= ratio and val > 0:
                    _sum = _sum + 1

            if total_solved == 0:
                _sum_percent = 0
            else:
                _sum_percent = _sum * 100 / total_solved

            #add new row
            row_to_add = {}
            row_to_add['Percentage'] = _sum_percent
            row_to_add['h*/h(s)'] = ratio
            table.add_row(names[counter], row_to_add)
            counter = counter + 1
            #TODO - save only one ratio per percentage
            toWrite = str(ratio) + ',' + str(_sum_percent) + '\n'
            file.write(toWrite)

        file.close()

        self.create_commulative_h_star_table(domain)

        return table
Example #44
    def __init__(self, path, repo, environment=None, combinations=None,
                 compact=True, limits=None, cache_dir=None):
        """
        The experiment will be built at *path*.

        *repo* must be the path to a Fast Downward repository. Among other things
        this repository is used to search for benchmark files.

        *environment* must be an :ref:`Environment <environments>` instance.
        By default the experiment is run locally.

        If given, *combinations* must be a list of :ref:`Checkout <checkouts>`
        tuples of the form (Translator, Preprocessor, Planner). If combinations
        is None (default), perform an experiment with the working copy in *repo*.

        The *compact* parameter is only relevant for the search
        stage. If *compact* is ``False``, the preprocessed task and
        the two PDDL files are **copied** into the respective run
        directories for all configurations. This requires a lot of
        space (tens of GB), so it is strongly recommended to use the
        default (``compact=True``) which only references these
        files. Use ``compact=False`` only if you really need a
        portable experiment.

        If *limits* is given, it must be a dictionary that maps a
        subset of the keys below to seconds and MiB. It will be used
        to overwrite the default limits::

            default_limits = {
                'translate_time': 7200,
                'translate_memory': 8192,
                'preprocess_time': 7200,
                'preprocess_memory': 8192,
                'search_time': 1800,
                'search_memory': 2048,
            }

        *cache_dir* is used to cache Fast Downward clones and preprocessed
        tasks. By default it points to ``~/lab``.

        .. note::

            The directory *cache_dir* can grow very large (tens of GB).

        Example: ::

            repo = '/path/to/downward-repo'
            env = GkiGridEnvironment(queue='xeon_core.q', priority=-2)
            combos = [(Translator(repo, rev=123),
                       Preprocessor(repo, rev='e2a018c865f7'),
                       Planner(repo, rev='tip'))]
            exp = DownwardExperiment('/tmp/path', repo, environment=env,
                                     combinations=combos,
                                     limits={'search_time': 30,
                                             'search_memory': 1024})

        """
        Experiment.__init__(self, path, environment=environment, cache_dir=cache_dir)

        #if not repo or not os.path.isdir(repo):
        #    logging.critical('The path "%s" is not a local Fast Downward '
        #                     'repository.' % repo)
        self.repo = repo
        self.orig_path = self.path
        self.search_exp_path = self.path
        self.preprocess_exp_path = self.path + '-p'
        self._path_to_python = None
        Checkout.REV_CACHE_DIR = os.path.join(self.cache_dir, 'revision-cache')
        self.preprocessed_tasks_dir = os.path.join(self.cache_dir, 'preprocessed-tasks')
        tools.makedirs(self.preprocessed_tasks_dir)

        self.combinations = (combinations or
                             [(Translator(repo), Preprocessor(repo), Planner(repo))])

        self.compact = compact
        self.suites = defaultdict(list)
        self._algorithms = []
        self._portfolios = []

        limits = limits or {}
        for key, value in limits.items():
            if key not in LIMITS:
                logging.critical('Unknown limit: %s' % key)
        self.limits = LIMITS
        self.limits.update(limits)

        # Save if this is a compact experiment i.e. preprocessed tasks are referenced.
        self.set_property('compact', compact)

        # TODO: Integrate this into the API.
        self.include_preprocess_results_in_search_runs = True

        self.compilation_options = ['-j%d' % self._jobs]

        self._search_parsers = []
        self.add_search_parser(os.path.join(DOWNWARD_SCRIPTS_DIR, 'search_parser.py'))

        # Remove the default experiment steps
        self.steps = Sequence()

        self.add_step(Step('build-preprocess-exp', self.build, stage='preprocess'))
        self.add_step(Step('run-preprocess-exp', self.run, stage='preprocess'))
        self.add_fetcher(src=self.preprocess_exp_path,
                         dest=self.preprocessed_tasks_dir,
                         name='fetch-preprocess-results',
                         copy_all=True,
                         write_combined_props=False)
        self.add_step(Step('build-search-exp', self.build, stage='search'))
        # TODO: Make the source path a parameter.
        self.add_PAC_fetcher(  # New fetcher that copies preprocess results for PAC.
            src='/home/sternron/gal-dreiman/downward/lab/examples/PAC_Preprocess_Output-eval/preprocess',
            dest=self.search_exp_path,
            name='fetch-preprocess-results',
            copy_all=True,
            write_combined_props=False)
        self.add_step(Step('run-search-exp', self.run, stage='search'))
        self.add_fetcher(src=self.search_exp_path, name='fetch-search-results')
Example #45
    def __call__(self, src_dir, eval_dir=None, copy_all=False, write_combined_props=True,
                 filter=None, parsers=None, **kwargs):
        """
        This method can be used to copy properties from an exp-dir or eval-dir
        into an eval-dir. If the destination eval-dir already exist, the data
        will be merged. This means *src_dir* can either be an exp-dir or an
        eval-dir and *eval_dir* can be a new or existing directory.

        If *copy_all* is True (default: False), copy all files from the run
        dirs to a new directory tree at *eval_dir*. Without this option only
        the combined properties file is written to disk.

        If *write_combined_props* is True (default), write the combined
        properties file.

        You can include only specific domains or configurations by using
        :py:class:`filters <.Report>`.

        *parsers* can be a list of paths to parser scripts. If given, each
        parser is called in each run directory and the results are added to
        the properties file which is fetched afterwards. This option is
        useful if you haven't parsed all or some values already during the
        experiment.

        Examples:

        Fetch all results and write a single combined properties file to the
        default evaluation directory (this step is added by default)::

            exp.add_step(Step('fetch', Fetcher(), exp.path))

        Read the combined properties file at ``<eval_dir1>/properties`` and
        merge it into the combined properties file at
        ``<combined_eval_dir>/properties``::

            exp.add_step(Step('combine', Fetcher(), eval_dir1, combined_eval_dir))

        Fetch only the runs for certain configuration from an older experiment::

            exp.add_step(Step('fetch', Fetcher(), src_dir,
                              filter_config_nick=['config_1', 'config_5']))
        """
        if not os.path.isdir(src_dir):
            logging.critical('%s is not a valid directory' % src_dir)
        run_filter = tools.RunFilter(filter, **kwargs)

        src_props = tools.Properties(filename=os.path.join(src_dir, 'properties'))
        fetch_from_eval_dir = 'runs' not in src_props or src_dir.endswith('-eval')
        if fetch_from_eval_dir:
            src_props = run_filter.apply(src_props)
            for prop in src_props.values():
                if prop.get('error', '').startswith('unexplained'):
                    logging.warning("Unexplained error in '%s': %s" %
                        (prop.get('run_dir'), prop.get('error')))

        eval_dir = eval_dir or src_dir.rstrip('/') + '-eval'
        logging.info('Fetching files from %s -> %s' % (src_dir, eval_dir))
        logging.info('Fetching from evaluation dir: %s' % fetch_from_eval_dir)

        if write_combined_props:
            # Load properties in the eval_dir if there are any already.
            combined_props = tools.Properties(os.path.join(eval_dir, 'properties'))
            if fetch_from_eval_dir:
                combined_props.update(src_props)

        # Get all run_dirs. None will be found if we fetch from an eval dir.
        run_dirs = sorted(glob(os.path.join(src_dir, 'runs-*-*', '*')))
        total_dirs = len(run_dirs)
        logging.info('Scanning properties from %d run directories' % total_dirs)
        unexplained_errors = 0
        for index, run_dir in enumerate(run_dirs, start=1):
            loglevel = logging.INFO if index % 100 == 0 else logging.DEBUG
            logging.log(loglevel, 'Scanning: %6d/%d' % (index, total_dirs))
            run_id, props = self.fetch_dir(run_dir, eval_dir, copy_all=copy_all,
                                           run_filter=run_filter, parsers=parsers)
            if props is None:
                continue

            assert run_id, 'Dir %s has no id' % props.get('run_dir')
            if write_combined_props:
                combined_props['-'.join(run_id)] = props
            if props.get('error', '').startswith('unexplained'):
                logging.warning('Unexplained error in {run_dir}: {error}'.format(**props))
                unexplained_errors += 1

        if unexplained_errors:
            logging.warning('There were %d runs with unexplained errors.'
                            % unexplained_errors)
        tools.makedirs(eval_dir)
        if write_combined_props:
            combined_props.write()