Example no. 1
0
    def to_yaml_file(self, name):
        """Serialize the fit result to a file in YAML format.

        The output file name is determined by `get_fit_result_path`.

        Arguments:
            name (str): Name of the fit result.

        Return:
            str: Output file name.

        Raise:
            NotInitializedError: If the fit result has not been initialized.

        """
        file_context = _paths.work_on_file(name,
                                           path_func=_paths.get_fit_result_path)
        with file_context as output_file:
            write_config(self.to_yaml(), output_file)
        # The resolved name is still valid after the context exits
        return output_file
Example no. 2
0
    def write_to_disk(self, name, link_from=None):
        """Persist the efficiency object to disk.

        Arguments:
            name (str): Name of the efficiency object.
            link_from (str, optional): Storage to link from. Defaults to
                no link.

        Return:
            str: Path of the output file.

        Raise:
            NotImplementedError: If the class declares no `MODEL_NAME`, ie,
                it is a generic efficiency that cannot be serialized.

        """
        # Guard clause: only concrete efficiency models can be saved
        if not self.MODEL_NAME:
            raise NotImplementedError("Cannot save generic Efficiency")
        with work_on_file(name, get_efficiency_path, link_from) as file_name:
            payload = {'model': self.MODEL_NAME,
                       'variables': self.get_variables(),
                       'parameters': self._config}
            write_config(payload, file_name)
        return file_name
Example no. 3
0
def main():
    """Toy fitting submission application.

    Parses the command line, configures the toy fitters and submit the
    jobs, catching intermediate errors and transforming them to status codes.

    Status codes:
        0: All good.
        1: Error in the configuration files.
        2: Error in preparing the output folders.
        3: Conflicting options given.
        4: A non-matching configuration file was found in the output.
        5: The queue submission command cannot be found.
        128: Uncaught error. An exception is logged.

    """

    def flatten(list_, typ_):
        """Flatten one nesting level by concatenating onto `typ_` (eg an empty tuple)."""
        return list(sum(list_, typ_))

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        help="Verbose output")
    parser.add_argument('--link-from',
                        action='store', type=str,
                        help="Folder to actually store the toy files")
    parser.add_argument('--extend',
                        action='store_true', default=False,
                        help="Extend previous production")
    parser.add_argument('--overwrite',
                        action='store_true', default=False,
                        help="Overwrite previous production")
    parser.add_argument('config',
                        action='store', type=str, nargs='+',
                        help="Configuration file")
    args = parser.parse_args()
    if args.verbose:
        # Level 1 is below DEBUG (10), so both loggers emit everything
        get_logger('analysis').setLevel(1)
        logger.setLevel(1)
    try:
        config = _config.load_config(*args.config)
        # Which type of toy are we running?
        # NOTE(review): if several TOY_TYPES keys are present in the config,
        # the last one iterated wins silently -- confirm this is intended.
        script_to_run = None
        submitter = None
        for toy_type, (toy_class, script_name) in TOY_TYPES.items():
            if toy_type in config:
                script_to_run = script_name
                submitter = toy_class
        if submitter is None:
            raise KeyError("Unknown job type")
        # Is there something to scan? If so, expand the 'scan' specification
        # into one temporary config file per scan point.
        scan_config = 'scan' in config
        if scan_config:
            config_files = []
            base_config = _config.unfold_config(config)
            scan_groups = []
            for scan_group in config['scan']:
                scan_group_dict = {}
                for key, val_str in scan_group.items():
                    scan_group_dict[key] = process_scan_val(val_str, scan_group_dict)
                scan_groups.append(scan_group_dict)
            # Check lengths: within one group all value lists must be equally
            # long, because they are zipped together (varied in lockstep)
            if not all(len({len(val) for val in scan_group.values()}) == 1
                       for scan_group in scan_groups):
                raise ValueError("Unmatched length in scan parameters")
            # Build values to scan: lockstep within a group, cartesian product
            # across groups
            keys, values = list(zip(*[zip(*scan_group.items()) for scan_group in scan_groups]))
            keys = flatten(keys, tuple())
            for value_tuple in itertools.product(*[zip(*val) for val in values]):
                values = dict(zip(keys, flatten(value_tuple, tuple())))
                temp_config = dict(base_config)
                del temp_config['scan']
                # The production name may interpolate the scan values
                temp_config['name'] = temp_config['name'].format(**values)
                for key, value in values.items():
                    temp_config[key] = value
                logger.debug("Creating configuration %s for scan values -> %s",
                             temp_config['name'],
                             ", ".join('{}: {}'.format(*val) for val in values.items()))
                # Write temp_file (delete=False so it survives the context
                # manager; it is removed after submission below)
                with tempfile.NamedTemporaryFile(delete=False) as file_:
                    file_name = file_.name
                _config.write_config(_config.fold_config(list(temp_config.items())), file_name)
                config_files.append(file_name)
        else:
            config_files = args.config
    # pylint: disable=W0702
    except:
        # Deliberate catch-all: any failure up to here is a config problem
        logger.exception("Bad configuration given")
        parser.exit(1)
    try:
        script_to_run = os.path.join(get_global_var('BASE_PATH'),
                                     'toys',
                                     script_to_run)
        # One submitter per config file, all sharing the command-line options
        for config_file in config_files:
            submitter(config_files=[config_file],
                      link_from=args.link_from,
                      extend=args.extend,
                      overwrite=args.overwrite,
                      verbose=args.verbose).run(script_to_run, )
            if scan_config:
                # Temporary scan configs are no longer needed once submitted
                os.remove(config_file)
        exit_status = 0
    except KeyError:
        logger.error("Bad configuration given")
        exit_status = 1
    except OSError as error:
        logger.error(str(error))
        exit_status = 2
    except ValueError:
        logger.error("Conflicting options found")
        exit_status = 3
    except AttributeError:
        logger.error("Mismatching configuration found")
        exit_status = 4
    except AssertionError:
        logger.error("Cannot find the queue submission command")
        exit_status = 5
    # pylint: disable=W0703
    except Exception as error:
        exit_status = 128
        logger.exception('Uncaught exception -> %s', repr(error))
    finally:
        # Translate the accumulated status code into the process exit code
        parser.exit(exit_status)
Example no. 4
0
    def run(self, script_to_run):
        """Run the script.

        If the output exists and no extension or overwrite has been configured, nothing
        is done.

        Arguments:
            script_to_run (str): Script to run in the cluster.

        Raise:
            AssertionError: If the qsub command cannot be found.
            AttributeError: If non-matching configuration file was found.
            OSError: If there is a problem preparing the output path.

        """
        # Flattened (key, value) view of the configuration for easy lookups
        flat_config = dict(_config.unfold_config(self.config))
        # Check if it has not been produced yet
        # pylint: disable=E1102
        config_file_dest = self.TOY_CONFIG_PATH_GETTER(self.config['name'])
        # First check the config (we may have already checked)
        if os.path.exists(config_file_dest):  # It exists, check they match
            config_dest = _config.load_config(config_file_dest)
            # Any difference outside `allowed_config_diffs` means a clashing
            # production with the same name
            if _config.compare_configs(flat_config, config_dest).difference(self.allowed_config_diffs):
                logger.error("Non-matching configuration already exists with that name!")
                raise AttributeError()
        # Now check output
        _, expected_src, expected_dest = _paths.prepare_path(name=self.config['name'],
                                                             path_func=self.TOY_PATH_GETTER,
                                                             link_from=self.config['link-from'])
        # Check file existence
        if os.path.exists(expected_src):
            logger.warning("Output data file exists! %s", expected_src)
            if self.overwrite:
                # Start from scratch: remove both the file and its link
                os.remove(expected_src)
                if os.path.exists(expected_dest):
                    os.remove(expected_dest)
            else:
                # Create the symlink if necessary
                if not os.path.exists(expected_dest):
                    os.symlink(expected_src, expected_dest)
                if not self.extend:
                    logger.info("Nor --extend nor --overwrite have been specified. Nothing to do.")
                    return
        # Source doesn't exist, delete the destination if needed
        else:
            if os.path.exists(expected_dest):
                os.remove(expected_dest)
        # Some bookkeeping
        if not os.path.exists(script_to_run):
            raise OSError("Cannot find {}!".format(script_to_run))
        # Build the command-line arguments forwarded to the toy script
        script_args = []
        if self.config['link-from']:
            script_args.append('--link-from={}'.format(self.config['link-from']))
        if self.verbose:
            script_args.append('--verbose')
        script_args.append(config_file_dest)
        # Prepare paths
        # pylint: disable=E1101
        _, log_file_fmt, _ = _paths.prepare_path(name=self.config['name'],
                                                 path_func=_paths.get_log_path,
                                                 link_from=None)  # No linking is done for logs
        # Calculate number of jobs and submit
        ntoys = flat_config[self.NTOYS_KEY]
        # Default: all toys in a single job
        ntoys_per_job = flat_config.get(self.NTOYS_PER_JOB_KEY, ntoys)
        # Ceiling division: an extra job picks up the remainder toys
        n_jobs = int(1.0 * ntoys / ntoys_per_job)
        if ntoys % ntoys_per_job:
            n_jobs += 1
        # Submit! The config file is written once, shared by all jobs
        _config.write_config(self.config, config_file_dest)
        for _ in range(n_jobs):
            # All jobs are identical submissions; the batch system tells them
            # apart by the returned job id
            job_id = self.batch_system.submit_script(job_name=self.config['name'],
                                                     cmd_script=script_to_run,
                                                     script_args=script_args,
                                                     log_file=log_file_fmt,
                                                     **self.config.get('batch', {}))
            logger.info('Submitted JobID: %s', job_id)