Example #1
0
    def __init__(self, config, **kwargs):
        """Initialize this object from a configuration source.

        Parameters
        ----------
        config : str, dict, or None
            Path to a YAML configuration file, an already-parsed
            configuration dict, or None to use defaults only.
        **kwargs
            Extra keyword arguments forwarded to ``configure``.

        Raises
        ------
        Exception
            If ``config`` is a string that does not point to an existing
            file, or is of an unsupported type.
        """
        import yaml

        self._config = self.get_config()
        self._configdir = None

        if utils.isstr(config) and os.path.isfile(config):
            self._configdir = os.path.abspath(os.path.dirname(config))
            # Use a context manager so the file handle is closed promptly
            # (the previous code leaked it), and pass an explicit Loader,
            # which PyYAML >= 6 requires.  NOTE(review): SafeLoader refuses
            # arbitrary-object YAML tags; confirm no config files rely on
            # custom tags before deploying.
            with open(config) as fh:
                config_dict = yaml.load(fh, Loader=yaml.SafeLoader)
        elif isinstance(config, dict) or config is None:
            config_dict = config
        elif utils.isstr(config) and not os.path.isfile(config):
            raise Exception('Invalid path to configuration file: %s' % config)
        else:
            raise Exception('Invalid config argument.')

        self.configure(config_dict, **kwargs)

        # Default the output directory to the directory containing the
        # configuration file when none was set explicitly.
        if self.configdir and 'fileio' in self.config and \
                self.config['fileio']['outdir'] is None:
            self.config['fileio']['outdir'] = self.configdir
Example #2
0
def cast_config(config, defaults):
    """Recursively coerce configuration values to their default types.

    For every key of ``config`` that also appears in ``defaults``, the
    value is cast to the type recorded in the corresponding defaults
    entry ``(value, comment, type)``.  Nested dicts are processed
    recursively; ``None`` values, unknown keys, and entries whose type
    is ``None`` or a tuple are left untouched.  A bare string is
    promoted to a one-element list when the target type is ``list``.
    """
    for key, val in config.items():

        # Keys with no defaults entry cannot be cast.
        if key not in defaults:
            continue

        if isinstance(val, dict):
            # Descend into a nested configuration section.
            cast_config(val, defaults[key])
            continue

        if val is None:
            continue

        _, _, target_type = defaults[key]
        if target_type is None or isinstance(target_type, tuple):
            continue

        if utils.isstr(val) and target_type == list:
            config[key] = [val]
        else:
            config[key] = target_type(config[key])
Example #3
0
def dict_to_table(input_dict):
    """Convert a flat dictionary into a single-row astropy Table.

    Parameters
    ----------
    input_dict : dict
        Mapping of column name to a scalar (float, int, bool, str) or
        ``numpy.ndarray`` value.  Nested dicts and any other value
        types are skipped.

    Returns
    -------
    `~astropy.table.Table`
        A one-row table with one column per convertible entry, ordered
        by key.
    """
    from astropy.table import Table, Column

    cols = []

    for k, v in sorted(input_dict.items()):

        if isinstance(v, dict):
            # Nested sections are not representable as a single column.
            continue
        elif isinstance(v, bool):
            # bool must be tested before int since bool subclasses int.
            cols += [Column(name=k, dtype=bool, data=np.array([v]))]
        elif isinstance(v, float):
            cols += [Column(name=k, dtype='f8', data=np.array([v]))]
        elif isinstance(v, int):
            # Previously int values were silently dropped.
            cols += [Column(name=k, dtype='i8', data=np.array([v]))]
        elif utils.isstr(v):
            cols += [Column(name=k, dtype='S32', data=np.array([v]))]
        elif isinstance(v, np.ndarray):
            cols += [Column(name=k, dtype=v.dtype, data=np.array([v]))]

    return Table(cols)
Example #4
0
def dict_to_table(input_dict):
    """Convert a flat dictionary into a single-row astropy Table.

    Parameters
    ----------
    input_dict : dict
        Mapping of column name to a scalar (float, int, bool, str) or
        ``numpy.ndarray`` value.  Nested dicts and any other value
        types are skipped.

    Returns
    -------
    `~astropy.table.Table`
        A one-row table with one column per convertible entry, ordered
        by key.
    """
    from astropy.table import Table, Column

    cols = []

    for k, v in sorted(input_dict.items()):

        if isinstance(v, dict):
            # Nested sections are not representable as a single column.
            continue
        elif isinstance(v, bool):
            # bool must be tested before int since bool subclasses int.
            cols += [Column(name=k, dtype=bool, data=np.array([v]))]
        elif isinstance(v, float):
            cols += [Column(name=k, dtype='f8', data=np.array([v]))]
        elif isinstance(v, int):
            # Previously int values were silently dropped.
            cols += [Column(name=k, dtype='i8', data=np.array([v]))]
        elif utils.isstr(v):
            cols += [Column(name=k, dtype='S32', data=np.array([v]))]
        elif isinstance(v, np.ndarray):
            cols += [Column(name=k, dtype=v.dtype, data=np.array([v]))]

    return Table(cols)
Example #5
0
def get_target_skydir(config, ref_skydir=None):
    """Resolve the target sky direction from a configuration dict.

    The coordinate is taken from the first of the following that is
    present in ``config``: ``radec`` (string or [lon, lat] list),
    ``ra``/``dec``, ``glon``/``glat``, ``offset_ra``/``offset_dec``,
    ``offset_glon``/``offset_glat``.  Galactic inputs are transformed
    to ICRS; offsets are applied relative to ``ref_skydir``.  If no
    keys are set, ``ref_skydir`` itself is returned (defaulting to
    (0, 0) deg).
    """
    if ref_skydir is None:
        ref_skydir = SkyCoord(0.0, 0.0, unit=u.deg)

    radec = config.get('radec', None)
    if utils.isstr(radec):
        return SkyCoord(radec, unit=u.deg)
    if isinstance(radec, list):
        return SkyCoord(radec[0], radec[1], unit=u.deg)

    ra = config.get('ra', None)
    dec = config.get('dec', None)
    if ra is not None and dec is not None:
        return SkyCoord(ra, dec, unit=u.deg)

    glon = config.get('glon', None)
    glat = config.get('glat', None)
    if glon is not None and glat is not None:
        gal = SkyCoord(glon, glat, unit=u.deg, frame='galactic')
        return gal.transform_to('icrs')

    off_ra = config.get('offset_ra', None)
    off_dec = config.get('offset_dec', None)
    if off_ra is not None and off_dec is not None:
        return offset_to_skydir(ref_skydir, off_ra, off_dec,
                                coordsys='CEL')[0]

    off_glon = config.get('offset_glon', None)
    off_glat = config.get('offset_glat', None)
    if off_glon is not None and off_glat is not None:
        cel = offset_to_skydir(ref_skydir, off_glon, off_glat,
                               coordsys='GAL')[0]
        return cel.transform_to('icrs')

    return ref_skydir
Example #6
0
def get_target_skydir(config, ref_skydir=None):
    """Resolve the target sky direction from a configuration dict.

    The coordinate is taken from the first of the following that is
    present in ``config``: ``radec`` (string or [lon, lat] list),
    ``ra``/``dec``, ``glon``/``glat``, ``offset_ra``/``offset_dec``,
    ``offset_glon``/``offset_glat``.  Galactic inputs are transformed
    to ICRS; offsets are applied relative to ``ref_skydir``.  If no
    keys are set, ``ref_skydir`` itself is returned (defaulting to
    (0, 0) deg).
    """
    if ref_skydir is None:
        ref_skydir = SkyCoord(0.0, 0.0, unit=u.deg)

    radec = config.get('radec', None)
    if utils.isstr(radec):
        return SkyCoord(radec, unit=u.deg)
    if isinstance(radec, list):
        return SkyCoord(radec[0], radec[1], unit=u.deg)

    ra = config.get('ra', None)
    dec = config.get('dec', None)
    if ra is not None and dec is not None:
        return SkyCoord(ra, dec, unit=u.deg)

    glon = config.get('glon', None)
    glat = config.get('glat', None)
    if glon is not None and glat is not None:
        gal = SkyCoord(glon, glat, unit=u.deg, frame='galactic')
        return gal.transform_to('icrs')

    off_ra = config.get('offset_ra', None)
    off_dec = config.get('offset_dec', None)
    if off_ra is not None and off_dec is not None:
        return offset_to_skydir(ref_skydir, off_ra, off_dec,
                                coordsys='CEL')[0]

    off_glon = config.get('offset_glon', None)
    off_glat = config.get('offset_glat', None)
    if off_glon is not None and off_glat is not None:
        cel = offset_to_skydir(ref_skydir, off_glon, off_glat,
                               coordsys='GAL')[0]
        return cel.transform_to('icrs')

    return ref_skydir
Example #7
0
def main():
    """Continuously dispatch analysis jobs to the LSF batch system.

    Parses command-line options, collects runnable jobs from the given
    directories, then loops: query LSF, submit up to ``--jobs_per_cycle``
    new jobs with ``bsub`` while staying under ``--max_jobs``, sleep
    ``--time_per_cycle`` seconds, and repeat until the job list is empty.
    """
    usage = "usage: %(prog)s [config file]"
    description = "Dispatch analysis jobs to LSF."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('--config', default='sample_config.yaml')
    parser.add_argument(
        '--resources', default=None, type=str,
        help='Set the LSF resource string.')
    parser.add_argument(
        '--time', default=1500, type=int,
        help='Set the wallclock time allocation for the '
             'job in minutes.')
    parser.add_argument(
        '--max_jobs', default=500, type=int,
        help='Limit on the number of running or queued jobs.  '
             'New jobs will only be dispatched if the number of '
             'existing jobs is smaller than this parameter.')
    parser.add_argument(
        '--jobs_per_cycle', default=20, type=int,
        help='Maximum number of jobs to submit in each cycle.')
    parser.add_argument(
        '--time_per_cycle', default=15, type=float,
        help='Time per submission cycle in seconds.')
    parser.add_argument(
        '--max_job_age', default=90, type=float,
        help='Max job age in minutes.  Incomplete jobs without '
             'a return code and a logfile modification '
             'time older than this parameter will be restarted.')
    parser.add_argument('--dry_run', default=False, action='store_true')
    parser.add_argument(
        '--overwrite', default=False, action='store_true',
        help='Force all jobs to be re-run even if the job has '
             'completed successfully.')
    parser.add_argument(
        '--runscript', default=None, required=True,
        help='Set the name of the job execution script.  A '
             'script with this name must be located in each '
             'analysis subdirectory.')
    parser.add_argument(
        '--ncpu', default=1, type=int,
        help='Set the number of CPUs that are used for each job.')
    parser.add_argument(
        'dirs', nargs='+', default=None,
        help='List of directories in which the analysis will '
             'be run.')

    args = parser.parse_args()

    # Expand each directory argument into the analysis subdirectories it
    # contains, then build the list of jobs that still need to run.
    dirs = [d for argdir in args.dirs for d in utils.collect_dirs(argdir)]
    jobs = collect_jobs(dirs, args.runscript,
                        args.overwrite, args.max_job_age)

    batch_opts = {'W': args.time,
                  'n': args.ncpu,
                  'R': 'bullet,hequ,kiso && scratch > 5'}
    if args.resources is not None:
        batch_opts['R'] = args.resources

    # Render the option dict as a bsub command-line fragment; string
    # values are quoted so resource expressions survive the shell.
    opt_str = ''
    for name, val in batch_opts.items():
        if utils.isstr(val):
            val = '"%s"' % val
        opt_str += '-%s %s ' % (name, val)

    while True:

        print('-' * 80)
        print(datetime.datetime.now())
        print(len(jobs), 'jobs in queue')

        if not jobs:
            break

        status = get_lsf_status()

        # Submit only as many jobs as the queue limit allows this cycle.
        n_submit = min(args.max_jobs - status['NJOB'],
                       args.jobs_per_cycle)

        pprint.pprint(status)
        print('njob_to_submit ', n_submit)

        if n_submit > 0:

            print('Submitting ', n_submit, 'jobs')

            for job in jobs[:n_submit]:
                cmd = 'bsub %s -oo %s bash %s' % (opt_str,
                                                  job['logfile'],
                                                  job['runscript'])
                print(cmd)
                if not args.dry_run:
                    print('submitting')
                    os.system(cmd)

            del jobs[:n_submit]

        print('Sleeping %f seconds' % args.time_per_cycle)
        sys.stdout.flush()
        time.sleep(args.time_per_cycle)
Example #8
0
def main():
    """Continuously dispatch analysis jobs to the LSF batch system.

    Parses command-line options, collects runnable jobs from the given
    directories, then loops: query LSF, submit up to ``--jobs_per_cycle``
    new jobs with ``bsub`` while staying under ``--max_jobs``, sleep
    ``--time_per_cycle`` seconds, and repeat until the job list is empty.
    """
    usage = "usage: %(prog)s [config file]"
    description = "Dispatch analysis jobs to LSF."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('--config', default='sample_config.yaml')
    parser.add_argument(
        '--time', default=1500, type=int,
        help='Set the wallclock time allocation for the '
             'job in minutes.')
    parser.add_argument(
        '--max_jobs', default=500, type=int,
        help='Limit on the number of running or queued jobs.  '
             'New jobs will only be dispatched if the number of '
             'existing jobs is smaller than this parameter.')
    parser.add_argument(
        '--jobs_per_cycle', default=20, type=int,
        help='Maximum number of jobs to submit in each cycle.')
    parser.add_argument(
        '--time_per_cycle', default=15, type=float,
        help='Time per submission cycle in seconds.')
    parser.add_argument(
        '--max_job_age', default=90, type=float,
        help='Max job age in minutes.  Incomplete jobs without '
             'a return code and a logfile modification '
             'time older than this parameter will be restarted.')
    parser.add_argument('--dry_run', default=False, action='store_true')
    parser.add_argument(
        '--overwrite', default=False, action='store_true',
        help='Force all jobs to be re-run even if the job has '
             'completed successfully.')
    parser.add_argument(
        '--runscript', default=None, required=True,
        help='Set the name of the job execution script.  A '
             'script with this name must be located in each '
             'analysis subdirectory.')
    parser.add_argument(
        'dirs', nargs='+', default=None,
        help='List of directories in which the analysis will '
             'be run.')

    args = parser.parse_args()

    # Expand each directory argument into the analysis subdirectories it
    # contains, then build the list of jobs that still need to run.
    dirs = [d for argdir in args.dirs for d in utils.collect_dirs(argdir)]
    jobs = collect_jobs(dirs, args.runscript, args.overwrite, args.max_job_age)

    batch_opts = {'W': args.time, 'R': 'bullet,hequ,kiso'}

    # Render the option dict as a bsub command-line fragment; string
    # values are quoted so they survive the shell.
    opt_str = ''
    for name, val in batch_opts.items():
        if utils.isstr(val):
            val = '"%s"' % val
        opt_str += '-%s %s ' % (name, val)

    while True:

        print('-' * 80)
        print(datetime.datetime.now())
        print(len(jobs), 'jobs in queue')

        if not jobs:
            break

        status = get_lsf_status()

        # Submit only as many jobs as the queue limit allows this cycle.
        n_submit = min(args.max_jobs - status['NJOB'],
                       args.jobs_per_cycle)

        pprint.pprint(status)
        print('njob_to_submit ', n_submit)

        if n_submit > 0:

            print('Submitting ', n_submit, 'jobs')

            for job in jobs[:n_submit]:
                cmd = 'bsub %s -oo %s bash %s' % (
                    opt_str, job['logfile'], job['runscript'])
                print(cmd)
                if not args.dry_run:
                    print('submitting')
                    os.system(cmd)

            del jobs[:n_submit]

        print('Sleeping %f seconds' % args.time_per_cycle)
        sys.stdout.flush()
        time.sleep(args.time_per_cycle)