Example #1
def load_config_with_defaults(config_path=None):
    """Loads the configuration map to use, merging in the defaults"""
    base_config = __load_base_config()
    base_config = base_config or {}
    base_config = deep_merge(DEFAULT_CONFIG, base_config)
    config_path, config = __load_local_config(config_path)
    config = config or {}
    config = deep_merge(base_config, config)
    logging.debug(f'using configuration: {config}')
    return config_path, config
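
Every example on this page funnels through deep_merge, which these snippets import from elsewhere in the Cook project. As a reference point, here is a minimal sketch of the semantics the call sites assume (keys from the second map win, and nested dicts are merged recursively rather than replaced); this is an illustration, not the project's actual implementation:

def deep_merge(a, b):
    """Merge mapping b over mapping a; b's values take precedence, and
    nested dicts are merged recursively instead of being overwritten."""
    merged = dict(a)
    for key, value in b.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

defaults = {'http': {'retries': 2, 'timeout': 30}}
overrides = {'http': {'timeout': 5}}
assert deep_merge(defaults, overrides) == {'http': {'retries': 2, 'timeout': 5}}
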
Example #2
def load_config_with_defaults(config_path=None):
    """Loads the configuration map to use, merging in the defaults"""
    _, config = load_config(config_path)
    config = config or {}
    config = deep_merge(DEFAULT_CONFIG, config)
    logging.debug(f'using configuration: {config}')
    return config
Example #3
File: submit.py Project: yueri/Cook
def parse_raw_job_spec(job, r):
    """
    Parse a JSON string containing raw job data and merge with job template.
    Job data can either be a dict of job attributes (indicating a single job),
    or a list of dicts (indicating multiple jobs). In either case, the job attributes
    are merged with (and override) the `job` template attributes.
    Throws a ValueError if there is a problem parsing the data.
    """
    try:
        content = json.loads(r)

        if type(content) is dict:
            return [deep_merge(job, content)]
        elif type(content) is list:
            return [deep_merge(job, c) for c in content]
        else:
            raise ValueError('invalid format for raw job')
    except json.JSONDecodeError:
        # narrowed from `except Exception` so the 'invalid format' error
        # raised above is not masked by the 'malformed JSON' message
        raise ValueError('malformed JSON for raw job')
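
Illustrative calls, assuming the deep_merge sketch from Example #1: the parser accepts either a single JSON object or an array of objects, and each parsed job's attributes override the template they are merged onto:

template = {'cpus': 1, 'mem': 128}
parse_raw_job_spec(template, '{"command": "ls"}')
# -> [{'cpus': 1, 'mem': 128, 'command': 'ls'}]
parse_raw_job_spec(template, '[{"command": "ls"}, {"command": "pwd", "cpus": 2}]')
# -> [{'cpus': 1, 'mem': 128, 'command': 'ls'},
#     {'cpus': 2, 'mem': 128, 'command': 'pwd'}]
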
Example #4
File: cli.py Project: dPeS/Cook
def run(args):
    """
    Main entrypoint to the cook scheduler CLI. Loads configuration files, 
    processes global command line arguments, and calls other command line 
    sub-commands (actions) if necessary.
    """
    args = vars(parser.parse_args(args))

    print_version = args.pop('version')
    if print_version:
        print(f'cs version {version.VERSION}')
        return 0

    util.silent = args.pop('silent')
    verbose = args.pop('verbose') and not util.silent

    log_format = '%(asctime)s [%(levelname)s] [%(name)s] %(message)s'
    if verbose:
        logging.getLogger('').handlers = []
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.disable(logging.FATAL)

    logging.debug('args: %s' % args)

    action = args.pop('action')
    config_path = args.pop('config')
    cluster = args.pop('cluster')
    url = args.pop('url')

    if action is None:
        parser.print_help()
    else:
        config_map = configuration.load_config_with_defaults(config_path)
        try:
            metrics.initialize(config_map)
            metrics.inc('command.%s.runs' % action)
            clusters = load_target_clusters(config_map, url, cluster)
            http.configure(config_map)
            args = {k: v for k, v in args.items() if v is not None}
            defaults = config_map.get('defaults')
            action_defaults = (defaults.get(action)
                               if defaults else None) or {}
            result = actions[action](clusters,
                                     deep_merge(action_defaults,
                                                args), config_path)
            logging.debug('result: %s' % result)
            return result
        finally:
            metrics.close()

    return None
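
The key step above is the per-action defaults merge: args is first filtered down to the flags the user actually passed (dropping argparse's None values), then deep_merge lets those flags override the defaults section of the config map. A standalone illustration of that step, using the deep_merge sketch from Example #1:

config_map = {'defaults': {'submit': {'cpus': 1, 'mem': 128}}}
args = {'mem': 256}  # after the None-filter, only flags the user passed remain
defaults = config_map.get('defaults')
action_defaults = (defaults.get('submit') if defaults else None) or {}
assert deep_merge(action_defaults, args) == {'cpus': 1, 'mem': 256}
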
Example #5
def load_config(config_path=None):
    """Loads the configuration map to use"""
    if config_path:
        if os.path.isfile(config_path):
            with open(config_path) as json_file:
                config = json.load(json_file)
        else:
            raise Exception(
                'The configuration path specified (%s) is not valid' %
                config_path)
    else:
        config = load_first_json_file(DEFAULT_CONFIG_PATHS) or {}
    config = deep_merge(DEFAULT_CONFIG, config)
    logging.debug('using configuration: %s' % config)
    return config
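
The resolution order: an explicit config_path must point at an existing file (otherwise the function raises), while with no path the first readable file from DEFAULT_CONFIG_PATHS is used, falling back to an empty map; DEFAULT_CONFIG is always merged underneath. A hypothetical round-trip against a temp file, assuming the module context above:

import json
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'defaults': {'submit': {'mem': 256}}}, f)

config = load_config(f.name)  # explicit path: the file must exist
config = load_config()        # no path: first hit in DEFAULT_CONFIG_PATHS, else {}
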
Example #6
File: submit.py Project: m4ce/Cook
def submit(clusters, args):
    """
    Submits a job (or multiple jobs) to cook scheduler. Assembles a list of jobs,
    potentially getting data from configuration, the command line, and stdin.
    """
    logging.debug('submit args: %s' % args)
    job = args
    raw = job.pop('raw', None)
    command_from_command_line = job.pop('command', None)
    application_name = job.pop('application_name', 'cook-scheduler-cli')
    application_version = job.pop(
        'application_version',
        pkg_resources.require('cook_client')[0].version)
    job['application'] = {
        'name': application_name,
        'version': application_version
    }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job.get('env'):
            job['env'] = dict([e.split('=', maxsplit=1) for e in job['env']])

        jobs = [deep_merge(job, {'command': c}) for c in commands]

    for j in jobs:
        if not j.get('uuid'):
            j['uuid'] = str(uuid.uuid4())

        if not j.get('name'):
            j['name'] = '%s_job' % current_user()

    logging.debug('jobs: %s' % jobs)
    return submit_federated(clusters, jobs)
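
Note that the env handling splits each KEY=VALUE argument on the first '=' only, so values may themselves contain '=':

env_args = ['FOO=1', 'OPTS=--retries=3']
env = dict(e.split('=', maxsplit=1) for e in env_args)
assert env == {'FOO': '1', 'OPTS': '--retries=3'}
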
Example #7
File: submit.py Project: yueri/Cook
def submit(clusters, args, _):
    """
    Submits a job (or multiple jobs) to cook scheduler. Assembles a list of jobs,
    potentially getting data from configuration, the command line, and stdin.
    """
    guard_no_cluster(clusters)
    logging.debug('submit args: %s' % args)
    job_template = args
    raw = job_template.pop('raw', None)
    command_from_command_line = job_template.pop('command', None)
    command_prefix = job_template.pop('command-prefix')
    application_name = job_template.pop('application-name',
                                        'cook-scheduler-cli')
    application_version = job_template.pop('application-version',
                                           version.VERSION)
    job_template['application'] = {
        'name': application_name,
        'version': application_version
    }
    pool = job_template.pop('pool-name', None)

    docker_image = job_template.pop('docker-image', None)
    if docker_image:
        job_template['container'] = {
            'type': 'docker',
            'docker': {
                'image': docker_image,
                'network': 'HOST',
                'force-pull-image': False
            }
        }

    group = None
    if 'group-name' in job_template:
        # If the user did not also specify a group uuid, generate
        # one for them, and place the job(s) into the group
        if 'group' not in job_template:
            job_template['group'] = str(uuid.uuid4())

        # The group name is specified on the group object
        group = {
            'name': job_template.pop('group-name'),
            'uuid': job_template['group']
        }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job_template, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job_template.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job_template.get('env'):
            job_template['env'] = dict(
                [e.split('=', maxsplit=1) for e in job_template['env']])

        if job_template.get('label'):
            labels = dict(
                [l.split('=', maxsplit=1) for l in job_template['label']])
            job_template.pop('label')
            if 'labels' not in job_template:
                job_template['labels'] = {}
            job_template['labels'].update(labels)

        jobs = [deep_merge(job_template, {'command': c}) for c in commands]

    for job in jobs:
        if not job.get('uuid'):
            job['uuid'] = str(uuid.uuid4())

        if not job.get('name'):
            job['name'] = '%s_job' % current_user()

        if command_prefix:
            job['command'] = f'{command_prefix}{job["command"]}'

    logging.debug('jobs: %s' % jobs)
    return submit_federated(clusters, jobs, group, pool)
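
The label handling works the same way as env: each --label argument is split on the first '=' and folded into any labels already present on the template. Concretely:

job_template = {'labels': {'team': 'infra'}, 'label': ['owner=alice', 'tier=batch=2']}
labels = dict(l.split('=', maxsplit=1) for l in job_template.pop('label'))
job_template.setdefault('labels', {}).update(labels)
assert job_template['labels'] == {'team': 'infra', 'owner': 'alice', 'tier': 'batch=2'}
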
Example #8
File: submit.py Project: scrosby/Cook
def submit(clusters, args, _):
    """
    Submits a job (or multiple jobs) to cook scheduler. Assembles a list of jobs,
    potentially getting data from configuration, the command line, and stdin.
    """
    guard_no_cluster(clusters)
    logging.debug('submit args: %s' % args)
    job_template = args
    raw = job_template.pop('raw', None)
    command_from_command_line = job_template.pop('command', None)
    command_prefix = job_template.pop('command-prefix')
    application_name = job_template.pop('application-name',
                                        'cook-scheduler-cli')
    application_version = job_template.pop('application-version',
                                           version.VERSION)
    job_template['application'] = {
        'name': application_name,
        'version': application_version
    }
    pool = job_template.pop('pool-name', None)
    checkpoint = job_template.pop('checkpoint', False)
    checkpoint_mode = job_template.pop('checkpoint-mode', None)
    checkpoint_preserve_paths = job_template.pop('checkpoint-preserve-paths',
                                                 None)
    checkpoint_period_sec = job_template.pop('checkpoint-period-sec', None)
    disk_request = job_template.pop('disk-request', None)
    disk_limit = job_template.pop('disk-limit', None)
    disk_type = job_template.pop('disk-type', None)

    docker_image = job_template.pop('docker-image', None)
    if docker_image:
        job_template['container'] = {
            'type': 'docker',
            'docker': {
                'image': docker_image,
                'network': 'HOST',
                'force-pull-image': False
            }
        }

    group = None
    if 'group-name' in job_template:
        # If the user did not also specify a group uuid, generate
        # one for them, and place the job(s) into the group
        if 'group' not in job_template:
            job_template['group'] = str(make_temporal_uuid())

        # The group name is specified on the group object
        group = {
            'name': job_template.pop('group-name'),
            'uuid': job_template['group']
        }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job_template, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job_template.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job_template.get('env'):
            job_template['env'] = dict(
                [e.split('=', maxsplit=1) for e in job_template['env']])

        if job_template.get('label'):
            labels = dict(
                [l.split('=', maxsplit=1) for l in job_template['label']])
            job_template.pop('label')
            if 'labels' not in job_template:
                job_template['labels'] = {}
            job_template['labels'].update(labels)

        if job_template.get('constraint'):
            constraints = []
            for c in job_template['constraint']:
                parts = c.split('=', maxsplit=1)
                if len(parts) != 2:
                    raise Exception(
                        'Invalid constraint, must be of the form K=V')
                constraints.append([parts[0], 'EQUALS', parts[1]])
            job_template.pop('constraint')
            job_template['constraints'] = constraints

        jobs = [deep_merge(job_template, {'command': c}) for c in commands]

    for job in jobs:
        if not job.get('uuid'):
            job['uuid'] = str(make_temporal_uuid())

        if not job.get('name'):
            job['name'] = '%s_job' % current_user()

        if command_prefix:
            job['command'] = f'{command_prefix}{job["command"]}'

        if checkpoint or checkpoint_mode:
            checkpoint = {
                'mode': checkpoint_mode if checkpoint_mode else 'auto'
            }
            if checkpoint_preserve_paths:
                checkpoint['options'] = {
                    'preserve-paths': checkpoint_preserve_paths
                }
            if checkpoint_period_sec:
                checkpoint['periodic-options'] = {
                    'period-sec': checkpoint_period_sec
                }
            job['checkpoint'] = checkpoint

        if disk_request or disk_limit or disk_type:
            disk = {}
            if disk_request:
                disk['request'] = disk_request
            if disk_limit:
                disk['limit'] = disk_limit
            if disk_type:
                disk['type'] = disk_type
            job['disk'] = disk
    logging.debug('jobs: %s' % jobs)
    return submit_federated(clusters, jobs, group, pool)
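
The constraint expansion is worth seeing in isolation: each --constraint argument must be of the form K=V and becomes an [attribute, 'EQUALS', value] triple:

raw_constraints = ['host=node-1', 'rack=r2']
constraints = []
for c in raw_constraints:
    parts = c.split('=', maxsplit=1)
    if len(parts) != 2:
        raise Exception('Invalid constraint, must be of the form K=V')
    constraints.append([parts[0], 'EQUALS', parts[1]])
assert constraints == [['host', 'EQUALS', 'node-1'], ['rack', 'EQUALS', 'r2']]
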
Example #9
def run(args, plugins):
    """
    Main entrypoint to the cook scheduler CLI. Loads configuration files, 
    processes global command line arguments, and calls other command line 
    sub-commands (actions) if necessary.

    plugins is a map from plugin-name -> function or Class.SubCommandPlugin
    """

    # This has to happen before we parse the args, otherwise we might
    # get subcommand not found.
    for name, instance in plugins.items():
        if isinstance(instance, SubCommandPlugin):
            logging.debug('Adding SubCommandPlugin %s' % name)
            try:
                instance.register(subparsers.add_parser,
                                  configuration.add_defaults)
                logging.debug('Done adding SubCommandPlugin %s' % name)
                name = instance.name()
                if name in actions:
                    raise Exception(
                        'SubCommandPlugin %s clashes with an existing subcommand.'
                        % name)
                actions[name] = instance.run
            except Exception as e:
                print('Failed to load SubCommandPlugin %s: %s' % (name, e),
                      file=sys.stderr)

    args = vars(parser.parse_args(args))

    util.silent = args.pop('silent')
    verbose = args.pop('verbose') and not util.silent

    log_format = '%(asctime)s [%(levelname)s] [%(name)s] %(message)s'
    if verbose:
        logging.getLogger('').handlers = []
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.disable(logging.FATAL)

    logging.debug('args: %s', args)

    action = args.pop('action')
    config_path = args.pop('config')
    cluster = args.pop('cluster')
    url = args.pop('url')

    if action is None:
        parser.print_help()
    else:
        _, config_map = configuration.load_config_with_defaults(config_path)
        try:
            metrics.initialize(config_map)
            metrics.inc('command.%s.runs' % action)
            clusters = load_target_clusters(config_map, url, cluster)
            http.configure(config_map, plugins)
            cook.plugins.configure(plugins)
            args = {k: v for k, v in args.items() if v is not None}
            defaults = config_map.get('defaults')
            action_defaults = (defaults.get(action)
                               if defaults else None) or {}
            logging.debug('going to execute %s action' % action)
            result = actions[action](clusters,
                                     deep_merge(action_defaults,
                                                args), config_path)
            logging.debug('result: %s' % result)
            return result
        finally:
            metrics.close()

    return None
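
The registration loop implies a plugin contract: a SubCommandPlugin must expose register(add_parser, add_defaults), name(), and run(clusters, args, config_path), where run's return value becomes the CLI's result. A hypothetical plugin shaped to match those call sites; the 'echo' subcommand and the (action, defaults) signature assumed for add_defaults are illustrative, not part of a documented API:

import sys

from cook.plugins import SubCommandPlugin  # assumed import path

class EchoPlugin(SubCommandPlugin):
    def register(self, add_parser, add_defaults):
        # add_parser is subparsers.add_parser from argparse
        parser = add_parser('echo', help='prints its arguments')
        parser.add_argument('words', nargs='*')
        add_defaults('echo', {})  # assumed (action, defaults) signature

    def name(self):
        return 'echo'

    def run(self, clusters, args, config_path):
        print(' '.join(args.get('words', [])))
        return 0

run(sys.argv[1:], {'echo': EchoPlugin()})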