Example #1
0
File: submit.py Project: m4ce/Cook
def submit(clusters, args):
    """
    Submits a job (or multiple jobs) to cook scheduler.

    Assembles a list of jobs, potentially getting data from configuration,
    the command line, and stdin.

    :param clusters: cluster configurations to submit against
    :param args: dict of parsed command-line arguments; consumed destructively
                 via pop() as job fields are extracted
    :return: result of submit_federated for the assembled job list
    :raises Exception: if --raw is combined with a command-line command, or
                       a single UUID is given with multiple subcommands
    """
    # Lazy %-args: the string is only formatted when DEBUG logging is enabled.
    logging.debug('submit args: %s', args)
    job = args
    raw = job.pop('raw', None)
    command_from_command_line = job.pop('command', None)
    application_name = job.pop('application_name', 'cook-scheduler-cli')
    application_version = job.pop(
        'application_version',
        pkg_resources.require('cook_client')[0].version)
    job['application'] = {
        'name': application_name,
        'version': application_version
    }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job.get('env'):
            # Split only on the first '=' so values may themselves contain '='.
            job['env'] = dict(e.split('=', maxsplit=1) for e in job['env'])

        jobs = [deep_merge(job, {'command': c}) for c in commands]

    # Fill in defaults for any job missing a uuid or name.
    for j in jobs:
        if not j.get('uuid'):
            j['uuid'] = str(uuid.uuid4())

        if not j.get('name'):
            j['name'] = '%s_job' % current_user()

    logging.debug('jobs: %s', jobs)
    return submit_federated(clusters, jobs)
Example #2
0
def register(add_parser, add_defaults):
    """Adds this sub-command's parser and returns the action function"""
    usage_parser = add_parser(
        'usage', help='show breakdown of usage by application and group')
    usage_parser.add_argument('--user', '-u', help='show usage for a user')
    usage_parser.add_argument('--pool', '-p',
                              action='append',
                              help='filter by pool (can be repeated)')
    usage_parser.add_argument('--json',
                              dest='json',
                              action='store_true',
                              help='show the data in JSON format')

    # Default to showing usage for the invoking user.
    add_defaults('usage', {'user': current_user()})

    return usage
Example #3
0
def register(add_parser, add_defaults):
    """Adds this sub-command's parser and returns the action function"""
    parser = add_parser('list',
                        help='list jobs by state / user / time / name')
    parser.add_argument('--state', '-s',
                        action='append',
                        choices=('waiting', 'running', 'completed',
                                 'failed', 'success', 'all'),
                        help='list jobs by status (can be repeated)')
    parser.add_argument('--user', '-u', help='list jobs for a user')
    parser.add_argument('--lookback', '-t',
                        type=float,
                        help='list jobs for the last X hours')
    parser.add_argument('--name', '-n',
                        help="list jobs with a particular name pattern (name filters can contain "
                             "alphanumeric characters, '.', '-', '_', and '*' as a wildcard)")
    parser.add_argument('--limit', '-l',
                        type=check_positive,
                        help='limit the number of results')
    parser.add_argument('--json',
                        dest='json',
                        action='store_true',
                        help='show the data in JSON format')

    # Defaults: the current user's running jobs from the last 6 hours.
    add_defaults('list', {
        'state': ['running'],
        'user': current_user(),
        'lookback': 6,
        'limit': 150
    })

    return list_jobs
Example #4
0
File: jobs.py Project: pschorf/Cook
def register(add_parser, add_defaults):
    """Adds this sub-command's parser and returns the action function"""
    parser = add_parser('jobs', help='list jobs by state / user / time / name')
    # Each state flag appends its constant to the shared 'states' list, so
    # flags can be combined (e.g. -w -r unions waiting and running).
    parser.add_argument('--waiting',
                        '-w',
                        help='include waiting jobs',
                        dest='states',
                        action='append_const',
                        const='waiting')
    parser.add_argument('--running',
                        '-r',
                        help='include running jobs',
                        dest='states',
                        action='append_const',
                        const='running')
    parser.add_argument('--completed',
                        '-c',
                        help='include completed jobs',
                        dest='states',
                        action='append_const',
                        const='completed')
    parser.add_argument('--failed',
                        '-f',
                        help='include failed jobs',
                        dest='states',
                        action='append_const',
                        const='failed')
    parser.add_argument('--success',
                        '-s',
                        help='include successful jobs',
                        dest='states',
                        action='append_const',
                        const='success')
    parser.add_argument('--all',
                        '-a',
                        help='include all jobs, regardless of status',
                        dest='states',
                        action='append_const',
                        const='all')
    parser.add_argument('--user', '-u', help='list jobs for a user')
    parser.add_argument('--pool', '-P', help='list jobs for a pool')
    parser.add_argument(
        '--lookback',
        '-t',
        help=
        f'list jobs submitted in the last HOURS hours (default = {DEFAULT_LOOKBACK_HOURS})',
        type=float,
        metavar='HOURS')
    # f-prefixes removed below: these help strings have no placeholders (F541).
    parser.add_argument('--submitted-after',
                        '-A',
                        help='list jobs submitted after the given time')
    parser.add_argument('--submitted-before',
                        '-B',
                        help='list jobs submitted before the given time')
    parser.add_argument(
        '--name',
        '-n',
        help=
        "list jobs with a particular name pattern (name filters can contain "
        "alphanumeric characters, '.', '-', '_', and '*' as a wildcard)")
    parser.add_argument(
        '--limit',
        '-l',
        help=f'limit the number of results (default = {DEFAULT_LIMIT})',
        type=check_positive)
    parser.add_argument('--exclude-custom-executor',
                        help='exclude jobs with a custom executor',
                        action='store_true')
    # --json and --urls are mutually exclusive output formats.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--json',
                       help='show the data in JSON format',
                       dest='json',
                       action='store_true')
    # NOTE(review): dest contains '-', so this value is only reachable via
    # getattr(args, 'one-per-line') or vars(args) — confirm downstream access.
    group.add_argument(
        '--urls',
        '-1',
        help='list one job URL per line, without table formatting',
        dest='one-per-line',
        action='store_true')

    add_defaults('jobs', {
        'states': ['running'],
        'user': current_user(),
        'limit': DEFAULT_LIMIT
    })

    return jobs
Example #5
0
File: submit.py Project: yueri/Cook
def submit(clusters, args, _):
    """
    Submits a job (or multiple jobs) to cook scheduler.

    Assembles a list of jobs, potentially getting data from configuration,
    the command line, and stdin.

    :param clusters: cluster configurations to submit against
    :param args: dict of parsed command-line arguments; consumed destructively
                 via pop() as job fields are extracted
    :param _: unused third positional argument (kept for caller compatibility)
    :return: result of submit_federated for the assembled job list
    :raises Exception: if --raw is combined with a command-line command, or
                       a single UUID is given with multiple subcommands
    """
    guard_no_cluster(clusters)
    # Lazy %-args: the string is only formatted when DEBUG logging is enabled.
    logging.debug('submit args: %s', args)
    job_template = args
    raw = job_template.pop('raw', None)
    command_from_command_line = job_template.pop('command', None)
    command_prefix = job_template.pop('command-prefix')
    application_name = job_template.pop('application-name',
                                        'cook-scheduler-cli')
    application_version = job_template.pop('application-version',
                                           version.VERSION)
    job_template['application'] = {
        'name': application_name,
        'version': application_version
    }
    pool = job_template.pop('pool-name', None)

    docker_image = job_template.pop('docker-image', None)
    if docker_image:
        job_template['container'] = {
            'type': 'docker',
            'docker': {
                'image': docker_image,
                'network': 'HOST',
                'force-pull-image': False
            }
        }

    group = None
    if 'group-name' in job_template:
        # If the user did not also specify a group uuid, generate
        # one for them, and place the job(s) into the group
        if 'group' not in job_template:
            job_template['group'] = str(uuid.uuid4())

        # The group name is specified on the group object
        group = {
            'name': job_template.pop('group-name'),
            'uuid': job_template['group']
        }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job_template, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job_template.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job_template.get('env'):
            # Split only on the first '=' so values may themselves contain '='.
            job_template['env'] = dict(
                e.split('=', maxsplit=1) for e in job_template['env'])

        if job_template.get('label'):
            # Fold repeated --label K=V flags into the 'labels' dict.
            labels = dict(
                l.split('=', maxsplit=1) for l in job_template['label'])
            job_template.pop('label')
            job_template.setdefault('labels', {}).update(labels)

        jobs = [deep_merge(job_template, {'command': c}) for c in commands]

    # Fill in per-job defaults and apply the optional command prefix.
    for job in jobs:
        if not job.get('uuid'):
            job['uuid'] = str(uuid.uuid4())

        if not job.get('name'):
            job['name'] = '%s_job' % current_user()

        if command_prefix:
            job['command'] = f'{command_prefix}{job["command"]}'

    logging.debug('jobs: %s', jobs)
    return submit_federated(clusters, jobs, group, pool)
Example #6
0
def submit(clusters, args, _):
    """
    Submits a job (or multiple jobs) to cook scheduler.

    Assembles a list of jobs, potentially getting data from configuration,
    the command line, and stdin.

    :param clusters: cluster configurations to submit against
    :param args: dict of parsed command-line arguments; consumed destructively
                 via pop() as job fields are extracted
    :param _: unused third positional argument (kept for caller compatibility)
    :return: result of submit_federated for the assembled job list
    :raises Exception: if --raw is combined with a command-line command, a
                       single UUID is given with multiple subcommands, or a
                       constraint is not of the form K=V
    """
    guard_no_cluster(clusters)
    # Lazy %-args: the string is only formatted when DEBUG logging is enabled.
    logging.debug('submit args: %s', args)
    job_template = args
    raw = job_template.pop('raw', None)
    command_from_command_line = job_template.pop('command', None)
    command_prefix = job_template.pop('command-prefix')
    application_name = job_template.pop('application-name',
                                        'cook-scheduler-cli')
    application_version = job_template.pop('application-version',
                                           version.VERSION)
    job_template['application'] = {
        'name': application_name,
        'version': application_version
    }
    pool = job_template.pop('pool-name', None)
    checkpoint = job_template.pop('checkpoint', False)
    checkpoint_mode = job_template.pop('checkpoint-mode', None)
    checkpoint_preserve_paths = job_template.pop('checkpoint-preserve-paths',
                                                 None)
    checkpoint_period_sec = job_template.pop('checkpoint-period-sec', None)
    disk_request = job_template.pop('disk-request', None)
    disk_limit = job_template.pop('disk-limit', None)
    disk_type = job_template.pop('disk-type', None)

    docker_image = job_template.pop('docker-image', None)
    if docker_image:
        job_template['container'] = {
            'type': 'docker',
            'docker': {
                'image': docker_image,
                'network': 'HOST',
                'force-pull-image': False
            }
        }

    group = None
    if 'group-name' in job_template:
        # If the user did not also specify a group uuid, generate
        # one for them, and place the job(s) into the group
        if 'group' not in job_template:
            job_template['group'] = str(make_temporal_uuid())

        # The group name is specified on the group object
        group = {
            'name': job_template.pop('group-name'),
            'uuid': job_template['group']
        }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job_template, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job_template.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job_template.get('env'):
            # Split only on the first '=' so values may themselves contain '='.
            job_template['env'] = dict(
                e.split('=', maxsplit=1) for e in job_template['env'])

        if job_template.get('label'):
            # Fold repeated --label K=V flags into the 'labels' dict.
            labels = dict(
                l.split('=', maxsplit=1) for l in job_template['label'])
            job_template.pop('label')
            job_template.setdefault('labels', {}).update(labels)

        if job_template.get('constraint'):
            # Each --constraint K=V becomes an [attribute, EQUALS, value] triple.
            constraints = []
            for c in job_template['constraint']:
                parts = c.split('=', maxsplit=1)
                if len(parts) != 2:
                    raise Exception(
                        'Invalid constraint, must be of the form K=V')
                constraints.append([parts[0], 'EQUALS', parts[1]])
            job_template.pop('constraint')
            job_template['constraints'] = constraints

        jobs = [deep_merge(job_template, {'command': c}) for c in commands]

    # Fill in per-job defaults and attach optional checkpoint/disk configs.
    for job in jobs:
        if not job.get('uuid'):
            job['uuid'] = str(make_temporal_uuid())

        if not job.get('name'):
            job['name'] = '%s_job' % current_user()

        if command_prefix:
            job['command'] = f'{command_prefix}{job["command"]}'

        if checkpoint or checkpoint_mode:
            # Build into a separate dict; the original code clobbered the
            # boolean 'checkpoint' flag here, which only worked because a
            # non-empty dict is also truthy.
            checkpoint_config = {
                'mode': checkpoint_mode if checkpoint_mode else 'auto'
            }
            if checkpoint_preserve_paths:
                checkpoint_config['options'] = {
                    'preserve-paths': checkpoint_preserve_paths
                }
            if checkpoint_period_sec:
                checkpoint_config['periodic-options'] = {
                    'period-sec': checkpoint_period_sec
                }
            job['checkpoint'] = checkpoint_config

        if disk_request or disk_limit or disk_type:
            disk = {}
            if disk_request:
                disk['request'] = disk_request
            if disk_limit:
                disk['limit'] = disk_limit
            if disk_type:
                disk['type'] = disk_type
            job['disk'] = disk

    logging.debug('jobs: %s', jobs)
    return submit_federated(clusters, jobs, group, pool)
Example #7
0
import logging
import socket

from cook.util import current_user

# Module-level metrics state; populated by initialize() from config.
__line_formats = None  # per-metric line format templates ('line-formats' config key)
__conn = None  # socket to the metrics endpoint; None until initialize() connects
__host = socket.gethostname()  # local hostname, captured once at import time
__user = current_user()  # invoking user, captured once at import time
__disabled = True  # metrics are off until initialize() enables them


def initialize(config):
    """
    Initializes the metrics module using the given
    config; note that metrics can be completely
    disabled in which case this is essentially a no-op
    """
    global __disabled
    try:
        metrics_config = config.get('metrics')
        __disabled = metrics_config.get('disabled')
        if __disabled:
            return

        global __conn
        global __line_formats
        __line_formats = metrics_config.get('line-formats')
        __conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        __conn.settimeout(metrics_config.get('timeout'))
        address = (metrics_config.get('host'), metrics_config.get('port'))