Code example #1
def ls(clusters, args, _):
    """Lists contents of the corresponding Mesos sandbox path by job or instance uuid."""
    guard_no_cluster(clusters)
    entity_refs, clusters_of_interest = parse_entity_refs(clusters, args.get('uuid'))
    path = args.get('path')
    long_format = args.get('long_format')
    as_json = args.get('json')
    literal = args.get('literal')

    if len(entity_refs) > 1:
        # argparse should prevent this, but we'll be defensive anyway
        raise Exception('You can only provide a single uuid.')

    if path and not literal and any(c in path for c in '*?[]{}'):
        message = 'It looks like you are trying to glob, but ls does not support globbing. ' \
                  f'You can use the {terminal.bold("ssh")} command instead:\n' \
                  '\n' \
                  f'  cs ssh {entity_refs[0]}\n' \
                  '\n' \
                  f'Or, if you want the literal path {terminal.bold(path)}, add {terminal.bold("--literal")}:\n' \
                  '\n' \
                  f'  cs ls {terminal.bold("--literal")} {entity_refs[0]} {path}'
        print(message)
        return 1

    command_fn = partial(ls_for_instance, path=path, long_format=long_format, as_json=as_json)
    query_unique_and_run(clusters_of_interest, entity_refs[0], command_fn)
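
The glob check in ls above is a simple character-membership test over '*?[]{}'. A standalone sketch of it (looks_like_glob is a hypothetical name):

def looks_like_glob(path):
    # True if the path contains any shell-glob metacharacter.
    return any(c in path for c in '*?[]{}')

assert looks_like_glob('logs/*.txt')
assert not looks_like_glob('stdout')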
Code example #2
File: show.py Project: hdrodz97/Cook
def show(clusters, args, _):
    """Prints info for the jobs / instances / groups with the given UUIDs."""
    guard_no_cluster(clusters)
    as_json = args.get('json')
    entity_refs, _ = parse_entity_refs(clusters, args.get('uuid'))
    query_result, clusters_of_interest = query_with_stdin_support(
        clusters, entity_refs)
    if as_json:
        print(json.dumps(query_result))
    else:
        for cluster_name, entities in query_result['clusters'].items():
            if 'jobs' in entities:
                show_data(cluster_name, entities['jobs'], tabulate_job)

            if 'instances' in entities:
                show_data(cluster_name, entities['instances'],
                          tabulate_instance)

            if 'groups' in entities:
                show_data(cluster_name, entities['groups'], tabulate_group)

    if query_result['count'] > 0:
        return 0
    else:
        if not as_json:
            print_no_data(clusters_of_interest)
        return 1
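
The per-cluster loop in show dispatches on whichever entity types are present in the result. A minimal standalone sketch of that dispatch pattern (render and the formatters dict are hypothetical stand-ins for show_data and the tabulate_* helpers):

def render(cluster_name, entities, fmt):
    # Stand-in for show_data: format and print each entity.
    print(cluster_name, [fmt(e) for e in entities])

formatters = {'jobs': str, 'instances': str, 'groups': str}
query_result = {'clusters': {'dev': {'jobs': [{'uuid': 'abc'}]}}}

for cluster_name, entities in query_result['clusters'].items():
    for entity_type, fmt in formatters.items():
        if entity_type in entities:
            render(cluster_name, entities[entity_type], fmt)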
Code example #3
File: ssh.py Project: yueri/Cook
def ssh(clusters, args, _):
    """Attempts to ssh (using os.execlp) to the Mesos agent corresponding to the given job or instance uuid."""
    guard_no_cluster(clusters)
    entity_refs, clusters_of_interest = parse_entity_refs(clusters, args.get('uuid'))
    if len(entity_refs) > 1:
        # argparse should prevent this, but we'll be defensive anyway
        raise Exception('You can only provide a single uuid.')

    query_unique_and_run(clusters_of_interest, entity_refs[0], ssh_to_instance)
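
The docstring says ssh_to_instance ultimately calls os.execlp, which replaces the current Python process with the ssh client and never returns on success. A minimal sketch (user and hostname are placeholders):

import os

def exec_ssh(user, hostname):
    # execlp searches PATH for 'ssh'; the second 'ssh' becomes argv[0].
    # On success this call does not return -- the process becomes ssh.
    os.execlp('ssh', 'ssh', f'{user}@{hostname}')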
Code example #4
def cat(clusters, args, _):
    """Outputs the contents of the corresponding Mesos sandbox path by job or instance uuid."""
    guard_no_cluster(clusters)
    entity_refs, clusters_of_interest = parse_entity_refs(clusters, args.get('target-entity'))
    paths = args.get('path')

    # argparse should prevent these, but we'll be defensive anyway
    assert len(entity_refs) == 1, 'Only a single UUID or URL is supported.'
    assert len(paths) == 1, 'Only a single path is supported.'

    command_fn = partial(cat_for_instance, path=paths[0])
    query_unique_and_run(clusters_of_interest, entity_refs[0], command_fn)
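
Like ls and tail, cat pre-binds its per-invocation options with functools.partial so query_unique_and_run only has to supply the matched instance. A standalone sketch of the pattern (cat_like is hypothetical):

from functools import partial

def cat_like(instance, path):
    print(f'would fetch {path} from {instance}')

command_fn = partial(cat_like, path='stdout')
command_fn('instance-123')  # prints: would fetch stdout from instance-123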
Code example #5
File: kill.py Project: pschorf/Cook
def kill(clusters, args, _):
    """Attempts to kill the jobs / instances / groups with the given UUIDs."""
    guard_no_cluster(clusters)
    entity_refs, _ = parse_entity_refs(clusters, args.get('uuid'))
    query_result, clusters_of_interest = query_with_stdin_support(
        clusters, entity_refs)
    if query_result['count'] == 0:
        print_no_data(clusters_of_interest)
        return 1

    # If the user provides UUIDs that map to more than one entity,
    # we will raise an Exception that contains the details
    guard_against_duplicates(query_result)

    return kill_entities(query_result, clusters_of_interest)
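
guard_against_duplicates raises when a UUID matches more than one entity. One plausible way to detect that, sketched with collections.Counter (the real helper and the exact result shape may differ):

from collections import Counter

def check_duplicates(query_result):
    # Count each uuid across every entity type in every cluster.
    counts = Counter(entity['uuid']
                     for entities in query_result['clusters'].values()
                     for entity_list in entities.values()
                     for entity in entity_list)
    dupes = [u for u, n in counts.items() if n > 1]
    if dupes:
        raise Exception(f'Duplicate UUIDs found: {dupes}')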
Code example #6
def jobs(clusters, args, _):
    """Prints info for the jobs with the given list criteria"""
    guard_no_cluster(clusters)
    as_json = args.get('json')
    one_per_line = args.get('one-per-line')
    states = args.get('states')
    user = args.get('user')
    lookback_hours = args.get('lookback')
    submitted_after = args.get('submitted_after')
    submitted_before = args.get('submitted_before')
    name = args.get('name')
    limit = args.get('limit')
    include_custom_executor = not args.get('exclude_custom_executor')
    pool = args.get('pool')

    if lookback_hours and (submitted_after or submitted_before):
        raise Exception(
            'You cannot specify both lookback hours and submitted after / before times.'
        )

    if submitted_after is not None or submitted_before is not None:
        start_ms = date_time_string_to_ms_since_epoch(
            submitted_after if submitted_after is not None else
            f'{DEFAULT_LOOKBACK_HOURS} hours ago')
        end_ms = date_time_string_to_ms_since_epoch(
            submitted_before if submitted_before is not None else 'now')
    else:
        if states == ['running']:
            default_lookback_hours = 24 * 7
        else:
            default_lookback_hours = DEFAULT_LOOKBACK_HOURS
        start_ms, end_ms = lookback_hours_to_range(lookback_hours
                                                   or default_lookback_hours)

    query_result = query(clusters, states, user, start_ms, end_ms, name, limit,
                         include_custom_executor, pool)
    found_jobs = query_result['count'] > 0
    if as_json:
        print_as_json(query_result)
    elif one_per_line:
        print_as_one_per_line(query_result, clusters)
    elif found_jobs:
        print_as_table(query_result)
    else:
        print_no_data(clusters, states, user)
    return 0
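
lookback_hours_to_range is not shown here; a plausible equivalent converts an hour count into a (start_ms, end_ms) pair ending now:

import time

def lookback_hours_to_range(lookback_hours):
    end_ms = int(time.time() * 1000)
    start_ms = end_ms - int(lookback_hours * 60 * 60 * 1000)
    return start_ms, end_ms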
Code example #7
def wait(clusters, args, _):
    """Waits for jobs / instances / groups with the given UUIDs to complete."""
    guard_no_cluster(clusters)
    timeout = args.get('timeout')
    interval = args.get('interval')
    entity_refs, _ = parse_entity_refs(clusters, args.get('uuid'))
    timeout_text = (
        'up to %s' %
        seconds_to_timedelta(timeout)) if timeout else 'indefinitely'
    print_info('Will wait %s.' % timeout_text)
    query_result, clusters_of_interest = query_with_stdin_support(
        clusters, entity_refs, all_jobs_completed, all_instances_completed,
        all_groups_completed, timeout, interval)
    if query_result['count'] > 0:
        return 0
    else:
        print_no_data(clusters_of_interest)
        return 1
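
seconds_to_timedelta presumably just wraps datetime.timedelta to render the timeout in a readable form; a minimal sketch:

from datetime import timedelta

def seconds_to_timedelta(seconds):
    return timedelta(seconds=seconds)

print('Will wait up to %s.' % seconds_to_timedelta(90))  # Will wait up to 0:01:30.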
Code example #8
File: usage.py Project: pschorf/Cook
def usage(clusters, args, _):
    """Prints cluster usage info for the given user"""
    guard_no_cluster(clusters)
    as_json = args.get('json')
    user = args.get('user')
    pools = args.get('pool')

    query_result = query(clusters, user)

    if pools:
        query_result = filter_query_result_by_pools(query_result, pools)

    if as_json:
        print_as_json(query_result)
    else:
        print_formatted(query_result)

    return 0
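
filter_query_result_by_pools is not shown; assuming usage is keyed per cluster and then per pool name, the filter could look like this sketch (the real result shape may differ):

def filter_query_result_by_pools(query_result, pools):
    wanted = set(pools)
    return {cluster: {pool: usage
                      for pool, usage in by_pool.items() if pool in wanted}
            for cluster, by_pool in query_result.items()}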
Code example #9
File: admin.py Project: scrosby/Cook
def instances(clusters, args):
    """Prints the count of instances with the given criteria"""
    guard_no_cluster(clusters)

    if len(clusters) != 1:
        raise Exception(f'You must specify a single cluster to query.')

    status = args.get('status')
    started_after = args.get('started_after')
    started_before = args.get('started_before')

    status = status or 'success'
    start_ms = date_time_string_to_ms_since_epoch(started_after or '10 minutes ago')
    end_ms = date_time_string_to_ms_since_epoch(started_before or 'now')

    cluster = clusters[0]
    data = query_instances_on_cluster(cluster, status, start_ms, end_ms)
    print(data['count'])
    return 0
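
date_time_string_to_ms_since_epoch accepts relative phrases like '10 minutes ago' and 'now'; in plain standard-library terms the defaults above are equivalent to:

from datetime import datetime, timedelta

start_ms = int((datetime.now() - timedelta(minutes=10)).timestamp() * 1000)
end_ms = int(datetime.now().timestamp() * 1000)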
Code example #10
File: tail.py Project: dPeS/Cook
def tail(clusters, args, _):
    """Tails the contents of the corresponding Mesos sandbox path by job or instance uuid."""
    guard_no_cluster(clusters)
    entity_refs, clusters_of_interest = parse_entity_refs(clusters, args.get('uuid'))
    paths = args.get('path')
    lines = args.get('lines')
    follow = args.get('follow')
    sleep_interval = args.get('sleep-interval')

    if len(entity_refs) > 1:
        # argparse should prevent this, but we'll be defensive anyway
        raise Exception('You can only provide a single uuid.')

    if len(paths) > 1:
        # argparse should prevent this, but we'll be defensive anyway
        raise Exception('You can only provide a single path.')

    command_fn = partial(tail_for_instance, path=paths[0], num_lines_to_print=lines,
                         follow=follow, follow_sleep_seconds=sleep_interval)
    query_unique_and_run(clusters_of_interest, entity_refs[0], command_fn)
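
tail_for_instance is not shown; the follow and sleep-interval options suggest a poll loop like this local-file sketch (sandbox fetching omitted, follow_file is hypothetical):

import time

def follow_file(path, sleep_seconds=1.0):
    with open(path) as f:
        f.seek(0, 2)  # start at end of file, like tail -f
        while True:
            line = f.readline()
            if line:
                print(line, end='')
            else:
                time.sleep(sleep_seconds)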
Code example #11
File: submit.py Project: yueri/Cook
def submit(clusters, args, _):
    """
    Submits a job (or multiple jobs) to cook scheduler. Assembles a list of jobs,
    potentially getting data from configuration, the command line, and stdin.
    """
    guard_no_cluster(clusters)
    logging.debug('submit args: %s' % args)
    job_template = args
    raw = job_template.pop('raw', None)
    command_from_command_line = job_template.pop('command', None)
    command_prefix = job_template.pop('command-prefix')
    application_name = job_template.pop('application-name',
                                        'cook-scheduler-cli')
    application_version = job_template.pop('application-version',
                                           version.VERSION)
    job_template['application'] = {
        'name': application_name,
        'version': application_version
    }
    pool = job_template.pop('pool-name', None)

    docker_image = job_template.pop('docker-image', None)
    if docker_image:
        job_template['container'] = {
            'type': 'docker',
            'docker': {
                'image': docker_image,
                'network': 'HOST',
                'force-pull-image': False
            }
        }

    group = None
    if 'group-name' in job_template:
        # If the user did not also specify a group uuid, generate
        # one for them, and place the job(s) into the group
        if 'group' not in job_template:
            job_template['group'] = str(uuid.uuid4())

        # The group name is specified on the group object
        group = {
            'name': job_template.pop('group-name'),
            'uuid': job_template['group']
        }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job_template, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job_template.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job_template.get('env'):
            job_template['env'] = dict(
                [e.split('=', maxsplit=1) for e in job_template['env']])

        if job_template.get('label'):
            labels = dict(
                [l.split('=', maxsplit=1) for l in job_template['label']])
            job_template.pop('label')
            if 'labels' not in job_template:
                job_template['labels'] = {}
            job_template['labels'].update(labels)

        jobs = [deep_merge(job_template, {'command': c}) for c in commands]

    for job in jobs:
        if not job.get('uuid'):
            job['uuid'] = str(uuid.uuid4())

        if not job.get('name'):
            job['name'] = '%s_job' % current_user()

        if command_prefix:
            job['command'] = f'{command_prefix}{job["command"]}'

    logging.debug('jobs: %s' % jobs)
    return submit_federated(clusters, jobs, group, pool)
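
The env and label handling above splits each KEY=VALUE pair only on the first '=', so values may themselves contain '='. A standalone check:

env_args = ['FOO=bar', 'OPTS=--level=debug']
env = dict(e.split('=', maxsplit=1) for e in env_args)
assert env == {'FOO': 'bar', 'OPTS': '--level=debug'}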
Code example #12
File: submit.py Project: scrosby/Cook
def submit(clusters, args, _):
    """
    Submits a job (or multiple jobs) to cook scheduler. Assembles a list of jobs,
    potentially getting data from configuration, the command line, and stdin.
    """
    guard_no_cluster(clusters)
    logging.debug('submit args: %s' % args)
    job_template = args
    raw = job_template.pop('raw', None)
    command_from_command_line = job_template.pop('command', None)
    command_prefix = job_template.pop('command-prefix')
    application_name = job_template.pop('application-name',
                                        'cook-scheduler-cli')
    application_version = job_template.pop('application-version',
                                           version.VERSION)
    job_template['application'] = {
        'name': application_name,
        'version': application_version
    }
    pool = job_template.pop('pool-name', None)
    checkpoint = job_template.pop('checkpoint', False)
    checkpoint_mode = job_template.pop('checkpoint-mode', None)
    checkpoint_preserve_paths = job_template.pop('checkpoint-preserve-paths',
                                                 None)
    checkpoint_period_sec = job_template.pop('checkpoint-period-sec', None)
    disk_request = job_template.pop('disk-request', None)
    disk_limit = job_template.pop('disk-limit', None)
    disk_type = job_template.pop('disk-type', None)

    docker_image = job_template.pop('docker-image', None)
    if docker_image:
        job_template['container'] = {
            'type': 'docker',
            'docker': {
                'image': docker_image,
                'network': 'HOST',
                'force-pull-image': False
            }
        }

    group = None
    if 'group-name' in job_template:
        # If the user did not also specify a group uuid, generate
        # one for them, and place the job(s) into the group
        if 'group' not in job_template:
            job_template['group'] = str(make_temporal_uuid())

        # The group name is specified on the group object
        group = {
            'name': job_template.pop('group-name'),
            'uuid': job_template['group']
        }

    if raw:
        if command_from_command_line:
            raise Exception(
                'You cannot specify a command at the command line when using --raw/-r.'
            )

        jobs_json = read_jobs_from_stdin()
        jobs = parse_raw_job_spec(job_template, jobs_json)
    else:
        commands = acquire_commands(command_from_command_line)

        if job_template.get('uuid') and len(commands) > 1:
            raise Exception(
                'You cannot specify multiple subcommands with a single UUID.')

        if job_template.get('env'):
            job_template['env'] = dict(
                [e.split('=', maxsplit=1) for e in job_template['env']])

        if job_template.get('label'):
            labels = dict(
                [l.split('=', maxsplit=1) for l in job_template['label']])
            job_template.pop('label')
            if 'labels' not in job_template:
                job_template['labels'] = {}
            job_template['labels'].update(labels)

        if job_template.get('constraint'):
            constraints = []
            for c in job_template['constraint']:
                parts = c.split('=', maxsplit=1)
                if len(parts) != 2:
                    raise Exception(
                        'Invalid constraint, must be of the form K=V')
                constraints.append([parts[0], 'EQUALS', parts[1]])
            job_template.pop('constraint')
            job_template['constraints'] = constraints

        jobs = [deep_merge(job_template, {'command': c}) for c in commands]

    for job in jobs:
        if not job.get('uuid'):
            job['uuid'] = str(make_temporal_uuid())

        if not job.get('name'):
            job['name'] = '%s_job' % current_user()

        if command_prefix:
            job['command'] = f'{command_prefix}{job["command"]}'

        if checkpoint or checkpoint_mode:
            checkpoint = {
                'mode': checkpoint_mode if checkpoint_mode else 'auto'
            }
            if checkpoint_preserve_paths:
                checkpoint['options'] = {
                    'preserve-paths': checkpoint_preserve_paths
                }
            if checkpoint_period_sec:
                checkpoint['periodic-options'] = {
                    'period-sec': checkpoint_period_sec
                }
            job['checkpoint'] = checkpoint

        if disk_request or disk_limit or disk_type:
            disk = {}
            if disk_request:
                disk['request'] = disk_request
            if disk_limit:
                disk['limit'] = disk_limit
            if disk_type:
                disk['type'] = disk_type
            job['disk'] = disk
    logging.debug('jobs: %s' % jobs)
    return submit_federated(clusters, jobs, group, pool)
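
This second submit variant additionally turns each constraint of the form K=V into an [attribute, 'EQUALS', value] triple; a standalone sketch of that transformation:

def parse_constraints(raw_constraints):
    constraints = []
    for c in raw_constraints:
        parts = c.split('=', maxsplit=1)
        if len(parts) != 2:
            raise Exception('Invalid constraint, must be of the form K=V')
        constraints.append([parts[0], 'EQUALS', parts[1]])
    return constraints

assert parse_constraints(['host=agent-1']) == [['host', 'EQUALS', 'agent-1']]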