Example #1
def get_job(id):
    """Get a job's metadata by API Job ID.
    Args:
        id (str): Job ID to get
    Returns:
        JobMetadataResponse: Response containing relevant metadata
    """
    proj_id, job_id, task_id, attempt = job_ids.api_to_dsub(
        id, _provider_type())
    provider = providers.get_provider(_provider_type(), proj_id, _auth_token())

    jobs = []
    try:
        jobs = execute_redirect_stdout(lambda: dstat.dstat_job_producer(
            provider=provider,
            statuses={'*'},
            job_ids={job_id},
            task_ids={task_id} if task_id else None,
            task_attempts={attempt} if attempt else None,
            full_output=True).next())
    except apiclient.errors.HttpError as error:
        _handle_http_error(error, proj_id)

    # A job_id and task_id define a unique job (should only be one)
    if len(jobs) > 1:
        raise BadRequest('Found more than one job with ID {}'.format(id))
    elif len(jobs) == 0:
        raise NotFound('Could not find any jobs with ID {}'.format(id))
    return _metadata_response(id, jobs[0])
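
Each of these examples funnels its dsub call through execute_redirect_stdout, which (per the TODO in abort_job below) exists only to keep dsub's print output out of the server's own stdout. The helper itself is not shown in these snippets; the sketch below is one plausible shape for it, assuming it simply swaps sys.stdout for an in-memory buffer around the call.

# A minimal sketch (an assumption, not the project's actual helper): run
# func() while temporarily redirecting sys.stdout to an in-memory buffer.
import StringIO
import sys

def execute_redirect_stdout_sketch(func):
    original_stdout = sys.stdout
    sys.stdout = StringIO.StringIO()
    try:
        return func()
    finally:
        # Always restore the real stdout, even if func() raises.
        sys.stdout = original_stdout
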
Example #2
def abort_job(id):
    """Abort a job by API Job ID.

    Args:
        id (str): Job ID to be aborted

    Returns: None
    """
    # Attempt is unused in aborting because only one attempt can be running at
    # a time.
    proj_id, job_id, task_id, _ = job_ids.api_to_dsub(id, _provider_type())
    provider = providers.get_provider(_provider_type(), proj_id, _auth_token())

    # TODO(bryancrampton): Add flag to ddel to support deleting only
    # 'singleton' tasks.
    status = get_job(id).status

    # TODO(https://github.com/googlegenomics/dsub/issues/81): Remove this
    # provider-specific logic
    if isinstance(provider, stub.StubJobProvider):
        status = status[0]

    if status != job_statuses.ApiStatus.RUNNING:
        raise PreconditionFailed(
            'Job already in terminal status `{}`'.format(status))

    # TODO(https://github.com/googlegenomics/dsub/issues/92): Remove this
    # hacky re-routing of stdout once dsub removes it from the python API
    deleted = execute_redirect_stdout(
        lambda: ddel.ddel_tasks(provider=provider,
                                job_ids={job_id},
                                task_ids={task_id} if task_id else None))
    if len(deleted) != 1:
        raise InternalServerError('Failed to abort dsub job')
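
The exceptions raised here (BadRequest, NotFound, PreconditionFailed, InternalServerError) look like the standard HTTP exceptions of a Flask/Connexion stack; their imports are not shown, so treat that as an assumption. A hypothetical caller could surface them like this:

# Hypothetical caller; assumes the exceptions above are werkzeug-style
# HTTPExceptions carrying `code` and `description` attributes.
from werkzeug.exceptions import HTTPException

def try_abort(api_job_id):
    try:
        abort_job(api_job_id)
        return {'aborted': True}
    except HTTPException as error:
        # e.g. PreconditionFailed (412) when the job is already terminal.
        return {'aborted': False,
                'status_code': error.code,
                'message': error.description}
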
Example #3
def generate_jobs_by_window(provider, project_id, window_min, window_max=None):
    """Get the generator of jobs for aggregation.
    If the window_max is specified, running jobs will not be returned.

    Args:
        provider (str): type of provider
        project_id (str): the project id
        window_min (datetime): the earliest time of aggregation time window
        window_max (datetime): the latest time of aggregation time window

    Returns:
        generator: Retrieved jobs
    """
    create_time_min = window_min - datetime.timedelta(days=_MAX_RUNTIME_DAYS)

    jobs = execute_redirect_stdout(
        lambda: dstat.lookup_job_tasks(provider=provider,
                                       statuses=None,
                                       user_ids=None,
                                       job_ids=None,
                                       task_ids=None,
                                       create_time_min=create_time_min,
                                       create_time_max=window_max,
                                       job_names=None,
                                       labels=None))

    for j in jobs:
        job = _query_jobs_result(j, project_id)
        # Filter out jobs that do not end within the time window
        if job.end and (job.end < window_min
                        or window_max and job.end > window_max):
            continue

        yield job
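
generate_jobs_by_window over-fetches on the create-time side (back to window_min - _MAX_RUNTIME_DAYS) and then filters client-side on end time. The toy example below replays the same filter condition on stand-in job objects; FakeJob and all of the dates are made up for illustration.

# Toy illustration of the end-time filter above; FakeJob and the dates are
# assumptions for demonstration only.
import collections
import datetime

FakeJob = collections.namedtuple('FakeJob', ['id', 'end'])

window_min = datetime.datetime(2018, 1, 8)
window_max = datetime.datetime(2018, 1, 9)

candidates = [
    FakeJob('ended-too-early', datetime.datetime(2018, 1, 7)),
    FakeJob('ended-in-window', datetime.datetime(2018, 1, 8, 12)),
    FakeJob('ended-too-late', datetime.datetime(2018, 1, 10)),
]

kept = [
    job for job in candidates
    if not (job.end and (job.end < window_min
                         or window_max and job.end > window_max))
]
assert [job.id for job in kept] == ['ended-in-window']
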
Example #4
def generate_jobs(provider, query, create_time_max=None, offset_id=None):
    """Get the generator of jobs according to the specified filters.

    Args:
        provider (str): type of provider
        query (QueryJobsRequest): query body
        create_time_max (datetime): the latest job create time
        offset_id (str): paginator offset id

    Returns:
        generator: Retrieved jobs
    """
    proj_id = query.extensions.project_id if query.extensions else None
    dstat_params = query_parameters.api_to_dsub(query)

    # If create_time_max is not set, but we have to client-side filter by
    # end-time, set create_time_max = query.end because all the jobs with
    # create_time >= query.end cannot possibly match the query.
    if not create_time_max and query.end:
        create_time_max = query.end

    # If a submission time is not specified, set create_time_min to
    # (start time - _MAX_RUNTIME_DAYS) to avoid iterating through the whole
    # job list.
    create_time_min = dstat_params.get('create_time')
    if not create_time_min and query.start:
        create_time_min = query.start - datetime.timedelta(
            days=_MAX_RUNTIME_DAYS)

    jobs = execute_redirect_stdout(
        lambda: dstat.lookup_job_tasks(provider=provider,
                                       statuses=dstat_params['statuses'],
                                       user_ids=dstat_params.get('user_ids'),
                                       job_ids=dstat_params.get('job_ids'),
                                       task_ids=dstat_params.get('task_ids'),
                                       create_time_min=create_time_min,
                                       create_time_max=create_time_max,
                                       job_names=dstat_params.get('job_names'),
                                       labels=dstat_params.get('labels')))

    last_create_time = None
    job_buffer = []
    for j in jobs:
        job = _query_jobs_result(j, proj_id)
        # Filter by API status client-side; this is needed to distinguish
        # pending vs. running jobs since dstat reports both as RUNNING
        if query.statuses and job.status not in query.statuses:
            continue
        if query.start and (not job.start or job.start < query.start):
            continue
        if query.end and (not job.end or job.end > query.end):
            continue

        # If this job is from the last page, skip it and continue generating
        if create_time_max and job.submission == create_time_max:
            if offset_id and job.id < offset_id:
                continue

        # Build up a buffer of jobs with the same create time. Once we get a
        # job with an older create time we yield all the jobs in the buffer
        # sorted by job-id + task-id
        job_buffer.append(job)
        if job.submission != last_create_time:
            for j in sorted(job_buffer, key=lambda j: j.id):
                yield j
            job_buffer = []
        last_create_time = job.submission

    # Once the dstat job generator is exhausted, yield any jobs still
    # stored in the buffer before returning
    for j in sorted(job_buffer, key=lambda j: j.id):
        yield j
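
The buffer at the end of generate_jobs exists because dstat returns jobs ordered by create time, and ties on create time need a deterministic secondary ordering (by id) for the offset-based pagination above to skip the right jobs. A simplified standalone sketch of that idea, ignoring the filtering and last-page handling interleaved above:

# Simplified sketch of the buffering idea: group consecutive jobs that share
# a create time and emit each group sorted by id. Not the exact control flow
# used in generate_jobs.
import itertools

def yield_sorted_within_create_time(jobs):
    # `jobs` is assumed to already be ordered by create time, as dstat
    # returns them; only ties on create time are re-sorted by job id.
    for _, group in itertools.groupby(jobs, key=lambda job: job.submission):
        for job in sorted(group, key=lambda job: job.id):
            yield job
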
Example #5
        def start_job(self,
                      command,
                      name=None,
                      envs={},
                      labels={},
                      inputs={},
                      inputs_recursive={},
                      outputs={},
                      outputs_recursive={},
                      task_count=1,
                      wait=False):
            logging = param_util.build_logging_param(self.log_path)
            resources = job_model.Resources(image=DOCKER_IMAGE,
                                            logging=logging,
                                            zones=['us-central1*'])

            env_data = {param_util.EnvParam(k, v) for (k, v) in envs.items()}
            label_data = {
                job_model.LabelParam(k, v)
                for (k, v) in labels.items()
            }

            # This is mostly an extraction of dsub's argument parsing from:
            # https://github.com/googlegenomics/dsub/blob/master/dsub/lib/param_util.py#L720
            # Reworked here to handle dictionaries rather than a list of items
            # of the form 'key=val'.
            input_file_param_util = param_util.InputFileParamUtil('input')
            input_data = set()
            # Use a loop variable other than `name` so the job name passed to
            # dsub.run below is not clobbered by input/output variable names.
            for (recursive, items) in ((False, inputs.items()),
                                       (True, inputs_recursive.items())):
                for (var_name, value) in items:
                    var_name = input_file_param_util.get_variable_name(var_name)
                    input_data.add(
                        input_file_param_util.make_param(
                            var_name, value, recursive))

            output_file_param_util = param_util.OutputFileParamUtil('output')
            output_data = set()
            for (recursive, items) in ((False, outputs.items()),
                                       (True, outputs_recursive.items())):
                for (var_name, value) in items:
                    var_name = output_file_param_util.get_variable_name(var_name)
                    output_data.add(
                        output_file_param_util.make_param(
                            var_name, value, recursive))

            job_params = {
                'envs': env_data,
                'inputs': input_data,
                'outputs': output_data,
                'labels': label_data,
            }

            if task_count > 1:
                task_descriptors = [
                    job_model.TaskDescriptor({'task-id': i + 1}, {
                        'envs': env_data,
                        'inputs': input_data,
                        'outputs': output_data,
                        'labels': label_data,
                    }, job_model.Resources()) for i in xrange(task_count)
                ]
                all_task_data = [{
                    'task-id': i + 1
                } for i in xrange(task_count)]
            else:
                task_descriptors = [
                    job_model.TaskDescriptor({'task-id': None}, {
                        'labels': set(),
                        'envs': set(),
                        'inputs': set(),
                        'outputs': set()
                    }, job_model.Resources())
                ]

            return execute_redirect_stdout(
                lambda: dsub.run(self.provider,
                                 resources,
                                 job_params,
                                 task_descriptors,
                                 name=name,
                                 command=command,
                                 wait=wait,
                                 disable_warning=True))
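
A hypothetical call to this helper (the receiver object, bucket path, and variable names below are all made up) might look like:

# Hypothetical usage of start_job; every literal value here is invented.
launched = runner.start_job(
    'echo "${GREETING}" > "${OUT}"',
    name='hello-dsub',
    envs={'GREETING': 'hello'},
    outputs={'OUT': 'gs://example-bucket/output/hello.txt'},
    wait=True)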