Code Example #1
def format_scattered_task(task_name, task_metadata):
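    # Collapse the per-shard call metadata of a scattered task into a single
    # TaskMetadata, keeping one Shard entry per shard index (latest attempt only).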
    filtered_shards = []
    current_shard = ''
    min_start = _parse_datetime(
        task_metadata[0].get('start')) or _parse_datetime(
            task_metadata[0].get('end')) or offset_aware_now
    max_end = _parse_datetime(task_metadata[-1].get('end'))
    execution_events = _get_execution_events(task_metadata)

    # go through calls in reverse to grab the latest attempt if there are multiple
    for shard in task_metadata[::-1]:
        if current_shard != shard.get('shardIndex'):
            failure_messages = None
            if shard.get('failures'):
                failure_messages = [
                    f.get('message') for f in shard.get('failures')
                ]
            filtered_shards.append(
                Shard(execution_status=task_statuses.cromwell_execution_to_api(
                    shard.get('executionStatus')),
                      start=_parse_datetime(shard.get('start'))
                      or _parse_datetime(shard.get('end')) or offset_aware_now,
                      end=_parse_datetime(shard.get('end')),
                      shard_index=shard.get('shardIndex'),
                      backend_log=shard.get('backendLogs').get('log')
                      if shard.get('backendLogs') else None,
                      call_root=shard.get('callRoot'),
                      operation_id=shard.get('jobId'),
                      attempts=shard.get('attempt'),
                      failure_messages=failure_messages,
                      job_id=shard.get('subWorkflowId')))
            if shard.get('start') and min_start > _parse_datetime(
                    shard.get('start')):
                min_start = _parse_datetime(shard.get('start'))
            if shard.get('executionStatus') not in [
                    'Failed', 'Done', 'Aborted'
            ]:
                max_end = None
            if max_end is not None and max_end < _parse_datetime(
                    shard.get('end')):
                max_end = _parse_datetime(shard.get('end'))
        current_shard = shard.get('shardIndex')

    sorted_shards = sorted(filtered_shards, key=lambda t: t.shard_index)

    return TaskMetadata(
        name=remove_workflow_name(task_name),
        execution_status=_get_scattered_task_status(sorted_shards),
        attempts=len(sorted_shards),
        start=min_start,
        end=max_end,
        call_root=remove_shard_path(task_metadata[-1].get('callRoot')),
        shards=sorted_shards,
        execution_events=execution_events,
        call_cached=False)
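
Code Example #1 (and several of the snippets below) relies on two helpers that are not shown: _parse_datetime, which must return a timezone-aware datetime or None when a field is missing (the "or" chains depend on that), and offset_aware_now, used as a fallback timestamp. The following is only a minimal sketch of what they could look like, assuming python-dateutil for parsing; it is not the module's actual implementation.

from datetime import datetime, timezone

from dateutil import parser as date_parser  # assumed dependency (python-dateutil)


def _parse_datetime(value):
    # Parse a Cromwell ISO-8601 timestamp; return None if the value is missing
    # or cannot be parsed, so callers can chain fallbacks with `or`.
    if not value:
        return None
    try:
        return date_parser.parse(value)
    except (ValueError, OverflowError):
        return None


# The snippets use offset_aware_now as a bare value rather than a call, so this
# sketch treats it as a timezone-aware "now"; the real module may expose it
# differently (e.g. as a function).
offset_aware_now = datetime.now(timezone.utc)
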
Code Example #2
def format_task(task_name, task_metadata):
    return TaskMetadata(
        name=remove_workflow_name(task_name),
        execution_id=task_metadata.get('jobId'),
        execution_status=task_statuses.cromwell_execution_to_api(
            task_metadata.get('executionStatus')),
        start=_parse_datetime(task_metadata.get('start')),
        end=_parse_datetime(task_metadata.get('end')),
        stderr=task_metadata.get('stderr'),
        stdout=task_metadata.get('stdout'),
        inputs=update_key_names(task_metadata.get('inputs', {})),
        return_code=task_metadata.get('returnCode'),
        attempts=task_metadata.get('attempt'),
        call_root=task_metadata.get('callRoot'),
        job_id=task_metadata.get('subWorkflowId'))
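
Both format_task variants also call remove_workflow_name and update_key_names, which are not included in these snippets. Cromwell qualifies call names and input/output keys with the workflow name ("Workflow.task", "Workflow.task.key"), so a plausible sketch, assuming the helpers simply strip those prefixes, is:

def remove_workflow_name(name):
    # Drop the leading "WorkflowName." segment from a fully qualified call name.
    # Assumption: names look like "Workflow.task"; the real helper may handle
    # nested sub-workflow prefixes differently.
    return name.split('.', 1)[-1] if name else name


def update_key_names(metadata):
    # Re-key an inputs/outputs dict by the short (last) name segment.
    return {key.split('.')[-1]: value for key, value in metadata.items()}
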
Code Example #3
def _convert_to_attempt(item):
    attempt = IndividualAttempt(
        execution_status=task_statuses.cromwell_execution_to_api(
            item.get('executionStatus')),
        attempt_number=item.get('attempt'),
        call_cached=_is_call_cached(item.get('callCaching')),
        stdout=item.get('stdout'),
        stderr=item.get('stderr'),
        call_root=item.get('callRoot'),
        inputs=item.get('inputs'),
        outputs=item.get('outputs'),
        start=_parse_datetime(item.get('start')),
        end=_parse_datetime(item.get('end')))

    if item.get('failures'):
        attempt.failure_messages = [
            f.get('message') for f in item.get('failures')
        ]

    return attempt
Code Example #4
def _convert_to_attempt(item):
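    # Build an IndividualAttempt from one Cromwell call attempt dict; this
    # variant also carries the backend log and operation id, and falls back to
    # the current time when timestamps are missing.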
    attempt = IndividualAttempt(
        execution_status=task_statuses.cromwell_execution_to_api(
            item.get('executionStatus')),
        attempt_number=item.get('attempt'),
        call_cached=_is_call_cached(item.get('callCaching')),
        backend_log=item.get('backendLogs').get('log')
        if item.get('backendLogs') else None,
        call_root=item.get('callRoot'),
        operation_id=item.get('jobId'),
        inputs=item.get('inputs'),
        outputs=item.get('outputs'),
        start=_parse_datetime(item.get('start'))
        or _parse_datetime(item.get('end')) or offset_aware_now,
        end=_parse_datetime(item.get('end')) or offset_aware_now)

    if item.get('failures'):
        attempt.failure_messages = [
            f.get('message') for f in item.get('failures')
        ]

    return attempt
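
Both versions of _convert_to_attempt defer to _is_call_cached, which is also not shown. Cromwell's callCaching metadata block includes a boolean hit field, so a minimal sketch under that assumption:

def _is_call_cached(call_caching):
    # Treat the attempt as call-cached only if the callCaching block exists
    # and reports a cache hit. Assumes Cromwell's boolean 'hit' field.
    return bool(call_caching and call_caching.get('hit'))
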
Code Example #5
def format_task(task_name, task_metadata):
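    # Format the call metadata for a single task; scattered tasks are handed
    # off to format_scattered_task, otherwise only the latest attempt is used.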
    # check to see if task is scattered
    if task_metadata[0].get('shardIndex') != -1:
        return format_scattered_task(task_name, task_metadata)
    latest_attempt = task_metadata[-1]

    call_cached = False
    if latest_attempt.get('callCaching'):
        call_cached = latest_attempt.get('callCaching') and (_is_call_cached(
            latest_attempt.get('callCaching')))

    execution_events = _get_execution_events(task_metadata)

    failure_messages = None
    if latest_attempt.get('failures'):
        failure_messages = [
            f.get('message') for f in latest_attempt.get('failures')
        ]

    return TaskMetadata(
        name=remove_workflow_name(task_name),
        execution_status=task_statuses.cromwell_execution_to_api(
            latest_attempt.get('executionStatus')),
        start=_parse_datetime(latest_attempt.get('start'))
        or _parse_datetime(latest_attempt.get('end')) or offset_aware_now,
        end=_parse_datetime(latest_attempt.get('end')),
        backend_log=latest_attempt.get('backendLogs').get('log')
        if latest_attempt.get('backendLogs') else None,
        inputs=update_key_names(latest_attempt.get('inputs', {})),
        outputs=update_key_names(latest_attempt.get('outputs', {})),
        return_code=latest_attempt.get('returnCode'),
        attempts=latest_attempt.get('attempt'),
        call_root=latest_attempt.get('callRoot'),
        operation_id=latest_attempt.get('jobId'),
        job_id=latest_attempt.get('subWorkflowId'),
        shards=None,
        call_cached=call_cached,
        execution_events=execution_events,
        failure_messages=failure_messages)
Code Example #6
def test_unrecognized_task_status_causes_exception(self):
    with self.assertRaises(ValueError):
        task_statuses.cromwell_execution_to_api('Not a valid task status')
Code Example #7
def test_cromwell_execution_to_api_maps_all_task_execution_statuses_correctly(self):
    self.assertEqual(task_statuses.cromwell_execution_to_api('NotStarted'), 'Submitted')
    self.assertEqual(task_statuses.cromwell_execution_to_api('WaitingForQueueSpace'), 'Submitted')
    self.assertEqual(task_statuses.cromwell_execution_to_api('QueuedInCromwell'), 'Submitted')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Starting'), 'Submitted')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Running'), 'Running')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Aborting'), 'Aborting')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Unstartable'), 'Failed')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Aborted'), 'Aborted')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Bypassed'), 'Submitted')
    self.assertEqual(task_statuses.cromwell_execution_to_api('RetryableFailure'), 'Failed')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Failed'), 'Failed')
    self.assertEqual(task_statuses.cromwell_execution_to_api('Done'), 'Succeeded')
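
Taken together, the two tests pin down the behaviour of task_statuses.cromwell_execution_to_api: each Cromwell execution status maps to one of the API statuses, and an unrecognized status raises ValueError. A dictionary-based sketch that satisfies these tests (not necessarily the module's actual implementation) could look like this:

# Mapping taken directly from the expected values asserted in Code Example #7.
_CROMWELL_TO_API_STATUS = {
    'NotStarted': 'Submitted',
    'WaitingForQueueSpace': 'Submitted',
    'QueuedInCromwell': 'Submitted',
    'Starting': 'Submitted',
    'Running': 'Running',
    'Aborting': 'Aborting',
    'Unstartable': 'Failed',
    'Aborted': 'Aborted',
    'Bypassed': 'Submitted',
    'RetryableFailure': 'Failed',
    'Failed': 'Failed',
    'Done': 'Succeeded',
}


def cromwell_execution_to_api(cromwell_status):
    # Translate a Cromwell execution status into the API status vocabulary,
    # raising ValueError for anything not in the table (per Code Example #6).
    try:
        return _CROMWELL_TO_API_STATUS[cromwell_status]
    except KeyError:
        raise ValueError(
            'Unrecognized Cromwell execution status: {}'.format(cromwell_status))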