Example #1
def test_unified_job_workflow_attributes():
    with mock.patch('django.db.ConnectionRouter.db_for_write'):
        job = UnifiedJob(id=1, name="job-1", launch_type="workflow")
        job.unified_job_node = WorkflowJobNode(workflow_job=WorkflowJob(pk=1))

        assert job.spawned_by_workflow is True
        assert job.workflow_job_id == 1
Example #2
def test_result_stdout_raw_handle_file__found(exists, open):
    unified_job = UnifiedJob()
    unified_job.result_stdout_file = 'dummy'

    with mock.patch('os.stat', return_value=Mock(st_size=1)):
        result = unified_job.result_stdout_raw_handle()

    assert result == 'my_file_handler'
Example #3
def test_result_stdout_raw_handle__pending(exists):
    unified_job = UnifiedJob()
    unified_job.result_stdout_file = 'dummy'
    unified_job.finished = None

    result = unified_job.result_stdout_raw_handle()

    assert isinstance(result, StringIO)
    assert result.read() == 'Waiting for results...'
Example #4
def test_result_stdout_raw_handle__missing(exists):
    unified_job = UnifiedJob()
    unified_job.result_stdout_file = 'dummy'
    unified_job.finished = now()

    result = unified_job.result_stdout_raw_handle()

    assert isinstance(result, StringIO)
    assert result.read() == 'stdout capture is missing'
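The `exists` and `open` parameters in Examples #2-#4 are injected by `mock.patch` decorators that were stripped when these snippets were extracted. Below is a minimal sketch of what such a decorator stack could look like for Example #2, meant to run inside the AWX unit-test suite; the patch targets (`os.path.exists`, `codecs.open`) and the `awx.main.models` import path are assumptions, not taken from the snippets themselves:

from unittest import mock

from awx.main.models import UnifiedJob  # assumed import path

# The decorator closest to the function supplies the first argument (`exists`),
# the outer one supplies the second (`open`).
@mock.patch('codecs.open', return_value='my_file_handler')  # assumed patch target
@mock.patch('os.path.exists', return_value=True)            # assumed patch target
def test_result_stdout_raw_handle_file__found(exists, open):
    unified_job = UnifiedJob()
    unified_job.result_stdout_file = 'dummy'

    with mock.patch('os.stat', return_value=mock.Mock(st_size=1)):
        result = unified_job.result_stdout_raw_handle()

    # The patched open() returned the sentinel string, so the handle is that string.
    assert result == 'my_file_handler'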
Example #5
def test_log_representation():
    '''
    Common representation used inside of log messages
    '''
    uj = UnifiedJob(status='running', id=4)
    job = Job(status='running', id=4)
    assert job.log_format == 'job 4 (running)'
    assert uj.log_format == 'unified_job 4 (running)'
Example #6
def handle_work_success(task_actual):
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
        return
    if not instance:
        return

    schedule_task_manager()
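As the lookup above shows, `handle_work_success` expects `task_actual` to be a dict with 'type' and 'id' keys. A minimal illustration of the call shape, with made-up values:

handle_work_success(task_actual={'type': 'job', 'id': 42})  # hypothetical type/id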
Example #7
@pytest.fixture
def unified_job(mocker):  # `mocker` is provided by the pytest-mock plugin
    mocker.patch.object(UnifiedJob, 'can_cancel', return_value=True)
    j = UnifiedJob()
    j.status = 'pending'
    j.cancel_flag = None
    j.save = mocker.MagicMock()
    j.websocket_emit_status = mocker.MagicMock()
    return j
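A minimal sketch of a test that consumes this fixture; it assumes `UnifiedJob.cancel()` sets `cancel_flag` and goes through `save()` and `websocket_emit_status()`, behaviour that is not shown on this page:

def test_cancel_sets_flag(unified_job):
    # Hypothetical test name and assertions; they rely on the assumed
    # behaviour of UnifiedJob.cancel() described above.
    unified_job.cancel()

    assert unified_job.cancel_flag is True
    unified_job.save.assert_called()
    unified_job.websocket_emit_status.assert_called()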
Example #8
def test_unified_job_list_field_consistency():
    """
    Example of what is being tested:
    The endpoint /project_updates/ should have the same fields as that
    project update when it is serialized by the unified job list serializer
    in /unified_jobs/
    """
    for cls in UnifiedJob.__subclasses__():
        list_serializer = getattr(serializers,
                                  '{}ListSerializer'.format(cls.__name__))
        unified_serializer = serializers.UnifiedJobListSerializer().get_sub_serializer(cls())
        assert set(list_serializer().fields.keys()) == set(
            unified_serializer().fields.keys()
        ), 'Mismatch between {} list serializer & unified list serializer'.format(
            cls)
Example #9
def test_list_views_use_list_serializers(all_views):
    '''
    Check that the list serializers are only used for list views,
    and vice versa
    '''
    list_serializers = tuple(
        getattr(serializers, '{}ListSerializer'.format(cls.__name__))
        for cls in (UnifiedJob.__subclasses__() + [UnifiedJob]))
    for View in all_views:
        if hasattr(View, 'model') and issubclass(View.model, UnifiedJob):
            if issubclass(View, ListAPIView):
                assert issubclass(View.serializer_class, list_serializers), (
                    'View {} serializer {} is not a list serializer'.format(
                        View, View.serializer_class))
            else:
                # Non-list views must not use a list serializer (the "vice versa" part)
                assert not issubclass(View.serializer_class, list_serializers)
Example #10
def test_unified_job_detail_exclusive_fields():
    """
    For each type, assert that the only fields allowed to be exclusive to
    detail view are the allowed types
    """
    allowed_detail_fields = frozenset(
        ('result_traceback', 'job_args', 'job_cwd', 'job_env',
         'event_processing_finished'))
    for cls in UnifiedJob.__subclasses__():
        list_serializer = getattr(serializers,
                                  '{}ListSerializer'.format(cls.__name__))
        detail_serializer = getattr(serializers,
                                    '{}Serializer'.format(cls.__name__))
        list_fields = set(list_serializer().fields.keys())
        detail_fields = set(
            detail_serializer().fields.keys()) - allowed_detail_fields
        assert list_fields == detail_fields, 'List / detail mismatch for serializers of {}'.format(
            cls)
Example #11
def handle_work_error(task_id, *args, **kwargs):
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' %
                 (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(
                    each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    logger.warning("Unknown task type: {}".format(
                        each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(
                    each_task['type'], each_task['id']))
                continue

            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']

            if instance.celery_task_id != task_id and not instance.cancel_flag and instance.status != 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")

    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()
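Each entry of `subtasks` passed to `handle_work_error` is a dict with the same 'type' and 'id' keys that `handle_work_success` receives in Example #6, and `task_id` is the Celery task id of the task that failed (instances whose `celery_task_id` matches it are skipped when marking dependents as failed). A minimal illustration of the call, with made-up values:

handle_work_error(
    'aaaa-bbbb-cccc',  # hypothetical celery task id of the failed task
    subtasks=[
        {'type': 'job', 'id': 42},
        {'type': 'project_update', 'id': 43},
    ],
)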