Example #1
    def queue_new_job(self, job_type, data, event, configuration=None):
        """Creates a new job for the given type and data. The new job is immediately placed on the queue. The new job,
        job_exe, and queue models are saved in the database in an atomic transaction.

        :param job_type: The type of the new job to create and queue
        :type job_type: :class:`job.models.JobType`
        :param data: The job data to run on
        :type data: :class:`job.configuration.data.job_data.JobData`
        :param event: The event that triggered the creation of this job
        :type event: :class:`trigger.models.TriggerEvent`
        :param configuration: The optional initial execution configuration
        :type configuration: :class:`job.configuration.json.execution.exe_config.ExecutionConfiguration`
        :returns: The new queued job
        :rtype: :class:`job.models.Job`

        :raises job.configuration.data.exceptions.InvalidData: If the job data is invalid
        """

        job = Job.objects.create_job(job_type, event)
        if not configuration:
            configuration = ExecutionConfiguration()
        job.configuration = configuration.get_dict()
        job.save()

        # No lock needed for this job since it doesn't exist outside this transaction yet
        Job.objects.populate_job_data(job, data)
        self._queue_jobs([job])

        return job
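A minimal usage sketch for the manager method above, assuming a configured Scale/Django environment. The job type name, trigger event type, and event description are hypothetical placeholders; real calls would populate the JobData with the inputs the job type requires.

from django.utils.timezone import now

from job.configuration.data.job_data import JobData
from job.models import JobType
from queue.models import Queue
from trigger.models import TriggerEvent

job_type = JobType.objects.get(name='my-job-type')  # hypothetical job type name
desc = {'file_name': 'my_file.png'}  # hypothetical event description
event = TriggerEvent.objects.create_trigger_event('USER', None, desc, now())  # 'USER' type is an assumption
data = JobData()  # left empty here; real calls add the inputs the job type requires
job = Queue.objects.queue_new_job(job_type, data, event)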
Example #2
    def test_convert_1_0_to_current(self):
        """Tests converting execution configuration 1.0 to current"""

        old_dict = {'version': '1.0', 'job_task': {'workspaces': [{'name': 'name1', 'mode': 'ro'}]}}
        exe_config = ExecutionConfiguration(old_dict)
        new_dict = exe_config.get_dict()
        self.assertEqual(new_dict['version'], '2.0')
        self.assertEqual(3, len(new_dict['tasks']))  # Version 1.0 will auto-create pre and post tasks
        self.assertEqual('main', new_dict['tasks'][1]['type'])
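A follow-on sketch of the same conversion, listing the task types the 1.0-to-2.0 upgrade produces. The pre/main/post ordering is an assumption consistent with the assertions above ('main' at index 1 of three tasks):

from job.configuration.json.execution.exe_config import ExecutionConfiguration

old_dict = {'version': '1.0', 'job_task': {'workspaces': [{'name': 'name1', 'mode': 'ro'}]}}
exe_config = ExecutionConfiguration(old_dict)
task_types = [task['type'] for task in exe_config.get_dict()['tasks']]
print(task_types)  # expected: ['pre', 'main', 'post']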
Example #3
    def test_populate_mounts(self):
        """Tests the addition of mount volumes to the configuration."""

        exe_config = ExecutionConfiguration()

        config_dict = {
            'version': '2.0',
            'mounts': {
                'mount_1': {
                    'type': 'host',
                    'host_path': '/host/path'
                },
                'mount_2': {
                    'type': 'volume',
                    'driver': 'x-driver',
                    'driver_opts': {
                        'foo': 'bar'
                    }
                }
            }
        }

        interface_dict = {
            'version': '1.4',
            'command': 'the cmd',
            'command_arguments': 'foo',
            'mounts': [{
                'name': 'mount_1',
                'path': '/mount_1',
                'mode': 'ro'
            }, {
                'name': 'mount_2',
                'path': '/mount_2',
                'mode': 'rw'
            }]
        }

        job_exe = MagicMock()
        job_exe.get_job_configuration.return_value = JobConfiguration(config_dict)
        job_exe.get_job_interface.return_value = JobInterface(interface_dict)
        job_exe.get_cluster_id.return_value = 'scale_1234'

        exe_config.populate_mounts(job_exe)

        docker_params = exe_config.get_job_task_docker_params()
        self.assertEqual(docker_params[0].flag, 'volume')
        self.assertEqual(docker_params[0].value, '/host/path:/mount_1:ro')
        self.assertEqual(docker_params[1].flag, 'volume')
        mount_2 = '$(docker volume create --name scale_1234_mount_mount_2 --driver x-driver --opt foo=bar):/mount_2:rw'
        self.assertEqual(docker_params[1].value, mount_2)
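The two asserted volume strings follow two shapes: a host mount renders as host_path:container_path:mode, while a named volume wraps a `docker volume create` shell command substitution. A standalone sketch of those shapes, with values copied from the test above (the helper functions are illustrative, not Scale API):

def host_volume(host_path, container_path, mode):
    # Host mounts bind an existing host directory into the container
    return '%s:%s:%s' % (host_path, container_path, mode)

def named_volume(name, driver, driver_opts, container_path, mode):
    # Named volumes are created on the fly via shell command substitution
    opts = ' '.join('--opt %s=%s' % item for item in sorted(driver_opts.items()))
    return '$(docker volume create --name %s --driver %s %s):%s:%s' % (
        name, driver, opts, container_path, mode)

assert host_volume('/host/path', '/mount_1', 'ro') == '/host/path:/mount_1:ro'
expected = '$(docker volume create --name scale_1234_mount_mount_2 --driver x-driver --opt foo=bar):/mount_2:rw'
assert named_volume('scale_1234_mount_mount_2', 'x-driver', {'foo': 'bar'}, '/mount_2', 'rw') == expected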
Example #4
    def test_create_copy(self):
        """Tests the create_copy() method"""

        config = {
            'version': '2.0',
            'input_files': {
                'INPUT_1': [{
                    'id': 1234,
                    'type': 'PRODUCT',
                    'workspace_name': 'wksp-name',
                    'workspace_path': 'the/workspace/path/file.json',
                    'local_file_name': 'file_abcdfeg.json',
                    'is_deleted': False,
                }]
            },
            'output_workspaces': {
                'OUTPUT_1': 'WORKSPACE_1'
            },
            'tasks': [
                {
                    'task_id': 'task-1234',
                    'type': 'main',
                    'resources': {'cpu': 1.0},
                    'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',
                    'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},
                    'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},
                    'mounts': {'MOUNT_NAME': 'MOUNT_VOLUME_NAME'},
                    'settings': {'SETTING_NAME': 'SETTING_VALUE'},
                    'volumes': {
                        'VOLUME_NAME_1': {
                            'container_path': '/the/container/path',
                            'mode': 'ro',
                            'type': 'host',
                            'host_path': '/the/host/path'
                        },
                        'VOLUME_NAME_2': {
                            'container_path': '/the/other/container/path',
                            'mode': 'rw',
                            'type': 'volume',
                            'driver': 'SUPER_DRIVER_5000',
                            'driver_opts': {'turbo': 'yes-pleez'}
                        }
                    },
                    'docker_params': [{'flag': 'hello', 'value': 'scale'}]
                }
            ]
        }
        exe_config = ExecutionConfiguration(config)

        copy = exe_config.create_copy()
        self.assertDictEqual(copy.get_dict(), config)
Example #5
    def test_successful(self, mock_get_dict):
        """Tests calling ExecutionConfiguration.convert_configuration() successfully."""

        mock_get_dict.return_value = self.job_configuration_dict
        job_configuration = ExecutionConfiguration.convert_configuration(self.job_configuration_dict)
        self.assertEqual(job_configuration['version'], '1.1')
        self.assertFalse(job_configuration['job_task']['settings'])
Example #6
def create_queue(job_type=None, priority=1, timeout=3600, cpus_required=1.0, mem_required=512.0, disk_in_required=200.0,
                 disk_out_required=100.0, disk_total_required=300.0, queued=None):
    """Creates a queue model for unit testing

    :param job_type: The job type
    :type job_type: :class:`job.models.JobType`
    :param priority: The priority
    :type priority: int
    :param timeout: The timeout
    :type timeout: int
    :param cpus_required: The number of CPUs required
    :type cpus_required: float
    :param mem_required: The memory required in MiB
    :type mem_required: float
    :param disk_in_required: The input disk space required in MiB
    :type disk_in_required: float
    :param disk_out_required: The output disk space required in MiB
    :type disk_out_required: float
    :param disk_total_required: The total disk space required in MiB
    :type disk_total_required: float
    :param queued: The time the execution was queued
    :type queued: :class:`datetime.datetime`
    """

    if queued is None:
        queued = timezone.now()  # evaluate the default at call time, not at import time

    job = job_test_utils.create_job(job_type=job_type, status='QUEUED')
    resources = NodeResources([Cpus(cpus_required), Mem(mem_required), Disk(disk_total_required)])

    return Queue.objects.create(job_type=job.job_type, job=job, exe_num=job.num_exes, priority=priority,
                                timeout=timeout, input_file_size=disk_in_required,
                                interface=job.get_job_interface().get_dict(),
                                configuration=ExecutionConfiguration().get_dict(),
                                resources=resources.get_json().get_dict(), queued=queued)
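Hypothetical usage of this test utility; the import path is assumed from Scale's test layout:

from queue.test import utils as queue_test_utils  # assumed module path

queue_entry = queue_test_utils.create_queue(priority=50, timeout=600)
print(queue_entry.priority, queue_entry.job.status)  # 50, 'QUEUED'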
Example #7
    def get_execution_configuration(self):
        """Returns the execution configuration for this queued job

        :returns: The execution configuration for this queued job
        :rtype: :class:`job.configuration.json.execution.exe_config.ExecutionConfiguration`
        """

        return ExecutionConfiguration(self.configuration, do_validate=False)
Example #8
    def test_init_validation(self):
        """Tests the validation done in __init__"""

        # Try minimal acceptable configuration
        ExecutionConfiguration()

        # Invalid version
        config = {'version': 'BAD'}
        self.assertRaises(InvalidExecutionConfiguration, ExecutionConfiguration, config)
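The same rejection path, written without assertRaises for illustration (the exception's module location is an assumption):

from job.configuration.json.execution.exe_config import ExecutionConfiguration
from job.configuration.json.execution.exe_config import InvalidExecutionConfiguration  # assumed location

try:
    ExecutionConfiguration({'version': 'BAD'})
except InvalidExecutionConfiguration as ex:
    print('Configuration rejected: %s' % ex)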
Example #9
    def test_init_validation(self):
        """Tests the validation done in __init__"""

        # Try minimal acceptable configuration
        ExecutionConfiguration()

        # Duplicate workspace name in pre-task
        config = {
            'pre_task': {
                'workspaces': [{
                    'name': 'name1',
                    'mode': 'ro'
                }, {
                    'name': 'name1',
                    'mode': 'ro'
                }]
            },
            'job_task': {
                'workspaces': []
            }
        }
        self.assertRaises(InvalidExecutionConfiguration,
                          ExecutionConfiguration, config)

        # Duplicate workspace name in job-task
        config = {
            'job_task': {
                'workspaces': [{
                    'name': 'name1',
                    'mode': 'ro'
                }, {
                    'name': 'name1',
                    'mode': 'ro'
                }]
            }
        }
        self.assertRaises(InvalidExecutionConfiguration,
                          ExecutionConfiguration, config)

        # Duplicate workspace name in post-task
        config = {
            'post_task': {
                'workspaces': [{
                    'name': 'name1',
                    'mode': 'ro'
                }, {
                    'name': 'name1',
                    'mode': 'ro'
                }]
            },
            'job_task': {
                'workspaces': []
            }
        }
        self.assertRaises(InvalidExecutionConfiguration,
                          ExecutionConfiguration, config)
Example #10
    def test_determine_error(self):
        """Tests that a post-task successfully determines the correct error"""

        scale_errors = [
            ScaleDatabaseError(),
            ScaleIOError(),
            ScaleOperationalError(),
            InvalidResultsManifest(''),
            MissingRequiredOutput('')
        ]

        for scale_error in scale_errors:
            config = ExecutionConfiguration()
            config.create_tasks(['pre'])
            config.set_task_ids(self.job_exe.get_cluster_id())
            task = PostTask('agent_1', self.job_exe, self.job_exe.job_type,
                            config)
            update = job_test_utils.create_task_status_update(
                task.id, task.agent_id, TaskStatusUpdate.RUNNING, now())
            task.update(update)
            update = job_test_utils.create_task_status_update(
                task.id,
                task.agent_id,
                TaskStatusUpdate.FAILED,
                now(),
                exit_code=scale_error.exit_code)
            error = task.determine_error(update)
            self.assertEqual(scale_error.error_name, error.name)
Example #11
    def test_populate_default_job_settings(self):
        """Tests the addition of default settings to the configuration."""

        exe_config = ExecutionConfiguration()

        config_dict = {
            'version': '1.0',
            'default_settings': {
                'setting_name': 'some_val',
                'setting2': 'other_val'
            }
        }

        interface_dict = {
            'version': '1.4',
            'command': 'the cmd',
            'command_arguments': 'foo',
            'settings': [{
                'name': 'setting_name'
            }, {
                'name': 'setting2'
            }]
        }

        job_exe = MagicMock()
        job_exe.get_job_configuration.return_value = JobConfiguration(config_dict)
        job_exe.get_job_interface.return_value = JobInterface(interface_dict)

        exe_config.populate_default_job_settings(job_exe)

        populated_config = exe_config.get_dict()
        populated_settings = populated_config['job_task']['settings']

        # Each populated setting is a {'name': ..., 'value': ...} entry
        results_dict = {setting['name']: setting['value'] for setting in populated_settings}

        self.assertDictEqual(results_dict, config_dict['default_settings'])
Example #12
    def init_with_database(self):
        """Initializes the job execution metrics with the execution history from the database
        """

        oldest_time = self._finished_metrics_over_time.time_blocks[0].start
        blank_config = ExecutionConfiguration()
        for job_exe_end in JobExecutionEnd.objects.get_recent_job_exe_end_metrics(
                oldest_time):
            running_job_exe = RunningJobExecution('', job_exe_end.job_exe,
                                                  job_exe_end.job_type,
                                                  blank_config, 0)
            running_job_exe._set_final_status(job_exe_end.status,
                                              job_exe_end.ended,
                                              job_exe_end.error)
            self._finished_metrics.add_job_execution(running_job_exe)
            self._finished_metrics_over_time.add_job_execution(running_job_exe)
Example #13
    def start_ingest_tasks(self, ingests, scan_id=None, strike_id=None):
        """Starts a batch of tasks for the given scan in an atomic transaction.

        One of scan_id or strike_id must be set.

        :param ingests: The ingest models
        :type ingests: list[:class:`ingest.models.Ingest`]
        :param scan_id: ID of Scan that generated ingest
        :type scan_id: int
        :param strike_id: ID of Strike that generated ingest
        :type strike_id: int
        """

        # Create new ingest job and mark ingest as QUEUED
        ingest_job_type = Ingest.objects.get_ingest_job_type()

        for ingest in ingests:
            logger.debug('Creating ingest task for %s', ingest.file_name)

            when = ingest.transfer_ended if ingest.transfer_ended else now()
            desc = {'file_name': ingest.file_name}

            if scan_id:
                # Use result from query to get ingest ID
                # We need to find the id of each ingest that was created.
                # Using scan_id and file_name together as a unique composite key
                ingest_id = Ingest.objects.get(scan_id=ingest.scan_id, file_name=ingest.file_name).id

                desc['scan_id'] = scan_id
                event = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, desc, when)
            elif strike_id:
                ingest_id = ingest.id
                desc['strike_id'] = strike_id
                event = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, desc, when)
            else:
                raise Exception('One of scan_id or strike_id must be set')

            data = JobData()
            data.add_property_input('Ingest ID', str(ingest_id))

            exe_configuration = ExecutionConfiguration()
            if ingest.workspace:
                exe_configuration.add_job_task_workspace(ingest.workspace.name, MODE_RW)
            if ingest.new_workspace:
                exe_configuration.add_job_task_workspace(ingest.new_workspace.name, MODE_RW)
            ingest_job = Queue.objects.queue_new_job(ingest_job_type, data, event, exe_configuration)

            ingest.job = ingest_job
            ingest.status = 'QUEUED'
            ingest.save()

            logger.debug('Successfully created ingest task for %s', ingest.file_name)
Example #14
    def populate_job_configuration(apps, schema_editor):
        from job.configuration.json.execution.exe_config import ExecutionConfiguration, MODE_RO, MODE_RW
        from job.configuration.data.job_data import JobData
        # Go through all of the job models that have job data and populate their configuration
        Job = apps.get_model('job', 'Job')
        ScaleFile = apps.get_model('storage', 'ScaleFile')
        Workspace = apps.get_model('storage', 'Workspace')
        total_count = Job.objects.all().count()
        workspaces = {}
        for workspace in Workspace.objects.all().iterator():
            workspaces[workspace.id] = workspace
        print('Populating new configuration field for %s jobs' % total_count)
        done_count = 0
        batch_size = 1000
        while done_count < total_count:
            percent = (float(done_count) / float(total_count)) * 100.00
            print('Completed %s of %s jobs (%f%%)' % (done_count, total_count, percent))
            batch_end = done_count + batch_size
            for job in Job.objects.select_related('job_type').order_by(
                    'id')[done_count:batch_end]:

                # Ignore jobs that don't have their job data populated yet
                if not job.data:
                    continue

                data = JobData(job.data)
                input_file_ids = data.get_input_file_ids()
                input_files = ScaleFile.objects.filter(
                    id__in=input_file_ids).select_related(
                        'workspace').iterator()
                input_workspaces = set()
                for input_file in input_files:
                    input_workspaces.add(input_file.workspace.name)

                configuration = ExecutionConfiguration()
                for name in input_workspaces:
                    configuration.add_job_task_workspace(name, MODE_RO)
                if not job.job_type.is_system:
                    for name in input_workspaces:
                        configuration.add_pre_task_workspace(name, MODE_RO)
                        # We add input workspaces to post task so it can perform a parse results move if requested by the
                        # job's results manifest
                        configuration.add_post_task_workspace(name, MODE_RW)
                    for workspace_id in data.get_output_workspace_ids():
                        workspace = workspaces[workspace_id]
                        if workspace.name not in input_workspaces:
                            configuration.add_post_task_workspace(
                                workspace.name, MODE_RW)
                elif job.job_type.name == 'scale-ingest':
                    ingest_id = data.get_property_values(['Ingest ID'])['Ingest ID']
                    from ingest.models import Ingest
                    ingest = Ingest.objects.select_related('workspace').get(
                        id=ingest_id)
                    configuration.add_job_task_workspace(
                        ingest.workspace.name, MODE_RW)

                job.configuration = configuration.get_dict()
                job.save()
            done_count += batch_size
        print('All %s jobs completed' % total_count)
Example #15
def create_job_exe(job_type=None, job=None, exe_num=None, node=None, timeout=None, input_file_size=10.0, queued=None,
                   started=None, status='RUNNING', error=None, ended=None, output=None, task_results=None):
    """Creates a job_exe model for unit testing, may also create job_exe_end and job_exe_output models depending on
    status

    :returns: The job_exe model
    :rtype: :class:`job.models.JobExecution`
    """

    when = timezone.now()
    if not job:
        job = create_job(job_type=job_type, input_file_size=input_file_size)
    job_type = job.job_type

    job_exe = JobExecution()
    job_exe.job = job
    job_exe.job_type = job_type
    if not exe_num:
        exe_num = job.num_exes
    job_exe.exe_num = exe_num
    job_exe.set_cluster_id('1234', job.id, job.num_exes)
    if not node:
        node = node_utils.create_node()
    job_exe.node = node
    if not timeout:
        timeout = job.timeout
    job_exe.timeout = timeout
    job_exe.input_file_size = input_file_size
    job_exe.resources = job.get_resources().get_json().get_dict()
    job_exe.configuration = ExecutionConfiguration().get_dict()
    if not queued:
        queued = when
    job_exe.queued = queued
    if not started:
        started = when + datetime.timedelta(seconds=1)
    job_exe.started = started
    job_exe.save()

    if status in ['COMPLETED', 'FAILED', 'CANCELED']:
        job_exe_end = JobExecutionEnd()
        job_exe_end.job_exe_id = job_exe.id
        job_exe_end.job = job
        job_exe_end.job_type = job_type
        job_exe_end.exe_num = exe_num
        if not task_results:
            task_results = TaskResults()
        job_exe_end.task_results = task_results.get_dict()
        job_exe_end.status = status
        if status == 'FAILED' and not error:
            error = error_test_utils.create_error()
        job_exe_end.error = error
        job_exe_end.node = node
        job_exe_end.queued = queued
        job_exe_end.started = started
        if not ended:
            ended = started + datetime.timedelta(seconds=1)
        job_exe_end.ended = ended
        job_exe_end.save()

    if status == 'COMPLETED':
        job_exe_output = JobExecutionOutput()
        job_exe_output.job_exe_id = job_exe.id
        job_exe_output.job = job
        job_exe_output.job_type = job_type
        job_exe_output.exe_num = exe_num
        if not output:
            output = JobResults()
        job_exe_output.output = output.get_dict()
        job_exe_output.save()

    return job_exe
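A hypothetical call to this factory, creating a completed execution (which, per the logic above, also creates the companion job_exe_end and job_exe_output rows); the import path is assumed:

from job.test import utils as job_test_utils  # assumed module path

job_exe = job_test_utils.create_job_exe(status='COMPLETED', input_file_size=25.0)
print(job_exe.exe_num, job_exe.timeout)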
Example #16
    def configure_queued_job(self, job):
        """Creates and returns an execution configuration for the given queued job. The given job model should have its
        related job_type and job_type_rev models populated.

        :param job: The queued job model
        :type job: :class:`job.models.Job`
        :returns: The execution configuration for the queued job
        :rtype: :class:`job.configuration.json.execution.exe_config.ExecutionConfiguration`
        """

        config = ExecutionConfiguration()
        data = job.get_job_data()

        # Add input file meta-data
        input_files_dict = self._create_input_file_dict(data)
        config.set_input_files(input_files_dict)

        # Set up env vars for job's input data
        env_vars = {}
        input_values = {}
        # TODO: refactor after Seed upgrade
        # This step makes sure that all inputs get replaced with blank if a value is not provided
        for input_data_dict in job.get_job_interface().definition['input_data']:
            input_values[input_data_dict['name']] = ''  # Everything gets a blank value by default
        # TODO: refactor this to use JobData method after Seed upgrade
        for data_input in data.get_dict()['input_data']:
            input_name = data_input['name']
            env_var_name = normalize_env_var_name(input_name)
            if 'value' in data_input:
                env_vars[env_var_name] = data_input['value']
                input_values[input_name] = data_input['value']
            if 'file_id' in data_input:
                input_file = input_files_dict[input_name][0]
                file_name = os.path.basename(input_file.workspace_path)
                if input_file.local_file_name:
                    file_name = input_file.local_file_name
                env_vars[env_var_name] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name, file_name)
                input_values[input_name] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name, file_name)
            elif 'file_ids' in data_input:
                env_vars[env_var_name] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name)
                input_values[input_name] = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name)

        task_workspaces = {}
        if job.job_type.is_system:
            # Add any workspaces needed for this system job
            task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)
        else:
            # Set any output workspaces needed
            # TODO: In the future, output workspaces can be moved from job data to configuration, moving this step to
            # the ScheduledExecutionConfigurator
            self._cache_workspace_names(data.get_output_workspace_ids())
            output_workspaces = {}
            for output, workspace_id in data.get_output_workspaces().items():
                output_workspaces[output] = self._cached_workspace_names[workspace_id]
            config.set_output_workspaces(output_workspaces)

        # Create main task with fields populated from input data
        args = job.get_job_interface().get_command_args()
        # TODO: command arg input param replacement can be removed when old-style job type support is dropped
        args = JobInterface._replace_command_parameters(args, input_values)
        config.create_tasks(['main'])
        config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)
        return config
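To make the env-var step above concrete: for a file input named 'INPUT_1' with local file name 'file.json', the loop builds a path under the job's input directory. A sketch with an assumed value for SCALE_JOB_EXE_INPUT_PATH (Scale defines the real constant), assuming normalize_env_var_name leaves 'INPUT_1' unchanged:

import os

SCALE_JOB_EXE_INPUT_PATH = '/scale/input_data'  # assumed value for illustration

input_name = 'INPUT_1'
file_name = 'file.json'
env_var_value = os.path.join(SCALE_JOB_EXE_INPUT_PATH, input_name, file_name)
print(env_var_value)  # /scale/input_data/INPUT_1/file.json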
Example #17
def create_job_exe(job_type=None,
                   job=None,
                   status='RUNNING',
                   configuration=None,
                   error=None,
                   command_arguments='test_arg',
                   timeout=None,
                   node=None,
                   created=None,
                   queued=None,
                   started=None,
                   pre_started=None,
                   pre_completed=None,
                   job_started=None,
                   job_completed=None,
                   post_started=None,
                   post_completed=None,
                   ended=None,
                   last_modified=None):
    """Creates a job execution model for unit testing

    :returns: The job execution model
    :rtype: :class:`job.models.JobExecution`
    """

    when = timezone.now()
    if not job:
        job = create_job(job_type=job_type)
    if not configuration:
        configuration = ExecutionConfiguration().get_dict()
    if not timeout:
        timeout = job.timeout
    if not node:
        node = node_utils.create_node()
    if not created:
        created = when
    if not queued:
        queued = when
    if not started:
        started = when
    if not last_modified:
        last_modified = when

    job_exe = JobExecution.objects.create(job=job,
                                          status=status,
                                          error=error,
                                          configuration=configuration,
                                          command_arguments=command_arguments,
                                          timeout=timeout,
                                          node=node,
                                          created=created,
                                          queued=queued,
                                          started=started,
                                          pre_started=pre_started,
                                          pre_completed=pre_completed,
                                          job_started=job_started,
                                          job_completed=job_completed,
                                          post_started=post_started,
                                          post_completed=post_completed,
                                          ended=ended,
                                          last_modified=last_modified)
    job_exe.set_cluster_id('1234')
    return job_exe