def process_event(self, event, last_event=None):
    """See :meth:`job.clock.ClockEventProcessor.process_event`.

    Compares the new event with the last event and queues a daily metrics job
    for any day that is missing metrics.

    :param event: The new clock trigger event
    :type event: :class:`trigger.models.TriggerEvent`
    :param last_event: The last clock trigger event that was processed, if any
    :type last_event: :class:`trigger.models.TriggerEvent`
    :raises :class:`job.clock.ClockEventError`: If the required daily metrics
        job type does not exist
    """

    # Attempt to get the daily metrics job type.
    # NOTE: QuerySet.last() returns None for an empty result instead of
    # raising JobType.DoesNotExist, so the previous try/except around this
    # call could never fire - check the result for None explicitly.
    job_type = JobType.objects.filter(name='scale-daily-metrics').last()
    if not job_type:
        raise ClockEventError('Missing required job type: scale-daily-metrics')

    if last_event:
        # Build a list of days that require metrics
        day_count = xrange((event.occurred.date() - last_event.occurred.date()).days)
        days = [last_event.occurred.date() + datetime.timedelta(days=d) for d in day_count]
    else:
        # Use the previous day when first triggered
        days = [timezone.now().date() - datetime.timedelta(days=1)]

    # Schedule one job for each required day
    for day in days:
        job_data = JobData()
        job_data.add_property_input('Day', day.strftime('%Y-%m-%d'))
        Queue.objects.queue_new_job(job_type, job_data, event)
def _handle_job_finished(self, job_exe):
    """Handles a job execution reaching a final status (COMPLETED, FAILED, or
    CANCELED). The caller must have obtained a model lock on the given job_exe
    model. All database changes occur in an atomic transaction.

    :param job_exe: The job execution that finished
    :type job_exe: :class:`job.models.JobExecution`
    """

    if not job_exe.is_finished:
        raise Exception('Job execution is not finished in status %s' % job_exe.status)

    # Only executions that require cleanup get a cleanup job
    if not job_exe.requires_cleanup:
        return

    if job_exe.cleanup_job:
        raise Exception('Job execution already has a cleanup job')

    # Queue a new cleanup job tied to this execution
    cleanup_job_type = JobType.objects.get_cleanup_job_type()
    cleanup_data = JobData()
    cleanup_data.add_property_input('Job Exe ID', str(job_exe.id))
    event_desc = {'job_exe_id': job_exe.id, 'node_id': job_exe.node_id}
    cleanup_event = TriggerEvent.objects.create_trigger_event('CLEANUP', None, event_desc, timezone.now())
    cleanup_job = Queue.objects.queue_new_job(cleanup_job_type, cleanup_data, cleanup_event)

    # Link the new cleanup job back to the execution
    job_exe.cleanup_job_id = cleanup_job.id
    job_exe.save()
def _start_ingest_task(self, ingest):
    """Starts a task for the given ingest in an atomic transaction

    :param ingest: The ingest model
    :type ingest: :class:`ingest.models.Ingest`
    """

    logger.info('Creating ingest task for %s', ingest.file_name)

    # Create new ingest job and mark ingest as QUEUED
    job_type = Ingest.objects.get_ingest_job_type()
    job_data = JobData()
    job_data.add_property_input('Ingest ID', str(ingest.id))

    event_desc = {'strike_id': self.strike_id, 'file_name': ingest.file_name}
    occurred = ingest.transfer_ended or now()
    trigger = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, event_desc, occurred)

    # Grant the ingest job read/write access to any involved workspaces
    configuration = JobConfiguration()
    for workspace in (ingest.workspace, ingest.new_workspace):
        if workspace:
            configuration.add_job_task_workspace(workspace.name, MODE_RW)

    ingest.job = Queue.objects.queue_new_job(job_type, job_data, trigger, configuration)
    ingest.status = 'QUEUED'
    ingest.save()

    logger.info('Successfully created ingest task for %s', ingest.file_name)
def _handle_job_finished(self, job_exe):
    """Handles a job execution finishing (reaching a final status of COMPLETED,
    FAILED, or CANCELED). The caller must have obtained a model lock on the
    given job_exe model. All database changes occur in an atomic transaction.

    :param job_exe: The job execution that finished
    :type job_exe: :class:`job.models.JobExecution`
    """

    if not job_exe.is_finished:
        raise Exception('Job execution is not finished in status %s' % job_exe.status)

    # Start a cleanup job if this execution requires it
    if job_exe.requires_cleanup:
        # An execution may only ever have a single cleanup job
        if job_exe.cleanup_job:
            raise Exception('Job execution already has a cleanup job')

        job_type = JobType.objects.get_cleanup_job_type()
        job_data = JobData()
        job_data.add_property_input('Job Exe ID', str(job_exe.id))
        description = {'job_exe_id': job_exe.id, 'node_id': job_exe.node_id}
        trigger = TriggerEvent.objects.create_trigger_event('CLEANUP', None, description, timezone.now())
        queued_job = Queue.objects.queue_new_job(job_type, job_data, trigger)

        job_exe.cleanup_job_id = queued_job.id
        job_exe.save()
def queue_scan(self, scan_id, dry_run=True):
    """Retrieves a Scan model and uses metadata to place a job to run the Scan
    process on the queue. All changes to the database will occur in an atomic
    transaction.

    :param scan_id: The unique identifier of the Scan process.
    :type scan_id: int
    :param dry_run: Whether the scan will execute as a dry run
    :type dry_run: bool
    :returns: The new Scan process
    :rtype: :class:`ingest.models.Scan`
    """

    scan = Scan.objects.select_for_update().get(pk=scan_id)
    job_type = self.get_scan_job_type()

    job_data = JobData()
    job_data.add_property_input('Scan ID', unicode(scan.id))
    job_data.add_property_input('Dry Run', str(dry_run))

    event_desc = {'scan_id': scan.id}

    # A real (non-dry-run) ingest job may only ever be launched once
    if scan.job:
        raise ScanIngestJobAlreadyLaunched

    if dry_run:
        trigger = TriggerEvent.objects.create_trigger_event('DRY_RUN_SCAN_CREATED', None, event_desc, now())
        scan.dry_run_job = Queue.objects.queue_new_job(job_type, job_data, trigger)
    else:
        trigger = TriggerEvent.objects.create_trigger_event('SCAN_CREATED', None, event_desc, now())
        scan.job = Queue.objects.queue_new_job(job_type, job_data, trigger)

    scan.save()
    return scan
def process_event(self, event, last_event=None):
    """See :meth:`job.clock.ClockEventProcessor.process_event`.

    Compares the new event with the last event and queues daily metrics jobs
    for any days that are missing metrics.

    :param event: The new clock trigger event
    :type event: :class:`trigger.models.TriggerEvent`
    :param last_event: The last clock trigger event processed, if any
    :type last_event: :class:`trigger.models.TriggerEvent`
    :raises :class:`job.clock.ClockEventError`: If the required daily metrics
        job type does not exist
    """

    # Attempt to get the daily metrics job type.
    # BUG FIX: filter(...).last() returns None for an empty query set and
    # never raises JobType.DoesNotExist, so the old try/except silently
    # passed a None job type along - test for None instead.
    job_type = JobType.objects.filter(name='scale-daily-metrics').last()
    if not job_type:
        raise ClockEventError('Missing required job type: scale-daily-metrics')

    if last_event:
        # Build a list of days that require metrics
        day_count = xrange((event.occurred.date() - last_event.occurred.date()).days)
        days = [last_event.occurred.date() + datetime.timedelta(days=d) for d in day_count]
    else:
        # Use the previous day when first triggered
        days = [timezone.now().date() - datetime.timedelta(days=1)]

    # Schedule one job for each required day
    for day in days:
        job_data = JobData()
        job_data.add_property_input('Day', day.strftime('%Y-%m-%d'))
        Queue.objects.queue_new_job(job_type, job_data, event)
def create_batch(self, recipe_type, definition, title=None, description=None):
    """Creates a new batch that represents a group of recipes that should be
    scheduled for re-processing. This method also queues a new system job that
    will process the batch request. All database changes occur in an atomic
    transaction.

    :param recipe_type: The type of recipes that should be re-processed
    :type recipe_type: :class:`recipe.models.RecipeType`
    :param definition: The definition for running a batch
    :type definition: :class:`batch.configuration.definition.batch_definition.BatchDefinition`
    :param title: The human-readable name of the batch
    :type title: string
    :param description: An optional description of the batch
    :type description: string
    :returns: The newly created batch
    :rtype: :class:`batch.models.Batch`

    :raises :class:`batch.exceptions.BatchError`: If general batch parameters
        are invalid or the batch creator job type is missing
    """

    # Attempt to get the batch job type.
    # BUG FIX: filter(...).last() returns None on an empty query set rather
    # than raising JobType.DoesNotExist, so the old except clause was
    # unreachable - check the result for None directly.
    job_type = JobType.objects.filter(name='scale-batch-creator').last()
    if not job_type:
        raise BatchError('Missing required job type: scale-batch-creator')

    # Create an event to represent this request
    trigger_desc = {'user': '******'}
    event = TriggerEvent.objects.create_trigger_event('USER', None, trigger_desc, timezone.now())

    batch = Batch()
    batch.title = title
    batch.description = description
    batch.recipe_type = recipe_type
    batch.definition = definition.get_dict()
    batch.event = event
    batch.save()

    # Setup the job data to process the batch
    data = JobData()
    data.add_property_input('Batch ID', str(batch.id))

    # Schedule the batch job and link it back to the batch
    job = Queue.objects.queue_new_job(job_type, data, event)
    batch.creator_job = job
    batch.save()

    return batch
def start_ingest_tasks(self, ingests, scan_id=None, strike_id=None):
    """Starts a batch of tasks for the given scan in an atomic transaction.

    One of scan_id or strike_id must be set.

    :param ingests: The ingest models
    :type ingests: list[:class:`ingest.models.Ingest`]
    :param scan_id: ID of Scan that generated ingest
    :type scan_id: int
    :param strike_id: ID of Strike that generated ingest
    :type strike_id: int
    """

    # Create new ingest job and mark ingest as QUEUED
    job_type = Ingest.objects.get_ingest_job_type()

    for ingest in ingests:
        logger.debug('Creating ingest task for %s', ingest.file_name)

        occurred = ingest.transfer_ended if ingest.transfer_ended else now()
        description = {'file_name': ingest.file_name}

        if scan_id:
            # We need to find the id of each ingest that was created.
            # Using scan_id and file_name together as a unique composite key
            ingest_id = Ingest.objects.get(scan_id=ingest.scan_id, file_name=ingest.file_name).id
            description['scan_id'] = scan_id
            trigger = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, description, occurred)
        elif strike_id:
            ingest_id = ingest.id
            description['strike_id'] = strike_id
            trigger = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, description, occurred)
        else:
            raise Exception('One of scan_id or strike_id must be set')

        job_data = JobData()
        job_data.add_property_input('Ingest ID', str(ingest_id))

        # Grant the ingest job read/write access to any involved workspaces
        configuration = ExecutionConfiguration()
        for workspace in (ingest.workspace, ingest.new_workspace):
            if workspace:
                configuration.add_job_task_workspace(workspace.name, MODE_RW)

        ingest.job = Queue.objects.queue_new_job(job_type, job_data, trigger, configuration)
        ingest.status = 'QUEUED'
        ingest.save()

        logger.debug('Successfully created ingest task for %s', ingest.file_name)
def test_successful(self):
    """Tests calling JobData.add_property_input() successfully."""

    data = {'input_data': []}
    job_data = JobData(data)

    # Exercise the method under test; the validate call below checks it worked
    job_data.add_property_input('Param1', 'Value1')

    # No exception and no warnings means success
    warnings = JobData(data).validate_properties({'Param1': True})
    self.assertFalse(warnings)
def test_successful(self):
    """Tests calling JobData.add_property_input() successfully."""

    raw_data = {'input_data': []}
    job_data = JobData(raw_data)

    # Method to test, we will test it by calling validate below
    job_data.add_property_input('Param1', 'Value1')

    expected_properties = {'Param1': True}

    # No exception is success
    warnings = JobData(raw_data).validate_properties(expected_properties)
    self.assertFalse(warnings)
def create_batch(self, recipe_type, definition, title=None, description=None):
    """Creates a new batch that represents a group of recipes that should be
    scheduled for re-processing. This method also queues a new system job that
    will process the batch request. All database changes occur in an atomic
    transaction.

    :param recipe_type: The type of recipes that should be re-processed
    :type recipe_type: :class:`recipe.models.RecipeType`
    :param definition: The definition for running a batch
    :type definition: :class:`batch.configuration.definition.batch_definition.BatchDefinition`
    :param title: The human-readable name of the batch
    :type title: string
    :param description: An optional description of the batch
    :type description: string
    :returns: The newly created batch
    :rtype: :class:`batch.models.Batch`

    :raises :class:`batch.exceptions.BatchError`: If general batch parameters
        are invalid or the batch creator job type is missing
    """

    # Attempt to get the batch job type.
    # NOTE: QuerySet.last() yields None when nothing matches and never raises
    # JobType.DoesNotExist; the previous try/except could not catch a missing
    # job type, so validate the returned value instead.
    job_type = JobType.objects.filter(name='scale-batch-creator').last()
    if not job_type:
        raise BatchError('Missing required job type: scale-batch-creator')

    # Create an event to represent this request
    trigger_desc = {'user': '******'}
    event = TriggerEvent.objects.create_trigger_event('USER', None, trigger_desc, timezone.now())

    batch = Batch()
    batch.title = title
    batch.description = description
    batch.recipe_type = recipe_type
    batch.definition = definition.get_dict()
    batch.event = event
    batch.save()

    # Setup the job data to process the batch
    data = JobData()
    data.add_property_input('Batch ID', str(batch.id))

    # Schedule the batch job and record it as the batch's creator job
    job = Queue.objects.queue_new_job(job_type, data, event)
    batch.creator_job = job
    batch.save()

    return batch
def create_strike(self, name, title, description, configuration):
    """Creates a new Strike process with the given configuration and returns
    the new Strike model. The Strike model will be saved in the database and
    the job to run the Strike process will be placed on the queue. All changes
    to the database will occur in an atomic transaction.

    :param name: The identifying name of this Strike process
    :type name: string
    :param title: The human-readable name of this Strike process
    :type title: string
    :param description: A description of this Strike process
    :type description: string
    :param configuration: The Strike configuration
    :type configuration: dict
    :returns: The new Strike process
    :rtype: :class:`ingest.models.Strike`

    :raises :class:`ingest.strike.configuration.exceptions.InvalidStrikeConfiguration`:
        If the configuration is invalid.
    """

    # Validate the configuration, no exception is success
    strike_config = StrikeConfiguration(configuration)
    strike_config.validate()

    strike = Strike()
    strike.name = name
    strike.title = title
    strike.description = description
    strike.configuration = strike_config.get_dict()
    strike.save()

    # Queue the job that runs this Strike process and link it to the model
    job_type = self.get_strike_job_type()
    job_data = JobData()
    job_data.add_property_input('Strike ID', unicode(strike.id))
    trigger = TriggerEvent.objects.create_trigger_event('STRIKE_CREATED', None, {'strike_id': strike.id}, now())
    strike.job = Queue.objects.queue_new_job(job_type, job_data, trigger)
    strike.save()

    return strike
def create_strike(self, name, title, description, configuration):
    """Creates a new Strike process with the given configuration and returns
    the new Strike model. The Strike model will be saved in the database and
    the job to run the Strike process will be placed on the queue. All changes
    to the database will occur in an atomic transaction.

    :param name: The identifying name of this Strike process
    :type name: string
    :param title: The human-readable name of this Strike process
    :type title: string
    :param description: A description of this Strike process
    :type description: string
    :param configuration: The Strike configuration
    :type configuration: dict
    :returns: The new Strike process
    :rtype: :class:`ingest.models.Strike`

    :raises :class:`ingest.strike.configuration.exceptions.InvalidStrikeConfiguration`:
        If the configuration is invalid.
    """

    # Validate the configuration, no exception is success
    validated_config = StrikeConfiguration(configuration)
    validated_config.validate()

    # Persist the Strike model first so it has an ID for the job data below
    strike = Strike()
    strike.name = name
    strike.title = title
    strike.description = description
    strike.configuration = validated_config.get_dict()
    strike.save()

    strike_job_type = self.get_strike_job_type()
    data = JobData()
    data.add_property_input('Strike ID', unicode(strike.id))
    event_desc = {'strike_id': strike.id}
    event = TriggerEvent.objects.create_trigger_event('STRIKE_CREATED', None, event_desc, now())

    strike.job = Queue.objects.queue_new_job(strike_job_type, data, event)
    strike.save()

    return strike