Example #1
    def postprocess(self, task_id):
        """Runs our postprocessing code, then our possible parent's evidence.

    This is is a wrapper function that will run our post-processor, and will
    then recurse down the chain of parent Evidence and run those post-processors
    in order.

    Args:
      task_id(str): The id of a given Task.
    """
        log.info('Starting post-processor for evidence {0:s}'.format(
            self.name))
        log.debug('Evidence state: {0:s}'.format(self.format_state()))

        is_detachable = True
        if self.resource_tracked:
            with filelock.FileLock(config.RESOURCE_FILE_LOCK):
                # Run postprocess to either remove task_id or resource_id.
                is_detachable = resource_manager.PostProcessResourceState(
                    self.resource_id, task_id)
                if not is_detachable:
                    # Don't run the post-process code while other tasks are
                    # still using this resource.
                    log.info(
                        'Resource ID {0:s} still in use. Skipping detaching Evidence...'
                        .format(self.resource_id))

        if is_detachable:
            self._postprocess()
        if self.parent_evidence:
            self.parent_evidence.postprocess(task_id)
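
For context, here is a minimal sketch of the call order that wrapper produces: the Evidence's own _postprocess() runs first, then postprocess() recurses down the parent_evidence chain. FakeEvidence is a hypothetical stand-in, not the real Turbinia Evidence class, and the resource-tracking/locking logic is deliberately omitted.

    class FakeEvidence:
        """Toy stand-in used only to illustrate the parent-chain recursion."""

        def __init__(self, name, parent_evidence=None):
            self.name = name
            self.parent_evidence = parent_evidence

        def _postprocess(self):
            print('Post-processing {0:s}'.format(self.name))

        def postprocess(self, task_id):
            # Run our own post-processor first, then recurse into the parent
            # chain (same ordering as the wrapper above, minus resource tracking).
            self._postprocess()
            if self.parent_evidence:
                self.parent_evidence.postprocess(task_id)

    disk = FakeEvidence('rawdisk')
    partition = FakeEvidence('partition', parent_evidence=disk)
    partition.postprocess('task_id_1')
    # Output:
    #   Post-processing partition
    #   Post-processing rawdisk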
Example #2
    def testPostProcessResourceState(self):
        """Tests the PostProcessResourceState() method."""
        resource_id_1 = "resource_id_1"
        resource_id_2 = "resource_id_2"
        task_id_1 = "task_id_1"
        task_id_2 = "task_id_2"

        # Add test resource ids and task ids into state file
        resource_manager.PreprocessResourceState(resource_id_1, task_id_1)
        resource_manager.PreprocessResourceState(resource_id_1, task_id_2)
        resource_manager.PreprocessResourceState(resource_id_2, task_id_1)

        # Test that the task id was removed from the resource id's entry
        json_out_1 = {resource_id_1: [task_id_2], resource_id_2: [task_id_1]}
        is_detachable = resource_manager.PostProcessResourceState(
            resource_id_1, task_id_1)
        self.assertEqual(resource_manager.RetrieveResourceState(), json_out_1)
        self.assertEqual(is_detachable, False)

        # Test that resource id was removed from resource state
        json_out_2 = {resource_id_2: [task_id_1]}
        is_detachable = resource_manager.PostProcessResourceState(
            resource_id_1, task_id_2)
        self.assertEqual(resource_manager.RetrieveResourceState(), json_out_2)
        self.assertEqual(is_detachable, True)

        # Test that a non-existent task id does not raise an error
        is_detachable = resource_manager.PostProcessResourceState(
            resource_id_2, task_id_2)
        self.assertEqual(resource_manager.RetrieveResourceState(), json_out_2)
        self.assertEqual(is_detachable, False)

        # Test that removing the last task id empties the resource state
        json_out_3 = {}
        is_detachable = resource_manager.PostProcessResourceState(
            resource_id_2, task_id_1)
        self.assertEqual(resource_manager.RetrieveResourceState(), json_out_3)
        self.assertEqual(is_detachable, True)
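
The test above pins down the reference-counting semantics of the resource state: each resource id maps to the list of task ids still using it, and PostProcessResourceState() only reports the resource as detachable once that list is empty. The snippet below is a simplified stand-in operating on a plain dict, not the real resource_manager implementation (which also manages the state file and locking), that reproduces the same behaviour.

    def post_process_resource_state(state, resource_id, task_id):
        """Removes task_id from resource_id; True when no task ids remain."""
        task_ids = state.get(resource_id, [])
        if task_id in task_ids:
            task_ids.remove(task_id)
        if task_ids:
            # Other tasks still reference this resource, so don't detach yet.
            state[resource_id] = task_ids
            return False
        # Last user is gone: drop the entry and signal that detaching is safe.
        state.pop(resource_id, None)
        return True

    state = {'resource_id_1': ['task_id_1', 'task_id_2']}
    print(post_process_resource_state(state, 'resource_id_1', 'task_id_1'))
    # -> False (task_id_2 still holds the resource)
    print(post_process_resource_state(state, 'resource_id_1', 'task_id_2'))
    # -> True (no task ids left, so the resource can be detached)
    print(state)  # -> {}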
Example #3
    def close(self, task, success, status=None):
        """Handles closing of this result and writing logs.

        Normally this should be called by the Run method to make sure that the
        status, etc. are set correctly, but if an exception is thrown while
        the task executes, then run_wrapper will call this with default
        arguments indicating a failure.

        Args:
          task (TurbiniaTask): The calling Task object.
          success (bool): Indicates whether the task succeeded.
          status (str): One-line descriptive task status.
        """

        if self.closed:
            # Don't try to close twice.
            return
        self.successful = success
        self.run_time = datetime.now() - self.start_time
        if success:
            turbinia_worker_tasks_completed_total.inc()
        else:
            turbinia_worker_tasks_failed_total.inc()
        if not status and self.successful:
            status = 'Completed successfully in {0:s} on {1:s}'.format(
                str(self.run_time), self.worker_name)
        elif not status and not self.successful:
            status = 'Run failed in {0:s} on {1:s}'.format(
                str(self.run_time), self.worker_name)
        self.log(status)
        self.status = status

        for evidence in self.evidence:
            if evidence.source_path:
                if os.path.exists(evidence.source_path):
                    self.saved_paths.append(evidence.source_path)
                    if evidence.copyable:
                        task.output_manager.save_evidence(evidence, self)
                else:
                    self.log(
                        'Evidence {0:s} is missing its file at source_path '
                        '{1!s}, so not saving.'.format(evidence.name,
                                                       evidence.source_path))
            else:
                self.log('Evidence {0:s} has an empty source_path, so '
                         'not saving.'.format(evidence.name))

            if not evidence.request_id:
                evidence.request_id = self.request_id

        if self.input_evidence:
            try:
                self.input_evidence.postprocess(task_id=self.task_id)
            # Catch a broad exception here because we want post-processing to
            # run (to clean things up) even after other failures in the task,
            # and the post-processing itself could also fail.
            # pylint: disable=broad-except
            except Exception as exception:
                message = 'Evidence post-processing for {0!s} failed: {1!s}'.format(
                    self.input_evidence.name, exception)
                self.log(message, level=logging.ERROR)
                with filelock.FileLock(config.RESOURCE_FILE_LOCK):
                    resource_manager.PostProcessResourceState(
                        self.input_evidence.resource_id, self.task_id)
        else:
            self.log(
                'No input evidence attached to the result object so post-processing '
                'cannot be run. This usually means there were previous failures '
                'during Task execution and this may result in resources (e.g. '
                'mounted disks) accumulating on the Worker.',
                level=logging.WARNING)

        # Now that we've post-processed the input_evidence, we can unset it
        # because we don't need to return it.
        self.input_evidence = None

        if not self.no_output_manager:
            # Write result log info to file
            logfile = os.path.join(self.output_dir, 'worker-log.txt')
            # Add default log text so that the worker log file is always
            # created, to avoid confusion if it doesn't exist.
            if not self._log:
                self._log.append('No worker messages were logged.')
            if self.output_dir and os.path.exists(self.output_dir):
                with open(logfile, 'w') as f:
                    f.write('\n'.join(self._log))
                    f.write('\n')
                task.output_manager.save_local_file(logfile, self)

        self.closed = True
        log.debug('Result close successful. Status is [{0:s}]'.format(
            self.status))
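
As the docstring notes, close() is normally reached from the task's run path on success, while run_wrapper closes the result with failure defaults when the task raises. The sketch below only illustrates those two call sites; run_task and its status strings are hypothetical, not the actual Turbinia run_wrapper code.

    def run_task(task, result):
        """Hypothetical caller showing both paths into close()."""
        try:
            # ... task-specific processing that populates `result` ...
            result.close(task, success=True, status='Processing finished')
        # pylint: disable=broad-except
        except Exception as exception:
            # Failure path: close the result with a failure status so logs and
            # evidence post-processing still run.
            result.close(
                task, success=False,
                status='Task failed with exception: {0!s}'.format(exception))
        return result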