def from_json(self, json_str):
  """Loads JSON serialized data into self.

  Args:
    json_str (str): Json serialized TurbiniaRequest object.

  Raises:
    TurbiniaException: If json can not be loaded, or deserialized object is
        not of the correct type.
  """
  # Accept both bytes and str input; decoding failures and malformed JSON
  # both surface as ValueError (UnicodeDecodeError is a subclass).
  try:
    if isinstance(json_str, six.binary_type):
      json_str = codecs.decode(json_str, 'utf-8')
    deserialized = json.loads(json_str)
  except ValueError as exception:
    raise TurbiniaException(
        'Can not load json from string {0:s}'.format(str(exception)))

  # Refuse to hydrate from a payload serialized by a different object type.
  if deserialized.get('type', None) != self.type:
    raise TurbiniaException(
        'Deserialized object does not have type of {0:s}'.format(self.type))

  # Evidence entries were serialized as dicts; turn each back into its
  # concrete Evidence subclass before adopting the attributes.
  decoded_evidence = []
  for serialized_evidence in deserialized['evidence']:
    decoded_evidence.append(evidence.evidence_decode(serialized_evidence))
  deserialized['evidence'] = decoded_evidence

  # pylint: disable=attribute-defined-outside-init
  self.__dict__ = deserialized
def testEvidenceSerialization(self):
  """Test that evidence serializes/unserializes."""
  disk = evidence.RawDisk(name='My Evidence', source_path='/tmp/foo')

  # Round-trip through JSON and verify both the type and the attributes
  # survive the trip.
  serialized = disk.to_json()
  self.assertIsInstance(serialized, str)
  decoded = evidence.evidence_decode(json.loads(serialized))
  self.assertIsInstance(decoded, evidence.RawDisk)
  self.assertEqual(decoded.name, 'My Evidence')
def deserialize(cls, input_dict):
  """Converts an input dictionary back into a TurbiniaTaskResult object.

  Args:
    input_dict (dict): TurbiniaTaskResult object dictionary.

  Returns:
    TurbiniaTaskResult: Deserialized object.
  """
  task_result = TurbiniaTaskResult()
  task_result.__dict__.update(input_dict)

  # run_time was serialized as a number of seconds; start_time as a
  # DATETIME_FORMAT string. Rebuild the rich types.
  if task_result.run_time:
    task_result.run_time = timedelta(seconds=task_result.run_time)
  task_result.start_time = datetime.strptime(
      task_result.start_time, DATETIME_FORMAT)

  # Evidence attributes were serialized as plain dicts; decode them back
  # into Evidence objects.
  if task_result.input_evidence:
    task_result.input_evidence = evidence_decode(task_result.input_evidence)
  decoded = []
  for serialized_evidence in task_result.evidence:
    decoded.append(evidence_decode(serialized_evidence))
  task_result.evidence = decoded

  return task_result
def testEvidenceSerialization(self):
  """Test that evidence serializes/unserializes."""
  rawdisk = evidence.RawDisk(
      name=u'My Evidence', local_path=u'/tmp/foo', mount_path=u'/mnt/foo')
  rawdisk_json = rawdisk.to_json()
  # assertIsInstance produces a clearer failure message than
  # assertTrue(isinstance(...)) and matches the other tests in this file.
  self.assertIsInstance(rawdisk_json, str)
  rawdisk_new = evidence.evidence_decode(json.loads(rawdisk_json))
  self.assertIsInstance(rawdisk_new, evidence.RawDisk)
  # Attributes must survive the JSON round-trip.
  self.assertEqual(rawdisk_new.name, u'My Evidence')
  self.assertEqual(rawdisk_new.mount_path, u'/mnt/foo')
def testEvidenceCollectionDeserialization(self):
  """Test that EvidenceCollection deserializes."""
  source_disk = evidence.RawDisk(
      name='My Evidence', source_path='/tmp/foo.img')
  collection = evidence.EvidenceCollection()
  collection.name = 'testCollection'
  collection.add_evidence(source_disk)

  serialized = collection.to_json()
  self.assertIsInstance(serialized, str)

  decoded_collection = evidence.evidence_decode(json.loads(serialized))
  decoded_disk = decoded_collection.collection[0]

  # Make sure that both the collection, and the things in the collection,
  # deserialized to the correct types.
  self.assertIsInstance(decoded_collection, evidence.EvidenceCollection)
  self.assertIsInstance(decoded_disk, evidence.RawDisk)
  self.assertEqual(decoded_collection.name, 'testCollection')
  self.assertEqual(decoded_disk.name, 'My Evidence')
  self.assertEqual(decoded_disk.source_path, '/tmp/foo.img')
def run_wrapper(self, evidence):
  """Wrapper to manage TurbiniaTaskResults and exception handling.

  This wrapper should be called to invoke the run() methods so it can handle
  the management of TurbiniaTaskResults and the exception handling. Otherwise
  details from exceptions in the worker cannot be propagated back to the
  Turbinia TaskManager.

  This method should handle (in no particular order):
    - Exceptions thrown from run()
    - Verifying valid TurbiniaTaskResult object is returned
        - Check for bad results (non TurbiniaTaskResults) returned from run()
    - Auto-close results that haven't been closed
    - Verifying that the results are serializable
    - Locking to make sure only one task is active at a time

  Args:
    evidence (dict): To be decoded into Evidence object

  Returns:
    A serialized TurbiniaTaskResult (every exit path returns
    self.result.serialize()).
  """
  # Avoid circular dependency.
  from turbinia.jobs import manager as job_manager

  log.debug('Task {0:s} {1:s} awaiting execution'.format(self.name, self.id))
  evidence = evidence_decode(evidence)
  try:
    self.result = self.setup(evidence)
    self.result.update_task_status(self, 'queued')
    turbinia_worker_tasks_queued_total.inc()
  except TurbiniaException as exception:
    message = (
        '{0:s} Task setup failed with exception: [{1!s}]'.format(
            self.name, exception))
    # Logging explicitly here because the result is in an unknown state
    trace = traceback.format_exc()
    log.error(message)
    log.error(trace)
    if self.result:
      if hasattr(exception, 'message'):
        self.result.set_error(exception.message, traceback.format_exc())
      else:
        self.result.set_error(exception.__class__, traceback.format_exc())
      self.result.status = message
    else:
      # Setup failed before a result existed, so synthesize one to carry
      # the error back to the server.
      self.result = self.create_result(
          message=message, trace=traceback.format_exc())
    return self.result.serialize()

  # The file lock serializes task execution on this worker; the metrics
  # context times the whole attempt.
  with filelock.FileLock(config.LOCK_FILE):
    log.info('Starting Task {0:s} {1:s}'.format(self.name, self.id))
    original_result_id = None
    turbinia_worker_tasks_started_total.inc()
    task_runtime_metrics = self.get_metrics()
    with task_runtime_metrics.time():
      try:
        original_result_id = self.result.id

        # Check if Task's job is available for the worker.
        active_jobs = list(job_manager.JobsManager.GetJobNames())
        if self.job_name.lower() not in active_jobs:
          message = (
              'Task will not run due to the job: {0:s} being disabled '
              'on the worker.'.format(self.job_name))
          self.result.log(message, level=logging.ERROR)
          self.result.status = message
          return self.result.serialize()

        self.evidence_setup(evidence)

        # Refuse to run when worker and server code versions differ.
        if self.turbinia_version != turbinia.__version__:
          message = (
              'Worker and Server versions do not match: {0:s} != {1:s}'
              .format(self.turbinia_version, turbinia.__version__))
          self.result.log(message, level=logging.ERROR)
          self.result.status = message
          return self.result.serialize()

        self.result.update_task_status(self, 'running')
        self._evidence_config = evidence.config
        self.result = self.run(evidence, self.result)

      # pylint: disable=broad-except
      except Exception as exception:
        message = (
            '{0:s} Task failed with exception: [{1!s}]'.format(
                self.name, exception))
        # Logging explicitly here because the result is in an unknown state
        trace = traceback.format_exc()
        log_and_report(message, trace)

        if self.result:
          self.result.log(message, level=logging.ERROR)
          self.result.log(trace)
          if hasattr(exception, 'message'):
            self.result.set_error(exception.message, traceback.format_exc())
          else:
            self.result.set_error(
                exception.__class__, traceback.format_exc())
          self.result.status = message
        else:
          log.error(
              'No TurbiniaTaskResult object found after task execution.')

    self.result = self.validate_result(self.result)
    if self.result:
      self.result.update_task_status(self)

    # Trying to close the result if possible so that we clean up what we can.
    # This has a higher likelihood of failing because something must have gone
    # wrong as the Task should have already closed this.
    if self.result and not self.result.closed:
      message = 'Trying last ditch attempt to close result'
      log.warning(message)
      self.result.log(message)

      if self.result.status:
        status = self.result.status
      else:
        status = 'No previous status'
      message = (
          'Task Result was auto-closed from task executor on {0:s} likely '
          'due to previous failures. Previous status: [{1:s}]'.format(
              self.result.worker_name, status))
      self.result.log(message)
      try:
        self.result.close(self, False, message)
      # Using broad except here because lots can go wrong due to the reasons
      # listed above.
      # pylint: disable=broad-except
      except Exception as exception:
        log.error('TurbiniaTaskResult close failed: {0!s}'.format(exception))
        if not self.result.status:
          self.result.status = message
      # Check the result again after closing to make sure it's still good.
      self.result = self.validate_result(self.result)

  # A changed result id means validate_result had to replace the original
  # object, which indicates errors occurred during execution.
  if original_result_id != self.result.id:
    log.debug(
        'Result object {0:s} is different from original {1!s} after task '
        'execution which indicates errors during execution'.format(
            self.result.id, original_result_id))
  else:
    log.debug(
        'Returning original result object {0:s} after task execution'.format(
            self.result.id))
  return self.result.serialize()
def run_wrapper(self, evidence):
  """Wrapper to manage TurbiniaTaskResults and exception handling.

  This wrapper should be called to invoke the run() methods so it can handle
  the management of TurbiniaTaskResults and the exception handling. Otherwise
  details from exceptions in the worker cannot be propagated back to the
  Turbinia TaskManager.

  This method should handle (in no particular order):
    - Exceptions thrown from run()
    - Verifying valid TurbiniaTaskResult object is returned
        - Check for bad results (non TurbiniaTaskResults) returned from run()
    - Auto-close results that haven't been closed
    - Verifying that the results are serializable
    - Locking to make sure only one task is active at a time

  Args:
    evidence (dict): To be decoded into Evidence object

  Returns:
    A serialized TurbiniaTaskResult.
  """
  log.debug('Task {0:s} {1:s} awaiting execution'.format(self.name, self.id))
  evidence = evidence_decode(evidence)
  # The file lock serializes task execution on this worker.
  with filelock.FileLock(config.LOCK_FILE):
    log.info('Starting Task {0:s} {1:s}'.format(self.name, self.id))
    original_result_id = None
    try:
      self.result = self.setup(evidence)
      original_result_id = self.result.id
      # Refuse to run when worker and server code versions differ.
      if self.turbinia_version != turbinia.__version__:
        msg = (
            'Worker and Server versions do not match: {0:s} != {1:s}'.format(
                self.turbinia_version, turbinia.__version__))
        self.result.log(msg, level=logging.ERROR)
        self.result.status = msg
        # Fix: return the serialized result, consistent with every other
        # exit path of this method (callers expect a serialized result).
        return self.result.serialize()
      self._evidence_config = evidence.config
      self.result = self.run(evidence, self.result)
    # pylint: disable=broad-except
    except Exception as e:
      msg = '{0:s} Task failed with exception: [{1!s}]'.format(self.name, e)
      # Logging explicitly here because the result is in an unknown state
      trace = traceback.format_exc()
      log.error(msg)
      log.error(trace)
      if self.result:
        self.result.log(msg, level=logging.ERROR)
        self.result.log(trace)
        if hasattr(e, 'message'):
          self.result.set_error(e.message, traceback.format_exc())
        else:
          self.result.set_error(e.__class__, traceback.format_exc())
        self.result.status = msg
      else:
        log.error('No TurbiniaTaskResult object found after task execution.')

    self.result = self.validate_result(self.result)

    # Trying to close the result if possible so that we clean up what we can.
    # This has a higher likelihood of failing because something must have gone
    # wrong as the Task should have already closed this.
    if self.result and not self.result.closed:
      msg = 'Trying last ditch attempt to close result'
      log.warning(msg)
      self.result.log(msg)

      if self.result.status:
        status = self.result.status
      else:
        status = 'No previous status'
      msg = (
          'Task Result was auto-closed from task executor on {0:s} likely '
          'due to previous failures. Previous status: [{1:s}]'.format(
              self.result.worker_name, status))
      self.result.log(msg)
      try:
        self.result.close(self, False, msg)
      # Using broad except here because lots can go wrong due to the reasons
      # listed above.
      # pylint: disable=broad-except
      except Exception as e:
        log.error('TurbiniaTaskResult close failed: {0!s}'.format(e))
        if not self.result.status:
          self.result.status = msg
      # Check the result again after closing to make sure it's still good.
      self.result = self.validate_result(self.result)

  # A changed result id means validate_result had to replace the original
  # object, which indicates errors occurred during execution.
  if original_result_id != self.result.id:
    log.debug(
        'Result object {0:s} is different from original {1!s} after task '
        'execution which indicates errors during execution'.format(
            self.result.id, original_result_id))
  else:
    log.debug(
        'Returning original result object {0:s} after task execution'.format(
            self.result.id))
  return self.result.serialize()