def run(self, node_args):
    """To be transparently called by Selinon.

    Selinon transparently calls run(), which takes care of task audit and
    some additional checks and calls execute().
    """
    # SQS guarantees 'deliver at least once', so there could be multiple
    # messages of a type, give up immediately
    postgres_backed = self.storage and isinstance(
        self.storage, (BayesianPostgres, PackagePostgres))
    if postgres_backed and self.storage.get_worker_id_count(self.task_id) > 0:
        raise TaskAlreadyExistsError("Task with ID '%s'"
                                     " was already processed" % self.task_id)

    started = datetime.utcnow()
    try:
        task_result = self.execute(node_args)
    finally:
        # remove all files that were downloaded for this task
        ObjectCache.wipe()
    finished = datetime.utcnow()

    if task_result:
        # Ensure result complies with the defined schema (if any) before saving
        self.validate_result(task_result)
    elif task_result is None:
        # Keep track of None results and add _audit and _release keys
        task_result = {}

    if self.add_audit_info:
        # `_audit` key is added to every analysis info submitted
        task_result['_audit'] = {
            'started_at': json_serial(started),
            'ended_at': json_serial(finished),
            'version': 'v1'
        }

        ecosystem_name = node_args.get('ecosystem')
        task_result['_release'] = '{}:{}:{}'.format(ecosystem_name,
                                                    node_args.get('name'),
                                                    node_args.get('version'))
    return task_result
def run(self, node_args):
    """To be transparently called by Selinon.

    Selinon transparently calls run(), which takes care of task audit and
    some additional checks and calls execute().

    :param node_args: arguments passed to this task by the flow
        (presumably a dict carrying 'ecosystem'/'name'/'version' keys --
        confirm against the dispatcher)
    :return: result of execute(), possibly enriched with '_audit' info
    :raises TaskAlreadyExistsError: if a task with this id was already
        processed (duplicate SQS delivery)
    """
    # SQS guarantees 'deliver at least once', so there could be multiple
    # messages of a type, give up immediately
    if self.storage and isinstance(self.storage, (BayesianPostgres, PackagePostgres)):
        if self.storage.get_worker_id_count(self.task_id) > 0:
            raise TaskAlreadyExistsError("Task with ID '%s'"
                                         " was already processed" % self.task_id)

    start = datetime.utcnow()
    try:
        result = self.execute(node_args)
    except Exception:
        if self.add_audit_info:
            # `_audit` key is added to every analysis info submitted
            end = datetime.utcnow()
            result = {}
            self._add_audit_info(
                task_result=result,
                task_start=start,
                task_end=end,
                node_args=node_args,
            )
            # write the audit info to the storage
            # NOTE(review): assumes self.storage is set whenever
            # add_audit_info is true -- confirm for storage-less tasks
            self.storage.store_error(
                node_args=node_args,
                flow_name=self.flow_name,
                task_name=self.task_name,
                task_id=self.task_id,
                exc_info=sys.exc_info(),
                result=result
            )
        # bare `raise` re-raises the active exception with its original
        # traceback intact (idiomatic; `raise exc` adds a redundant frame)
        raise
    finally:
        # remove all files that were downloaded for this task
        ObjectCache.wipe()
    end = datetime.utcnow()

    if result:
        # Ensure result complies with the defined schema (if any) before saving
        self.validate_result(result)
    if result is None:
        # Keep track of None results and add _audit and _release keys
        result = {}

    if self.add_audit_info:
        # `_audit` key is added to every analysis info submitted
        self._add_audit_info(
            task_result=result,
            task_start=start,
            task_end=end,
            node_args=node_args,
        )
    return result