def receive_event(self, topic, data):
    """Handle a LAVA event notification for a tracked test job.

    Ignores everything except ``testjob`` topics, matches the event
    against exactly one pending (submitted, not yet fetched) job,
    records the reported status, lazily resolves the job name from the
    submitted definition, and schedules a result fetch once the job
    reaches a completed status.
    """
    # Only testjob topics carry job state updates.
    if topic.split('.')[-1] != "testjob":
        return
    lava_id = data.get('job')
    if not lava_id:
        return
    # Multinode jobs identify the individual node via sub_id.
    if 'sub_id' in data:
        lava_id = data['sub_id']
    lava_status = data.get('state', 'Unknown')
    candidates = self.data.test_jobs.filter(
        submitted=True, fetched=False, job_id=lava_id)
    # Exactly one pending job must match, otherwise the event is ignored.
    if not (candidates.exists() and len(candidates) == 1):
        return
    self.log_debug("interesting message received: %r" % data)
    job = candidates[0]
    job.job_status = lava_status
    if lava_status == 'Finished':
        # Finished jobs report the actual verdict in the health field.
        job.job_status = data.get('health', 'Unknown')
    if job.name is None:
        # fetch job name once
        details = self.__get_job_details__(lava_id)
        definition = yaml.safe_load(details['definition'])
        job.name = definition['job_name'][:255]
    job.save()
    if job.job_status in self.complete_statuses:
        self.log_info("scheduling fetch for job %s" % job.job_id)
        fetch.apply_async(args=[job.id])
def receive_event(self, topic, data):
    """Handle a LAVA event notification for a tracked test job.

    Filters out non-``testjob`` topics, matches the event against
    exactly one pending (submitted, not yet fetched) job, records the
    reported status, lazily resolves the job name from the submitted
    definition, and schedules a delayed result fetch once the job
    reaches a completed status.
    """
    if topic.split('.')[-1] != "testjob":
        return
    lava_id = data.get('job')
    if not lava_id:
        return
    # Multinode jobs identify the individual node via sub_id.
    if 'sub_id' in data:
        lava_id = data['sub_id']
    # Robustness: a malformed event without a status must not crash
    # the listener (sibling handlers default missing fields the same way).
    lava_status = data.get('status', 'Unknown')
    db_test_job_list = self.data.test_jobs.filter(
        submitted=True, fetched=False, job_id=lava_id)
    if db_test_job_list.exists() and \
            len(db_test_job_list) == 1:
        self.log_debug("interesting message received: %r" % data)
    else:
        return
    job = db_test_job_list[0]
    job.job_status = lava_status
    if job.name is None:
        # fetch job name once
        data = self.__get_job_details__(lava_id)
        # NOTE(review): non-pipeline jobs bail out here without saving the
        # status update — preserved as-is; confirm this is intentional.
        if data['is_pipeline'] is False:
            return
        # safe_load: the definition comes from an external service and
        # must never be parsed with the arbitrary-object yaml.load.
        definition = yaml.safe_load(data['definition'])
        if data['multinode_definition']:
            definition = yaml.safe_load(data['multinode_definition'])
        job.name = definition['job_name'][:255]
    job.save()
    if lava_status in self.complete_statuses:
        self.log_info("scheduling fetch for job %s" % job.job_id)
        # introduce 2 min delay to allow LAVA for storing all results
        # this workaround should be removed once LAVA issue is fixed
        fetch.apply_async(args=[job.id], countdown=120)
def listen(self):
    """Poll the database forever, dispatching a fetch task for every
    newly submitted, not-yet-fetched test job.

    ``max_id`` tracks the highest job id already dispatched so each
    job is queued at most once per listener process.
    """
    max_id = 0
    while True:
        # Randomized sleep spreads database load between iterations.
        time.sleep(random.randint(1, 5))
        pending = self.data.test_jobs.filter(
            submitted=True,
            fetched=False,
            id__gt=max_id,
        ).order_by('id')
        for job in pending:
            fetch.apply_async(args=[job.id])
            max_id = job.id
def handle(self, *args, **options):
    """Create a throwaway build plus test job for the given backend and
    project, then fetch its results either inline or via a Celery task.

    Expects the options BACKEND, JOBID, PROJECT ("group/project" slug
    pair) and the boolean flag "background".
    """
    backend_name = options.get("BACKEND")
    job_id = options.get("JOBID")
    # PROJECT must be exactly "group/project"; a malformed value raises.
    group_slug, project_slug = options.get("PROJECT").split('/')

    backend = Backend.objects.get(name=backend_name)
    group, _ = Group.objects.get_or_create(slug=group_slug)
    project, _ = group.projects.get_or_create(slug=project_slug)
    # Version the build with the current timestamp so each run is unique.
    build = project.builds.create(version=str(time.time()))
    testjob = backend.test_jobs.create(
        target=project,
        job_id=job_id,
        target_build=build,
    )

    if options.get("background"):
        fetch.apply_async(args=(testjob.id, ), task_id=task_id(testjob))
    else:
        backend.fetch(testjob.id)
def receive_event(self, topic, data):
    """Handle a LAVA event notification for a tracked test job.

    Filters out non-``testjob`` topics, matches the event against
    exactly one pending (submitted, not yet fetched) job, records the
    reported status (using the health verdict for finished jobs),
    lazily resolves the job name from the submitted definition, and
    schedules a delayed result fetch once the job reaches a completed
    status.
    """
    if topic.split('.')[-1] != "testjob":
        return
    lava_id = data.get('job')
    if not lava_id:
        return
    # Multinode jobs identify the individual node via sub_id.
    if 'sub_id' in data:
        lava_id = data['sub_id']
    lava_status = data.get('state', 'Unknown')
    db_test_job_list = self.data.test_jobs.filter(
        submitted=True, fetched=False, job_id=lava_id)
    if db_test_job_list.exists() and \
            len(db_test_job_list) == 1:
        self.log_debug("interesting message received: %r" % data)
    else:
        return
    job = db_test_job_list[0]
    job.job_status = lava_status
    if lava_status == 'Finished':
        # Finished jobs report the actual verdict in the health field.
        lava_health = data.get('health', 'Unknown')
        job.job_status = lava_health
    if job.name is None:
        # fetch job name once
        data = self.__get_job_details__(lava_id)
        # safe_load: the definition comes from an external service and
        # must never be parsed with the arbitrary-object yaml.load.
        definition = yaml.safe_load(data['definition'])
        if data['multinode_definition']:
            definition = yaml.safe_load(data['multinode_definition'])
        job.name = definition['job_name'][:255]
    job.save()
    if job.job_status in self.complete_statuses:
        self.log_info("scheduling fetch for job %s" % job.job_id)
        # introduce 2 min delay to allow LAVA for storing all results
        # this workaround should be removed once LAVA issue is fixed
        fetch.apply_async(args=[job.id], countdown=120)
def fetch_job(modeladmin, request, queryset):
    """Admin action: queue a result-fetch task for each selected test job."""
    for selected in queryset:
        fetch.apply_async(args=(selected.id, ), task_id=task_id(selected))