def watch_job(request, group_slug, project_slug, version, environment_slug):
    backend_name = request.POST.get('backend')
    if backend_name is None:
        return HttpResponseBadRequest("backend field is required")

    backend = None
    try:
        backend = Backend.objects.get(name=backend_name)
    except Backend.DoesNotExist:
        return HttpResponseBadRequest("requested backend does not exist")

    # project has to exist or the request will result in 400
    project = request.project
    if backend is None or project is None:
        return HttpResponseBadRequest("malformed request")

    # create Build object
    build, _ = project.builds.get_or_create(version=version)

    # testjob_id points to the backend's test job
    testjob_id = request.POST.get('testjob_id', None)
    if testjob_id is None:
        return HttpResponseBadRequest("testjob_id is required")

    # create TestJob object
    test_job = TestJob(
        backend=backend,
        target=project,
        target_build=build,
        environment=environment_slug,
        submitted=True,
        job_id=testjob_id)

    # sanitize job_url
    try:
        backend.get_implementation().job_url(test_job)
    except Exception as e:
        return HttpResponseBadRequest(e)

    # save it to db
    test_job.save()
    log_addition(request, test_job, "Watch Job submission")

    # schedule a fetch task on this job right away
    fetch.delay(test_job.id)

    # return ID of test job
    return HttpResponse(test_job.id, status=201)
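# Hedged sketch (not from the source): one way the watch_job view above could be
# routed. The URL pattern and the urlpatterns module shown here are assumptions
# for illustration only; the actual routing may differ.
from django.urls import path

urlpatterns = [
    path(
        '<slug:group_slug>/<slug:project_slug>/<str:version>/<slug:environment_slug>/watch/',
        watch_job,
        name='watch_job',
    ),
]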
def handle(self, *args, **options):
    backend_name = options.get("BACKEND")
    job_id = options.get("JOBID")
    group_slug, project_slug = options.get("PROJECT").split('/')

    backend = Backend.objects.get(name=backend_name)
    group, _ = Group.objects.get_or_create(slug=group_slug)
    project, _ = group.projects.get_or_create(slug=project_slug)
    build = project.builds.create(version=str(time.time()))
    testjob = backend.test_jobs.create(
        target=project,
        job_id=job_id,
        target_build=build)

    if options.get("background"):
        fetch.delay(testjob.id)
    else:
        backend.fetch(testjob)
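# Hedged sketch (assumed, not from the source): an add_arguments() that would
# supply the BACKEND, JOBID, PROJECT and --background options read by handle()
# above. The help strings are illustrative only.
def add_arguments(self, parser):
    parser.add_argument('BACKEND', help='name of an existing Backend')
    parser.add_argument('JOBID', help='job id on that backend')
    parser.add_argument('PROJECT', help='project in GROUP/PROJECT form')
    parser.add_argument(
        '--background',
        action='store_true',
        help='schedule the fetch asynchronously via fetch.delay() instead of fetching inline',
    )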
def listen(self):
    listener_url = self.get_listener_url()
    self.log_debug("connecting to %s" % listener_url)

    self.context = zmq.Context()
    self.socket = self.context.socket(zmq.SUB)
    self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
    try:
        # requires PyZMQ to be built against ZeroMQ 4.2+
        self.socket.setsockopt(zmq.HEARTBEAT_IVL, 1000)       # 1 s
        self.socket.setsockopt(zmq.HEARTBEAT_TIMEOUT, 10000)  # 10 s
    except AttributeError:
        self.log_warn(
            'PyZMQ has no support for heartbeat (requires ZeroMQ library 4.2+), '
            'connection may be unstable'
        )
    self.socket.connect(listener_url)
    self.log_debug("connected to %s" % listener_url)

    while True:
        try:
            message = self.socket.recv_multipart()
            self.log_debug("message received: %r" % message)
            (topic, uuid, dt, username, data) = (u(m) for m in message[:])
            data = json.loads(data)

            lava_id = data['job']
            if 'sub_id' in data.keys():
                lava_id = data['sub_id']
            lava_status = data['status']
            if lava_status in self.complete_statuses:
                db_test_job_list = self.data.test_jobs.filter(
                    submitted=True,
                    fetched=False,
                    job_id=lava_id)
                if db_test_job_list.exists() and len(db_test_job_list) == 1:
                    job = db_test_job_list[0]
                    self.log_info("scheduling fetch for job %s" % job.job_id)
                    fetch.delay(job.id)
        except Exception as e:
            self.log_error(str(e) + "\n" + traceback.format_exc())
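# Hedged sketch (assumed): recv_multipart() returns bytes, so the u() helper used
# above is presumably a small bytes-to-str decoder along these lines; the actual
# implementation in the source may differ.
def u(value):
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value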
job_ids = []  # ids of the recreated test jobs, fetched in a second pass below
for group_slug in projects_slugs.keys():
    for slug in projects_slugs[group_slug]:
        p = m.Project.objects.filter(group__slug=group_slug, slug=slug).first()
        print('Working on %s' % p.full_name, flush=True)
        for tj in ci_m.TestJob.objects.filter(
                target=p, fetched=True).order_by('-created_at')[:num_testjobs]:
            print('Recreating testjob %s' % tj, flush=True)
            # Save data
            backend = tj.backend
            build = tj.target_build
            environment = tj.environment
            job_id = tj.job_id
            # Delete TestRun and TestJob (cascade delete)
            tj.testrun.delete()
            # Create fresh
            new_tj = ci_m.TestJob.objects.create(
                backend=backend,
                target=p,
                target_build=build,
                environment=environment,
                submitted=True,
                job_id=job_id)
            job_ids.append(new_tj.id)

# Send all to queue
print('Send jobs to queue', flush=True)
for tj_id in job_ids:
    fetch.delay(tj_id)
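# Hedged example (assumed values, not from the source): the loops above expect
# projects_slugs to map a group slug to an iterable of project slugs, and
# num_testjobs to bound how many recent fetched jobs are recreated per project.
projects_slugs = {
    'example-group': ['example-project-a', 'example-project-b'],
}
num_testjobs = 10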
def fetch_job(modeladmin, request, queryset):
    for test_job in queryset:
        fetch.delay(test_job.id)
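# Hedged sketch (assumed, not from the source): how the fetch_job admin action
# above could be wired up; TestJobAdmin and its registration are illustrative only.
from django.contrib import admin

fetch_job.short_description = "Fetch results for selected test jobs"

class TestJobAdmin(admin.ModelAdmin):
    actions = [fetch_job]

admin.site.register(TestJob, TestJobAdmin)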