def test_delete_job_reserved_by_other(c: Client) -> None:
    """A job reserved by another connection cannot be deleted until its TTR lapses."""
    c.put('', ttr=1)
    other = Client(port=PORT)
    try:
        job = other.reserve()
        # While `other` holds the reservation, `c` must get NOT_FOUND.
        with pytest.raises(NotFoundError):
            c.delete(job)
        # After the 1-second TTR the reservation expires and delete succeeds.
        time.sleep(1)
        c.delete(job)
    finally:
        # Fix: the second connection used to leak; close it deterministically.
        other.close()
def test_basic_usage(c: Client) -> None:
    """Round-trip a UTF-8 encoded body through a secondary tube."""
    c.use("emails")
    # Renamed from `id` so the builtin is not shadowed.
    job_id = c.put("测试@example.com".encode("utf-8"))
    c.watch("emails")
    c.ignore("default")
    job = c.reserve()
    assert job_id == job.id
    assert job.body.decode("utf-8") == "测试@example.com"
    c.delete(job)
def test_initialize_with_tubes(c: Client) -> None:
    """Jobs put on the fixture's tube are reservable; 'default' jobs are not watched."""
    c.put(b"www.example.com")
    reserved = c.reserve()
    assert reserved.body == b"www.example.com"
    c.delete(reserved.id)
    # A job put via the "default" tube must stay invisible to this watch list.
    c.use("default")
    c.put(b"")
    with pytest.raises(TimedOutError):
        c.reserve(timeout=0)
def test_basic_usage(c: Client) -> None:
    """Round-trip a unicode string body through a secondary tube."""
    c.use('emails')
    # Renamed from `id` so the builtin is not shadowed.
    job_id = c.put('测试@example.com')
    c.watch('emails')
    c.ignore('default')
    job = c.reserve()
    assert job_id == job.id
    assert job.body == '测试@example.com'
    c.delete(job)
def test_initialize_with_tubes(c: Client) -> None:
    """The fixture's initial use/watch tubes route puts and reserves correctly."""
    c.put('www.example.com')
    reserved = c.reserve()
    assert reserved.body == 'www.example.com'
    c.delete(reserved.id)
    # Jobs placed via 'default' are outside this client's watch list.
    c.use('default')
    c.put('')
    with pytest.raises(TimedOutError):
        c.reserve(timeout=0)
def test_reserve_job(c: Client) -> None:
    """reserve_job grabs a specific job by id and removes it from the ready queue."""
    first = c.put(b"a")
    second = c.put(b"b")
    job_a = c.reserve_job(first)
    job_b = c.reserve_job(second)
    # Both jobs are reserved now: neither can be reserved again by id...
    for taken in (first, second):
        with pytest.raises(NotFoundError):
            c.reserve_job(taken)
    # ...nor is anything left in the ready queue.
    with pytest.raises(TimedOutError):
        c.reserve(timeout=0)
    c.delete(job_a)
    c.delete(job_b)
    with pytest.raises(TimedOutError):
        c.reserve(timeout=0)
def test_delete_job_reserved_by_other(c: Client) -> None:
    """Deleting a job that another connection holds reserved raises NOT_FOUND."""
    c.put(b"", ttr=1)
    with Client(DEFAULT_INET_ADDRESS) as other:
        reserved = other.reserve()
        with pytest.raises(NotFoundError):
            c.delete(reserved)
def test_job_not_found(c: Client) -> None:
    """Deleting a job id that was never created raises NotFoundError."""
    missing_id = 87
    with pytest.raises(NotFoundError):
        c.delete(missing_id)
def handle_creation(b: greenstalk.Client, job: greenstalk.Job) -> None:
    """Poll a Ganeti instance-creation backend job until it ends, then record
    the outcome on the InstanceApplication and send the result mails.

    The beanstalk job body is JSON carrying ``application_id``; the matching
    InstanceApplication row provides the cluster and backend ``job_id`` to
    watch. Unknown applications get the job buried for manual inspection.
    """
    global logger
    data = json.loads(job.body)
    try:
        application = InstanceApplication.objects.get(
            id=data["application_id"])
    except ObjectDoesNotExist:
        # No such application: bury the job and alert admins instead of
        # looping forever on a dangling id.
        logger.warn("Unable to find application #%d, burying" %
                    data["application_id"])
        try_log(
            mail_admins,
            "Burying job #%d" % job.id,
            "Please inspect job #%d (application %d) manually" %
            (job.id, data["application_id"]))
        b.bury(job)
        close_old_connections()
        return
    finally:
        # Refresh DB connections on every exit path: this worker idles for
        # long stretches and Django connections go stale.
        close_old_connections()
    logger.info("Handling %s (job: %d)",
                application.hostname, application.job_id)
    while True:
        # Poll the cluster every 15 seconds until the backend job reports an
        # end timestamp.
        sleep(15)
        logger.info("Checking %s (job: %d)",
                    application.hostname, application.job_id)
        status = application.cluster.get_job_status(application.job_id)
        if status["end_ts"]:
            logger.info("%s (job: %d) done. Status: %s",
                        application.hostname, application.job_id,
                        status["status"])
            if status["status"] == "error":
                # Creation failed: persist the backend error and notify admins.
                application.status = STATUS_FAILED
                application.backend_message = smart_str(status["opresult"])
                application.save()
                # NOTE(review): the log message contains a literal newline
                # (the original source wrapped mid-string) — confirm intended.
                logger.warn("%s (job: %d) failed. \nNotifying admins",
                            application.hostname, application.job_id)
                try_log(
                    mail_admins,
                    "Instance creation failure for %s on %s" %
                    (application.hostname, application.cluster),
                    json.dumps(status, indent=2))
            else:
                # Creation succeeded: mark success and mail applicant/managers.
                application.status = STATUS_SUCCESS
                application.backend_message = None
                application.save()
                logger.info("Mailing %s about %s",
                            application.applicant.email,
                            application.hostname)
                fqdn = Site.objects.get_current().domain
                instance_url = "https://%s%s" % \
                    (fqdn,
                     urlresolvers.reverse(
                         "instance-detail",
                         args=(application.cluster.slug,
                               application.hostname)))
                mail_body = render_to_string(
                    "instances/emails/instance_created_mail.txt",
                    {
                        "application": application,
                        "instance_url": instance_url,
                        "BRANDING": settings.BRANDING
                    })
                # Same template, extra "reviewer" context for the manager copy.
                mail_body_managers = render_to_string(
                    "instances/emails/instance_created_mail.txt",
                    {
                        "application": application,
                        "reviewer": application.reviewer,
                        "instance_url": instance_url,
                        "BRANDING": settings.BRANDING
                    })
                try_log(
                    send_mail,
                    settings.EMAIL_SUBJECT_PREFIX +
                    "Instance %s is ready" % application.hostname,
                    mail_body,
                    settings.SERVER_EMAIL,
                    [application.applicant.email])
                logger.info("Mailing managers about %s" %
                            application.hostname)
                try_log(mail_managers,
                        "Instance %s is ready" % application.hostname,
                        mail_body_managers)
            b.delete(job)
            close_old_connections()
            break
    # NOTE(review): the job was already deleted before `break` above, so this
    # second delete looks like a double-delete that would raise NotFoundError.
    # The original file's indentation was lost — confirm intended placement.
    b.delete(job)
def handle_job_lock(b: greenstalk.Client, job: greenstalk.Job) -> None:
    """Watch a Ganeti backend job that holds an instance lock and clear the
    lock (plus any related cache keys) once the job finishes.

    The beanstalk job body is JSON with ``lock_key``, ``instance``,
    ``job_id``, ``cluster`` and optionally ``flush_keys``. While the backend
    job runs, both the cache lock and the beanstalk reservation are refreshed
    on every poll so neither expires mid-watch.
    """
    # Fix: dropped the needless `global logger` (it is only read) and replaced
    # the deprecated `logger.warn` alias with `logger.warning` throughout.
    data = json.loads(job.body)
    lock_key = data["lock_key"]
    instance = data["instance"]
    job_id = int(data["job_id"])
    logger.info("Handling lock key %s (job %d)" % (lock_key, job_id))
    try:
        cluster = Cluster.objects.get(slug=data["cluster"])
    except ObjectDoesNotExist:
        # Unknown cluster: bury the job for manual inspection.
        logger.warning("Got lock key %s for unknown cluster %s, burying" %
                       (data["lock_key"], data["cluster"]))
        b.bury(job)
        close_old_connections()
        return
    finally:
        # Refresh stale DB connections on every exit path; this worker idles
        # between polls.
        close_old_connections()
    pi = next_poll_interval()
    while True:
        logger.debug("Checking lock key %s (job: %d)" % (lock_key, job_id))
        reason = cache.get(lock_key)
        if reason is None:
            # Lock expired or was removed elsewhere; nothing left to guard.
            logger.info("Lock key %s vanished, forgetting it" % lock_key)
            b.delete(job)
            return
        logger.debug("Polling job %d" % job_id)
        try:
            status = cluster.get_job_status(job_id)
        except Exception as err:
            # Transient backend failure: back off and retry on the next
            # poll interval.
            logger.warning("Error polling job: %s" % str(err))
            close_old_connections()
            sleep(next(pi))
            continue
        finally:
            close_old_connections()
        logger.debug("Done")
        if status["end_ts"]:
            logger.info("Job %d finished, removing lock %s" %
                        (job_id, lock_key))
            if "flush_keys" in data:
                for key in data["flush_keys"]:
                    cache.delete(key)
            cache.delete(lock_key)
            locked_instances = cache.get('locked_instances')
            # This should contain at least 1 instance
            if locked_instances is not None:
                try:
                    locked_instances.pop("%s" % instance)
                except KeyError:
                    pass
                if len(locked_instances) == 0:
                    cache.delete('locked_instances')
                else:
                    cache.set('locked_instances', locked_instances, 90)
            else:
                # This could be due to a cache fail or restart. For the time log it
                logger.warning(
                    "Unable to find instance %s in locked instances cache key"
                    % instance)
            clear_cluster_users_cache(cluster.slug)
            b.delete(job)
            return
        # Touch the key
        cache.set(lock_key, reason, 30)
        b.touch(job)
        sleep(next(pi))