def test_run_jobs(self):
  qda = ModelAccess(get_pg_core(CONNECTION_STRING)).open(autocommit=True)
  queue = pypgq.Queue(
    qda,
    worker_count=2,
    cooperative=None)  # pypgq.Cooperative(pypgq.Cooperative.advisory_lock, (1, 1))
  queue._sleep_time = 1.0
  queue.add_handler("sleep_job", sleep_job)
  queue.add_handler("exception_job", exception_job)

  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 0, "running_jobs": 0, "running_job_ids": (),
     "completed_jobs": 0, "stop_mode": pypgq.StopMode.never})

  j1, _ = pypgq.queue_job(self.da, "sleep_job", {"seconds": 5})
  j2, _ = pypgq.queue_job(self.da, "sleep_job", {"seconds": 5})
  j3, _ = pypgq.queue_job(self.da, "exception_job", {"seconds": 5})

  sqt = Thread(target=queue.start)
  sqt.start()

  # NOTE: this is kind of tricky. We need to wait for the right intervals to check the status,
  # and it's not easy to get right. A better approach would be some kind of event system on the
  # queue to call functions when it starts, stops, etc. jobs.
  sleep(2.5)
  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 1, "running_jobs": 2, "running_job_ids": (j1.id, j2.id),
     "completed_jobs": 0, "stop_mode": pypgq.StopMode.never})

  sleep(3.5)
  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 0, "running_jobs": 1, "running_job_ids": (j3.id,),
     "completed_jobs": 2, "stop_mode": pypgq.StopMode.never})

  sleep(5.0)
  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 0, "running_jobs": 0, "running_job_ids": (),
     "completed_jobs": 3, "stop_mode": pypgq.StopMode.never})

  queue.stop(None, pypgq.StopMode.when_all_done)
  sqt.join()
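# For reference, a minimal sketch of the handlers the test registers above. The real sleep_job
# and exception_job are defined elsewhere in this module; the signature (a single payload dict
# argument) is an assumption based on how the jobs are queued with {"seconds": ...} payloads.
def _sleep_job_sketch(payload):
  # Simulate work by sleeping for the requested number of seconds.
  sleep(payload["seconds"])

def _exception_job_sketch(payload):
  # Sleep first so the job shows up as running, then fail; per the assertions above, the
  # queue's error path still counts a failed job toward completed_jobs.
  sleep(payload["seconds"])
  raise RuntimeError("exception_job failed on purpose")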
def configure_postgres(args):
  from psycopg2.extras import Json
  from psycopg2.extensions import register_adapter
  from bidon.db.core import get_pg_core

  port = args.port or 5432

  CONFIG.update(
    core=get_pg_core("dbname=bidon_test user=postgres host=localhost port={}".format(port)),
    test_rowcount=True,
    test_callproc=True,
    is_pg=True)
  CONFIG.freeze()

  register_adapter(dict, lambda d: Json(d))
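# With the dict -> Json adapter registered above, a plain dict can be passed straight to
# execute() as a query parameter and psycopg2 serializes it as JSON. A usage sketch; the
# "events" table, "payload" column, and helper name are hypothetical.
def _insert_event_sketch(cursor, payload):
  cursor.execute("INSERT INTO events (payload) VALUES (%s)", (payload,))
  # e.g. _insert_event_sketch(cur, {"kind": "startup", "port": 5432})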
def start_queue(
    connection_string,
    job_filepath,
    schema_name="public",
    workers=10,
    cooperative=None,
    schedule_frequency=None,
    job_args=None,
    children=None,
):
  """Creates, configures and runs a job queue.

  :param connection_string: a Postgres connection string
  :param job_filepath: the path to a python script that can configure the queue
  :param schema_name: the name of the schema that contains the queue tables
  :param workers: the number of concurrent workers to run
  :param cooperative: an optional cooperative-locking configuration, passed through to the Queue
  :param schedule_frequency: how often to check for scheduled jobs, passed through to the Queue
  :param job_args: arbitrary arguments handed to the job module's setup_jobs function
  :param children: an optional list of child process ids to forward stop signals to
  """
  model_access = ModelAccess(get_pg_core(connection_string), search_path=schema_name)
  model_access.open(autocommit=True)

  queue = Queue(
    model_access,
    worker_count=workers,
    cooperative=cooperative,
    schedule_frequency=schedule_frequency)

  job_module = load_module(job_filepath, None)
  job_module.setup_jobs(queue, job_args)

  def stop(sig, _):
    """Stops the queue in the manner specified by the signal.

    :param sig: the signal received
    """
    queue.stop(stop_mode=STOP_SIGNALS[sig])
    if children:
      for pid in children:
        os.kill(pid, sig)
        try:
          os.waitpid(pid, 0)
        except ChildProcessError:
          # The child already shut down before we started waiting on it.
          pass

  for sig in STOP_SIGNALS:
    signal.signal(sig, stop)
  signal.signal(signal.SIGINFO, lambda n, f: print(queue.status(), file=sys.stderr))

  log_queue_info(job_filepath, workers)

  queue.start()
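# A minimal sketch of the module start_queue expects at job_filepath. It must expose
# setup_jobs(queue, job_args), as called above; the handler name, body, and single-payload
# signature here are assumptions for illustration.
def _example_handler(payload):
  print("handling", payload)

def setup_jobs(queue, job_args):
  # Register each job name the queue should know how to run.
  queue.add_handler("example_job", _example_handler)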
def test_job_serialization(self):
  qda = ModelAccess(get_pg_core(CONNECTION_STRING)).open(autocommit=True)
  queue = pypgq.Queue(qda, worker_count=2)
  queue._sleep_time = 1.0
  queue.add_handler("sleep_job", sleep_job)
  queue.add_handler("exception_job", exception_job)

  j1, _ = pypgq.queue_job(self.da, "sleep_job", {"seconds": 3}, "serialize")
  j2, _ = pypgq.queue_job(self.da, "sleep_job", {"seconds": 3}, "serialize")

  sqt = Thread(target=queue.start)
  sqt.start()

  sleep(1.5)
  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 1, "running_jobs": 1, "running_job_ids": (j1.id,),
     "completed_jobs": 0, "stop_mode": pypgq.StopMode.never})

  sleep(3.0)
  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 0, "running_jobs": 1, "running_job_ids": (j2.id,),
     "completed_jobs": 1, "stop_mode": pypgq.StopMode.never})

  sleep(2.0)
  self.assertEqual(
    queue.status(),
    {"waiting_jobs": 0, "running_jobs": 0, "running_job_ids": (),
     "completed_jobs": 2, "stop_mode": pypgq.StopMode.never})

  queue.stop(None, pypgq.StopMode.when_all_done)
  sqt.join()
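# The fourth argument to queue_job is a serialization key: as the assertions above show, jobs
# sharing a key run one at a time in queue order even with multiple workers available. A usage
# sketch; the job name, payload, and key format are hypothetical.
def _queue_account_sync_sketch(da, account_id):
  # Serialize all syncs for the same account while letting different accounts run concurrently.
  return pypgq.queue_job(
    da, "sync_account", {"account_id": account_id}, "account:{}".format(account_id))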
def setUp(self):
  self.da = ModelAccess(get_pg_core(CONNECTION_STRING)).open(autocommit=True)
  self._clean_tables()