def test_kill_horse_command(self):
    """Ensure that shutdown command works properly."""
    connection = self.testconn
    queue = Queue('foo', connection=connection)
    job = queue.enqueue(long_running_job, 4)
    worker = Worker('foo', connection=connection)

    def _send_kill_horse_command():
        """Waits 0.25 seconds before sending kill-horse command"""
        time.sleep(0.25)
        send_kill_horse_command(connection, worker.name)

    # Kill the horse while the worker is busy with the long-running job:
    # the job must then end up in the failed job registry.
    killer = Process(target=_send_kill_horse_command)
    killer.start()
    worker.work(burst=True)
    killer.join(1)
    job.refresh()
    self.assertTrue(job.id in queue.failed_job_registry)

    def start_work():
        worker.work()

    worker_proc = Process(target=start_work)
    worker_proc.start()
    worker_proc.join(2)
    send_kill_horse_command(connection, worker.name)
    worker.refresh()
    # Since worker is not busy, command will be ignored
    self.assertEqual(worker.get_state(), WorkerStatus.IDLE)
    send_shutdown_command(connection, worker.name)
def cancel_optimization(self, request, public_id=None):
    """Cancel a queued or running optimization for a transport network.

    Kills the work horse of any worker currently busy with the network's
    job, removes the job from the queue and resets the optimization
    fields on the model.

    Raises:
        ValidationError: if the optimization already finished or errored,
            so there is nothing to cancel.

    Returns:
        Response with the serialized transport network and HTTP 200.
    """
    # Local import keeps this fix self-contained; rq is already a
    # dependency of this module (Worker, cancel_job, ...).
    from rq.exceptions import NoSuchJobError

    transport_network_obj = self.get_object()
    if transport_network_obj.optimization_status in [
        TransportNetwork.STATUS_ERROR, TransportNetwork.STATUS_FINISHED
    ]:
        raise ValidationError('Optimization is not running or queued')

    redis_conn = get_connection()
    job_id = str(transport_network_obj.job_id)
    for worker in Worker.all(redis_conn):
        # Only kill the horse on the worker that is busy with our job.
        if worker.state == WorkerStatus.BUSY and \
                worker.get_current_job_id() == job_id:
            send_kill_horse_command(redis_conn, worker.name)

    try:
        # remove from queue; the job may already be gone if the worker
        # finished (or was killed) before we got here — same guard as
        # the delete_job helper uses.
        cancel_job(job_id, connection=redis_conn)
    except NoSuchJobError:
        pass

    transport_network_obj.optimization_status = None
    transport_network_obj.optimization_ran_at = None
    transport_network_obj.optimization_error_message = None
    transport_network_obj.save()

    return Response(
        TransportNetworkSerializer(transport_network_obj).data,
        status.HTTP_200_OK)
def delete_job(job_id):
    """Kill any work horse processing *job_id* and drop the job from the queue."""
    connection = get_connection()
    job_id_str = str(job_id)
    for worker in Worker.all(connection):
        is_busy = worker.state == WorkerStatus.BUSY
        if is_busy and worker.get_current_job_id() == job_id_str:
            send_kill_horse_command(connection, worker.name)
    try:
        # remove from queue; the job may already be gone, which is fine
        cancel_job(job_id_str, connection=connection)
    except NoSuchJobError:
        pass
def handle(self, *args, **options):
    """Reset the RQ worker setup.

    Shuts down every live worker, re-enqueues the first interrupted
    mail job (started but not finished), purges the failed topper job
    registry, and finally starts a fresh worker on the default and
    topper queues (blocks until that worker stops).
    """
    with Connection(REDIS_CLIENT):
        # Stop every live worker (and its work horse) and mark it dead.
        for worker in Worker.all(REDIS_CLIENT):
            send_kill_horse_command(REDIS_CLIENT, worker.name)
            send_shutdown_command(REDIS_CLIENT, worker.name)
            worker.register_death()

        # First mail job that was started but never completed, if any.
        # first() returning a row is equivalent to the old count() > 0
        # check on the same filter, without issuing a second query.
        job_ids = AsyncCronMail.objects.values_list('job_id').filter(
            started_at__isnull=False, status=False).first()
        if job_ids is not None:
            try:
                job = Job.fetch(job_ids[0], connection=REDIS_CLIENT)
                DEFAULT_QUEUE.empty()
                DEFAULT_QUEUE.enqueue_job(job)
            except Exception:
                # Job.fetch raises NoSuchJobError when the job data has
                # expired from Redis; narrow except (not bare) so
                # KeyboardInterrupt/SystemExit still propagate.
                print('Job does not exist')

        # Delete all failed topper jobs together with their job data.
        topper_registry = FailedJobRegistry(queue=TOPPER_QUEUE)
        for job_id in topper_registry.get_job_ids():
            topper_registry.remove(job_id, delete_job=True)

        w = Worker([DEFAULT_QUEUE, TOPPER_QUEUE], connection=REDIS_CLIENT,
                   name='default_worker')
        w.work()
def _send_kill_horse_command():
    """Give the worker a moment to pick up the job, then kill its horse."""
    delay_seconds = 0.25
    time.sleep(delay_seconds)
    send_kill_horse_command(connection, worker.name)