def test_twitter_search_gets_processed():
    """Ensures the data can be loaded from twitter and stored as a raw source

    Run one search query
    Run worker
    Check two tweets in raw source
    Run worker
    Check the pixels have been averaged out
    """
    query = 'Test Query'
    size = 2
    with Connection(connection=redis_db):
        source_queue = Queue(name='source')
        process_queue = Queue(name='process')
        twitter.search(input=query, size=size)
        # Drain the source queue so the fetched tweets are stored raw.
        SimpleWorker([source_queue]).work(burst=True)
        assert len(Tweet.keys()) == size
        # Drain the process queue to run the averaging step.
        SimpleWorker([process_queue]).work(burst=True)
        # # assert len(Tweet.keys()) == size
def _new_rq_worker() -> Worker:
    """Build a non-forking RQ worker bound to every configured queue."""
    worker = SimpleWorker(
        queues=queue_names(),
        connection=redis_connection(),
        queue_class=Queue,
        job_class=Job,
        # Route job exceptions through the project's handler.
        exception_handlers=[RqExcMan.exc_handler],
    )
    return worker
def test_func__create_user_with_mail_notification__ok__nominal_case(
    self, mailhog, user_api_factory, app_config
):
    """Creating a user with do_notify=True must queue and deliver a welcome mail."""
    api = user_api_factory.get(current_user=None)
    u = api.create_user(
        email="bob@bob",
        password="******",
        name="bob",
        timezone="+2",
        do_save=True,
        do_notify=True,
    )
    assert u is not None
    assert u.email == "bob@bob"
    # NOTE(review): the password literal above appears redacted ("******")
    # while this assertion checks "password" — confirm against the fixture.
    assert u.validate_password("password")
    assert u.display_name == "bob"
    assert u.timezone == "+2"
    # Send mail async from redis queue
    redis = get_redis_connection(app_config)
    queue = get_rq_queue(redis, "mail_sender")
    worker = SimpleWorker([queue], connection=queue.connection)
    worker.work(burst=True)
    # check mail received
    response = mailhog.get_mailhog_mails()
    headers = response[0]["Content"]["Headers"]
    assert headers["From"][0] == "Global manager via Tracim <test_user_from+1@localhost>"
    assert headers["To"][0] == "bob <bob@bob>"
    assert headers["Subject"][0] == "[Tracim] Created account"
def test_sync_jobs(mocker):
    """sync_jobs must drop failed/missing jobs and keep still-queued ones."""
    qm = queuing.QueueManager()
    qm.job_func = run
    # Status as reported by the (mocked) job management interface.
    job_status = {
        "0": "queued",
        "1": "queued",
        "3": "queued",
    }
    # Enqueue a job that raises, then run it so it lands in the failed registry.
    qm.q.enqueue(fail, ValueError, "0 isnt 1", job_id="0")
    w = SimpleWorker([qm.q], connection=qm.redis_conn)
    w.work(burst=True)
    assert len(qm.q.failed_job_registry) == 1
    queued = {str(i): "user" for i in range(5)}
    qm.enqueue_job("4", "user")
    assert qm.q.job_ids == ["4"]
    # KeyboardInterrupt breaks sync_jobs out of its polling loop.
    mocker.patch("solarperformanceinsight_api.queuing.time.sleep",
                 side_effect=KeyboardInterrupt)
    jmi = mocker.MagicMock()
    startt = jmi.start_transaction.return_value.__enter__.return_value
    startt.list_queued_jobs.return_value = queued
    startt.list_status_of_jobs.return_value = job_status
    mocker.patch(
        "solarperformanceinsight_api.queuing._get_job_management_interface",
        return_value=jmi,
    )
    queuing.sync_jobs()
    # 0 failed, 2 is missing
    assert set(qm.q.job_ids) == {"1", "3"}
    assert startt.report_job_failure.call_count == 1
def setUp(self):
    """Initialize the app database and a synchronous fake-Redis RQ queue."""
    views.db.init_app(self.app)
    with self.app.test_request_context():
        views.db.create_all()
    # `async` became a reserved keyword in Python 3.7, so `Queue(async=False)`
    # is a SyntaxError there; rq renamed the parameter to `is_async`
    # (deprecated alias in rq 0.12, removed in rq 1.0).
    self.queue = Queue(is_async=False, connection=fakeredis.FakeStrictRedis())
    self.worker = SimpleWorker([self.queue], connection=self.queue.connection)
def test_job_retried_correctly(self): """To check if the job was correctly failed etc., we should use a Worker and not the queue provided with FakeRedisTestCaseMixin.""" # Create an asynchronous queue. # The name `separate_queue` used here is to ensure the queue isn't used # anywhere else. queue = Queue("separate_queue", connection=self.connection) worker = SimpleWorker([queue], connection=queue.connection) # log admin user self._logSuperuserIn() # this job will fail job = queue.enqueue(dummy_fail_job) self.assertEqual(job.get_status(), "queued") # log the job in our system as RQJob rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger) # run the worker worker.work(burst=True) self.assertEqual(job.get_status(), "failed") url = reverse("admin:autoemails_rqjob_retry", args=[rqjob.pk]) rv = self.client.post(url, follow=True) self.assertIn( f"The job {job.id} was requeued. It will be run shortly.", rv.content.decode("utf-8"), ) self.assertEqual(job.get_status(refresh=True), "queued")
def test_worker(client):
    """Enqueue a word-count job and drain it with a burst-mode SimpleWorker."""
    q = Queue(connection=Redis())
    job = q.enqueue(count_words, args=('https://www.facebook.com', None))
    assert len(q) > 0
    # Runs the enqueued job synchronously, in-process.
    SimpleWorker([q], connection=q.connection).work(burst=True)
    assert job.get_id()
def test_enqueue_background_job_successful(tasks_col, mock_redis):
    """Test enqueuing a fake task using "enqueue_background_job" and ensure
    it can be executed properly.
    """
    job_id = enqueue_background_job(
        task_func="tests.unit_tests.jobs_test.successful_task",
        queue_name="upload",
        username="******",
        job_kwargs={"value": "spam"})
    # Ensure the job is enqueued and MongoDB entry exists
    pending_jobs = list(tasks_col.find("test_project", "pending"))
    assert len(pending_jobs) == 1
    job = pending_jobs[0]
    assert str(job["_id"]) == job_id
    assert job["project"] == "test_project"
    assert job["message"] == "processing"
    assert job["status"] == "pending"
    # Check that the Redis queue has the same job
    upload_queue = get_job_queue("upload")
    assert upload_queue.job_ids == [job_id]
    # Job can be finished
    SimpleWorker([upload_queue], connection=mock_redis).work(burst=True)
    # The task's return value is stored as the RQ job result.
    rq_job = upload_queue.fetch_job(job_id)
    assert rq_job.result == "Task ID = {}, value = spam".format(job_id)
def test_coverage_summary_by_changeset(coverage_builds):
    """First calls return 202 while jobs are pending; after the worker runs,
    good changesets return 200 with results and the bad one returns 500."""
    from rq import Queue

    from codecoverage_backend import api
    from tests.conftest import mock_coverage_by_changeset_job_success

    # patch the queue to be sync to allow it run without workers. http://python-rq.org/docs/testing/
    with mock.patch('codecoverage_backend.api.q',
                    Queue(connection=FakeStrictRedis())) as q:
        # patch the mock_coverage_by_changeset
        with mock.patch('codecoverage_backend.api.coverage_by_changeset_job',
                        mock_coverage_by_changeset_job_success):
            # Get changeset coverage information
            for changeset, expected in coverage_builds['summary'].items():
                result, code = api.coverage_summary_by_changeset(changeset)
                assert code == 202
            # test that in the case of exception it will return 500
            result, code = api.coverage_summary_by_changeset(
                'mozilla test changeset')
            assert code == 202
            # run simple worker to run all tasks
            w = SimpleWorker([q], connection=q.connection)
            w.work(burst=True)
            # Everything should be 200 now
            for changeset, expected in coverage_builds['summary'].items():
                result, code = api.coverage_summary_by_changeset(changeset)
                assert result == expected
                assert code == 200
            # except the incorrect changeset, should be 500
            result, code = api.coverage_summary_by_changeset(
                'mozilla test changeset')
            assert code == 500
def test_enqueue_background_job_failing_out_of_sync(tasks_col, mock_redis):
    """Test enqueuing a fake task using "enqueue_background_job" and ensure
    it is handled properly if the failure is recorded in RQ but not MongoDB.
    """
    job_id = enqueue_background_job(
        task_func="tests.unit_tests.jobs_test.failing_task",
        queue_name="upload",
        username="******",
        job_kwargs={})
    # Check that the Redis queue has the same job
    upload_queue = get_job_queue("upload")
    assert upload_queue.job_ids == [job_id]
    # Job can be finished
    SimpleWorker([upload_queue], connection=mock_redis).work(burst=True)
    rq_job = upload_queue.fetch_job(job_id)
    assert rq_job.is_failed
    # Update the status in MongoDB to appear in-progress, while in RQ
    # it has already failed
    tasks_col.update_message(job_id, "processing")
    tasks_col.update_status(job_id, "pending")
    # Retrieve the task from MongoDB; it should be automatically updated
    # to match the status in RQ
    task = tasks_col.get(job_id)
    assert task["message"] == "Internal server error"
    assert task["status"] == "error"
def test_func__reset_password__ok__nominal_case(self):
    """A password reset request must queue and deliver a notification mail."""
    uapi = UserApi(
        current_user=None,
        session=self.session,
        config=self.app_config,
    )
    # NOTE(review): email literals below look redacted ('*****@*****.**') —
    # verify against the original fixtures.
    current_user = uapi.get_one_by_email('*****@*****.**')
    uapi.reset_password_notification(current_user, do_save=True)
    transaction.commit()
    # Send mail async from redis queue
    redis = get_redis_connection(self.app_config)
    queue = get_rq_queue(
        redis,
        'mail_sender',
    )
    worker = SimpleWorker([queue], connection=queue.connection)
    worker.work(burst=True)
    # check mail received (mailhog API on localhost)
    response = requests.get('http://127.0.0.1:8025/api/v1/messages')
    response = response.json()
    headers = response[0]['Content']['Headers']
    assert headers['From'][
        0] == 'Tracim Notifications <test_user_from+0@localhost>'  # nopep8
    assert headers['To'][0] == 'Global manager <*****@*****.**>'
    assert headers['Subject'][0] == '[TRACIM] Reset Password Request'
def test_freeze_objects_delete_jobs(session, redis, freeze_objects,
                                    museum_object_factory):
    """
    Freeze object with one pending and one failed job, and ensure they
    are both deleted
    """
    def successful_job():
        return ":)"

    def failing_job():
        raise RuntimeError(":(")

    museum_object_factory(id=123456)
    download_queue = get_queue(QueueType.DOWNLOAD_OBJECT)
    sip_queue = get_queue(QueueType.SUBMIT_SIP)
    download_queue.enqueue(successful_job, job_id="download_object_123456")
    sip_queue.enqueue(failing_job, job_id="submit_sip_123456")
    # Drain only the SIP queue: its job fails, the download job stays pending.
    worker = SimpleWorker([sip_queue], connection=sip_queue.connection)
    worker.work(burst=True)
    freeze_objects(["--delete-jobs", "--reason", "Deleting job", "123456"])
    assert not download_queue.job_ids
    assert not sip_queue.job_ids
    frozen = session.query(MuseumObject).filter_by(
        id=123456, freeze_reason="Deleting job"
    )
    assert frozen.count() == 1
def test_create_upload_delete_compute(client, nocommit_transaction,
                                      new_job, weather_df, async_queue,
                                      mocker, auth0_id):
    """Deleting a job after compute is queued must not make the worker log
    an error when the queued task finally runs."""
    cr = client.post("/jobs/", data=new_job.json())
    assert cr.status_code == 201
    new_id = cr.json()["object_id"]
    response = client.get(f"/jobs/{new_id}")
    assert response.status_code == 200
    stored_job = response.json()
    assert len(stored_job["data_objects"]) == 1
    data_id = stored_job["data_objects"][0]["object_id"]
    # Upload the weather data as an Arrow/Feather payload.
    iob = BytesIO()
    weather_df.to_feather(iob)
    iob.seek(0)
    response = client.post(
        f"/jobs/{new_id}/data/{data_id}",
        files={
            "file": ("test.arrow", iob, "application/vnd.apache.arrow.file")
        },
    )
    assert response.status_code == 200
    response = client.get(f"/jobs/{new_id}/status")
    assert response.json()["status"] == "prepared"
    response = client.post(f"/jobs/{new_id}/compute")
    assert response.status_code == 202
    response = client.get(f"/jobs/{new_id}/status")
    assert response.json()["status"] == "queued"
    # Delete the job before the queued compute task gets to run.
    with storage.StorageInterface(user=auth0_id).start_transaction() as st:
        st.delete_job(new_id)
    ww = SimpleWorker([async_queue], connection=async_queue.connection)
    log = mocker.spy(ww, "log")
    ww.work(burst=True)
    # worker logs error when exception raised in job
    assert log.error.call_count == 0
def test_func__create_user_with_mail_notification__ok__nominal_case(self):
    """Creating a user with do_notify=True must queue and deliver a welcome mail."""
    api = UserApi(
        current_user=None,
        session=self.session,
        config=self.app_config,
    )
    u = api.create_user(
        email='bob@bob',
        password='******',
        name='bob',
        timezone='+2',
        do_save=True,
        do_notify=True,
    )
    assert u is not None
    assert u.email == "bob@bob"
    # NOTE(review): the password literal above appears redacted ("******")
    # while this assertion checks 'password' — confirm against the fixture.
    assert u.validate_password('password')
    assert u.display_name == 'bob'
    assert u.timezone == '+2'
    # Send mail async from redis queue
    redis = get_redis_connection(self.app_config)
    queue = get_rq_queue(
        redis,
        'mail_sender',
    )
    worker = SimpleWorker([queue], connection=queue.connection)
    worker.work(burst=True)
    # check mail received
    response = self.get_mailhog_mails()
    headers = response[0]['Content']['Headers']
    assert headers['From'][
        0] == 'Tracim Notifications <test_user_from+0@localhost>'  # nopep8
    assert headers['To'][0] == 'bob <bob@bob>'
    assert headers['Subject'][0] == '[TRACIM] Created account'
def test_reenqueue_object_success(reenqueue_object, session, redis,
                                  museum_object, museum_package):
    """Re-enqueuing an object resets its package state, enqueues a new
    download job and removes the prior finished job."""
    # Create fake DB entries
    museum_package.downloaded = True
    museum_package.packaged = True
    museum_package.uploaded = True
    museum_package.rejected = True
    session.commit()
    # Create a job that was completed prior to re-enqueuing
    queue = get_queue(QueueType.CONFIRM_SIP)
    queue.enqueue(successful_job, job_id="confirm_sip_123456")
    SimpleWorker([queue], connection=queue.connection).work(burst=True)
    finished_registry = FinishedJobRegistry(queue=queue)
    assert finished_registry.get_job_ids() == ["confirm_sip_123456"]
    result = reenqueue_object(["123456"])
    assert "Object 123456 re-enqueued" in result.stdout
    # New RQ task was enqueued
    queue = get_queue(QueueType.DOWNLOAD_OBJECT)
    assert "download_object_123456" in queue.job_ids
    # Database was updated
    db_museum_object = session.query(MuseumObject).filter_by(id=123456).one()
    assert len(db_museum_object.packages) == 1
    assert not db_museum_object.latest_package
    # Prior finished job was removed
    assert finished_registry.get_job_ids() == []
def test_monitor_job_with_retry2(client):
    """Test monitoring a job for a task that fails stops after max retries"""
    with client.application.app_context():
        app = client.application
        app.redis.flushall()
        task_id = str(uuid4())
        t = Task.create_task(task_id)
        j = t.create_job()
        job_id = j.job_id
        # Retry budget already exhausted: retry_count == retries.
        j.metadata["retries"] = 3
        j.metadata["retry_count"] = 3
        ex = j.create_execution("image", "command")
        j.save()
        # Executor reports a failed execution (non-zero exit code).
        exec_mock = MagicMock()
        exec_mock.get_result.return_value = MagicMock(
            exit_code=1, log="".encode("utf-8"), error="error".encode("utf-8")
        )
        client.application.executor = exec_mock
        queue = Queue("monitor", is_async=False,
                      connection=client.application.redis)
        result = queue.enqueue(job_mod.monitor_job, t.task_id, job_id,
                               ex.execution_id)
        worker = SimpleWorker([queue], connection=queue.connection)
        worker.work(burst=True)
        t.reload()
        expect(t.jobs).to_length(1)
        job = t.jobs[0]
        expect(job.executions).to_length(1)
        execution = job.executions[0]
        expect(execution.image).to_equal("image")
        expect(execution.command).to_equal("command")
        # The monitor job itself must have finished and stored data.
        hash_key = f"rq:job:{result.id}"
        res = app.redis.exists(hash_key)
        expect(res).to_be_true()
        res = app.redis.hget(hash_key, "status")
        expect(res).to_equal("finished")
        res = app.redis.hexists(hash_key, "data")
        expect(res).to_be_true()
        # No follow-up retry job should have been scheduled in Redis.
        keys = app.redis.keys()
        next_job_id = [
            key
            for key in keys
            if key.decode("utf-8").startswith("rq:job")
            and not key.decode("utf-8").endswith(result.id)
        ]
        expect(next_job_id).to_length(0)
def test_full_run_through_job_timeout(app, queue, mocker):
    """A job exceeding its timeout must reach the custom exception handler
    with a JobTimeoutException."""
    def dosleep(*args, **kwargs):
        time.sleep(5)

    mocker.patch('sfa_api.jobs.exchange_token', return_value='token')
    mocker.patch('sfa_api.jobs.fetch_and_validate_all_observations',
                 new=dosleep)
    fail = mocker.MagicMock()
    gjq = mocker.patch('rq_scheduler.Scheduler.get_jobs_to_queue')

    class US(jobs.UpdateMixin, Scheduler):
        pass

    sch = US(queue=queue, connection=queue.connection)
    jobs.schedule_jobs(sch)
    (job, exc_time) = list(sch.get_jobs(with_times=True))[0]
    # Shrink the timeout so the 5 s sleep above is guaranteed to exceed it.
    job.timeout = 1
    assert exc_time == dt.datetime.utcnow().replace(
        hour=0, minute=0, second=0, microsecond=0) + dt.timedelta(days=1)
    gjq.return_value = [job]
    sch.run(burst=True)
    assert job in queue.jobs

    def my_err(job, *exc_info):
        # Record that the handler ran with the expected timeout exception.
        assert exc_info[0] == JobTimeoutException
        fail()

    w = SimpleWorker([queue], connection=queue.connection,
                     disable_default_exception_handler=True,
                     exception_handlers=[my_err])
    w.work(burst=True)
    assert fail.called
def setUp(self):
    """Configure the Flask app for testing and set up the DB and worker."""
    # Fix typo: the Flask flag is 'TESTING', not 'TESING' — the old key was
    # ignored, so the app never actually ran in testing mode.
    app.config['TESTING'] = True
    self.app = app.test_client()
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
    self.worker = SimpleWorker([queue], connection=queue.connection)
    db.create_all()
def get_simple_worker(*queue_names):
    """
    Returns a RQ worker for all queues or specified ones.
    """
    queues = get_queues(*queue_names)
    # An empty handler list would suppress rq's defaults, so pass None instead.
    handlers = get_exception_handlers() or None
    return SimpleWorker(
        queues,
        connection=queues[0].connection,
        exception_handlers=handlers,
    )
def test_func__create_new_content_with_notification__ok__nominal_case(
        self):
    """Creating content with do_notify=True must send a notification mail to
    the workspace member who enabled notifications."""
    uapi = UserApi(
        current_user=None,
        session=self.session,
        config=self.app_config,
    )
    # NOTE(review): email literals below look redacted ('*****@*****.**') —
    # verify against the original fixtures.
    current_user = uapi.get_one_by_email('*****@*****.**')
    # Create new user with notification enabled on w1 workspace
    wapi = WorkspaceApi(
        current_user=current_user,
        session=self.session,
        config=self.app_config,
    )
    workspace = wapi.get_one_by_label('Recipes')
    user = uapi.get_one_by_email('*****@*****.**')
    wapi.enable_notifications(user, workspace)
    api = ContentApi(
        current_user=user,
        session=self.session,
        config=self.app_config,
    )
    # Parent folder is created silently; only the file triggers a mail.
    item = api.create(
        content_type_list.Folder.slug,
        workspace,
        None,
        'parent',
        do_save=True,
        do_notify=False,
    )
    item2 = api.create(
        content_type_list.File.slug,
        workspace,
        item,
        'file1',
        do_save=True,
        do_notify=True,
    )
    # Send mail async from redis queue
    redis = get_redis_connection(self.app_config)
    queue = get_rq_queue(
        redis,
        'mail_sender',
    )
    worker = SimpleWorker([queue], connection=queue.connection)
    worker.work(burst=True)
    # check mail received (mailhog API on localhost)
    response = requests.get('http://127.0.0.1:8025/api/v1/messages')
    response = response.json()
    headers = response[0]['Content']['Headers']
    assert headers['From'][
        0] == '"Bob i. via Tracim" <test_user_from+3@localhost>'  # nopep8
    assert headers['To'][0] == 'Global manager <*****@*****.**>'
    assert headers['Subject'][0] == '[TRACIM] [Recipes] file1 (Open)'
    assert headers['References'][0] == 'test_user_refs+22@localhost'
    assert headers['Reply-to'][
        0] == '"Bob i. & all members of Recipes" <test_user_reply+22@localhost>'  # nopep8
def test_simpleworker_heartbeat_ttl(self):
    """SimpleWorker's key must last longer than job.timeout when working"""
    timeout = 300
    q = Queue('foo')
    worker = SimpleWorker([q])
    job = q.enqueue(save_key_ttl, worker.key, job_timeout=timeout)
    worker.work(burst=True)
    job.refresh()
    # The TTL recorded for the worker key must exceed the job timeout.
    self.assertGreater(job.meta['ttl'], timeout)
def run_job():
    """Return a helper that enqueues a callable on a fake-Redis queue and
    executes it synchronously with a burst-mode SimpleWorker."""
    queue = Queue(connection=FakeStrictRedis())
    worker = SimpleWorker([queue], connection=queue.connection)

    def inner(fn, *a, **kw):
        enqueued = queue.enqueue(fn, *a, **kw)
        worker.work(burst=True)
        return enqueued

    return inner
def test_create_upload_compute_success(client, nocommit_transaction,
                                       new_job, async_queue, mocker,
                                       weather_df):
    """Full happy path: create a job, upload standard-irradiance weather
    data, run compute, and verify the produced result set."""
    new_job.irradiance_type = "standard"
    cr = client.post("/jobs/", data=new_job.json())
    assert cr.status_code == 201
    new_id = cr.json()["object_id"]
    response = client.get(f"/jobs/{new_id}")
    assert response.status_code == 200
    stored_job = response.json()
    assert len(stored_job["data_objects"]) == 1
    data_id = stored_job["data_objects"][0]["object_id"]
    # Rename POA columns to the standard irradiance names before upload.
    iob = BytesIO()
    weather_df.rename(columns={
        "poa_global": "ghi",
        "poa_diffuse": "dhi",
        "poa_direct": "dni"
    }).to_feather(iob)
    iob.seek(0)
    response = client.post(
        f"/jobs/{new_id}/data/{data_id}",
        files={
            "file": ("test.arrow", iob, "application/vnd.apache.arrow.file")
        },
    )
    assert response.status_code == 200
    response = client.get(f"/jobs/{new_id}/status")
    assert response.json()["status"] == "prepared"
    response = client.post(f"/jobs/{new_id}/compute")
    assert response.status_code == 202
    response = client.get(f"/jobs/{new_id}/status")
    assert response.json()["status"] == "queued"
    # Run the queued compute task in-process.
    w = SimpleWorker([async_queue], connection=async_queue.connection)
    w.work(burst=True)
    response = client.get(f"/jobs/{new_id}/status")
    assert response.json()["status"] == "complete"
    response = client.get(f"/jobs/{new_id}/results")
    rj = response.json()
    # system level weather, one inverter, one array
    sp_type = {(r["definition"]["schema_path"],
                r["definition"]["type"]): r["object_id"]
               for r in rj}
    assert set(sp_type.keys()) == {
        ("/", "monthly summary"),
        ("/", "daytime flag"),
        ("/", "performance data"),
        ("/inverters/0", "performance data"),
        ("/inverters/0/arrays/0", "weather data"),
    }
    rid = sp_type[("/", "monthly summary")]
    response = client.get(f"/jobs/{new_id}/results/{rid}")
    data = response.text
    # The monthly summary CSV header must match exactly.
    assert data.split("\n")[0] == (
        "month,total_energy,plane_of_array_insolation,"
        "effective_insolation,average_daytime_cell_temperature")
def process_worker_jobs():
    """Synchronously drain the default django_rq queue in-process.

    We need to do this while testing to avoid strange errors on Circle.

    See:
    http://python-rq.org/docs/testing/
    https://github.com/ui/django-rq/issues/123
    """
    queue = django_rq.get_queue()
    SimpleWorker([queue], connection=queue.connection).work(burst=True)
def test_work_via_simpleworker(self):
    """Worker processes work, with forking disabled, then returns."""
    fooq = Queue('foo')
    barq = Queue('bar')
    worker = SimpleWorker([fooq, barq])
    self.assertEqual(worker.work(burst=True), False,
                     'Did not expect any work on the queue.')
    job = fooq.enqueue(say_pid)
    self.assertEqual(worker.work(burst=True), True,
                     'Expected at least some work done.')
    # SimpleWorker runs the job in this very process, so it reports our PID.
    self.assertEqual(job.result, os.getpid(),
                     'PID mismatch, fork() is not supposed to happen here')
def start_worker():
    """Connect to the database and start listening works in redis"""
    # Check if database is empty for populating places
    if queries.is_database_empty():
        logger.info("Database is empty, populating places and first measures")
        refresh_data.populate_places()
    elif environment.REFRESH_AT_STARTUP:
        logger.info(
            "Existing database detected, retrieving measures for today")
        refresh_data.pull_measures()
    # Block forever processing jobs from every listened queue.
    with Connection(conn):
        SimpleWorker([Queue(name) for name in listen]).work()
def test_get_object_id2queue_map(redis):
    """
    Test that 'get_object_id2queue_map' returns a correct dictionary
    """
    download_queue = get_queue(QueueType.DOWNLOAD_OBJECT)
    sip_queue = get_queue(QueueType.SUBMIT_SIP)
    download_queue.enqueue(successful_job, job_id="download_object_123456")
    sip_queue.enqueue(failing_job, job_id="submit_sip_654321")
    # Drain only the SIP queue so its job lands in the failed registry.
    worker = SimpleWorker([sip_queue], connection=sip_queue.connection)
    worker.work(burst=True)
    queue_map = get_object_id2queue_map([123456, 654321, 111111])
    assert queue_map[123456] == ["download_object"]
    assert queue_map[654321] == ["submit_sip", "failed"]
    assert queue_map[111111] == []
def process_jobs(self):
    """
    Run background tasks.
    """
    # Work inside a request context so `g` (including g.queue) is populated
    # the same way a real request would populate it.
    with self.app.test_request_context('/'):
        self.app.preprocess_request()
        # Drain the queue synchronously with an in-process worker.
        SimpleWorker([g.queue], connection=g.queue.connection).work(burst=True)
def test_func__reset_password__ok__nominal_case(self, user_api_factory,
                                                mailhog, app_config):
    """A password reset request must queue and deliver a notification mail."""
    uapi = user_api_factory.get()
    # NOTE(review): the email literal below looks redacted
    # ('*****@*****.**') — verify against the original fixture.
    current_user = uapi.get_one_by_email("*****@*****.**")
    uapi.reset_password_notification(current_user, do_save=True)
    transaction.commit()
    # Send mail async from redis queue
    redis = get_redis_connection(app_config)
    queue = get_rq_queue(redis, "mail_sender")
    worker = SimpleWorker([queue], connection=queue.connection)
    worker.work(burst=True)
    # check mail received
    response = mailhog.get_mailhog_mails()
    headers = response[0]["Content"]["Headers"]
    assert headers["From"][0] == "Tracim Notifications <test_user_from+0@localhost>"
    assert headers["To"][0] == "Global manager <*****@*****.**>"
    assert headers["Subject"][0] == "[Tracim] A password reset has been requested"
def setUp(self):
    """Wire up a Postman with fake Redis, two queues and a burst worker."""
    conn = FakeStrictRedis()

    def make_queue(name):
        # Both queues share the fake connection and the same TTL policy.
        return Queue(name, connection=conn,
                     failed_ttl=DELETE_FAILED_TIMEOUT,
                     default_timeout=DELETE_FINISHED_TIMEOUT)

    main_queue = make_queue("test_medium")
    scheduled_queue = make_queue("test_scheduled")
    self.worker = SimpleWorker([main_queue, scheduled_queue],
                               connection=main_queue.connection)
    self.postman = Postman({
        "viber": Viber(),
        "whatsapp": WhatsApp(),
        "telegram": Telegram(),
    })
    self.postman.redis_conn = conn
    self.postman.scheduled_queue = scheduled_queue
    self.postman.main_queue = main_queue