class WorkerConfig:
    """arq worker configuration for calendar-notification jobs."""

    # Task coroutines this worker is allowed to execute.
    functions = (send_calendar_notifications, send_calendar_multi_notifications)
    # Scheduled jobs: notification scheduling daily at midnight; token refresh
    # on weekday 1 at midnight (weekday 1 is Tuesday in arq's convention — confirm).
    cron_jobs = (
        cron(schedule_calendar_notifications, hour=0, minute=0, second=0),
        cron(schedule_tokens_refresh, weekday=1, hour=0, minute=0, second=0),
    )
    # Do not retain job results in Redis.
    keep_result = 0
    redis_settings = get_redis_settings()
    on_startup = on_startup
    on_shutdown = on_shutdown
class WorkerSettings(arq.worker.Worker):
    """arq worker settings for the board-update job."""

    redis_settings = constants.ARQ_REDIS
    max_jobs = constants.MAX_JOBS
    max_tries = constants.MAX_QUEUED_JOB_RETRIES
    on_startup = startup
    on_shutdown = shutdown
    # Fire update_board_job every 10 seconds (seconds 0, 10, ..., 50).
    cron_jobs = [arq.cron(update_board_job, second=set(range(0, 60, 10)))]
class WorkerSettings:
    """Settings for the ARQ worker."""

    redis_settings = redis_settings
    functions = [add_pair_stat]
    # TODO: change the schedule to once per hour.
    # Currently update_all_stats fires every two minutes.
    cron_jobs = [cron(update_all_stats, minute=set(range(0, 60, 2)))]
class WorkerSettings:
    """arq worker settings for report generation and file cleanup."""

    redis_settings = REDIS
    job_timeout = JOB_TIMEOUT
    max_jobs = MAX_JOBS
    queue_name = REDIS_QUEUE
    # Cleanup fires hourly (at minute 0, second 0) and once immediately on startup.
    cron_jobs = [cron(cleanup_files, run_at_startup=True, minute=0, second=0)]
    functions = [create_custom_report, create_summary_unit_report]
class WorkerSettings:
    """arq worker settings for booking/enquiry processing and image jobs."""

    functions = [
        process_image,
        process_image_mass,
        submit_booking,
        submit_enquiry,
        update_contractors,
        update_enquiry_options,
    ]
    cron_jobs = [
        # Purge old appointments every three hours, on the hour (0, 3, ..., 21).
        cron(delete_old_appointments, hour=set(range(0, 24, 3)), minute=0),
        # Restart the worker daily at 03:00.
        cron(kill_worker, hour=3, minute=0),
    ]
    on_startup = startup
    on_shutdown = shutdown
class WorkerSettings:
    """arq worker settings for the e-mail worker and aggregation refresh."""

    max_jobs = 20
    keep_result = 5
    # One try per value in email_retrying plus the initial attempt,
    # so every retry delay gets used.
    max_tries = len(email_retrying) + 1
    functions = worker_functions
    on_startup = startup
    on_shutdown = shutdown
    # Refresh the aggregation view hourly at minute 12; allow up to 30 minutes.
    cron_jobs = [cron(update_aggregation_view, minute=12, timeout=1800)]
class TimingWorkerSettings:
    """arq settings for the worker serving the dedicated timing queue."""

    on_startup = startup
    on_shutdown = shutdown
    queue_name = ArqQueue.timing.value
    redis_settings = RedisSettings(**settings.ARQ)
    cron_jobs = [
        # Daily timing check at 00:00.
        cron(timing_monitor, hour=0, minute=0),
        # Disabled job kept for reference:
        # cron(timing_, minute={x for x in range(0, 60, 3)})
    ]
class WorkerSettings:
    """arq worker settings for team and URL/e-mail notification tasks."""

    redis_settings = settings
    on_startup = startup
    on_shutdown = shutdown
    cron_jobs = [
        # Weekly kickoff: Mondays at 06:05.
        cron(team_tasks.initialize_weekly_notifications, weekday='mon', hour=6, minute=5),
        # Daily kickoff on weekdays 0-4 (Mon-Fri in arq's convention) at 06:05.
        cron(team_tasks.initialize_daily_notifications, weekday=set(range(5)), hour=6, minute=5),
    ]
    functions = [
        team_tasks.create_periodic_notifications,
        team_tasks.create_live_notifications,
        notification_tasks.send_url_notifications,
        notification_tasks.send_email_notifications,
    ]
async def test_schedule__success(pg_cursor, site_check_pg_manager, event_pg_manager, arq_worker, httpx_mock):
    """End-to-end: schedule an availability check, run it, and transfer the result to Postgres."""
    # Both stores start empty.
    assert [row async for row in site_check_pg_manager.get_all()] == []
    assert [row async for row in event_pg_manager.get_all()] == []

    await site_check_pg_manager.create("http://test.com", "test")

    # Stage 1: the scheduler enqueues exactly one availability check.
    scheduler = arq_worker(
        cron_jobs=[arq.cron(schedule_availability_checks, hour=1, run_at_startup=True)],
        queue_name=CheckSchedulerWorkerSettings.queue_name,
    )
    await scheduler.main()
    assert (scheduler.jobs_complete, scheduler.jobs_failed, scheduler.jobs_retried) == (1, 0, 0)

    # Stage 2: the checker performs the HTTP request (mocked to return 200 "ok").
    httpx_mock.add_response(status_code=200, data=b"ok")
    checker = arq_worker(
        functions=[availability_check],
        queue_name=AvailabilityCheckerWorkerSettings.queue_name,
    )
    await checker.main()
    assert (checker.jobs_complete, checker.jobs_failed, checker.jobs_retried) == (1, 0, 0)

    # Stage 3: the transfer job moves the recorded event into Postgres.
    transfer = arq_worker(
        cron_jobs=[arq.cron(kafka_to_pg_transfer, hour=1, run_at_startup=True)],
        queue_name=KafkaToPostgresTransferWorkerSettings.queue_name,
    )
    await transfer.main()
    assert (transfer.jobs_complete, transfer.jobs_failed, transfer.jobs_retried) == (1, 0, 0)

    # Exactly one event recorded, matching the checked URL and the mocked status.
    events = [row async for row in event_pg_manager.get_all()]
    assert len(events) == 1
    assert events[0].url == "http://test.com"
    assert events[0].status_code == 200
class KafkaToPostgresTransferWorkerSettings:
    """arq settings for the Kafka -> Postgres transfer worker."""

    redis_settings = arq.connections.RedisSettings(**config.REDIS_CONFIG)
    queue_name = "arq:queue:kafka_to_postgres_transfer"
    on_startup = partial(startup, postgres=True, kafka_consumer=True)
    on_shutdown = shutdown
    # This job could in principle run in parallel, but the runner would need a
    # rewrite to support that — hence the workaround: a unique cron job fired
    # every minute with an infinite loop inside.
    cron_jobs = [
        arq.cron(kafka_to_pg_transfer, unique=True, minute=set(range(60)))
    ]
class CheckSchedulerWorkerSettings:
    """arq settings for the availability-check scheduler worker."""

    redis_settings = arq.connections.RedisSettings(**config.REDIS_CONFIG)
    queue_name = "arq:queue:check_scheduler"
    on_startup = partial(startup, postgres=True, redis=True)
    on_shutdown = shutdown
    # Scheduling is deliberately single-instance (unique=True): we sacrifice
    # redundancy and assume workers drain checks faster than the queue grows,
    # which keeps the implementation simple.
    cron_jobs = [
        arq.cron(schedule_availability_checks, unique=True, minute=set(range(60)))
    ]
class WorkerSettings:
    """arq worker settings: run `run` during hour 1 with a 10-minute timeout."""

    cron_jobs = [cron(run, hour=1, timeout=600)]
class WorkerSettings:
    """arq worker settings for the mention worker."""

    functions = [mention_users]
    redis_settings = RedisSettings.from_url(REDIS_URL)
    # hour=None is arq's wildcard — the hour is left unconstrained.
    cron_jobs = [cron(flush_to_db, hour=None)]
class WorkerSettings:
    """arq worker settings for the Hacker News scraper."""

    on_startup = arq.on_startup
    on_shutdown = arq.on_shutdown
    redis_settings = redis_settings
    functions: list = FUNCTIONS
    # scrap_hackernews fires when hour == 1; minute is unconstrained (arq wildcard).
    cron_jobs = [cron(scrap_hackernews, hour=1)]
class WorkerSettings:
    """arq worker settings: fire run_regularly at minute 12 of hours 9, 12 and 18."""

    cron_jobs = [cron(run_regularly, hour={9, 12, 18}, minute=12)]
class WorkerSettings:
    """arq worker settings for the mention worker (default Redis connection)."""

    functions = [mention_users]
    # hour=None is arq's wildcard — the hour is left unconstrained.
    cron_jobs = [cron(flush_to_db, hour=None)]