def test_rq_namespace(self): """Make sure that user can access only their respective namespace. """ # Current bench ID bench_id = frappe.conf.get('bench_id') conn = get_redis_conn() conn.set('rq:queue:test_bench1:abc', 'value') conn.set(f'rq:queue:{bench_id}:abc', 'value') # Create new Redis Queue user tmp_bench_id = 'test_bench1' username, password = tmp_bench_id, 'password1' conn.acl_deluser(username) frappe.conf.update({'bench_id': tmp_bench_id}) _ = RedisQueue(conn).add_user(username, password) test_bench1_conn = RedisQueue.get_connection(username, password) self.assertEqual(test_bench1_conn.get('rq:queue:test_bench1:abc'), b'value') # User should not be able to access queues apart from their bench queues with self.assertRaises(redis.exceptions.NoPermissionError): test_bench1_conn.get(f'rq:queue:{bench_id}:abc') frappe.conf.update({'bench_id': bench_id}) conn.acl_deluser(username)
def get_info(show_failed=False):
    """Return a list of job summaries for the current site.

    Includes jobs currently held by workers and jobs waiting in every
    non-failed queue; when ``show_failed`` is truthy, up to 10 jobs from
    the ``failed`` queue are appended as well.
    """
    connection = get_redis_conn()
    all_queues = Queue.all(connection)
    all_workers = Worker.all(connection)
    jobs = []

    def record(job, queue_name):
        # Only report jobs that belong to the current site.
        if job.kwargs.get('site') != frappe.local.site:
            return
        display_name = job.kwargs.get('kwargs', {}).get('playbook_method') \
            or str(job.kwargs.get('job_name'))
        entry = {
            'job_name': display_name,
            'status': job.status,
            'queue': queue_name,
            'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
            'color': colors[job.status],
        }
        if job.exc_info:
            entry['exc_info'] = job.exc_info
        jobs.append(entry)

    # Jobs currently being executed by workers.
    for worker in all_workers:
        current = worker.get_current_job()
        if current:
            record(current, worker.name)

    # Jobs queued and waiting (skip the failed queue here).
    for queue in all_queues:
        if queue.name == 'failed':
            continue
        for queued in queue.get_jobs():
            record(queued, queue.name)

    # Optionally show a sample of failed jobs.
    if cint(show_failed):
        for queue in all_queues:
            if queue.name == 'failed':
                for failed in queue.get_jobs()[:10]:
                    record(failed, queue.name)

    return jobs
def get_info(show_failed=False):
    """Return summaries of the current site's background jobs.

    Covers jobs running on workers and jobs waiting in non-failed
    queues; a truthy ``show_failed`` adds up to 10 failed jobs.
    """
    redis_connection = get_redis_conn()
    queue_list = Queue.all(redis_connection)
    worker_list = Worker.all(redis_connection)
    collected = []

    def add_job(job, queue_name):
        # Skip jobs enqueued by other sites.
        if job.kwargs.get('site') != frappe.local.site:
            return
        inner = job.kwargs.get('kwargs', {})
        info = {
            'job_name': inner.get('playbook_method') or str(job.kwargs.get('job_name')),
            'status': job.status,
            'queue': queue_name,
            'creation': format_datetime(job.created_at),
            'color': colors[job.status],
        }
        if job.exc_info:
            info['exc_info'] = job.exc_info
        collected.append(info)

    # Jobs being executed right now.
    for worker in worker_list:
        active = worker.get_current_job()
        if active:
            add_job(active, worker.name)

    # Pending jobs in every queue except 'failed'.
    for queue in queue_list:
        if queue.name != 'failed':
            for pending in queue.get_jobs():
                add_job(pending, queue.name)

    # A small sample of failed jobs, on request.
    if cint(show_failed):
        for queue in queue_list:
            if queue.name == 'failed':
                for failed in queue.get_jobs()[:10]:
                    add_job(failed, queue.name)

    return collected
def get_all_tasks_site(site):
    """Re-register every enabled "Job Scheduler" task of *site* with rq.

    Connects to the site, fetches all enabled Job Scheduler documents and
    schedules each one via ``set_schedule``. Errors are printed (not raised)
    so that callers iterating over multiple sites can continue with the rest.
    """
    try:
        print("site: {0}".format(site))
        frappe.init(site=site)
        frappe.connect()
        tasks = frappe.get_all("Job Scheduler", filters={"enabled": "1"}, fields=[
            "name", "job_id", "method", "kwargs", "run", "minute", "hour",
            "day_of_week", "day_of_month", "cron_style", "queue"
        ])
        conn = get_redis_conn()  # hoisted: one connection for all tasks
        for task in tasks:
            # BUG FIX: the original always passed task.cron_style to
            # set_schedule, which is empty for non-"Cron Style" schedules;
            # build the cron expression the same way validate() does.
            if task.run == "Cron Style":
                cron = task.cron_style
                print("cron string: {0} ".format(cron))
            else:
                cron = get_cron_string(task)
                print(cron)
            set_schedule(conn, task, cron)
    except Exception:
        # it should try to enqueue other sites; narrow from bare `except:`
        # so KeyboardInterrupt/SystemExit still propagate
        print(frappe.get_traceback())
    finally:
        frappe.destroy()
def remove_failed_jobs():
    """Delete all failed jobs (registry entry and job data) from every queue.

    Iterates each queue's FailedJobRegistry and removes every job it can
    still fetch. Jobs whose data has already expired are skipped instead of
    crashing — ``fetch_job`` returns None for them, and the original code
    would have passed that None straight into ``fail_registry.remove``.
    """
    conn = get_redis_conn()
    queues = get_queues()
    for queue in queues:
        fail_registry = queue.failed_job_registry
        for job_id in fail_registry.get_job_ids():
            job = queue.fetch_job(job_id)
            # Guard against expired/missing job data (consistent with get_info).
            if job is not None:
                fail_registry.remove(job, delete_job=True)
def test_adding_redis_user(self):
    """A user from the generated ACL list can be added and then looked up."""
    username, password = RedisQueue.gen_acl_list()[1]['bench']
    conn = get_redis_conn()
    conn.acl_deluser(username)  # start from a clean slate

    _ = RedisQueue(conn).add_user(username, password)
    self.assertTrue(conn.acl_getuser(username))

    conn.acl_deluser(username)  # clean up after ourselves
def validate(self):
    """Validate the schedule fields and (re)schedule or cancel the rq job.

    Each run type validates only the fields it uses, then derives a cron
    expression; "Cron Style" takes the user-supplied string verbatim.
    Unknown run types leave ``cron`` as None, exactly as before.
    """
    cron = None
    if self.run == "Cron Style":
        check_cron(str(self.cron_string))
        cron = str(self.cron_string)
    elif self.run in ("Hourly", "Daily", "Weekly", "Monthly", "Yearly"):
        # Run-type-specific field checks, in the same order as before:
        # day-of-week / day-of-month first, then hours, then minutes.
        if self.run == "Weekly":
            check_day_of_week(self.day_of_week)
        elif self.run == "Monthly":
            check_day_of_month(self.run, self.day_of_month)
        elif self.run == "Yearly":
            check_day_of_month(self.run, self.day_of_month, self.month)
        if self.run != "Hourly":
            check_hours(self.hour)
        check_minutes(self.minute)
        cron = get_cron_string(self)

    if self.enabled:
        self.job_id = set_schedule(get_redis_conn(), self, cron)
        frappe.msgprint(self.run + " Job scheduled, id = " + self.job_id)
    elif self.job_id:
        self.job_id = cancel_job(get_redis_conn(), self.job_id)
        frappe.msgprint("Job disabled")
def get_info(show_failed=False) -> List[Dict]:
    """Return job summaries for the current site.

    Covers jobs held by workers, jobs queued in non-failed queues and —
    when ``show_failed`` is truthy — each queue's failed-job registry.
    ``show_failed`` may arrive as a JSON string from the client.
    """
    if isinstance(show_failed, str):
        show_failed = json.loads(show_failed)

    connection = get_redis_conn()
    queues = Queue.all(connection)
    workers = Worker.all(connection)
    jobs: List[Dict] = []

    def collect(job: "Job", queue_name: str) -> None:
        # Only summarize jobs enqueued by the current site.
        if job.kwargs.get("site") != frappe.local.site:
            return
        wrapped_kwargs = job.kwargs.get("kwargs", {})
        status = job.get_status()
        summary = {
            "job_name": wrapped_kwargs.get("playbook_method")
            or wrapped_kwargs.get("job_type")
            or str(job.kwargs.get("job_name")),
            "status": status,
            "queue": queue_name,
            "creation": format_datetime(convert_utc_to_user_timezone(job.created_at)),
            "color": JOB_COLORS[status],
        }
        if job.exc_info:
            summary["exc_info"] = job.exc_info
        jobs.append(summary)

    # show worker jobs
    for worker in workers:
        active = worker.get_current_job()
        if active:
            collect(active, worker.name)

    for queue in queues:
        # show active queued jobs
        if queue.name != "failed":
            for queued_job in queue.jobs:
                collect(queued_job, queue.name)

        # show failed jobs, if requested
        if show_failed:
            registry = queue.failed_job_registry
            for job_id in registry.get_job_ids():
                failed_job = queue.fetch_job(job_id)
                if failed_job:
                    collect(failed_job, queue.name)

    return jobs
def get_info(show_failed=False) -> List[Dict]:
    """Summarize the current site's background jobs.

    Includes worker-held jobs, queued jobs of non-failed queues and,
    if ``show_failed`` is truthy, entries from each failed-job registry.
    A string ``show_failed`` is decoded as JSON first.
    """
    if isinstance(show_failed, str):
        show_failed = json.loads(show_failed)

    conn = get_redis_conn()
    queues = Queue.all(conn)
    workers = Worker.all(conn)
    jobs = []

    def describe(job: 'Job', queue_name: str):
        # Build the summary dict for one job, or None when the job
        # belongs to a different site.
        if job.kwargs.get('site') != frappe.local.site:
            return None
        inner = job.kwargs.get('kwargs', {})
        summary = {
            'job_name': inner.get('playbook_method')
                or inner.get('job_type')
                or str(job.kwargs.get('job_name')),
            'status': job.get_status(),
            'queue': queue_name,
            'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
            'color': JOB_COLORS[job.get_status()]
        }
        if job.exc_info:
            summary['exc_info'] = job.exc_info
        return summary

    # show worker jobs
    for worker in workers:
        current = worker.get_current_job()
        if current:
            entry = describe(current, worker.name)
            if entry is not None:
                jobs.append(entry)

    for queue in queues:
        # show active queued jobs
        if queue.name != 'failed':
            for queued in queue.jobs:
                entry = describe(queued, queue.name)
                if entry is not None:
                    jobs.append(entry)

        # show failed jobs, if requested
        if show_failed:
            registry = queue.failed_job_registry
            for job_id in registry.get_job_ids():
                failed = queue.fetch_job(job_id)
                if failed:
                    entry = describe(failed, queue.name)
                    if entry is not None:
                        jobs.append(entry)

    return jobs
def test_job_method(**kwargs):
    """Smoke-test helper (Python 2): empties the rq failed queue, prints the
    jobs known to the scheduler, and echoes whatever kwargs it was called
    with. Output-only; returns nothing."""
    print " ********* start jobs execution ".encode("utf-8")
    frappe.init('')
    conn = get_redis_conn()
    print "** empty failed queue"
    # Drain the failed queue; `e` is the number of emptied jobs (unused).
    q = get_failed_queue(connection=conn)
    e = q.empty()
    print "** get scheduled jobs"
    # List everything currently registered with the rq scheduler.
    scheduler = Scheduler(connection=conn)
    print "jobs scheduled : {0}".format(scheduler.get_jobs())
    print "** get kwards args"
    print "kwargs = {0}".format(kwargs)
    print " ********* end jobs execution ".encode("utf-8")
def test_remove_failed_jobs(self):
    """remove_failed_jobs() empties the short queue's failed-job registry."""
    frappe.enqueue(method="frappe.tests.test_background_jobs.fail_function", queue="short")

    # wait for enqueued job to execute (and fail)
    time.sleep(2)

    conn = get_redis_conn()
    short_qname = generate_qname("short")
    queues = Queue.all(conn)

    # The deliberately failing job must have landed in the registry.
    for queue in queues:
        if queue.name == short_qname:
            self.assertGreater(queue.failed_job_registry.count, 0)

    remove_failed_jobs()

    # ...and be gone after cleanup.
    for queue in queues:
        if queue.name == short_qname:
            self.assertEqual(queue.failed_job_registry.count, 0)
def wrapper(*args, **kwargs):
    # Gate the wrapped callable on the redis server version: if the
    # server is older than `version` (closure variable from the
    # decorator), do nothing and return None.
    server_info = get_redis_conn().execute_command('info')
    if version_tuple(server_info['redis_version']) < version_tuple(version):
        return
    return func(*args, **kwargs)
def get_workers():
    """Return every rq worker registered on the shared redis connection."""
    with Connection(get_redis_conn()):
        return Worker.all()
def on_trash(self):
    """Cancel this document's scheduled rq job when the document is deleted."""
    conn = get_redis_conn()
    cancel_job(conn, self.job_id)