def redis_network():
    use_connection(redis_conn)
    q = Queue('high', connection=redis_conn)
    job = q.enqueue(setNetworkDevs)
    job = q.enqueue(setLocalIP)
    job = q.enqueue(setGlobalIP)
    job = q.enqueue(setPingInfo, get_globalparam("pinghost"), 10)
def get_scheduled_tasks(request):
    from rq import use_connection
    from rq_scheduler import Scheduler
    import json

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler()  # Get a scheduler for the "default" queue
    list_of_job_instances = scheduler.get_jobs()
    jobdata = list()
    for job in list_of_job_instances:
        if "interval" in job.meta:
            interval = job.meta["interval"]
        else:
            interval = 0
        job_dict = {'job_id': job._id,
                    'task': job.description,
                    'period': interval,
                    'args': job.args,
                    'queue': "default"}
        jobdata.append(job_dict)

    # scheduler = Scheduler('parser')  # Get a scheduler for the "parser" queue
    # list_of_job_instances = scheduler.get_jobs()
    #
    # for job in list_of_job_instances:
    #     if "interval" in job.meta:
    #         interval = job.meta["interval"]
    #     else:
    #         interval = 0
    #     job_dict = {'job_id': job._id, 'task': job.description, 'period': interval, 'queue': "parser"}
    #     jobdata.append(job_dict)

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
def update_scheduled_connection(connection):
    ''' schedule a new scrape of the connection source
    interval was changed or the last job was finished
    and the next needs to be scheduled
    '''
    repeating_task = connection.schedule

    # check to see if schedule is available -- abort if not
    # note that ready_to_connect does not verify this
    if not repeating_task.interval:
        return False

    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
        , password=redis_config['password']))
    scheduler = Scheduler()

    # see if this schedule had a job that was already enqueued
    if repeating_task.next_task_id:
        # instantiate the job
        job = Job(id=repeating_task.next_task_id)
        # cancel the old job
        scheduler.cancel(job)

    # determine how many seconds to wait
    delay = _calculate_schedule_delay(repeating_task.interval)

    # start a new job
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , connect_to_source, connection.id)

    # save this id and when it runs next
    repeating_task.update(set__next_task_id=job.id)
    repeating_task.update(set__next_run_time=(datetime.datetime.utcnow()
        + datetime.timedelta(seconds=delay)))
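The `_calculate_schedule_delay` helper used above (and again in the last snippet of this section) is not shown. A minimal sketch of what it might look like, assuming `interval` is a human-readable label; the labels and their mapping are assumptions, not taken from the original project:

# Hypothetical sketch -- the real helper is not part of these snippets.
def _calculate_schedule_delay(interval):
    # map an interval label to a delay in seconds
    seconds_per_interval = {
        'hourly': 60 * 60,
        'daily': 24 * 60 * 60,
        'weekly': 7 * 24 * 60 * 60,
    }
    return seconds_per_interval[interval]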
def main():
    import sys
    sys.path.insert(0, '.')

    opts, args, parser = parse_args()

    use_connection()

    queues = ('default', 'high', 'low')
    sample_calls = [
        (dummy.do_nothing, [], {}),
        (dummy.sleep, [1], {}),
        (dummy.fib, [8], {}),              # normal result
        (dummy.fib, [24], {}),             # takes pretty long
        (dummy.div_by_zero, [], {}),       # 5 / 0 => div by zero exc
        (dummy.random_failure, [], {}),    # simulate random failure (handy for requeue testing)
    ]

    for i in range(opts.count):
        import random
        f, args, kwargs = random.choice(sample_calls)
        q = Queue(random.choice(queues))
        q.enqueue(f, *args, **kwargs)
def main():  # pragma: no cover
    tornado.options.parse_command_line()
    connection = redis.client.Redis(settings.REDIS_HOST, settings.REDIS_PORT)
    use_connection(connection)
    q = Queue()
    job = q.enqueue(handlers.sample_queue_job)
    for i in range(12):
        if job.result:
            break
        sleep(i / 10.0)
        if i > 0 and not i % 3:
            print("Waiting to see if Queue workers are awake...")
    else:
        raise SystemError("Queue workers not responding")
    http_server = tornado.httpserver.HTTPServer(Application())
    print("Starting tornado on port", options.port)
    http_server.listen(options.port)
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        pass
def setup_redis(args):
    if args.url is not None:
        redis_conn = redis.from_url(args.url, db=args.db)
    else:
        redis_conn = redis.Redis(host=args.host, port=args.port, db=args.db,
                                 password=args.password)
    use_connection(redis_conn)
def r(request): if (request.method == "GET"): path = request.GET.get('path', '/home/hduser') mode = request.GET.get('mode', 'upload') server = request.GET.get('server', 'amazonserver') username = request.GET.get('username', '') password = request.GET.get('password', '') use_connection() queue = Queue(connection=Redis()) y = queue.enqueue(main, path, mode, server, username, password) #x=print_status(y.key) #y=queue.enqueue(add,5,4) x = y.id #print type(x) #print_status(x) #time.sleep(5) #return HttpResponse(y.result) job = Job().fetch(x) time.sleep(4) z = 1 while z: if (job.is_finished): return render(request, 'visualization.html', {'question': question}) elif (job.is_failed): return HttpResponse("failed") #else: # return render(request,'ex.html', {'x': x}) return HttpResponse("yeah") return HttpResponse("not allowed to access")
def r(request): if(request.method=="POST"): path=request.POST.get('path','~') mode=request.POST.get('mode','upload') server=request.POST.get('server','amazonserver') username=request.POST.get('username','') password=request.POST.get('password','') use_connection() queue=Queue(connection=Redis()) y=queue.enqueue_call(func=main,args=(path,mode,server,username,password,),timeout=500) x=y.id #z=print_status() job=Job().fetch(x) time.sleep(4) z=1 while z: if(job.is_finished): return render(request, 'print_status.html', {'question': 'question'}) elif(job.is_failed): return HttpResponse("failed") #return HttpResponse("yeah") #url=reverse('print_status',args=(),kwargs={'id1':x}) return HttpResponseRedirect("url")
def handle(self, *args, **options):
    pid = options.get('pid')
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    try:
        # Instantiate a worker
        worker_class = import_attribute(options['worker_class'])
        queues = get_queues(*args, queue_class=import_attribute(options['queue_class']))
        w = worker_class(
            queues,
            connection=queues[0].connection,
            name=options['name'],
            exception_handlers=get_exception_handlers() or None,
            default_worker_ttl=options['worker_ttl']
        )

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)
        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
        sys.exit(1)
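The LocalStack comment above is worth illustrating: once the worker's connection is pushed, job bodies can call get_current_job() without any connection argument. A minimal sketch; the function and its meta key are illustrative, not part of the original project:

from rq import get_current_job

def report_progress(n_items):
    # resolves the running Job via the connection pushed by use_connection()
    job = get_current_job()
    for i in range(n_items):
        job.meta['progress'] = float(i) / n_items
        job.save_meta()  # persist meta back to Redis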
def add_scheduled_task(request):
    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')
    from rq import use_connection
    from rq_scheduler import Scheduler
    from datetime import datetime

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler(queue)  # Get a scheduler for the requested queue
    if parameters:
        scheduler.schedule(
            scheduled_time=datetime.now(),  # Time for first execution
            func=getattr(tasks, task),      # Function to be queued
            args=[int(parameters)],
            interval=period,                # Time before the function is called again, in seconds
            repeat=None                     # Repeat this number of times (None means repeat forever)
        )
    else:
        scheduler.schedule(
            scheduled_time=datetime.now(),  # Time for first execution
            func=getattr(tasks, task),      # Function to be queued
            interval=period,                # Time before the function is called again, in seconds
            repeat=None                     # Repeat this number of times (None means repeat forever)
        )
    return HttpResponse('Success')
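A complementary sketch for removing such a task; the helper is hypothetical and not from the original project, but rq-scheduler's Scheduler.cancel does accept a job or a job id:

from rq import use_connection
from rq_scheduler import Scheduler

def remove_scheduled_task(job_id, queue='default'):
    use_connection()              # same default connection as above
    scheduler = Scheduler(queue)
    scheduler.cancel(job_id)      # stops future runs of the scheduled job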
def start_lib_worker(self, worker):
    # Preload libraries
    # from newsle.core.models.orm.stat_configs import StatConfigs

    # Provide queue names to listen to as arguments to this script,
    # similar to rqworker
    from rq import Queue, Connection, Worker, use_connection
    import redis

    if "server" in worker and len(worker["server"]) > 0:
        server = worker["server"]
        host = server.get("host", "127.0.0.1")
        port = server.get("port", "6379")
        password = server.get("password", None)
        redis_conn = redis.StrictRedis(host=host, port=port, db=None,
                                       password=password, unix_socket_path=None)
        use_connection(redis_conn)

    with Connection():
        queues = ["default"]
        if "queues" in worker and len(worker["queues"]) > 0:
            queues = worker["queues"]
        qs = map(Queue, queues)
        w = Worker(qs)
        w.work()
def submit_work(executor, args, queue="default"):
    use_connection(redis_connection)
    q = Queue(queue)
    result = q.enqueue(executor, args)
    return result
def handle(self, *args, **options):
    pid = options.get('pid')
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    try:
        # Instantiate a worker
        worker_class = import_attribute(options['worker_class'])
        queues = get_queues(*options.get('queues'),
                            queue_class=import_attribute(options['queue_class']))
        w = worker_class(queues,
                         connection=queues[0].connection,
                         name=options['name'],
                         exception_handlers=get_exception_handlers() or None,
                         default_worker_ttl=options['worker_ttl'])

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)
        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def main():
    redis_conn = Redis(decode_responses=True)
    use_connection(redis_conn)
    q = Queue()
    sql_con, sql_cur = setup_db()
    conns = Connections(q, sql_con, sql_cur)

    cohort_size = int(input("Enter cohort size: "))
    gen = int(input("Enter current generation (0 to start from scratch): "))
    if gen == 0:
        gen = 1
        cohort = evolution.first_generation(cohort_size)
        queue_cohort(conns, gen, cohort)

    while True:
        wait_for_gen(conns, gen)
        old_cohort = fetch_results_from_gen(conns, gen)
        highscore = max(i[1] for i in old_cohort)
        logger.info("Generation complete. Max score: %.2f", highscore)
        if gen % 10 == 0:
            send_message(
                "Generation Complete",
                "Gen {0}, max score {1:.2f}".format(gen, highscore))
        new_cohort = evolution.new_generation(old_cohort)
        members = len(new_cohort)
        if members < cohort_size:  # timeout or fails
            new_cohort += evolution.first_generation(cohort_size - members)
        gen += 1
        logger.info("Enqueueing tasks for generation %d...", gen)
        queue_cohort(conns, gen, new_cohort)
        logger.info("Tasks queued, waiting for completion...")
def r(request): if (request.method == "POST"): path = request.POST.get('path', '~') mode = request.POST.get('mode', 'upload') server = request.POST.get('server', 'amazonserver') username = request.POST.get('username', '') password = request.POST.get('password', '') use_connection() queue = Queue(connection=Redis()) y = queue.enqueue_call(func=main, args=( path, mode, server, username, password, ), timeout=500) x = y.id #z=print_status() job = Job().fetch(x) time.sleep(4) z = 1 while z: if (job.is_finished): return render(request, 'print_status.html', {'question': 'question'}) elif (job.is_failed): return HttpResponse("failed") #return HttpResponse("yeah") #url=reverse('print_status',args=(),kwargs={'id1':x}) return HttpResponseRedirect("url")
def setup_redis(args):
    if args.url is not None:
        redis_conn = redis.StrictRedis.from_url(args.url)
    else:
        redis_conn = redis.StrictRedis(host=args.host, port=args.port, db=args.db,
                                       password=args.password,
                                       unix_socket_path=args.socket)
    use_connection(redis_conn)
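Both setup_redis variants read connection parameters off an args object, presumably built with argparse. A minimal sketch of the expected wiring; the flag names are inferred from the attribute accesses, not confirmed by the original code:

import argparse

def parse_redis_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--url', default=None, help='redis:// URL; overrides host/port')
    parser.add_argument('--host', default='localhost')
    parser.add_argument('--port', type=int, default=6379)
    parser.add_argument('--db', type=int, default=0)
    parser.add_argument('--password', default=None)
    parser.add_argument('--socket', default=None, help='unix socket path')
    return parser.parse_args()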
def handle(self, *args, **kwargs):
    try:
        worker = get_worker(NOTIFICATIONS_QUEUE_NAME)
        use_connection(worker.connection)
        worker.push_exc_handler(notifications_error_handler)
        worker.work()
    except ConnectionError as e:
        raise SystemExit(e)
def handle(self, *args, **kwargs):
    try:
        worker = get_worker(QUEUE_NAME)
        use_connection(worker.connection)
        worker.push_exc_handler(notifications_error_handler)
        worker.work()
    except ConnectionError as e:
        raise SystemExit(e)
def renderings(org_label, project_label, rendering_id):
    ''' deleting or downloading renderings
    /organizations/aquaya/projects/water-quality/renderings/4cmb1?delete=true
        : remove a rendering from the system and s3
    /organizations/aquaya/projects/water-quality/renderings/4cmb1?download=true
        : download a copy of the rendering
    '''
    user = User.objects(email=session['email'])[0]

    orgs = Organization.objects(label=org_label)
    if not orgs:
        abort(404)
    org = orgs[0]

    # permission-check
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to view a project but was \
            denied for want of admin rights' % session['email'])
        abort(404)

    # find the project
    projects = Project.objects(label=project_label, organization=org)
    if not projects:
        abort(404)

    # find the specified rendering
    renderings = Rendering.objects(id=rendering_id)
    if not renderings:
        abort(404)
    rendering = renderings[0]

    # save the report for later redirect
    report = rendering.report

    if request.args.get('delete', '') == 'true':
        # remove the rendering
        utilities.delete_rendering(rendering, user.email)
        flash('Rendering successfully deleted.', 'success')
        return redirect(url_for('reports'
            , org_label=report.project.organization.label
            , project_label=report.project.label
            , report_label=report.label))

    if request.args.get('download', '') == 'true':
        absolute_filename = utilities.download_rendering_from_s3(rendering)

        # delay the deletion so we have time to serve the file
        redis_config = app.config['REDIS_CONFIG']
        use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
        scheduler = Scheduler()
        scheduler.enqueue_in(datetime.timedelta(seconds=60)
            , delete_local_file, absolute_filename)

        return send_file(absolute_filename, as_attachment=True)

    else:
        abort(404)
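delete_local_file is scheduled above but not shown; a plausible sketch matching only the call signature, not the original implementation:

import os

def delete_local_file(absolute_filename):
    # hypothetical implementation: remove the served file,
    # tolerating the case where it is already gone
    try:
        os.remove(absolute_filename)
    except OSError:
        pass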
def __init__(self):
    import rq
    import redis
    from rq_scheduler import Scheduler

    self.conn = redis.from_url(settings.REDIS_URL)
    self.queue = rq.Queue("default", connection=self.conn,
                          default_timeout=RedisWorker.TIMEOUT)
    self.scheduler = Scheduler("high", connection=self.conn)
    rq.use_connection(self.conn)
def handle(self, *args, **options):
    try:
        w = get_worker(*args)

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)
        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
def pidis_killer():
    redis_conn = Redis()
    use_connection(redis_conn)
    q = Queue('high', connection=redis_conn)
    # grab the ids before emptying the queue -- calling q.empty() first
    # left nothing to cancel
    jobs = q.job_ids
    for j in jobs:
        cancel_job(j)
    q.empty()
    killZombies()
def job(args):
    # [Sleep Time], [Queues]
    use_connection()
    tsleep = args[0].pop()
    time.sleep(float(tsleep))
    if len(args[1]) > 1:
        current_queue = args[1].pop()
        print(current_queue)
        q = Queue(str(current_queue))
        q.enqueue(job, args)
    return True
def main():
    max_workers = 5
    if "GOL_WORKERS" in os.environ:
        max_workers = int(os.getenv("GOL_WORKERS"))
    processes = []
    q = None
    for _ in range(0, 3):
        try:
            rq.use_connection(redis.Redis())
            q = rq.Queue("lifeboxQueue")
            break
        except Exception:
            time.sleep(5)
    if q is None:
        print("Could not establish connection to redis, exiting")
        sys.exit(99)
    for _ in range(0, max_workers):
        try:
            proc = multiprocessing.Process(target=rq.Worker(q).work)
            proc.start()
            processes.append(proc)
        except Exception as ex:
            print(ex)
            traceback.print_exc()
    try:
        while True:
            tbd = []
            for proc in processes:
                if not proc.is_alive():
                    try:
                        tbd.append(proc)
                        newproc = multiprocessing.Process(
                            target=rq.Worker(q).work)
                        newproc.start()
                        processes.append(newproc)
                    except Exception as ex:
                        print(ex)
                        traceback.print_exc()
            for proc in tbd:
                processes.remove(proc)
            time.sleep(10)
    except KeyboardInterrupt:
        print("interrupted, exiting and killing")
    finally:
        for proc in processes:
            proc.kill()
def redis_connection():
    """
    Return the currently open redis connection object. If there is no
    connection currently open, one is created using the url specified in
    config['redis', 'url']
    """
    conn = rq.get_current_connection()
    if conn:
        return conn
    rq.use_connection(redis=redis.Redis.from_url(config["redis", "url"]))
    return rq.get_current_connection()
def redis_connection():
    """
    Return the currently open redis connection object. If there is no
    connection currently open, one is created using the keyword arguments
    specified in config.REDIS_CONNECTION_KWARGS
    """
    conn = rq.get_current_connection()
    if conn:
        return conn
    kwargs = config.REDIS_CONNECTION_KWARGS
    rq.use_connection(redis=redis.Redis(**kwargs))
    return rq.get_current_connection()
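Usage sketch for the two helpers above: because redis_connection() pushes the connection onto RQ's connection stack, later Queue() construction can omit an explicit connection. The wrapper and the dotted task path are assumptions for illustration:

import rq

def enqueue_cleanup():
    redis_connection()                 # ensure a current connection exists
    q = rq.Queue()                     # picks up the pushed connection
    return q.enqueue('tasks.cleanup')  # enqueue by dotted path (assumed module)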
def run(self, debug=False):
    db = core.get_db()
    use_connection()
    q = Queue()
    for list_getter, obj_processor in self.pipelines:
        objects = list_getter(db)
        for obj in objects:
            if debug:
                obj_processor(obj)
            else:
                q.enqueue(obj_processor, obj)
def approve(proposer, approver):
    """ Send an approve ticket to the queue """
    from .redis_con import get_redis
    from . import work

    redis = get_redis()
    work.unlock()
    use_connection(redis)
    q = Queue(connection=redis)
    q.enqueue(work.approve, args=(),
              kwargs=dict(proposer=proposer, approver=approver))
def handle(self, *args, **options):
    pid = options.get('pid')
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    sentry_dsn = options.get('sentry-dsn')
    if sentry_dsn is None:
        sentry_dsn = getattr(settings, 'SENTRY_DSN', None)

    # Verbosity is defined by default in BaseCommand for all commands
    verbosity = options.get('verbosity')
    if verbosity >= 2:
        level = 'DEBUG'
    elif verbosity == 0:
        level = 'WARNING'
    else:
        level = 'INFO'
    setup_loghandlers(level)

    try:
        # Instantiate a worker
        worker_kwargs = {
            'worker_class': options['worker_class'],
            'queue_class': options['queue_class'],
            'job_class': options['job_class'],
            'name': options['name'],
            'default_worker_ttl': options['worker_ttl'],
        }
        w = get_worker(*args, **worker_kwargs)

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)

        # Close any opened DB connection before any fork
        reset_db_connections()

        if sentry_dsn:
            try:
                from rq.contrib.sentry import register_sentry
                register_sentry(sentry_dsn)
            except ImportError:
                self.stdout.write(
                    self.style.ERROR(
                        "Please install sentry-sdk using `pip install sentry-sdk`"
                    ))
                sys.exit(1)

        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def get_current_job(request):
    from rq import get_current_job
    from rq import use_connection
    from redis import Redis
    from rq import Queue
    import json

    use_connection()
    redis_conn = Redis()
    q = Queue(connection=redis_conn)
    # outside a worker this returns None, which serialises to "null"
    job = get_current_job(q)
    data = json.dumps(job)
    return HttpResponse(data, content_type='application/json')
def handle(self, *args, **options):
    try:
        # Instantiate a worker
        worker_class = import_attribute(options.get('worker_class', 'rq.Worker'))
        queues = get_queues(*args)
        w = worker_class(queues, connection=queues[0].connection)

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)
        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
def print_status(request):
    use_connection()
    queue = Queue(connection=Redis())
    id1 = request.GET.get("x", "")
    return id1  # debug short-circuit: everything below is unreachable
    job = Job.fetch(id1)
    return job.result
    if job.is_finished:
        return "completed loading"
    elif job.is_failed:
        return "failed"
    else:
        return render(request, 'ex.html', {'x': x})
def main():
    use_connection()
    q = Queue('failed', connection=r)
    print(len(q))
    q.delete(delete_jobs=True)

    q = Queue('default', connection=r)
    print(len(q))
    q.delete(delete_jobs=True)

    stations = station_list()
    for station in stations:
        q = Queue(station, connection=r)
        print(len(q))
        q.delete(delete_jobs=True)
def __init__(self, zmq_cont, ip='127.0.0.1', port=6000):
    self.zmq_cont = zmq_cont
    ## M - Machines by N queues Q = (M,N)
    use_connection()
    self.listen = self.zmq_cont.socket(zmq.REP)  # fix for dist network
    self.listen.bind('tcp://*:' + str(port + 1))
    self.pinger = self.zmq_cont.socket(zmq.PUB)
    self.pinger.bind('tcp://*:' + str(port))
    # Important things...
    self.machines = []
    self.queues = []
    self.queue_names = []
def run(self, email, isPreview=False, batchId=""):
    global settings
    global safile_settings
    redis = Redis(settings["redis.host"], settings["redis.port"],
                  password=settings["redis.password"])
    use_connection(redis)
    q = Queue()
    result = q.enqueue(run_build,
                       args=(settings, safile_settings, self.oice_id,
                             self.ks_view_url, self.oice_communication_url,
                             self.og_image_button_url, self.og_image_origin_url,
                             email, isPreview, batchId),
                       timeout=600)
def handle(self, *args, **options):
    pid = options.get('pid')
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    sentry_dsn = options.get('sentry-dsn')
    if sentry_dsn is None:
        sentry_dsn = getattr(settings, 'SENTRY_DSN', None)

    # Verbosity is defined by default in BaseCommand for all commands
    verbosity = options.get('verbosity')
    if verbosity >= 2:
        level = 'DEBUG'
    elif verbosity == 0:
        level = 'WARNING'
    else:
        level = 'INFO'
    setup_loghandlers(level)

    try:
        # Instantiate a worker
        worker_kwargs = {
            'worker_class': options['worker_class'],
            'queue_class': options['queue_class'],
            'job_class': options['job_class'],
            'name': options['name'],
            'default_worker_ttl': options['worker_ttl'],
        }
        w = get_worker(*args, **worker_kwargs)

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)

        # Close any opened DB connection before any fork
        reset_db_connections()

        if sentry_dsn:
            try:
                from rq.contrib.sentry import register_sentry
                register_sentry(sentry_dsn)
            except ImportError:
                self.stdout.write(self.style.ERROR(
                    "Please install sentry-sdk using `pip install sentry-sdk`"))
                sys.exit(1)

        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
        sys.exit(1)
def handle(self, *args, **options):
    pid = options.get('pid')
    if pid:
        with open(os.path.expanduser(pid), "w") as fp:
            fp.write(str(os.getpid()))

    # Verbosity is defined by default in BaseCommand for all commands
    verbosity = options.get('verbosity')
    if verbosity >= 2:
        level = 'DEBUG'
    elif verbosity == 0:
        level = 'WARNING'
    else:
        level = 'INFO'
    setup_loghandlers(level)

    sentry_dsn = options.pop('sentry_dsn')
    if sentry_dsn:
        try:
            configure_sentry(sentry_dsn, **options)
        except ImportError:
            self.stderr.write(
                "Please install sentry-sdk using `pip install sentry-sdk`")
            sys.exit(1)

    try:
        # Instantiate a worker
        worker_kwargs = {
            'worker_class': options['worker_class'],
            'queue_class': options['queue_class'],
            'job_class': options['job_class'],
            'name': options['name'],
            'default_worker_ttl': options['worker_ttl'],
        }
        w = get_worker(*args, **worker_kwargs)

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)

        # Close any opened DB connection before any fork
        reset_db_connections()

        w.work(burst=options.get('burst', False),
               with_scheduler=options.get('with_scheduler', False),
               logging_level=level)
    except ConnectionError as e:
        self.stderr.write(str(e))
        sys.exit(1)
def execute(self):
    # It is always possible that the Redis connection is not yet set
    print("ENTER")
    if not get_current_connection():
        conn = Redis('localhost', settings.REDIS_PORT)
        use_connection(conn)
    if not get_current_connection():
        log.error(u'Unable to create redis connection')

    # use the 'default' queue. We only used this one;
    q = Queue()

    # if the queue is not empty then some old idle workers may have to be cleaned
    if not q.is_empty():
        for w in Worker.all():
            if w.state == 'idle' and q in w.queues:
                log.info(u'Worker %s will die gently' % w.name)
                w.register_death()
def create_rendering(report):
    ''' instantiate a new rendering
    queue it up for pdf-creation and s3-upload
    '''
    creation_time = datetime.datetime.utcnow()

    # create a suitable filename
    filename = '%s--%s.pdf' % (report.label
        , creation_time.strftime('%Y-%m-%dT%H-%M-%SZ'))
    if '..' in filename or filename.startswith('/'):
        # set filename as just the date
        filename = 'report--%s.pdf' % \
            creation_time.strftime('%y-%m-%dT%H-%M-%SZ')

    # null bytes in the original unicode were causing problems
    filename = str(filename)

    # make sure we have access to request context
    with app.test_request_context():
        # the public-facing url of this report
        # used so wkhtmltopdf can run sans auth
        # have to generate url here while we're in the right context
        url = url_for('public_reports'
            , org_label=report.project.organization.label
            , project_label=report.project.label
            , public_label=report.public_label)
        public_url = urlparse.urljoin(app.config['APP_ROOT'], url)

    # create new rendering object
    # punting on saving components for now
    frozen_components = []
    new_rendering = Rendering(
        components = frozen_components
        , creation_time = creation_time
        , filename = filename
        , label = generate_random_string(12)
        , report = report
    )
    new_rendering.save()

    # enqueue pdf-generation with rq
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
        , password=redis_config['password']))
    queue = Queue()
    queue.enqueue(render_report, new_rendering.id, public_url)

    return new_rendering
def __init__(self, machine_name, ip='127.0.0.1', port=6000):
    self.zmq_cont = zmq.Context()
    self.redis = Redis('127.0.0.1', 6379)
    use_connection(self.redis)
    self.opt_recv = self.zmq_cont.socket(zmq.SUB)
    self.opt_recv.setsockopt(zmq.SUBSCRIBE, '')
    self.opt_recv.connect('tcp://' + ip + ':' + str(port))
    self.opt_send = self.zmq_cont.socket(zmq.REQ)  # fix for dist network
    self.opt_send.connect('tcp://' + ip + ':' + str(port + 1))
    self.machine_name = machine_name
    # manager state is a Dictionary mapping queues to number of workers.
    self.state = {}
    # workers is a dictionary mapping queue to lists of workers
    self.workers = {}
    # Dictionary {'queue name': rq.Queue}
    self.queues = {}
def run(self):
    redis = Redis(
        settings["redis.host"],
        settings["redis.port"],
        password=settings["redis.password"],
    )
    use_connection(redis)
    result = Queue().enqueue(transcode_audio_assets, args=(
        settings,
        safile_settings,
        self.job_id,
        self.assets,
        self.asset_files,
    ), timeout=600)
def test_use_connection(self):
    """Test function use_connection works as expected."""
    conn = new_connection()
    use_connection(conn)

    self.assertEqual(conn, get_current_connection())

    use_connection()

    self.assertNotEqual(conn, get_current_connection())

    use_connection(self.testconn)  # Restore RQTestCase connection

    with self.assertRaises(AssertionError):
        with Connection(new_connection()):
            use_connection()
            with Connection(new_connection()):
                use_connection()
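The AssertionError exercised above is use_connection refusing to run while the connection stack already holds more than one entry, which is exactly what nested Connection contexts produce. For comparison, a short sketch of the Connection context manager, which pushes and pops a connection around a block instead of setting it globally:

from redis import Redis
from rq import Connection, Queue

with Connection(Redis()):
    q = Queue()                 # resolves the connection from the stack
    q.enqueue('time.sleep', 1)  # enqueue by dotted path; no explicit connection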
def handle(self, *args, **options):
    database_name = options.get("database_name")
    if database_name is not None:
        settings.DATABASES["default"]["NAME"] = database_name

    try:
        # Instantiate a worker
        worker_class = import_attribute(options.get("worker_class", "rq.Worker"))
        queues = get_queues(*args)
        w = worker_class(queues, connection=queues[0].connection,
                         name=options["name"])

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)
        w.work(burst=options.get("burst", False))
    except ConnectionError as e:
        print(e)
def create_worker(self, *args, **options):
    try:
        # Instantiate a worker
        worker_class = import_attribute(options['worker_class'])
        queues = get_queues(*args, queue_class=import_attribute(options['queue_class']))
        w = worker_class(
            queues,
            connection=queues[0].connection,
            name=options['name'],
            exception_handlers=get_exception_handlers() or None,
            default_worker_ttl=options['worker_ttl']
        )

        # Call use_connection to push the redis connection into LocalStack
        # without this, jobs using RQ's get_current_job() will fail
        use_connection(w.connection)
        w.work(burst=options.get('burst', False))
    except ConnectionError as e:
        print(e)
def run(self):
    redis = Redis(
        settings["redis.host"],
        settings["redis.port"],
        password=settings["redis.password"],
    )
    use_connection(redis)
    result = Queue().enqueue(import_oice_script, args=(
        settings,
        safile_settings,
        self.user_email,
        self.job_id,
        self.oice,
        self.script_file,
        self.language,
    ), timeout=600)
def run(self):
    global settings
    global safile_settings
    redis = Redis(settings["redis.host"], settings["redis.port"],
                  password=settings["redis.password"])
    use_connection(redis)
    q = Queue()
    result = q.enqueue(run_export, args=(
        settings,
        safile_settings,
        self.story_export_id,
    ), timeout=600)
    print(result)
def update_scheduled_send(schedule_id):
    ''' schedule a new sending
    interval was changed or the last job has finished
    and next needs to be scheduled
    '''
    schedules = Schedule.objects(id=schedule_id)
    if not schedules:
        return False
    schedule = schedules[0]

    # confirm that schedule is valid
    if not schedule.interval:
        return False

    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
        , password=redis_config['password']))
    scheduler = Scheduler()

    # see if this schedule had a job that was already enqueued
    if schedule.next_task_id:
        # instantiate the job
        job = Job(id=schedule.next_task_id)
        # cancel the old job
        # tried rescheduling but that was not working
        scheduler.cancel(job)

    # determine how many seconds to wait
    delay = _calculate_schedule_delay(schedule.interval)

    # start a new job
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , send_scheduled_report, schedule.id)

    # save the id of this job and when it next runs
    schedule.update(set__next_task_id=job.id)
    schedule.update(set__next_run_time=(datetime.datetime.utcnow()
        + datetime.timedelta(seconds=delay)))