def test_schedule_job(app):
    """Jobs scheduled via ``schedule()`` land in the rq-scheduler registry.

    Covers relative (timedelta) and absolute (datetime) scheduling plus
    repeat/interval metadata, custom job id and description.
    """
    # BUG FIX: ``async`` is a reserved keyword since Python 3.7 and made this
    # line a SyntaxError; flask-rq2 renamed the parameter to ``is_async``.
    rq = RQ(app, is_async=True)
    scheduler = rq.get_scheduler()
    assert scheduler.count() == 0
    rq.job(add)

    # schedule with a relative timedelta
    job1 = add.schedule(timedelta(seconds=1), 1, 2)
    assert scheduler.count() == 1
    assert job1 in scheduler.get_jobs()
    purge(scheduler)

    # schedule with an absolute datetime
    job2 = add.schedule(datetime.utcnow() + timedelta(seconds=1), 3, 4)
    assert scheduler.count() == 1
    assert job2 in scheduler.get_jobs()
    purge(scheduler)

    # repeat/interval metadata plus a custom id and description
    job3_id = uuid.uuid4().hex
    job3_description = 'custom description'
    job3 = add.schedule(timedelta(seconds=1), 5, 6, repeat=10, interval=2,
                        description=job3_description, job_id=job3_id)
    assert job3 in scheduler.get_jobs()
    assert job3.meta.get('repeat') == 10
    assert job3.meta.get('interval') == 2
    assert job3.id == job3_id
    assert job3.description == job3_description
    purge(scheduler)
def test_cron_job(app):
    """Cron-style jobs are keyed by name: re-adding replaces, new names add.

    The job id is derived from the cron name (``'cron-' + name``), which is
    what makes the deduplication work.
    """
    # BUG FIX: ``async`` is reserved since Python 3.7 (SyntaxError);
    # flask-rq2's keyword argument is ``is_async``.
    rq = RQ(app, is_async=True)
    scheduler = rq.get_scheduler()
    assert scheduler.count() == 0
    rq.job(add)

    cron_string = '* * * * *'  # every minute
    cron_name = 'add-it-for-real'
    job1 = add.cron(cron_string, cron_name, 1, 2)
    assert scheduler.count() == 1
    assert job1 in scheduler.get_jobs()
    assert job1.meta['cron_string'] == cron_string
    assert job1.id == 'cron-' + cron_name

    # deliberately NOT purged here: re-registering under the same name must
    # replace the existing entry rather than create a duplicate
    job2 = add.cron(cron_string, cron_name, 3, 4)
    assert scheduler.count() == 1  # no duplicate here
    assert job2 in scheduler.get_jobs()
    assert job2.meta['cron_string'] == cron_string
    assert job2.id == 'cron-' + cron_name

    # a different name creates a second, independent cron entry
    job3 = add.cron(cron_string, cron_name + '-pro', 3, 4)
    assert scheduler.count() == 2  # second cron
    assert job3 in scheduler.get_jobs()
    assert job3.id == 'cron-' + cron_name + '-pro'
    purge(scheduler)
def test_init_app(app, config):
    """init_app() wires the extension into a pre-existing Flask app."""
    rq = RQ()

    # before init_app: nothing registered, no backend module resolved
    extensions = getattr(app, 'extensions', {})
    assert 'rq2' not in extensions
    assert getattr(rq, 'module', None) is None

    rq.init_app(app)

    # after init_app: config consumed, connection live, extension registered
    assert rq.redis_url == config.RQ_REDIS_URL
    assert isinstance(rq.connection, StrictRedis)
    assert 'rq2' in getattr(app, 'extensions', {})
def test_init_app(app, config):
    """Deferred initialization: ``RQ()`` then ``init_app()`` registers 'rq2'."""
    def extensions():
        # Flask lazily creates ``app.extensions``; tolerate its absence
        return getattr(app, 'extensions', {})

    rq = RQ()
    assert 'rq2' not in extensions()
    assert getattr(rq, 'module', None) is None

    rq.init_app(app)

    assert rq.redis_url == config.RQ_REDIS_URL
    assert isinstance(rq.connection, StrictRedis)
    assert 'rq2' in extensions()
def test_init_app(app, config):
    """Late binding via init_app() populates url, connection and module path."""
    rq = RQ()
    assert 'rq2' not in getattr(app, 'extensions', {})
    assert getattr(rq, 'module', None) is None

    rq.init_app(app)

    # the generated backend module is named after the Flask app
    expected_module = 'flask_rq2.backend_%s' % app.name
    assert rq.url == config.RQ_REDIS_URL
    assert isinstance(rq.connection, Redis)
    assert 'rq2' in getattr(app, 'extensions', {})
    assert rq.module_path == expected_module
class WorkerCommand(Command):
    """runs Worker instance"""

    def __init__(self):
        super(WorkerCommand, self).__init__()
        # extension instance; picks up the Flask app's config when run
        self.rq = RQ()

    def run(self, **kwargs):
        # NOTE(review): ``app`` is a module-level global not visible in this
        # chunk — confirm it is initialized before this command runs.
        app.logger.info("Running {} with arguments {}".format(
            self.__class__.__name__, kwargs))
        # copy every CLI option onto the command instance as an attribute
        self.__dict__.update(**kwargs)  # update self's with kwargs
        # blocks until the worker is stopped
        self.rq.get_worker().work()
def test_job_override(app, config):
    """Options given at queue() time override those set at registration."""
    rq = RQ(app, async_=True)
    rq.job(add, timeout=123, result_ttl=456, ttl=789)

    # registration-time options land on the helper
    helper = add.helper
    assert helper.timeout == 123
    assert helper.result_ttl == 456
    assert helper.ttl == 789

    # queue-time options win over the helper's values
    queued = add.queue(timeout=111, result_ttl=222, ttl=333)
    assert queued.timeout == 111
    assert queued.result_ttl == 222
    assert queued.ttl == 333
def test_job_override(app, config):
    """queue() keyword arguments take precedence over rq.job() defaults."""
    rq = RQ(app, is_async=True)
    rq.job(add, timeout=123, result_ttl=456, ttl=789)

    # helper reflects the registration-time options
    for attr, expected in (('timeout', 123),
                           ('result_ttl', 456),
                           ('ttl', 789)):
        assert getattr(add.helper, attr) == expected

    # per-call options shadow the helper defaults on the queued job
    job = add.queue(timeout=111, result_ttl=222, ttl=333)
    for attr, expected in (('timeout', 111),
                           ('result_ttl', 222),
                           ('ttl', 333)):
        assert getattr(job, attr) == expected
def test_queue_job(app):
    """End-to-end queue test: enqueue, dependencies, custom queues, worker."""
    rq = RQ(app, is_async=True)
    rq.connection.flushdb()  # start from an empty redis database
    rq.job(add)

    # plain enqueue with positional args
    job1 = add.queue(1, 2)
    assert isinstance(job1, import_attribute(rq.job_class))
    assert job1.args == (1, 2)
    assert job1.kwargs == {}
    assert job1.timeout == add.helper.timeout == rq.default_timeout

    # custom description
    job2 = add.queue(3, 4, description='job 2')
    assert job2.description == 'job 2'

    # custom job id
    job3_id = uuid.uuid4().hex
    job3 = add.queue(5, 6, job_id=job3_id)
    assert job3.id == job3_id

    # dependent job: only queued after its dependency finishes
    job4 = add.queue(7, 8, depends_on=job3)
    assert job4.dependency.id == job3.id

    other_queue = 'other_queue'
    job5 = add.queue(9, 10, queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job5.origin == other_queue

    # perform() runs the job synchronously without a worker
    job6 = add.queue(11, 12)
    result = job6.perform()
    assert result == 23

    queue = rq.get_queue()
    assert job1 in queue.jobs
    assert job2 in queue.jobs
    assert job3 in queue.jobs
    # job 4 is a dependency on job 3, so not queued yet
    assert job4 not in queue.jobs
    assert job3.result is None
    assert job4.result is None

    # burst-mode worker drains the default queue
    response = rq.get_worker('default').work(True)
    assert response
    assert job4.dependency.result == 11
    assert job4.result == 15
    assert len(queue.jobs) == 0
def test_factory_pattern(app, config):
    """A default_timeout override made after init_app() reaches the helper,
    and the helper's own timeout remains mutable afterwards.
    """
    rq = RQ(default_timeout=123)
    rq.init_app(app)
    rq.default_timeout = 456  # override default timeout
    rq.job(add)
    # BUG FIX: these comparisons were missing ``assert`` and so were no-op
    # expression statements that could never fail.
    assert add.helper.timeout == 456
    add.helper.timeout = 789
    assert add.helper.timeout == 789
def init_rq(app):
    """Attach a Redis task queue to the given Flask app.

    :param app: flask app
    :return: redis queue handle
    """
    # point flask-rq2 at the configured redis host before binding
    app.config['RQ_REDIS_URL'] = REDIS_URL
    return RQ(app)
def test_factory_pattern(app, config):
    """Defaults set on the RQ factory flow through to the helper and to
    queued jobs, and can be overridden at every level."""
    rq = RQ(default_timeout=111)
    rq.init_app(app)
    # override some rq defaults
    rq.default_timeout = 222
    rq.default_result_ttl = 333
    rq.default_queue = 'non-default'
    rq.job(add)
    # then check if those defaults have been passed to the helper
    assert add.helper.timeout == 222
    assert add.helper.result_ttl == 333
    assert add.helper.queue_name == 'non-default'
    # then check if the values have been passed to the queued job as well
    job = add.queue(1, 2)
    assert job.timeout == 222
    assert job.result_ttl == 333
    assert job.ttl is None
    assert job.origin == 'non-default'
    # change the values in the helper and see if that works
    add.helper.timeout = 444
    assert add.helper.timeout == 444
    add.helper.result_ttl = 555
    assert add.helper.result_ttl == 555
    add.helper.queue_name = 'totally-different'
    assert add.helper.queue_name == 'totally-different'
    # assert the helper's values reach a new job
    job = add.queue(1, 2)
    assert job.timeout == 444
    assert job.result_ttl == 555
    assert job.ttl is None
    assert job.origin == 'totally-different'
    # now finally override the values while queueing
    job = add.queue(1, 2, queue='yet-another', timeout=666,
                    result_ttl=777, ttl=888)
    assert job.timeout == 666
    assert job.result_ttl == 777
    assert job.ttl == 888
    assert job.origin == 'yet-another'
def test_schedule_job(app):
    """schedule() supports timedelta/datetime, repeat metadata and queues."""
    rq = RQ(app, is_async=True)
    scheduler = rq.get_scheduler()
    purge(scheduler)  # make sure no jobs from earlier tests linger
    assert scheduler.count() == 0
    rq.job(add)
    # relative scheduling via timedelta
    job1 = add.schedule(timedelta(seconds=1), 1, 2)
    assert scheduler.count() == 1
    assert job1 in scheduler.get_jobs()
    purge(scheduler)
    # absolute scheduling via datetime
    job2 = add.schedule(datetime.utcnow() + timedelta(seconds=1), 3, 4)
    assert scheduler.count() == 1
    assert job2 in scheduler.get_jobs()
    purge(scheduler)
    # repeat/interval metadata plus custom id and description
    job3_id = uuid.uuid4().hex
    job3_description = 'custom description'
    job3 = add.schedule(timedelta(seconds=1), 5, 6, repeat=10, interval=2,
                        description=job3_description, job_id=job3_id)
    assert job3 in scheduler.get_jobs()
    assert job3.meta.get('repeat') == 10
    assert job3.meta.get('interval') == 2
    assert job3.id == job3_id
    assert job3.description == job3_description
    purge(scheduler)
    other_queue = 'other-queue'
    job4 = add.schedule(timedelta(seconds=1), 5, 6, queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job4.origin == other_queue
    # one more. the scheduler will have all jobs, no matter what
    # queue the job will eventually be queued in.
    assert job4 in scheduler.get_jobs()
    purge(scheduler)
def test_queue_job(app):
    """Enqueue jobs with various options and drain them with a burst worker."""
    # BUG FIX: ``async`` is a reserved keyword since Python 3.7 (SyntaxError);
    # flask-rq2 takes ``is_async`` instead.
    rq = RQ(app, is_async=True)
    rq.job(add)

    job1 = add.queue(1, 2)
    assert isinstance(job1, rq.job_cls)
    assert job1.args == (1, 2)
    assert job1.kwargs == {}
    assert job1.timeout == add.helper.timeout == rq.default_timeout

    job2 = add.queue(3, 4, description='job 2')
    assert job2.description == 'job 2'

    job3_id = uuid.uuid4().hex
    job3 = add.queue(5, 6, job_id=job3_id)
    assert job3.id == job3_id

    job4 = add.queue(7, 8, depends_on=job3)
    assert job4.dependency.id == job3.id

    # perform() executes synchronously, bypassing the worker
    job5 = add.queue(9, 10)
    result = job5.perform()
    assert result == 19

    queue = rq.get_queue()
    assert job1 in queue.jobs
    assert job2 in queue.jobs
    assert job3 in queue.jobs
    # job 4 is a dependency on job 3, so not queued yet
    assert job4 not in queue.jobs
    assert job3.result is None
    assert job4.result is None

    # burst-mode worker drains the default queue and resolves the dependency
    response = rq.get_worker('default').work(True)
    assert response
    assert job4.dependency.result == 11
    assert job4.result == 15
    assert len(queue.jobs) == 0
def test_app_context(app):
    """RQCommand subclasses execute inside the Flask application context."""
    rq = RQ(app)

    class ContextCommand(RQCommand):
        def run(self):
            # would fail outside an app context
            assert current_app == app
            return current_app.name

    cmd = ContextCommand(rq)
    assert cmd.rq == rq
    assert cmd(app) == app.name
def test_cron_job(app):
    """cron() registers repeating jobs, deduplicates by name, honours queue."""
    rq = RQ(app, async_=True)
    scheduler = rq.get_scheduler()
    purge(scheduler)
    assert scheduler.count() == 0
    rq.job(add)
    cron_string = '* * * * *'  # every minute
    cron_name = 'add-it-for-real'
    job1 = add.cron(cron_string, cron_name, 1, 2)
    assert scheduler.count() == 1
    assert job1 in scheduler.get_jobs()
    assert job1.meta['cron_string'] == cron_string
    assert job1.id == 'cron-' + cron_name
    purge(scheduler)
    # re-registering under the same name must not create a duplicate
    job2 = add.cron(cron_string, cron_name, 3, 4)
    assert scheduler.count() == 1  # no duplicate here
    assert job2 in scheduler.get_jobs()
    assert job2.meta['cron_string'] == cron_string
    assert job2.id == 'cron-' + cron_name
    # a new name adds a second independent entry
    job3 = add.cron(cron_string, cron_name + '-pro', 3, 4)
    assert scheduler.count() == 2  # second cron
    assert job3 in scheduler.get_jobs()
    assert job3.id == 'cron-' + cron_name + '-pro'
    other_queue = 'other-queue'
    job4 = add.cron(cron_string, cron_name + '-other', 3, 4,
                    queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job4.origin == other_queue
    # one more. the scheduler will have all jobs, no matter what
    # queue the job will eventually be queued in.
    assert job4 in scheduler.get_jobs()
    assert scheduler.count() == 3
    purge(scheduler)
def test_cron_job(app):
    """Same cron coverage using the newer ``is_async`` keyword."""
    rq = RQ(app, is_async=True)
    scheduler = rq.get_scheduler()
    purge(scheduler)
    assert scheduler.count() == 0
    rq.job(add)
    cron_string = '* * * * *'  # every minute
    cron_name = 'add-it-for-real'
    job1 = add.cron(cron_string, cron_name, 1, 2)
    assert scheduler.count() == 1
    assert job1 in scheduler.get_jobs()
    assert job1.meta['cron_string'] == cron_string
    assert job1.id == 'cron-' + cron_name
    purge(scheduler)
    # same name again: entry is replaced, not duplicated
    job2 = add.cron(cron_string, cron_name, 3, 4)
    assert scheduler.count() == 1  # no duplicate here
    assert job2 in scheduler.get_jobs()
    assert job2.meta['cron_string'] == cron_string
    assert job2.id == 'cron-' + cron_name
    job3 = add.cron(cron_string, cron_name + '-pro', 3, 4)
    assert scheduler.count() == 2  # second cron
    assert job3 in scheduler.get_jobs()
    assert job3.id == 'cron-' + cron_name + '-pro'
    other_queue = 'other-queue'
    job4 = add.cron(cron_string, cron_name + '-other', 3, 4,
                    queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job4.origin == other_queue
    # one more. the scheduler will have all jobs, no matter what
    # queue the job will eventually be queued in.
    assert job4 in scheduler.get_jobs()
    assert scheduler.count() == 3
    purge(scheduler)
def test_schedule_job(app):
    """Scheduling variants: timedelta, datetime, metadata, alternate queue."""
    rq = RQ(app, is_async=True)
    scheduler = rq.get_scheduler()
    purge(scheduler)  # start clean
    assert scheduler.count() == 0
    rq.job(add)
    # relative delay
    job1 = add.schedule(timedelta(seconds=1), 1, 2)
    assert scheduler.count() == 1
    assert job1 in scheduler.get_jobs()
    purge(scheduler)
    # absolute timestamp
    job2 = add.schedule(datetime.utcnow() + timedelta(seconds=1), 3, 4)
    assert scheduler.count() == 1
    assert job2 in scheduler.get_jobs()
    purge(scheduler)
    # repeat/interval plus custom id and description
    job3_id = uuid.uuid4().hex
    job3_description = 'custom description'
    job3 = add.schedule(timedelta(seconds=1), 5, 6, repeat=10, interval=2,
                        description=job3_description, job_id=job3_id)
    assert job3 in scheduler.get_jobs()
    assert job3.meta.get('repeat') == 10
    assert job3.meta.get('interval') == 2
    assert job3.id == job3_id
    assert job3.description == job3_description
    purge(scheduler)
    other_queue = 'other-queue'
    job4 = add.schedule(timedelta(seconds=1), 5, 6, queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job4.origin == other_queue
    # one more. the scheduler will have all jobs, no matter what
    # queue the job will eventually be queued in.
    assert job4 in scheduler.get_jobs()
    purge(scheduler)
def test_scheduler_command_pid(config, app, monkeypatch, tmpdir):
    """`rq scheduler --pid <file>` writes a non-empty pidfile and exits 0."""
    # neuter the scheduler loop so the command returns immediately
    monkeypatch.setattr(flask_rq2_app.Scheduler, 'run',
                        lambda *args, **kwargs: None)
    manager = RQManager(app=app, rq=RQ(app))

    pidfile = tmpdir.join('rq2_scheduler.pid')
    assert not pidfile.exists()
    monkeypatch.setattr(sys, 'argv',
                        ['rq', 'scheduler', '--pid', pidfile.strpath])

    exit_code = None
    try:
        manager.run()
    except SystemExit as exc:
        exit_code = exc.code

    assert exit_code == 0
    assert pidfile.read() != ''
def test_factory_pattern(app, config):
    """Factory defaults propagate to helper and jobs; overrides win at each
    level (extension -> helper -> queue() call)."""
    rq = RQ(default_timeout=111)
    rq.init_app(app)
    # override some rq defaults
    rq.default_timeout = 222
    rq.default_result_ttl = 333
    rq.default_queue = 'non-default'
    rq.job(add)
    # then check if those defaults have been passed to the helper
    assert add.helper.timeout == 222
    assert add.helper.result_ttl == 333
    assert add.helper.queue_name == 'non-default'
    # then check if the values have been passed to the queued job as well
    job = add.queue(1, 2)
    assert job.timeout == 222
    assert job.result_ttl == 333
    assert job.ttl is None
    assert job.origin == 'non-default'
    # change the values in the helper and see if that works
    add.helper.timeout = 444
    assert add.helper.timeout == 444
    add.helper.result_ttl = 555
    assert add.helper.result_ttl == 555
    add.helper.queue_name = 'totally-different'
    assert add.helper.queue_name == 'totally-different'
    # assert the helper's values reach a new job
    job = add.queue(1, 2)
    assert job.timeout == 444
    assert job.result_ttl == 555
    assert job.ttl is None
    assert job.origin == 'totally-different'
    # now finally override the values while queueing
    job = add.queue(1, 2, queue='yet-another', timeout=666,
                    result_ttl=777, ttl=888)
    assert job.timeout == 666
    assert job.result_ttl == 777
    assert job.ttl == 888
    assert job.origin == 'yet-another'
def test_scheduler_command_verbose(config, app, monkeypatch):
    """--verbose must configure the rq log handlers at DEBUG level."""
    # neuter the scheduler loop so the command returns immediately
    monkeypatch.setattr(flask_rq2_app.Scheduler, 'run',
                        lambda *args, **kwargs: None)
    rq = RQ(app)
    manager = RQManager(app=app, rq=rq)

    def setup_loghandlers(level):
        # replacement asserts the level the command passes through
        assert level == 'DEBUG'

    monkeypatch.setattr(flask_rq2_script, 'setup_loghandlers',
                        setup_loghandlers)
    monkeypatch.setattr(sys, 'argv', ['rq', 'scheduler', '--verbose'])
    try:
        manager.run()
    except SystemExit as e:
        exit_code = e.code
    else:
        # command is expected to exit; None here fails the final assert
        exit_code = None
    assert exit_code == 0
def test_commands(command, output, uses_logging, app, caplog, capsys,
                  monkeypatch, request):
    """Parametrized smoke test: each CLI command exits 0 and emits `output`
    either via logging or on stdout."""
    rq = RQ(app)
    manager = RQManager(app=app, rq=rq)
    monkeypatch.setattr(sys, 'argv', ['manage.py'] + command.split())
    try:
        manager.run()
    except SystemExit as e:
        exit_code = e.code
    else:
        exit_code = None
    assert exit_code == 0
    if uses_logging:
        # NOTE(review): caplog.setLevel()/caplog.text() is the legacy
        # pytest-catchlog API; modern pytest uses caplog.set_level() and the
        # ``caplog.text`` property — confirm the pinned pytest version.
        caplog.setLevel(logging.INFO, logger='rq.worker')
        out = caplog.text()
    else:
        out, err = capsys.readouterr()
    assert output in out

    def flush():
        rq.connection.flushdb()
    # clean redis after the test regardless of outcome
    request.addfinalizer(flush)
def test_queue_job(app):
    """Queue test (``async_`` variant): options, dependencies, burst worker."""
    rq = RQ(app, async_=True)
    rq.connection.flushdb()  # start from an empty redis database
    rq.job(add)

    # plain enqueue with positional args
    job1 = add.queue(1, 2)
    assert isinstance(job1, import_attribute(rq.job_class))
    assert job1.args == (1, 2)
    assert job1.kwargs == {}
    assert job1.timeout == add.helper.timeout == rq.default_timeout

    # custom description
    job2 = add.queue(3, 4, description='job 2')
    assert job2.description == 'job 2'

    # custom job id
    job3_id = uuid.uuid4().hex
    job3 = add.queue(5, 6, job_id=job3_id)
    assert job3.id == job3_id

    # dependent job: only queued once its dependency has run
    job4 = add.queue(7, 8, depends_on=job3)
    assert job4.dependency.id == job3.id

    other_queue = 'other_queue'
    job5 = add.queue(9, 10, queue=other_queue)
    # job will be scheduled in the other queue eventually
    assert job5.origin == other_queue

    # perform() runs the job synchronously without a worker
    job6 = add.queue(11, 12)
    result = job6.perform()
    assert result == 23

    queue = rq.get_queue()
    assert job1 in queue.jobs
    assert job2 in queue.jobs
    assert job3 in queue.jobs
    # job 4 is a dependency on job 3, so not queued yet
    assert job4 not in queue.jobs
    assert job3.result is None
    assert job4.result is None

    # burst-mode worker drains the default queue
    response = rq.get_worker('default').work(True)
    assert response
    assert job4.dependency.result == 11
    assert job4.result == 15
    assert len(queue.jobs) == 0
def test_rq_outside_flask():
    """Without a bound app (and no app context) there is no connection."""
    # the redis connection is none since the Flask app context isn't there
    assert RQ().connection is None
from mxcache import MxCache
from metrics import Metrics
from flask_mail import Mail

app = Flask('app')
app.config.from_object('app.config')
configure_logging(app.config.get('DEBUG'))

mxcache = MxCache(app)
metrics = Metrics(app)
mail = Mail(app)

from flask_rq2 import RQ

# run jobs asynchronously only when a Redis backend is configured
RQ_ASYNC = 'REDIS_URL' in app.config
RQ_TIMEOUT = app.config.get('RQ_TIMEOUT', 180)
# BUG FIX: ``async`` is a reserved keyword since Python 3.7, which made the
# old ``RQ(async=...)`` call a SyntaxError; flask-rq2 now takes ``is_async``.
rq = RQ(is_async=RQ_ASYNC, default_timeout=RQ_TIMEOUT)
rq.app_worker_path = 'app.worker_preload'
rq.init_app(app)

from views import address
app.register_blueprint(address)


@app.route('/')
def index():
    # lightweight health-check endpoint
    return make_response('ok')


@app.route('/test')
def test_form():
    return render_template('form.html')
from flask_migrate import Migrate
from flask_rq2 import RQ
from flask_mail import Mail
from flask_lastuser import Lastuser
from flask_lastuser.sqlalchemy import UserManager
from flask_admin import Admin
import wtforms_json
from baseframe import baseframe, assets, Version
from ._version import __version__
import coaster.app

app = Flask(__name__, instance_relative_config=True)
lastuser = Lastuser()
mail = Mail()
# NOTE(review): rq.init_app(app) is not visible in this chunk — presumably
# the extension is bound to ``app`` elsewhere; confirm.
rq = RQ()

# --- Assets ------------------------------------------------------------------

version = Version(__version__)
assets['boxoffice.css'][version] = 'css/app.css'
assets['boxoffice.js'][version] = 'js/scripts.js'

# imported for their side effects: registering views/models on ``app``
from . import extapi, views  # NOQA
from .models import db, User, Item, Price, DiscountPolicy, DiscountCoupon, ItemCollection, Organization, Category, Invoice  # noqa
from .siteadmin import OrganizationModelView, DiscountCouponModelView, InvoiceModelView  # noqa

# Configure the app
coaster.app.init_app(app)
db.init_app(app)
db.app = app
def test_rq_outside_flask():
    """Accessing the connection outside a Flask app context must raise."""
    rq = RQ()
    # Idiom: the context-manager form of pytest.raises is clearer than the
    # legacy ``assert pytest.raises(Exc, callable)`` style and reports a
    # better failure message; behavior (pass/fail) is unchanged.
    with pytest.raises(RuntimeError):
        rq.connection
from ddtrace import patch_all

patch_all()  # enable Datadog tracing for all supported libraries


def load_corpus():
    # NOTE(review): path is relative to the working directory — confirm the
    # process is started from the project root.
    mombler = Momblish(corpus=Corpus.load("support/corpus.json"))
    return mombler


db = SQLAlchemy()
migrate = Migrate()
jwt = JWTManager()
momblish = load_corpus()
json_schema_manager = JSONSchemaManager("../support/schemas")
Q = RQ()
redis_client = FlaskRedis()
Q.queues = ["email", "nomad"]  # queues the RQ worker will listen on

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


def create_app(env: str = "development"):
    """Application factory: build a Flask app configured for ``env``."""
    app = Flask(__name__)
    app.config.from_object(settings.app_config[env])
    db.init_app(app)
    migrate.init_app(app, db)
    jwt.init_app(app)
    json_schema_manager.init_app(app)
    Q.init_app(app)
    # NOTE(review): no ``return app`` is visible here — the factory appears
    # truncated in this chunk; confirm the remainder returns the app.
from flask_rq2 import RQ from rq import get_current_job import random rq = RQ() rq.redis_url = 'redis://redis:6379/0' # the timeout parameter specifies how long a job may take # to execute before it is aborted and regardes as failed @rq.job(timeout=180) def approximate_pi(num_iterations): """ approximate Pi by using monte carlo method """ # get a reference to the job we are currently in # to send back status reports self_job = get_current_job() inside = 0 for i in range( 1, num_iterations + 1 ): # start from 1 to get round numbers in the progress information x, y = random.random(), random.random() dist = x**2 + y**2 if dist <= 1.0: inside += 1 # update meta information on every 1000 iterations if i % 1000 == 0: self_job.meta['progress'] = {
def test_config_default_timeout(app, config):
    """An explicit default_timeout beats the stock rq Queue default."""
    custom = RQ(app, default_timeout=911)
    assert custom.default_timeout == 911
    assert custom.default_timeout != Queue.DEFAULT_TIMEOUT
app = Flask(__name__)
app.config.from_object("config")

# extensions bound directly to the single app instance
cache = Cache(app)
cors = CORS(app)
db = SQLAlchemy(app)
jwt = JWTManager(app)
limiter = Limiter(
    app,
    key_func=get_remote_address,
    default_limits=["100 per minute", "5 per second"],
)
mail = Mail(app)
swagger = Swagger(app)
storage = Minio(app)
rq = RQ(app)

# -- Handler (side-effect imports registering error/log/JWT handlers)
from app.handlers import (http_handler, jwt_handler, log_handler)

# Comment this block out when switching to another database system
if not os.path.exists("db.sqlite"):
    db.create_all()

# -- Controllers
from app.controllers import (auth_controller, file_controller,
                             homepage_controller, mail_controller,
                             user_controller)

app.register_blueprint(auth_controller.mod)
app.register_blueprint(file_controller.mod)
app = Flask(__name__)

# helper is imported inside an app context so its module-level code can use
# ``current_app``/extensions
with app.app_context():
    from helper import *

"""
__author__ : Bijin Benny
__email__ : [email protected]
__license__ : MIT
__version__ : 1.0
Modification : The native Redis library used in the original reference
was outdated and is modified to use the new redis library specific
to Flask apps Configure and intialize Redis task queue
"""
app.config['RQ_REDIS_URL'] = 'redis://localhost:6379/0'
# NOTE(review): ``RQ(app)`` returns the flask-rq2 extension, not a raw redis
# connection — the name ``redis_conn`` is misleading; confirm call sites.
redis_conn = RQ(app)

"""
__author__ : Bijin Benny
__email__ : [email protected]
__license__ : MIT
__version__ : 1.0
Modification : The deprecated elasticsearch library elasticsearch_dsl
was removed and replaced with the new elasticsearch library for ES
clients Load environment variables and create elastic search DB client
"""
host = os.getenv("HOST")
user = os.getenv("USERNAME")
pwd = os.getenv("PASSWORD")
port = os.getenv("PORT")
from services.stripe import Stripe # Setup flask cache cache = Cache() # init flask assets assets_env = Environment() debug_toolbar = DebugToolbarExtension() login_manager = LoginManager() login_manager.login_view = "auth.login" login_manager.login_message_category = "warning" # login_manager.user_loader is registered in controllers/auth. # TODO: login_manager.refresh_view = "auth.reauth" login_manager.needs_refresh_message = ( u"To protect your account, please reauthenticate to access this page.") login_manager.needs_refresh_message_category = "info" token = Token() socketio = SocketIO() rq2 = RQ() admin = AdminDashboard() mail = Mail() limiter = Limiter(key_func=get_remote_address) stripe = Stripe() sentry = Sentry()
from __future__ import absolute_import, print_function

from flask import Flask
from flask_rq2 import RQ


class Config(object):
    """Flask-RQ2 test settings (synchronous, so no worker is required)."""
    RQ_REDIS_URL = 'redis://localhost:6379/15'  # db 15: throwaway test db
    RQ_QUEUES = ['test-queue']
    RQ_ASYNC = False  # execute jobs inline instead of via a worker
    RQ_SCHEDULER_QUEUE = 'scheduler-queue'
    RQ_SCHEDULER_INTERVAL = 42


testapp = Flask('testapp')
testapp.config.from_object(Config())
testrq = RQ(testapp)
from flask_rq2 import RQ
from flask_mail import Mail
from flask_lastuser import Lastuser
from flask_lastuser.sqlalchemy import UserManager
from flask_admin import Admin
import wtforms_json
from baseframe import baseframe, assets, Version
from ._version import __version__
import coaster.app

app = Flask(__name__, instance_relative_config=True)
lastuser = Lastuser()
mail = Mail()
# NOTE(review): rq.init_app(app) is not visible in this chunk — presumably
# bound to ``app`` elsewhere; confirm.
rq = RQ()

# --- Assets ------------------------------------------------------------------

version = Version(__version__)
assets['boxoffice.css'][version] = 'css/app.css'
assets['boxoffice.js'][version] = 'js/scripts.js'

# imported for their side effects: registering views/models on ``app``
from . import extapi, views  # NOQA
from boxoffice.models import db, User, Item, Price, DiscountPolicy, DiscountCoupon, ItemCollection, Organization, Category, Invoice  # noqa
from siteadmin import OrganizationModelView, DiscountCouponModelView, InvoiceModelView  # noqa

# Configure the app
coaster.app.init_app(app)
# define login_manager global for other modules login_manager = LoginManager() login_manager.login_view = 'login' # Initialize ESI connection, all three below globals are needed to set up ESI connection esiapp = App.create(config.ESI_SWAGGER_JSON) # init the security object esisecurity = EsiSecurity( app=esiapp, redirect_uri=config.ESI_CALLBACK, client_id=config.ESI_CLIENT_ID, secret_key=config.ESI_SECRET_KEY, headers={'User-Agent': config.ESI_USER_AGENT} ) # init the client esiclient = EsiClient( security=esisecurity, cache=None, headers={'User-Agent': config.ESI_USER_AGENT} ) # init RQ, result_ttl needs to be unset so that scheduled jobs will always continue to run rq = RQ() rq.default_result_ttl = None rq.default_timeout = config.DEFAULT_TIMEOUT # direct access to redis is needed for some components, like statistics caching r = redis.StrictRedis(host=config.REDIS_URL, port=config.REDIS_PORT, db=0)
def rq_cli_app(app):
    """Attach CLI support plus the RQ extension to *app* and return it."""
    # both extensions register themselves on the app; the returned
    # instances are not needed afterwards
    FlaskCLI(app)
    RQ(app)
    app.cli.name = app.name
    return app
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_migrate import Migrate
from flask_rq2 import RQ
import config
import os

# module-level singletons; bound to the app inside create_app()
logger = None
bcrypt = Bcrypt()
db = SQLAlchemy()
rq = RQ(default_timeout=180 * 2)  # 6-minute default job timeout
migrate = Migrate()


def create_app(config_name=None):
    """Application factory wiring config, logging and extensions."""
    global logger
    # Define the WSGI application object
    app = Flask(__name__)

    # Configurations
    app.config.from_object(_get_config_class(config_name))
    logger = app.logger

    # Define Database object which is imported by some modules
    bcrypt.init_app(app)
    rq.init_app(app)
    db.init_app(app)
    # NOTE(review): no ``return app`` visible here — the factory appears to
    # continue beyond this chunk; confirm it returns the app.
def test_config_async_override(app, config, rq):
    """An explicit ``async_`` argument overrides the RQ_ASYNC setting."""
    configured = config.RQ_ASYNC
    rq2 = RQ(app, async_=not configured)
    # the stored flag must differ from what the config would have set
    assert rq2._async != configured
def job_fetch(self, id):
    """Fetch a job by id; returns a ``(found, job_or_error_message)`` tuple.

    NOTE(review): the parameter ``id`` shadows the builtin — consider
    renaming at the next API-compatible opportunity.
    """
    job = False, None
    try:
        job = True, Job.fetch(id, connection=rq_instance.connection)
    except NoSuchJobError as e:
        # job id unknown to redis
        job = False, str(e)
    except Exception as e:
        # any other failure is reported the same way
        job = False, str(e)
    return job


# class method injection
RQ.job_fetch = job_fetch

rq_instance = RQ()

# NOTE(review): 'RESUTL_TTL' is misspelled ('RESULT_TTL') — check call sites
# before renaming.
RESUTL_TTL = '7d'  # -1 for never expire, clean up result key manually
RUN_MINTCAST_JOB_TIMEOUT = '20d'
NORMAL_JOB_TIMEOUT = '30m'
DOWNLOAD_JOB_TIMEOUT = '4h'


def queue_job_with_connection(job, connection, *args, _queue_name=None, **kwargs):
    # NOTE(review): only the no-connection guard is visible here; the body
    # appears truncated in this chunk.
    if not connection:
        return job
from flask import Flask
from flask_rq2 import RQ

from dobby_hardware.wheels import Wheels  # DON'T REMOVE
from dobby_hardware.camservo import Camservo  # DON'T REMOVE
from dobby_hardware.ultrasound import Ultrasonic  # DON'T REMOVE

rq = RQ()


def create_app():
    """Application factory: configure Flask, bind RQ, register blueprints."""
    app = Flask(__name__)
    app.config.from_object('config')
    rq.init_app(app)

    # blueprints are imported lazily to avoid circular imports
    from .api.wheels.views import wheels as wheels_blueprint
    from .api.cam.views import cam as cam_blueprint
    from .api.ultrasonic.views import path as path_blueprint
    for blueprint in (wheels_blueprint, cam_blueprint, path_blueprint):
        app.register_blueprint(blueprint)

    import web.jobs  # DON'T REMOVE

    return app
from flask_migrate import Migrate
from flask_lastuser import Lastuser
from flask_lastuser.sqlalchemy import UserManager
from baseframe import baseframe, assets, Version
from baseframe import _, __  # NOQA
import coaster.app
from ._version import __version__

version = Version(__version__)

# First, make an app
app = Flask(__name__, instance_relative_config=True)
mail = Mail()
lastuser = Lastuser()
# NOTE(review): rq.init_app(app) is not visible in this chunk — presumably
# bound elsewhere; confirm.
rq = RQ()

# Second, import the models and views (side-effect imports register routes)
from . import models, views  # NOQA
from .models import db

# Third, setup baseframe and assets
assets['hasmail.js'][version] = 'js/app.js'
assets['hasmail.css'][version] = 'css/app.css'

# Configure the app
coaster.app.init_app(app)
db.init_app(app)