Code example #1
def create_app(config_name=Config):
    app = Flask(__name__.split('.')[0])
    app.config.from_object(config_name)

    # Register before-request mixins before those defined inside extensions
    register_extensions(app)

    app.redis = redis.from_url(app.config['REDIS_URL'])

    app.task_queue = rq.Queue('freedomap-tasks', connection=app.redis)
    app.task_queue.empty()
    app.scheduler = rq_scheduler.Scheduler(
        queue=app.task_queue, connection=app.task_queue.connection)
    setup_tasks(app.scheduler)

    register_url_rules(app)
    register_blueprints(app)
    register_errorhandlers(app)
    app.shell_context_processor(
        lambda: {
            'db': db,
            'Protest': map_blueprint.models.Protest,
            'ProtestSubmission': map_blueprint.models.ProtestSubmission
        })

    return app
Code example #2
File: scheduling.py  Project: aocks/ox_herd
    def get_scheduled_jobs():
        results = []
        scheduler = rq_scheduler.Scheduler(connection=Redis())
        jobs = scheduler.get_jobs()
        for item in jobs:
            try:
                if not getattr(item, 'kwargs').get('ox_herd_task', None):
                    continue
            except UnpickleError as problem:
                logging.info('Could not unpickle %s because %s; skip',
                             str(item), str(problem))
                continue
            try:
                cron_string = item.meta.get('cron_string', None)
                if cron_string:
                    results.append(item)
                else:
                    logging.info('Skipping task without cron_string. '
                                 'Probably was just a one-off launch.')
            except Exception as problem:
                logging.warning(
                    'Skip job %s in get_scheduled_jobs due to exception %s',
                    str(item), problem)

        return results
Code example #3
File: scheduling.py  Project: aocks/ox_herd
def schedule_via_rq(ox_herd_task):
    rq_kw = {name: getattr(ox_herd_task, name)
             for name in ox_herd_task.rq_fields}
    scheduler = rq_scheduler.Scheduler(connection=Redis(),
                                       queue_name=rq_kw.pop('queue_name'))
    if rq_kw['cron_string']:
        return scheduler.cron(rq_kw.pop('cron_string'),
                              rq_kw.pop('func'),
                              kwargs={'ox_herd_task': ox_herd_task},
                              **rq_kw)
    raise ValueError('No scheduling method for rq task.')
Code example #4
File: scheduling.py  Project: aocks/ox_herd
    def find_job(target_job):
        scheduler = rq_scheduler.Scheduler(connection=Redis())
        try:
            # Job.fetch raises rq.exceptions.NoSuchJobError rather than
            # returning None when the id is unknown, so guard the lookup.
            return Job.fetch(target_job, connection=scheduler.connection)
        except NoSuchJobError:
            pass

        # FIXME: stuff below probably obsolete

        job_list = scheduler.get_jobs()
        for job in job_list:
            if job.id == target_job:
                return job
        return None
Code example #5
    def _retry_handler(self, job, exc_type, exc_value, traceback):
        # exc_type is the exception class itself, so use issubclass rather
        # than isinstance when matching it against exception types.
        if ((issubclass(exc_type, github.GithubException)
             and exc_value.status >= 500)
                or (issubclass(exc_type, requests.exceptions.HTTPError)
                    and exc_value.response.status_code >= 500)):
            backoff = datetime.timedelta(seconds=5)

        elif (exc_type == github.GithubException and exc_value.status == 403
              and "You have triggered an abuse detection mechanism"
              in exc_value.data["message"]):
            backoff = datetime.timedelta(minutes=5)

        else:
            return True

        max_retries = 10
        job.meta.setdefault('failures', 0)
        job.meta['failures'] += 1

        # Too many failures
        if job.meta['failures'] >= max_retries:
            LOG.warn('job %s: failed too many times - moving to '
                     'failed queue' % job.id)
            job.save()
            return True

        # Requeue job and stop it from being moved into the failed queue
        LOG.warn('job %s: failed %d times - retrying' %
                 (job.id, job.meta['failures']))

        # Exponential backoff
        retry_in = 2**(job.meta['failures'] - 1) * backoff
        for q in self.queues:
            if q.name == job.origin:
                scheduler = rq_scheduler.Scheduler(queue=q)
                scheduler.enqueue_in(retry_in, self._retry_now, job.origin,
                                     job.data, job.meta['failures'])
                return False

        # Can't find queue, which should basically never happen as we only work
        # jobs that match the given queue names and queues are transient in rq.
        LOG.warn('job %s: cannot find queue %s - moving to failed queue' %
                 (job.id, job.origin))
        return True
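
A note on how a handler like _retry_handler above gets wired in: rq lets you pass custom exception handlers to a worker, and a handler that returns False stops the chain so the job is not moved to the failed queue. The sketch below is a minimal illustration under a few assumptions (a local Redis, a recent rq release where the keyword is exception_handlers, and a stub handler standing in for the real retry logic); it is not taken from the project above.

import redis
from rq import Queue, Worker


def retry_handler(job, exc_type, exc_value, traceback):
    # Decide whether to reschedule the job (as _retry_handler does above).
    # Returning False stops rq's remaining handlers, so the job is not
    # moved to the failed queue; returning True lets rq carry on.
    return True


conn = redis.Redis()
queue = Queue('default', connection=conn)
worker = Worker([queue], connection=conn,
                exception_handlers=[retry_handler])
worker.work()
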
Code example #6
File: worker.py  Project: EstudioNexos/rq-retry
    def requeue_job(self, job):
        if self.use_scheduler:
            tries = job.meta['tries']
            try:
                delay = self.delays[tries - 1]
            except IndexError:
                delay = self.delays[-1]

            scheduler = rq_scheduler.Scheduler(connection=self.connection,
                                               queue_name=job.origin)

            self.failed_queue.remove(job)
            job = scheduler.enqueue_in(timedelta(seconds=delay), job.func,
                                       *job.args, **job.kwargs)
            job.meta['tries'] = tries + 1
            job.save()
            self.log.info('scheduled to run in {} seconds'.format(delay))
        else:
            job.meta['tries'] += 1
            job.save()
            self.failed_queue.requeue(job.id)
            self.log.info('requeued')
Code example #7
File: scheduling.py  Project: aocks/ox_herd
def cancel_job(job):
    scheduler = rq_scheduler.Scheduler(connection=Redis())
    return scheduler.cancel(job)
Code example #8
File: util.py  Project: pbhuss/meowbot
def get_scheduler():
    return rq_scheduler.Scheduler(connection=get_redis())
Code example #9
    def __init__(self) -> None:
        self._redis_conn = redis.Redis()
        self._scheduler = rq_scheduler.Scheduler(
            queue_name=GigantumQueues.default_queue.value,
            connection=self._redis_conn)
Code example #10
def create_app():

    app = Flask(__name__)

    app.config.from_object('config.app_config.Config')
    app.config.from_envvar('GIS_REST_LAYER_CONF')
    app.config['DEBUG'] = False

    logging.config.fileConfig(app.config.get('LOGGING_CONF_FILE'))
    logger = logging.getLogger(__name__)
    logger.info('Starting Application GISRestLayer')

    redis_connection = Redis(host=app.config.get('REDIS_HOST', 'redis'),
                             db=app.config.get('REDIS_DB', 1),
                             port=app.config.get('REDIS_PORT', 6379))

    geo_q = Queue("geo_q", connection=redis_connection)
    analytics_q = Queue("analytics_q", connection=redis_connection)
    default_scheduler_q = Queue("default_scheduler_q",
                                connection=redis_connection)

    scheduler = rq_scheduler.Scheduler(queue_name='default_scheduler_q',
                                       connection=redis_connection)

    # for code in wexceptions.default_exceptions.iterkeys():
    #     app.error_handler_spec[None][code] = make_json_error

    import deleteapi.delete_api as delete_api
    import importapi.import_api as import_api
    import checksapi.checks_api as checks_api
    import analyticsapi.analytics_api as analytics_api
    import schedulerapi.scheduler_api as scheduler_api

    app.register_blueprint(import_api.import_api)
    app.register_blueprint(delete_api.delete_api)
    app.register_blueprint(checks_api.checks_api)
    app.register_blueprint(analytics_api.analytics_api)
    app.register_blueprint(scheduler_api.scheduler_api)

    import_api.import_api_dict['geo_q'] = geo_q
    analytics_api.analytics_api_dict['analytics_q'] = analytics_q
    scheduler_api.scheduler_api_dict['scheduler'] = scheduler

    # rq_dashboard.RQDashboard(app, url_prefix='/monitor')
    app.register_blueprint(rq_dashboard.blueprint,
                           url_prefix=app.config.get('MONITOR_URL',
                                                     '/monitor'))

    @app.route('/version', methods=['GET'])
    @app.route('/about', methods=['GET'])
    def about():
        return flask.jsonify({"version": VERSION})

    @app.errorhandler(Exception)
    def make_json_error(ex):
        logger.error(str(ex))
        resp = {
            'message': str(ex),
            'state': 'failure',
            'error_class': type(ex).__name__,
            'error_type': 'transformation-init-problem'
        }
        response = jsonify(resp)
        response.status_code = (ex.code if isinstance(
            ex, wexceptions.HTTPException) else 500)
        return response

    return app
Code example #11
    def __init__(self, queue_name: str = DEFAULT_JOB_QUEUE) -> None:
        self._redis_conn = redis.Redis()
        self._job_queue = rq.Queue(queue_name, connection=self._redis_conn)
        self._scheduler = rq_scheduler.Scheduler(queue_name=queue_name,
                                                 connection=self._redis_conn)
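
Code examples #8, #9 and #11 only construct a Scheduler without using it. As a rough sketch of what usually happens next, assuming a local Redis, a running rqscheduler process plus an rq worker, and a hypothetical send_report task (none of which come from the projects above):

import datetime

import redis
import rq_scheduler


def send_report():
    # Hypothetical task body; in practice it must be importable by the worker.
    print('report sent')


conn = redis.Redis()
scheduler = rq_scheduler.Scheduler(queue_name='default', connection=conn)

# Run once, two minutes from now.
job = scheduler.enqueue_in(datetime.timedelta(minutes=2), send_report)

# Or run on a cron schedule, as in code example #3.
scheduler.cron('0 9 * * *', func=send_report)

# Scheduled jobs can be listed and cancelled, as in examples #2, #4 and #7.
for pending in scheduler.get_jobs():
    print(pending.id)
scheduler.cancel(job)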