def test_cleanup(self):
    """Moving expired jobs to FailedQueue."""
    failed_queue = FailedQueue(connection=self.testconn)
    self.assertTrue(failed_queue.is_empty())
    self.testconn.zadd(self.registry.key, 1, 'foo')
    self.registry.move_expired_jobs_to_failed_queue()
    self.assertIn('foo', failed_queue.job_ids)
def run(self, args):
    redis = StrictRedis(config.REDIS_HOST, config.REDIS_PORT)
    names = args.queue_names
    fq = FailedQueue(connection=redis)

    def check_fjob(fjob):
        """Classify a failed job: 'requeue', 'delete', or None (leave it)."""
        if fjob.origin not in names:
            return None
        # XXX: should change to another database soon
        if isinstance(fjob.exc_info, str):
            # Transient failures: safe to requeue.
            temporary_errors = [
                'sqlite3.OperationalError: database is locked',
                'rq.timeouts.JobTimeoutException: Task exceeded maximum timeout value',
                'elasticsearch.exceptions.ConnectionTimeout: ConnectionTimeout caused by',
                'elasticsearch.exceptions.ConnectionError: ConnectionError',
                'elasticsearch.exceptions.TransportError: TransportError',
                'requests.exceptions.ConnectionError: HTTPSConnectionPool',
                'pdftotext',
                'not pdf, but `text/xml`',
                'OperationalError: database is locked',
                """oss2.exceptions.RequestError: {'status': -2, 'x-oss-request-id': '', 'details': "RequestError: ('Connection aborted.', timeout('timed out'))"}""",
                "requests.exceptions.ConnectionError: ('Connection aborted.', timeout('timed out'))",
                """"RequestError: ('Connection aborted.', BrokenPipeError(32, 'Broken pipe'))"}""",
                """oss2.exceptions.RequestError: {'status': -2, 'x-oss-request-id': '', 'details': "RequestError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))"}""",
                'port=80): Read timed out. (read timeout=60)"}',
            ]
            for s in temporary_errors:
                if s in fjob.exc_info:
                    return 'requeue'
            # Expected failures: safe to drop.
            permitted_errors = [
                'sqlite3.IntegrityError: UNIQUE constraint failed: progress.key'
            ]
            for s in permitted_errors:
                if s in fjob.exc_info:
                    return 'delete'
        return None

    while True:
        count_requeue = 0
        count_delete = 0
        fjobs = fq.get_jobs()
        for fjob in fjobs:
            t = check_fjob(fjob)
            if t == 'requeue':
                fq.requeue(fjob.id)
                count_requeue += 1
            elif t == 'delete':
                fjob.delete()
                count_delete += 1
        num_remain = len(fjobs) - count_requeue - count_delete
        print('{} failed jobs: {} requeued, {} deleted, {} remain'.format(
            len(fjobs), count_requeue, count_delete, num_remain))
        time.sleep(args.interval)
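# A hedged sketch of how the run() method above might be wired up. The
# command name, defaults, and the owning class are assumptions inferred
# from the attributes the method reads (args.queue_names, args.interval);
# none of this wiring appears in the original snippet.
import argparse

parser = argparse.ArgumentParser(description='Triage rq failed jobs')
parser.add_argument('queue_names', nargs='+',
                    help='only handle failed jobs originating from these queues')
parser.add_argument('--interval', type=int, default=60,
                    help='seconds to sleep between sweeps')
args = parser.parse_args()
# Command().run(args)  # hypothetical class that owns run()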
def test_cleanup(self):
    """Moving expired jobs to FailedQueue."""
    failed_queue = FailedQueue(connection=self.testconn)
    self.assertTrue(failed_queue.is_empty())
    self.testconn.zadd(self.registry.key, 1, "foo")
    self.registry.cleanup()
    self.assertIn("foo", failed_queue.job_ids)
    self.assertEqual(self.testconn.zscore(self.registry.key, "foo"), None)
def test_cleanup(self):
    """Moving expired jobs to FailedQueue."""
    failed_queue = FailedQueue(connection=self.testconn)
    self.assertTrue(failed_queue.is_empty())

    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)
    self.testconn.zadd(self.registry.key, 2, job.id)

    self.registry.cleanup(1)
    self.assertNotIn(job.id, failed_queue.job_ids)
    self.assertEqual(self.testconn.zscore(self.registry.key, job.id), 2)

    self.registry.cleanup()
    self.assertIn(job.id, failed_queue.job_ids)
    self.assertEqual(self.testconn.zscore(self.registry.key, job.id), None)
    job.refresh()
    self.assertEqual(job.status, JobStatus.FAILED)
def test_cleanup(self):
    """Moving expired jobs to FailedQueue."""
    failed_queue = FailedQueue(connection=self.testconn)
    self.assertTrue(failed_queue.is_empty())

    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)
    self.testconn.zadd(self.registry.key, 2, job.id)

    self.registry.cleanup(1)
    self.assertNotIn(job.id, failed_queue.job_ids)
    self.assertEqual(self.testconn.zscore(self.registry.key, job.id), 2)

    self.registry.cleanup()
    self.assertIn(job.id, failed_queue.job_ids)
    self.assertEqual(self.testconn.zscore(self.registry.key, job.id), None)
    job.refresh()
    self.assertEqual(job.get_status(), JobStatus.FAILED)
def get_queue_by_index(index):
    """
    Returns an rq Queue using parameters defined in ``QUEUES_LIST``
    """
    from .settings import QUEUES_LIST
    config = QUEUES_LIST[int(index)]
    if config['name'] == 'failed':
        return FailedQueue(connection=get_redis_connection(config['connection_config']))
    return Queue(
        config['name'],
        connection=get_redis_connection(config['connection_config']),
        # `async` became a reserved word in Python 3.7; rq renamed the
        # keyword argument to `is_async` in 0.12.
        is_async=config.get('ASYNC', True))
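# A hypothetical ``QUEUES_LIST`` entry that get_queue_by_index() above could
# consume. The keys ('name', 'connection_config', 'ASYNC') are inferred from
# the lookups in the function, not taken from any real settings file.
QUEUES_LIST = [
    {
        'name': 'default',
        'connection_config': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
        'ASYNC': True,
    },
    {
        'name': 'failed',
        'connection_config': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
    },
]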
def get_queue_by_index(index):
    """
    Returns an rq Queue using parameters defined in ``QUEUES_LIST``
    """
    from .settings import QUEUES_LIST
    config = QUEUES_LIST[int(index)]
    if config['name'] == 'failed':
        return FailedQueue(
            connection=get_redis_connection(config['connection_config']))
    return get_queue_class(config)(
        config['name'],
        connection=get_redis_connection(config['connection_config']),
        # Backward compat: if SYNC is not in the settings, fall back to the old ASYNC flag
        sync=config.get('SYNC', not config.get('ASYNC', True)))
def get_queue_by_index(index):
    """
    Returns an rq Queue using parameters defined in ``QUEUES_LIST``
    """
    from .settings import QUEUES_LIST
    config = QUEUES_LIST[int(index)]
    if config['name'] == 'failed':
        return FailedQueue(connection=get_redis_connection(config['connection_config']))
    if config['name'] == 'scheduled':
        scheduled_queue = get_scheduler()
        scheduled_queue.name = 'scheduled'
        scheduled_queue.key = scheduled_queue.scheduled_jobs_key
        return scheduled_queue
    return get_queue_class(config)(
        config['name'],
        connection=get_redis_connection(config['connection_config']),
        # `async` is reserved in Python 3.7+; rq's keyword is now `is_async`
        is_async=config.get('ASYNC', True))
def show_stats(write_fn):
    """Print some metrics to stdout"""
    queue = django_rq.get_queue()
    conn = django_rq.get_connection()

    write_fn("Queued:")
    for job in queue.jobs:
        _print(write_fn, job)

    write_fn("Started:")
    for job_id in registry.StartedJobRegistry(connection=conn).get_job_ids():
        _print(write_fn, queue.fetch_job(job_id))

    write_fn("Finished:")
    for job_id in registry.FinishedJobRegistry(connection=conn).get_job_ids():
        _print(write_fn, queue.fetch_job(job_id))

    write_fn("Failed:")
    for job in FailedQueue(connection=conn).jobs:
        _print(write_fn, job)
        for line in job.exc_info.split('\n'):
            write_fn("\t\t" + line)
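# show_stats() relies on a ``_print`` helper that is not part of the snippet;
# a minimal sketch of what it might look like, assuming it renders one job
# per line through the caller-supplied write_fn:
def _print(write_fn, job):
    # fetch_job() can return None if the job expired from Redis meanwhile.
    if job is None:
        write_fn("\t<job expired>")
        return
    write_fn("\t{} {} ({})".format(job.id, job.func_name, job.get_status()))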
def get_failed_queue(name='default'):
    """
    Returns the rq failed Queue using parameters defined in ``RQ_QUEUES``
    """
    return FailedQueue(connection=get_connection(name))
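# A minimal usage sketch for get_failed_queue() above; assumes an RQ_QUEUES
# entry named 'default' exists, and simply requeues everything currently failed.
fq = get_failed_queue('default')
for job_id in fq.job_ids:
    fq.requeue(job_id)  # FailedQueue.requeue() moves the job back to its origin queue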
from rq import Queue, requeue_job
from rq.queue import FailedQueue
from redis import Redis
import os

conn_redis = Redis(host=os.environ['REDIS_HOST'],
                   password=os.environ['REDIS_PASSWORD'])
qfailed = FailedQueue(connection=conn_redis)

fail_registry_api = []
fail_no_xtf_results = []
fail_timeout = []
fail_other = []

### for job in Queue(connection=conn_redis).jobs:
###     job.timeout = 2 * job.timeout
###     job.save()
###     print(job, job.timeout)

# Bucket failed jobs by the error recorded in exc_info
for job in qfailed.jobs:
    if "HTTPSConnectionPool(host='registry.cdlib.org'" in job.exc_info:
        fail_registry_api.append(job)
    elif "ValueError: http://dsc.cdlib.org/search" in job.exc_info:
        fail_no_xtf_results.append(job)
    elif "Job exceeded maximum timeout value" in job.exc_info:
        fail_timeout.append(job)
    else:
        fail_other.append(job)

print(80 * '=')
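# The script above only buckets the failures; a hedged sketch of what might
# follow: requeue the timed-out jobs with a larger timeout and report the
# rest. The doubling factor is an assumption, not part of the original.
for job in fail_timeout:
    job.timeout = 2 * job.timeout
    job.save()
    qfailed.requeue(job.id)

print('registry api errors: {}'.format(len(fail_registry_api)))
print('no xtf results: {}'.format(len(fail_no_xtf_results)))
print('timeouts requeued: {}'.format(len(fail_timeout)))
print('other: {}'.format(len(fail_other)))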
import os
import sys

from redis import Redis
from rq import Connection, Queue, Worker
from rq.queue import FailedQueue

REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
redis_conn = Redis(host=REDIS_HOST, password=REDIS_PASSWORD)
qfailed = FailedQueue(connection=redis_conn)
# print(dir(qfailed))

# action can be one of 'requeue' or 'cancel'
err_search = 'timeout'
action = 'requeue'
# action = 'cancel'

jobs_filtered = []
for job in qfailed.jobs:
    print(job.dump())
    # print(job.dump().keys())
    if err_search in job.dump()['exc_info']:
        jobs_filtered.append(job)
        job.timeout = 604800  # 1 week
        job.save()
        if action == 'requeue':
            result = qfailed.requeue(job.id)
            # q = Queue(job.dump()['origin'], connection=redis_conn)
            # result = q.enqueue(job)
            print(result)

print('{} jobs matched {}'.format(len(jobs_filtered), err_search))
from flask import Flask, render_template, send_from_directory
from flask_socketio import SocketIO, send, emit, join_room, leave_room
from redis import Redis
from rq import Queue
from rq.queue import FailedQueue
import pbclient
import rq_dashboard

import settings
from jobs import async_upload

app = Flask(__name__)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
app.config['REDIS_DB'] = settings.REDIS_DB
socketio = SocketIO(app)

q = Queue(connection=Redis(db=settings.REDIS_DB))
fq = FailedQueue(connection=Redis(db=settings.REDIS_DB))

pbclient.set('api_key', settings.APIKEY)
pbclient.set('endpoint', settings.SERVER_NAME)


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/static/js/<path:path>')
def send_js(path):
    return send_from_directory('js', path)


def allowed_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in settings.ALLOWED_EXTENSIONS
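# The app above imports async_upload but the enqueueing route is not shown; a
# minimal sketch, assuming async_upload(path) is the job function and that a
# hypothetical settings.UPLOAD_FOLDER holds incoming files.
import os
from flask import request


@app.route('/upload', methods=['POST'])
def upload():
    f = request.files['file']
    if not (f and allowed_file(f.filename)):
        return 'file type not allowed', 400
    path = os.path.join(settings.UPLOAD_FOLDER, f.filename)  # UPLOAD_FOLDER is assumed
    f.save(path)
    job = q.enqueue(async_upload, path)  # hand off to the rq worker
    return job.id, 202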