Example #1
 def handle(self, *args, **options):
     q = Queue('ping_subnet', connection=redis_conn)
     q.empty()
     count = options['count']
     all_subnet = Subnet.objects.order_by("ping_latest_time").all()[:count]
     for subnet in all_subnet:
         q.enqueue(ping_subnet, subnet)
Example #2
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEquals(q.count, 1)

        # keep for later
        enqueued_at_date = strip_microseconds(job.enqueued_at)

        w = Worker([q])
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 1)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEquals(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEquals(job.enqueued_at, enqueued_at_date)
        self.assertIsNotNone(job.exc_info)  # should contain exc_info
Example #3
    def test_worker_sets_job_status(self):
        """Ensure that worker correctly sets job status."""
        q = Queue()
        w = Worker([q])

        job = q.enqueue(say_hello)
        self.assertEqual(job.get_status(), Status.QUEUED)
        self.assertEqual(job.is_queued, True)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, False)

        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FINISHED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, True)
        self.assertEqual(job.is_failed, False)

        # Failed jobs should set status to "failed"
        job = q.enqueue(div_by_zero, args=(1,))
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FAILED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, True)
Example #4
def computeUsers():
    '''
    Computes information about all users from a
    CKAN instance.

    '''
    key = 'users'
    status = getStatus(key)
    queue = Queue(connection=Redis(host=REDIS_HOST), name=key)
    objects = ckan.action.user_list()
    if status['empty']:
        for object in objects:
            job = queue.enqueue(fetchAndStore, key, object['id'])

    response = {
        'success': True,
        'message': 'Computing user information. {n} before finished.'.format(n=status['count']),
        'endpoint': key,
        'time': None,
        'ETA': None,
        'computations': {
            'total': len(objects),
            'completed': len(objects) - status['count'],
            'queued': status['count'],
            'progress': round(((len(objects) - status['count']) / len(objects)) * 100, 2)
        }
    }

    return flask.jsonify(**response)
Example #5
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,))
        job.save()
        data = self.testconn.hget(job.key, 'data')
        invalid_data = data.replace(b'div_by_zero', b'nonexisting')
        assert data != invalid_data
        self.testconn.hset(job.key, 'data', invalid_data)

        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)

        self.assertEquals(q.count, 1)

        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)   # should silently pass
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 1)
Example #6
    def test_cancelled_jobs_arent_executed(self):  # noqa
        """Cancelling jobs."""

        SENTINEL_FILE = '/tmp/rq-tests.txt'

        try:
            # Remove the sentinel if it is leftover from a previous test run
            os.remove(SENTINEL_FILE)
        except OSError as e:
            if e.errno != 2:
                raise

        q = Queue()
        job = q.enqueue(create_file, SENTINEL_FILE)

        # Here, we cancel the job, so the sentinel file may not be created
        assert q.count == 1
        job.cancel()
        assert q.count == 1

        w = Worker([q])
        w.work(burst=True)
        assert q.count == 0

        # Should not have created evidence of execution
        self.assertEquals(os.path.exists(SENTINEL_FILE), False)
Example #7
    def test_timeouts(self):
        """Worker kills jobs after timeout."""
        sentinel_file = '/tmp/.rq_sentinel'

        q = Queue()
        w = Worker([q])

        # Put it on the queue with a timeout value
        res = q.enqueue(create_file_after_timeout,
                        args=(sentinel_file, 4),
                        timeout=1)

        try:
            os.unlink(sentinel_file)
        except OSError as e:
            if e.errno != 2:
                raise

        self.assertEquals(os.path.exists(sentinel_file), False)
        w.work(burst=True)
        self.assertEquals(os.path.exists(sentinel_file), False)

        # TODO: Having to do the manual refresh() here is really ugly!
        res.refresh()
        self.assertIn('JobTimeoutException', as_text(res.exc_info))
Example #8
class CSVRedisWrapper:
    def __init__(self, job=None):
        self.connection = redis.Redis(
            host=app.config['REDIS_HOST'],
            port=app.config['REDIS_PORT']
        )
        self.q_queue = Queue(connection=self.connection)
        self.q_failed = Queue(connection=self.connection)
        self.__job = job

    def csv_import(self, csv_fileobj):
        c = CSVRedis(csv_fileobj=csv_fileobj, queue=self.q_queue)
        self.__job = c.job

    def get_job(self, job_id):
        self.__job = self.q_queue.fetch_job(job_id)
        if self.__job is None:
            raise RedisJobMissing()

    @property
    def job(self):
        return self.__job

    def finished(self):
        if self.__job is None:
            raise RedisJobMissing()
        if self.__job.result is not None:
            return True
        return False

    def failed(self):
        if self.__job is None:
            raise RedisJobMissing()
        try:
            if self.__job.failed is True:
                return True
        except AttributeError:
            return False
        if self.q_failed.fetch_job(self.job.id):
            return True
        return False

    def bad_records(self):
        if self.__job is None:
            raise RedisJobMissing()
        if self.finished():
            return self.job.result[0]
        return []

    def failures(self):
        if self.__job is None:
            raise RedisJobMissing()
        if self.finished():
            return self.job.result[1]
        return []

    def import_id(self):
        if self.__job is None:
            raise RedisJobMissing()
        return self.job.meta['import_id']
Example #9
    def test_custom_exc_handling(self):
        """Custom exception handling."""
        def black_hole(job, *exc_info):
            # Don't fall through to default behaviour (moving to failed queue)
            return False

        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEquals(q.count, 1)

        w = Worker([q], exc_handler=black_hole)
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 0)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEquals(job.is_failed, True)
Example #10
def main():
    import sys
    sys.path.insert(0, '.')

    opts, args, parser = parse_args()

    use_connection()

    queues = ('default', 'high', 'low')

    sample_calls = [
            (dummy.do_nothing, [], {}),
            (dummy.sleep, [1], {}),
            (dummy.fib, [8], {}),              # normal result
            (dummy.fib, [24], {}),             # takes pretty long
            (dummy.div_by_zero, [], {}),       # 5 / 0 => div by zero exc
            (dummy.random_failure, [], {}),    # simulate random failure (handy for requeue testing)
    ]

    for i in range(opts.count):
        import random
        f, args, kwargs = random.choice(sample_calls)

        q = Queue(random.choice(queues))
        q.enqueue(f, *args, **kwargs)
Example #11
    def test_suspend_worker_execution(self):
        """Test Pause Worker Execution"""

        SENTINEL_FILE = '/tmp/rq-tests.txt'

        try:
            # Remove the sentinel if it is leftover from a previous test run
            os.remove(SENTINEL_FILE)
        except OSError as e:
            if e.errno != 2:
                raise

        q = Queue()
        q.enqueue(create_file, SENTINEL_FILE)

        w = Worker([q])

        suspend(self.testconn)

        w.work(burst=True)
        assert q.count == 1

        # Should not have created evidence of execution
        self.assertEqual(os.path.exists(SENTINEL_FILE), False)

        resume(self.testconn)
        w.work(burst=True)
        assert q.count == 0
        self.assertEqual(os.path.exists(SENTINEL_FILE), True)
Example #12
def crawlPaperCitationWithRQ():
    # This method crawls paper citations in a distributed way via Redis/RQ.
    ci = MongoDBInterface()
    ci.setCollection(main_paper_with_citation)

    mi = MongoDBInterface()
    mi.setCollection(main_paper_list)

    docs = [doc for doc in mi.getAllDocuments()]
    mi.disconnect()

    random.shuffle(docs)

    redis_conn = Redis(redis_server)
    q = Queue(connection=redis_conn)

    for doc in docs:
        if ci.getOneDocument(condition={'_id': doc['_id']}) is not None:
            print 'paper %s is already there' % doc['_id']
            continue

        q.enqueue_call(func=crawlCitationPaper, args=(doc,), timeout=time_out)

    ci.disconnect()
Example #13
 def process(event):
     """
     event.event_type
         'modified' | 'created' | 'moved' | 'deleted'
     event.is_directory
         True | False
     event.src_path
         path/to/observed/file
     """
     q = Queue(connection=Redis(host="redis"), default_timeout=86400)
     if event.event_type == "created":
         print event.event_type, event.src_path
         # let jobs run for up to one day
         # let jobs be queued for up to 30 days
         if event.src_path.endswith(".template"):
             result = q.enqueue('file_watch.template_queue', event.event_type+":"+event.src_path, ttl=2592000)
     elif event.event_type == "modified":
         print event.event_type, event.src_path
         # let jobs run for up to one day
         # let jobs be queued for up to 30 days
         if event.src_path.endswith(".template"):
             result = q.enqueue('file_watch.template_queue', event.event_type+":"+event.src_path, ttl=2592000)
     elif event.event_type == "deleted":
         print event.event_type, event.src_path
         # let jobs run for up to one day
         # let jobs be queued for up to 30 days
         if event.src_path.endswith(".template"):
             result = q.enqueue('file_watch.template_queue', event.event_type+":"+event.src_path, ttl=2592000)
Example #14
    def test_worker_logs_success(self):
        """Worker can log success response of various jobs"""
        q = Queue()
        w = Worker([q])

        # ascii response
        job = q.enqueue(say_hello)
        w.prepare_job_execution(job)
        try:
            success = w.perform_job(job, q)
        except UnicodeEncodeError:
            self.fail('perform_job raised UnicodeEncodeError unexpectedly')
        self.assertTrue(success)

        # bytestring response
        job = q.enqueue(return_bytestring)
        w.prepare_job_execution(job)
        try:
            success = w.perform_job(job, q)
        except UnicodeDecodeError:
            self.fail('perform_job raised UnicodeDecodeError unexpectedly')
        self.assertTrue(success)

        # unicode response
        job = q.enqueue(return_unicode)
        w.prepare_job_execution(job)
        try:
            success = w.perform_job(job, q)
        except UnicodeEncodeError:
            self.fail('perform_job raised UnicodeEncodeError unexpectedly')
        self.assertTrue(success)
Example #15
File: app.py Project: eJon/tiler
def main():  # pragma: no cover
    tornado.options.parse_command_line()

    q = Queue(connection=redis.client.Redis(
        settings.REDIS_HOST,
        settings.REDIS_PORT
    ))

    job = q.enqueue(handlers.sample_queue_job)
    for i in range(12):
        if job.result:
            break
        sleep(i / 10.0)
        if i > 0 and not i % 3:
            print "Waiting to see if Queue workers are awake..."
    else:
        raise SystemError("Queue workers not responding")

    http_server = tornado.httpserver.HTTPServer(Application())
    print "Starting tornado on port", options.port
    http_server.listen(options.port)
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        pass
Example #16
    def test_queue_is_empty(self):
        """Detecting empty queues."""
        q = Queue('example')
        self.assertEquals(q.is_empty(), True)

        self.testconn.rpush('rq:queue:example', 'sentinel message')
        self.assertEquals(q.is_empty(), False)
Example #17
    def test_worker_sets_result_ttl(self):
        """Ensure that Worker properly sets result_ttl for individual jobs."""
        q = Queue()
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertNotEqual(self.testconn._ttl(job.key), 0)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))

        # Jobs with result_ttl == -1 don't expire
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertEqual(self.testconn._ttl(job.key), -1)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))

        # Job with result_ttl = 0 gets deleted immediately
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
        w = Worker([q])
        self.assertIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
        w.work(burst=True)
        self.assertEqual(self.testconn.get(job.key), None)
        self.assertNotIn(job.get_id().encode('utf-8'), self.testconn.lrange(q.key, 0, -1))
Example #18
 def test_enqueue_preserves_result_ttl(self):
     """Enqueueing persists result_ttl."""
     q = Queue()
     job = q.enqueue(div_by_zero, args=(1, 2, 3), result_ttl=10)
     self.assertEqual(job.result_ttl, 10)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(int(job_from_queue.result_ttl), 10)
Example #19
 def test_empty_removes_jobs(self):
     """Emptying a queue deletes the associated job objects"""
     q = Queue('example')
     job = q.enqueue(say_hello)
     self.assertTrue(Job.exists(job.id))
     q.empty()
     self.assertFalse(Job.exists(job.id))
Example #20
 def test_dequeue_deleted_jobs(self):
     """Dequeueing deleted jobs from queues don't blow the stack."""
     q = Queue()
     for _ in range(1, 1000):
         job = q.enqueue(say_hello)
         job.delete()
     q.dequeue()
Example #21
    def test_enqueue_explicit_args(self):
        """enqueue() works for both implicit/explicit args."""
        q = Queue()

        # Implicit args/kwargs mode
        job = q.enqueue(echo, 1, timeout=1, result_ttl=1, bar='baz')
        self.assertEqual(job.timeout, 1)
        self.assertEqual(job.result_ttl, 1)
        self.assertEqual(
            job.perform(),
            ((1,), {'bar': 'baz'})
        )

        # Explicit kwargs mode
        kwargs = {
            'timeout': 1,
            'result_ttl': 1,
        }
        job = q.enqueue(echo, timeout=2, result_ttl=2, args=[1], kwargs=kwargs)
        self.assertEqual(job.timeout, 2)
        self.assertEqual(job.result_ttl, 2)
        self.assertEqual(
            job.perform(),
            ((1,), {'timeout': 1, 'result_ttl': 1})
        )
Example #22
def list_jobs(queue_name, page):
    current_page = int(page)
    queue = Queue(queue_name)
    per_page = 5
    total_items = queue.count
    pages_numbers_in_window = pagination_window(
        total_items, current_page, per_page)
    pages_in_window = [
        dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))
        for p in pages_numbers_in_window
    ]
    last_page = int(ceil(total_items / float(per_page)))

    prev_page = None
    if current_page > 1:
        prev_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page - 1)))

    next_page = None
    if current_page < last_page:
        next_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page + 1)))

    pagination = remove_none_values(
        dict(
            pages_in_window=pages_in_window,
            next_page=next_page,
            prev_page=prev_page
        )
    )

    offset = (current_page - 1) * per_page
    jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]
    return dict(name=queue.name, jobs=jobs, pagination=pagination)
Example #23
def enqueue(timelines=[]):
    "Enqueue timelines to process."
    now = datetime.datetime.utcnow()
    logger.debug("Start enqueue...")
    session = db.session()
    try:
        if not timelines:
            timelines = session.query(Timeline).\
                filter(Timeline.enabled == True, Timeline.next_check < now).\
                order_by(Timeline.next_check)
        else:
            timelines = [session.merge(t) for t in timelines]
        with Connection(r):
            q = Queue(QUEUE)
            for timeline in timelines:
                description = unicode(timeline).encode('utf8')
                if timeline.state == State.BUSY and \
                    timeline.next_check + datetime.timedelta(seconds=Timeline.DEFAULT_FREQUENCY) > now: # warning
                    logger.debug('Skipped: %s', description)
                    continue
                user_id, list_id = (timeline.user_id, timeline.list_id)
                job = q.enqueue_call(func=process, args=(user_id, list_id), 
                    description=description, result_ttl=RESULT_TTL, timeout=TIMEOUT) # job_id=unicode(user_id), result_ttl=0
                timeline.state = State.BUSY
                logger.debug('Queued: %s', description)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        logger.debug("End enqueue")
Example #24
def queue_image_harvest(redis_host,
                        redis_port,
                        redis_password,
                        redis_timeout,
                        rq_queue,
                        collection_key,
                        url_couchdb=None,
                        object_auth=None,
                        get_if_object=False,
                        ignore_content_type=False,
                        harvest_timeout=IMAGE_HARVEST_TIMEOUT):
    rQ = Queue(
        rq_queue,
        connection=Redis(
            host=redis_host,
            port=redis_port,
            password=redis_password,
            socket_connect_timeout=redis_timeout))
    job = rQ.enqueue_call(
        func='harvester.image_harvest.main',
        kwargs=dict(
            collection_key=collection_key,
            url_couchdb=url_couchdb,
            object_auth=object_auth,
            get_if_object=get_if_object,
            ignore_content_type=ignore_content_type),
        timeout=harvest_timeout)
    return job
Example #25
def test_parallel_strategy():

	# Tell RQ what Redis connection to use
	redis_conn = util.get_redis_conn()
	q = Queue(connection=redis_conn, default_timeout=1500)  # no args implies the default queue
	import dateutil.parser
	startdate = dateutil.parser.parse('2010-06-30T08:00:00.000Z')
	enddate = dateutil.parser.parse('2014-12-31T08:00:00.000Z')
	#startdate = dateutil.parser.parse('2012-06-30T08:00:00.000Z')
	#enddate = dateutil.parser.parse('2015-08-31T08:00:00.000Z')

	jobList = []

	for ticker in tickerList:
		#jobList.append(q.enqueue(xiQuantStrategyUtil.run_strategy_redis,20, ticker, 100000, startdate, enddate, indicators=False))
		jobList.append(q.enqueue(xiQuantStrategyUtil.run_strategy_redis_debug,20, ticker, 100000, startdate, enddate))

	master_orders = [] #### populate master list of  orders dictionary...
	jobID = 1
	for job in jobList:
		try:
			print "Currently processing job id: ", jobID
			sleep = True
			while(sleep):
				time.sleep(1)
				if job.get_status() == 'failed' or job.get_status()=='finished':
					sleep = False
			if job.get_status() == 'finished' and any(job.result):
				master_orders.append(job.result)
				#master_orders.append(job.result.getOrdersFilteredByMomentumRank(filterCriteria=rank))
				#master_orders.append(job.result.getOrdersFilteredByRules())
			jobID +=1
		except Exception,e:
			print "Entered into exception block while processing:...", str(e)
			pass ### Make sure you move on with other job...
Example #26
def main():
    # Range of Fibonacci numbers to compute
    fib_range = range(20, 34)

    # Kick off the tasks asynchronously
    async_results = {}
    q = Queue()
    for x in fib_range:
        async_results[x] = q.enqueue(slow_fib, x)

    start_time = time.time()
    done = False
    while not done:
        os.system('clear')
        print('Asynchronously: (now = %.2f)' % (time.time() - start_time,))
        done = True
        for x in fib_range:
            result = async_results[x].return_value
            if result is None:
                done = False
                result = '(calculating)'
            print('fib(%d) = %s' % (x, result))
        print('')
        print('To start the actual computation in the background, run a worker:')
        print('    python examples/run_worker.py')
        time.sleep(0.2)

    print('Done')
Example #27
def redis_network():
	use_connection(redis_conn)
	q = Queue('high', connection=redis_conn)
	job = q.enqueue(setNetworkDevs)
	job = q.enqueue(setLocalIP)
	job = q.enqueue(setGlobalIP)
	job = q.enqueue(setPingInfo, get_globalparam("pinghost"), 10)
Example #28
def requeue_2xtimeout_job_view(job_id):
    # Get the handle for the failed queue
    fq = get_failed_queue()
    # Fetch the job from the failed queue
    job = fq.fetch_job(job_id)
    # Test if the job exists
    if job is None:
        raise NoSuchJobError(
            'Job {} does not exist in failed queue'.format(job_id)
        )
    # Remove the job from the failed queue
    if fq.remove(job_id) == 0:
        raise InvalidJobOperationError('Cannot requeue non-failed jobs')
    # Reset the job state
    job.set_status(JobStatus.QUEUED)
    job.exc_info = None
    if not job.timeout:
        job.timeout = Queue.DEFAULT_TIMEOUT
    # Double the timeout
    job.timeout *= 2
    # Get a handle for the original queue
    q = Queue(job.origin, connection=job.connection)
    # Queue the job
    q.enqueue_job(job)
    return dict(status='OK')
Example #29
def list_jobs(queue_name, page):
    current_page = int(page)
    queue = Queue(queue_name)
    per_page = current_app.config.get('RQ_DASHBOARD_JOBS_PER_PAGE', 5)
    order = current_app.config.get('RQ_DASHBOARD_JOBS_ORDER_DESC', True)
    total_items = queue.count
    pages_numbers_in_window = pagination_window(total_items, current_page, per_page)
    pages_in_window = [dict(number=p, url=url_for('.overview',
                       queue_name=queue_name, page=p)) for p in pages_numbers_in_window]
    last_page = int(ceil(total_items / float(per_page)))

    prev_page = None
    if current_page > 1:
        prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1)))

    next_page = None
    if current_page < last_page:
        next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1)))

    pagination = remove_none_values(
        dict(pages_in_window=pages_in_window,
            next_page=next_page,
            prev_page=prev_page))
    if order:
        tmp_offset = total_items - ((current_page - 1) * per_page)
        if tmp_offset >= per_page:
            offset = tmp_offset - per_page
        else:
            offset = 0
            per_page = tmp_offset
        jobs = list(reversed([serialize_job(job) for job in queue.get_jobs(offset, per_page)]))
    else:
        offset = (current_page - 1) * per_page
        jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]
    return dict(name=queue.name, jobs=jobs, pagination=pagination)
Example #30
 def test_work_via_string_argument(self):
     """Worker processes work fed via string arguments."""
     q = Queue("foo")
     w = Worker([q])
     job = q.enqueue("tests.fixtures.say_hello", name="Frank")
     self.assertEquals(w.work(burst=True), True, "Expected at least some work done.")
     self.assertEquals(job.result, "Hi there, Frank!")
Example #31
class RQExperiment(ExperimentBase):
    """An experiment that uses the python-rq library to enqueue tasks and wait for them to finish.

    http://python-rq.org/

    For this experiment to complete, you need some amount of RQ workers running the Triage codebase
    (either on the same machine as the experiment or elsewhere),
    and a Redis instance that both the experiment process and RQ workers can access.

    Args:
        redis_connection (redis.connection): A connection to a Redis instance that
            some rq workers can also access
        sleep_time (int, default 5): How many seconds the process should sleep while
            waiting for RQ results
        queue_kwargs (dict, default {}): Any extra keyword arguments to pass to Queue creation
    """
    def __init__(self,
                 redis_connection,
                 sleep_time=5,
                 queue_kwargs=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.redis_connection = redis_connection
        if queue_kwargs is None:
            queue_kwargs = {}
        self.queue = Queue(connection=self.redis_connection, **queue_kwargs)
        self.sleep_time = sleep_time

    def wait_for(self, jobs):
        """Wait for a list of jobs to complete

        Will run until all jobs are either finished or failed.

        Args:
            jobs (list of rq.Job objects)

        Returns: (list) of job return values
        """
        while True:
            num_done = sum(1 for job in jobs if job.is_finished)
            num_failed = sum(1 for job in jobs if job.is_failed)
            num_pending = sum(1 for job in jobs
                              if not job.is_finished and not job.is_failed)
            logging.info(
                "Report: jobs %s done, %s failed, %s pending",
                num_done,
                num_failed,
                num_pending,
            )
            if num_pending == 0:
                logging.info("All jobs completed or failed, returning")
                return [job.result for job in jobs]
            else:
                logging.info("Sleeping for %s seconds", self.sleep_time)
                time.sleep(self.sleep_time)

    def process_query_tasks(self, query_tasks):
        """Run queries by table

        Will run preparation (e.g. create table) and finalize (e.g. create index) tasks
        in the main process,
        but delegate inserts to rq Jobs in batches of 25

        Args: query_tasks (dict) - keys should be table names and values should be dicts.
            Each inner dict should have up to three keys, each with a list of queries:
            'prepare' (setting up the table),
            'inserts' (insert commands to populate the table),
            'finalize' (finishing table setup after all inserts have run)

            Example: {
                'table_one': {
                    'prepare': ['create table table_one (col1 varchar)'],
                    'inserts': [
                        'insert into table_one values (\'a\')',
                        'insert into table_one values (\'b\')'
                    ],
                    'finalize': ['create index on table_one (col1)']
                }
            }
        """
        for table_name, tasks in query_tasks.items():
            logging.info("Processing features for %s", table_name)
            self.feature_generator.run_commands(tasks.get("prepare", []))

            insert_batches = [
                list(task_batch)
                for task_batch in Batch(tasks.get("inserts", []), 25)
            ]
            jobs = [
                self.queue.enqueue(
                    self.feature_generator.run_commands,
                    insert_batch,
                    timeout=DEFAULT_TIMEOUT,
                    result_ttl=DEFAULT_TIMEOUT,
                    ttl=DEFAULT_TIMEOUT,
                ) for insert_batch in insert_batches
            ]
            self.wait_for(jobs)

            self.feature_generator.run_commands(tasks.get("finalize", []))
            logging.info("%s completed", table_name)

    def process_matrix_build_tasks(self, matrix_build_tasks):
        """Run matrix build tasks using RQ

        Args:
            matrix_build_tasks (dict) Keys should be matrix uuids (though not used here),
                values should be dictionaries suitable as kwargs for sending
                to self.matrix_builder.build_matrix

        Returns: (list) of job results for each given task
        """
        jobs = [
            self.queue.enqueue(self.matrix_builder.build_matrix,
                               timeout=DEFAULT_TIMEOUT,
                               result_ttl=DEFAULT_TIMEOUT,
                               ttl=DEFAULT_TIMEOUT,
                               **build_task)
            for build_task in matrix_build_tasks.values()
        ]
        return self.wait_for(jobs)

    def process_train_tasks(self, train_tasks):
        """Run train tasks using RQ

        Args:
            train_tasks (list) of dictionaries, each representing kwargs suitable
                for self.trainer.process_train_task
        Returns: (list) of job results for each given task
        """
        jobs = [
            self.queue.enqueue(self.trainer.process_train_task,
                               timeout=DEFAULT_TIMEOUT,
                               result_ttl=DEFAULT_TIMEOUT,
                               ttl=DEFAULT_TIMEOUT,
                               **train_task) for train_task in train_tasks
        ]
        return self.wait_for(jobs)

    def process_model_test_tasks(self, test_tasks):
        """Run test tasks using RQ

        Args:
            test_tasks (list) of dictionaries, each representing kwargs suitable
                for self.tester.process_model_test_task
        Returns: (list) of job results for each given task
        """
        jobs = [
            self.queue.enqueue(self.tester.process_model_test_task,
                               timeout=DEFAULT_TIMEOUT,
                               result_ttl=DEFAULT_TIMEOUT,
                               ttl=DEFAULT_TIMEOUT,
                               **test_task) for test_task in test_tasks
        ]
        return self.wait_for(jobs)
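The class docstring above notes that this experiment only completes if RQ workers with access to the same Redis instance (and the Triage codebase) are running. A minimal worker-side sketch, assuming a local Redis and the default queue name (both assumptions, not taken from the original project):

# Hypothetical worker-side script; host, port, and queue name are assumptions.
from redis import Redis
from rq import Queue, Worker

redis_connection = Redis(host="localhost", port=6379)
queue = Queue(connection=redis_connection)
Worker([queue], connection=redis_connection).work()

In practice the same effect is usually achieved by running the rqworker command on a host where the experiment's code is importable.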
Example #32
from marshmallow import ValidationError
import os
from rq import Queue
from rq.job import Job
from werkzeug.contrib.fixers import ProxyFix

from charon import config
import charon.cloudutils
import charon.util
import charon.scanners.nessus
from charon.worker import conn

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

q = Queue(connection=conn)


def create_app():


    app = Flask(__name__)
    APIKEY = os.environ['CHARON_APIKEY']
    CONFIG_FILE = os.environ['CHARON_CONFIG_FILE']

    app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=1)
    limiter = Limiter(app, key_func=get_remote_address)

    def require_apikey(func):
        @wraps(func)
        def decorated_function(*args, **kwargs):
Example #33
def emptyQueue():
    q = Queue(connection=Redis(REDIS_HOST, REDIS_PORT))
    q.empty()
    return
Example #34
def list_queues(instance_number):
    queues = serialize_queues(instance_number, sorted(Queue.all()))
    return dict(queues=queues)
Example #35
def compact_queue(queue_name):
    q = Queue(queue_name)
    q.compact()
    return dict(status="OK")
Example #36
def get_job(job_id):

    with Connection(connection=Redis(host="redis-server")):
        q = Queue()
        job = q.fetch_job(job_id)
    if job:
        response_object = {
            'data': {
                'job_id': job.get_id(),
                'job_status': job.get_status(),
                'job_result': job.result,
                'job_is_started': job.is_started,
                'job_started_at': job.started_at,
                'job_is_queued': job.is_queued,
                'job_timeout': job.timeout,
                'job_enqueued_at': job.enqueued_at,
                'job_ended_at': job.ended_at,
                'job_func_name': job.func_name,
                'job_args': job.args,
                'job_kwargs': job.kwargs,
            }
        }

        if job.is_failed:
            response_object = {
                'status': 'failed',
                'data': {
                    'job_id': job.get_id(),
                    'job_status': job.get_status(),
                    'job_result': job.result,
                    'message': job.exc_info.strip().split('\n')[-1]
                }
            }
    else:
        response_object = {'status': 'ERROR: Unable to fetch the job from RQ'}

    # Retrieve directory id by providing known job id
    directory_name = jobs_collection.find_one({"key": job_id})

    # If there is no such directory yet
    if not directory_name:
        abort(404)
    directory_name = directory_name['data']

    image_filename = f"{directory_name}.png"

    json_result = result_collection.find_one({"key": directory_name})

    if json_result:
        observed_counts = json_result["data"]["observed_counts"]
        expected_counts = json_result["data"]["expected_counts"]
        first_digit_probabilities = json_result["data"][
            "first_digit_probabilities"]
        chi_squared_test_statistic = json_result["data"][
            "chi-squared_test_statistic"]
        result = json_result["data"]["result"]
    else:
        observed_counts = None
        expected_counts = None
        first_digit_probabilities = None
        chi_squared_test_statistic = None
        result = None

    return render_template(
        'job.html',
        job=response_object,
        image_name=image_filename,
        observed_counts=observed_counts,
        expected_counts=expected_counts,
        first_digit_probabilities=first_digit_probabilities,
        chi_squared_test_statistic=chi_squared_test_statistic,
        result=result)
Example #37
def queue_urls(urls):
    urls = filter_urls(urls)
    q = Queue(connection=Redis())
    for url in urls:
        if spyder.lookup_page(url) is None:
            result = q.enqueue(crawl_and_store_page, url)
Example #38
# -*- coding: utf-8 -*-
"""RQ queues for Parker."""

from rq import Queue
from redis import StrictRedis
from configloader import load_config

_redis_config = load_config('redis')

crawl_q = Queue(
    'crawl',
    connection=StrictRedis(
        **_redis_config
    )
)
consume_q = Queue(
    'consume',
    connection=StrictRedis(
        **_redis_config
    )
)
Example #39
 def test_create_worker(self):
     """Worker creation."""
     fooq, barq = Queue('foo'), Queue('bar')
     w = Worker([fooq, barq])
     self.assertEquals(w.queues, [fooq, barq])
Example #40
import socket
import datetime

from redis import Redis
from rq import Connection,Queue
from flask.ext.redis import Redis as fRedis
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.cache import Cache
from flask.ext.mail import Mail

db = SQLAlchemy()
cache = Cache()
mail = Mail()
redis = fRedis()
rq = Queue('motiky',connection=Redis())

class DefaultConfig(object):

    DEBUG = False
    SECRET_KEY = 'lifeistooshorttowait'
    APPLICATION_SECRET = 'lifeistooshorttowait'
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://user:password@localhost/motiky'
    SQLALCHEMY_ECHO = False

class TestConfig(object):
    CONFIG_TYPE = 'test'
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://user:password@localhost/test'
    SQLALCHEMY_ECHO = False
    APPLICATION_SECRET = 'lifeistooshorttowait'
    CSRF_ENABLED = False
Example #41
from rq import Queue, Connection

__author__ = 'bahadircambel'

with Connection():
    worker_queue = Queue()
Example #42
from flask import Flask
from flask_cors import CORS
from index import getName
from forone import forone
from start import start
from checkLifes import getLife
from subjectWise import subjectWise
from main import main
from stats import *
from newlnct import newlnct
from firebase import Firebase
from newlnctnotification import getAttendance
from cryptography.fernet import Fernet
from redis import Redis
from rq import Queue
r = Redis()
q = Queue(connection=r)
from flask_sslify import SSLify
from dateWise import dateWise, getDateWiseAttendace
import os
config = {
    "apiKey": str(os.getenv('apiKey1')),
    "authDomain": "lnctdata.firebaseapp.com",
    "databaseURL": "https://lnctdata.firebaseio.com",
    "projectId": "lnctdata",
    "storageBucket": "lnctdata.appspot.com",
    "messagingSenderId": str(os.getenv('messageid1')),
    "databaseURL": "https://lnctdata.firebaseio.com"
}
firebase = Firebase(config)
db = firebase.database()
app = Flask(__name__, static_url_path='/static', template_folder="templates")
Example #43
    title="Bugbug",
    version=get_bugbug_version(),
    openapi_version="3.0.2",
    info=dict(description=API_DESCRIPTION),
    plugins=[FlaskPlugin(), MarshmallowPlugin()],
    security=[{
        "api_key": []
    }],
)

application = Flask(__name__)
redis_url = os.environ.get("REDIS_URL", "redis://localhost/0")
redis_conn = Redis.from_url(redis_url)

JOB_TIMEOUT = 1800  # 30 minutes in seconds
q = Queue(connection=redis_conn,
          default_timeout=JOB_TIMEOUT)  # no args implies the default queue
VALIDATOR = Validator()

BUGZILLA_TOKEN = os.environ.get("BUGBUG_BUGZILLA_TOKEN")

# Keep an HTTP client around for persistent connections
BUGBUG_HTTP_CLIENT, BUGZILLA_API_URL = get_bugzilla_http_client()

logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger()


class BugPrediction(Schema):
    prob = fields.List(fields.Float())
    index = fields.Integer()
    suggestion = fields.Str()
Example #44
#!/usr/bin/env python

import sys
from rq import Queue, Connection, Worker

# Provide queue names to listen to as arguments to this script, similar to rqworker
with Connection():
    qs = list(map(Queue, sys.argv[1:])) or [Queue()]
    w = Worker(qs)
    w.work()
Example #45
def get_queue(queue_name):
    if queue_name == 'failed':
        return get_failed_queue()
    else:
        return Queue(queue_name)
Example #46
File: main.py Project: kestefon/dev
import flask
from app_pages import app1, app2, index, collapse
from app_pages import functions_nn as fnn

import datetime
import time

from plotnine import *
from io import BytesIO
import base64
import os
import pandas as pd

from rq import Queue
from worker import conn

q = Queue('high', is_async=False, connection=conn, default_timeout=60 * 3)

raw = pd.read_csv(
    'https://raw.githubusercontent.com/kestefon/dev/master/data.csv')
data_handler = fnn.DataHandler(raw)
out_data = data_handler.data_cleanup()

#learning rate factor

#CSS
external_stylesheets = [
    'https://codepen.io/chriddyp/pen/bWLwgP.css', {
        'href':
        'https://fonts.googleapis.com/css?family=Roboto:100,200,300,400,500,600,700,900',
        'ref': 'stylesheet'
    }, {
Example #47
from redis import StrictRedis
from rq import Queue, get_failed_queue
import time

MAX_ATTEMPTS = 30

queue_conn = StrictRedis(host='433-19.csse.rose-hulman.edu', port=6379, db=0)
q = Queue(connection=queue_conn)

while True:
    failed_queue = get_failed_queue(queue_conn)
    dictionary = {}
    dictionary2 = {}
    print(failed_queue.count)
    with open("Queue_Log.txt", 'r+') as f:
        for line in f:
            key, val = line.split()
            dictionary[str(key)] = int(val)

        for job_id in failed_queue.job_ids:
            if job_id in dictionary.keys():
                dictionary2[job_id] = dictionary[job_id] + 1
            else:
                dictionary2[job_id] = 1

            if dictionary2[job_id] <= MAX_ATTEMPTS:
                failed_queue.requeue(job_id)
            else:
                dictionary2.pop(job_id)

        f.seek(0)
Example #48
from pybossa.cache import projects as cached_projects
from pybossa.cache import categories as cached_cat
from pybossa.auth import ensure_authorized_to
from pybossa.core import project_repo, user_repo, sentinel
from pybossa.feed import get_update_feed
import pybossa.dashboard.data as dashb
from pybossa.jobs import get_dashboard_jobs
import json
from StringIO import StringIO

from pybossa.forms.admin_view_forms import *
from pybossa.news import NOTIFY_ADMIN

blueprint = Blueprint('admin', __name__)

DASHBOARD_QUEUE = Queue('super', connection=sentinel.master)


def format_error(msg, status_code):
    """Return error as a JSON response."""
    error = dict(error=msg, status_code=status_code)
    return Response(json.dumps(error),
                    status=status_code,
                    mimetype='application/json')


@blueprint.route('/')
@login_required
@admin_required
def index():
    """List admin actions."""
Example #49
def _get_queue(name="default"):
    # Connect to Redis Queue
    return Queue(name, connection=redis_conn)
Example #50
"""
RQ seems pretty nice, especially as you can have multiple queues with different priorities.

This is a minimal working example.

You will need to start an rqworker and redis-server for this to run
"""

from redis import Redis
from rq import Queue
from my_module import add_3
import time

q = Queue(connection=Redis())
job = q.enqueue(add_3, 9)
time.sleep(1)
print job.result
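The add_3 function imported above is not shown; a minimal stand-in that satisfies this example (hypothetical, only required to be importable by the rqworker process) could be:

# my_module.py -- hypothetical stand-in for the module the example imports
def add_3(x):
    """Return x plus three; a trivial job for the worker to execute."""
    return x + 3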
Example #51
def create_app(test_config=None):
    # Setup the Flask App
    app=Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'stock_twit_trap.sqlite')
    )
    # Setup the queue and scheduler
    redis_connection = Redis(host='localhost', port=6379)
    queue = Queue(connection=redis_connection)
    scheduler = Scheduler(connection=redis_connection)

    # Initialize_db & db_service
    database.init_db()
    db_service = DatabaseService()

    scheduler_service = SchedulerService(scheduler)
    scheduler_service.delete_queue(queue)
    # scheduler_service.schedule_twit_rip(db_service.get_list_of_all_symbols_filter_active())

    alpha_service = AlphaVantageApiService()
    results = alpha_service.get_stock_data_for_symbol('AAPL')
    alpha_service.write_results_to_file(results.json())

    @app.route('/counts/')
    def db_count():
        messages = db_service.get_message_count()
        symbols = db_service.get_symbol_count()
        return f"Messages: {messages} ||  Symbols: {symbols}"


    @app.route('/messages/')
    def message_list():
        messages = db_service.get_list_of_all_messages()
        return f"{messages}"


    @app.route('/symbols/')
    def symbol_list():
        symbols = db_service.get_list_of_all_symbols()
        return f"{symbols}"


    @app.route('/symbols/info/<symbol>/')
    def symbol_info(symbol):
        symbol = db_service.get_symbols_by_symbol(symbol)
        return f"Symbol: {symbol.symbol}, Active: {symbol.active}, ID: {symbol.stocktwit_id}"


    @app.route('/symbols/set_active/<symbol>/')
    def symbol_set_active(symbol):
        symbol = db_service.get_symbols_by_symbol(symbol)
        symbol.active = 1
        db_service.commit()
        return f"{symbol.symbol} set to active!"


    @app.route('/symbols/set_inactive/<symbol>/')
    def symbol_set_inactive(symbol):
        symbol = db_service.get_symbols_by_symbol(symbol)
        symbol.active = 0
        db_service.commit()
        return f"{symbol.symbol} set to inactive!"


    @app.teardown_appcontext
    def shutdown_session(exception=None):
        database.db_session.remove()

    return app
Example #52
def main():
    redis_conn = Redis()
    q = Queue(connection=redis_conn)
    testDB(q)
Example #53
from redis import Redis
from rq import Queue
from testRedisprocessor import testConverge
import time

if __name__ == '__main__':
    # Tell RQ what Redis connection to use
    redis_conn = Redis()
    q = Queue('5000',
              connection=redis_conn)  # use the queue named '5000'

    # Delay execution of testConverge(1)
    job = q.enqueue(testConverge, 1)
    result = job.result
    print(f'immediate result: {result}')  # => None

    # Now, wait a while, until the worker is finished
    time.sleep(4)
    result = job.result
    print(f'\nafter 4 secs: {result}')  # => 889
Example #54
from redis import Redis
from rq import Queue
import sys
q = Queue(connection=Redis('10.0.0.9'))
q.enqueue('process_pe.procpe', sys.argv[1])
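Because the job is enqueued by its dotted-path string, the worker must be able to import that path; a hypothetical process_pe.py illustrating the shape of such a module (the real project's implementation is not shown here):

# process_pe.py -- hypothetical worker-side module assumed by the snippet above
import os


def procpe(path):
    """Placeholder job: report the size of the submitted file."""
    return os.path.getsize(path)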
Example #55
import sys

import os
from flask import Flask, render_template, jsonify, request
from rq import Queue
from src.job.downloader import conn
from src.cmd.utils import fetch_video

q = Queue(connection=conn, timeout="1h", result_ttl="3h")


def create_app():
    app = Flask(__name__, static_url_path="/src/web/static")
    app.config.from_mapping(
        SECRET_KEY=os.environ.get("SECRET_KEY") or "dev_key")

    return app


app = create_app()


@app.route("/")
def index():
    return render_template("index.html")


# @app.route("/video/<name>"):


@app.route("/fetch", methods=["POST"])
Example #56
from rq import Queue
from redis import Redis
import docproc.bottleneck as bn

import pymongo as py

redis_conn = Redis(host='redis')
q = Queue(connection=redis_conn)

client = py.MongoClient('mongo')
db = client['docs']
col = db['aug_meta']

for doc_id in col.find({"Content-Type.Content": "application/jpeg"}, {}):
    job = q.enqueue(bn.insert_inception, doc_id)
    print(job.key)
Example #57
def empty_failed():
    fqueue = Queue('failed', connection=redis_connection)
    fqueue.empty()
Example #58
import logging
import os
import datetime
import tempfile
from waitress import serve
from flask import Flask, request, make_response
from redis import Redis
from rq import Queue  # type: ignore
from werkzeug.utils import secure_filename
from gtfs2geojson import convert_sync

import init_log

init_log.config_api_log()

q = Queue(connection=Redis.from_url(os.environ.get("REDIS_URL") or "redis://"))

app = Flask(__name__)


def _convert(conversion_type):
    datagouv_id = request.args.get("datagouv_id")
    url = request.args.get("url")
    if datagouv_id and url:
        q.enqueue(
            "jobs.convert",
            {
                "url": url,
                "datagouv_id": datagouv_id,
                "task_date": datetime.datetime.today(),
                "conversion_type": conversion_type,
Example #59
from rq import Queue
from sqlalchemy import event

from pybossa.feed import update_feed
from pybossa.model import update_project_timestamp, update_target_timestamp
from pybossa.model.blogpost import Blogpost
from pybossa.model.project import Project
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
from pybossa.model.webhook import Webhook
from pybossa.model.user import User
from pybossa.jobs import webhook, notify_blog_users
from pybossa.core import sentinel

webhook_queue = Queue('high', connection=sentinel.master)
mail_queue = Queue('super', connection=sentinel.master)


@event.listens_for(Blogpost, 'after_insert')
def add_blog_event(mapper, conn, target):
    """Update PyBossa feed with new blog post."""
    sql_query = ('select name, short_name, info from project \
                 where id=%s') % target.project_id
    results = conn.execute(sql_query)
    obj = dict(id=target.project_id,
               name=None,
               short_name=None,
               info=None,
               action_updated='Blog')
    for r in results:
Example #60
    def test_info_only_workers(self):
        """rq info -u <url> --only-workers (-W)"""
        runner = CliRunner()
        result = runner.invoke(main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 0 queue', result.output)

        result = runner.invoke(main, ['info', '--by-queue',
                                      '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 0 queue', result.output)

        worker = Worker(['default'], connection=self.connection)
        worker.register_birth()
        result = runner.invoke(main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('1 workers, 0 queues', result.output)
        worker.register_death()

        queue = Queue(connection=self.connection)
        queue.enqueue(say_hello)
        result = runner.invoke(main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 1 queues', result.output)

        foo_queue = Queue(name='foo', connection=self.connection)
        foo_queue.enqueue(say_hello)

        bar_queue = Queue(name='bar', connection=self.connection)
        bar_queue.enqueue(say_hello)

        worker_1 = Worker([foo_queue, bar_queue], connection=self.connection)
        worker_1.register_birth()

        worker_2 = Worker([foo_queue, bar_queue], connection=self.connection)
        worker_2.register_birth()
        worker_2.set_state(WorkerStatus.BUSY)

        result = runner.invoke(main, ['info', 'foo', 'bar',
                                      '-u', self.redis_url, '--only-workers'])

        self.assert_normal_execution(result)
        self.assertIn('2 workers, 2 queues', result.output)

        result = runner.invoke(main, ['info', 'foo', 'bar', '--by-queue',
                                      '-u', self.redis_url, '--only-workers'])

        self.assert_normal_execution(result)
        # Ensure both queues' workers are shown
        self.assertIn('foo:', result.output)
        self.assertIn('bar:', result.output)
        self.assertIn('2 workers, 2 queues', result.output)