def redis_connection():
    """
    Return the currently open redis connection object. If there is no
    connection currently open, one is created using the url specified in
    config['redis', 'url']
    """
    conn = rq.get_current_connection()
    if conn:
        return conn
    rq.use_connection(redis=redis.Redis.from_url(config["redis", "url"]))
    return rq.get_current_connection()
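
A minimal usage sketch for the helper above, assuming the same rq/redis imports and the same config mapping; the task path 'myapp.tasks.send_email' is purely illustrative:

# First call creates the connection from config['redis', 'url'];
# later calls reuse the same object from RQ's connection stack.
conn = redis_connection()
q = rq.Queue('default', connection=conn)
q.enqueue('myapp.tasks.send_email', 42)   # hypothetical task path
assert redis_connection() is conn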
def redis_connection():
    """
    Return the currently open redis connection object. If there is no 
    connection currently open, one is created using the keyword arguments 
    specified in config.REDIS_CONNECTION_KWARGS
    """
    conn = rq.get_current_connection()
    if conn:
        return conn
    kwargs = config.REDIS_CONNECTION_KWARGS
    rq.use_connection(redis=redis.Redis(**kwargs))
    return rq.get_current_connection()
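
For reference, a hedged sketch of what the config.REDIS_CONNECTION_KWARGS used by this variant might contain; the keys are simply whatever redis.Redis() accepts:

# Hypothetical config module values (host/port/db are illustrative).
REDIS_CONNECTION_KWARGS = {
    'host': 'localhost',
    'port': 6379,
    'db': 0,
}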
 def execute(self):
     # It is always possible that the Redis connection is not yet set 
     print("ENTER")  # debug trace
     if not get_current_connection():
         conn = Redis('localhost', settings.REDIS_PORT)
         use_connection(conn)
     if not get_current_connection():
         log.error(u'Unable to create redis connection')
     # use the 'default' queue; it is the only one we use
     q = Queue()
     # if the queue is not empty, some old idle workers may need to be cleaned up
     if not q.is_empty():
         for w in Worker.all():
             if w.state == 'idle' and q in w.queues:
                 log.info(u'Worker %s will die gently' % w.name)
                 w.register_death()
Example #4
def job_expire(request, job=None):
    if not job:
        job = get_current_job(get_current_connection())
    job.ttl = request.ttl
    job.result_ttl = request.result_ttl
    job.timeout = request.timeout
    job.save()
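
job_expire copies TTL and timeout settings from a request-like object onto the currently executing job. A hedged sketch of calling it from inside a task; ExpirySettings is a hypothetical stand-in for the real request object:

class ExpirySettings:
    ttl = 600         # seconds the job may sit in the queue
    result_ttl = 300  # seconds the result is kept after completion
    timeout = 120     # seconds the job is allowed to run

def some_task():
    # Only meaningful inside a worker, where get_current_job() returns
    # the job being executed; job.save() persists the new settings.
    job_expire(ExpirySettings())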
Example #5
    def all(cls,
            connection=None,
            job_class=None,
            queue_class=None,
            queue=None,
            serializer=None):
        """Returns an iterable of all Workers.
        """
        if queue:
            connection = queue.connection
        elif connection is None:
            connection = get_current_connection()

        # cls.redis_workers_keys ('rq:workers') is the Redis set in which
        # RQ registers the key of every live worker
        worker_keys = [
            key for key in connection.smembers(cls.redis_workers_keys)
        ]
        workers = [
            cls.find_by_key(as_text(key),
                            connection=connection,
                            job_class=job_class,
                            queue_class=queue_class,
                            serializer=serializer) for key in worker_keys
        ]
        return compact(workers)
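
Typical use of Worker.all() from monitoring code, using the public RQ API; the connection URL is illustrative:

# List every registered worker and its current state.
import redis
conn = redis.Redis.from_url('redis://localhost:6379/0')
for worker in Worker.all(connection=conn):
    print(worker.name, worker.state)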
Example #6
 def _poolJobs(self, db_name, check=False):
     """Check if we are a worker process.
     """
     if get_current_connection() and get_current_job():
         pass
     else:
         super(IrCron, self)._poolJobs(db_name, check)
Example #8
def start_worker_process(queue_name, connection=None, worker_name=None, burst=False):
    """
    Use multiprocessing to start a new worker in a separate process.
    """
    connection = connection or get_current_connection()
    conn_kwargs = connection.connection_pool.connection_kwargs
    p = Process(target=start_worker, args=(queue_name, conn_kwargs, worker_name, burst))
    p.start()
    return p
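
A hedged usage sketch for start_worker_process, assuming a connection has already been pushed (or is passed explicitly) and that the start_worker target used above is importable in the child process:

# Spawn a burst worker for the 'default' queue in a separate process
# and wait for it to drain the queue and exit.
p = start_worker_process('default', burst=True)
p.join()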
Example #9
def rpush(key, value, append_worker_name=False, sleep=0):
    """Push a value into a list in Redis. Useful for detecting the order in
    which jobs were executed."""
    if sleep:
        time.sleep(sleep)
    if append_worker_name:
        value += ':' + get_current_job().worker_name
    redis = get_current_connection()
    redis.rpush(key, value)
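
A hedged sketch of how rpush is used to record execution order; Queue and get_current_connection are assumed imported from rq, and 'execution_order' is an illustrative key:

q = Queue('default', connection=get_current_connection())
q.enqueue(rpush, 'execution_order', 'first')
q.enqueue(rpush, 'execution_order', 'second')
# After a worker has processed both jobs, the list preserves the order:
#   get_current_connection().lrange('execution_order', 0, -1)
#   -> [b'first', b'second']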
Example #10
    def test_use_connection(self):
        """Test function use_connection works as expected."""
        conn = new_connection()
        use_connection(conn)

        self.assertEqual(conn, get_current_connection())

        use_connection()

        self.assertNotEqual(conn, get_current_connection())

        use_connection(self.testconn)  # Restore RQTestCase connection

        with self.assertRaises(AssertionError):
            with Connection(new_connection()):
                use_connection()
                with Connection(new_connection()):
                    use_connection()
Example #12
def run_around_tests(test_domain):

    yield

    conn = get_current_connection()
    conn.flushall()

    if test_domain.has_provider('default'):
        test_domain.get_provider('default')._data_reset()
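
run_around_tests is written as a pytest yield fixture: everything after the yield runs as per-test teardown. A hedged sketch of how it would typically be registered in a conftest.py (the test_domain fixture itself is assumed to be defined elsewhere):

import pytest

@pytest.fixture(autouse=True)            # run around every test automatically
def run_around_tests(test_domain):
    yield                                # test body executes here
    get_current_connection().flushall()  # wipe Redis between tests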
Example #13
 def get_rq_data(self, request):
     q = self._job(self.settings).queue
     current_job = get_current_job(get_current_connection())
     first_job_id = current_job._dependency_id
     depends_job = q.fetch_job(first_job_id)
     response = depends_job.result
     self.perform_callback(request, response)
     # job_expire(request,depends_job)
     # after the depended-on (get_response) job completes, set its expiry time
     # ttl=10
     depends_job.cleanup(ttl=220)
Example #14
def setup_redis_connection():
    redis_conn = get_current_connection()
    if not redis_conn:
        if config.get('redis_url', False):
            oorq_log('Connecting to redis using redis_url: %s'
                     % config['redis_url'])
            redis_conn = from_url(config['redis_url'])
        else:
            oorq_log('Connecting to redis using defaults')
            redis_conn = Redis()
        push_connection(redis_conn)
    return redis_conn
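
A hedged sketch of the configuration this helper expects; a plain dict stands in for the real OpenERP-style config object, and the URL is illustrative:

# When 'redis_url' is present it is used via from_url(); otherwise the
# helper falls back to Redis() defaults (localhost:6379, db 0).
config = {
    'redis_url': 'redis://localhost:6379/0',
}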
Example #16
def cleanup_ghosts(conn=None):
    """
    RQ versions < 0.3.6 suffered from a race condition where workers, when
    abruptly terminated, did not have a chance to clean up their worker
    registration, leading to reports of ghosted workers in `rqinfo`.  Since
    0.3.6, new worker registrations automatically expire, and the worker will
    make sure to refresh the registrations as long as it's alive.

    This function will clean up any of such legacy ghosted workers.
    """
    conn = conn if conn else get_current_connection()
    for worker in Worker.all(connection=conn):
        if conn._ttl(worker.key) == -1:
            ttl = worker.default_worker_ttl
            conn.expire(worker.key, ttl)
            logger.info('Marked ghosted worker {0} to expire in {1} seconds.'.format(worker.name, ttl))
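
A hedged sketch of calling cleanup_ghosts from a maintenance script, assuming the same RQ era as the snippet (where push_connection wraps the client with the _ttl compatibility helper); the URL is illustrative:

from redis import Redis
from rq import push_connection

# Make a connection current, then expire any pre-0.3.6 ghost workers
# so they stop showing up in `rqinfo`.
push_connection(Redis.from_url('redis://localhost:6379/0'))
cleanup_ghosts()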
Example #17
def cleanup_ghosts():
    """
    RQ versions < 0.3.6 suffered from a race condition where workers, when
    abruptly terminated, did not have a chance to clean up their worker
    registration, leading to reports of ghosted workers in `rqinfo`.  Since
    0.3.6, new worker registrations automatically expire, and the worker will
    make sure to refresh the registrations as long as it's alive.

    This function will clean up any of such legacy ghosted workers.
    """
    conn = get_current_connection()
    for worker in Worker.all():
        if conn.ttl(worker.key) == -1:
            ttl = worker.default_worker_ttl
            conn.expire(worker.key, ttl)
            logger.info('Marked ghosted worker {0} to expire in {1} seconds.'.format(worker.name, ttl))
Example #18
def spawn(count):
    """Spawns additional jobs from a worker.

    Useful for verifying that a job handler can spawn additional jobs::

        q = buildcat.queue.Queue()
        q.submit("buildcat.test.spawn", 3)

    The spawn job will be handled by a worker, which will spawn 3 additional
    :func:`buildcat.test.log` jobs, which will be handled subsequently.

    Parameters
    ----------
    count: int, required
        Number of new jobs to create.
    """

    for index in range(count):
        rq.Queue(connection=rq.get_current_connection()).enqueue(
            "buildcat.test.log", "Job-{}".format(index))
Example #19
def split_frames(lxofile, frames):
    """Render individual frames from a Modo .lxo file.

    Parameters
    ----------
    lxofile: str, required
        Path to the file to be rendered.
    frames: tuple, required
        Contains the half-open (start, end, step) range of frames to be rendered.
    """
    lxofile = str(lxofile)
    start = int(frames[0])
    end = int(frames[1])
    step = int(frames[2])

    q = rq.Queue(rq.get_current_job().origin,
                 connection=rq.get_current_connection())
    for frame in range(start, end, step):
        q.enqueue("buildcat.modo.render_frames", lxofile,
                  (frame, frame + 1, 1))
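
A hedged sketch of submitting split_frames itself; the dotted path follows the buildcat.modo naming used above, and the .lxo path and frame range are illustrative:

# Fan out frames 1..91 (step 10) into individual render_frames jobs.
q = rq.Queue(connection=rq.get_current_connection())
q.enqueue("buildcat.modo.split_frames", "shot010.lxo", (1, 101, 10))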
Example #20
def cleanup_ghosts():
    """
    RQ versions < 0.3.6 suffered from a race condition where workers, when
    abruptly terminated, did not have a chance to clean up their worker
    registration, leading to reports of ghosted workers in `rqinfo`.  Since
    0.3.6, new worker registrations automatically expire, and the worker will
    make sure to refresh the registrations as long as it's alive.

    This function will clean up any of such legacy ghosted workers.
    """

    # This function sets a TTL on ghost workers created by versions before
    # 0.3.6, so they stop cluttering the output of the `rqinfo` monitoring
    # command.
    #
    # patch_connection() in compat/connections.py attaches a _ttl() method to
    # the StrictRedis object; a return value of -1 means the key has no TTL,
    # so we set one on the worker key below.
    conn = get_current_connection()
    for worker in Worker.all():
        if conn._ttl(worker.key) == -1:
            ttl = worker.default_worker_ttl
            conn.expire(worker.key, ttl)
            logger.info('Marked ghosted worker {0} to expire in {1} seconds.'.format(worker.name, ttl))
Example #21
def split_frames(hipfile, rop, frames):
    """Render individual frames from a Houdini .hip file.

    Parameters
    ----------
    hipfile: str, required
        Path to the file to be rendered.
    rop: str, required
        Absolute path of the ROP node to use for rendering.
    frames: tuple, required
        Contains the half-open (start, end, step) range of frames to be rendered.
    """
    hipfile = str(hipfile)
    rop = str(rop)
    start = int(frames[0])
    end = int(frames[1])
    step = int(frames[2])

    q = rq.Queue(rq.get_current_job().origin,
                 connection=rq.get_current_connection())
    for frame in range(start, end, step):
        q.enqueue("buildcat.hou.render_frame", hipfile, rop, frame)
Example #22
def mock_current_measurement(voltage):
    # Get a reference to the job
    job = get_current_job()
    connection = get_current_connection()

    # Setup current measurement
    print("Setting up mock sourcemeter")

    # Start the source
    t_start = time()

    # Run the measurement
    t = []
    i = []
    while connection.get(job.key + b':should_stop') != b'1':
        t.append(time() - t_start)
        i.append(np.random.random())
        job.meta['data'] = (t, i)
        job.save_meta()
        print(connection.get(job.key + b':should_stop'))
        sleep(5)
    print("Disabled sourcemeter")
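
The measurement loop above polls a per-job ':should_stop' key. A hedged sketch of the enqueuing side, which starts the mock measurement and later flips that flag; Queue and get_current_connection are assumed imported from rq:

q = Queue(connection=get_current_connection())
job = q.enqueue(mock_current_measurement, 1.0, result_ttl=60)
# ... later, from any client sharing the same Redis connection:
get_current_connection().set(job.key + b':should_stop', b'1')
# The worker loop sees the flag on its next iteration and exits.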
Example #23
def access_self():
    assert get_current_connection() is not None
    assert get_current_job() is not None
Example #24
 def __init__(self, name='scheduled_jobs'):
     self.connection = get_current_connection()
     prefix = self.scheduler_queue_namespace_prefix
     self.name = name
     self._key = '{0}{1}'.format(prefix, name)
Example #25
def schedule_access_self():
    q = Queue('default', connection=get_current_connection())
    q.enqueue(access_self)
Example #26
 def __init__(self, name="default", redis_conn=None):
     if get_current_connection() is None:
         if redis_conn is None:
             redis_conn = Redis()
         push_connection(redis_conn)
     self.name = name
Example #28
# -*- coding:utf-8 -*-
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.sites.models import Site
from django.conf import settings
from djrdf.models import djRdf
import logging
import subhub

log = logging.getLogger('djrdf')



from redis import Redis
from rq import Queue, use_connection, get_current_connection
if not get_current_connection():
    conn = Redis('127.0.0.1', settings.REDIS_PORT)
    use_connection(conn)
if not get_current_connection():
    log.error(u'Unable to create redis connection')
# use the 'default' queue
q = Queue()


# Workaround for rqworker limitation
def letsCallDistributionTaskProcess():
    log.debug(u'IN QUEUE: number of pending DistributionTasks: %s' %
        len(subhub.models.DistributionTask.objects.all()))
    subhub.models.DistributionTask.objects.process(log=log)
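
A hedged sketch of how this signals module would typically enqueue the workaround task; the receiver name and the choice of djRdf as sender are illustrative, not taken from the original project:

# Hypothetical receiver: after a djRdf object is saved, enqueue the
# workaround so an rqworker processes pending DistributionTasks.
@receiver(post_save, sender=djRdf)
def enqueue_distribution_task(sender, instance, **kwargs):
    q.enqueue(letsCallDistributionTaskProcess)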

Example #30
 def __init__(self, redis_conn=None, poll_interval=2500):
     self._poll_interval = poll_interval
     if get_current_connection() is None:
         if redis_conn is None:
             redis_conn = Redis()
         push_connection(redis_conn)