Example #1
    def job(self, id_):
        '''
        Get data about a specific job.

        **Example Response**

        .. sourcecode:: json

            {
                "current": 1321,
                "description": "Running reports.",
                "id": "cc4618c1-22ed-4b5d-a9b8-5186c0259b46",
                "progress": 0.4520876112251882,
                "total": 2922,
                "type": "index"
            }

        :<header Content-Type: application/json
        :<header X-Auth: the client's auth token
        :query id: the job ID to fetch

        :>header Content-Type: application/json
        :>json int current: the number of
            records processed so far by this job
        :>json str description: description of
            the current job (optional)
        :>json str id: unique job identifier
        :>json float progress: the percentage of
            records processed by this job, expressed as a decimal
        :>json int total: the total number of
            records expected to be processed by this job
        :>json str type: the type of this job,
            indicating what subsystem it belongs to (optional)

        :status 200: ok
        :status 401: authentication required
        :status 403: you must be an administrator
        :status 404: no job with the specified ID
        '''

        with rq.Connection(g.redis):
            for queue in rq.Queue.all():
                if queue.name == 'failed':
                    continue

                job = queue.fetch_job(id_)

                if job is not None:
                    return jsonify(
                        current=job.meta['current'],
                        description=job.meta.get('description'),
                        id=job.id,
                        progress=job.meta['current'] / job.meta['total'],
                        total=job.meta['total'],
                        type=job.meta.get('type')
                    )

        raise NotFound('No job exists with that ID.')
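
The docstring above fully specifies the request/response contract, so a client
call is easy to sketch. This example is not from the original source: the
route, base URL, and token value are all hypothetical placeholders.

import requests

# Hypothetical values; only the headers and response shape come from the
# docstring above, the route itself is not shown in the source.
BASE_URL = 'http://localhost:5000'
JOB_ID = 'cc4618c1-22ed-4b5d-a9b8-5186c0259b46'

response = requests.get(
    '%s/jobs/%s' % (BASE_URL, JOB_ID),  # assumed route
    headers={'X-Auth': 'client-auth-token'},  # placeholder token
)
response.raise_for_status()
print(response.json()['progress'])  # e.g. 0.4520876112251882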
Example #2
def run():
    q = None
    queue_name = input()
    try:
        redis_url = settings.STRUCTURE_REDIS
    except AttributeError:  # settings value not configured
        redis_url = None
    connection = redis.Redis.from_url(redis_url)
    too_high_queue = rq.Queue(Consts.QUEUE_PRIORITY_TOO_HIGH,
                              connection=connection)
    high_queue = rq.Queue(Consts.QUEUE_PRIORITY_HIGH, connection=connection)
    normal_queue = rq.Queue(Consts.QUEUE_PRIORITY_NORMAL,
                            connection=connection)
    low_queue = rq.Queue(Consts.QUEUE_PRIORITY_LOW, connection=connection)
    if queue_name == "structure:higher":
        q = too_high_queue
    elif queue_name == "structure:high":
        q = high_queue
    elif queue_name == "structure:normal":
        q = normal_queue
    elif queue_name == "structure:low":
        q = low_queue
    else:
        print "Error: Cannot find the queue from Redis."
    if q is not None:
        with rq.Connection(connection):
            w = rq.Worker([q])
            w.work()
Example #3
def check_is_any_new_file():
    for csv_file in os.listdir(settings.RAW_DATA_PATH):
        if csv_file.endswith('.csv'):
            with rq.Connection(redis.from_url(settings.REDIS_URL)):
                q = rq.Queue()
                task = q.enqueue(split_spawn_file_api,
                                 f'{settings.RAW_DATA_PATH}/{csv_file}')
Example #4
async def split_spawn_file(file_path: str) -> list:
    output = []
    connection = await get_connection()
    try:
        async with connection.transaction(isolation='serializable'):
            with open(file_path, 'rb') as f:
                file_hash = hashlib.md5(f.read()).hexdigest()

            check_hash = await connection.fetch(
                "SELECT * FROM file WHERE hash=$1", file_hash)
            if check_hash:
                print('already processed')
                return None  # this file's hash is already recorded

            template = f'{os.path.basename(file_path).replace(".csv", "")}__%s.csv'
            with open(file_path, 'r') as f:
                output = split(f,
                               row_limit=settings.CSV_LINE_LIMIT,
                               output_path=settings.SPLIT_DATA_PATH,
                               output_name_template=template)

            with rq.Connection(redis.from_url(settings.REDIS_URL)):
                q = rq.Queue()
                for item in output:
                    task = q.enqueue(insert_data_from_csv_api, item, file_hash)
                    await connection.execute(
                        """
                        INSERT INTO file ("name", "hash", "split", "task_id")
                        VALUES ($1, $2, $3, $4)
                        """, file_path, file_hash, item, task.get_id())
    finally:
        await connection.close()

    print(output)
    return output
Example #5

    def run(self):
        """ Run the queue-worker """
        with rq.Connection():
            # start the task worker
            self.worker = rq.Worker(self.q)
            print('Worker started')
            self.worker.work()  # blocks until the worker shuts down
Example #6

def clean_up():
    """ Reset the pop interval data for each empty queue """
    with rq.Connection(redis_connection()):
        for q in rq.Queue.all():
            if q != rq.queue.FailedQueue():
                if q.is_empty():
                    clear_pop_interval_stat(q.name)
Example #7
    def delete(self, id_):
        '''
        Delete a failed task with the given id_.

        :<header Content-Type: application/json
        :<header X-Auth: the client's auth token
        :query id: the job ID to delete

        :>header Content-Type: application/json

        :status 200: ok
        :status 401: authentication required
        :status 403: you must be an administrator
        :status 404: no job exists with this ID
        '''

        with rq.Connection(g.redis):
            found = False

            for job in rq.get_failed_queue().jobs:
                if job.id == id_:
                    job.delete()
                    found = True
                    break

            if not found:
                raise NotFound('No job exists with ID "%s".' % id_)

        return jsonify(message='ok')
Example #8
    def work(self):
        """Start the worker."""

        with rq.Connection(self.rcon):
            LOG.info("Starting worker %s on %s redis: %s:%s/%s", self._name,
                     self._queues, self._redis_host, self._redis_port,
                     self._redis_database)
            self.worker.work()
Example #9
def main():
    utils.setup_logging()
    config.log()
    gh_pr.monkeypatch_github()
    if config.FLUSH_REDIS_ON_STARTUP:
        utils.get_redis().flushall()
    with rq.Connection(utils.get_redis()):
        worker = rq.Worker(['default'])
        worker.work()
Example #10
def cancel_test(markus_address, run_ids, **kw):
    """
    Cancel a test run job with the job_id defined using 
    markus_address and run_id. 
    """
    with rq.Connection(ats.redis_connection()):
        for run_id in run_ids:
            job_id = format_job_id(markus_address, run_id)
            rq.cancel_job(job_id)
Example #11
def main():
    utils.setup_logging()
    gh_pr.monkeypatch_github()
    if config.FLUSH_REDIS_ON_STARTUP:
        utils.get_redis().flushall()
    with rq.Connection(utils.get_redis()):
        worker = rq.worker.HerokuWorker([rq.Queue('default')],
                                        exception_handlers=[error_handler])
        worker.work()
Example #12
def main():
    """Sets up Redis connection and starts the worker."""
    redis_connection = redis.Redis(host="queue-server")
    with rq.Connection(redis_connection):
        queue = rq.Queue('build_n_run_queue')
        worker = rq.Worker([queue])

        while queue.count + queue.deferred_job_registry.count > 0:
            worker.work(burst=True)
            time.sleep(5)
Example #13
async def get_task(request: web.BaseRequest) -> web.Response:
    with rq.Connection(redis.from_url(settings.REDIS_URL)):
        q = rq.Queue()
        task = q.fetch_job(request.match_info.get('task_id'))

        return web.json_response({
            "task_id": task.get_id(),
            "task_status": task.get_status(),
            "task_result": task.result,
        })
Example #14
def work():
    """Start an rq worker on the connection provided by create_connection."""
    # Preload ROOT module to reduce worker startup time
    import ROOT  # noqa
    with rq.Connection(create_connection()):
        worker = rq.Worker(list(map(create_queue, QUEUES)))
        # Quiet workers to suppress large result output
        # https://github.com/nvie/rq/issues/136
        worker.log.setLevel(logging.WARNING)
        worker.work()
Example #15
def loop_check_is_any_new_file():
    with rq.Connection(redis.from_url(settings.REDIS_URL)):
        q = rq.Queue()
        scheduler = Scheduler(queue=q)
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),  # time of first execution, in UTC
            func=check_is_any_new_file,
            interval=600,  # seconds between subsequent runs
        )
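
Note that `scheduler.schedule` above only registers the job in Redis; a
separate rq-scheduler process must be running to enqueue the job when it is
due. A minimal sketch of that companion process, assuming the same `settings`
module used in the examples above is importable:

import redis
import settings  # assumed importable, as in the examples above
from rq_scheduler import Scheduler

scheduler = Scheduler(connection=redis.from_url(settings.REDIS_URL))
scheduler.run()  # blocks, polling for due jobs and moving them onto the queue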
Example #16
File: worker.py Project: aoshiken/hashnist
def start_worker(queue_name, redis_url, with_scheduler=False):
    redis_conn = redis.from_url(redis_url)

    with rq.Connection(redis_conn):
        worker = rq.Worker(queues=[queue_name], connection=redis_conn)
        worker.work(with_scheduler=with_scheduler)
Example #17
def main():
    """Set up Redis connection and start the experiment."""
    redis_connection = redis.Redis(host="queue-server")

    config_path = environment.get('EXPERIMENT_CONFIG',
                                  'fuzzbench/local-experiment-config.yaml')
    config = yaml_utils.read(config_path)
    config = config_utils.validate_and_expand(config)

    with rq.Connection(redis_connection):
        return run_experiment(config)
Example #18
def test_concurrency(worker):
    """Worker execute many tasks concurrently."""

    with rq.Connection():
        queue = rq.Queue('foo')

    job1 = queue.enqueue('fixtures.long_running_job', 50)
    job2 = queue.enqueue('fixtures.long_running_job', 50)

    sleep(30)

    assert job1.is_started
    assert job2.is_started
Example #19
def main():  # pragma: no cover
    utils.setup_logging()
    config.log()
    gh_pr.monkeypatch_github()
    r = utils.get_redis_for_rq()
    if config.FLUSH_REDIS_ON_STARTUP:
        r.flushall()
    with rq.Connection(r):
        worker = rq.Worker(['default'])
        if config.SENTRY_URL:
            client = raven.Client(config.SENTRY_URL, transport=HTTPTransport)
            register_sentry(client, worker)
        worker.work()
Example #20
def load_app(config):
    database = config.get('database')
    queues = config.get('queues', ['default'])

    redis_conn = redis.from_url(database)

    hostname = socket.gethostname()
    name = '%s-%s' % (hostname, uuid.uuid4().hex)

    with rq.Connection(redis_conn):
        worker = rq.Worker(map(rq.Queue, queues), name=name)

    return worker
Example #21
def cancel_tests(client_type: str, client_data: Dict, test_data: List[Dict]) -> None:
    """
    Cancel a test run job with enqueued with the same
    """
    with rq.Connection(redis_connection()):
        for data in test_data:
            client = get_client(client_type, {**client_data, **data})
            try:
                job = rq.job.Job.fetch(client.unique_run_str())
            except NoSuchJobError:
                continue
            if job.is_queued:
                job.cancel()
Example #22
def test_working_worker_cold_shutdown(worker):
    """Worker with an ongoing job receiving double SIGTERM signal and
    shutting down immediately.

    """

    with rq.Connection():
        queue = rq.Queue('foo')

    job = queue.enqueue('fixtures.long_running_job')

    worker.stop_with(SIGTERM, SIGTERM)

    assert worker.returncode == 0
    assert not job.is_finished
Example #23
def main(argv):
    # Connect to the database before loading work (disabled here):
    # connect("extensionsASTnpantel", username="******", host="localhost", port=37017)

    # Load data from a query and feed it through the queue (also disabled):
    # queue = readFromDatabase()
    # queuetest = ['a','b','c','d','e','f','g','h','i','j','k','l']
    # specialLCSHandler(queue)
    with rq.Connection(redis.Redis(REDIS_HOST, REDIS_PORT)):
        w = rq.Worker([QUEUE_NAME])
        w.work()
Example #24
def test_working_worker_warm_shutdown(worker):
    """Worker with an ongoing job receiving single SIGTERM signal,
    allowing job to finish then shutting down.

    """

    with rq.Connection():
        queue = rq.Queue('foo')

    job = queue.enqueue('fixtures.long_running_job')

    worker.stop_with(SIGTERM)

    assert worker.returncode == 0
    assert job.is_finished
    assert 'Done sleeping...' == job.result
Example #25
def cancel_test(markus_address, run_ids, **_kw):
    """
    Cancel a test run job with the job_id defined using
    markus_address and run_id.
    """
    with rq.Connection(redis_connection()):
        for run_id in run_ids:
            job_id = _format_job_id(markus_address, run_id)
            try:
                job = rq.job.Job.fetch(job_id)
            except NoSuchJobError:
                return
            if job.is_queued:
                files_path = job.kwargs["files_path"]
                if files_path:
                    shutil.rmtree(files_path, onerror=ignore_missing_dir_error)
                job.cancel()
Example #26
    def queues(self):
        '''
        Get data about message queues.

        **Example Response**

        .. sourcecode:: json

            {
                "queues": [
                    {
                        "name": "default",
                        "pending_tasks": 4
                    },
                    ...
                ]
            }

        :<header Content-Type: application/json
        :<header X-Auth: the client's auth token

        :>header Content-Type: application/json
        :>json list queues: list of message queues
        :>json str queues[n]["name"]: name of the message queue
        :>json int queues[n]["pending_tasks"]: number of tasks pending in this
            queue

        :status 200: ok
        :status 401: authentication required
        :status 403: you must be an administrator
        '''

        queues = list()

        with rq.Connection(g.redis):

            for queue in rq.Queue.all():
                if queue.name == 'failed':
                    continue

                queues.append({
                    'pending_tasks': queue.count,
                    'name': queue.name,
                })

        return jsonify(queues=queues)
Example #27
    def delete_all_failed(self):
        '''
        Delete all failed tasks.

        :<header Content-Type: application/json
        :<header X-Auth: the client's auth token

        :>header Content-Type: application/json

        :status 200: ok
        :status 401: authentication required
        :status 403: you must be an administrator
        '''
        with rq.Connection(g.redis):

            for job in rq.get_failed_queue().jobs:
                job.delete()

        return jsonify(message='ok')
Example #28
File: rq_worker.py Project: zxytim/jrp
    def run(self, args):
        # Avoid shadowing the `redis` module with the connection object.
        redis_conn = StrictRedis(config.REDIS_HOST, config.REDIS_PORT)
        workers = []

        queue_config = parse_queue_config(args.config)
        with rq.Connection(redis_conn):
            for name, count in queue_config:
                queue = rq.Queue(name)

                for i in range(count):
                    w = rq.Worker([queue], name='{}:{}'.format(name, i))
                    workers.append(w)

        procs = [Process(target=start_worker, args=(w, )) for w in workers]
        for p in procs:
            p.start()

        for p in procs:
            p.join()
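
`start_worker` is used above but never defined in this listing. A minimal
sketch of what it presumably does; the name comes from the source, the body
is assumed:

def start_worker(worker):
    # Hypothetical reconstruction: each child process simply runs its
    # pre-built worker until it is stopped.
    worker.work()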
Example #29

def retry_handler(job, exc_type, exc_value, traceback):
    # This fragment starts mid-function in the original; the standard rq
    # exception-handler signature is assumed, and the code that increments
    # job.meta['failures'] and gives up after too many attempts is missing.
    # Requeue the job and stop it from being moved into the failed queue.
    logger.warning('job %s: failed %d times - retrying',
                   job.id, job.meta['failures'])

    for queue in queues:
        if queue.name == job.origin:
            queue.enqueue_job(job, timeout=job.timeout)
            return False

    # Can't find the queue, which should basically never happen: we only work
    # jobs that match the given queue names, and queues are transient in rq.
    logger.warning('job %s: cannot find queue %s - moving to failed queue',
                   job.id, job.origin)
    return True


with rq.Connection():
    # A list comprehension is needed here: in Python 3, map() returns a lazy
    # object that is always truthy, so the `or` fallback would never fire.
    queues = [rq.Queue(name) for name in sys.argv[1:]] or [rq.Queue()]

python-rq with tornado asynchronous
jobs.py
#!/usr/bin/env python
# encoding: utf-8

import time

def test(interval):
    for i in range(int(interval)):
        print(i)
        time.sleep(1)
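
Only the job module survives here; the tornado side of the original post is
missing. A minimal sketch of how a handler might enqueue `test` without
blocking the IOLoop, written from scratch under those assumptions (the URL,
handler name, and Redis location are all hypothetical):

import redis
import rq
import tornado.ioloop
import tornado.web

from jobs import test


class EnqueueHandler(tornado.web.RequestHandler):
    def get(self):
        # Enqueueing returns immediately; the blocking sleep happens in a
        # separate `rq worker` process, not in the tornado process.
        with rq.Connection(redis.Redis()):
            job = rq.Queue().enqueue(test, 10)
        self.write('queued job %s' % job.id)


if __name__ == '__main__':
    app = tornado.web.Application([(r'/run', EnqueueHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()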
Example #30
import os
import redis
import rq

listen = ['high', 'default', 'low']

redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')

connection = redis.from_url(redis_url)

if __name__ == '__main__':
    with rq.Connection(connection):
        worker = rq.Worker(map(rq.Queue, listen))
        worker.work()
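
For completeness, a producer feeding this worker might look like the
following. This is a sketch, not part of the original source: `count_words`
and its module path are hypothetical stand-ins for a real job function.

import redis
import rq

from tasks import count_words  # hypothetical job function

connection = redis.from_url('redis://localhost:6379')
q = rq.Queue('default', connection=connection)  # one of the queues in `listen`
job = q.enqueue(count_words, 'http://example.com')
print(job.id)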