Example #1
0
def test_fetch_current_task():
    """A worker reports no current task until one has been dequeued for it."""
    worker = Worker('testworker', queues=[QueueFactory()])
    first_queue = worker.queues[0]
    first_queue.enqueue_call()
    # Nothing dequeued yet, so there is no current task.
    assert worker.fetch_current_task() is None
    dequeued = first_queue.dequeue(worker)
    assert worker.fetch_current_task().id == dequeued.id
Example #2
0
def list_workers():
    """Serialize every known worker into a JSON-friendly dict."""
    serialized = []
    for worker in Worker.all():
        serialized.append({
            'name': worker.description,
            'queues': [queue.name for queue in worker.queues],
            'state': str(worker.state),
        })
    return {'workers': serialized}
Example #3
0
def WorkerFactory(*, queues=None):
    """Create a uniquely-named :class:`Worker` for tests.

    Each call increments the module-level ``worker_sequence`` counter and
    derives both the worker id (``worker_<n>``) and its human-readable
    description (``Worker <n>``) from it.

    Args:
        queues: optional list of queues; defaults to one fresh queue
            from ``QueueFactory()``.

    Returns:
        A new ``Worker`` instance.
    """
    global worker_sequence
    worker_sequence += 1
    # Named ``worker_id`` to avoid shadowing the ``id`` builtin.
    worker_id = f'worker_{worker_sequence}'
    description = f"Worker {worker_sequence}"
    if queues is None:
        queues = [QueueFactory()]
    return Worker(worker_id, description=description, queues=queues)
Example #4
0
def list_jobs(queue_name, page):
    """Build the JSON payload for one page of a queue's jobs.

    ``queue_name`` may be a real queue name or one of the pseudo-queues
    '[failed]' / '[finished]' (registry-backed, served in reverse order)
    or '[running]' (tasks currently held by workers, no pagination).

    Returns a dict with ``name``, ``jobs`` and ``pagination`` keys.
    """
    if queue_name != '[running]':
        # Registry-backed pseudo-queues are paged from the tail.
        if queue_name == '[failed]':
            queue = failed_task_registry
            reverse_order = True
        elif queue_name == '[finished]':
            queue = finished_task_registry
            reverse_order = True
        else:
            queue = Queue(queue_name)
            reverse_order = False

        current_page = int(page)
        per_page = 20
        total_items = queue.count()
        # Page numbers to render around the current page.
        pages_numbers_in_window = pagination_window(total_items, current_page,
                                                    per_page)
        pages_in_window = [
            dict(number=p,
                 url=app.url_for('overview', queue_name=queue_name, page=p))
            for p in pages_numbers_in_window
        ]
        last_page = int(ceil(total_items / float(per_page)))

        # NOTE(review): "prev"/"next" link to the FIRST and LAST page
        # (page=1 / page=last_page), not to the adjacent pages — confirm
        # this is intended and not an off-by-design bug.
        prev_page = None
        if current_page > 1:
            prev_page = dict(
                url=app.url_for('overview', queue_name=queue_name, page=1))

        next_page = None
        if current_page < last_page:
            next_page = dict(url=app.url_for(
                'overview', queue_name=queue_name, page=last_page))

        pagination = remove_none_values(
            dict(pages_in_window=pages_in_window,
                 next_page=next_page,
                 prev_page=prev_page))

        if reverse_order:
            # Slice the page from the tail using negative indices, then flip
            # it so the tail-most entries come first on the page.
            start = -1 - (current_page - 1) * per_page
            end = start - per_page
            jobs = reversed(queue.get_tasks(end, start))
        else:
            offset = (current_page - 1) * per_page
            jobs = queue.get_tasks(offset, per_page)
        jobs = [serialize_job(job) for job in jobs]
    else:
        # '[running]': one entry per (worker, task) pair, sorted by the
        # worker's description; no pagination for this view.
        jobs = sorted(
            ({
                **serialize_job(Task.fetch(tid)), 'worker':
                Worker.fetch(wid).description
            } for wid, tid in worker_registry.get_running_tasks().items()),
            key=itemgetter('worker'),
        )
        pagination = {}
    return dict(name=queue_name, jobs=jobs, pagination=pagination)
Example #5
0
def overview(queue_name, page):
    """Render the dashboard template for the given queue and page."""
    template_context = dict(
        workers=Worker.all(),
        queue=Queue(queue_name),
        page=page,
        queues=Queue.all(),
        rt_url_prefix=app.url_for('overview'),
        poll_interval=2500,
    )
    return app.render_template('rt_dashboard/dashboard.html',
                               **template_context)
Example #6
0
def show_workers(queues, by_queue):
    """Print a CLI overview of workers, flat or grouped per queue.

    Args:
        queues: iterable of queues to filter by; falsy means all queues.
        by_queue: when true, list the workers serving each queue instead
            of a flat worker list.
    """
    def state_symbol(state):
        # Colorize the two known states; any other state is printed as-is.
        return {
            WorkerState.BUSY: red('busy'),
            WorkerState.IDLE: green('idle')
        }.get(state, state)

    workers = Worker.all()
    # Union of registered queues and every queue any worker listens on.
    all_queues = set(Queue.all()) | {q for w in workers for q in w.queues}
    if queues:
        # Keep only workers serving at least one of the requested queues.
        workers = [w for w in workers if any(set(w.queues) & set(queues))]
    else:
        queues = all_queues

    if not by_queue:
        click.secho("Workers", bold=True)
    else:
        click.secho("Workers per Queue", bold=True)
    print_separator()
    if not by_queue:
        for worker in sorted(workers, key=attrgetter('description')):
            worker_queues = ', '.join(q.name for q in worker.queues)
            click.echo(
                f'{worker.description} {state_symbol(worker.state)}: {worker_queues}'
            )
    else:
        # Invert the worker->queues relation into queue->workers.
        queue_map = {q: [] for q in all_queues}
        for w in workers:
            for q in w.queues:
                queue_map[q].append(w)
        # Right-align queue names on the longest one for a tidy column.
        max_qname = max(len(q.name) for q in queues)
        for queue in sorted(queues, key=attrgetter('name')):
            q_workers = queue_map[queue]
            workers_str = ", ".join(
                sorted(f'{w.description} {state_symbol(w.state)}'
                       for w in q_workers))
            if not workers_str:
                workers_str = '–'
            click.echo(f'{queue.name:>{max_qname}}: {workers_str}')

    click.echo(f'{len(workers)} worker(s)')
Example #7
0
 def handle_died_workers(self):  # TODO: Test
     """Run the death handler for every worker whose id is reported dead."""
     # Imported locally to avoid an import cycle with the worker module.
     from redis_tasks.worker import Worker
     for dead_id in self.get_dead_ids():
         Worker.fetch(dead_id).died()
Example #8
0
def test_died(time_mocker, connection, assert_atomic):
    """worker.died() cleans up correctly from idle, limbo and busy states."""
    time = time_mocker('redis_tasks.worker.utcnow')

    # Die while idle: deregistered, marked DEAD, key gets an expiry TTL.
    worker = Worker('idleworker', queues=[QueueFactory()])
    time.step()
    worker.startup()
    time.step()
    assert connection.ttl(worker.key) == -1
    assert worker.id in worker_registry.get_worker_ids()
    with assert_atomic():
        worker.died()
    assert worker.id not in worker_registry.get_worker_ids()
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.state == WorkerState.DEAD
        assert w.shutdown_at == time.now
    assert connection.ttl(worker.key) > 0

    # Die with a task in limbo (dequeued but never started): the task is
    # put back on its queue and the worker's current task is cleared.
    worker = Worker('limboworker', queues=[QueueFactory(), QueueFactory()])
    queue = worker.queues[1]
    time.step()
    worker.startup()
    time.step()
    queue.enqueue_call()
    task = queue.dequeue(worker)
    with assert_atomic(exceptions=['hgetall']):
        worker.died()
    assert queue.get_task_ids() == [task.id]
    assert worker.id not in worker_registry.get_worker_ids()
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.state == WorkerState.DEAD
        assert w.current_task_id is None
        assert w.shutdown_at == time.now
    assert connection.ttl(worker.key) > 0

    # Die while busy (task started): the task lands in the failed registry
    # instead of returning to its queue.
    worker = Worker('busyworker', queues=[QueueFactory(), QueueFactory()])
    queue = worker.queues[1]
    time.step()
    worker.startup()
    time.step()
    queue.enqueue_call()
    task = queue.dequeue(worker)
    worker.start_task(task)
    with assert_atomic(exceptions=['hgetall']):
        worker.died()
    assert queue.get_task_ids() == []
    assert failed_task_registry.get_task_ids() == [task.id]
    assert worker.id not in worker_registry.get_worker_ids()
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.state == WorkerState.DEAD
        assert w.current_task_id is None
        assert w.shutdown_at == time.now
    assert connection.ttl(worker.key) > 0
Example #9
0
def test_persistence(assert_atomic, connection, time_mocker):
    """Worker fields round-trip through redis via _save()/fetch()/refresh()."""
    time = time_mocker('redis_tasks.worker.utcnow')
    time.step()
    # The persisted worker fields under comparison.
    fields = {
        'description', 'state', 'queues', 'started_at', 'shutdown_at',
        'current_task_id'
    }

    def randomize_data(worker):
        # Overwrite every persisted field with random values so a
        # successful round-trip cannot be a coincidence.
        string_fields = ['description', 'state', 'current_task_id']
        date_fields = ['started_at', 'shutdown_at']
        for f in string_fields:
            setattr(worker, f, str(uuid.uuid4()))
        for f in date_fields:
            setattr(
                worker, f,
                datetime.datetime(random.randint(1000, 9999),
                                  1,
                                  1,
                                  tzinfo=datetime.timezone.utc))

        worker.args = tuple(str(uuid.uuid4()) for i in range(4))
        worker.kwargs = {str(uuid.uuid4()): ["d"]}
        worker.meta = {"x": [str(uuid.uuid4())]}
        worker.aborted_runs = ["foo", "bar", str(uuid.uuid4())]

    def as_dict(worker):
        # Snapshot of the persisted fields; queues compared by name.
        return {
            f: getattr(worker, f)
            if f != 'queues' else [q.name for q in worker.queues]
            for f in fields
        }

    worker = Worker("testworker", queues=[QueueFactory()])
    with assert_atomic():
        worker.startup()
    assert as_dict(worker) == as_dict(Worker.fetch(worker.id))
    worker2 = Worker("worker2", queues=[QueueFactory() for i in range(5)])
    worker2.startup()
    assert as_dict(worker2) == as_dict(Worker.fetch(worker2.id))
    assert as_dict(worker) != as_dict(worker2)

    worker = Worker("testworker", queues=[QueueFactory()])
    worker.startup()
    with assert_atomic():
        worker._save()
    assert set(decode_list(connection.hkeys(worker.key))) <= fields
    assert as_dict(Worker.fetch(worker.id)) == as_dict(worker)

    randomize_data(worker)
    worker._save()
    assert as_dict(Worker.fetch(worker.id)) == as_dict(worker)

    # only deletes
    worker.started_at = None
    worker._save(['started_at'])
    assert as_dict(Worker.fetch(worker.id)) == as_dict(worker)

    # Partial saves: only the stored subset may change on the redis side.
    for i in range(5):
        # BUG FIX: random.sample() on a set is deprecated since Python 3.9
        # and raises TypeError since 3.11 — sample from a sorted sequence.
        store = random.sample(sorted(fields), 3)
        copy = Worker.fetch(worker.id)
        randomize_data(copy)
        copy._save(store)
        for f in store:
            setattr(worker, f, getattr(copy, f))
        assert as_dict(Worker.fetch(worker.id)) == as_dict(worker)

    worker = Worker("nonexist", queues=[QueueFactory()])
    with pytest.raises(WorkerDoesNotExist):
        worker.refresh()
    with pytest.raises(WorkerDoesNotExist):
        Worker.fetch("nonexist")
Example #10
0
def test_state_transitions(time_mocker, connection, assert_atomic):
    """Worker lifecycle: startup -> dequeue -> start/end task -> shutdown."""
    worker = Worker('myworker', queues=[QueueFactory(), QueueFactory()])
    time = time_mocker('redis_tasks.worker.utcnow')
    queue = worker.queues[0]

    # Startup registers the worker and records started_at / IDLE state.
    time.step()
    assert not connection.exists(worker.key, worker.task_key)
    assert worker.id not in worker_registry.get_worker_ids()
    with assert_atomic():
        worker.startup()
    assert worker.id in worker_registry.get_worker_ids()
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.started_at == time.now
        assert w.state == WorkerState.IDLE

    # Dequeue assigns current_task_id but the worker stays IDLE.
    queue.enqueue_call()
    with assert_atomic(exceptions=['hgetall']):
        task = queue.dequeue(worker)
    assert connection.exists(worker.key, worker.task_key) == 2
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.current_task_id == task.id
        assert w.state == WorkerState.IDLE

    # Starting the task flips the worker to BUSY.
    with assert_atomic():
        worker.start_task(task)
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.current_task_id == task.id
        assert w.state == WorkerState.BUSY

    # Ending the task clears current_task_id and returns to IDLE.
    with assert_atomic():
        worker.end_task(task, TaskOutcome("success"))
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.current_task_id is None
        assert w.state == WorkerState.IDLE

    # Shutdown deregisters, marks DEAD and puts a TTL on the worker key.
    time.step()
    assert connection.ttl(worker.key) == -1
    assert worker.id in worker_registry.get_worker_ids()
    with assert_atomic():
        worker.shutdown()
    assert worker.id not in worker_registry.get_worker_ids()
    for w in [worker, Worker.fetch(worker.id)]:
        assert w.state == WorkerState.DEAD
        assert w.shutdown_at == time.now
    assert connection.ttl(worker.key) > 0
Example #11
0
def test_heartbeat(mocker):
    """worker.heartbeat() delegates to the worker registry exactly once."""
    heartbeat = mocker.patch.object(worker_registry, 'heartbeat')
    worker = Worker('testworker', queues=[QueueFactory()])
    worker.startup()
    worker.heartbeat()
    # BUG FIX: ``heartbeat.called_once_with(...)`` is not a Mock assertion —
    # it auto-creates a truthy child mock, so the old assert always passed.
    heartbeat.assert_called_once_with(worker)
Example #12
0
def test_all():
    """Worker.all() lists exactly the workers that started and not shut down."""
    first = Worker('w1', queues=[QueueFactory()])
    second = Worker('w2', queues=[QueueFactory()])
    # Unstarted workers are invisible.
    assert Worker.all() == []
    first.startup()
    second.startup()
    assert id_list(Worker.all()) == [first.id, second.id]
    # Shutdown removes a worker from the listing.
    first.shutdown()
    assert id_list(Worker.all()) == [second.id]
Example #13
0
def get_history():
    """Build a vis.js-style timeline payload of recent task history.

    Combines the tails of the finished and failed registries with the
    currently running tasks, lays the tasks of each function out on
    non-overlapping "worker" lanes, and returns ``rows`` (timeline items)
    plus ``groups`` (one per function name).
    """
    max_finished_tasks = 5000
    max_failed_tasks = 1000
    finished_tasks = finished_task_registry.get_tasks(-max_finished_tasks, -1)
    failed_tasks = failed_task_registry.get_tasks(-max_failed_tasks, -1)
    # If either registry slice is full, the other may reach further back in
    # time; trim it so both cover the same time window.
    if len(finished_tasks) == max_finished_tasks:
        failed_tasks = [
            t for t in failed_tasks if t.ended_at >= finished_tasks[0].ended_at
        ]
    if len(failed_tasks) == max_failed_tasks:
        finished_tasks = [
            t for t in finished_tasks if t.ended_at >= failed_tasks[0].ended_at
        ]

    # Running tasks have no end yet; treat "now" as their end time.
    now = utcnow()
    running_tasks = []
    for wid, tid in worker_registry.get_running_tasks().items():
        task = Task.fetch(tid)
        task.ended_at = now
        task.running_on = Worker.fetch(wid).description
        running_tasks.append(task)

    tasks = failed_tasks + finished_tasks + running_tasks
    tasks.sort(key=lambda t: t.started_at)

    by_func = defaultdict(list)
    for t in tasks:
        by_func[t.func_name].append(t)

    # reconstruct worker-mapping: greedily assign each task the first lane
    # whose previous occupant ended before this task started.
    for group in by_func.values():
        workers = []
        for task in sorted(group, key=lambda t: t.started_at):
            # Free up lanes whose task has already ended.
            workers = [
                None if not t or t.ended_at <= task.started_at else t
                for t in workers
            ]
            try:
                task.worker = workers.index(None)
                workers[task.worker] = task
            except ValueError:
                # No free lane: open a new one.
                task.worker = len(workers)
                workers.append(task)

    # Order groups by earliest time-of-day start, then by longest duration.
    groups = sorted(by_func.values(),
                    key=lambda group_tasks: (
                        min(t.started_at.timetuple()[3:] for t in group_tasks),
                        max(t.ended_at - t.started_at for t in group_tasks),
                    ))

    # Functions contributing <2% of tasks get collapsed to points (unless
    # an individual task ran longer than a minute).
    collapsed_groups = {
        k
        for k, v in by_func.items() if len(v) / len(tasks) < 0.02
    }

    tz = pytz.timezone(settings.TIMEZONE)
    rows = []
    for t in tasks:
        t.started_at = t.started_at.astimezone(tz)
        t.ended_at = t.ended_at.astimezone(tz)
        keys = {
            'group': t.func_name,
            'subgroup': t.worker,
            'start': t.started_at,
            'title': task_tooltip(t),
        }
        if hasattr(t, 'running_on'):
            # Running task: range item labeled with its worker.
            keys.update({
                'end': t.ended_at,
                'type': 'range',
                'content': t.running_on,
            })
        elif (t.func_name not in collapsed_groups
              or (t.ended_at - t.started_at) > datetime.timedelta(minutes=1)):
            # Finished/failed task shown as a range labeled with duration.
            keys.update({
                'end': t.ended_at,
                'type': 'range',
                'content': f'[{t.ended_at - t.started_at}]',
            })
        else:
            # Collapsed group: single point labeled with the start time.
            keys.update({
                'type': 'point',
                'content': t.started_at.strftime('%H:%M:%S'),
            })

        if t.status == 'failed':
            keys['style'] = 'border-color: {0}; background-color: {0}'.format(
                '#E69089')
        elif t.status == 'running':
            keys['style'] = 'border-color: {0}; background-color: {0}'.format(
                '#D5F6D7')

        # Timeline consumers expect epoch timestamps, not datetimes.
        keys = {
            k: v.timestamp() if isinstance(v, datetime.datetime) else v
            for k, v in keys.items()
        }
        rows.append(keys)

    return {
        "rows":
        rows,
        "groups": [{
            'id': group[0].func_name,
            'content': group[0].func_name,
            'order': i
        } for i, group in enumerate(groups)],
    }