def test_task_logging_capture(engine):
    """Log lines emitted by a task are captured and queryable afterwards."""
    with engine.begin() as cn:
        cn.execute('delete from rework.task')

    # both tasks run the same operation, hence produce the same scrubbed lines
    expected = [
        u'my_app_logger:ERROR: <X>-<X>-<X> <X>:<X>:<X>: will be captured <X>',
        u'stdout:INFO: <X>-<X>-<X> <X>:<X>:<X>: I want to be captured',
        u'my_app_logger:DEBUG: <X>-<X>-<X> <X>:<X>:<X>: will be captured <X> also'
    ]

    with workers(engine, 2):
        t1 = api.schedule(engine, 'capture_logs')
        t2 = api.schedule(engine, 'capture_logs')
        t1.join()
        t2.join()

        for task in (t1, t2):
            assert expected == [scrub(line) for _id, line in task.logs()]

        t3 = api.schedule(engine, 'capture_logs')
        t3.join()

        # `fromid` skips log records up to the given id: 3 lines minus the first
        logids = [lid for lid, _line in t3.logs()]
        assert 2 == len(t3.logs(fromid=logids[0]))
def test_list_monitors(engine, cli):
    """`list-monitors` shows the single default monitor with its options."""
    with workers(engine):
        result = cli('list-monitors', engine.url)
        expected = ('<X> <X>-<X>-<X> <X>:<X>:<X>+<X> '
                    'default options(maxmem=<X>, maxruns=<X>, debugport=<X>, '
                    'maxworkers=<X>, minworkers=<X>)')
        assert expected == scrub(result.output)
def test_list_workers(engine, cli):
    """`list-workers` output: a live worker, then dead and unstarted rows."""
    with workers(engine):
        out = cli('list-workers', engine.url)
        assert ('<X> <X>@<X>.<X>.<X>.<X> <X> Mb [running (idle)] '
                '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)

        # inject a bogus worker row: the monitor flags it as dead,
        # the original worker keeps running and a fresh row stays unstarted
        with engine.begin() as cn:
            insert('rework.worker').values(host='12345', domain='default').do(cn)

        out = cli('list-workers', engine.url)
        assert ('<X> <X>@<X>.<X>.<X>.<X> <X> Mb [dead] '
                '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                'Forcefully killed by the monitor. \n'
                '<X> <nopid>@<X> <X> Mb [unstarted] '
                '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)
def test_task_logs(engine, cli):
    """`log-task` prints the captured, colorized log lines of a finished task."""
    with workers(engine):
        task = api.schedule(engine, 'capture_logs')
        task.join()

        result = cli('log-task', engine.url, task.tid)
        # \x1b[...m are the ansi color escapes emitted per log level
        expected = (
            '\x1b[<X>mmy_app_logger:ERROR: <X>-<X>-<X> <X>:<X>:<X>: will be captured <X>\n'
            '\x1b[<X>mstdout:INFO: <X>-<X>-<X> <X>:<X>:<X>: I want to be captured\n'
            '\x1b[<X>mmy_app_logger:DEBUG: <X>-<X>-<X> <X>:<X>:<X>: will be captured <X> also'
        )
        assert expected == scrub(result.output)
def test_shutdown_worker(engine, cli):
    """`shutdown-worker` stops the worker and records an explicit shutdown."""
    url = engine.url
    with workers(engine) as mon:
        wid = mon.wids[0]
        cli('shutdown-worker', url, wid)

        # block until the worker row is marked as not running
        guard(
            engine,
            'select running from rework.worker where id = {}'.format(wid),
            lambda res: res.scalar() == 0
        )

        result = cli('list-workers', url)
        assert 'explicit shutdown' in scrub(result.output)
def test_debug_worker(engine, cli):
    """A worker started with debug=True advertises its debug port."""
    url = engine.url
    with engine.begin() as cn:
        cn.execute('delete from rework.worker')

    with workers(engine, debug=True):
        result = cli('list-workers', url)
        expected = (
            '<X> <X>@<X>.<X>.<X>.<X> <X> Mb [running (idle)] debugport = <X> '
            '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
            '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]'
        )
        assert expected == scrub(result.output)
def test_monitors_table(engine, client, refresh):
    """The workers-table view matches the stored html reference files."""

    def compare_with(refname):
        # fetch the rendered table and compare it with (or regenerate,
        # when --refresh is given) the on-disk reference file
        res = client.get('/workers-table')
        html = normalize(scrub(res.text))
        refpath = DATADIR / refname
        if refresh:
            refpath.write_bytes(html)
        assert html == refpath.read_bytes()

    with engine.begin() as cn:
        cn.execute('delete from rework.monitor')
        cn.execute('delete from rework.worker')

    with workers(engine):
        compare_with('monitors-table.html')

        t = api.schedule(engine, 'abortme')
        t.join('running')
        compare_with('monitors-table-1-task.html')
def test_task_abortion(engine):
    # End-to-end abortion scenario: schedule a never-ending task, abort it,
    # let the monitor preemptively kill the worker, then check all resulting
    # task/worker state transitions and death diagnostics.
    # NOTE(review): assertions here depend on exact statement order.
    with workers(engine) as mon:
        wid = mon.wids[0]
        t = api.schedule(engine, 'infinite_loop', True)
        # wait until the worker has picked up the task
        guard(
            engine,
            'select count(id) from rework.task where worker = {}'.format(wid),
            lambda res: res.scalar() == 1)

        assert t.state == 'running'
        # joining a task that never finishes must time out
        with pytest.raises(TimeOut) as err:
            t.join(timeout=.1)
        assert err.value.args[0] == t

        # check cpu usage
        mon.track_resources()
        cpu = engine.execute(
            'select cpu from rework.worker where id = {}'.format(
                wid)).scalar()
        assert cpu > 0

        t.abort()
        assert t.aborted
        # this is potentially racy but might work most of the time
        assert t.state == 'aborting'

        mon.preemptive_kill()
        t.join()
        assert t.state == 'aborted'
        assert t.deathinfo.startswith('preemptive kill')

        # one dead worker
        guard(engine,
              'select running from rework.worker where id = {}'.format(wid),
              lambda res: not res.scalar())

        diagnostic = engine.execute(
            'select deathinfo from rework.worker where id = {}'.format(
                wid)).scalar()
        assert 'preemptive kill at <X>-<X>-<X> <X>:<X>:<X>.<X>+<X>:<X>' == scrub(
            diagnostic)

        # lifecycle timestamps must be strictly ordered
        queued = t._propvalue('queued')
        started = t._propvalue('started')
        finished = t._propvalue('finished')
        assert finished > started > queued
def test_kill_worker(engine, cli):
    """`kill-worker` flags the worker; the monitor then kills it preemptively."""
    url = engine.url
    with engine.begin() as cn:
        cn.execute('delete from rework.worker')

    with workers(engine) as mon:
        task = api.schedule(engine, 'infinite_loop')
        task.join('running')  # let the worker pick up the task

        cli('kill-worker', url, mon.wids[0])
        mon.preemptive_kill()

        out = cli('list-workers', url)
        assert ('<X> <X>@<X>.<X>.<X>.<X> <X> Mb [dead] '
                '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                'preemptive kill at '
                '<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>:<X>') == scrub(out.output)

        out = cli('list-tasks', url)
        assert ('<X> infinite_loop aborted [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)
def test_abort_task(engine, cli):
    """Abort a running task through the cli and check worker/task listings."""
    url = engine.url
    with workers(engine) as mon:
        out = cli('list-workers', url)
        assert ('<X> <X>@<X>.<X>.<X>.<X> <X> Mb [running (idle)] '
                '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)

        task = api.schedule(engine, 'infinite_loop')
        task.join('running')  # let the worker pick up the task

        out = cli('list-workers', url)
        assert ('<X> <X>@<X>.<X>.<X>.<X> <X> Mb [running #<X>]'
                ' [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)

        out = cli('list-tasks', url)
        assert ('<X> infinite_loop running [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]'
                ' → [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)

        cli('abort-task', url, task.tid)
        mon.preemptive_kill()
        task.join()

        out = cli('list-workers', url)
        assert ('<X> <X>@<X>.<X>.<X>.<X> <X> Mb [dead] '
                '[<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                'preemptive kill at '
                '<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>:<X>') == scrub(out.output)

        out = cli('list-tasks', url)
        assert ('<X> infinite_loop aborted [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>] '
                '→ [<X>-<X>-<X> <X>:<X>:<X>.<X>+<X>]') == scrub(out.output)
def test_list_operations(engine, cli):
    """`list-operations` enumerates every registered operation."""
    expected = """
<X> host(<X>) `<X>.<X>.<X>.<X>` path(print_sleep_and_go_away)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(run_in_non_default_domain)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(raw_input)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(infinite_loop)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(infinite_loop_timeout)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(infinite_loop_long_timeout)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(unstopable_death)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(normal_exception)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(allocate_and_leak_mbytes)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(capture_logs)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(log_swarm)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(stderr_swarm)
<X> host(<X>) `<X>.<X>.<X>.<X>` path(flush_captured_stdout)
""".strip()
    with workers(engine):
        result = cli('list-operations', engine.url)
        assert expected == scrub(result.output).strip()
def test_minworkers(engine, cli):
    # Exercise the monitor's elasticity: with minworkers=1 / numworkers=4
    # the pool must grow one worker per pending task (up to 4) and shrink
    # back toward the minimum as tasks are aborted.
    # NOTE(review): the counts below depend on the exact call order.
    with workers(engine, minworkers=1, numworkers=4) as mon:
        r = cli('list-workers', engine.url)
        assert r.output.count('running (idle)') == 1

        # progressive ramp up, one task
        t1 = api.schedule(engine, 'infinite_loop')
        t1.join(target='running')
        new = mon.ensure_workers().new
        assert len(new) == 0
        r = cli('list-workers', engine.url)
        assert r.output.count('\n') == 1
        assert r.output.count('running (idle)') == 0
        assert scrub(r.output).count('running #<X>') == 1

        # now with three tasks
        t2 = api.schedule(engine, 'infinite_loop')
        t3 = api.schedule(engine, 'infinite_loop')
        new = mon.ensure_workers().new
        assert len(new) == 2
        t2.join(target='running')
        t3.join(target='running')
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 3
        assert r.output.count('running (idle)') == 0

        # just one useless turn for fun
        new = mon.ensure_workers().new
        assert len(new) == 0
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 3
        assert r.output.count('running (idle)') == 0

        # t4 should run and t5 remain in queue
        t4 = api.schedule(engine, 'infinite_loop')
        t5 = api.schedule(engine, 'infinite_loop')
        new = mon.ensure_workers().new
        assert len(new) == 1
        t4.join(target='running')
        assert t5.status == 'queued'
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 4
        assert r.output.count('running (idle)') == 0
        r = cli('list-tasks', engine.url)
        assert r.output.count('running') == 4
        assert r.output.count('queued') == 1

        # ramp down
        t1.abort()
        t2.abort()
        mon.preemptive_kill()
        t1.join()
        t2.join()
        mon.ensure_workers()
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 3
        assert r.output.count('running (idle)') == 0
        mon.ensure_workers()
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 3
        assert r.output.count('running (idle)') == 0

        t3.abort()
        t4.abort()
        mon.preemptive_kill()
        t3.join()
        t4.join()
        mon.ensure_workers()
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 1
        assert r.output.count('running (idle)') == 0

        t5.abort()
        mon.preemptive_kill()
        t5.join()
        mon.ensure_workers()
        r = cli('list-workers', engine.url)
        assert scrub(r.output).count('running #<X>') == 0
        assert r.output.count('running (idle)') == 1
def test_tasks_table(engine, client, refresh):
    # Exercise the tasks-table html view and its hash endpoint through the
    # full lifecycle: empty table, one good task, one failed task, then a
    # task in a freshly declared non-default domain.
    # NOTE(review): the hash assertions depend on the exact prior state.
    with engine.begin() as cn:
        cn.execute('delete from rework.task')

    with workers(engine):
        # before any refresh, only a placeholder is served
        res = client.get('/tasks-table')
        assert res.text == '<p>Table under construction ...</p>'
        res = client.get('/tasks-table-hash')
        assert res.text == 'no-hash-yet'

        taskstable.refresh_tasks_file(engine)

        # empty-but-refreshed table: just the headers
        res = client.get('/tasks-table')
        assert res.text == (
            '<br>\n'
            '<table class="table table-sm table-bordered table-striped table-hover">\n'
            '<thead class="thead-inverse"><tr><th>#</th><th>service</th><th>domain</th>'
            '<th>queued</th><th>started</th><th>finished</th>'
            '<th>user</th><th>worker</th><th>status</th><th>action</th></tr></thead>\n</table>'
        )
        res = client.get('/tasks-table-hash')
        assert res.text == 'd751713988987e9331980363e24189ce'
        res = client.get('/tasks-table-hash?domain=all')
        assert res.text == 'no-hash-yet'
        res = client.get('/tasks-table-hash?domain=default')
        assert res.text == 'd751713988987e9331980363e24189ce'

        t = api.schedule(engine, 'good_job', metadata={'user': '******'})
        t.join()
        taskstable.refresh_tasks_file(engine)

        res = client.get('/tasks-table')
        refpath = DATADIR / 'tasks-table.html'
        if refresh:
            refpath.write_bytes(scrub(res.text).encode('utf-8'))
        assert scrub(res.text) == refpath.read_bytes().decode('utf-8')

        count = engine.execute('select count(*) from rework.taskstable').scalar()
        # only default domains, 'all' appears with many domains
        assert count == 1

        t = api.schedule(engine, 'bad_job', metadata={'user': '******'})
        t.join()
        taskstable.refresh_tasks_file(engine)

        res = client.get('/tasks-table')
        srcpath = re.compile('File "(.*)"')

        def edit(elt):
            # redact the traceback's absolute source path so the html
            # compares equal across machines
            if 'title' in elt.attrib:
                elt.attrib['title'] = srcpath.sub('/path/to/src/file',
                                                  elt.attrib['title'])
            return elt

        html = edittag('td', edit, res.text).decode('utf-8')
        refpath = DATADIR / 'tasks-table-error.html'
        if refresh:
            refpath.write_bytes(scrub(html).encode('utf-8'))
        assert scrub(html) == refpath.read_bytes().decode('utf-8')

    # declare a new domain
    from . import tasks
    api.freeze_operations(engine)

    with workers(engine, domain='uranus'):
        t = api.schedule(engine, 'justdoit', domain='uranus',
                         metadata={'user': '******'})
        t.join()
        taskstable.refresh_tasks_file(engine)
        res = client.get('/tasks-table-hash?domain=uranus')
        assert res.text == '05265be5adad9bb8b0ee50f837535cfa'
        res = client.get('/tasks-table?domain=uranus')
        refpath = DATADIR / 'tasks-table-uranus.html'
        if refresh:
            refpath.write_bytes(scrub(res.text).encode('utf-8'))
        assert scrub(res.text) == refpath.read_bytes().decode('utf-8')