def test_meta_arg_default_disallowed(self):
    """A parameter named after a task meta-arg may not declare a default."""
    def py_callable(a, b, changed=None):
        pass
    task = Task('Fake', [(py_callable, ('a', 'b'))])
    task.options = {}
    task.dep_changed = ['changed']
    act = task.actions[0]
    pytest.raises(action.InvalidTask, act.execute)
def test_callable_invalid(self):
    """A cmd-building callable with an unknown parameter yields a TaskError."""
    def get_cmd(blabla):
        pass
    task = Task('Fake', [action.CmdAction(get_cmd)])
    task.options = {'opt1': '3'}
    act = task.actions[0]
    result = act.execute()
    assert isinstance(result, TaskError)
def test_save_result(self, pdepfile):
    """Task result is saved as an MD5 hash and cleared by remove_success."""
    t1 = Task('t_name', None)
    t1.result = "result"
    pdepfile.save_success(t1)
    assert get_md5("result") == pdepfile._get(t1.name, "result:")
    pdepfile.remove_success(t1)
    assert pdepfile._get(t1.name, "result:") is None
def test_no_extra_args(self):
    """A zero-arg callable executes fine; no meta values are injected."""
    # no error trying to inject values
    def py_callable():
        return True
    task = Task('Fake', [py_callable], file_dep=['dependencies'])
    task.options = {}
    act = task.actions[0]
    act.execute()
def test_option_default_allowed(self):
    """A task option value overrides the callable's own default."""
    captured = []
    def py_callable(opt2='ABC'):
        captured.append(opt2)
    task = Task('Fake', [py_callable])
    task.options = {'opt2': '123'}
    act = task.actions[0]
    act.execute()
    assert ['123'] == captured, repr(captured)
def test_task_options(self):
    """%-style placeholders in a cmd string are filled from task.options."""
    cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
    cmd += " %(opt1)s - %(opt2)s"
    task = Task('Fake', [cmd])
    task.options = {'opt1': '3', 'opt2': 'abc def'}
    act = task.actions[0]
    assert act.execute() is None
    assert "3 - abc def" == act.out.strip()
def testSetVerbosity(self, depfile):
    """doit_run forwards the requested verbosity to task.execute."""
    output = StringIO.StringIO()
    t = Task('x', None)
    seen = []
    def fake_execute(out, err, verbosity):
        seen.append(verbosity)
    t.execute = fake_execute
    cmds.doit_run(depfile.name, [t], output, verbosity=2)
    assert 2 == seen[0], seen
def test_both(self, monkeypatch):
    """STRING_FORMAT 'both' accepts new-style and old-style placeholders."""
    monkeypatch.setattr(action.CmdAction, 'STRING_FORMAT', 'both')
    cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
    cmd += " {dependencies} - %(opt1)s"
    task = Task('Fake', [cmd], ['data/dependency1'])
    task.options = {'opt1': 'abc'}
    act = task.actions[0]
    assert act.execute() is None
    assert "data/dependency1 - abc" == act.out.strip()
def test_callable_return_command_str(self):
    """CmdAction may take a callable that builds the command string."""
    def get_cmd(opt1, opt2):
        base = "%s %s/myecho.py" % (executable, TEST_PATH)
        return base + " %s - %s" % (opt1, opt2)
    task = Task('Fake', [action.CmdAction(get_cmd)])
    task.options = {'opt1': '3', 'opt2': 'abc def'}
    act = task.actions[0]
    assert act.execute() is None
    echoed = act.out.strip()
    assert "3 - abc def" == echoed, repr(echoed)
def test_task_pos_arg(self):
    """Positional arg values are joined into the cmd string."""
    cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
    cmd += " %(pos)s"
    task = Task('Fake', [cmd], pos_arg='pos')
    task.options = {}
    task.pos_arg_val = ['hi', 'there']
    act = task.actions[0]
    assert act.execute() is None
    assert "hi there" == act.out.strip()
def test_save_result_dep(self, depfile):
    """result_dep stores the dependency's result hash; task_dep does not."""
    t1 = Task('t1', None)
    t1.result = "result"
    depfile.save_success(t1)
    t2 = Task('t2', None, result_dep=['t1'])
    depfile.save_success(t2)
    assert get_md5(t1.result) == depfile._get("t2", "task:t1")
    t3 = Task('t3', None, task_dep=['t1'])
    depfile.save_success(t3)
    assert depfile._get("t3", "task:t1") is None
def test_task_pos_arg(self):
    """The whole pos_arg value list is passed to the callable."""
    captured = []
    def py_callable(pos):
        captured.append(pos)
    task = Task('Fake', [py_callable], pos_arg='pos')
    task.options = {}
    task.pos_arg_val = ['hi', 'there']
    act = task.actions[0]
    act.execute()
    assert [['hi', 'there']] == captured, repr(captured)
def testSetVerbosity(self, depfile):
    """Run._execute forwards the verbosity argument to task.execute."""
    output = StringIO.StringIO()
    t = Task('x', None)
    seen = []
    def fake_execute(out, err, verbosity):
        seen.append(verbosity)
    t.execute = fake_execute
    cmd_run = Run(dep_file=depfile.name, task_list=[t])
    cmd_run._execute(output, verbosity=2)
    assert 2 == seen[0], seen
def testSetVerbosity(self):
    """doit_run passes the given verbosity through to task.execute."""
    remove_testdb()
    output = StringIO.StringIO()
    t = Task('x', None)
    seen = []
    def fake_execute(out, err, verbosity):
        seen.append(verbosity)
    t.execute = fake_execute
    cmds.doit_run(TESTDB, [t], output, verbosity=2)
    assert 2 == seen[0], seen
def test_task_options(self):
    """Only options matching the callable's parameter names are injected."""
    captured = []
    def py_callable(opt1, opt3):
        captured.append(opt1)
        captured.append(opt3)
    task = Task('Fake', [py_callable])
    task.options = {'opt1': '1', 'opt2': 'abc def', 'opt3': 3}
    act = task.actions[0]
    act.execute()
    assert ['1', 3] == captured, repr(captured)
def testSetVerbosity(self, depfile_name):
    """Run built via CmdFactory forwards verbosity to task.execute."""
    output = StringIO()
    t = Task('x', None)
    seen = []
    def fake_execute(out, err, verbosity):
        seen.append(verbosity)
    t.execute = fake_execute
    cmd_run = CmdFactory(Run, backend='dbm', dep_file=depfile_name,
                         task_list=[t])
    cmd_run._execute(output, verbosity=2)
    assert 2 == seen[0], seen
def test_task_pos_arg_None(self):
    """An unset pos_arg expands to an empty string in the cmd line."""
    # pos_arg_val is None when the task is not specified from the
    # command line but executed because it is a task_dep.
    cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
    cmd += " %(pos)s"
    task = Task('Fake', [cmd], pos_arg='pos')
    task.options = {}
    act = task.actions[0]
    assert act.execute() is None
    assert "" == act.out.strip()
def test_keyword_extra_args(self):
    """Options and explicit kwargs land in **kwargs; meta args do not leak."""
    captured = []
    def py_callable(arg=None, **kwargs):
        captured.append(kwargs)
    my_task = Task('Fake', [(py_callable, (), {'b': 4})],
                   file_dep=['dependencies'])
    my_task.options = {'foo': 'bar'}
    act = my_task.actions[0]
    act.execute()
    # meta args do not leak into kwargs
    assert captured == [{'foo': 'bar', 'b': 4}]
def test_extra_kwarg_overwritten(self):
    """An explicit action kwarg wins over the injected meta-arg value."""
    captured = []
    def py_callable(a, b, **kwargs):
        captured.append(a)
        captured.append(b)
        captured.append(kwargs['changed'])
    task = Task('Fake', [(py_callable, ('a', 'b'), {'changed': 'c'})])
    task.options = {}
    task.dep_changed = ['changed']
    act = task.actions[0]
    act.execute()
    assert captured == ['a', 'b', 'c']
def test_ok(self, reporter, dep_manager):
    """Teardown actions run and are reported after task execution."""
    touched = []
    def touch():
        touched.append(1)
    t1 = Task('t1', [], teardown=[(touch,)])
    my_runner = runner.Runner(dep_manager, reporter)
    my_runner.teardown_list = [t1]
    t1.execute()
    my_runner.teardown()
    assert 1 == len(touched)
    assert ('teardown', t1) == reporter.log.pop(0)
    assert not reporter.log
def test_mixed_args(self):
    """Positional args plus an injected meta-arg are all delivered."""
    captured = []
    def py_callable(a, b, changed):
        captured.append(a)
        captured.append(b)
        captured.append(changed)
    task = Task('Fake', [(py_callable, ('a', 'b'))])
    task.options = {}
    task.dep_changed = ['changed']
    act = task.actions[0]
    act.execute()
    assert captured == ['a', 'b', ['changed']]
def test_extra_arg_overwritten(self):
    """A positional argument wins over the injected meta-arg value."""
    captured = []
    def py_callable(a, b, changed):
        captured.append(a)
        captured.append(b)
        captured.append(changed)
    task = Task('Fake', [(py_callable, ('a', 'b', 'c'))])
    task.dep_changed = ['changed']
    task.options = {}
    act = task.actions[0]
    act.execute()
    assert captured == ['a', 'b', 'c']
def test_kwonlyargs_minimal(self):
    """Keyword-only parameters receive their value from the kwargs dict."""
    captured = []
    scope = {'got': captured}
    # source compiled at runtime because keyword-only syntax is a
    # SyntaxError on interpreters that do not support it
    exec(textwrap.dedent('''
        def py_callable(*args, kwonly=None):
            got.append(args)
            got.append(kwonly)
    '''), scope)
    task = Task('Fake', [(scope['py_callable'], (1, 2, 3), {'kwonly': 4})])
    task.options = {}
    act = task.actions[0]
    act.execute()
    assert [(1, 2, 3), 4] == captured, repr(captured)
def test_method(self):
    """A bound method works as an action callable like a plain function."""
    captured = []
    class CallMe(object):
        def xxx(self, a, b, changed):
            captured.append(a)
            captured.append(b)
            captured.append(changed)
    task = Task('Fake', [(CallMe().xxx, ('a', 'b'))])
    task.options = {}
    task.dep_changed = ['changed']
    act = task.actions[0]
    act.execute()
    assert captured == ['a', 'b', ['changed']]
def test_named_extra_args(self):
    """Parameters named after task meta-attributes get those values."""
    captured = []
    def py_callable(targets, dependencies, changed, task):
        captured.append(targets)
        captured.append(dependencies)
        captured.append(changed)
        captured.append(task)
    task = Task('Fake', [py_callable], file_dep=['dependencies'],
                targets=['targets'])
    task.dep_changed = ['changed']
    task.options = {}
    act = task.actions[0]
    act.execute()
    assert captured == [['targets'], ['dependencies'], ['changed'], task]
def test_task_meta_reference(self):
    """%(dependencies)s, %(changed)s and %(targets)s expand in cmd strings."""
    cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
    cmd += " %(dependencies)s - %(changed)s - %(targets)s"
    dependencies = ["data/dependency1", "data/dependency2"]
    targets = ["data/target", "data/targetXXX"]
    task = Task('Fake', [cmd], dependencies, targets)
    task.dep_changed = ["data/dependency1"]
    task.options = {}
    act = task.actions[0]
    assert act.execute() is None
    parts = act.out.split('-')
    assert task.file_dep == set(parts[0].split())
    assert task.dep_changed == parts[1].split()
    assert targets == parts[2].split()
def test_complete_run_verbosity2_redisplay(self):
    """With failure_verbosity=2, captured streams are redisplayed at the end."""
    rep = reporter.ConsoleReporter(StringIO(), {'failure_verbosity': 2})
    catched = CatchedException("catched exception there", Exception("foo"))
    task = Task("t_name", None, verbosity=2)
    task.executed = True
    rep.add_failure(task, catched)
    # assign new StringIO so output is only from complete_run()
    rep.outstream = StringIO()
    rep.complete_run()
    final_output = rep.outstream.getvalue()
    assert "<stdout>" in final_output
    assert "<stderr>" in final_output
def test(self):
    """TaskResult captures name, result, stdout/stderr and timing info."""
    def sample():
        print("this is printed")
    t1 = Task("t1", [(sample,)])
    result = reporter.TaskResult(t1)
    result.start()
    t1.execute(Stream(0))
    result.set_result('success')
    data = result.to_dict()
    assert t1.name == data['name'], data
    assert 'success' == data['result'], data
    assert "this is printed\n" == data['out'], data
    assert "" == data['err'], data
    assert data['started']
    assert 'elapsed' in data
def test(self):
    """TaskResult.to_dict exposes name, result, output and timing fields."""
    def sample():
        print("this is printed")
    t1 = Task("t1", [(sample,)])
    result = reporter.TaskResult(t1)
    result.start()
    t1.execute()
    result.set_result("success")
    data = result.to_dict()
    assert t1.name == data["name"], data
    assert "success" == data["result"], data
    assert "this is printed\n" == data["out"], data
    assert "" == data["err"], data
    assert data["started"]
    assert "elapsed" in data
def test_values_and_results(self, depfile, dependency1):
    """ResetDep recomputes file deps but keeps saved values and result."""
    my_task = Task("t2", [""], file_dep=['tests/data/dependency1'])
    my_task.result = "result"
    my_task.values = {'x': 5, 'y': 10}
    depfile.save_success(my_task)
    depfile.checker = TimestampChecker()  # trigger task update
    reseted = Task("t2", [""], file_dep=['tests/data/dependency1'])
    output = StringIO()
    cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[reseted],
                           dep_manager=depfile)
    cmd_reset._execute()
    assert "processed t2\n" == output.getvalue()
    assert {'x': 5, 'y': 10} == depfile.get_values(reseted.name)
    assert get_md5('result') == depfile.get_result(reseted.name)
def test_UptodateCallable_added_attributes(self, pdep_manager):
    """UptodateCalculator instances get helper attributes before the call."""
    task_dict = "fake dict"
    class My_uptodate(UptodateCalculator):
        def __call__(self, task, values):
            # attributes were added to the object before call'ing it
            assert task_dict == self.tasks_dict
            assert self.get_val('t1', None) is None
            return True
    check = My_uptodate()
    t1 = Task("t1", None, uptodate=[check])
    assert 'up-to-date' == pdep_manager.get_status(t1, task_dict).status
def test_result(self, reporter, RunnerClass, dep_manager):
    """A successful run fills task.result/values and action out/err."""
    task = Task("taskY", [my_action])
    my_runner = RunnerClass(dep_manager, reporter)
    # before running: nothing captured yet
    assert task.result is None
    assert {} == task.values
    assert [None] == [a.out for a in task.actions]
    assert [None] == [a.err for a in task.actions]
    my_runner.run_tasks(TaskDispatcher({'taskY': task}, [], ['taskY']))
    assert runner.SUCCESS == my_runner.finish()
    assert {'bb': 5} == task.result
    assert {'bb': 5} == task.values
    assert ['out here'] == [a.out for a in task.actions]
    assert ['err here'] == [a.err for a in task.actions]
def test_ok(self, reporter, dep_manager):
    """Runner.teardown runs teardown actions and reports them."""
    touched = []
    def touch():
        touched.append(1)
    t1 = Task('t1', [], teardown=[(touch, )])
    my_runner = runner.Runner(dep_manager, reporter)
    my_runner.teardown_list = [t1]
    my_runner.teardown()
    assert 1 == len(touched)
    assert ('teardown', t1) == reporter.log.pop(0)
    assert not reporter.log
def test_continue_dont_execute_parent_of_failed_task(
        self, reporter, RunnerClass, dep_manager):
    """With continue_, a task whose task_dep failed is reported as failed
    without being executed, while unrelated tasks still run."""
    t1 = Task("t1", [(_error, )])
    t2 = Task("t2", [(ok, )], task_dep=['t1'])
    t3 = Task("t3", [(ok, )])
    my_runner = RunnerClass(dep_manager, reporter, continue_=True)
    disp = TaskDispatcher({'t1': t1, 't2': t2, 't3': t3}, [],
                          ['t1', 't2', 't3'])
    my_runner.run_tasks(disp)
    assert runner.ERROR == my_runner.finish()
    expected = [('start', t1), ('execute', t1), ('fail', t1),
                ('start', t2), ('fail', t2),  # t2 fails without executing
                ('start', t3), ('execute', t3), ('success', t3)]
    for event in expected:
        assert event == reporter.log.pop(0)
    assert 0 == len(reporter.log)
def test_errors(self, reporter, dep_manager):
    """Teardown failures are reported as cleanup_error, in reverse order."""
    def raise_something(x):
        raise Exception(x)
    t1 = Task('t1', [], teardown=[(raise_something, ['t1 blow'])])
    t2 = Task('t2', [], teardown=[(raise_something, ['t2 blow'])])
    my_runner = runner.Runner(dep_manager, reporter)
    my_runner.teardown_list = [t1, t2]
    t1.execute(my_runner.stream)
    t2.execute(my_runner.stream)
    my_runner.teardown()
    # teardown runs in reverse execution order; each failure logged
    assert ('teardown', t2) == reporter.log.pop(0)
    assert ('cleanup_error',) == reporter.log.pop(0)
    assert ('teardown', t1) == reporter.log.pop(0)
    assert ('cleanup_error',) == reporter.log.pop(0)
    assert not reporter.log
def test_file_dep(self, depfile, dependency1):
    """ResetDep records timestamp, size and md5 for each file dep."""
    my_task = Task("t2", [""], file_dep=['tests/data/dependency1'])
    output = StringIO()
    cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[my_task],
                           dep_manager=depfile)
    cmd_reset._execute()
    assert "processed t2\n" == output.getvalue()
    dep = list(my_task.file_dep)[0]
    timestamp, size, md5 = depfile._get(my_task.name, dep)
    assert get_file_md5(get_abspath("data/dependency1")) == md5
def test_full_task(self, reporter, dep_manager):
    """execute_task_subprocess can receive a full Task object."""
    run = runner.MRunner(dep_manager, reporter)
    t1 = Task('t1', [simple_result])
    task_q = Queue()
    task_q.put(runner.JobTask(t1))  # the job under test
    task_q.put(None)  # sentinel terminates the function
    result_q = Queue()
    run.execute_task_subprocess(task_q, result_q, reporter.__class__)
    run.finish()
    # results reported back through the queue
    assert result_q.get() == {'name': 't1', 'reporter': 'execute_task'}
    assert result_q.get()['task']['result'] == 'my-result'
    assert result_q.empty()
def test_opt_show_all(self, dependency1, depfile_name):
    """With show_all, strace output includes every traced syscall line."""
    output = StringIO()
    task = Task("tt", ["cat %(dependencies)s"],
                file_dep=['tests/data/dependency1'])
    cmd = Strace(outstream=output)
    cmd._loader.load_tasks = mock.Mock(return_value=([task], {}))
    params = DefaultUpdate(dep_file=depfile_name, show_all=True,
                           keep_trace=False, backend='dbm')
    assert 0 == cmd.execute(params, ['tt'])
    lines = output.getvalue().split("\n")
    assert "cat" in lines[0]
def test_waiting_node_updated(self):
    """Values from a finished calc_dep update nodes still waiting on it."""
    tasks = {
        't1': Task('t1', None, calc_dep=['t2'], task_dep=['t4']),
        't2': Task('t2', None),
        't3': Task('t3', None),
        't4': Task('t4', None),
    }
    td = TaskDispatcher(tasks, [], None)
    n1 = td._gen_node(None, 't1')
    n1_gen = td._add_task(n1)
    # t1 yields its calc_dep (t2), task_dep (t4), then waits
    n2 = next(n1_gen)
    assert 't2' == n2.task.name
    assert 't4' == next(n1_gen).task.name
    assert 'wait' == next(n1_gen)
    assert set() == n1.calc_dep
    assert td.waiting == set()
    # t2 finishes and contributes new calc_dep / task_dep values
    n2.run_status = 'done'
    n2.task.values = {'calc_dep': ['t2', 't3'], 'task_dep': ['t5']}
    assert n1.calc_dep == set()
    assert n1.task_dep == []
    td._update_waiting(n2)
    assert n1.calc_dep == set(['t3'])
    assert n1.task_dep == ['t5']
def testActionModifiesFiledep(self, reporter, RunnerClass, depfile_name):
    """An action may add entries to task.file_dep during execution."""
    extra_dep = os.path.join(os.path.dirname(__file__), 'sample_md5.txt')
    def action_add_filedep(task):
        task.file_dep.add(extra_dep)
    t1 = Task("t1", [(my_print, ["out a"]), action_add_filedep])
    my_runner = RunnerClass(Dependency, depfile_name, reporter)
    my_runner.run_tasks(TaskDispatcher({'t1': t1}, [], ['t1']))
    assert runner.SUCCESS == my_runner.finish()
    assert ('start', t1) == reporter.log.pop(0), reporter.log
    assert ('execute', t1) == reporter.log.pop(0)
    assert ('success', t1) == reporter.log.pop(0)
    assert t1.file_dep == set([extra_dep])
def test_getargs(self, reporter, RunnerClass, depfile_name):
    """getargs makes a task wait for (and receive) another task's values."""
    def use_args(arg1):
        six.print_(arg1)
    def make_args():
        return {'myarg': 1}
    t1 = Task("t1", [(use_args, )], getargs=dict(arg1=('t2', 'myarg')))
    t2 = Task("t2", [(make_args, )])
    my_runner = RunnerClass(Dependency, depfile_name, reporter)
    my_runner.run_tasks(TaskDispatcher({'t1': t1, 't2': t2}, [],
                                       ['t1', 't2']))
    assert runner.SUCCESS == my_runner.finish()
    # t1 starts first but only executes after t2 produced its values
    expected = [('start', t1),
                ('start', t2), ('execute', t2), ('success', t2),
                ('execute', t1), ('success', t1)]
    for event in expected:
        assert event == reporter.log.pop(0)
    assert 0 == len(reporter.log)
def test_less_processes(self, reporter, monkeypatch, dep_manager):
    """No more worker processes are spawned than there are tasks."""
    mock_process = Mock()
    monkeypatch.setattr(runner.MRunner, 'Child', mock_process)
    t1 = Task('t1', [])
    td = TaskDispatcher({'t1': t1}, [], ['t1'])
    run = runner.MRunner(dep_manager, reporter, num_process=2)
    run._run_tasks_init(td)
    result_q = Queue()
    task_q = Queue()
    proc_list = run._run_start_processes(task_q, result_q)
    run.finish()
    # only one task -> only one process despite num_process=2
    assert 1 == len(proc_list)
    assert t1.name == task_q.get().name
def test_targets(self, pdep_manager, dependency1):
    """A task with existing targets and saved deps is up-to-date."""
    filePath = get_abspath("data/target")
    with open(filePath, "w") as target_file:
        target_file.write("part1")
    deps = [dependency1]
    targets = [filePath]
    t1 = Task("task X", None, deps, targets)
    pdep_manager.save_success(t1)
    # up-to-date because target exists
    assert 'up-to-date' == pdep_manager.get_status(t1, {}).status
    assert [] == t1.dep_changed
def test_delayed_creation(self):
    """Delayed task creation keeps already-registered node info intact."""
    def creator():
        yield {'name': 'foo1', 'actions': None, 'file_dep': ['bar']}
        yield {'name': 'foo2', 'actions': None, 'targets': ['bar']}
    delayed_loader = DelayedLoader(creator, executed='t2')
    tasks = [
        Task('t0', None, task_dep=['t1']),
        Task('t1', None, loader=delayed_loader),
        Task('t2', None),
    ]
    control = TaskControl(tasks)
    control.process(['t0'])
    disp = control.task_dispatcher()
    gen = disp.generator
    nt2 = next(gen)
    assert nt2.task.name == "t2"
    # wait for t2 to be executed
    assert "hold on" == next(gen)
    assert "hold on" == next(gen)  # hold until t2 is done
    # delayed creation of tasks for t1 does not mess existing info
    assert disp.nodes['t1'].waiting_me == set([disp.nodes['t0']])
    nf2 = gen.send(nt2)
    assert disp.nodes['t1'].waiting_me == set([disp.nodes['t0']])
    assert nf2.task.name == "t1:foo2"
    nf1 = gen.send(nf2)
    assert nf1.task.name == "t1:foo1"
    assert nf1.task.task_dep == ['t1:foo2']  # implicit dep added
    nt1 = gen.send(nf1)
    assert nt1.task.name == "t1"
    nt0 = gen.send(nt1)
    assert nt0.task.name == "t0"
    pytest.raises(StopIteration, next, gen)
def test_UptodateNone(self, pdep_manager):
    """A None entry in uptodate is ignored; normal dep checks still apply."""
    filePath = get_abspath("data/dependency1")
    with open(filePath, "w") as dep_file:
        dep_file.write("part1")
    t1 = Task("t1", None, file_dep=[filePath], uptodate=[None])
    # first time: must run
    assert 'run' == pdep_manager.get_status(t1, {}).status
    assert [filePath] == t1.dep_changed
    # after a successful run it becomes up-to-date
    pdep_manager.save_success(t1)
    assert 'up-to-date' == pdep_manager.get_status(t1, {}).status
def test_save_file_md5(self, pdep_manager):
    """save_success stores (timestamp, size, md5) for each file dep."""
    # create a test dependency file
    filePath = get_abspath("data/dependency1")
    with open(filePath, "w") as dep_file:
        dep_file.write("i am the first dependency ever for doit")
    # save it
    t1 = Task("taskId_X", None, [filePath])
    pdep_manager.save_success(t1)
    expected = "a1bb792202ce163b4f0d17cb264c04e1"
    value = pdep_manager._get("taskId_X", filePath)
    assert os.path.getmtime(filePath) == value[0]  # timestamp
    assert 39 == value[1]  # size
    assert expected == value[2]  # MD5
def test_opt_show_all(self, dependency1, depfile_name):
    """show_all makes strace report all syscalls, not just deps/targets."""
    output = StringIO()
    task = Task("tt", ["cat %(dependencies)s"],
                file_dep=['tests/data/dependency1'])
    cmd = CmdFactory(Strace, outstream=output)
    cmd.loader = self.loader_for_task(task)
    params = DefaultUpdate(dep_file=depfile_name, show_all=True,
                           keep_trace=False, backend='dbm',
                           check_file_uptodate='md5')
    assert 0 == cmd.execute(params, ['tt'])
    lines = output.getvalue().split("\n")
    assert "cat" in lines[0]
def test_target(self, dependency1, depfile_name):
    """strace reports a write ('W') entry for the task target."""
    output = StringIO()
    task = Task("tt", ["touch %(targets)s"],
                targets=['tests/data/dependency1'])
    cmd = Strace(outstream=output)
    cmd._loader.load_tasks = mock.Mock(return_value=([task], {}))
    params = DefaultUpdate(dep_file=depfile_name, show_all=False,
                           keep_trace=False, backend='dbm')
    assert 0 == cmd.execute(params, ['tt'])
    lines = output.getvalue().split("\n")
    tgt_path = os.path.abspath("tests/data/dependency1")
    assert "W %s" % tgt_path in lines[0]
def test_addFailure(self):
    """Failure report includes message, traceback and CatchedException text."""
    rep = reporter.ConsoleReporter(StringIO(), {})
    try:
        raise Exception("original 中文 exception message here")
    except Exception as e:
        catched = CatchedException("catched exception there", e)
    rep.add_failure(Task("t_name", None), catched)
    rep.complete_run()
    got = rep.outstream.getvalue()
    # description
    assert "Exception: original 中文 exception message here" in got, got
    # traceback (matches the raise statement's source line)
    assert """raise Exception("original 中文 exception message here")""" in got
    # catched message
    assert "catched exception there" in got
def test_targetFolder(self, pdep_manager, dependency1):
    """A directory target only needs to exist to be considered up-to-date."""
    # folder not there: task is not up-to-date
    deps = [dependency1]
    folderPath = get_abspath("data/target-folder")
    if os.path.exists(folderPath):
        os.rmdir(folderPath)
    t1 = Task("task x", None, deps, [folderPath])
    pdep_manager.save_success(t1)
    assert 'run' == pdep_manager.get_status(t1, {}).status
    assert deps == t1.dep_changed
    # create folder: task is up-to-date
    os.mkdir(folderPath)
    assert 'up-to-date' == pdep_manager.get_status(t1, {}).status
    assert [] == t1.dep_changed
def test_ignore_stdout(self):
    """Output written outside any task ends up in the run-level out/err."""
    output = StringIO()
    rep = reporter.JsonReporter(output)
    sys.stdout.write("info that doesnt belong to any task...")
    sys.stderr.write('something on err')
    t1 = Task("t1", None)
    expected = {'t1': 'success'}
    rep.get_status(t1)
    rep.execute_task(t1)
    rep.add_success(t1)
    rep.complete_run()
    got = json.loads(output.getvalue())
    assert expected[got['tasks'][0]['name']] == got['tasks'][0]['result']
    assert "info that doesnt belong to any task..." == got['out']
    assert "something on err" == got['err']
def test_normal(self):
    """JsonReporter records the final status of each task."""
    output = io.StringIO()
    rep = reporter.JsonReporter(output)
    t1 = Task("t1", None)
    t2 = Task("t2", None)
    t3 = Task("t3", None)
    t4 = Task("t4", None)
    expected = {'t1': 'fail', 't2': 'up-to-date',
                't3': 'success', 't4': 'ignore'}
    # t1 fail
    rep.get_status(t1)
    rep.execute_task(t1)
    rep.add_failure(t1, CatchedException('t1 failed!'))
    # t2 skipped
    rep.get_status(t2)
    rep.skip_uptodate(t2)
    # t3 success
    rep.get_status(t3)
    rep.execute_task(t3)
    rep.add_success(t3)
    # t4 ignore
    rep.get_status(t4)
    rep.skip_ignore(t4)
    rep.teardown_task(t4)
    rep.complete_run()
    got = json.loads(output.getvalue())
    for task_result in got['tasks']:
        assert expected[task_result['name']] == task_result['result'], got
        if task_result['name'] == 't1':
            assert 't1 failed!' in task_result['error']
def test_dependency_error_after_execution(self, dep_manager):
    """A missing file_dep detected only after execution fails the task."""
    t1 = Task("t1", [(my_print, ["out a"])],
              file_dep=["i_dont_exist"], targets=['not_there'])
    reporter = FakeReporter(with_exceptions=True)
    my_runner = runner.Runner(dep_manager, reporter)
    # Missing file_dep is not caught before execution because the check
    # is short-circuited by the missing target.
    my_runner.run_tasks(TaskDispatcher({'t1': t1}, [], ['t1']))
    assert runner.ERROR == my_runner.finish()
    print(reporter.log)
    assert ('start', t1) == reporter.log.pop(0)
    assert ('execute', t1) == reporter.log.pop(0)
    fail_log = reporter.log.pop(0)
    assert ('fail', t1) == fail_log[:2]
    assert "Dependent file 'i_dont_exist' does not exist" in str(fail_log[2])
    assert not reporter.log
def test_dep(self, dependency1, depfile_name):
    """strace reports a read ('R') entry for the file dependency."""
    output = StringIO()
    task = Task("tt", ["cat %(dependencies)s"],
                file_dep=['tests/data/dependency1'])
    cmd = CmdFactory(Strace, outstream=output)
    cmd.loader.load_tasks = mock.Mock(return_value=([task], {}))
    params = DefaultUpdate(dep_file=depfile_name, show_all=False,
                           keep_trace=False, backend='dbm',
                           check_file_uptodate='md5')
    assert 0 == cmd.execute(params, ['tt'])
    lines = output.getvalue().split("\n")
    dep_path = os.path.abspath("tests/data/dependency1")
    assert "R %s" % dep_path in lines[0]
def test_regex_not_found(self):
    """Selecting by an unmatched target regex raises InvalidCommand."""
    def creator1():
        yield Task('foo1', None, targets=['tgt1'])
    delayed_loader1 = DelayedLoader(creator1, target_regex='tgt.*')
    t1 = Task('t1', None, loader=delayed_loader1)
    tc = TaskControl([t1])
    selection = tc._filter_tasks(['tgt666'])
    assert ['_regex_target_tgt666:t1'] == selection
    td = TaskDispatcher(tc.tasks, tc.targets, selection)
    n1 = td._gen_node(None, '_regex_target_tgt666:t1')
    gen = td._add_task(n1)
    # target not found after generating all tasks from the regex group
    pytest.raises(InvalidCommand, next, gen)
def test_runtime_error(self):
    """Runtime errors are collected and included in the final JSON 'err'."""
    output = StringIO()
    rep = reporter.JsonReporter(output)
    t1 = Task("t1", None)
    msg = "runtime error"
    assert [] == rep.errors
    rep.get_status(t1)
    rep.execute_task(t1)
    rep.add_success(t1)
    rep.runtime_error(msg)
    assert [msg] == rep.errors
    # NOTE(review): '"" in s' is vacuously true — presumably meant to
    # check nothing was written to outstream yet; confirm intent.
    assert "" in rep.outstream.getvalue()
    # runtime errors abort execution
    rep.complete_run()
    got = json.loads(output.getvalue())
    assert msg in got['err']
def test_cleanup_error(self, capsys):
    """cleanup_error messages are collected and reported in 'err'."""
    output = StringIO()
    rep = reporter.JsonReporter(output)
    t1 = Task("t1", None)
    msg = "cleanup error"
    exception = CatchedException(msg)
    assert [] == rep.errors
    rep.get_status(t1)
    rep.execute_task(t1)
    rep.add_success(t1)
    rep.cleanup_error(exception)
    assert [msg + '\n'] == rep.errors
    # NOTE(review): '"" in s' is vacuously true — presumably meant to
    # check nothing was written to outstream yet; confirm intent.
    assert "" in rep.outstream.getvalue()
    rep.complete_run()
    got = json.loads(output.getvalue())
    assert msg in got['err']
def test_target(self, dependency1, depfile_name):
    """strace reports a write ('W') entry for the task target."""
    output = StringIO()
    task = Task("tt", ["touch %(targets)s"],
                targets=['tests/data/dependency1'])
    cmd = CmdFactory(Strace, outstream=output)
    cmd.loader = self.loader_for_task(task)
    params = DefaultUpdate(dep_file=depfile_name, show_all=False,
                           keep_trace=False, backend='dbm',
                           check_file_uptodate='md5')
    assert 0 == cmd.execute(params, ['tt'])
    lines = output.getvalue().split("\n")
    tgt_path = os.path.abspath("tests/data/dependency1")
    assert "W %s" % tgt_path in lines[0]
def test_ignore_python_actions(self, dependency1, depfile_name):
    """Files touched by python (non-cmd) actions do not break tracing."""
    output = StringIO()
    def py_open():
        with open(dependency1) as ignore:
            ignore
    task = Task("tt", [py_open])
    cmd = Strace(outstream=output)
    cmd._loader.load_tasks = mock.Mock(return_value=([task], {}))
    params = DefaultUpdate(dep_file=depfile_name, show_all=False,
                           keep_trace=False, backend='dbm')
    assert 0 == cmd.execute(params, ['tt'])
def tasks_sample():
    """Return a list of sample tasks: group g1 with two subtasks plus t1-t3."""
    sample = [
        Task("t1", [""], doc="t1 doc string"),
        Task("t2", [""], file_dep=['tests/data/dependency1'],
             doc="t2 doc string"),
        Task("g1", None, doc="g1 doc string"),
        Task("g1.a", [""], doc="g1.a doc string", is_subtask=True),
        Task("g1.b", [""], doc="g1.b doc string", is_subtask=True),
        Task("t3", [""], doc="t3 doc string"),
    ]
    # g1 is the group task; it depends on its two subtasks
    sample[2].task_dep = ['g1.a', 'g1.b']
    return sample