def test_parse_first_node():
    # Tests the example embedded in parse_first_node's docstring: the text
    # between the first pair of ``` markers is an eval-able node instance,
    # and the text between the second pair is its textual form. Checks that
    # parsing, rendering, and re-parsing are all mutually consistent.
    test_stuff = parse_first_node.__doc__.split("```")
    tinst = eval(utils.dedent(test_stuff[1]))
    tstr = reflow(remove_comments(utils.dedent(test_stuff[2])))
    # Parse the textual form; it should consume the entire string:
    uinst, leftovers = parse_first_node(tstr)
    assert isinstance(uinst, StoryNode), (
        # Fixed: the closing fence here was "``" (a broken code fence).
        "Failed to parse a node from:\n```\n{}\n```".format(tstr)
    )
    assert leftovers.strip() == "", (
        "Parsed node had leftovers:\n```\n{}\n```".format(leftovers)
    )
    # Render both versions and re-parse the rendered original:
    rstr = render_node(tinst)
    urstr = render_node(uinst)
    ruinst, leftovers = parse_first_node(rstr)
    assert isinstance(ruinst, StoryNode), (
        # Fixed: same broken "``" fence as above.
        "Failed to re-parse a node from:\n```\n{}\n```".format(rstr)
    )
    assert leftovers.strip() == "", (
        "Re-parsed node had leftovers:\n```\n{}\n```".format(leftovers)
    )
    # Round-trip consistency checks:
    assert tinst == uinst, (
        (
            "Parsed Story doesn't match eval'd version:\n```\n{}\n```\n{}\n```"
            "\nDifferences:\n {}"
        ).format(str(tinst), str(uinst), "\n ".join(diff(tinst, uinst)))
    )
    assert rstr == urstr, (
        (
            "Rendered Story doesn't match re-rendered version:\n```\n{}\n```\n{}\n```"
            "\nDifferences:\n {}"
        ).format(
            str(rstr), str(urstr), "\n ".join(diff(rstr, urstr))
        )
    )
    assert tinst == ruinst, (
        (
            "Re-parsed Story doesn't match original:\n```\n{}\n```\n{}\n```"
            "\nDifferences:\n {}"
        ).format(
            str(tinst), str(ruinst), "\n ".join(diff(tinst, ruinst))
        )
    )
    return True
def _diff_(self, other):
    """ Reports differences (see diffable.py). """
    # Concatenate the labeled differences for each compared attribute.
    return [
        "goals: {}".format(d) for d in diff(self.goals, other.goals)
    ] + [
        "priorities: {}".format(d)
        for d in diff(self.priorities, other.priorities)
    ]
def test_packable():
    # Round-trip test based on the example in cls._pack_'s docstring: the
    # text between the first pair of ``` markers is an eval-able instance
    # and the second is its packed form; packing/unpacking must convert
    # between them consistently in both directions.
    def complaint(header, expected, actual):
        # Builds a detailed assertion message including differences.
        return (
            "{}:\n```\n{}\n```\n{}\n```\nDifferences:\n {}"
        ).format(
            header,
            str(expected),
            str(actual),
            "\n ".join(diff(expected, actual))
        )

    test_stuff = cls._pack_.__doc__.split("```")
    tinst = eval(utils.dedent(test_stuff[1]))
    tobj = eval(utils.dedent(test_stuff[2]))
    uinst = unpack(tobj, cls)
    pobj = pack(tinst)
    urec = unpack(pobj, cls)
    prec = pack(uinst)
    assert tinst == uinst, complaint(
        "Unpacked object doesn't match eval'd version", tinst, uinst
    )
    assert pobj == tobj, complaint(
        "Packed object doesn't match given", tobj, pobj
    )
    assert tinst == urec, complaint(
        "Pack/unpacked object doesn't match", tinst, urec
    )
    assert tobj == prec, complaint(
        "Unpack/packed object doesn't match", tobj, prec
    )
    return True
def _diff_(self, other):
    """ Reports differences (see diffable.py). """
    differences = []
    if self.name != other.name:
        differences.append(
            "names: '{}' != '{}'".format(self.name, other.name)
        )
    # Each remaining attribute is compared via diff and its results are
    # prefixed with a label; order matches the attribute listing below.
    compared = (
        ("decision_methods", self.decision_method, other.decision_method),
        ("modes", self.modes, other.modes),
        ("priority_methods", self.priority_method, other.priority_method),
        ("mode_rankings", self.mode_ranking, other.mode_ranking),
        ("mode_adjustments", self.mode_adjustments, other.mode_adjustments),
        ("goal_adjustments", self.goal_adjustments, other.goal_adjustments),
        ("goal_overrides", self.goal_overrides, other.goal_overrides),
    )
    for label, mine, theirs in compared:
        differences.extend(
            "{}: {}".format(label, d) for d in diff(mine, theirs)
        )
    return differences
def test_parse_story():
    # Checks that the example story in parse_story's docstring (between
    # ``` markers: an eval-able instance, then its textual form) survives
    # a parse/render/re-parse round trip.
    test_stuff = parse_story.__doc__.split("```")
    tinst = eval(utils.dedent(test_stuff[1]))
    tstr = utils.dedent(test_stuff[2])
    uinst = parse_story(tstr)
    rstr = render_story(tinst)
    urstr = render_story(uinst)
    ruinst = parse_story(rstr)
    # Each check is (expected, actual, failure-message header):
    checks = (
        (tinst, uinst, "Parsed Story doesn't match eval'd version"),
        (rstr, urstr, "Rendered Story doesn't match re-rendered version"),
        (tinst, ruinst, "Re-parsed Story doesn't match original"),
    )
    for expected, actual, header in checks:
        assert expected == actual, (
            "{}:\n```\n{}\n```\n{}\n```\nDifferences:\n {}".format(
                header,
                str(expected),
                str(actual),
                "\n ".join(diff(expected, actual))
            )
        )
    return True
def _diff_(self, other):
    """ Reports differences (see diffable.py). """
    # Start with the name mismatch (if any), then append labeled
    # differences for the options.
    result = (
        ["names: '{}' != '{}'".format(self.name, other.name)]
        if self.name != other.name
        else []
    )
    result += [
        "options: {}".format(d)
        for d in diff(self.options, other.options)
    ]
    return result
def _diff_(self, other): if self.title != other.title: return ["titles ('{}' =/= '{}')".format(self.title, other.title)] if self.author != other.author: return [ "authors ('{}' =/= '{}')".format(self.author, other.author) ] if self.nodes != other.nodes: return [ "nodes: {}".format(d) for d in diff(self.nodes, other.nodes) ] return []
def _diff_(self, other): results = [] if self.name != other.name: results.append("names ('{}' =/= '{}')".format( self.name, other.name)) if self.content != other.content: results.append("content ('{}' =/= '{}')".format( self.content, other.content)) if self.successors != other.successors: results.extend([ "successors: {}".format(d) for d in diff(self.successors, other.successors) ]) return results
def _diff_(self, other):
    """ Reports differences (see diffable.py). """
    # Compare each attribute pair in order, prefixing diff results with
    # the corresponding label.
    report = []
    for label, mine, theirs in (
        ("choices", self.choice, other.choice),
        ("options", self.option, other.option),
        ("outcomes", self.outcomes, other.outcomes),
        (
            "prospectives",
            self.prospective_impressions,
            other.prospective_impressions
        ),
        (
            "factored decision models",
            self.factored_decision_models,
            other.factored_decision_models
        ),
        ("goal relevance", self.goal_relevance, other.goal_relevance),
        (
            "retrospectives",
            self.retrospective_impressions,
            other.retrospective_impressions
        ),
        (
            "simplified retrospectives",
            self.simplified_retrospectives,
            other.simplified_retrospectives
        ),
    ):
        report.extend("{}: {}".format(label, d) for d in diff(mine, theirs))
    return report
def fmt_chronicle(chronicle):
    """
    Formats a debug chronicle as pure text.

    Each chronicle entry is a (node, text, state) triple. The first entry
    is printed with its full state; each subsequent entry is printed with
    the state *changes* relative to the entry before it.
    """
    if not chronicle:
        return "<no results>"
    result = "Node: {}\nText: '''\n{}\n'''\nState:\n{}\n".format(
        *chronicle[0]
    )
    last_state = chronicle[0][2]
    for entry in chronicle[1:]:
        result += "Node: {}\nText: '''\n{}\n'''\nState changes:\n {}\n".format(
            entry[0], entry[1], '\n '.join(diff(last_state, entry[2]))
        )
        # Bug fix: advance the baseline so each entry reports changes
        # relative to the previous entry's state; previously last_state
        # was never updated, so every entry was diffed against the
        # initial state.
        last_state = entry[2]
    return result
def _diff_(self, other):
    """ Reports differences (see diffable.py). """
    found = []
    # The name gets quoted in its message; the numeric-style fields below
    # are reported unquoted.
    if self.name != other.name:
        found.append("names: '{}' != '{}'".format(self.name, other.name))
    for field in ("salience", "apparent_likelihood", "actual_likelihood"):
        mine = getattr(self, field)
        theirs = getattr(other, field)
        if mine != theirs:
            found.append("{}: {} != {}".format(field, mine, theirs))
    found.extend(
        "goal_effects: {}".format(d)
        for d in diff(self.goal_effects, other.goal_effects)
    )
    return found
def test_bot_basics():
    """ Tests the most basic bot functionality. """
    # Three incoming tweets: a fresh "tell" command, a reply ("version")
    # to post ID 4, and a story request from a second user.
    # NOTE(review): line breaks inside the expected-output strings below
    # were reconstructed from a whitespace-mangled source — verify against
    # the bot's actual output before trusting exact formatting.
    queue = [
        fake_api.FakeTweet( # will be ID 1
            "tester",
            None,
            "@{} tell help #ignored".format(config.MY_HANDLE)
        ),
        fake_api.FakeTweet( # will be ID 2
            "tester",
            4,
            "version"
        ),
        fake_api.FakeTweet( # will be ID 3
            "tester2",
            None,
            "tell \"Help\" by Peter Mawhorter"
        )
    ]
    output = io.StringIO()
    # What the bot should print to stdout while processing the queue:
    expect_printed = """\
Initiating streaming connection...
From: tester
Content: @gathering_round tell help #ignored
Handling non-reply as a general command.
From: tester
Content: version
In reply to: 4
Handling as reply to node 'help' in "Help" by Peter Mawhorter.
From: tester2
Content: tell "Help" by Peter Mawhorter
Handling non-reply as a general command.
"""
    # What the bot should post through the (fake) API:
    expect_posted = """\
Id: 4
Replying to: 1
--------------------------------------------------------------------------------
@tester Firelight is an interactive story engine. Options appear in brackets.
Help topics: [version] [links]
🐵🦊🐴🐃
================================================================================
Id: 5
Replying to: 2
--------------------------------------------------------------------------------
@tester This is Firelight version 0.1.
[back]
🐒🦊🐴🐃
================================================================================
Id: 6
Replying to: 3
--------------------------------------------------------------------------------
@tester2 Firelight is an interactive story engine. Options appear in brackets.
Help topics: [version] [links]
🦍🦊🐴🐃
================================================================================
"""
    # Clean out the test database:
    rdb.reset_db("test/test.db")
    # create a fake API object
    fcore = fake_api.FakeTwitterAPI(queue, output, "test/test.db")
    # load test stories
    load_stories.load_stories_from_directory(
        fcore,
        "test/stories"
    )
    load_stories.load_stories_from_directory(
        fcore,
        "test/modules",
        as_modules=True
    )
    # run the bot through one processing loop, capturing stdout
    old_stdout = sys.stdout
    capture = io.StringIO()
    sys.stdout = capture
    bot.run_bot(fcore, loop=False)
    sys.stdout = old_stdout
    posted = output.getvalue()
    printed = capture.getvalue()
    # Compare captured stdout and posted tweets against expectations:
    assert printed == expect_printed, (
        (
            "Bot printed output differs from expected output:\n```\n{}\n```\n{}\n```"
            "\nDifferences:\n {}"
        ).format(
            printed,
            expect_printed,
            "\n ".join(diff(printed, expect_printed))
        )
    )
    assert posted == expect_posted, (
        (
            "Bot posted output differs from expected output:\n```\n{}\n```\n{}\n```"
            "\nDifferences:\n {}"
        ).format(
            posted,
            expect_posted,
            "\n ".join(diff(posted, expect_posted))
        )
    )
    return True
def the_test(): nonlocal fcn test_stuff = fcn.__doc__.split("```") native_module = sys.modules[fcn.__module__] native_context = native_module.__dict__ test_eval_true = [] test_cmp_equal = [] test_raises = [] for raw in test_stuff: test = raw.strip() if not test or test[0] not in ">?=x!": continue ttype = test[0] test = utils.dedent(test[1:]).strip() if ttype == '>': test_eval_true.append(test) elif ttype == '?': test_cmp_equal.append((test, [])) elif ttype == '=': if test_cmp_equal: test_cmp_equal[-1][1].append(test) else: test_cmp_equal.append((test, [])) elif ttype == 'x': test_raises.append((test, [])) elif ttype == '!': if test_raises: test_raises[-1][1].append(test) else: test_raises.append((test, [])) for test in test_eval_true: assert eval(test, native_context), "Eval test failed:\n" + test for test, against in test_cmp_equal: base = eval(test, native_context) for ag in against: agval = eval(ag, native_context) assert base == agval, ( ( "Test items not equal:\n```\n{}\n```\n{}\n```" "\nDifferences:\n {}" ).format( base, agval, "\n ".join(diff(base, agval)) ) ) for test, accept in test_raises: accept = tuple(eval(a, native_context) for a in accept) try: eval(test, native_context) assert False, "Test failed to raise an error. Expected:\n {}".format( utils.or_strlist(a.__name__ for a in alternatives) ) except accept: pass except Exception as e: assert False, "Test raised unexpected {} error.".format( e.__class__.__name__ ) return True