def test_process_predictions(self):
    """Running process_predictions must fire every lifecycle signal."""

    @smokesignal.on('prediction_finished')
    def on_prediction(scoresheet, dataset, predictor):
        assert scoresheet.startswith('scoresheet')
        assert predictor.startswith('pred')
        assert_equal(dataset, 'testing')
        on_prediction.called = True

    @smokesignal.on('dataset_finished')
    def on_dataset(dataset):
        assert_equal(dataset, 'testing')
        on_dataset.called = True

    @smokesignal.on('run_finished')
    def on_run():
        on_run.called = True

    on_prediction.called = on_dataset.called = on_run.called = False

    lp = linkpred.LinkPred(self.config_file())
    lp.predictions = [('pred1', 'scoresheet1'), ('pred2', 'scoresheet2')]
    lp.process_predictions()

    # Every registered handler must have been invoked.
    for handler in (on_prediction, on_dataset, on_run):
        assert handler.called
    smokesignal.clear_all()
def __init__(self, config: dict, dotbot: dict) -> None:
    """
    Initialize the chatbot engine.

    :param config: Configuration values for the instance.
    :param dotbot: Bot definition data for this engine instance.
    """
    self.config = config
    self.dotbot = dotbot

    # Collaborators that are resolved later during setup.
    self.dotdb = None
    self.logger = None
    self.cache = None
    self.bot = None

    # Per-request / per-session state, reset between requests.
    self.extensions = []
    self.pipeline = []
    self.is_fallback = False
    self.user_id = ''
    self.bot_id = ''
    self.org_id = ''
    self.logger_level = ''
    self.request = {}
    self.executed_functions = []
    self.functions_map = {}  # Registered template functions

    self.bbot = BBotFunctionsProxy(self)
    smokesignal.clear_all()  # this shouldnt be needed. @TODO build the whole obj hierarchy, store it in a static var and decouple request session data
def test_EvaluatingListener():
    """A prediction_finished emit should trigger an evaluation_finished event."""

    @smokesignal.on("evaluation_finished")
    def check(evaluation, dataset, predictor):
        assert dataset == "dataset"
        assert predictor == "predictor"
        assert isinstance(evaluation, EvaluationSheet)
        # Confusion-matrix counts at each of the four score thresholds.
        assert_array_equal(evaluation.tp, [1, 1, 2, 2])
        assert_array_equal(evaluation.fp, [0, 1, 1, 2])
        assert_array_equal(evaluation.fn, [1, 1, 0, 0])
        assert_array_equal(evaluation.tn, [2, 1, 1, 0])
        check.called = True

    check.called = False
    scoresheet = BaseScoresheet({1: 10, 3: 5, 2: 2, 4: 1})
    EvaluatingListener(relevant={1, 2}, universe={1, 2, 3, 4})
    smokesignal.emit(
        "prediction_finished",
        scoresheet=scoresheet,
        dataset="dataset",
        predictor="predictor",
    )
    assert check.called
    smokesignal.clear_all()
def test_process_predictions(self):
    """Running process_predictions must fire every lifecycle signal."""

    @smokesignal.on("prediction_finished")
    def on_prediction(scoresheet, dataset, predictor):
        assert scoresheet.startswith("scoresheet")
        assert predictor.startswith("pred")
        assert dataset == "testing"
        on_prediction.called = True

    @smokesignal.on("dataset_finished")
    def on_dataset(dataset):
        assert dataset == "testing"
        on_dataset.called = True

    @smokesignal.on("run_finished")
    def on_run():
        on_run.called = True

    on_prediction.called = on_dataset.called = on_run.called = False

    lp = linkpred.LinkPred(self.config_file())
    lp.predictions = [("pred1", "scoresheet1"), ("pred2", "scoresheet2")]
    lp.process_predictions()

    # Every registered handler must have been invoked.
    for handler in (on_prediction, on_dataset, on_run):
        assert handler.called
    smokesignal.clear_all()
def test_clear_all(self):
    """clear_all should empty the receiver set of every signal."""
    smokesignal.on(('foo', 'bar'), self.callback)
    for signal in ('foo', 'bar'):
        assert len(smokesignal._receivers[signal]) == 1
    smokesignal.clear_all()
    for signal in ('foo', 'bar'):
        assert len(smokesignal._receivers[signal]) == 0
def test_CacheEvaluationListener():
    """An emitted evaluation should be cached to disk and read back intact."""
    listener = CacheEvaluationListener()
    scores = BaseScoresheet({1: 10, 2: 5})
    evaluation = EvaluationSheet(scores, {1})
    smokesignal.emit('evaluation_finished', evaluation, 'd', 'p')
    # Round-trip: the cache file must reproduce the original data exactly.
    reloaded = EvaluationSheet.from_file(listener.fname)
    assert_array_equal(evaluation.data, reloaded.data)
    smokesignal.clear_all()
    os.unlink(listener.fname)
def test_CachePredictionListener():
    """An emitted scoresheet should be cached as tab-separated pairs."""
    listener = CachePredictionListener()
    scoresheet = BaseScoresheet({1: 10, 2: 5, 3: 2, 4: 1})
    smokesignal.emit("prediction_finished", scoresheet, "d", "p")
    expected = "1\t10\n2\t5\n3\t2\n4\t1\n"
    with open(listener.fname) as fh:
        # Line endings may be different across platforms
        cached = fh.read().replace("\r\n", "\n")
    assert cached == expected
    smokesignal.clear_all()
    os.unlink(listener.fname)
def test_CacheEvaluationListener():
    """An emitted evaluation should be cached to disk and read back intact."""
    listener = CacheEvaluationListener()
    scores = BaseScoresheet({1: 10, 2: 5})
    evaluation = EvaluationSheet(scores, {1})
    smokesignal.emit("evaluation_finished", evaluation, "d", "p")
    # Round-trip: the cache file must reproduce the original data exactly.
    reloaded = EvaluationSheet.from_file(listener.fname)
    assert_array_equal(evaluation.data, reloaded.data)
    smokesignal.clear_all()
    os.unlink(listener.fname)
def test_CachePredictionListener():
    """An emitted scoresheet should be cached as tab-separated pairs."""
    listener = CachePredictionListener()
    scoresheet = BaseScoresheet({1: 10, 2: 5, 3: 2, 4: 1})
    smokesignal.emit('prediction_finished', scoresheet, 'd', 'p')
    expected = "1\t10\n2\t5\n3\t2\n4\t1\n"
    with open(listener.fname) as fh:
        # Line endings may be different across platforms
        cached = fh.read().replace("\r\n", "\n")
    assert_equal(cached, expected)
    smokesignal.clear_all()
    os.unlink(listener.fname)
def test_clear_all(self):
    """clear_all should leave an empty receiver set for each known signal."""
    smokesignal.on(('foo', 'bar'), self.fn)
    expected_before = {
        'foo': {self.fn},
        'bar': {self.fn},
    }
    assert smokesignal.receivers == expected_before
    smokesignal.clear_all()
    expected_after = {
        'foo': set(),
        'bar': set(),
    }
    assert smokesignal.receivers == expected_after
def test_setup_output(self):
    """Each output name should produce the matching listener class."""
    # Make sure this also works if $DISPLAY is not set.
    # Should probably mock this out...
    cases = [
        ('recall-precision', RecallPrecisionPlotter),
        ('f-score', FScorePlotter),
        ('ROC', ROCPlotter),  # Should be able to handle uppercase
        ('fmax', FMaxListener),
        ('cache-evaluations', CacheEvaluationListener),
    ]
    for name, klass in cases:
        config = self.config_file(training=True, test=True, output=[name])
        lp = linkpred.LinkPred(config)
        lp.setup_output()
        assert_is_instance(lp.listeners[0], klass)
        smokesignal.clear_all()
        # Has an evaluator been set up?
        assert_equal(len(lp.evaluator.params['relevant']), 1)
        assert_equal(lp.evaluator.params['universe'], 2)
        assert_is_instance(lp.evaluator.params['universe'], int)
def test_setup_output(self):
    """Each output name should produce the matching listener class."""
    # Make sure this also works if $DISPLAY is not set.
    # Should probably mock this out...
    cases = [
        ('recall-precision', RecallPrecisionPlotter),
        ('f-score', FScorePlotter),
        ('ROC', ROCPlotter),  # Should be able to handle uppercase
        ('fmax', FMaxListener),
        ('cache-evaluations', CacheEvaluationListener),
    ]
    for name, klass in cases:
        config = self.config_file(training=True, test=True, output=[name])
        lp = linkpred.LinkPred(config)
        lp.setup_output()
        assert_is_instance(lp.listeners[0], klass)
        smokesignal.clear_all()
        # Has an evaluator been set up?
        assert_equal(len(lp.evaluator.params['relevant']), 1)
        assert_equal(lp.evaluator.params['universe'], 2)
        assert_is_instance(lp.evaluator.params['universe'], int)
def test_setup_output(self):
    """Each output name should produce the matching listener class."""
    # Make sure this also works if $DISPLAY is not set.
    # Should probably mock this out...
    cases = [
        ("recall-precision", RecallPrecisionPlotter),
        ("f-score", FScorePlotter),
        ("ROC", ROCPlotter),  # Should be able to handle uppercase
        ("fmax", FMaxListener),
        ("cache-evaluations", CacheEvaluationListener),
    ]
    for name, klass in cases:
        config = self.config_file(training=True, test=True, output=[name])
        lp = linkpred.LinkPred(config)
        lp.setup_output()
        assert isinstance(lp.listeners[0], klass)
        smokesignal.clear_all()
        # Has an evaluator been set up?
        assert len(lp.evaluator.params["relevant"]) == 1
        assert lp.evaluator.params["universe"] == 2
        assert isinstance(lp.evaluator.params["universe"], int)
def test_EvaluatingListener():
    """A prediction_finished emit should trigger an evaluation_finished event."""

    @smokesignal.on('evaluation_finished')
    def check(evaluation, dataset, predictor):
        assert_equal(dataset, 'dataset')
        assert_equal(predictor, 'predictor')
        assert_is_instance(evaluation, EvaluationSheet)
        # Confusion-matrix counts at each of the four score thresholds.
        assert_array_equal(evaluation.tp, [1, 1, 2, 2])
        assert_array_equal(evaluation.fp, [0, 1, 1, 2])
        assert_array_equal(evaluation.fn, [1, 1, 0, 0])
        assert_array_equal(evaluation.tn, [2, 1, 1, 0])
        check.called = True

    check.called = False
    scoresheet = BaseScoresheet({1: 10, 3: 5, 2: 2, 4: 1})
    EvaluatingListener(relevant={1, 2}, universe={1, 2, 3, 4})
    smokesignal.emit('prediction_finished', scoresheet=scoresheet,
                     dataset='dataset', predictor='predictor')
    assert check.called
    smokesignal.clear_all()
def teardown(self):
    """Remove all smokesignal receivers registered during the test."""
    smokesignal.clear_all()
def onStop(self):
    """Persist state and shut down cleanly when this component stops."""
    self.save()
    # Log the shutdown, identified by pdid and type (presumably a module
    # id and kind — confirm against the info/meta objects' definitions).
    output.out.usage('%s (%s) going down' % (self.info.pdid, self.meta.type))
    # Drop all signal receivers so nothing fires after shutdown.
    smokesignal.clear_all()
    output.out.endLogging()
def onStop(self):
    """Persist state and shut down cleanly when this component stops."""
    self.save()
    # Log the shutdown, identified by pdid (presumably a unique module
    # id — confirm against the info object's definition).
    output.out.usage('%s going down' % (self.info.pdid))
    # Drop all signal receivers so nothing fires after shutdown.
    smokesignal.clear_all()
    output.out.endLogging()
def on_game_over(self):
    """Mark the game as finished and detach every signal handler."""
    self._game_over = True
    # No further events should be handled once the game has ended.
    smokesignal.clear_all()
def tearDown(self):
    """Reset smokesignal's global receiver state between tests."""
    smokesignal.clear_all()