def evaluate_file(args, inconsistencies):
    print("Prediction file: ", args.prediction)
    print("Gold file: ", args.gold)
    pred_file = load_predictions(args)
    gold_file = pyconll.load_from_file(args.gold)
    if len(pred_file) != len(gold_file):
        print("Number of sentences does not match!")
        print("Prediction: {} Gold: {}".format(len(pred_file), len(gold_file)))
        return

    upos_evaluator = evaluator.Evaluator(mode="exact")
    xpos_evaluator = evaluator.Evaluator(mode="exact")
    feats_evaluator = evaluator.Evaluator(mode="by_feats")
    ufeats_evaluator = evaluator.Evaluator(mode="exact", only_univ=True)
    upos_feats_evaluator = evaluator.Evaluator(mode="by_feats")

    incons_count = 0
    token_count = 0
    for pred_sent, gold_sent in zip(pred_file, gold_file):
        if len(pred_sent) != len(gold_sent):
            print("Number of words in sentence does not match!")
            print("Prediction: {} Gold: {}".format(len(pred_sent), len(gold_sent)))
            print("Prediction:", pred_sent._meta)
            print("Gold:", gold_sent._meta)
            continue
        for pred_token, gold_token in zip(pred_sent, gold_sent):
            if args.upos:
                upos_evaluator.add_instance({POS: gold_token.upos}, {POS: pred_token.upos})
            if args.xpos:
                xpos_evaluator.add_instance({POS: gold_token.xpos}, {POS: pred_token.xpos})
            if args.feats:
                gold_feats = {x: ",".join(gold_token.feats[x]) for x in gold_token.feats}
                pred_feats = {x: ",".join(pred_token.feats[x]) for x in pred_token.feats}
                feats_evaluator.add_instance(gold_feats, pred_feats)
                ufeats_evaluator.add_instance(gold_feats, pred_feats)
                if args.upos:
                    if args.incons:
                        token_count += 1
                        if len(set(pred_feats.keys()) & inconsistencies[pred_token.upos]) > 0:
                            incons_count += 1
                    gold_feats.update({POS: gold_token.upos})
                    pred_feats.update({POS: pred_token.upos})
                    upos_feats_evaluator.add_instance(gold_feats, pred_feats)

    if upos_evaluator.instance_count > 0:
        print("UPOS accuracy {:.2f}%".format(100 * upos_evaluator.acc()))
    if xpos_evaluator.instance_count > 0:
        print("XPOS accuracy {:.2f}%".format(100 * xpos_evaluator.acc()))
    if feats_evaluator.instance_count > 0:
        print("FEATS micro-F1 {:.2f}%".format(100 * feats_evaluator.micro_f1()))
    if upos_feats_evaluator.instance_count > 0:
        print("UPOS+FEATS micro-F1 {:.2f}%".format(100 * upos_feats_evaluator.micro_f1()))
    if ufeats_evaluator.instance_count > 0:
        print("UFEATS accuracy {:.2f}%".format(100 * ufeats_evaluator.acc()))
    if token_count > 0:
        print("UFEATS inconsistencies {:.2f}%".format(100 * incons_count / token_count))
    print()

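# A minimal sketch of the argparse wiring evaluate_file() expects. The flag
# names below are assumptions inferred from the attributes the function reads
# (args.prediction, args.gold, args.upos, args.xpos, args.feats, args.incons);
# they are not part of the original script.
import argparse

def build_arg_parser():
    parser = argparse.ArgumentParser(
        description="Score predicted CoNLL-U morphology against a gold file.")
    parser.add_argument("--prediction", required=True, help="predicted CoNLL-U file")
    parser.add_argument("--gold", required=True, help="gold CoNLL-U file")
    parser.add_argument("--upos", action="store_true", help="report UPOS accuracy")
    parser.add_argument("--xpos", action="store_true", help="report XPOS accuracy")
    parser.add_argument("--feats", action="store_true", help="report FEATS micro-F1")
    parser.add_argument("--incons", action="store_true",
                        help="count UPOS/FEATS inconsistencies")
    return parser
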
def test_evaluator_external_server(logger):
    """
    Tests the evaluator against an external server, with and without skip_empty.
    """
    tester = evaluator.Evaluator(
        logger,                       # logger for the session
        None,                         # no internal censor
        "http://facebook.com",        # try to talk to facebook
        actions.utils.RUN_DIRECTORY,  # directory to log
        workers=1,                    # workers to use
        runs=1,                       # only need 1 run
        test_type="http",             # use http test
        skip_empty=False)             # don't skip empty strats
    population = [
        "\/",
    ]
    population = [actions.utils.parse(ind, logger) for ind in population]
    inds = tester.evaluate(population)
    assert inds[0].fitness == 400

    tester = evaluator.Evaluator(
        logger,                       # logger for the session
        None,                         # no internal censor
        "http://facebook.com",        # try to talk to facebook
        actions.utils.RUN_DIRECTORY,  # directory to log
        workers=1,                    # workers to use
        runs=1,                       # only need 1 run
        test_type="http",             # use http test
        skip_empty=True)              # skip empty strats
    population = [
        "\/",
    ]
    population = [actions.utils.parse(ind, logger) for ind in population]
    inds = tester.evaluate(population)
    assert inds[0].fitness == -1000

    tester = evaluator.Evaluator(
        logger,                       # logger for the session
        None,                         # no internal censor
        None,                         # no external server
        actions.utils.RUN_DIRECTORY,  # directory to log
        use_external_sites=True,
        workers=1,                    # workers to use
        runs=1,                       # only need 1 run
        test_type="http",             # use http test
        skip_empty=False)             # don't skip empty strats
    population = [
        "\/",
    ]
    population = [actions.utils.parse(ind, logger) for ind in population]
    inds = tester.evaluate(population)
    assert inds[0].fitness == 400

def main():
    wordbank2 = {
        'Det': ['the', 'this', 'that', 'each', 'every', 'another'],
        'Ord': ['first', 'second', 'last', 'middle', 'third', 'final', 'penultimate'],
        'Adj': ['green', 'blue', 'illiterate', 'cheerful', 'elated', 'ethereal',
                'transliterated', 'randy', 'greasy'],
        'SingNoun': ['duck', 'ant', 'egg', 'window', 'cereal', 'AI project', 'ulna',
                     'trenchcoat', 'denture'],
        'SingPropNoun': ['Jack', 'Jill', 'Fred', 'George', 'Doris', 'Illuminati',
                         'Big Brother', 'Arcady Ivanovich'],
        'AdvPlace': ['here', 'there', 'everywhere'],
        'AdvTimePres': ['today', 'Friday', 'this week', 'sometime'],
        'AdvTimePast': ['yesterday', 'last Wednesday', 'last month']
    }
    wordbank3 = {
        'Det': ['the', 'this', 'that', 'every', 'another', 'a single'],
        'Ord': ['fourth', 'eleventh', 'last', 'middle', 'third', 'final', 'penultimate'],
        'Adj': ['red', 'blue', 'winning', 'cheerful', 'elated', 'big', 'little',
                'scared', 'rare', 'mucosal', 'phallic'],
        'SingNoun': ['spider', 'bucket', 'steak', 'milk', 'computer', 'snowman',
                     'milksteak', 'funnel', 'babycarrot'],
        'SingPropNoun': ['Brendan', 'Ed', 'Jimmy', 'Hannah', 'Ryan', 'Helena',
                         'Svetlana', 'Pamela', 'Vladimir'],
        'AdvDeg': ['very', 'mostly', 'rarely', 'never']
    }

    with open('species2.pcfg') as f:
        skeleton_gram_str2 = f.read()
    with open('species3.pcfg') as f:
        skeleton_gram_str3 = f.read()
    for w in wordbank2:
        skeleton_gram_str2 = addterminals(skeleton_gram_str2, w, wordbank2[w])
    for w in wordbank3:
        skeleton_gram_str3 = addterminals(skeleton_gram_str3, w, wordbank3[w])

    user1 = environment.User('/u/_junebug_', None, True, None, True)
    prod1 = Producer(user1, skeleton_gram_str2, wordbank2)
    user1.producer = prod1
    user2 = environment.User('/u/_gstring_', None, True, None, True)
    prod2 = Producer(user2, skeleton_gram_str3, wordbank3)
    user2.producer = prod2
    user1.evaluator = evaluator.Evaluator(
        [A(), L(), T(), W(['spider', 'Ryan', 'Doris'], ['little', 'elated']), B(user1)],
        user1)
    user2.evaluator = evaluator.Evaluator(
        [A(), L(), T(), W(['Ed', 'steak', 'never'], ['funnel', 'milk', 'green']), B(user2)],
        user2)

    # Run 50 iterations, mutating both grammars every 10 (5 mutation cycles).
    posts = []
    for i in range(50):
        if i % 10 == 0:
            prod1.parent_grammar.mutate()
            prod2.parent_grammar.mutate()
        user1.evaluate_iteration(posts)
        user2.evaluate_iteration(posts)
        posts = []
        posts.append(prod1.parent_grammar.make_post('1', i // 10))
        posts.append(prod2.parent_grammar.make_post('2', i // 10))

def test_evaluator_external_dns_client(client_worker, logger):
    """
    Tests the evaluator with an external DNS client.
    """
    population = [
        "\/",
    ]
    population = [actions.utils.parse(ind, logger) for ind in population]
    cmd = [
        "--test-type", "dns",
        "--external-server",
        "--external-client", client_worker["worker"],
        "--log-on-fail",  # this test should not fail, so log if it does
        "--log", actions.utils.CONSOLE_LOG_LEVEL,
        "--no-skip-empty",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    tester = evaluator.Evaluator(cmd, logger)
    inds = tester.evaluate(population)
    assert len(inds) == 1
    assert str(inds[0]).strip() == "\/"

def test_evaluator_http_client_external_sites(logger):
    """
    Tests http plugin client.
    """
    with tempfile.TemporaryDirectory() as output_dir:
        cmd = [
            "--test-type", "http",
            "--external-server",
            "--use-external-sites",
            "--no-canary",
            "--log", actions.utils.CONSOLE_LOG_LEVEL,
            "--no-skip-empty",
            "--output-directory", output_dir
        ]
        tester = evaluator.Evaluator(cmd, logger)
        population = [
            "\/ [UDP:dport:100]-drop-|",  # strategy with an unused action tree
            "\/",
            "[TCP:flags:PA]-drop-|"       # strategy that will break TCP connection
        ]
        population = [actions.utils.parse(ind, logger) for ind in population]
        inds = tester.evaluate(population)
        assert len(inds) == 3
        assert inds[0].fitness == 389  # -10 for unused, -1 for size
        assert inds[1].fitness == 400
        assert inds[2].fitness == -480
        for ind in inds:
            assert os.path.exists(
                os.path.join(output_dir, "logs", ind.environment_id + ".client.log"))
            assert os.path.exists(
                os.path.join(output_dir, "logs", ind.environment_id + ".engine.log"))
            assert os.path.exists(
                os.path.join(output_dir, "flags", ind.environment_id + ".fitness"))

def test_evaluator_censor_log_on_debug(logger):
    """
    Tests censor log dumping on success and failure using the http test.
    """
    print("Test testing a failing strategy and a successful strategy, "
          "dumping logs on success and failure.")
    cmd = [
        "--test-type", "http",
        "--port", "80",
        "--censor", "censor2",
        "--log", actions.utils.CONSOLE_LOG_LEVEL,
        "--no-skip-empty",
        "--bad-word", "facebook",
        "--log-on-fail",
        "--log-on-success",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    try:
        tester = evaluator.Evaluator(cmd, logger)
        population = [
            "\/",
            "\/ [TCP:flags:R]-drop-|",
        ]
        population = [actions.utils.parse(ind, logger) for ind in population]
        inds = tester.evaluate(population)
        assert len(inds) == 2
    finally:
        print("Test shutting down any lingering containers.")
        common.clean_containers()

def main(start, end, interval, kfold):
    tpr_lists = []
    pth_lists = []
    tpr_lists_not_ign = []
    pth_lists_not_ign = []
    ckpts = range(start, end + 1, interval)
    best_score = 0
    best_epoch = 100  # default checkpoint if nothing improves on a zero score
    for i in ckpts:
        e = evaluator.Evaluator('val/%s/%s/bbox/test_%s' % (kfold, i, i),
                                'val',
                                'split/val_%s.npy' % (kfold),
                                ckpt=i, pbb_cutoff=0.5)
        # fp_per_scan, tprs, pths, score = e.froc(ignore=ignore_val)
        # tpr_lists.append(tprs)
        # pth_lists.append(pths)
        _, tprs_not_ign, pths_not_ign, score = e.froc()
        tpr_lists_not_ign.append(tprs_not_ign)
        pth_lists_not_ign.append(pths_not_ign)
        if score > best_score:
            best_score, best_epoch = score, i
        print(i, 'all:', score)
    print("Best score: ", best_score, " Best epoch: ", best_epoch)
    return best_epoch

def test_list(self):
    parser = futhon_parser.FuthonParser()
    env = environment.GlobalEnvironment({})
    ev = evaluator.Evaluator()
    # Placeholder assertion: the list-evaluation check has not been written yet,
    # so this test only verifies that the parser, environment, and evaluator
    # can be constructed.
    self.assertEqual("", "")

def test_evaluator_external_client(client_worker, logger):
    """
    Tests the evaluator with an external client.
    """
    population = [
        "\/",
    ]
    print(client_worker["worker"])
    population = [actions.utils.parse(ind, logger) for ind in population]
    cmd = [
        "--test-type", "http",
        "--port", "80",
        "--external-server",
        "--external-client", client_worker["worker"],
        "--server", "http://google.com",
        "--log-on-fail",  # this test should not fail, so log if it does
        "--no-canary",
        "--log", actions.utils.CONSOLE_LOG_LEVEL,
        "--no-skip-empty",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    tester = evaluator.Evaluator(cmd, logger)
    inds = tester.evaluate(population)
    assert len(inds) == 1
    assert str(inds[0]).strip() == "\/"
    assert inds[0].fitness == 400

def test_get_from_fuzzed_or_real(logger):
    """
    Tests utils.get_from_fuzzed_or_real_packet(environment_id, real_packet_probability).
    """
    # Create an evaluator
    cmd = [
        "--test-type", "echo",
        "--censor", "censor2",
        "--log", actions.utils.CONSOLE_LOG_LEVEL,
        "--no-skip-empty",
        "--bad-word", "facebook",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    tester = evaluator.Evaluator(cmd, logger)
    canary = evolve.generate_strategy(logger, 0, 0, 0, 0, None)
    environment_id = tester.canary_phase(canary)
    for _ in range(100):
        # real_packet_probability=1: always sample from a real captured packet
        proto, field, value = actions.utils.get_from_fuzzed_or_real_packet(
            environment_id, 1)
        assert proto
        assert field
        assert value is not None
        # real_packet_probability=0: always sample from a fuzzed packet
        proto, field, value = actions.utils.get_from_fuzzed_or_real_packet(
            environment_id, 0)
        assert proto
        assert field
        assert value is not None

def test_evaluator_skip_empty(logger):
    """
    Tests evaluator skip_empty flag.
    """
    population = [
        "\/",
    ]
    population = [actions.utils.parse(ind, logger) for ind in population]

    # Create an evaluator
    tester = evaluator.Evaluator(
        logger,                       # logger for the session
        "censor2",                    # internal censor
        None,                         # no external server
        actions.utils.RUN_DIRECTORY,  # directory to log
        workers=4,                    # workers to use
        runs=1,                       # only need 1 run
        test_type="echo",             # use echo test
        skip_empty=True)              # skip empty strats
    inds = tester.evaluate(population)
    assert len(inds) == 1
    assert str(inds[0]).strip() == "\/"
    assert inds[0].fitness == -1000

    tester.skip_empty = False
    inds = tester.evaluate(population)
    assert len(inds) == 1
    assert str(inds[0]).strip() == "\/"
    assert inds[0].fitness == -40

def calculate_file(filename):
    lines = []
    with open(filename, 'r') as f:
        for line in f:
            lines.append(parser.tokenizer(line))
    return evaluator.Evaluator(lines).run()

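# Hypothetical usage of calculate_file (the file name is illustrative): each
# line of the script is tokenized, then the whole program is run at once.
#
#     result = calculate_file("examples/sum.calc")
#     print(result)
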
def test_reset(self):
    """Test we can reset okay"""
    evaluator_ = evaluator.Evaluator()
    evaluator_.set_variable("x", 21.)
    self.assertAlmostEqual(evaluator_.evaluate("x"), 21.)
    evaluator_.reset()
    self.assertRaises(NameError, evaluator_.evaluate, "x")

def __init__(self, configfile=None):
    '''
    Initialization method of Choronzon. Reads the configuration,
    instantiates objects of the vital classes, and builds and analyzes
    the first generation of chromosomes by reading the initial
    population provided to the fuzzer.
    '''
    # configuration is a singleton
    self.configuration = configuration.Configuration(configfile)
    self.campaign = campaign.Campaign(self.configuration['CampaignName'])
    seedpath = self.campaign.copy_directory(
        self.configuration['InitialPopulation'], name='seedfiles')
    self.tracer = tracer.Tracer()
    self.strategy = strategy.FuzzingStrategy()
    self.population = world.Population(self.tracer.cache)
    self.evaluator = evaluator.Evaluator(self.tracer.cache)
    try:
        self.sharedpath = self.campaign.create_shared_directory(
            self.configuration['ChromosomeShared'])
    except Exception:  # the shared directory is optional
        self.sharedpath = None
    # Initialize the factory for building chromosomes
    # and the proxy for computing the fitness.
    chromosomes = chromosome.Factory.build(seedpath)
    for chromo in chromosomes:
        self.population.add_chromosome(chromo)
    self.analyze()

def test_evaluator_client_dns_test(client_worker, protocol, logger):
    """
    Tests DNS evaluation with an external client.
    """
    # Setup the population and test type
    test_type = "dns_tcp"
    if protocol == "udp":
        test_type = "dns"
    population = ["\/"]
    population = [actions.utils.parse(ind, logger) for ind in population]

    tester = evaluator.Evaluator(
        logger,                       # logger for the session
        None,                         # no internal censor
        None,                         # no external server
        actions.utils.RUN_DIRECTORY,  # directory to log
        workers=1,                    # workers to use
        runs=1,                       # only need 1 run for testing
        external_client=False,        # not driving an external client directly
        test_type=test_type,
        skip_empty=False)             # don't skip empty strats
    inds = tester.evaluate(population)
    assert len(inds) == 1
    assert str(inds[0]).strip() == "\/"
    assert inds[0].fitness > 0

def __init__(self):
    # Init from settings
    self.max_pop_size = int(settings.Get_Setting("max_pop_size"))
    self.mutation_rate = float(settings.Get_Setting("mutation_rate"))
    self.num_cross_points = int(settings.Get_Setting("number_crossover_points"))
    self.intra_swap_rate = int(settings.Get_Setting("intra_swap_rate"))

    # Init the rest...
    self.is_init = False
    self.average_fitness = 0
    self.current_pop = {}
    self.culled_pop = {}
    self.eval_cache = {}
    self.breeders = []
    self.offspring = []
    self.evaluators = []

    # Init the offspring and current population
    self.Genesis()

    # Init the evaluators
    num_procs = int(settings.Get_Setting("number_cpus"))
    eval_type = "smp"
    for _ in range(num_procs):
        self.evaluators.append(eval.Evaluator(eval_type))

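# A plausible settings file for the GA constructor above. The storage format
# is an assumption; only the keys read through settings.Get_Setting() are
# known from the code, and the values are illustrative.
#
#     max_pop_size = 100
#     mutation_rate = 0.05
#     number_crossover_points = 2
#     intra_swap_rate = 1
#     number_cpus = 4
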
def main():
    file_name = __get_file_name(sys.argv)
    file_ext = __get_file_extension(file_name)
    if file_ext != ".fyp":
        eh.print_and_exit("error: invalid file extension: must be .fyp")
    if not __file_exists(file_name):
        eh.print_and_exit("error: file does not exist!")
    with open(file_name) as _file:
        _lex = lexer.Lexer(' '.join(
            line.rstrip("\n") for line in _file if not __is_comment(line)))
    _tokens = _lex.tokenize().tokens
    is_test = False
    # Guard with > 2 so indexing _tokens[2] cannot raise IndexError.
    if len(_tokens) > 2 and _tokens[2] == "test":
        is_test = True
        _tokens = _tokens[0:2] + _tokens[3:]
    _parser = parser_.Parser(_tokens)
    _eval = evaluator.Evaluator(_parser, is_test)
    if _eval.tokens[0] != "begin":
        eh.print_and_exit("error: first function must be 'begin'!")
    _eval.eval()

def __init__(self, model):
    self.model = model
    self.reader = data_input.DataInput()
    self.split = split.Split(self.reader)
    self.evaluator = evaluator.Evaluator()
    self.writer = output.Output()
    self.model.setup(self.reader)

def test_set_variable(self):
    """Test we can set evaluator variables"""
    evaluator_ = evaluator.Evaluator()
    # check we can assign numbers
    evaluator_.set_variable("x", 21.)
    self.assertEqual(evaluator_.variables["x"], 21.)
    # check we throw an error if the value can't be coerced to float
    self.assertRaises(ValueError, evaluator_.set_variable, "x", "value")

def test_evaluator_worker_ip_lookup(logger):
    """
    Tests worker IP lookup by specifying a worker name instead of a public IP.
    """
    cmd = [
        "--test-type", "http",
        "--public-ip", "example",
        "--external-client", "example",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    test_evaluator = evaluator.Evaluator(cmd, logger)
    assert test_evaluator.public_ip == "0.0.0.0"

    cmd = [
        "--test-type", "http",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    test_evaluator = evaluator.Evaluator(cmd, logger)
    assert not test_evaluator.get_ip()

def __init__(self, **kargs):
    super().__init__(**kargs)
    self.data_queue = []
    self.model = None
    print("Load Evaluator")
    import evaluator  # imported lazily so the module is only loaded when needed
    print("Instantiate Evaluator")
    self.model = evaluator.Evaluator()
    print("Evaluator Loaded")

def __init__(self, network, training_data, validation_data):
    self.network = network
    self.aux = None
    self.training_data = training_data
    self.validation_data = validation_data
    self.evaluator = eva.Evaluator(self.training_data, self.validation_data)
    self.mu = 1
    super(Mac, self).__init__()

def test_evaluator_dns_client_external_server(logger, extra_args):
    """
    Tests the dns plugin client against an external server.
    """
    with tempfile.TemporaryDirectory() as output_dir:
        cmd = [
            "--test-type", "dns",
            "--external-server",
            "--log", actions.utils.CONSOLE_LOG_LEVEL,
            "--no-skip-empty",
            "--output-directory", output_dir
        ]
        cmd += extra_args
        tester = evaluator.Evaluator(cmd, logger)
        if "--use-tcp" not in cmd:
            population = [
                "\/ [UDP:dport:100]-drop-|",  # strategy with an unused action tree
                "\/",
                "[UDP:dport:53]-sleep{1}-|",
                "[UDP:dport:53]-drop-|",      # strategy that will break query
                "[UDP:dport:53]-tamper{DNS:qd:compress}-|"
            ]
        else:
            population = [
                "\/ [UDP:dport:100]-drop-|",  # strategy with an unused action tree
                "\/",
                "[TCP:flags:PA]-sleep{1}-|",
                "[TCP:flags:PA]-drop-|",      # strategy that will break query
                # "[TCP:flags:PA]-tamper{DNS:qd:compress}-|"  # Not implemented due to TCP protocol limitations
            ]
        population = [actions.utils.parse(ind, logger) for ind in population]
        inds = tester.evaluate(population)
        # Special case for UDP; parenthesized so the conditional binds to the
        # expected count rather than swallowing the whole comparison.
        assert len(inds) == (5 if "--use-tcp" not in cmd else 4)
        assert inds[0].fitness == 389  # -10 for unused, -1 for size
        assert inds[1].fitness == 400
        assert inds[2].fitness == 399  # -1 for size
        assert inds[3].fitness == -400
        if "--use-tcp" not in cmd:
            assert inds[4].fitness > 0
        for ind in inds:
            assert os.path.exists(
                os.path.join(output_dir, "logs", ind.environment_id + ".client.log"))
            assert os.path.exists(
                os.path.join(output_dir, "logs", ind.environment_id + ".engine.log"))
            if ind.fitness > 0:
                assert os.path.exists(
                    os.path.join(output_dir, "flags", ind.environment_id + ".dnsresult"))
            assert os.path.exists(
                os.path.join(output_dir, "flags", ind.environment_id + ".fitness"))

def test_evaluator_external_client_server_side(client_worker, logger, args):
    """
    Tests evaluator server side with external client.
    """
    if "http" in args or "--use-tcp" in args:
        population = [
            "\/ [UDP:dport:100]-drop-|",  # strategy with an unused action tree
            "\/",
            "[TCP:flags:SA]-drop-|"       # strategy that will break TCP connection
        ]
    else:
        population = [
            "\/ [UDP:dport:100]-drop-|",  # strategy with an unused action tree
            "\/",
            "\/ [UDP:dport:53]-drop-|"    # strategy that will break query
        ]
    population = [actions.utils.parse(ind, logger) for ind in population]
    cmd = [
        "--test-type", "http",
        "--external-client", client_worker["worker"],
        "--server-side",
        "--public-ip", get_ip(),
        "--timeout", "15",
        "--no-canary",
        "--log", actions.utils.CONSOLE_LOG_LEVEL,
        "--no-skip-empty",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    cmd += args
    tester = evaluator.Evaluator(cmd, logger)
    inds = tester.evaluate(population)
    assert len(inds) == 3
    assert inds[0].fitness == 389
    assert inds[1].fitness == 400
    assert inds[2].fitness < 0

    # Request a server side without specifying the public ip - should raise an exception
    cmd = [
        "--test-type", "http",
        "--port", "80",
        "--external-client", client_worker["worker"],
        "--server-side",
        "--no-canary",
        "--log", actions.utils.CONSOLE_LOG_LEVEL,
        "--no-skip-empty",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    with pytest.raises(AssertionError):
        tester = evaluator.Evaluator(cmd, logger)

def test_evaluator_init_nat(logger):
    """
    Sets up evaluator with NAT.
    """
    cmd = [
        "--test-type", "http",
        "--sender-ip", "1.1.1.1",
        "--forward-ip", "2.2.2.2",
        "--routing-ip", "3.3.3.3",
        "--output-directory", actions.utils.RUN_DIRECTORY
    ]
    test_evaluator = evaluator.Evaluator(cmd, logger)
    assert not test_evaluator.forwarder, "Evaluator set up a forwarder without --act-as-middlebox"

    cmd += ["--act-as-middlebox"]
    test_evaluator = evaluator.Evaluator(cmd, logger)
    assert test_evaluator.forwarder
    assert test_evaluator.forwarder["sender_ip"] == "1.1.1.1"
    assert test_evaluator.forwarder["forward_ip"] == "2.2.2.2"
    assert test_evaluator.forwarder["routing_ip"] == "3.3.3.3"

def run_test(logger, solution, censor, test_type, log_on_success=False, log_on_fail=False):
    """
    Tests a given solution against a given censor under a given test type
    using the given log level.
    """
    # Test if docker is running
    try:
        subprocess.check_output(['docker', 'ps'])
    except subprocess.CalledProcessError:
        pytest.fail("Docker is not running")
    try:
        # Parse the string representation of the solution
        strat = actions.utils.parse(solution, logger)
        logger.info("Parsed strategy %s" % (str(strat)))
        # Confirm the parsing was correct
        assert str(strat).strip() == solution, "Failed to correctly parse given strategy"
        logger.info("Testing %s" % censor)

        # Setup the external server to test with, if an http test is done
        external_server = "facebook.com" if test_type == "http" else None

        # Create an evaluator for the requested test type
        cmd = [
            "--test-type", test_type,
            "--censor", censor,
            "--log", actions.utils.CONSOLE_LOG_LEVEL,
            "--no-skip-empty",
            "--bad-word", "facebook",
            "--output-directory", actions.utils.RUN_DIRECTORY
        ]
        if external_server:
            cmd += ["--server", external_server]
        if log_on_success:
            cmd += ["--log-on-success"]
        if log_on_fail:
            cmd += ["--log-on-fail"]
        tester = evaluator.Evaluator(cmd, logger)

        # Use the fitness function to evaluate the strategy
        population = evolve.fitness_function(logger, [strat], tester)
        # Check that we got back the same number of individuals we gave
        assert len(population) == 1, "Population size changed"
        # Shutdown the evaluator
        tester.shutdown()
        # Retrieve the fitness from the individual
        return population[0].fitness
    finally:
        clean_containers()

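# Example invocation (values illustrative, assuming the "censor2" mock censor
# used elsewhere in this suite): score a single no-op strategy with the echo
# test and dump logs on failure.
#
#     fitness = run_test(logger, "\/", "censor2", "echo", log_on_fail=True)
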
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    evaluator_pb2_grpc.add_EvaluatorServicer_to_server(evaluator.Evaluator(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)

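# A hedged client-side counterpart to serve(): per grpc codegen conventions
# the generated stub should be evaluator_pb2_grpc.EvaluatorStub, but the RPC
# methods it exposes depend on the .proto definition, which is not shown here.
#
#     channel = grpc.insecure_channel("localhost:50051")
#     stub = evaluator_pb2_grpc.EvaluatorStub(channel)
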
def evaluate(self):
    """Sets up and shows the evaluation of the schedule."""
    evalworker = evaluator.Evaluator(self.mainLayout.tab.currentWidget())
    empty = evalworker.countEmptyPercentforColumns()
    # Map each column's empty percentage to a 0-10 constraint score.
    constr = [10 - int(pct / 10) for pct in empty]
    evald = EvaluationDialog(self.mainLayout.tab.currentWidget().getWeeknum(), constr)
    evald.exec_()

def __init__(self):
    if Computor.instance is None:
        Computor.instance = self
        self.__parser = Parser()
        self.__simplifier = Simplifier()
        self.__evaluator = evaluator.Evaluator()
        self.__solver = Solver()
        self.__vars = {}
        self.__funcs = {}
        self.__local_vars = []  # list of tuples (name, value), used as a stack
    else:
        raise ComputorException('Computor.instance already instantiated')

def test_evaluate(self):
    """Test we can evaluate expressions okay"""
    evaluator_ = evaluator.Evaluator()
    evaluator_.set_variable("x", 21.)
    value = evaluator_.evaluate("cos(pi*x)*m")
    self.assertAlmostEqual(value, -1. * xboa.Common.units["m"])
    value = evaluator_.evaluate("sin(pi*x)")
    self.assertAlmostEqual(value, 0.)
    self.assertRaises(NameError, evaluator_.evaluate, "sin(pi*y)")
    # check the evaluator still works after the failed lookup above
    value = evaluator_.evaluate("sin(pi*x)")
    self.assertAlmostEqual(value, 0.)

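# The Evaluator API exercised by test_reset, test_set_variable, and
# test_evaluate above, collected into one sketch (behavior as observed in
# those tests, not a new contract):
#
#     ev = evaluator.Evaluator()
#     ev.set_variable("x", 2.)    # floats only; ValueError otherwise
#     ev.evaluate("sin(pi*x)")    # math names and units (e.g. "m") are predefined
#     ev.reset()                  # forgets user variables; "x" now raises NameError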