Example No. 1
from math import exp

def simulatedAnnealing(initState, pawnAmountBW, temperature, decreaseRate,
                       iteration):
    current = initState
    evalCurrent = evaluate(current, pawnAmountBW)
    i = 0
    isLocalMinim = False
    while evalCurrent != 0 and i < iteration and not isLocalMinim:
        isLocalMinim = True
        allNeighbours = listAllNeighbour(current)
        for neighbour in allNeighbours:
            evalNeighbour = evaluate(neighbour, pawnAmountBW)
            if evalCurrent > evalNeighbour:
                current = neighbour
                evalCurrent = evalNeighbour
                isLocalMinim = False
            elif temperature != 0:
                # Metropolis acceptance: take a worse neighbour with
                # probability exp(-(evalNeighbour - evalCurrent) / temperature).
                probability = exp((evalCurrent - evalNeighbour) / temperature)
                if decision(probability):
                    current = neighbour
                    evalCurrent = evalNeighbour
                    isLocalMinim = False
        i += 1
        temperature *= decreaseRate / 100
    return current
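The snippet above calls a decision(probability) helper that is not shown. A minimal sketch of what it might look like, assuming it simply draws a uniform random number (the name and body are hypothetical, not taken from the original repository):

import random

def decision(probability):
    # Accept the move with the given probability (expected in [0, 1]).
    return random.random() < probability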
Example No. 2
def test_evaluate_too_many_arguments():
    expr = parse(tokenize("(/ 6 7 8)"))
    with raises(TooManyArguments) as excinfo:
        evaluate(expr)

    want = "Too many arguments for operator: '/'."
    assert want == str(excinfo.value)
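The test only checks the rendered message; the exception class itself is not shown. A hedged sketch of how TooManyArguments (and the sibling errors exercised in the later tests) could produce that exact string — the project's real errors module may differ:

class EvaluatorError(Exception):
    """Base class for evaluator errors (hypothetical)."""

class TooManyArguments(EvaluatorError):
    def __init__(self, op):
        # repr() adds the single quotes seen in the expected message.
        super().__init__(f"Too many arguments for operator: {op!r}.")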
Example No. 3
def evaluate(num_elves, input, solution):
    output = 'subm.csv'

    toy_file = os.path.join(os.getcwd(), input)
    soln_file = os.path.join(os.getcwd(), output)

    toys = get_toys(toy_file)

    result = solution.solve(toys)

    with open(soln_file, 'wb') as w:
        wcsv = csv.writer(w)
        wcsv.writerow(['ToyId', 'ElfId', 'StartTime', 'Duration'])
        for line in result:
            toy_id, elf_id, start_seconds, work_duration, rating = line
            ref_time = datetime.datetime(2014, 1, 1, 0, 0)
            # TODO: move this to hours.py:
            tt = ref_time + datetime.timedelta(seconds=60 * start_seconds)
            time_string = " ".join([
                str(tt.year),
                str(tt.month),
                str(tt.day),
                str(tt.hour),
                str(tt.minute)
            ])
            wcsv.writerow([toy_id, elf_id, time_string, work_duration, rating])

    evaluator.evaluate(num_elves, input, output)
Example No. 4
    def _call_and_eval_assembler(**kwargs):

        params = util.kwargs_to_params_dic(kwargs, param2str)
        if params is not None:
            lam = params.get('-lam', 0.5)
            params.pop('-lam', None)
        else:
            lam = 0.5

        assembler.run_assembler(assembler_name, paths['read_alignment'],
                                paths['assembly_gtf'], params)

        evaluator.evaluate(paths['ref_gtf'], paths['assembly_gtf'],
                           paths['eval_res_prefix'])

        metric_stat = evaluator.extract_stat(paths['eval_res_prefix'])

        #TODO(shunfu): find a good metric for BO
        if metric_stat == {}:
            metric = 0
        else:
            metric = evaluator.calc_metric(metric_stat, metric_type, lam)

        # res logs
        util.logging('assembler:' + assembler_name, paths['log'])
        util.logging(
            'params: default' if params is None else 'params:\n' + str(params),
            paths['log'])
        util.logging('metric stat is: %s' % str(metric_stat), paths['log'])
        util.logging(
            'metric (%s, lam=%.2f) is %f\n' % (metric_type, lam, metric),
            paths['log'])

        return metric
Example No. 5
def test_evaluate_missing_arguments():
    expr = parse(tokenize("(* 5)"))
    with raises(MissingArgument) as excinfo:
        evaluate(expr)

    want = "Not enough arguments for operator: '*'."
    assert want == str(excinfo.value)
Example No. 6
def main(args):

    # Load data and ground-truth
    train_data, train_labels, test_data, test_labels = database.load(
        standardized=True,
        printSize=True,
        train_data_path=args.train_data,
        train_labels_path=args.train_labels,
        test_data_path=args.test_data,
        test_labels_path=args.test_labels,
    )
    train_labels = train_labels.ravel()

    # Training
    model = algorithm.train(train_data, train_labels, args)

    # Logging scores on train and test sets
    logging.info("---Training set accuracy: %f" %
                 model.score(train_data, train_labels))
    logging.info("---Testing  set accuracy: %f" %
                 model.score(test_data, test_labels))

    predictions = algorithm.predict(test_data, model)

    # Evaluate the predictions
    evaluator.evaluate(predictions, test_data, test_labels, args.output_folder,
                       args.model, model)
Example No. 7
 def test_eval(self):
     testcase = '''
     (define zero (lambda (f) (lambda (x) x)))
     (define one (lambda (f) (lambda (x) (f x))))
     (define plus (lambda (m n) (lambda (f) (lambda (x) ((n f) ((m f) x))))))
     (define mult (lambda (m n) (lambda (f) (lambda (x) ((n (m f)) x)))))
     (define xp (lambda (m n) (lambda (f) (lambda (x) (((n m) f) x)))))
     (define pr (lambda (x) (begin (display x) x)))
     (define prn (lambda (n) (begin ((n pr) ".") (newline))))
     (define two (plus one one))
     (define three (plus two one))
     (define six (mult two three))
     (define sixty-four (xp two six))
     (prn sixty-four)
     '''
     stdout = resoup.globals.stdout
     sio = StringIO.StringIO()
     resoup.globals.stdout = sio
     env = Environment(builtin_env)
     for line in testcase.split('\n'):
         if line.strip() == '':
             continue
         evaluate(Parser().parse(Lexer().lex(line)), env)
     output = sio.getvalue()
     resoup.globals.stdout = stdout
     self.assertEqual('.' * 64, output.strip())
Example No. 8
 def testFunction(self):
     
     tree = [["function", "test", ["return", 2]], ["execute", "true"]]
     self.assertEqual(evaluator.evaluate(tree), 2)
     
     tree = [["function", "foo", [["var", "temp", 3], ["return", ['+', "temp", 2]]]], ["execute", "foo"]]
     self.assertEqual(evaluator.evaluate(tree), 5)
Example No. 9
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    if FLAGS.clean_dir:
        shutil.rmtree(FLAGS.eval_dir, ignore_errors=True)

    if FLAGS.pipeline_config_path:
        model_config, eval_config, input_config = get_configs_from_pipeline_file()
    else:
        model_config, eval_config, input_config = get_configs_from_multiple_files()

    if FLAGS.save_detection_results and FLAGS.detection_results_name == '':
        FLAGS.detection_results_name = os.path.join(FLAGS.eval_dir, 'detection_results.pkl')

    model_fn = functools.partial(
        model_builder.build,
        model_config=model_config,
        is_training=False)

    create_input_dict_fn = functools.partial(
        input_reader_builder.build,
        input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, input_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir, FLAGS.run_mode,
                       FLAGS.save_detection_results, FLAGS.detection_results_name)
Example No. 10
 def testConditionals(self):
     
     tree = ["if", ["true"], True, False]
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ["if", ["false"], True, False]
     self.assertFalse(evaluator.evaluate(tree))
Example No. 11
def test_evaluate_unknown_operator():
    expr = parse(tokenize("@"))
    with raises(UnknownOperator) as excinfo:
        evaluate(expr)

    want = "Unknown operator: '@'."
    assert want == str(excinfo.value)
Example No. 12
 def showdown(self, p1hc, p2hc, bc, pot, hand_string):
     p1_fullhand = copy.deepcopy(bc)
     p2_fullhand = copy.deepcopy(bc)
     p1_fullhand.extend(p1hc)
     p2_fullhand.extend(p2hc)
     p1_rank = evaluate(p1_fullhand)
     p2_rank = evaluate(p2_fullhand)
     if self.player:
         print("SHOWDOWN")
         print("Dealer's holecards: {0} {1}".format(p1hc[0].upper(),
                                                    p1hc[1].upper()))
         print("BB's holecards: {0} {1}".format(p2hc[0].upper(),
                                                p2hc[1].upper()))
     if p1_rank == p2_rank:
         msg = "Draw at showdown pot of {0} is split".format(pot)
         if self.player:
             print(msg)
         return [0, 0, "{0}\n{1}".format(hand_string, msg)]
     elif p1_rank < p2_rank:
         msg = "Dealer wins pot of {0} at showdown".format(pot)
         if self.player:
             print(msg)
         return [(pot / 2), -(pot / 2), "{0}\n{1}".format(hand_string, msg)]
     else:
         msg = "BB wins pot of {0} at showdown".format(pot)
         if self.player:
             print(msg)
         return [-(pot / 2), (pot / 2), "{0}\n{1}".format(hand_string, msg)]
Example No. 13
    def test(self):
        self.construct_graph()

        sess = tf.Session()
        saver = tf.train.Saver()
        if self.reload:
            saver.restore(sess, self.model_path)
#             print('model restored')
        else:
            sess.run(tf.global_variables_initializer())

        # test on validation set
        feed_dict = {
            self.pv_var: self.val_pv,
            self.wd_var: self.val_wd,
            self.gt_var: self.val_gt
        }
        val_loss, val_pre = sess.run((self.loss, self.pred), feed_dict)
        cur_valid_perf = evaluate(val_pre, self.val_gt, self.hinge)
        print('\tVal per:', cur_valid_perf, '\tVal loss:', val_loss)

        # test on testing set
        feed_dict = {
            self.pv_var: self.tes_pv,
            self.wd_var: self.tes_wd,
            self.gt_var: self.tes_gt
        }
        test_loss, tes_pre = sess.run((self.loss, self.pred), feed_dict)
        cur_test_perf = evaluate(tes_pre, self.tes_gt, self.hinge)
        #         print('\tTest per:', cur_test_perf, '\tTest loss:', test_loss)
        sess.close()
        tf.reset_default_graph()
Example No. 14
def main(unused_argv):
    if (FLAGS.omp > 0):
        if not os.environ.get("OMP_NUM_THREADS"):
            logging.info('OMP_NUM_THREADS value= %d', FLAGS.omp)
            os.environ["OMP_NUM_THREADS"] = str(FLAGS.omp)
        if not os.environ.get("KMP_BLOCKTIME"):
            logging.info('KMP_BLOCKTIME value= %d', FLAGS.blocktime)
            os.environ["KMP_BLOCKTIME"] = str(FLAGS.blocktime)
        if not os.environ.get("KMP_SETTINGS"):
            os.environ["KMP_SETTINGS"] = "1"
        # os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0"
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(
        model_builder.build,
        model_config=model_config,
        is_training=False)

    def get_next(config):
        return dataset_util.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir, intra_op=FLAGS.intra_op, inter_op=FLAGS.inter_op)
Example No. 15
def main(unused_argv):
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.gfile.Copy(FLAGS.pipeline_config_path,
                      os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                      overwrite=True)
    else:
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.gfile.Copy(config,
                          os.path.join(FLAGS.eval_dir, name),
                          overwrite=True)

    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']

    model_fn = functools.partial(model_builder.build,
                                 model_config=model_config,
                                 is_training=False)

    def get_next(config):
        return dataset_builder.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)

    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max([item.id for item in label_map.item])
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)

    if FLAGS.run_once:
        eval_config.max_evals = 1

    graph_rewriter_fn = None
    if 'graph_rewriter_config' in configs:
        graph_rewriter_fn = graph_rewriter_builder.build(
            configs['graph_rewriter_config'], is_training=False)

    evaluator.evaluate(create_input_dict_fn,
                       model_fn,
                       eval_config,
                       categories,
                       FLAGS.checkpoint_dir,
                       FLAGS.eval_dir,
                       graph_hook_fn=graph_rewriter_fn)
Example No. 16
def test_evaluate():

    ref_gtf = '/home/shunfu/boassembler/data/resRef/hg19_chr15-UCSC.gtf'
    assembly_gtf = '/home/shunfu/boassembler/res/stringtie.gtf'
    eval_res_prefix = '/home/shunfu/boassembler/res/stringtie_res'

    evaluator.evaluate(ref_gtf, assembly_gtf, eval_res_prefix)

    return
Example No. 17
    def run(self) -> None:
        """Train implicit recommenders."""
        train_point = np.load(f'../data/{self.data}/point/train.npy')
        val_point = np.load(f'../data/{self.data}/point/val.npy')
        test_point = np.load(f'../data/{self.data}/point/test.npy')
        prop = np.load(f'../data/{self.data}/point/prop.npy')
        num_users = int(train_point[:, 0].max() + 1)
        num_items = int(train_point[:, 1].max() + 1)
        if self.model_name in ['bpr', 'ubpr']:
            train = np.load(f'../data/{self.data}/{self.model_name}/train.npy')

        tf.set_random_seed(12345)
        ops.reset_default_graph()
        sess = tf.Session()
        if self.model_name in ['ubpr', 'bpr']:
            pair = PairwiseRecommender(num_users=num_users,
                                       num_items=num_items,
                                       dim=self.dim,
                                       lam=self.lam,
                                       eta=self.eta,
                                       beta=self.beta)
            pairwise_trainer(sess,
                             data=self.data,
                             model=pair,
                             train=train,
                             train_point=train_point,
                             test_point=test_point,
                             max_iters=self.max_iters,
                             batch_size=2**self.batch_size,
                             model_name=self.model_name)

        elif self.model_name == 'relmf':
            point = PointwiseRecommender(num_users=num_users,
                                         num_items=num_items,
                                         clip=self.clip,
                                         dim=self.dim,
                                         lam=self.lam,
                                         eta=self.eta)
            pointwise_trainer(sess,
                              data=self.data,
                              model=point,
                              train=train_point,
                              test=test_point,
                              propensity=prop,
                              max_iters=self.max_iters,
                              batch_size=2**self.batch_size,
                              model_name=self.model_name)

        evaluate(data=self.data,
                 train=train_point,
                 val=val_point,
                 test=test_point,
                 propensity=prop,
                 model_name=self.model_name,
                 rare=500,
                 k=[1, 3, 5])
Example No. 18
def cond(args, env):
    if len(args) != 3:
        throw_error("syntax", "Incorrect use of (if ...): must take exactly three arguments (a test, a pass case, and a fail case).")
    test = ev.evaluate(args[0], env)
    if type(test) != bool:
        throw_error("type", "Incorrect use of (if ...): the test must evaluate to a boolean.")
    if test:
        return ev.evaluate(args[1], env)
    else:
        return ev.evaluate(args[2], env)
Example No. 19
 def test_straight(self):
     # Test straight
     cards_1 = [[0, 1], [1, 2], [3, 3], [2, 4], [3, 5]]
     cards_2 = [[0, 13], [3, 12], [1, 11], [2, 9], [3, 10]]
     cards_1_score = evaluator.evaluate(cards_1)
     cards_2_score = evaluator.evaluate(cards_2)
     print("Straight:", cards_1_score, cards_2_score)
     self.assertEqual(6, len(cards_1_score))
     self.assertEqual(
         -1, evaluator.evaluate_score(cards_1_score, cards_2_score))
Example No. 20
 def test_three_of_a_kind(self):
     # Test 3 of a kind
     cards_1 = [[0, 2], [1, 2], [3, 2], [2, 8], [3, 10]]
     cards_2 = [[0, 9], [3, 5], [1, 5], [2, 5], [3, 10]]
     cards_1_score = evaluator.evaluate(cards_1)
     cards_2_score = evaluator.evaluate(cards_2)
     print("Three of a kind:", cards_1_score, cards_2_score)
     self.assertEqual(6, len(cards_1_score))
     self.assertEqual(
         -1, evaluator.evaluate_score(cards_1_score, cards_2_score))
Example No. 21
 def test_high_card(self):
     # Test high card
     cards_1 = [[0, 2], [1, 3], [1, 5], [2, 8], [3, 10]]
     cards_2 = [[0, 3], [1, 6], [1, 5], [2, 8], [3, 10]]
     cards_1_score = evaluator.evaluate(cards_1)
     cards_2_score = evaluator.evaluate(cards_2)
     print("High card:", cards_1_score, cards_2_score)
     self.assertEqual(6, len(cards_1_score))
     self.assertEqual(
         -1, evaluator.evaluate_score(cards_1_score, cards_2_score))
Example No. 22
 def test_evaluator(self):
     n = NumberLit(2)
     m = NumberLit(3)
     self.assertEqual(evaluate(n), 2)
     self.assertEqual(evaluate(m), 3)
     e1 = BinaryOp(n, Op.Plus, m)
     self.assertEqual(evaluate(e1), 5)
     e2 = BinaryOp(n, Op.Mult, m)
     self.assertEqual(evaluate(e2), 6)
     e3 = BinaryOp(e1, Op.Mult, e1)
     self.assertEqual(evaluate(e3), 25)
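The NumberLit, BinaryOp, and Op types this test relies on are not included in the example. A self-contained sketch that would make the assertions pass (the names follow the test; the project's actual definitions may differ):

from dataclasses import dataclass
from enum import Enum

class Op(Enum):
    Plus = '+'
    Mult = '*'

@dataclass
class NumberLit:
    value: int

@dataclass
class BinaryOp:
    left: object
    op: Op
    right: object

def evaluate(expr):
    # Fold the expression tree bottom-up into a number.
    if isinstance(expr, NumberLit):
        return expr.value
    left, right = evaluate(expr.left), evaluate(expr.right)
    return left + right if expr.op is Op.Plus else left * right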
Example No. 23
def main():
    config = get_args()

    if config.mode == 'eval':
        evaluate(config)
    else:
        datasets = get_data(config)

        model = get_model(config)
        trainer = Trainer(config, datasets)
        trainer.train(model)
Example No. 24
def main():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))
    init_directories()

    while True:
        model = load_latest_model()
        best_model = load_best_model()
        evaluate(best_model, model)
        train(model, game_model_name=best_model.name)
        K.clear_session()
Example No. 25
def main():
    print("Starting run (v{})".format(__version__))
    init_directories()
    model_name = "model_1"
    model = create_initial_model(name=model_name)

    while True:
        model = load_latest_model()
        best_model = load_best_model()
        train(model, game_model_name=best_model.name)
        evaluate(best_model, model)
        K.clear_session()
Example No. 26
def test_evaluate_user_function_missing_argument(mod_body):
    # backup function_definitions
    import evaluator
    initial_fundefs = evaluator.function_definitions
    evaluator.function_definitions = {}
    # test
    define_function(('mod', ['m', 'n'], mod_body))
    ast = ['mod', 19]
    with raises(errors.MissingArgument) as excinfo:
        evaluate({}, ast)
    assert "Missing argument: 'mod' needs 2." == str(excinfo.value)
    # restore function_definitions
    evaluator.function_definitions = initial_fundefs
Example No. 27
def main():

    init_directories()
    model_name = "model_1"
    model = create_initial_model(name=model_name)

    while True:
        model = load_latest_model()
        best_model = load_model(os.path.join(conf['MODEL_DIR'],
                                             conf['BEST_MODEL']),
                                custom_objects={'loss': loss})
        train(model, game_model_name=best_model.name)
        evaluate(best_model, model)
Example No. 28
def hillClimbing(initState, pawnAmountBW):
  current = initState
  evalCurrent = evaluate(current, pawnAmountBW)
  isLocalMinim = False
  while (evalCurrent != 0 and not isLocalMinim):
    isLocalMinim = True
    AllNeighbour = listAllNeighbour(current)
    for neighbour in AllNeighbour:
      if (evalCurrent > evaluate(neighbour, pawnAmountBW)):
        isLocalMinim = False
        current = neighbour
        evalCurrent = evaluate(neighbour, pawnAmountBW)
  return current
Example No. 29
 def setUp(self):
     init_directories()
     model_name = "model_1"
     model = create_initial_model(name=model_name)
     best_model = load_best_model()
     if best_model.name == model.name:
         train(model, game_model_name=best_model.name)
         evaluate(best_model, model)
         # We save whether or not it was a better model
         full_filename = os.path.join(conf['MODEL_DIR'], conf['BEST_MODEL'])
         model.save(full_filename)
     else:
         model = best_model
     self.model = model
Example No. 30
def check_gold(Hs, cipher_text):
    """
    On each iteration, check the current best solutions (to see at which
    step the solution is pruned).
    :param Hs: hypotheses as (mappings, score) pairs
    :param cipher_text:
    :return: max accuracy and the corresponding deciphered text
    """
    max_acc = 0
    deciphered_text = None
    for mappings, sc in Hs:
        deciphered = decipher(cipher_text, mappings)
        acc = evaluator.evaluate(deciphered)
        if max_acc < acc:
            max_acc = acc
            deciphered_text = deciphered
    return max_acc, deciphered_text
Example No. 32
    def check_gwm(self, raw_hit_chance, hit_chance, die, attacks, dmg_str):
        p_chance = None
        new_dmg_str = None
        if "gwm" in self.feats or "gwm2" in self.feats:
            penalty = 5 if "gwm" in self.feats else 2
            bonus_dmg = 10 if "gwm" in self.feats else (
                "%.4f" %
                evaluate("d%s" % die.split("d")[-1], self.fighting_style))
            p_chance = min(1, (raw_hit_chance * 20 - penalty) / 20.0)
            p_attack = "(%s*(%s+%s+%s%s))" % (p_chance, die,
                                              self.attack_mod + self.dmg_bonus,
                                              bonus_dmg, self.smite)
            n_attack = "(%s*(%s+%s%s))" % (hit_chance, die, self.attack_mod +
                                           self.dmg_bonus, self.smite)
            np_attack = "%s+%s" % (p_attack, n_attack)
            p_dpr = self.evaluate_dmg_str("%s*(%s)" % (attacks, p_attack))
            np_dpr = self.evaluate_dmg_str(np_attack) if attacks > 1 else 0
            n_dpr = self.evaluate_dmg_str("%s*(%s)" % (attacks, n_attack))
            maxdpr = max(p_dpr, np_dpr, n_dpr)
            if maxdpr == p_dpr:
                new_dmg_str = "%s*%s" % (attacks, p_attack)
            elif maxdpr == np_dpr:
                new_dmg_str = np_attack
                p_chance = (hit_chance + p_chance) / 2.0
            else:
                new_dmg_str = "%s*%s" % (attacks, n_attack)
                p_chance = hit_chance

        if dmg_str and new_dmg_str:
            new_dmg_str = "%s+%s" % (dmg_str, new_dmg_str)
        if new_dmg_str:
            self.dmg_str = new_dmg_str
        return p_chance
Example No. 33
    def roll(self):

        while True:

            try:
                line = read_form()
            except (KeyboardInterrupt, EOFError):
                print("\nMoriturus te saluto.")
                exit(0)

            tokens = scan_lexer.tokenize(line)

            ast, balance = scan_lexer.prstree_balance(tokens)

            if balance < 0:
                print("Exception: Cannot close more than opened parentheses")
                continue
            elif balance > 0:
                print("Exception: All opened parentheses must be closed")
                continue

            # Replace the string format
            ast = str_format(ast)

            for expr in ast:
                # Print the evaluation of each expression
                try:
                    print(evaluator.evaluate(expr))
                except Exception as e:
                    print(str(e))
Example No. 34
def grade(args):
    if not check_project_folder():
        return

    import evaluator as ev

    base_path = get_base_path(args)
    grader_path = os.path.join(base_path, 'grader.txt')
    order_path = os.path.join(base_path, 'order.json')
    result_path = os.path.join(base_path, 'results.json')

    if os.path.exists(result_path) and not args.force:
        error('Test already evaluated. Pass --force to overwrite.')
        return

    if not os.path.exists(order_path):
        error('No order.json file found. Cannot evaluate.')
        return

    if os.path.exists(grader_path):
        grades = ev.evaluate(grader_path, order_path)
    else:
        warn('No grader.txt sheet was found. Will only generate stats.')
        grades = None

    stats = ev.get_stats(order_path)

    with open(result_path, 'w') as fp:
        json.dump({"grades": grades, "stats": stats}, fp, indent=4)
Example No. 35
 def anonymous(*arguments):
     # print("inside anonymous function")
     # print("arguments(" + str(len(arguments)) + "):", arguments)
     if len(arguments) != len(largs):
         throw_error("syntax", "This function takes " + str(len(largs)) + " arguments (" + str(len(arguments)) + " provided).")
     lenv = Environment(name="anon_fn", outer=env, variables=largs, values=arguments)
     return ev.evaluate(lbody, lenv)
Example No. 36
def repl(input_fn=input):
    """Read-Eval-Print-Loop"""
    print(f'To exit, type {QUIT_COMMAND}', file=sys.stderr)

    while True:
        # ___________________________________________ Read
        try:
            line = input_fn('> ')
        except EOFError:
            break
        if line == QUIT_COMMAND:
            break
        if not line:
            continue

        # ___________________________________________ Eval
        current_exp = parse_exp(tokenize(line))
        if isinstance(current_exp, list) and current_exp[0] == 'define':
            result = define_function(current_exp[1:])
        else:
            try:
                result = evaluate({}, current_exp)
            except (errors.UndefinedVariable, errors.UndefinedFunction) as exc:
                print('***', exc)
                continue

        # ___________________________________________ Print
        print(result)
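Because the loop reads through the injectable input_fn parameter rather than calling input directly, it can be driven from a script. A hypothetical example (QUIT_COMMAND comes from the module shown above):

# Feed the REPL two scripted lines: one expression, then the quit command.
lines = iter(['(* 2 3)', QUIT_COMMAND])
repl(input_fn=lambda prompt: next(lines))  # prints 6, then exits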
Example No. 37
def test_expression(ast, expected):
    got = evaluate(ast)
    assert type(got) is type(expected)
    if isinstance(expected, int):
        assert expected == got
    else:
        assert isclose(expected, got, rel_tol=.01)
Example No. 38
def define(args, env):
    if len(args) != 2:
        throw_error("syntax", "Incorrect use of (define ...): must take exactly two arguments.")
    assert_or_throw(args[0]['type'] == 'symbol', "type", "Incorrect use of (define ...): the variable must be a symbol.")
    variable = args[0]['value']
    value = ev.evaluate(args[1], env)
    env.set(variable, value)
    return value
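The args[0]['type'] and args[0]['value'] lookups imply that AST nodes are plain dicts. A hypothetical node list for (define x 42), inferred from this access pattern rather than taken from the project's parser:

# What `args` might look like when evaluating (define x 42):
args = [
    {'type': 'symbol', 'value': 'x'},
    {'type': 'number', 'value': 42},
]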
Example No. 39
def do(args, env):
    do_env = Environment(name="do", outer=env)
    if len(args) == 0:
        throw_error("syntax", "Incorrect use of (do ...): must take at least one argument.")
    result = None
    for a in args:
        result = ev.evaluate(a, do_env)
    return result
Example No. 40
    def testArithmetic(self):

        tree = ["+", 1, 3]
        self.assertEqual(evaluator.evaluate(tree), 4)

        tree = ["-", 3, 1]
        self.assertEqual(evaluator.evaluate(tree), 2)

        tree = ["*", 3, 1]
        self.assertEqual(evaluator.evaluate(tree), 3)

        tree = ["/", 3, 1]
        self.assertEqual(evaluator.evaluate(tree), 3)

        #compound operations
        tree = ["+", ["*", 4, 2], ["/", 16, 8]]
        self.assertEqual(evaluator.evaluate(tree), 10)
Example No. 41
def interpret(source, env=None):
    """
    Interpret a lisp program statement

    Accepts a program statement as a string, interprets it, and then
    returns the resulting lisp expression as string.
    """
    if env is None:
        env = Environment()

    return unparse(evaluate(parse(source), env))
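A hypothetical use of interpret, assuming the surrounding parse/evaluate/unparse pipeline behaves as the docstring describes (the exact rendering depends on unparse):

env = Environment()
interpret("(define x 3)", env)
print(interpret("(+ x 4)", env))  # expected to print the string "7"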
Example No. 42
def start():
    """ setting up the repl loop """
    # load std lib
    init()

    while True:

        input_string = raw_input('<scheme>: ')

        # create a stream object from string
        read_stream = utilities.StringStream(input_string)
        write_stream = utilities.StringStream()

        # retrieve parsed string as list from reader
        parsed_scheme = reader.read_from_stream(read_stream)

        # hand list over to eval
        evaluator.evaluate(parsed_scheme, write_stream)

        print write_stream.get_stream()
Example No. 43
def run_block(args, context):
    block = args["block"]
    
    if type(block) != obj.Block:
        return err("the $block parameter in `run $block` must be of type <block>")
    
    if len(block.params) > 0:
        return err("since no arguments are provided, $block of `run $block` must have no parameters")
    
    ctx = context.enclose()
    return evaluate(block.body, ctx)
Example No. 44
def run_block_with_args(args, context):
    block = args["block"]
    b_args = args["args"].get_elements()
    
    if len(block.params) != len(b_args):
        return err("the amount of arguments provided in `run $block with $args` should match the number of parameters in the block")
    
    params = [param.value for param in block.params]
    args_dictionary = dict(zip(params, b_args))
    
    ctx = context.enclose_with_args(args_dictionary)
    return evaluate(block.body, ctx)
Example No. 45
def execute(text, print_result, ctx):
    tokens = l.lex(text)
    parser = p.Parser(tokens)
    program = parser.parse_program()
        
    if len(parser.errors) > 0:
        parser.print_errors()
    else:
        result = e.evaluate(program, ctx)
        
        if (print_result and type(result) != o.Null) or type(result) == o.Error:
            print(result)
Example No. 46
def interpret_file(filename, env=None):
    """
    Interpret a lisp file

    Accepts the name of a lisp file containing a series of statements. 
    Returns the value of the last expression of the file.
    """
    if env is None:
        env = Environment()

    with open(filename, 'r') as sourcefile:
        source = sourcefile.read()

    asts = parse_multiple(source)
    results = [evaluate(ast, env) for ast in asts]
    return unparse(results[-1])
Example No. 47
def run_stats(args):
    import evaluator

    rev_map = {}
    for k, v in fake_metadata.items():
        rev_map[v["participant_id"]] = k

    basedir = os.path.dirname(os.path.dirname(__file__))
    exome_dir = os.path.join(basedir, "testexomes")

    out_scores = {}
    for donor_dir in glob(os.path.join(args.out_dir, "*")):
        donor = os.path.basename(donor_dir)
        if rev_map[donor] not in out_scores:
            out_scores[rev_map[donor]] = {}
        for vcf_file in glob(os.path.join(donor_dir, "*.vcf")):
            method = os.path.basename(vcf_file).replace(".vcf", "")
            vtype = None
            if method in SNP_METHOD:
                vtype = "SNV"
            if method in INDEL_METHOD:
                vtype = "INDEL"
            truth_file = os.path.join(exome_dir, "testexome" + rev_map[donor][-1:] + ".truth.vcf.gz")
            scores = evaluator.evaluate(vcf_file, truth_file, vtype=vtype, truthmask=False)
            out_scores[rev_map[donor]][method] = scores
    print out_scores

    totals = {}
    for v in out_scores.values():
        for method, values in v.items():
            if method not in totals:
                totals[method] = []
            totals[method].append(values)
    for method, values in totals.items():
        out = []
        for i in range(3):
            out.append("%s" % (sum(j[i] for j in values) / float(len(values))))
        print method, "\t".join(out)
Example No. 48
    def initializePage(self):
        super(ResultsPage, self).initializePage()

        self.results = self.parentWizard.results
        # TODO: Unwire this
        self.grades = evaluate('generated/last/grader.txt', 'generated/last/results.json')

        # This assumes scores are normalized to 1 (which is probably
        # a good idea anyway. Otherwise, the max score would depend
        # on the specific questions each exam got out of randomness)
        self.max_score = self.project.total_questions_per_exam

        self.ui.treeWidget.clear()

        for test_num, test_data in self.results.items():
            # TODO: Make name editable
            # name = random.choice(['Fulano', 'Mengano', 'Ciclano', 'Esperanzejo'])
            grade = self.grades[test_num]['total_grade']
            # score = float(grade)/float(self.max_score) * 100  # TODO: implement scoring correctly
            # item = QTreeWidgetItem([str(test_num), str(name), str(grade), str(score)])
            item = QTreeWidgetItem([str(test_num), str(grade)])

            self.ui.treeWidget.addTopLevelItem(item)
Example No. 49
def evaluate(name, no_initial_training=False):
    # import the solution name into the global namespace as 'exercise'
    exercise = __import__(convert(name))
    name = convert(name)
    # game and agent setup code
    game = Game(do_render=args.render)
    game.set_size(args.grid_size, args.grid_size)
    original_game = copy.copy(game)
    # fetch the agent from the provided solution
    agent = fetch_agent(exercise, game)
    file_name_add = ''
    if not no_initial_training:
        if args.dephase:
            agent.dephase = True
        exercise.train(agent)
    else: 
        file_name_add = 'no_train_'
    # clean up after training
    agent.reward_scaling([1, -1, -1])
    agent.accumulated = 0   # reset accumulated rewards
    agent.set_epsilon(0.0)  # turn off exploration
    agent.game.reset()      # reset the game
    agent.game.high_score = 0
    agent.fov = args.fov
    agent.game = original_game  # if training modified the game, restore it here
#   load_reward_profile(agent)
    exercise.reward_profile(agent)
    if args.dephase:
        agent.dephase = False
        exercise.train(agent)
    # evaluate the training results
    folder = 'eval_solutions'
    file_name = evaluator.evaluate(agent, name=os.path.join(folder, file_name_add+name))
    # print out a nice summary of how the evaluation went
    summarizer.summarize_e(file_name)
    return file_name
Example No. 50
 def test_addition_with_naturals(self):
     self.input_stream.set_stream("(+ 2 3)")
     parsed = reader.read_from_stream(self.input_stream)
     evaluator.evaluate(parsed, self.output_stream)
     self.assertEqual(self.output_stream.get_stream(), "5")
Example No. 51
#   print(wordlist.most_common(10))
# print(classifier.show_most_informative_features(32))
# print(extract_features())
# tweet = "'Love-cheat' Daniel Radcliffe splits with girlfriend Rosie Coker: London, Oct 19: Daniel Radcliffe has split wit... http://tinyurl.com/8oxx2ns "
# print(classifier.classify(extract_features(tweet.split())))


with open("/Users/Jaaksi/Documents/Github/learnpython/harkkatyo/test_data.tsv", "r") as testfile, open("/Users/Jaaksi/Documents/Github/learnpython/harkkatyo/evalfile.tsv", "w") as evalfile:
    tsvreader = csv.reader(testfile, dialect='excel-tab',delimiter="\t")
    evalwriter = csv.writer(evalfile, dialect='excel-tab', delimiter='\t')
    for line in tsvreader:
        tweet = line[3]
        result = classifier.classify(extract_features(tweet.split()))
        evalwriter.writerow([line[0], line[1], result, line[3]])

evaluator.evaluate("/Users/Jaaksi/Documents/Github/learnpython/harkkatyo/test_data.tsv", "/Users/Jaaksi/Documents/Github/learnpython/harkkatyo/evalfile.tsv")

# print(classifier.show_most_informative_features(15))
Example No. 52
 def testAssignment(self):
     
     tree = [["var", "name", "Jane"], ["name"]]
     self.assertEqual(evaluator.evaluate(tree), "Jane")
Example No. 53
            elif text in ('.exit', '.quit'):
                break

            result = evaluate_expression(InterpreterInput(text),
                                         environment)

            print "=>", pretty_print(result)

            # set % as the last evaluated expression in environment
            environment['%'] = quote(result)
        except EOFError:
            break
        except KeyboardInterrupt:
            print "\ninterrupt."
        except Exception as e:
            print "error:", e.message

    print "\nexiting..."

if __name__ == "__main__":
    if len(sys.argv) == 1:
        repl()
    elif len(sys.argv) == 2:
        with codecs.open(sys.argv[1], 'r', 'utf-8') as f:
            evaluate(f, make_minimum_environment())
    else:
        sys.stderr.write("Usage: %s [FILE]\nif FILE is not provided, scheme runs in eval-print-loop mode.\n" %
                         sys.argv[0])
        sys.exit(1)

Example No. 54
def _run_block(block, args, context):
    params = [param.value for param in block.params]
    args_dict = dict(zip(params, args))
    ctx = context.enclose_with_args(args_dict)
    return evaluate(block.body, ctx)
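This private helper generalizes the run_block_with_args builtin from Example No. 44; that function could plausibly be rewritten on top of it (a hypothetical refactor, not code from the source):

def run_block_with_args(args, context):
    block = args["block"]
    b_args = args["args"].get_elements()
    if len(block.params) != len(b_args):
        return err("the amount of arguments provided in `run $block with "
                   "$args` should match the number of parameters in the block")
    return _run_block(block, b_args, context)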
Example No. 55
#!/usr/bin/env python

from sys import argv

from pretty import pretty
from evaluator import evaluate
from parser import parse
from sugar import desugar
from tokenizer import tokenize

if __name__ == '__main__':
  code = argv[1]

  print pretty(evaluate(desugar(parse(tokenize(code)))))
Example No. 56
 def testBool(self):
     
     tree = ['true']
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['false']
     self.assertFalse(evaluator.evaluate(tree))
     
     #>
     tree = ['>', 5, 4]
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['>', 3, 6]
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['>', 6, 6]
     self.assertFalse(evaluator.evaluate(tree))
     
     #<
     tree = ['<', 4, 11]
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['<', 9, 8]
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['<', 1, 1]
     self.assertFalse(evaluator.evaluate(tree))
     
     #>=
     tree = ['>=', 5, 4]
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['>=', 3, 6]
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['>=', 6, 6]
     self.assertTrue(evaluator.evaluate(tree))
     
     #<=
     tree = ['<=', 4, 11]
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['<=', 9, 8]
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['<=', 1, 1]
     self.assertTrue(evaluator.evaluate(tree))
     
     #and
     tree = ['&&', "true", "false"]
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['&&', 'true', 'true']
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['&&', 'false', 'false']
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['&&', 'true', 'true', 'true']
     self.assertTrue(evaluator.evaluate(tree))
     
     #or
     tree = ['||', "true", "false"]
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['||', 'true', 'true']
     self.assertTrue(evaluator.evaluate(tree))
     
     tree = ['||', 'false', 'false']
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['||', 'true', 'true', 'true']
     self.assertTrue(evaluator.evaluate(tree))
     
     #!
     tree = ['!', 'true']
     self.assertFalse(evaluator.evaluate(tree))
     
     tree = ['!', 'false']
     self.assertTrue(evaluator.evaluate(tree))