Example #1
def solve(instance, epsilon, mipgap, mode, drawsolution, stat=None):

    print('***********************************************')
    print(instance.tostr_basic())
    print(instance.tostr_network())

    print("obbt: parse bounds")
    try:
        instance.parse_bounds()
    except UnicodeDecodeError as err:
        print(f'obbt bounds not read: {err}')

    print("create model")
    cvxmodel = rel.build_model(instance, epsilon)
    # cvxmodel.write('convrel.lp')
    cvxmodel.params.MIPGap = mipgap
    cvxmodel.params.timeLimit = 3600
    # cvxmodel.params.OutputFlag = 0
    # cvxmodel.params.Threads = 1
    #cvxmodel.params.FeasibilityTol = 1e-5

    print("solve model")
    costreal = bb.solveconvex(cvxmodel, instance, drawsolution) if mode=='CVX' \
            else bb.lpnlpbb(cvxmodel, instance, drawsolution, adjust_mode=mode)

    if not stat:
        stat = Stat(mode)
    stat.fill(cvxmodel, costreal)
    print('***********************************************')
    print(f"solution for {instance.tostr_basic()}")
    print(stat.tostr_basic())

    cvxmodel.terminate()
    return stat
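
Example #1 only touches a small part of the Stat API: a constructor keyed by the solve mode, fill() to record the solved model and real cost, and tostr_basic() for reporting. A minimal hypothetical sketch of that interface (the project's real class records far more solver detail, and the attribute names here are illustrative only):

class Stat:
    # Hypothetical minimal interface assumed by Example #1.
    def __init__(self, mode):
        self.mode = mode
        self.cost = None
        self.status = None

    def fill(self, model, cost):
        # Record the final real cost and whatever the solver exposes, e.g. its status.
        self.cost = cost
        self.status = getattr(model, 'status', None)

    def tostr_basic(self):
        return f"mode={self.mode} cost={self.cost} status={self.status}"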
Example #2
def __init__(self, canvas):
    self.canvas = canvas
    self.game_map = Map()
    self.hero = Hero(0, 1)
    self.boss = Boss(0, 5)
    self.skeleton = Skeleton(6, 8)
    self.skeleton2 = Skeleton(0, 7)
    self.skeleton3 = Skeleton(4, 5)
    self.stat = Stat(self.hero, self.boss)
    self.monster_position()
Example #3
def patterns(db):
    stats = Stat(db)
    return template("stats.tpl",
                    totalGames=stats.getTotalGames(),
                    totalMoves=stats.getTotalMoves(),
                    avgMoves=stats.getAvgMoves(),
                    scenarioCount=stats.getScenarios(),
                    outcomeCount=stats.getModifiedOutcomes(),
                    totalWins=stats.getWins(),
                    totalLosses=stats.getLosses())
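
Example #3 relies on Bottle's template() helper: the keyword arguments are substituted into stats.tpl. A small self-contained illustration of that substitution, using an inline template string instead of the .tpl file on disk:

from bottle import template

# Toy stand-in showing how bottle.template() fills keyword arguments into a
# template; Example #3 does the same with the "stats.tpl" file.
page = template("Games played: {{totalGames}}, won: {{totalWins}}",
                totalGames=42, totalWins=17)
print(page)  # "Games played: 42, won: 17"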
Example #4
def go(existing_queens):
    """
    :type existing_queens: list[model.Queen]
    :rtype: list[model.Queen]
    """
    if seconds_since(start_time) > timeout:
        log("TIMEOUT! Could not find solution in {} seconds.".format(
            timeout))
        return None
    # log("Current queen count: {} with config: {}".format(len(existing_queens), existing_queens))
    if any_constraint_broken(existing_queens, constraints):
        return None
    stats.append(
        Stat(len(stats), seconds_since(start_time), len(existing_queens)))
    if len(existing_queens) >= N:
        log("Found solution! Returning chessboard: {}".format(
            existing_queens))
        return existing_queens
    possible_values = new_value_selector_function(existing_queens, N)
    if N - len(existing_queens) > len(possible_values):
        # log("\tforwardcheck, need {} more values, but possible is only {}".format(N - len(existing_queens),
        #                                                                           len(possible_values)))
        return None
    else:
        # log("\twith: {} existing: {} possible: {}".format(new_value_selector_function.func_name,
        #                                                   len(existing_queens), len(possible_values)))
        for possible_queen in possible_values:
            queens = go(existing_queens + [possible_queen])
            if queens is not None:
                return queens
        return None
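
In Example #4 the Stat record is built positionally from the step index, the elapsed time, and the number of queens placed so far, so it is presumably a lightweight trace record. A hedged sketch of that shape and of the seconds_since() helper it assumes (the field names are guesses):

import time
from collections import namedtuple

# Assumed trace record: (step index, elapsed wall-clock seconds, queens placed).
Stat = namedtuple('Stat', ['step', 'elapsed_seconds', 'queen_count'])

def seconds_since(start_time):
    # Hypothetical helper matching the call in go().
    return time.time() - start_time

stats = []
start_time = time.time()
stats.append(Stat(len(stats), seconds_since(start_time), 0))
print(stats[-1])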
Example #5
def baseline_random_data_dist(trainFile="", testFile=""):
    baseline_dir = "baseline/"

    res_out_dir = FileNames.RESULTS_DIR.value + baseline_dir
    if not os.path.exists(res_out_dir):
        os.makedirs(res_out_dir)

    res_out_file = res_out_dir + FileNames.RESULT.value + '_random_data_dist'

    test_out_dir = FileNames.TEST_OUT_DIR.value + baseline_dir
    if not os.path.exists(test_out_dir):
        os.makedirs(test_out_dir)

    test_out_file = test_out_dir + FileNames.TEST_OUT.value + '_random_data_dist'

    st = Stat(trainFile=trainFile)
    if testFile:
        st.testFile = testFile
    st.load_data()
    tags = st.get_tags()
    test_tokens_sentences, test_tags, test_tokens = st.read_sentences_tokens_tags()
    random_tags = []
    tags_data_dist = st.get_tag_distribution()
    for token in test_tokens:
        if token == '':
            random_tags.append(token)
        else:
            random_tags.append(
                np.random.choice(tags_data_dist.index,
                                 1,
                                 p=tags_data_dist.tolist()).tolist()[0])

    df = pd.DataFrame({'col': random_tags})
    y_pred = df['col']
    pred_data = pd.DataFrame([test_tokens, test_tags, y_pred])
    pred_data = pred_data.transpose()
    pred_data.to_csv(test_out_file, index=None, header=None, sep=' ', mode='w')

    call('./conlleval.pl < {0} > {1}'.format(test_out_file, res_out_file),
         shell=True)

    f1_score = check_output("awk '{print $8}' " +
                            "{0} |sed '2q;d'".format(res_out_file),
                            shell=True).decode("utf-8")

    return f1_score
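
The sampling line in Example #5 only works if get_tag_distribution() returns a pandas Series indexed by tag whose values are probabilities summing to 1. A toy stand-in that exercises the same np.random.choice call:

import numpy as np
import pandas as pd

# Toy tag distribution with the shape the baseline assumes.
tags_data_dist = pd.Series({'O': 0.7, 'B-loc': 0.2, 'I-loc': 0.1})

sample = np.random.choice(tags_data_dist.index, 1,
                          p=tags_data_dist.tolist()).tolist()[0]
print(sample)  # e.g. 'O'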
Example #6
def Analyze2():
    np.set_printoptions(precision=4, suppress=True)
    # Testing
    batch = train_d[:1]

    num_vars = len(train_model.log_values)

    full_times = 1000
    full_values = [Stat() for _ in range(num_vars)]
    feed_dict = eval_sch.batch(batch)
    feed_dict[placeholders['dropout']] = FLAGS.dropout
    train_model.get_data(feed_dict)

    if not FLAGS.det_dropout:
        for i in range(full_times):
            acts = sess.run(train_model.log_values, feed_dict=feed_dict)
            for j in range(num_vars):
                full_values[j].add(acts[j])

        for i in range(num_vars):
            print(i)
            print(full_values[i].mean()[0, :5], full_values[i].std()[0, :5])
    else:
        for i in range(full_times):
            acts = sess.run(train_model.log_values, feed_dict=feed_dict)
            for j in range(num_vars):
                if len(acts[j]) == 2:
                    full_values[j] = acts[j]
                else:
                    full_values[j].add(acts[j])

        for i in range(num_vars):
            if isinstance(full_values[i], Stat):
                print('Stochastic {}'.format(i))
                print(full_values[i].mean()[0, :5],
                      full_values[i].std()[0, :5])
            else:
                print('Deterministic {}'.format(i))
                print(full_values[i][0][0, :5],
                      np.sqrt(full_values[i][1][0, :5]))
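
Examples #6 and #8 use Stat() as a running-statistics accumulator: add() collects array-valued samples, mean() and std() reduce over the sample axis. A minimal sketch of that interface (the real class may update incrementally, Welford-style, instead of storing every sample):

import numpy as np

class Stat:
    # Minimal accumulator sketch; stores every sample for simplicity.
    def __init__(self):
        self._samples = []

    def add(self, value):
        self._samples.append(np.asarray(value))

    def mean(self):
        return np.mean(self._samples, axis=0)

    def std(self):
        return np.std(self._samples, axis=0)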
Example #7
def solvebench(bench=FASTBENCH,
               epsilon=EPSILON,
               mipgap=MIPGAP,
               mode='CUT',
               drawsolution=False):
    stat = Stat(mode)
    now = date.today().strftime("%y%m%d")
    resfilename = Path(OUTDIR, f'res{now}-{mode}.csv')
    f = open(resfilename, 'w')
    f.write(
        f"gops, {now}, epsilon={epsilon}, mipgap={mipgap}, mode={mode}, non-valid lbs if nogood cuts at feas nodes\n"
    )
    f.write(f'ntk T day, {stat.tocsv_title()}\n')
    f.close()

    for i in bench:
        instance = makeinstance(i)

        stat = solve(instance, epsilon, mipgap, mode, drawsolution, stat)

        f = open(resfilename, 'a')
        f.write(f"{i}, {stat.tocsv_basic()}\n")
        f.close()
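
solvebench() reopens the result file for every instance so that partial results survive a crash. An equivalent hedged sketch of the file handling with context managers, which keeps that behaviour while guaranteeing the handle is closed even if solve() raises (the header text is abbreviated here):

def write_header(resfilename, stat, now, epsilon, mipgap, mode):
    with open(resfilename, 'w') as f:
        f.write(f"gops, {now}, epsilon={epsilon}, mipgap={mipgap}, mode={mode}\n")
        f.write(f"ntk T day, {stat.tocsv_title()}\n")

def append_result(resfilename, name, stat):
    with open(resfilename, 'a') as f:
        f.write(f"{name}, {stat.tocsv_basic()}\n")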
Example #8
def GradientVariance():
    # Testing
    batch = train_d[:FLAGS.batch_size]

    full_times = 1000
    full_preds = Stat()
    full_grads = Stat()
    for i in range(full_times):
        feed_dict = eval_sch.batch(batch)
        feed_dict[placeholders['dropout']] = FLAGS.dropout
        pred, grad = test_model.get_pred_and_grad(sess, feed_dict)
        full_preds.add(pred)
        full_grads.add(grad)

    full_preds_m = np.mean(np.abs(full_preds.mean()))
    full_grads_m = np.mean(np.abs(full_grads.mean()))
    print('Full pred stdev = {}'.format(
        np.mean(full_preds.std()) / full_preds_m))
    print('Full grad stdev = {}'.format(
        np.mean(full_grads.std()) / full_grads_m))

    part_times = 1000
    part_preds = Stat()
    part_grads = Stat()
    for i in range(part_times):
        feed_dict = train_sch.batch(batch)
        feed_dict[placeholders['dropout']] = FLAGS.dropout
        pred, grad = train_model.get_pred_and_grad(sess, feed_dict)
        part_preds.add(pred)
        part_grads.add(grad)
    print('Part pred bias = {}'.format(
        np.mean(np.abs(part_preds.mean() - full_preds.mean())) / full_preds_m))
    print('Part pred stdev = {}'.format(
        np.mean(part_preds.std()) / full_preds_m))
    print('Part grad bias = {}'.format(
        np.mean(np.abs(full_grads.mean() - part_grads.mean())) / full_grads_m))
    print('Part grad stdev = {}'.format(
        np.mean(part_grads.std()) / full_grads_m))
    print(full_grads_m, np.mean(part_grads.std()),
          np.mean(np.abs(part_grads.mean())))
Example #9
class Game():
    def __init__(self, canvas):
        self.canvas = canvas
        self.game_map = Map()
        self.hero = Hero(0, 1)
        self.boss = Boss(0, 5)
        self.skeleton = Skeleton(6, 8)
        self.skeleton2 = Skeleton(0, 7)
        self.skeleton3 = Skeleton(4, 5)
        self.stat = Stat(self.hero, self.boss)
        self.monster_position()

    def monster_position(self):
        self.monster_position = [(self.boss.x, self.boss.y),
                                 (self.skeleton.x, self.skeleton.y),
                                 (self.skeleton2.x, self.skeleton2.y),
                                 (self.skeleton3.x, self.skeleton3.y)]

    def keyPressed(self, event):
        if event.keysym == 'Down':
            if self.game_map.can_move(self.hero.x, self.hero.y + 1,
                                      self.hero.x, self.hero.y,
                                      self.monster_position):
                # if self.game_map.check_edge(self.hero.x, self.hero.y+1) and self.game_map.check_tile(self.hero.x, self.hero.y+1):
                self.hero.move_down()
            else:
                self.hero.turn_down()
            self.hero.draw_char(self.canvas)
        if event.keysym == 'Right':
            if self.game_map.check_edge(
                    self.hero.x + 1, self.hero.y) and self.game_map.check_tile(
                        self.hero.x + 1, self.hero.y):
                self.hero.move_right()
            else:
                self.hero.turn_right()
            self.hero.draw_char(self.canvas)
        if event.keysym == 'Left':
            if self.game_map.check_edge(
                    self.hero.x - 1, self.hero.y) and self.game_map.check_tile(
                        self.hero.x - 1, self.hero.y):
                self.hero.move_left()
            else:
                self.hero.turn_left()
            self.hero.draw_char(self.canvas)
        if event.keysym == 'Up':
            if self.game_map.check_edge(
                    self.hero.x, self.hero.y - 1) and self.game_map.check_tile(
                        self.hero.x, self.hero.y - 1):
                self.hero.move_up()
            else:
                self.hero.turn_up()
            self.hero.draw_char(self.canvas)
        if event.keysym == 'space':  # Tk reports the space bar keysym in lowercase
            self.battle()
        #
        # def start_the_battle(self):
        #     if self.game_map.is_this_tile_occupied(self.hero.x, self.hero.y, self.monster_position):
        #         self.strike(self.game_map.get_the_enemy(self.hero.x, self.hero.y, self.monster_position))
        #
        # def strike(self,attacker):
        #     if attacker == 'self.boss':
        #         # self.stat.draw_skeleton_boss
        #         self.strike_1 = self.hero.sp + self.d6 + self.d6
        #         self.strike_2 = self.boss.sp + self.d6 + self.d6
        #         if self.strike_1 > self.boss.dp:
        #             self.boss.damage(self.strike_1)
        #         if self.strike_2 + self.d6 + self.d6 > self.hero.dp:
        #             self.hero.damage(self.strike_2)
        #
        #
        #     elif attacker == 'self.skeleton':
        #         # self.stat.draw_skeleton_stat_1()
        #         self.strike_1 = self.hero.sp + self.d6 + self.d6
        #         self.strike_2 = self.skeleton.sp + self.dice + self.d6
        #         if self.strike_1 > self.skeleton.dp:
        #             self.skeleton.damage(self.strike_1)
        #         if self.strike_2 + self.d6+self.d6 > self.hero.dp:
        #             self.hero.damage(self.strike_2)
        #
        #
        #     elif attacker == 'self.skeleton2':
        #         # self.stat.draw_skeleton_stat_2()
        #         self.strike_1 = self.hero.sp+self.d6 + self.d6
        #         self.strike_2 = self.skeleton2.sp + self.dice + self.dice
        #         if self.strike_1 > self.skeleton2.self.dp:
        #             self.skeleton2.damage(self.strike_1)
        #         if self.strike_2 + self.d6 + self.d6 > self.hero.dp:
        #             self.hero.damage(self.strike_2)
        #
        #     elif attacker == 'self.skeleton3':
        #         # self.stat.draw_skeleton_stat_3()
        #         self.strike_1 = self.hero.sp + self.d6 + self.d6
        #         self.strike_2 = self.skeleton_3.sp + self.dice + self.d6
        #         if self.strike_1 > self.skeleton3.dp:
        #             self.skeleton3.damage(self.strike_1)
        #         if self.strike_2 + self.d6 + self.d6 > self.hero.dp:
        #             self.hero.damage(self.strike_2)

    def draw_all(self):
        self.game_map.draw_tile(self.canvas)
        self.hero.draw_char(self.canvas)
        self.boss.draw_char(self.canvas)
        self.skeleton.draw_char(self.canvas)
        self.skeleton2.draw_char(self.canvas)
        self.skeleton3.draw_char(self.canvas)
        self.stat.draw_hero_text(self.canvas)
        self.stat.draw_boss_text(self.canvas)
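
Example #9 (and the fragment in Example #2) assume a Tkinter Canvas and key events: event.keysym is what Tk's key bindings deliver. A hedged sketch of how such a Game is typically wired up, assuming the project's Map, Hero, Boss, Skeleton and Stat classes are importable:

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root, width=720, height=720)   # size is an assumption
canvas.pack()

game = Game(canvas)               # the Game class defined above
game.draw_all()
root.bind('<Key>', game.keyPressed)
root.mainloop()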
Example #10
    sys.setdefaultencoding("utf8")

    mrtr = None
    for i in xrange(N_REPS):
        rtr = RecipeTreeRoot()
        #print "All:", rtr.getAllNodes()

        ok = True
        while ok:
            ok = rtr.satisfy()

        if not rtr.prune():
            mrtr = None
            continue

        s_rtr = Stat(rtr)
        t_rtr = s_rtr.total
        print "#%02i, Score % .2f" % (i, t_rtr)
        if mrtr is None:
            mrtr = rtr
            t_mrtr = t_rtr
        else:
            if t_mrtr < t_rtr:
                mrtr = rtr
                t_mrtr = t_rtr

    print mrtr
    print "---"
    print mrtr.toText()
    print "---"
    print Stat(mrtr)
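
Example #10 is Python 2 (print statements, xrange, sys.setdefaultencoding). A hedged Python 3 rendering of the same selection loop, mirroring the original control flow (including discarding the current best when prune() fails) and assuming RecipeTreeRoot and Stat behave as above:

mrtr = None
for i in range(N_REPS):
    rtr = RecipeTreeRoot()
    while rtr.satisfy():
        pass
    if not rtr.prune():
        mrtr = None
        continue
    t_rtr = Stat(rtr).total
    print("#%02i, Score % .2f" % (i, t_rtr))
    if mrtr is None or t_mrtr < t_rtr:
        mrtr, t_mrtr = rtr, t_rtr

print(mrtr)
print("---")
print(mrtr.toText())
print("---")
print(Stat(mrtr))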
Example #11
def train_test_minimum(trainFile="",
                       testFile="",
                       smoothing="witten_bell",
                       ngram_order=3,
                       working_dir="",
                       improvement=False,
                       fold="",
                       cut_off=False,
                       cut_off_freq=2):
    """
    Tests a minimum-requirement model and returns the F1 score of the model.
    Input: trainFile, testFile.
    If retOutFiles is set to True, the function returns the names of the files
    where the results are written.
    """

    #default working dir + test/train files
    minimum_dir = 'minimum/'

    if working_dir:
        minimum_dir = working_dir

    fst_out_dir = FileNames.FST_DIR.value + minimum_dir
    if not os.path.exists(fst_out_dir):
        os.makedirs(fst_out_dir)

    unigram_taggerFstFile = fst_out_dir + FileNames.UNIGRAM_CONCEPT_FST.value
    ngram_lmFile = fst_out_dir + FileNames.NGRAM_LM.value

    res_out_dir = FileNames.RESULTS_DIR.value + minimum_dir
    if not os.path.exists(res_out_dir):
        os.makedirs(res_out_dir)

    res_out_file = res_out_dir + FileNames.RESULT.value

    test_out_dir = FileNames.TEST_OUT_DIR.value + minimum_dir
    if not os.path.exists(test_out_dir):
        os.makedirs(test_out_dir)

    test_out_file = test_out_dir + FileNames.TEST_OUT.value

    st = Stat(trainFile=trainFile)
    if testFile:
        st.testFile = testFile
    st.load_data()
    st.count_tokens()
    st.count_tags()
    st.count_tokens_tags()
    st.probs_token_tags()
    if cut_off:
        st.create_cut_off_unk_table(cutoff_freq=cut_off_freq)
    else:
        st.create_unk_table()
    st.write_token_unk_pos_probs()
    st.create_lexicon()
    st.write_sentences_tags()

    ct = ConceptTagger()

    ct.create_unigram_tagger(st.lexiconFile, st.unigram_conc_unk,
                             unigram_taggerFstFile)
    # default smoothing and order for now
    ct.create_language_model(st.lexiconFile,
                             st.sentecesTagsFile,
                             ngram_lmFile,
                             smoothing=smoothing,
                             order=ngram_order)

    test_tokens_sentences, test_tags, test_tokens = st.read_sentences_tokens_tags()
    out_list = []
    cnt = 0
    tot = len(test_tokens_sentences)

    # for each sentence creates acceptor, concatenates it with unigram tagger and language model,
    # parses out lex result and appends it to results
    for string in test_tokens_sentences:
        cnt += 1
        print('{0}/{1}'.format(cnt, tot))
        print(string)
        accFile = ct.create_acceptor(string, st.lexiconFile)
        ct.composeFsts(accFile, unigram_taggerFstFile, 'tmp.fst')
        ct.composeFsts('tmp.fst', ngram_lmFile, 'tmp2.fst')
        ct.shortestPath('tmp2.fst', 'out.fst')
        iob_tags = ct.parseOut(st.lexiconFile, 'out.fst').split('\n')
        out_list.append(iob_tags)

    call('rm tmp.fst; rm tmp2.fst; rm out.fst; rm 1.fst', shell=True)
    out_list = list(itertools.chain(*out_list))
    df = pd.DataFrame({'col': out_list})
    y_pred = df['col']
    pred_data = pd.DataFrame([test_tokens, test_tags, y_pred])
    pred_data = pred_data.transpose()
    pred_data.to_csv(test_out_file, index=None, header=None, sep=' ', mode='w')

    co_suffix = ""
    if cut_off:
        co_suffix = "__cut_off_freq_" + str(cut_off_freq)

    evalFile = res_out_file + '__' + smoothing + "__ngram_size_" + str(
        ngram_order) + co_suffix + str(fold)

    if not improvement:
        call('./conlleval.pl < {0} > {1}'.format(test_out_file, evalFile),
             shell=True)
    else:
        eval_unique_pos_tags(test_out_file, evalFile)

    f1_score = check_output("awk '{print $8}' " +
                            "{0} |sed '2q;d'".format(evalFile),
                            shell=True).decode("utf-8")

    return f1_score
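
Both Example #5 and Example #11 pull the F1 score out of the conlleval report with an awk/sed pipeline: field 8 of the report's second line is the FB1 value. The same extraction in plain Python, for reference:

def read_f1(eval_file):
    # Field 8 of line 2 of a conlleval report is the FB1 (span-level F1) value.
    with open(eval_file) as f:
        lines = f.read().splitlines()
    fields = lines[1].split()
    return fields[7] if len(fields) >= 8 else ''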