Example #1
def judge():
    data = request.json
    logging.info(f"recieve{data}")
    submit_id = data['submit_id']
    problem_id = data['problem_id']
    logging.info(f"run problem id: {problem_id}")
    source = data['source']
    judge_dir = os.path.join(TMP_DIR,
                             str(submit_id))  # temp directory for running
    data_dir = os.path.join(
        BASE_DIR, str(problem_id))  # standard input output file, read only
    if os.path.exists(judge_dir):
        shutil.rmtree(judge_dir)
    os.makedirs(judge_dir)
    with open(os.path.join(judge_dir, data['src']),
              mode='w+',
              encoding='utf-8') as f:
        f.write(source)
    compiler = Compiler(data['compile_command'], judge_dir)
    spj = False
    if os.path.exists(os.path.join(data_dir, "spj")) or \
            os.path.exists(os.path.join(data_dir, "spj.py")):
        spj = True
    judger = Judger(data['max_cpu_time'], data['max_memory'],
                    data['run_command'], data.get('seccomp_rule'), judge_dir,
                    1 if data.get('memory_limit_check_only') else 0, data_dir,
                    submit_id, spj)
    judge_pool.apply_async(run, (judger, compiler), callback=callback)
    return "success"
Example #2
 def __init__(self, workdir, language, sourcefile, testfile):
     self._path = path.dirname(path.abspath(__file__))
     self.workdir = workdir
     self.language = language
     self.sourcefile = sourcefile
     self.testfile = testfile
     self.tmpltdir = path.join(self._path, "templates")
     self.judger = Judger()
Example #3
    def init_game(self):
        ''' Initialize the game of Limit Texas Hold'em

        This version supports two-player Limit Texas Hold'em

        Returns:
            (tuple): Tuple containing:

                (dict): The first state of the game
                (int): Current player's id
        '''
        # Initialize a dealer that can deal cards
        self.dealer = Dealer()

        # Initialize two players to play the game
        self.players = [
            Player(i, self.init_chips) for i in range(self.num_players)
        ]

        # Initialize a judger class which will decide who wins in the end
        self.judger = Judger()

        # Deal cards to each player to prepare for the first round
        for i in range(self.num_players):
            self.players[i].hand.append(self.dealer.deal_card())

        # Initialize public cards
        self.public_cards = []

        # Randomly choose a big blind and a small blind
        s = np.random.randint(0, self.num_players)
        b = (s + 1) % self.num_players
        self.players[b].in_chips = self.big_blind
        self.players[s].in_chips = self.small_blind

        # The player next to the big blind plays first
        self.game_pointer = (b + 1) % self.num_players

        # Initialize a betting round. In the first round, the big blind and the small blind need to
        # be passed to the round for processing.
        self.round = Round(self.num_players, self.big_blind)

        self.round.start_new_round(game_pointer=self.game_pointer,
                                   raised=[p.in_chips for p in self.players])

        # Count the round. There are 4 rounds in each game.
        self.round_counter = 0

        # Save the history for stepping back to the last state.
        self.history = []
        self.action_history = []
        for i in range(2):
            self.action_history.append([])
        state = self.get_state(self.game_pointer)

        return state, self.game_pointer
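A hedged usage sketch for init_game, assuming a surrounding Game class (rlcard-style) whose constructor sets num_players, init_chips, big_blind, and small_blind before this method is called; the constructor call below is hypothetical:

# Hypothetical driver for the method above.
game = Game()
state, game_pointer = game.init_game()
print('first player to act:', game_pointer)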
Example #4
def lambda_handler(event, context):
    print(event)
    env = event.get('env')
    twitter = Twitter(env=env)
    trends = twitter.get_trends(id=WOEID)
    judger = Judger()
    for trend in trends:
        should_tweet = judger.judge_whether_tweet(trend)
        if should_tweet:
            twitter.post_tweet(trend.get('name'))
            time.sleep(SLEEP_TIME)
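The handler only reads the 'env' key from the event, so it can be smoke-tested locally with a minimal dict; the 'dev' value and the None context below are assumptions:

# Hypothetical local invocation of the Lambda handler.
if __name__ == '__main__':
    lambda_handler({'env': 'dev'}, None)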
Example #5
def play(policy_number):
    player1 = HumanPlayer()
    player2 = Player(epsilon=0, symbol=-1)
    player2.load_policy(policy_number)
    while True:
        judger = Judger(player1, player2)
        winner = judger.play()
        if winner == player2.symbol:
            print("You lose!")
        elif winner == player1.symbol:
            print("You win!")
        else:
            print("It is a tie!")
Example #6
def train(epochs, print_every_n=500):
    with open('app/saves/metrics_all.csv', "w") as file:
        writer = csv.writer(file)
        writer.writerow(['win_rate1', 'win_rate2', 'draw_rate'])
    with open('app/saves/metrics_first.csv', "w") as file:
        writer = csv.writer(file)
        writer.writerow(['td_error'])
    with open('app/saves/metrics_second.csv', "w") as file:
        writer = csv.writer(file)
        writer.writerow(['td_error'])

    epsilon = 1
    epsilon_decay = 0.999
    epsilon_min = 0.01

    player1 = Player(epsilon=epsilon, symbol=1)
    player2 = Player(epsilon=epsilon, symbol=-1)
    judger = Judger(player1, player2)
    player1_win = 0.0
    player2_win = 0.0
    for i in range(1, epochs + 1):
        winner = judger.play(train=True, print_state=False)
        if winner == 1:
            player1_win += 1
        if winner == -1:
            player2_win += 1

        win_rate1 = player1_win / i
        win_rate2 = player2_win / i
        draw_rate = (i - (player1_win + player2_win)) / i

        with open('app/saves/metrics_all.csv', "a") as metrics_file:
            writer = csv.writer(metrics_file)
            writer.writerow([win_rate1, win_rate2, draw_rate])

        if i % print_every_n == 0:
            print(
                'Epoch %d, player 1 winrate: %.02f, player 2 winrate: %.02f, draw rate: %.02f'
                % (i, win_rate1, win_rate2, draw_rate))

            player1.save_policy(i)
            player2.save_policy(i)

        epsilon = max(epsilon_min, epsilon * epsilon_decay)
        player1.set_epsilon(epsilon)
        player2.set_epsilon(epsilon)
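A hedged call sketch for the trainer; the epoch count and print interval are assumptions:

# Hypothetical training run; writes metrics under app/saves/ as shown above.
if __name__ == '__main__':
    train(epochs=100000, print_every_n=1000)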
Example #7
def compete(player1, turns, policy_number):
    player2 = Player(epsilon=0, symbol=-1)
    player2.load_policy(policy_number)
    judger = Judger(player1, player2)
    player1_win = 0.0
    player2_win = 0.0
    for _ in range(turns):
        winner = judger.play()
        if winner == 1:
            player1_win += 1
        if winner == -1:
            player2_win += 1

    draw_rate = (turns - (player1_win + player2_win)) / turns

    print(
        '%d turns, player 1 winrate: %.02f, player 2 winrate: %.02f, draw rate: %.02f'
        % (turns, player1_win / turns, player2_win / turns, draw_rate))
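A hedged evaluation sketch that reuses the Player constructor and load_policy call seen in the surrounding examples; the concrete numbers are assumptions:

# Hypothetical evaluation: pit a greedy player against a saved policy.
player1 = Player(epsilon=0, symbol=1)
player1.load_policy(100000)
compete(player1, turns=1000, policy_number=100000)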
Example #8
 def __init__(self, name, url_queue, url_list, url_in_queue, Flock, home_urls, tem_siteID=[0], continue_run=[True]):
     '''
     name
     url_queue       URLs assigned by the central server
     url_list        local check for duplicate URLs
     url_in_queue    newly parsed URLs; one UrlQueue is allocated per site
     Flock
     home_urls       used to test whether a URL belongs to the crawl set
     tem_conn        initial DNS cache
     is_new_task     passed by reference and modified by the communicator to signal whether an update is needed
     tem_home_url
     old_home_url    passed by reference
     continue_run[]  flag indicating whether to keep running
     '''
     threading.Thread.__init__(self, name=name)
     # Local URL de-duplication queue: URLs that are duplicates locally are discarded;
     # the rest go into a temporary queue and are later sent to the central server for a global check.
     # Each site gets its own list object so URLs are de-duplicated per site.
     self.__url_list = url_list
     self.__url_queue = url_queue
     # By default each site is assigned one in-queue.
     # URLs that pass the local url_list check are added to in_queue and,
     # once enough have accumulated, handed over to the central server.
     # Queue()
     self.__url_in_queue = url_in_queue
     #----------------------------------------------------------------
     self.__Flock = Flock
     self.__home_urls = home_urls
     # Force a DNS refresh; passed by reference so it can be compared later.
     self.__tem_siteID = tem_siteID
     #----------------------------------------------------------------
     self.__htmlparser = HtmlParser()
     self.__picparser = PicParser()
     self.__judger = Judger(self.__home_urls)
     # init temporary home_url and siteID,
     # both used to determine whether to refresh the DNS cache
     self.__dbsource = DBSource()
     self.__collector = Collector(home_urls)
     # continue run
     self.__continue_run = continue_run
Example #9
def test_tweet_is_correct():
    judger = Judger()
    # dummy blacklist class injection
    judger.blacklists = DummyBlacklists()
    for trend in sample_trends:
        should_tweet = judger.judge_whether_tweet(trend)

        if trend.get('name') == 'trend_A':
            assert should_tweet is False
        elif trend.get('name') == 'trend_B':
            assert should_tweet is False
        elif trend.get('name') == 'trend_C':
            assert should_tweet is True
        elif trend.get('name') == 'trend_D':
            assert should_tweet is True
        elif trend.get('name') == 'trend_E':
            assert should_tweet is True
        elif trend.get('name') == 'trend_F':
            assert should_tweet is False
        elif trend.get('name') == 'trend_blacklisted_A':
            assert should_tweet is False
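The sample_trends fixture is not shown here. A purely hypothetical shape that would be consistent with the assertions above and with the tweet_volume check used in Examples #11 and #12; the values below are invented for illustration:

# Hypothetical fixture; real entries depend on the project's THRETHOLD and blacklist.
sample_trends = [
    {'name': 'trend_A', 'tweet_volume': None},
    {'name': 'trend_C', 'tweet_volume': 50000},
    {'name': 'trend_blacklisted_A', 'tweet_volume': 50000},
]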
Example #10
import os
import sys
import time
import logging

import numpy as np

sys.path.append('..')
from judger import Judger

strtime = time.strftime("%Y-%m-%d-%H-%M", time.localtime())
log_name = "./" + strtime + "ensemble.txt"
logging.basicConfig(handlers=[logging.FileHandler(log_name, 'w+', 'utf-8')],
                    format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)

accusation_path = 'accu.txt'
law_path = 'law.txt'
judger = Judger(accusation_path, law_path)

marked_labels_list = np.load('../accu/accu_labels.npy')
scores_path = '../accu/'
scores_names = [
    'accu_lstm89.1.npy', 'accu_gruaug88.3.npy', 'accu_gru87.9.npy',
    'accu_grubigaug87.3.npy', 'accu_rcnn87.npy', 'accu_rcnnaug86.84.npy',
    'accu_cnn86.77.npy', 'accu_fasttextaug76.14.npy'
]
scores_name_num = len(scores_names)


def sigmoid_(inputs):
    """
    Calculate the sigmoid for the given inputs (array).
    """
    # Elementwise sigmoid over the input array.
    return 1 / (1 + np.exp(-np.asarray(inputs)))
Example #11
def test_tweet_volume_larger_than_threshold():
    trend = {'name': 'dummy', 'tweet_volume': THRETHOLD + 1}
    judger = Judger()
    judger.blacklists = DummyBlacklists()
    assert judger.judge_whether_tweet(trend)
Example #12
def test_tweet_volume_equals_threshold():
    trend = {'name': 'dummy', 'tweet_volume': THRETHOLD}
    judger = Judger()
    judger.blacklists = DummyBlacklists()
    assert not judger.judge_whether_tweet(trend)
    del judger
Example #13
 def __init__(self, predictor, input_path='./input', output='./out'):
     self.predictor = predictor
     self.input_path = input_path
     self.output_path = output
     self.judger = Judger('./data/accu.txt', './data/law.txt')
     self.cnt = 0
Example #14
    #accu = train_SVC(vec, accu_label)
    print('law SVC')
    sys.stdout.flush()
    #law = train_SVC(vec, law_label)
    print('time SVC')
    sys.stdout.flush()
    #time = train_SVC(vec, time_label)

    #test
    print('predict')
    sys.stdout.flush()
    predictor = PredictorLocal(tfidf, accu, law, time)
    test_label, test_predict = predictor.predict_file(test_filename)

    #metrics
    judge = Judger("../baseline/accu.txt", "../baseline/law.txt")
    result = judge.test2(test_label, test_predict)
    print(result)
    rst = judge.get_score(result)

    print(rst)
    rstr = "ACCU:(%.4f, %.4f, %.4f); LAW:(%.4f, %.4f, %.4f) TIME: %.4f"% \
            (rst[0][0], rst[0][1], rst[0][2], rst[1][0], rst[1][1], rst[1][2], rst[2])

    sinfo = 'Prog:%s TrainFile:%s Seg:%s DIM:%s NGRAM:%d RESULT: %s' % (
        sys.argv[0], train_fname, seg_method, dim, ngram, rstr)
    logger.info(sinfo)

    print('begin test model:')
    print('saving model')
    joblib.dump(tfidf, 'predictor/model/tfidf.model')
Example #15
            # pdb.set_trace()
            if set(one_tags) == set(predic_labels_names):
                all_qual_num = all_qual_num + 1
            # pdb.set_trace()
    result_file.write(
        "true_count={},predict_count={},all_qual_num={}\n".format(
            true_tags_count, predic_tags_count, all_qual_num))
    # pdb.set_trace()
    outf_path = '../output/'
    out_filename = "{}_output.json".format(task_type_name)
    outf_file = os.path.join(outf_path, out_filename)
    inf_path = os.path.join(labor_data_path, data_filename)
    generate_pred_file(labor_tags_list, labor_preds, inf_path, outf_file)

    # Evaluate the results
    judger_labor = Judger(tag_path=labor_tag_file)
    reslt_labor = judger_labor.test(truth_path=inf_path, output_path=outf_file)
    score_labor = judger_labor.gen_score(reslt_labor)
    result_file.write('score_{}={}\n\n'.format(model_filename, score_labor))

    exit()

    # Generate the prediction file for the divorce domain
    print('predict_divorce...')
    tags_list = []
    with open('../../data/divorce/tags.txt', 'r', encoding='utf-8') as tagf:
        for line in tagf.readlines():
            tags_list.append(line.strip())
    prd = Predictor('model_divorce/')
    inf_path = '../../data/divorce/data_small_selected.json'
    outf_path = '../../output/divorce_output.json'
Example #16
 def __init__(self, homeurls):
     self.htmlparser = HtmlParser()
     self.judger = Judger(homeurls)
Example #17
def evaluate():
    accu_pred, law_pred = [], []
    ground_truth = []
    count = 0
    for batch in batches_val:
        count += 1
        feed_dict = get_feed_dict(batch)
        law_score, law_pred_b, accu_pred_b, loss = sess.run(
            [
                train_model.law_score, train_model.law_prediction,
                train_model.prediction, train_model.loss
            ],
            feed_dict=feed_dict)
        if count % 100 == 0:
            print('valid_step:', count, 'valid loss:', loss)
        # accu_pred+= [[accu_class[j] for j in i] for i in utils.index_to_label(accu_pred_b, model_config.batch_size)][:len(batch)]
        accu_pred += [
            [j + 1 for j in i]
            for i in utils.index_to_label(accu_pred_b, model_config.batch_size)
        ][:len(batch)]
        law_pred += law_pred_b.tolist()
        ground_truth += list(
            zip(feed_dict[train_model.label].tolist(),
                feed_dict[train_model.law_label].tolist()))
        # if count%10==0:
        #     break
        if count == val_step_per_epoch:
            break

    with open('data/valid_label.txt', 'w', encoding='utf-8') as f:
        for each in ground_truth:
            for i in range(len(each[0])):
                if each[0][i] == 1:
                    f.write(str(accu_class[i]))
            for i in range(len(each[1])):
                if each[1][i] == 1:
                    f.write(', ' + str(law_class[i]))
            f.write('\n')

    with open('data/data_valid_predict.json', 'w', encoding='utf-8') as f:
        for i in range(len(accu_pred)):
            rex = {"accusation": [], "articles": [], "imprisonment": 0}
            rex["accusation"] = accu_pred[i]
            for each in law_pred[i]:
                # each is the index of law predicted in law_class
                if each > 0:
                    rex["articles"].append(file_order[law_class[int(each)]])
            print(json.dumps(rex, ensure_ascii=False), file=f)
            # print(rex)
            # f.write('{{"accusation": [0], "articles": {}, "imprisonment": 0}}'.format(law_pred[i]))
    J = Judger('data/accu.txt', 'data/law.txt')
    res = J.test('data/data_valid.json', 'data/data_valid_predict.json')
    total_score = 0
    scores = []
    for task_idx in range(2):
        TP_micro = 0
        FP_micro = 0
        FN_micro = 0
        f1 = []
        for class_idx in range(len(res[task_idx])):
            if res[task_idx][class_idx]["TP"] == 0:
                f1.append(0)
                continue
            TP_micro += res[task_idx][class_idx]["TP"]
            FP_micro += res[task_idx][class_idx]["FP"]
            FN_micro += res[task_idx][class_idx]["FN"]
            precision = res[task_idx][class_idx]["TP"] * 1.0 / (
                res[task_idx][class_idx]["TP"] +
                res[task_idx][class_idx]["FP"])
            recall = res[task_idx][class_idx]["TP"] * 1.0 / (
                res[task_idx][class_idx]["TP"] +
                res[task_idx][class_idx]["FN"])
            f1.append(2 * precision * recall / (precision + recall))
        precision_micro = TP_micro * 1.0 / (TP_micro + FP_micro + 1e-6)
        recall_micro = TP_micro * 1.0 / (TP_micro + FN_micro + 1e-6)
        F1_micro = 2 * precision_micro * recall_micro / (precision_micro +
                                                         recall_micro + 1e-6)
        F1_macro = np.sum(f1) / len(f1)
        total_score += 100.0 * (F1_micro + F1_macro) / 2
        print(
            'task id: {}, F1_micro: {}, F1_macro: {}, final score: {}'.format(
                task_idx + 1, F1_micro, F1_macro,
                100.0 * (F1_micro + F1_macro) / 2))
        scores.append([F1_micro, F1_macro])
    total_score += res[2]['score'] / res[2]['cnt'] * 100
    print('task id: 3, score:{}'.format(res[2]['score'] / res[2]['cnt'] * 100))
    print('total score:', total_score)
    return total_score, scores
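evaluate relies on module-level objects (sess, batches_val, train_model, accu_class, law_class, file_order) defined elsewhere in the script; once those exist, a hedged call looks like:

# Hypothetical use after a training epoch.
total_score, scores = evaluate()
print('validation score:', total_score)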
Example #18
    def judge(args, ip):
        with InitIsolateEnv() as box_id:
            compile_config = languages[args['language_name']]['compile']
            run_config = languages[args['language_name']]['run']
            src_name = compile_config['src_name']
            time_limit = args['time_limit'] / 1000.0
            if args['language_name'] == 'java':
                memory_limit = 512 * 1024
            else:
                memory_limit = args['memory_limit']
            test_case_id = args['test_case_id']
            submission_id = args['submission_id']
            logger.exception(test_case_id)

            path = os.path.join(JUDGE_DEFAULT_PATH, str(box_id))
            host_name = socket.gethostname()
            is_spj = True if 'spj_code' in args and args['spj_code'] else False

            # write source code into file
            try:
                src_path = os.path.join(path, 'box', src_name)
                f = open(src_path, "w")
                f.write(args['src_code'].encode("utf8"))
                f.close()
            except Exception as e:
                logger.exception(e)
                raise JudgeServerError('unable to write code to file')
            # write spj code into file
            if is_spj:
                spj_src_path = os.path.join(path, 'box', 'spj.c')
                f = open(spj_src_path, "w")
                f.write(args['spj_code'].encode("utf8"))
                f.close()

            update_submission_status(ip, submission_id, 'compiling')

            # compile
            compiler = Compiler(compile_config=compile_config, box_id=box_id)
            compiler.compile()
            # compile spj code
            if is_spj:
                spj_config = languages['c++']['compile']
                spj_config['src_name'] = 'spj.c'
                spj_config['exe_name'] = 'spj'
                spj_compiler = Compiler(compile_config=spj_config,
                                        box_id=box_id)
                spj_compiler.compile()

            update_submission_status(ip, submission_id, 'running & judging')

            # run
            judger = Judger(run_config=run_config,
                            max_cpu_time=time_limit,
                            max_memory=memory_limit,
                            test_case_id=test_case_id,
                            box_id=box_id,
                            server_ip=ip,
                            submission_id=submission_id,
                            is_spj=is_spj)
            result = judger.run()
            judge_result = {
                "status": RESULT["accepted"],
                "info": result,
                "time": None,
                "memory": None,
                "server": host_name
            }
            for item in judge_result["info"]:
                if item["status"] != RESULT['accepted']:
                    judge_result["status"] = item["status"]
                    break
            else:
                st = sorted(result, key=lambda k: k['info']['time'])
                judge_result["time"] = st[-1]['info']["time"] * 1000
                # TODO: not sure why, but dividing by 10 makes the reported memory match actual usage.
                # 2017.04.06 update:
                # VSS - Virtual Set Size: virtual memory used (includes shared libraries)
                # RSS - Resident Set Size: physical memory actually used (includes shared libraries)
                # PSS - Proportional Set Size: physical memory used (shared-library memory split proportionally)
                # USS - Unique Set Size: physical memory used by the process alone (excludes shared libraries)
                # Empirically, roughly rss / 10 == uss.
                # Testing shows POJ uses USS while HDU uses RSS.
                judge_result["memory"] = st[-1]['info']["max-rss"]

            judge_result["status"] = RE_RESULT[judge_result["status"]]
            for item in judge_result["info"]:
                item["status"] = RE_RESULT[item["status"]]

            return judge_result