Example #1
async def main():
    judger = Judger()
    judger.addLocalPlayer(0, aiArgs)
    judger.addLocalPlayer(1, aiArgs)
    returnCode = await judger.startJudge(logicArgs, "", 'replay',
                                         int(sys.argv[1]))
    print(returnCode)
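
A minimal sketch of an entry point for Example #1, assuming Judger, aiArgs, and logicArgs come from the surrounding project:

import asyncio
import sys  # main() reads the replay index from sys.argv[1]

if __name__ == '__main__':
    asyncio.run(main())  # run one judging session on the event loop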
Example #3
 def perform_create(self, serializer):
     #submission = serializer.save(user=self.request.user)
     from account.models import User
     submission = serializer.save(user=User.objects.get(username='******'))
     # Done: call the evaluation routine asynchronously
     Judger(submission_id=submission.id,
            on_finished=Submission.update_all_statistic_info,
            args=get_dict(id=submission.id))\
         .judge_async()
Example #4
class Metrics(object):
    """
        Metrics
    """
    def __init__(self, **kwargs):
        self.train_path = kwargs["train_path"]
        self.valid_path = kwargs["valid_path"]
        self.test_path = kwargs["test_path"]
        self.accusation_path = kwargs["accusation_path"]
        self.law_path = kwargs["law_path"]
        self.judger = Judger(accusation_path=self.accusation_path,
                             law_path=self.law_path)
        self.train(train_path=self.train_path)
        print("\nValid F-score......")
        self.calculate_metrics(path=self.valid_path)
        print("\nTest F-score......")
        self.calculate_metrics(path=self.test_path)

    def train(self, train_path):
        """
        Train the TF-IDF vectorizer and one SVC per task, then save the models.

        :param train_path: path to the training set, one JSON object per line
        """
        print("reading...")
        alltext, accu_label, law_label, time_label = read_trainData(train_path)
        print("cut text...")
        train_data = cut_text(alltext)
        print("train tfidf...")
        tfidf = train_tfidf(train_data)
        vec = tfidf.transform(train_data)
        print('accu SVC')
        accu = train_SVC(vec, accu_label)
        print('law SVC')
        law = train_SVC(vec, law_label)
        print('time SVC')
        time = train_SVC(vec, time_label)

        print('saving model to ./predictor/model/*.model')
        joblib.dump(tfidf, './predictor/model/tfidf.model')
        joblib.dump(accu, './predictor/model/accu.model')
        joblib.dump(law, './predictor/model/law.model')
        joblib.dump(time, './predictor/model/time.model')

    def calculate_metrics(self, path):
        """
        Predict every case in the given file and print the Judger scores.

        :param path: path to a validation or test set, one JSON object per line
        """
        predictor = Predictor()
        cnt = 0
        # Per-class confusion counters: task 1 covers accusations, task 2
        # covers law articles; result[2] accumulates the prison-term score.
        result = [[], [], {}]
        for a in range(self.judger.task1_cnt):
            result[0].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
        for a in range(self.judger.task2_cnt):
            result[1].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
        result[2] = {"cnt": 0, "score": 0}

        with open(path, encoding="UTF-8") as f:
            for line in f:
                line = json.loads(line)
                ground_truth = line["meta"]
                fact = line["fact"]
                ans = predictor.predict(fact)
                cnt += 1
                result = self.judger.gen_new_result(result, ground_truth,
                                                    ans[0])
        # Scoring only needs to happen once, after every line has been judged.
        scores = self.judger.get_score(result)
        print(scores)
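
gen_new_result accumulates the per-class TP/FP/TN/FN counters and get_score turns them into F-scores. The Judger internals are not shown in these examples; the sketch below only illustrates how a macro-F1 could be derived from counters shaped like result[0] above.

def macro_f1(counters):
    """Average per-class F1 over [{"TP": ., "FP": ., "TN": ., "FN": .}, ...]."""
    f1s = []
    for c in counters:
        denom = 2 * c["TP"] + c["FP"] + c["FN"]
        f1s.append(2 * c["TP"] / denom if denom else 0.0)
    return sum(f1s) / len(f1s) if f1s else 0.0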
Example #5
        return result

    for file_name in os.listdir(data_path):
        inf = open(os.path.join(data_path, file_name), "r")
        ouf = open(os.path.join(output_path, file_name), "w")

        fact = []

        for line in inf:
            fact.append(json.loads(line)["fact"])
            if len(fact) == get_batch():
                result = solve(fact)
                cnt += len(result)
                for x in result:
                    print(json.dumps(x), file=ouf)
                fact = []

        if len(fact) != 0:
            result = solve(fact)
            cnt += len(result)
            for x in result:
                print(json.dumps(x), file=ouf)
            fact = []
        inf.close()
        ouf.close()

    jud = Judger(accusation_path='/home/wshong/Downloads/cail_0518/accu.txt',
                 law_path='/home/wshong/Downloads/cail_0518/law.txt')
    res = jud.test(truth_path=data_path, output_path=output_path)
    jud.get_score(result=res)
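
The flush-the-final-partial-batch logic in Example #5 appears twice; the same pattern can be written once as a generator. A sketch, not part of the original code (get_batch and solve still come from the project):

def batched(items, size):
    """Yield lists of up to `size` items, flushing the last partial batch."""
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch

# for facts in batched((json.loads(l)["fact"] for l in inf), get_batch()):
#     for x in solve(facts):
#         print(json.dumps(x), file=ouf)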
Example #6
    test_filename = 'data_test.json'
    pred = PredictorLocal(tfidf, law, accu, time)
    all_test_predicts = []
    all_test_metas = []
    with open(test_filename) as test_f:
        for line in test_f:
            js = json.loads(line)
            text = js["fact"]
            meta = js["meta"]

            ans = pred.predict(text)
            all_test_predicts.append(ans)
            all_test_metas.append(meta)

    #metrics
    judge = Judger("../baseline/accu.txt", "../baseline/law.txt")
    result = judge.test2(all_test_metas, all_test_predicts)
    rst = judge.get_score(result)

    print(rst)
    rstr = "ACCU:(%.4f, %.4f, %.4f); LAW:(%.4f, %.4f, %.4f) TIME: %.4f"% \
            (rst[0][0], rst[0][1], rst[0][2], rst[1][0], rst[1][1], rst[1][2], rst[2])

    sinfo = 'Seg:%s DIM:%s NGRAM:%d RESULT: %s' % (seg_method, dim, ngram,
                                                   rstr)
    logger.info(sinfo)
    # print('begin test model:')
    # print('saving model')
    # joblib.dump(tfidf, 'predictor/model/tfidf.model')
    # joblib.dump(accu, 'predictor/model/accu.model')
Example #7
from judger.judger import Judger

INPUT = 'input/small/'
OUTPUT = 'output/'
jud = Judger('accu.txt', 'law.txt')
res = jud.test(INPUT, OUTPUT)
print(res)
scor = jud.get_score(res)
print(scor)
print('FIN')
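
Judging by how Examples #6 and #8 unpack the score, get_score returns two three-element metric tuples plus a scalar; a hedged sketch of formatting Example #7's result the same way:

accu_scores, law_scores, time_score = scor
print("ACCU:(%.4f, %.4f, %.4f); LAW:(%.4f, %.4f, %.4f) TIME: %.4f"
      % (*accu_scores, *law_scores, time_score))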
Example #8
    accu = train_SVC(vec, accu_label)
    print('law SVC')
    sys.stdout.flush()
    law = train_SVC(vec, law_label)
    print('time SVC')
    sys.stdout.flush()
    time = train_SVC(vec, time_label)

    #test
    print('predict')
    sys.stdout.flush()
    predictor = PredictorLocal(tfidf, accu, law, time)
    test_label, test_predict = predictor.predict_file(test_filename)

    #metrics
    judge = Judger("../baseline/accu.txt", "../baseline/law.txt")
    result = judge.test2(test_label, test_predict)
    print(result)
    rst = judge.get_score(result)

    print(rst)
    rstr = "ACCU:(%.4f, %.4f, %.4f); LAW:(%.4f, %.4f, %.4f) TIME: %.4f"% \
            (rst[0][0], rst[0][1], rst[0][2], rst[1][0], rst[1][1], rst[1][2], rst[2])

    sinfo = 'Prog: %s TrainFile: %s Seg:%s DIM:%s NGRAM:%d RESULT: %s' % \
            (sys.argv[0], train_fname, seg_method, dim, ngram, rstr)
    logger.info(sinfo)

    print('begin test model:')
    print('saving model')
    joblib.dump(tfidf, 'predictor/model/tfidf.model')