Example #1
import time

import numpy as np
import torch
from tqdm import tqdm


# another_Evaluator is defined elsewhere in the project.
def test(net, testloader, load_path, adj):
    net.load_state_dict(torch.load(load_path))
    net.eval()
    evaluator = another_Evaluator(category_topk=(1, 3, 5), attr_topk=(3, 5))
    t = time.time()
    correct_category = 0
    correct_attribute = 0
    total = 0
    c_topk = np.zeros(2)  # counts of top-3 and top-5 hits
    with torch.no_grad():
        tq = tqdm(testloader, ncols=80, ascii=True)
        for i, batch in enumerate(tq):
            image, label, landmarks, category = (batch['image'], batch['label'],
                                                 batch['landmarks'], batch['category'])
            image = image.cuda()
            label = label.cuda()
            landmarks = landmarks.cuda()
            category = category.cuda()

            output = net(image, landmarks, adj)
            pre_category = output['category_output']
            pre_attribute = output['attr_output']
            pre_landmark_map = output['lm_pos_map']

            total += image.size(0)
            # evaluate category accuracy (top-1 / top-3 / top-5)
            _, pre_category_re = torch.max(pre_category, 1)
            correct_category += (pre_category_re == category).sum().item()

            _, top5 = pre_category.topk(5, dim=1, largest=True, sorted=True)
            for j in range(image.size(0)):
                if (top5[j, :3] == category[j]).any():
                    c_topk[0] += 1  # a top-3 hit is also a top-5 hit
                    c_topk[1] += 1
                elif (top5[j, 3:] == category[j]).any():
                    c_topk[1] += 1

            sample = {'category_label': category, 'attr': label}
            evaluator.add(output, sample)

        print('test_c:', float(correct_category) / total)
        print('c_top3:', float(c_topk[0]) / total, 'c_top5:',
              float(c_topk[1]) / total)
        ret = evaluator.evaluate()
        for topk, accuracy in ret['category_accuracy_topk'].items():
            print('metrics/category_top{}'.format(topk), accuracy)

        for topk, accuracy in ret['attr_group_recall'].items():
            for attr_type in range(1, 6):
                print(
                    'metrics/attr_top{}_type_{}_{}_recall'.format(
                        topk, attr_type, attr_type), accuracy[attr_type - 1])

            print('metrics/attr_top{}_all_recall'.format(topk),
                  ret['attr_recall'][topk])
Example #2
    df_train, df_test = split_train_test(df, 75)
    print(len(df_train), len(df_test))
    rf.train(df_train)
    rf.test(df_test)

    estimators = rf.model.estimators_
    rules = []
    for estimator in estimators:
        rules.extend(get_rules(estimator.tree_, df.columns))

    print(len(rules))

    k = 10

    subsetrules = randomSelector(rules, k)

    print(subsetrules)

    CMRandomTest = buildCoverageMatrix(df_test, subsetrules)

    print("Random selector")
    evaluate(CMRandomTest, df_test, len(rules), nb_learners, k=k)

    CMRuleScore1Train = buildCoverageMatrix(df_train, rules)

    ruleScore1(df_train, CMRuleScore1Train, rules, k)

    CMRuleScore1Test = buildCoverageMatrix(df_test, rules)

    print("Rule score 1")
    evaluate(CMRuleScore1Test, df_test, len(rules), nb_learners, k=k)
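The helpers used here (`split_train_test`, `get_rules`, `randomSelector`, `buildCoverageMatrix`, `ruleScore1`) belong to the surrounding project and are not shown. Purely as an illustration of the idea behind `get_rules`, a rule extractor over a fitted scikit-learn decision tree could walk the public `tree_` arrays as below; the `(feature, op, threshold)` path representation is a hypothetical stand-in, not necessarily the project's own format:

def extract_rules(tree, feature_names):
    # Sketch: enumerate every root-to-leaf path of a fitted sklearn Tree object
    # as a list of (feature_name, operator, threshold) conditions.
    rules = []

    def recurse(node, path):
        if tree.children_left[node] == -1:  # -1 marks a leaf in sklearn trees
            rules.append(list(path))
            return
        name = feature_names[tree.feature[node]]
        threshold = tree.threshold[node]
        recurse(tree.children_left[node], path + [(name, '<=', threshold)])
        recurse(tree.children_right[node], path + [(name, '>', threshold)])

    recurse(0, [])
    return rules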
Example #3
    print(len(df_train), len(df_test))
    rf.train(df_train)
    rf.test(df_test)

    estimators = rf.model.estimators_
    rules = []
    for estimator in estimators:
        rules.extend(get_rules(estimator.tree_, df.columns))

    print(len(rules))

    k = 10

    subsetrules = randomSelector(rules, k)

    print(subsetrules)

    CMRandomTest = buildCoverageMatrix(df_test, subsetrules)

    print("Random selector")
    evaluate(CMRandomTest, df_test, len(rules), nb_learners, k=k)

    CMRuleScore1Train = buildCoverageMatrix(df_train, rules)

    ruleScore1(df_train, CMRuleScore1Train, rules, k)

    CMRuleScore1Test = buildCoverageMatrix(df_test, rules)

    print("Rule score 1")
    evaluate(CMRuleScore1Test, df_test, len(rules), nb_learners, k=k)
Example #4
def evaluate():
    args = request.json
    result = evaluator.evaluate(args)
    return json.dumps(result)
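`request.json` here matches Flask's request handling; the route decorator, imports, and app setup sit outside this snippet. A minimal sketch of how such an endpoint could be wired up, assuming Flask and a project-level `evaluator` object exposing `evaluate(args)` (the `/evaluate` path is an assumption):

import json

from flask import Flask, request

app = Flask(__name__)


@app.route('/evaluate', methods=['POST'])
def evaluate():
    # 'evaluator' is assumed to be created elsewhere in the project.
    args = request.json  # parsed JSON request body
    result = evaluator.evaluate(args)
    return json.dumps(result)


if __name__ == '__main__':
    app.run()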
Example #5
from evaluator.evaluator import evaluate
import pandas as pd
from pandas import to_datetime

output_file_EURUSD = "D:/coursework/L4S2/GroupProject/project/results/combined_file.csv"
root = "C:/Users/HP/PycharmProjects/black_region_detection_2/"

accuracy_EURUSD, true_positive, true_negative, false_positive, false_negative = evaluate(
    output_file_EURUSD, "EURUSD", root)
print("Accuracy of EURUSD: " + str(accuracy_EURUSD))

# load the combined results, re-index them by calendar date, and write them back out
results = pd.read_csv(output_file_EURUSD)
results.Date = results['Date'].apply(lambda x: to_datetime(x).date())
results.index = results.Date
results = results.drop(['Date'], axis=1)
results = results.sort_index()
results.to_csv('results.csv')
print(results)

print(true_positive)
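The date handling above can also be pushed into the read itself; a compact sketch of the equivalent (note that `parse_dates` yields pandas Timestamps rather than `datetime.date` objects, which only matters if downstream code depends on the exact type):

import pandas as pd

results = pd.read_csv(output_file_EURUSD, parse_dates=['Date'], index_col='Date')
results = results.sort_index()
results.to_csv('results.csv')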
Example #6
from evaluator.evaluator import evaluate

print(evaluate(123))
Example #7
        # preprocess layer
        log_messages = KnowledgeGroupLayer(log_messages).run()
        # tokenize layer
        log_messages = TokenizeGroupLayer(log_messages,
                                          rex=setting['regex']).run()
        # dictionarize layer and cluster by wordset
        dict_group_result = DictGroupLayer(log_messages, corpus).run()
        # apply LCS and prefix tree
        results, templates = MaskLayer(dict_group_result).run()
        output_file = os.path.join(outdir, log_file)
        # output parsing results
        FileOutputLayer(log_messages, output_file, templates,
                        ['LineId'] + headers).run()
        print('Parsing done. [Time taken: {!s}]'.format(datetime.now() -
                                                        starttime))
        F1_measure, accuracy = evaluator.evaluate(
            groundtruth=os.path.join(indir, log_file + '_structured.csv'),
            parsedresult=os.path.join(outdir, log_file + '_structured.csv'))
        benchmark_result.append([dataset, F1_measure, accuracy])

    print('\n=== Overall evaluation results ===')
    avg_accr = 0
    for i in range(len(benchmark_result)):
        avg_accr += benchmark_result[i][2]
    avg_accr /= len(benchmark_result)
    pd_result = pd.DataFrame(benchmark_result,
                             columns=['dataset', 'F1_measure', 'Accuracy'])
    print(pd_result)
    print('average accuracy is {}'.format(avg_accr))
    pd_result.to_csv('benchmark_result.csv', index=False)
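Since the per-dataset rows end up in a DataFrame anyway, the manual accumulation loop above could equally be expressed with pandas; a small sketch over the same `benchmark_result` list of `[dataset, F1_measure, accuracy]` rows:

import pandas as pd

pd_result = pd.DataFrame(benchmark_result,
                         columns=['dataset', 'F1_measure', 'Accuracy'])
avg_accr = pd_result['Accuracy'].mean()  # same value as the explicit loop
print(pd_result)
print('average accuracy is {}'.format(avg_accr))
pd_result.to_csv('benchmark_result.csv', index=False)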