Example 1
import numpy as np

def evaluation(sess, model):
    # x, y, test_batch_size and the helpers DataInput, calc_auroc,
    # calc_metric and draw_prc are module-level names defined elsewhere
    # in the project.
    ano_scores = []
    for _, batch_data in DataInput(x, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Accumulate the per-batch anomaly scores
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))
    # Compute AUROC over the whole test set
    auroc = calc_auroc(y, ano_scores)
    print('Eval_auroc:{:.4f}'.format(auroc))
    prec, rec, f1 = calc_metric(y, ano_scores)
    print('Prec:{:.4f}\tRec:{:.4f}\tF1:{:.4f}\n'.format(prec, rec, f1))

    draw_prc(y, ano_scores, key='ResDEAAE_' + 'cross-e')
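
These snippets share a few project helpers that are never shown. As a rough sketch of what they likely do (an assumption, not the project's actual code): DataInput can be a plain batching iterator that yields (batch_index, batch) pairs, matching the `for _, batch_data in DataInput(...)` loops, while calc_auroc and calc_metric can be built on sklearn. The quantile threshold in calc_metric is a hypothetical choice.

import numpy as np
from sklearn.metrics import roc_auc_score, precision_recall_fscore_support

def DataInput(data, batch_size):
    # Yield (batch_index, batch) pairs over the data.
    for i in range(0, len(data), batch_size):
        yield i // batch_size, data[i:i + batch_size]

def calc_auroc(labels, scores):
    # Higher score = more anomalous; anomalies are the positive class.
    return roc_auc_score(labels, np.ravel(scores))

def calc_metric(labels, scores, quantile=0.9):
    # Threshold the scores at a quantile to get hard predictions.
    scores = np.ravel(scores)
    preds = (scores >= np.quantile(scores, quantile)).astype(int)
    prec, rec, f1, _ = precision_recall_fscore_support(
        labels, preds, average='binary')
    return prec, rec, f1
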
Example 2
import numpy as np

def evaluation(sess, model, ratio):
    # norm/ano data and labels, mapping_ratio, test_batch_size and the
    # helper functions are module-level names defined elsewhere.
    # Build a test set whose anomaly contamination is mapping_ratio[ratio].
    (sub_ano, sub_ano_label), _ = _split_dataset(ano, ano_label,
                                                 mapping_ratio[ratio])
    x = np.concatenate((norm, sub_ano), axis=0)
    y = np.concatenate((norm_label, sub_ano_label), axis=0)

    ano_scores = []
    for _, batch_data in DataInput(x, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Accumulate the per-batch anomaly scores
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))
    # Compute AUROC
    auroc = calc_auroc(y, ano_scores)
    print('Anomaly ratio:{:.4f}\tEval_auroc:{:.4f}'.format(ratio, auroc))
    prec, rec, f1 = calc_metric(y, ano_scores)
    print('Prec:{:.4f}\tRec:{:.4f}\tF1:{:.4f}\n'.format(prec, rec, f1))
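
_split_dataset is also not shown; from its use above it presumably subsamples the anomaly set so that the test set reaches the contamination given by mapping_ratio[ratio]. A minimal sketch under that assumption (the keep_ratio semantics and the fixed seed are guesses):

import numpy as np

def _split_dataset(data, labels, keep_ratio, seed=42):
    # Randomly keep a `keep_ratio` fraction of the samples; return the
    # kept slice and the held-out remainder, each with its labels.
    rng = np.random.RandomState(seed)
    idx = rng.permutation(len(data))
    n_keep = int(len(data) * keep_ratio)
    keep, rest = idx[:n_keep], idx[n_keep:]
    return (data[keep], labels[keep]), (data[rest], labels[rest])
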
Example 3

import numpy as np

def _eval(sess, model, test_data, label):
    ano_scores = []
    for _, batch_data in DataInput(test_data, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Accumulate the per-batch anomaly scores
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))
    # Compute AUROC
    auroc = calc_auroc(label, ano_scores)
    # Skip precision/recall/F1 here for the sake of speed;
    # uncomment the line below to compute them.
    prec, rec, f1 = 0., 0., 0.
    # prec, rec, f1 = calc_metric(label, ano_scores)

    # Keep only the checkpoint with the best AUROC seen so far.
    global best_auroc
    if best_auroc < auroc:
        best_auroc = auroc
        model.save(sess, '{}/ckpt'.format(save_path))
    return auroc, prec, rec, f1
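
This variant is written to be called periodically from a training loop, keeping only the best checkpoint by AUROC. A hypothetical driver under that assumption (train_loop, model.train and the batch_size default are illustrative names, not the project's API; best_auroc and save_path are the module-level names the snippet relies on):

best_auroc = 0.       # module-level, updated inside _eval
save_path = './save'  # hypothetical checkpoint directory

def train_loop(sess, model, train_data, test_data, test_label,
               epochs=50, batch_size=128):
    for epoch in range(epochs):
        for _, batch in DataInput(train_data, batch_size):
            model.train(sess, batch)  # assumed training step
        # Evaluate once per epoch; _eval saves the best checkpoint itself.
        auroc, prec, rec, f1 = _eval(sess, model, test_data, test_label)
        print('Epoch {:d}\tEval_auroc:{:.4f}'.format(epoch, auroc))
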
Example 4

import pickle

import numpy as np

def evaluation(sess, model):
    # x, y, test_batch_size, method and the helper functions are
    # module-level names defined elsewhere in the project.
    ano_scores = []
    for _, batch_data in DataInput(x, test_batch_size):
        _ano_score = model.eval(sess, batch_data)
        # Accumulate the per-batch anomaly scores
        ano_scores += list(_ano_score)
    ano_scores = np.array(ano_scores).reshape((-1, 1))

    # Persist labels and scores so the curves can be re-drawn offline.
    with open('scores.pkl', 'wb') as f:
        pickle.dump((y, ano_scores), f, pickle.HIGHEST_PROTOCOL)

    # Compute AUROC
    auroc = calc_auroc(y, ano_scores)
    print('Eval_auroc:{:.4f}'.format(auroc))
    prec, rec, f1 = calc_metric(y, ano_scores)
    print('Prec:{:.4f}\tRec:{:.4f}\tF1:{:.4f}\n'.format(prec, rec, f1))

    draw_prc(y, ano_scores, key='DEAAE_' + method)
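
Because this last example pickles (y, ano_scores), the precision-recall curve can be re-drawn offline from scores.pkl. A sketch of what draw_prc could look like, built on sklearn's precision_recall_curve and matplotlib (an assumed re-implementation, not the project's helper; the key string here is a placeholder):

import pickle

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import average_precision_score, precision_recall_curve

def draw_prc(labels, scores, key):
    # Plot the precision-recall curve and save it to <key>_prc.png.
    scores = np.ravel(scores)
    prec, rec, _ = precision_recall_curve(labels, scores)
    ap = average_precision_score(labels, scores)
    plt.step(rec, prec, where='post',
             label='{} (AP={:.4f})'.format(key, ap))
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend()
    plt.savefig('{}_prc.png'.format(key))

with open('scores.pkl', 'rb') as f:
    y, ano_scores = pickle.load(f)
draw_prc(y, ano_scores, key='DEAAE_offline')
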