Example #1
import os

import numpy as np

# CFG, CACHE_PATH, Dataset, load_kernels, Evaluation and print_scores
# are defined in the surrounding project module.


def evaluate(
        src_cfg, tr_norms, te_norms, analytical_fim, pi_derivatives,
        sqrt_nr_descs, do_plot=False, verbose=0):

    # Cache file template; the second field is left as a literal "%s"
    # placeholder, presumably filled in per kernel by `load_kernels`.
    outfile = os.path.join(
        CACHE_PATH, "%s_%s_afim_%s_pi_%s_sqrt_nr_descs_%s.dat" % (
            src_cfg, "%s", analytical_fim, pi_derivatives, sqrt_nr_descs))

    # Build the dataset and load (or compute) the train/test kernels.
    dataset = Dataset(CFG[src_cfg]['dataset_name'], **CFG[src_cfg]['dataset_params'])
    (tr_kernel, tr_labels,
     te_kernel, te_labels) = load_kernels(
         dataset, tr_norms=tr_norms, te_norms=te_norms,
         analytical_fim=analytical_fim, pi_derivatives=pi_derivatives,
         sqrt_nr_descs=sqrt_nr_descs, outfile=outfile, do_plot=do_plot,
         verbose=verbose)

    # Fit on the train kernel, score on the test kernel.
    evaluation = Evaluation(CFG[src_cfg]['eval_name'], **CFG[src_cfg]['eval_params'])
    evaluation.fit(tr_kernel, tr_labels)
    scores = evaluation.score(te_kernel, te_labels)

    if verbose > 0:
        print('Train normalizations:', ', '.join(map(str, tr_norms)))
        print('Test normalizations:', ', '.join(map(str, te_norms)))

        if CFG[src_cfg]['metric'] == 'average_precision':
            print_scores(scores)

    # Report the mean AP across classes, or the single accuracy figure.
    if CFG[src_cfg]['metric'] == 'average_precision':
        print("%.2f" % np.mean(scores))
    elif CFG[src_cfg]['metric'] == 'accuracy':
        print("%.2f" % scores)
Example #3
import numpy as np

# CFG, Evaluation and print_scores are defined in the surrounding module.


def evaluate(src_cfg, tr_kernel, tr_labels, te_kernel, te_labels):
    # Variant that takes precomputed train/test kernels directly.
    evaluation = Evaluation(CFG[src_cfg]['eval_name'], **CFG[src_cfg]['eval_params'])
    evaluation.fit(tr_kernel, tr_labels)
    scores = evaluation.score(te_kernel, te_labels)

    # Per-class APs plus their mean, or a single accuracy figure.
    if CFG[src_cfg]['metric'] == 'average_precision':
        print_scores(scores)
        print("%.2f" % np.mean(scores))
    elif CFG[src_cfg]['metric'] == 'accuracy':
        print("%.2f" % scores)
Example #4
import os

import numpy as np

# CFG, CACHE_PATH, Dataset, load_kernels, Evaluation and print_scores
# are defined in the surrounding project module.


def evaluate(src_cfg,
             tr_norms,
             te_norms,
             analytical_fim,
             pi_derivatives,
             sqrt_nr_descs,
             do_plot=False,
             verbose=0):
    # Same routine as Example #1, wrapped one argument per line. The
    # literal "%s" left in the cache path is presumably filled in later
    # by `load_kernels`.
    outfile = os.path.join(
        CACHE_PATH, "%s_%s_afim_%s_pi_%s_sqrt_nr_descs_%s.dat" %
        (src_cfg, "%s", analytical_fim, pi_derivatives, sqrt_nr_descs))

    dataset = Dataset(CFG[src_cfg]['dataset_name'],
                      **CFG[src_cfg]['dataset_params'])
    (tr_kernel, tr_labels, te_kernel,
     te_labels) = load_kernels(dataset,
                               tr_norms=tr_norms,
                               te_norms=te_norms,
                               analytical_fim=analytical_fim,
                               pi_derivatives=pi_derivatives,
                               sqrt_nr_descs=sqrt_nr_descs,
                               outfile=outfile,
                               do_plot=do_plot,
                               verbose=verbose)

    evaluation = Evaluation(CFG[src_cfg]['eval_name'],
                            **CFG[src_cfg]['eval_params'])
    evaluation.fit(tr_kernel, tr_labels)
    scores = evaluation.score(te_kernel, te_labels)

    if verbose > 0:
        print('Train normalizations:', ', '.join(map(str, tr_norms)))
        print('Test normalizations:', ', '.join(map(str, te_norms)))

        if CFG[src_cfg]['metric'] == 'average_precision':
            print_scores(scores)

    if CFG[src_cfg]['metric'] == 'average_precision':
        print("%.2f" % np.mean(scores))
    elif CFG[src_cfg]['metric'] == 'accuracy':
        print("%.2f" % scores)