Example #1
import numpy as np

# `pyll` is hyperopt's lazy expression language; `scope` holds the
# project-defined graph nodes (slm_memmap, get_images, ...). These import
# paths are assumptions; screening_program is defined in Example #2 below.
from hyperopt import pyll
from hyperopt.pyll import scope


def get_performance(slm, decisions, preproc, comparison,
                    namebase=None, progkey='result_w_cleanup',
                    return_multi=False, ctrl=None):
    # Default decisions: one row of 3200 zeros, one value per LFW
    # development pair (2200 DevTrain + 1000 DevTest).
    if decisions is None:
        decisions = np.zeros((1, 3200))
    else:
        decisions = np.asarray(decisions)
    assert decisions.shape == (1, 3200)
    # Pick a (probably) unique namebase so memmaps from concurrent runs
    # do not collide on disk.
    if namebase is None:
        namebase = 'memmap_' + str(np.random.randint(10 ** 8))
    # Lazy graph node: SLM features for every image, cached on disk as a
    # memmap under `namebase`.
    image_features = scope.slm_memmap(
            desc=slm,
            X=scope.get_images('float32', preproc=preproc),
            name=namebase + '_img_feat')
    # Evaluate both standard comparison functions, or only the requested one.
    if return_multi:
        comps = ['mult', 'sqrtabsdiff']
    else:
        comps = [comparison]
    cmp_progs = []
    for comp in comps:
        # screening_program (Example #2) returns (result_w_cleanup, locals());
        # [1][progkey] picks one lazy node out of the returned locals dict.
        sresult = screening_program(
                    slm_desc=slm,
                    preproc=preproc,
                    comparison=comp,
                    namebase=namebase,
                    decisions=decisions,
                    image_features=image_features,
                    ctrl=ctrl)[1][progkey]
        cmp_progs.append([comp, sresult])
    # Nothing has run yet; rec_eval evaluates the whole lazy graph at once.
    cmp_results = pyll.rec_eval(cmp_progs)
    if return_multi:
        return cmp_results
    else:
        return cmp_results[0][1]
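
A minimal usage sketch for get_performance. Everything below is hypothetical:
`load_slm_description` is an assumed project helper, and `my_preproc` stands
in for a real preprocessing configuration; the scope functions used above must
already be registered for rec_eval to succeed.

# Hypothetical placeholders -- not real configurations.
my_slm_desc = load_slm_description()        # assumed project helper
my_preproc = {'global_normalize': False}    # assumed preprocessing dict

# Single comparison: returns one evaluated result dict.
res = get_performance(my_slm_desc, decisions=None, preproc=my_preproc,
                      comparison='mult')

# Both standard comparisons: returns a list of [name, result] pairs.
both = get_performance(my_slm_desc, decisions=None, preproc=my_preproc,
                       comparison=None, return_multi=True)
for name, r in both:
    print(name, sorted(r))
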
Example #2
import sys

# As in Example #1, `scope` holds the project-defined pyll graph nodes;
# the import path is an assumption based on hyperopt's layout.
from hyperopt.pyll import scope


def screening_program(slm_desc, decisions, comparison, preproc, namebase,
                      image_features=None, ctrl=None):
    if image_features is None:
        image_features = scope.slm_memmap(
                desc=slm_desc,
                X=scope.get_images('float32', preproc=preproc),
                name=namebase + '_img_feat')
    # XXX: check that float32 images lead to correct features

    # XXX: make sure namebase takes a different value for each process
    #      because it's important that the memmaps don't interfere on disk

    def pairs_dataset(split):
        # Memmapped comparison features (X) and labels (y) for the
        # verification pairs of `split`.
        return scope.pairs_memmap(
            scope.verification_pairs(split=split),
            image_features,
            comparison_name=comparison,
            name=namebase + '_pairs_' + split,
            )

    result = {}

    train_X, train_y = pairs_dataset('DevTrain')
    test_X, test_y = pairs_dataset('DevTest')

    # Select the incoming decision values belonging to each split.
    train_d = scope.get_decisions_lfw('DevTrain', decisions)
    test_d = scope.get_decisions_lfw('DevTest', decisions)

    # Normalize the feature columns of the train and test (X, y, d) triples.
    train_Xyd_n, test_Xyd_n = scope.normalize_Xcols(
        (train_X, train_y, train_d,),
        (test_X, test_y, test_d,))

    # NB: the `0 and` guard keeps this experimental branch disabled;
    # it is preserved as-is from the original source.
    if 0 and ctrl is not None:
        print("SKIPPING FEATURE KERNEL", file=sys.stderr)
        train_Xyd_n = scope.attach_feature_kernels(train_Xyd_n, test_Xyd_n,
                ctrl, comparison)

    ### TODO: put consts in config, possibly loop over them in MultiBandit
    svm = scope.train_svm(train_Xyd_n,
            l2_regularization=1e-3,
            max_observations=20000)

    new_d_train = scope.svm_decisions_lfw(svm, train_Xyd_n)
    new_d_test = scope.svm_decisions_lfw(svm, test_Xyd_n)

    result = scope.result_binary_classifier_stats_lfw(
            train_Xyd_n,
            test_Xyd_n,
            new_d_train,
            new_d_test,
            result=result)

    # run_all bundles `result` with cleanup nodes that delete the on-disk
    # memmaps once evaluated; [0] keeps only the stats dict.
    result_w_cleanup = scope.run_all(
        result,
        scope.delete_memmap(train_X),
        scope.delete_memmap(test_X),
        scope.delete_memmap(image_features),
        )[0]

    # Return the final lazy node plus locals(), so callers (see Example #1)
    # can pick out any intermediate node by name.
    return result_w_cleanup, locals()
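
Because screening_program returns its locals() alongside the final node, a
caller can evaluate any intermediate stage. A sketch of driving it directly,
reusing the hypothetical `my_slm_desc` and `my_preproc` placeholders from the
sketch after Example #1:

import numpy as np
from hyperopt import pyll

# Hypothetical driver -- all argument values are stand-ins.
prog, prog_locals = screening_program(
        slm_desc=my_slm_desc,
        decisions=np.zeros((1, 3200)),
        comparison='mult',
        preproc=my_preproc,
        namebase='memmap_demo')

# `prog` and prog_locals['result_w_cleanup'] are the same lazy node;
# nothing runs until rec_eval walks the graph.
stats = pyll.rec_eval(prog)
print(sorted(stats))    # inspect which statistics were recorded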