Example 1
def lfw_view2_random_AsyncB(host, dbname, A):
    trials = MongoTrials(
            'mongo://%s:44556/%s/jobs' % (host, dbname),
            exp_key=exp_keys['random_asyncB'],
            refresh=True)
    helper = BoostHelper(trials.trials)
    # XXX: Should the weights be used? I don't think so, we're basically
    #      doing LPBoost at this point
    # Take the first A members of the boosted ensemble and print, for each,
    # its document id, trial id, loss, and the trial it continues (if any).
    members = helper.ensemble_members(MultiBandit())[:int(A)]
    for ii, dd in enumerate(members):
        ccc = helper.continues(dd)
        print ii, dd['_id'], dd['tid'], dd['result']['loss'],
        print (ccc['_id'] if ccc else None)
    namebases = []
    for doc in members:
        namebase = '%s_%s' % (dbname, doc['_id'])
        namebases.append(namebase)

        get_view2_features(
                slm_desc=doc['spec']['model']['slm'],
                preproc=doc['spec']['model']['preproc'],
                comparison=doc['spec']['comparison'],
                namebase=namebase,
                basedir=os.getcwd(),
                )

    basedirs = [os.getcwd()] * len(namebases)

    train_view2(namebases=namebases, basedirs=basedirs)
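A minimal invocation sketch for the function above. The host name and ensemble size are placeholders, and 'try2' is simply the database name mentioned in the comments of Example 4; adjust all three to the actual MongoDB setup.

# Hypothetical call: assemble a 5-member ensemble from the 'random_asyncB'
# experiment stored in the 'try2' database.
lfw_view2_random_AsyncB(host='localhost', dbname='try2', A=5)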
Example 2
def lfw_view2_random_AdaboostMixture(host, dbname, A):
    trials = MongoTrials(
            'mongo://%s:44556/%s/jobs' % (host, dbname),
            exp_key=exp_keys['random'],
            refresh=True)
    bandit = MultiBandit()
    mix = AdaboostMixture(trials, bandit, test_mask=True)
    # XXX: Should the weights be used? I don't think so, we're basically
    #      doing LPBoost at this point
    specs, weights, tids = mix.mix_models(int(A), ret_tids=True)
    assert len(specs) == len(tids)
    namebases = []
    for spec, tid in zip(specs, tids):
        # -- allow this feature cache to be reused by AdaboostMixture
        #    and SimpleMixtures of different sizes

        #XXX: Potentially affected by the tid/injected jobs bug,
        #     but unlikely. Rerun just in case once dual svm solver is in.
        namebase = '%s_%s' % (dbname, tid)
        namebases.append(namebase)

        get_view2_features(
                slm_desc=spec['model']['slm'],
                preproc=spec['model']['preproc'],
                comparison=spec['comparison'],
                namebase=namebase,
                basedir=os.getcwd(),
                )

    basedirs = [os.getcwd()] * len(namebases)

    train_view2(namebases=namebases, basedirs=basedirs)
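The XXX comment above deliberately leaves the AdaBoost weights returned by mix_models unused. For reference, a weight-averaged ensemble decision would look like the NumPy sketch below; this is a generic illustration rather than the project's own code path, and member_preds is a hypothetical (n_members, n_examples) array of +/-1 per-member predictions.

import numpy as np

def weighted_vote(member_preds, weights):
    # AdaBoost-style decision: sign of the weighted sum of member votes.
    return np.sign(np.dot(np.asarray(weights), np.asarray(member_preds)))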
Example 3
def test_baby_view2_libsvm_kernel():
    c = config_tiny_rnd0
    test_set = range(20) + range(500, 520)
    lfw.get_view2_features(c['slm'], c['preproc'], 'mult', 'libsvm', os.getcwd(),
                           test=test_set)
    return lfw.train_view2(['libsvm'], [os.getcwd()],
                           test=test_set, use_libsvm={'kernel': 'precomputed'})
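For context on use_libsvm={'kernel': 'precomputed'}: with a precomputed kernel the caller supplies Gram matrices instead of raw feature vectors. The scikit-learn sketch below shows the same idea in generic form; it is not the project's own libsvm wrapper, and the data is synthetic.

import numpy as np
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X_train = rng.randn(40, 8)                  # toy training features
X_test = rng.randn(10, 8)                   # toy test features
y_train = rng.randint(0, 2, size=40)

clf = SVC(kernel='precomputed')
clf.fit(X_train.dot(X_train.T), y_train)    # (n_train, n_train) Gram matrix
preds = clf.predict(X_test.dot(X_train.T))  # (n_test, n_train) Gram matrix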
Example 4
def lfw_view2_randomL(host, dbname):
    trials = MongoTrials('mongo://%s:44556/%s/jobs' % (host, dbname),
            refresh=False)
    #B = main_lfw_driver(trials)
    #E = B.get_experiment(name=('random', 'foo'))
    mongo_trials = trials.view(exp_key=exp_keys['randomL'], refresh=True)

    docs = [d for d in mongo_trials.trials
            if d['result']['status'] == hyperopt.STATUS_OK]
    local_trials = hyperopt.trials_from_docs(docs)
    losses = local_trials.losses()
    best_doc = docs[np.argmin(losses)]

    #XXX: Potentially affected by the tid/injected jobs bug,
    #     but unlikely. Rerun just in case once dual svm solver is in.
    print best_doc['spec']
    namebase = '%s_randomL_%s' % (dbname, best_doc['tid'])

    get_view2_features(
            slm_desc=best_doc['spec']['model']['slm'],
            preproc=best_doc['spec']['model']['preproc'],
            comparison=best_doc['spec']['comparison'],
            namebase=namebase,
            basedir=os.getcwd(),
            )

    namebases = [namebase]
    basedirs = [os.getcwd()] * len(namebases)

    #train_view2(namebases=namebases, basedirs=basedirs)
    # running on the try2 database
    # finds id 1674
    #train err mean 0.0840740740741
    #test err mean 0.199666666667

    #running with libsvm:
    train_view2(namebases=namebases, basedirs=basedirs,
                use_libsvm={'kernel':'precomputed'})
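A hypothetical invocation of the function above, reusing the 'try2' database named in its comments (the host is a placeholder):

lfw_view2_randomL(host='localhost', dbname='try2')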
Example 5
def test_baby_view2():
    c = config_tiny_rnd0
    lfw.get_view2_features(c['slm'], c['preproc'], 'mult', '', os.getcwd(),
                           test=50)
    return lfw.train_view2([''], [os.getcwd()], test=50)