Example #1
def main():

    # train_model, make_model, and ModelGroup are project helpers; their imports are not shown in this listing
    models = [train_model(make_model(i), i) for i in [0, 1, 2]]
    #    models = [train_model(make_model(i), i) for i in [0]]
    mg = ModelGroup(*models)
    #    mg.save('../models/full_classifier_retrain_xd_all_0417_noise.dat')
    #    mg.save('../models/full_classifier_retrain_xd_all_entropy_0430.dat')
    #    mg.save('../models/full_classifier_retrain_xd_all_gini_0528.dat')
    #    mg.save('../models/full_classifier_xd_entropy_0528.dat')
    #    mg.save('../models/full_classifier_xd_reduceMWP_simulation_1025.dat')
    #    mg.save('../models/full_classifier_xd_only_simulation_1029.dat')
    #    mg.save('../models/full_classifier_xd_retrain_noise_1030.dat')
    mg.save('../models/full_classifier_xd_only_sim_non_noi_1102.dat')
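The .dat file written here is what the later examples read back with ModelGroup.load. A minimal round trip might look like the sketch below; the stamps argument is a placeholder for whatever stamp descriptors the project feeds to decision_function, not something defined in this example.

def score_saved_model(stamps):
    # load the model bundle saved by main() above and score a batch of stamps
    model = ModelGroup.load('../models/full_classifier_xd_only_sim_non_noi_1102.dat')
    return model.decision_function(stamps)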
Example #2
def submit_job(lon):
    """
    Submit a new batch classification job to the cloud

    This also creates or overwrites the appropriate entry
    in classify_jobs.json

    Parameters
    ----------
    lon : int
        Longitude of the field to classify
    """
    if already_submitted(lon):
        print("Job already submitted. To force a re-run, "
              "first run\n\t python %s delete %i" % (__file__, lon))
        return

    workers = 100
    stamps = field_stamps(lon)
    model = ModelGroup.load('../models/full_classifier.dat')

    # split the stamps across the workers and submit one scoring job per chunk
    chunks = chunk(stamps, workers)
    jobs = cloud_map(model.decision_function,
                     chunks,
                     return_jobs=True,
                     _label='classify_%3.3i' % lon)
    save_job_ids(lon, jobs)
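The docstring above hints at a small command-line workflow (python <script> delete <lon> clears a previous submission). A driver along those lines might look like the sketch below; the argument handling and the delete_job helper are assumptions made for illustration, while submit_job and already_submitted come from the example itself.

import sys

def main():
    # assumed usage:  python script.py <lon>   or   python script.py delete <lon>
    if sys.argv[1] == 'delete':
        delete_job(int(sys.argv[2]))  # hypothetical helper that drops the entry from classify_jobs.json
    else:
        submit_job(int(sys.argv[1]))

if __name__ == '__main__':
    main()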
Example #4
def redo(field):
    """
    Reclassify nan-scores from a single field in full_search_old,
    write to new files in full_search

    Parameters
    ----------
    field : integer
        Longitude of field to reclassify
    """
    old = '../data/full_search_old/%3.3i.h5' % field
    new = '../data/full_search/%3.3i.h5' % field

    model = ModelGroup.load('../models/full_classifier.dat')

    with File(old, 'r') as infile:
        stamps, scores = infile['stamps'][:], infile['scores'][:]
        redo = ~np.isfinite(scores)

    new_scores = model.decision_function(stamps[redo])
    print(np.isfinite(new_scores).sum())  # how many of the redone scores are now finite

    scores[redo] = new_scores

    with File(new, 'w') as outfile:
        outfile.create_dataset('stamps', data=stamps, compression=9)
        outfile.create_dataset('scores', data=scores, compression=9)
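A simple way to drive redo over many fields is sketched below; the 0-359 longitude range is an assumption based on the %3.3i filename pattern, not something the examples state.

def redo_all():
    # reclassify every field that actually has a file in ../data/full_search_old/
    for field in range(360):
        try:
            redo(field)
        except (IOError, OSError):
            # skip longitudes without a corresponding .h5 file
            continue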
def main():

    model = ModelGroup.load('../models/full_classifier.dat')
    loc = locations()
    result = {'stamps': loc}
    result['scores'] = model.decision_function(loc).tolist()

    with open('../models/random_scores.json', 'w') as outfile:
        json.dump(result, outfile)
Example #8
def main():

    model = ModelGroup.load('../models/full_classifier.dat')
    bubbles = sorted(bubble_params())
    scores = model.decision_function(bubbles)

    result = {'params': bubbles, 'scores': scores.tolist()}

    with open('../models/bubble_scores.json', 'w') as outfile:
        json.dump(result, outfile)
Example #9
def main():

    model = ModelGroup.load('../models/full_classifier.dat')
    on, off = locations()

    result = {'on': on, 'off': off}
    result['on_score'] = model.decision_function(on).tolist()
    result['off_score'] = model.decision_function(off).tolist()

    with open('../models/benchmark_scores.json', 'w') as outfile:
        json.dump(result, outfile)
Example #10
def main():

    model = ModelGroup.load('../models/full_classifier.dat')

    f = get_field(305)
    stamps = sorted(f.all_stamps())

    df = model.cloud_decision_function(stamps, workers=100)
    result = {'stamps': stamps, 'scores': df.tolist()}

    with open('../models/l305_scores.json', 'w') as outfile:
        json.dump(result, outfile)
Example #11
def main():

    model = ModelGroup.load('../models/full_classifier.dat')

    f = get_field(35)
    stamps = list(f.small_stamps())
    stamps = [s for s in stamps if 34.5 < s[1] < 35.5]  # keep only stamps within 0.5 of l=35

    df = model.cloud_decision_function(stamps, workers=100)
    result = {'stamps': stamps, 'scores': df.tolist()}

    with open('../models/l035_small_scores.json', 'w') as outfile:
        json.dump(result, outfile)
def main():

    models = [train_model(make_model(i), i) for i in [0, 1, 2]]
    mg = ModelGroup(*models)
    mg.save('../models/full_classifier.dat')