Example #1
# Imports implied by this snippet: numpy and pystruct are standard; the Pascal
# helpers (PascalSegmentation, load_pascal, add_edges, add_edge_features,
# discard_void) come from the surrounding project code.
import numpy as np
import pystruct.models as crfs
from pystruct import learners


def main():
    ds = PascalSegmentation()
    # load training data
    edge_type = "pairwise"
    which = "kTrain"
    data_train = load_pascal(which=which, sp_type="cpmc")

    data_train = add_edges(data_train, edge_type)
    data_train = add_edge_features(ds, data_train)
    data_train = discard_void(ds, data_train, ds.void_label)

    X, Y = data_train.X, data_train.Y

    # inverse class-frequency weights, rescaled over the 21 Pascal VOC classes
    class_weights = 1. / np.bincount(np.hstack(Y))
    class_weights *= 21. / np.sum(class_weights)

    model = crfs.EdgeFeatureGraphCRF(class_weight=class_weights,
                                     symmetric_edge_features=[0, 1],
                                     antisymmetric_edge_features=[2],
                                     inference_method='qpbo')

    ssvm = learners.NSlackSSVM(model, C=0.01, n_jobs=-1)
    ssvm.fit(X, Y)
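
For reference, here is the same EdgeFeatureGraphCRF / NSlackSSVM pattern on tiny synthetic data, without the Pascal-specific loaders. This is a minimal sketch (the graph, features, and labels are made up) and it swaps in the pure-Python 'max-product' inference so no QPBO bindings are needed. Each X sample is a (node_features, edges, edge_features) triple; the symmetric/antisymmetric lists index columns of edge_features.

import numpy as np
import pystruct.models as crfs
from pystruct import learners

rng = np.random.RandomState(0)
X, Y = [], []
for _ in range(20):
    node_features = rng.uniform(size=(4, 3))                 # 4 nodes, 3 features each
    edges = np.array([[0, 1], [1, 2], [2, 3]])               # small chain graph
    edge_features = rng.uniform(size=(len(edges), 3))        # 3 features per edge
    X.append((node_features, edges, edge_features))
    Y.append((node_features[:, 0] > 0.5).astype(np.int64))   # labels tied to feature 0

model = crfs.EdgeFeatureGraphCRF(inference_method='max-product',
                                 symmetric_edge_features=[0, 1],
                                 antisymmetric_edge_features=[2])
ssvm = learners.NSlackSSVM(model, C=0.01, max_iter=20)
ssvm.fit(X, Y)
print(ssvm.predict(X)[0])
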
Example #2
# inverse class-frequency weights (computed earlier in the script), rescaled over the 21 classes
class_weights *= 21. / np.sum(class_weights)
print(class_weights)

model = crfs.EdgeFeatureGraphCRF(inference_method='qpbo',
                                 class_weight=class_weights,
                                 symmetric_edge_features=[0, 1],
                                 antisymmetric_edge_features=[2])

experiment_name = "edge_features_one_slack_trainval_%f" % C

ssvm = learners.NSlackSSVM(model,
                           verbose=2,
                           C=C,
                           max_iter=100000,
                           n_jobs=-1,
                           tol=0.0001,
                           show_loss_every=5,
                           logger=SaveLogger(experiment_name + ".pickle",
                                             save_every=100),
                           inactive_threshold=1e-3,
                           inactive_window=10,
                           batch_size=100)
ssvm.fit(data_train['X'], data_train['Y'])

data_val = cPickle.load(open("data_val_dict.pickle", 'rb'))
y_pred = ssvm.predict(data_val['X'])

# flatten everything and throw away void superpixels (label 255)
y_pred, y_true = np.hstack(y_pred), np.hstack(data_val['Y'])
y_pred = y_pred[y_true != 255]
y_true = y_true[y_true != 255]
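
A hedged follow-up sketch: the SaveLogger written above can be used to reload the fitted learner later and recompute the masked score. This assumes the pickle files produced by the run above are on disk and reuses the experiment_name string defined there.

import numpy as np
import cPickle
from pystruct.utils import SaveLogger

ssvm = SaveLogger(experiment_name + ".pickle", save_every=100).load()  # reload the saved learner
data_val = cPickle.load(open("data_val_dict.pickle", 'rb'))
y_pred = np.hstack(ssvm.predict(data_val['X']))
y_true = np.hstack(data_val['Y'])
mask = y_true != 255                                     # 255 marks void superpixels
print("masked accuracy: %f" % np.mean(y_pred[mask] == y_true[mask]))
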
Example #3
    class_weight *= float(n_states) / np.sum(class_weight)
    n_jobs = 6
    C = 0.01

    # init CRF model
    # model = models.EdgeFeatureGraphCRF(inference_method='qpbo', class_weight=class_weight, symmetric_edge_features=[0, 1], antisymmetric_edge_features=[2, 3])
    model = models.EdgeFeatureGraphCRF(inference_method='qpbo',
                                       class_weight=class_weight,
                                       symmetric_edge_features=[0, 1, 2],
                                       antisymmetric_edge_features=[3, 4])
    # model = models.EdgeFeatureGraphCRF(class_weight=class_weight, symmetric_edge_features=[0, 1], antisymmetric_edge_features=[2, 3])

    # init learner
    ssvm = learners.NSlackSSVM(model,
                               verbose=2,
                               n_jobs=n_jobs,
                               C=C,
                               logger=SaveLogger(args.model_filename,
                                                 save_every=50))
    # ssvm = learners.NSlackSSVM(model, verbose=2, C=C, max_iter=100000, n_jobs=n_jobs, tol=0.0001, show_loss_every=5, logger=SaveLogger(args.model_filename, save_every=50), inactive_threshold=1e-3, inactive_window=10, batch_size=100)

    # train model
    ssvm.fit(X_train, y_train)

    # predict score on test dataset
    y_pred = ssvm.predict(X_test)
    y_pred, y_test = np.hstack(y_pred), np.hstack(y_test)
    y_pred = y_pred[y_test != 0]
    y_test = y_test[y_test != 0]

    print("Score on validation set: %f" % np.mean(y_test == y_pred))
Example #4
File: crf.py  Project: sgilm/pecdeeplearn
        for actual, predicted in train_vols]

    # Get the required data from the graphical model being used.
    offsets, edges, centre_index = strand_model(exp.params['model_shape'])

    # Build training dataset.
    training_input, training_output, _ = extract_crf_data(
        train_vols, training_maps, offsets, edges)

    # Construct model and learner.
    model = models.GraphCRF(inference_method='qpbo')
    learner = learners.NSlackSSVM(model,
                                  verbose=2,
                                  max_iter=10000,
                                  n_jobs=-1,
                                  tol=0.001,
                                  show_loss_every=5,
                                  inactive_threshold=1e-3,
                                  inactive_window=10,
                                  batch_size=100)

    # Learn model parameters.
    start_time = time.time()
    learner.fit(training_input, training_output)
    exp.add_result('learning_duration', time.time() - start_time)

    # Predict a segmentation using model inference.
    for test_vol_actual, test_vol_predicted in test_vols:

        # Only predict on the bounding box of the predicted volume.
        bounds = test_vol_predicted.bounding_box()
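
For comparison with the edge-feature examples above, a minimal synthetic GraphCRF sketch (not from pecdeeplearn): plain GraphCRF samples are (node_features, edges) pairs with no per-edge features. The sketch uses 'max-product' inference so no QPBO bindings are required, and all data is made up.

import numpy as np
from pystruct.models import GraphCRF
from pystruct.learners import NSlackSSVM

rng = np.random.RandomState(0)
X, Y = [], []
for _ in range(20):
    feats = rng.uniform(size=(5, 2))                      # 5 nodes, 2 features each
    edges = np.array([[i, i + 1] for i in range(4)])      # chain over the 5 nodes
    X.append((feats, edges))
    Y.append((feats[:, 0] > 0.5).astype(np.int64))

learner = NSlackSSVM(GraphCRF(inference_method='max-product'), C=1., max_iter=20)
learner.fit(X, Y)
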
Example #5
# Imports implied by this snippet (shuffle is assumed to be sklearn.utils.shuffle);
# the Pascal helpers (PascalSegmentation, load_pascal, make_cpmc_hierarchy,
# discard_void) come from the surrounding project code.
import os
import cPickle

import numpy as np
from sklearn.utils import shuffle

from pystruct import learners
from pystruct.models import LatentNodeCRF
from pystruct.utils import SaveLogger


def svm_on_segments(C=.1, learning_rate=.001, subgradient=False):
    data_file = "data_train_XY.pickle"
    ds = PascalSegmentation()
    if os.path.exists(data_file):
        X_, Y_ = cPickle.load(open(data_file, 'rb'))
    else:
        # load and prepare data
        data_train = load_pascal("train", sp_type="cpmc")
        data_train = make_cpmc_hierarchy(ds, data_train)
        data_train = discard_void(ds, data_train)
        X_, Y_ = data_train.X, data_train.Y
        cPickle.dump((X_, Y_), open(data_file, 'wb'), -1)

    class_weights = 1. / np.bincount(np.hstack(Y_))
    class_weights *= 21. / np.sum(class_weights)
    experiment_name = ("latent_25_cpmc_%f_qpbo_n_slack_blub3" % C)
    logger = SaveLogger(experiment_name + ".pickle", save_every=10)
    model = LatentNodeCRF(n_hidden_states=25,
                          inference_method='qpbo',
                          class_weight=class_weights,
                          latent_node_features=False)
    if subgradient:
        ssvm = learners.LatentSubgradientSSVM(model,
                                              C=C,
                                              verbose=1,
                                              show_loss_every=10,
                                              logger=logger,
                                              n_jobs=-1,
                                              learning_rate=learning_rate,
                                              decay_exponent=1,
                                              momentum=0.,
                                              max_iter=100000,
                                              decay_t0=100)
    else:
        latent_logger = SaveLogger("lssvm_" + experiment_name + "_%d.pickle",
                                   save_every=1)
        #base_ssvm = learners.OneSlackSSVM(
        #model, verbose=2, C=C, max_iter=100, n_jobs=-1, tol=0.001,
        #show_loss_every=200, inference_cache=50, logger=logger,
        #cache_tol='auto', inactive_threshold=1e-5, break_on_bad=False,
        #switch_to=('ogm', {'alg': 'dd'}))
        base_ssvm = learners.NSlackSSVM(model,
                                        verbose=4,
                                        C=C,
                                        n_jobs=-1,
                                        tol=0.1,
                                        show_loss_every=20,
                                        logger=logger,
                                        inactive_threshold=1e-8,
                                        break_on_bad=False,
                                        batch_size=36,
                                        inactive_window=10,
                                        switch_to=('ad3', {
                                            'branch_and_bound': True
                                        }))
        ssvm = learners.LatentSSVM(base_ssvm,
                                   logger=latent_logger,
                                   latent_iter=3)
    #warm_start = True
    warm_start = False
    if warm_start:
        ssvm = logger.load()
        ssvm.logger = SaveLogger(experiment_name + "_retrain.pickle",
                                 save_every=10)
        ssvm.max_iter = 10000
        ssvm.decay_exponent = 1
        #ssvm.decay_t0 = 1000
        #ssvm.learning_rate = 0.00001
        #ssvm.momentum = 0

    X_, Y_ = shuffle(X_, Y_)
    #ssvm.fit(data_train.X, data_train.Y)
    ssvm.fit(X_, Y_)
    #H_init = [np.hstack([y, np.random.randint(21, 26)]) for y in Y_]
    #ssvm.fit(X_, Y_, H_init=H_init)
    print("fit finished!")