import numpy as np
from sklearn.svm import LinearSVC

from pystruct import learners
import pystruct.models as crfs
from pystruct.utils import SaveLogger

# Project-local helpers for loading the segmentation data, adding the
# Kraehenbuehl unary potentials, and pixelwise evaluation. The module name
# is an assumption; adjust the import to wherever these helpers live.
from msrc_helpers import (
    load_data, add_kraehenbuehl_features, discard_void, concatenate_datasets,
    add_edges, add_edge_features, PixelwiseScorer, SimpleSplitCV)


def train_svm(test=False, C=0.01, gamma=.1, grid=False):
    # `gamma` is only referenced by the commented-out RBF parameter grid
    # below; the linear SVM trained here ignores it.
    which = "piecewise"

    data_train = load_data(which=which)
    data_train = add_kraehenbuehl_features(data_train, which="train_30px")
    data_train = add_kraehenbuehl_features(data_train, which="train")
    data_train_novoid = discard_void(data_train, 21)

    if grid and test:
        raise ValueError("Don't you dare grid-search on the test-set!")

    svm = LinearSVC(C=C, class_weight='auto', multi_class='crammer_singer',
                    dual=False)
    #svm = LogisticRegression(C=C, class_weight='auto')

    data_val = load_data('val', which=which)
    data_val = add_kraehenbuehl_features(data_val, which="train_30px")
    data_val = add_kraehenbuehl_features(data_val, which="train")
    data_val_novoid = discard_void(data_val, 21)

    if grid:
        # Use a single fixed train/val split so the search is scored on the
        # held-out validation images instead of random folds.
        n_samples_train = len(np.hstack(data_train_novoid.Y))
        n_samples_val = len(np.hstack(data_val_novoid.Y))
        cv = SimpleSplitCV(n_samples_train, n_samples_val)
        data_trainval = concatenate_datasets(data_train_novoid,
                                             data_val_novoid)

        from sklearn.grid_search import GridSearchCV
        #from sklearn.grid_search import RandomizedSearchCV
        #from scipy.stats import expon, gamma
        #param_grid = {'C': 10. ** np.arange(1, 4),
                      #'gamma': 10. ** np.arange(-3, 1)}
        param_grid = {'C': 10. ** np.arange(-6, 2)}
        scorer = PixelwiseScorer(data=data_val)
        # named `grid_search` rather than reusing `grid`, which would shadow
        # the boolean parameter
        grid_search = GridSearchCV(svm, param_grid=param_grid, verbose=10,
                                   n_jobs=-1, cv=cv, scoring=scorer,
                                   refit=False)
        grid_search.fit(np.vstack(data_trainval.X),
                        np.hstack(data_trainval.Y))
        print(grid_search.best_params_)
        print(grid_search.best_score_)
    else:
        print(svm)
        if test:
            # final run: fit on train + val before evaluating on the test set
            data_train_novoid = concatenate_datasets(data_train_novoid,
                                                     data_val_novoid)
        print(np.vstack(data_train_novoid.X).shape)
        svm.fit(np.vstack(data_train_novoid.X),
                np.hstack(data_train_novoid.Y))
        if test:
            data_test = load_data("test", which=which)
        else:
            data_test = load_data("val", which=which)
        data_test = add_kraehenbuehl_features(data_test, which="train_30px")
        data_test = add_kraehenbuehl_features(data_test, which="train")
        scorer = PixelwiseScorer(data=data_test)
        scorer(svm, None, None)
    return svm
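# For reference, a minimal sketch of the fixed-split CV object used above,
# assuming SimpleSplitCV simply yields one (train, validation) index split in
# the format the old sklearn GridSearchCV accepts (an iterable of index-array
# pairs). This is an illustration, not the project's actual implementation.
class _SimpleSplitCVSketch(object):
    def __init__(self, n_train, n_val):
        self.n_train, self.n_val = n_train, n_val

    def __iter__(self):
        # first n_train stacked samples train, the remaining n_val validate
        yield (np.arange(self.n_train),
               np.arange(self.n_train, self.n_train + self.n_val))

    def __len__(self):
        return 1  # a single split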
def main(C=1, test=False):
    # load training data
    #independent = True
    independent = False
    data_train = load_data(which="piecewise")
    data_train = add_edges(data_train, independent=independent,
                           fully_connected=True)
    data_train = add_kraehenbuehl_features(data_train, which="train_30px")
    data_train = add_kraehenbuehl_features(data_train, which="train")
    #data_train = load_data_global_probs()

    if not independent:
        data_train = add_edge_features(data_train)

    data_train = discard_void(data_train, 21)

    if test:
        # for the final test run, also train on the validation set
        data_val = load_data("val", which="piecewise_train")
        data_val = add_edges(data_val, independent=independent)
        data_val = add_kraehenbuehl_features(data_val, which="train_30px")
        data_val = add_kraehenbuehl_features(data_val, which="train")
        data_val = add_edge_features(data_val)
        data_val = discard_void(data_val, 21)
        data_train = concatenate_datasets(data_train, data_val)
        #X_.extend(data_val.X)
        #Y_.extend(data_val.Y)

    n_states = 21
    print("number of samples: %s" % len(data_train.X))
    # inverse-frequency class weights, rescaled to sum to n_states
    class_weights = 1. / np.bincount(np.hstack(data_train.Y))
    #class_weights[21] = 0
    class_weights *= 21. / np.sum(class_weights)
    #class_weights = np.ones(n_states)
    print(class_weights)

    #model = crfs.GraphCRF(n_states=n_states,
                          #n_features=data_train.X[0][0].shape[1],
                          #inference_method='qpbo', class_weight=class_weights)
    # Edge features 0 and 1 get symmetric pairwise potentials; feature 2
    # (presumably a directional feature) gets an antisymmetric one.
    model = crfs.EdgeFeatureGraphCRF(n_states=n_states,
                                     n_features=data_train.X[0][0].shape[1],
                                     inference_method='qpbo',
                                     class_weight=class_weights,
                                     n_edge_features=3,
                                     symmetric_edge_features=[0, 1],
                                     antisymmetric_edge_features=[2])
    experiment_name = "fully_connected_%f" % C
    #warm_start = True
    warm_start = False
    ssvm = learners.OneSlackSSVM(
        model, verbose=2, C=C, max_iter=100000, n_jobs=-1, tol=0.0001,
        show_loss_every=50, inference_cache=50, cache_tol='auto',
        logger=SaveLogger(experiment_name + ".pickle", save_every=100),
        inactive_threshold=1e-5, break_on_bad=False, inactive_window=50,
        switch_to_ad3=False)
    #ssvm = learners.SubgradientSSVM(
        #model, verbose=3, C=C, max_iter=10000, n_jobs=-1, show_loss_every=10,
        #logger=SaveLogger(experiment_name + ".pickle", save_every=10),
        #momentum=0, learning_rate=0.001, decay_exponent=1)

    if warm_start:
        # resume from the pickled learner and log the refit separately
        ssvm = SaveLogger(experiment_name + ".pickle").load()
        ssvm.logger = SaveLogger(
            file_name=experiment_name + "_refit.pickle", save_every=10)
        ssvm.learning_rate = 0.000001
        #ssvm.model.inference_method = 'ad3'
        #ssvm.n_jobs = 1

    ssvm.fit(data_train.X, data_train.Y, warm_start=warm_start)
    print("fit finished!")
    return
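# Hypothetical entry point (an assumption; the original script may be driven
# differently, e.g. from an experiment runner): train the unary SVM first,
# then fit the fully-connected CRF on top of the piecewise features.
if __name__ == "__main__":
    train_svm(C=0.01, grid=False)
    main(C=1, test=False)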