def test_graph_crf_inference():
    """Exact MAP inference on two samples with different graphs.

    Two states only, pairwise smoothing; every available solver must
    recover the expected labelings.
    """
    for solver in ['qpbo', 'lp', 'ad3', 'dai']:
        model = GraphCRF(n_states=2, inference_method=solver)
        assert_array_equal(model.inference((x_1, g_1), w), y_1)
        assert_array_equal(model.inference((x_2, g_2), w), y_2)
def test_graph_crf_loss_augment():
    """Loss-augmented inference: returned energy must include the loss."""
    sample = (x_1, g_1)
    label = y_1
    model = GraphCRF(n_states=2, inference_method='lp')
    y_hat, energy = model.loss_augmented_inference(sample, label, w,
                                                   return_energy=True)
    # check that y_hat fulfills the energy + loss condition:
    # <w, psi(x, y_hat)> + loss(y, y_hat) == -energy
    augmented = np.dot(w, model.psi(sample, y_hat)) + model.loss(label, y_hat)
    assert_almost_equal(augmented, -energy)
def __init__(self, n_states=2, n_features=None, inference_method='qpbo',
             void_label=21):
    """Set up the CRF and record which state index is treated as void.

    Raises ValueError when void_label is not a valid state index.
    NOTE(review): the defaults (n_states=2, void_label=21) would raise
    here — callers presumably always pass both; confirm before changing.
    """
    if not void_label < n_states:
        raise ValueError("void_label must be one of the states!")
    GraphCRF.__init__(self, n_states, n_features, inference_method)
    self.void_label = void_label
def test_graph_crf_energy_lp_integral():
    """When the LP relaxation is tight, energy matches -<w, psi(x, y)>."""
    model = GraphCRF(n_states=2, inference_method='lp')
    marginals, energy_lp = model.inference((x_1, g_1), w, relaxed=True,
                                           return_energy=True)
    # integral solution: every unary marginal row peaks at exactly 1
    assert_array_almost_equal(np.max(marginals[0], axis=-1), 1)
    labeling = np.argmax(marginals[0], axis=-1)
    # energy and psi check out against the decoded integral labeling
    assert_almost_equal(energy_lp,
                        -np.dot(w, model.psi((x_1, g_1), labeling)))
def test_graph_crf_continuous_inference():
    """Relaxed inference, decoded via argmax, must match exact labelings."""
    samples = [(x_1, g_1, y_1), (x_2, g_2, y_2)]
    for solver in ['lp', 'ad3']:
        model = GraphCRF(n_states=2, inference_method=solver)
        for features, graph, expected in samples:
            relaxed = model.inference((features, graph), w, relaxed=True)
            assert_array_equal(np.argmax(relaxed[0], axis=-1), expected)
def test_graph_crf_energy_lp_relaxed():
    """LP inference energy equals -<w, psi> for relaxed solutions.

    Checked both for random weights (solution may still be integral) and
    for an input engineered to produce a genuinely fractional solution.
    """
    crf = GraphCRF(n_states=2, inference_method='lp')
    # `range` instead of Python-2-only `xrange`: identical iteration,
    # portable to Python 3
    for i in range(10):
        w_ = np.random.uniform(size=w.shape)
        inf_res, energy_lp = crf.inference((x_1, g_1), w_, relaxed=True,
                                           return_energy=True)
        assert_almost_equal(energy_lp,
                            -np.dot(w_, crf.psi((x_1, g_1), inf_res)))
    # now with fractional solution: all-zero unaries leave states tied
    x = np.array([[0, 0], [0, 0], [0, 0]])
    inf_res, energy_lp = crf.inference((x, g_1), w, relaxed=True,
                                       return_energy=True)
    assert_almost_equal(energy_lp, -np.dot(w, crf.psi((x, g_1), inf_res)))
def train_car():
    """Train a GraphCRF SSVM only on scenes that contain cars, then
    plot qualitative results on the train and validation splits.

    Side effects: loads datasets, fits the SSVM, writes result plots to
    the "cars_only" / "cars_only_val" folders, and drops into the
    debugger via tracer() at the end.
    """
    car_idx = np.where(classes == "car")[0]
    data_train = load_data("train", independent=False)
    # keep only images whose ground truth contains the car label
    car_images = np.array([i for i, y in enumerate(data_train.Y)
                           if np.any(y == car_idx)])
    # dtype=int: np.int was deprecated (NumPy 1.20) and removed (1.24);
    # the builtin is the documented equivalent
    n_states_per_label = np.ones(22, dtype=int)
    n_states_per_label[car_idx] = 6

    X, Y, file_names, images, all_superpixels = zip(
        *[(data_train.X[i], data_train.Y[i], data_train.file_names[i],
           data_train.images[i], data_train.superpixels[i])
          for i in car_images])
    problem = GraphCRF(n_states=22, inference_method='ad3',
                       n_features=21 * 6)
    ssvm = learners.SubgradientStructuredSVM(
        problem, verbose=2, C=.001, max_iter=5000, n_jobs=-1,
        show_loss_every=10, learning_rate=0.0001, decay_exponent=0.5)
    ssvm.fit(X, Y)
    Y_pred = ssvm.predict(X)
    plot_results(images, file_names, Y, Y_pred, all_superpixels,
                 folder="cars_only")

    data_val = load_data("val", independent=False)
    car_images_val = np.array([i for i, y in enumerate(data_val.Y)
                               if np.any(y == car_idx)])
    X_val, Y_val, file_names_val, images_val, all_superpixels_val = \
        zip(*[(data_val.X[i], data_val.Y[i], data_val.file_names[i],
               data_val.images[i], data_val.superpixels[i])
              for i in car_images_val])
    Y_pred_val = ssvm.predict(X_val)
    plot_results(images_val, file_names_val, Y_val, Y_pred_val,
                 all_superpixels_val, folder="cars_only_val")

    # recorded results for C=10:
    ## train:
    # 0.92743060939680566
    # > ssvm.score(X_val, Y_val)
    # 0.52921719955898561
    # test 0.61693548387096775
    tracer()
def continuous_loss(self, y, y_hat):
    """Continuous version of the loss for relaxed predictions.

    y_hat is the result of linear programming. The loss is the base
    GraphCRF continuous loss restricted to non-void ground-truth nodes,
    plus the fraction of predictions that fall on the void label.
    NOTE(review): assumes `y_hat == self.void_label` is meaningful for
    the relaxed result's dtype/shape — confirm against the LP decoder.
    """
    mask = y != self.void_label
    # float(...) forces true division: under Python 2 (this codebase
    # uses xrange elsewhere) int/int would silently truncate to 0
    void_fraction = np.sum(y_hat == self.void_label) / float(y.size)
    return (GraphCRF.continuous_loss(self, y[mask], y_hat[mask])
            + void_fraction)