def test_one_slack_constraint_caching():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0,
                                       size_x=9)
    n_labels = len(np.unique(Y))
    exact_inference = get_installed([('ad3', {'branch_and_bound': True}),
                                     "lp"])[0]
    crf = GridCRF(n_states=n_labels, inference_method=exact_inference)
    clf = OneSlackSSVM(model=crf, max_iter=150, C=1,
                       check_constraints=True, break_on_bad=True,
                       inference_cache=50, inactive_window=0)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_equal(len(clf.inference_cache_), len(X))
    # there should be 13 constraints, which is fewer than the 94 iterations
    # that are done
    # check that we didn't change the behavior of how we construct the cache
    constraints_per_sample = [len(cache) for cache in clf.inference_cache_]
    if exact_inference == "lp":
        assert_equal(len(clf.inference_cache_[0]), 18)
        assert_equal(np.max(constraints_per_sample), 18)
        assert_equal(np.min(constraints_per_sample), 18)
    else:
        assert_equal(len(clf.inference_cache_[0]), 13)
        assert_equal(np.max(constraints_per_sample), 20)
        assert_equal(np.min(constraints_per_sample), 11)
def test_latent_node_boxes_edge_features():
    # learn the "easy" 2x2 boxes dataset.
    # smoketest using a single constant edge feature
    X, Y = make_simple_2x2(seed=1, n_samples=40)
    latent_crf = EdgeFeatureLatentNodeCRF(n_labels=2, n_hidden_states=2,
                                          n_features=1)
    base_svm = OneSlackSSVM(latent_crf)
    base_svm.C = 10
    latent_svm = LatentSSVM(base_svm, latent_iter=10)

    # make edges for hidden states:
    edges = make_edges_2x2()
    G = [np.vstack([make_grid_edges(x), edges]) for x in X]

    # reshape / flatten x and y
    X_flat = [x.reshape(-1, 1) for x in X]
    Y_flat = [y.ravel() for y in Y]

    # add edge features; each sample is
    # (features, edges, edge_features, n_hidden_nodes)
    X_ = [(x, g, np.ones((len(g), 1)), 4) for x, g in zip(X_flat, G)]
    latent_svm.fit(X_[:20], Y_flat[:20])

    assert_array_equal(latent_svm.predict(X_[:20]), Y_flat[:20])
    assert_equal(latent_svm.score(X_[:20], Y_flat[:20]), 1)

    # test that score is not always 1
    assert_true(.98 < latent_svm.score(X_[20:], Y_flat[20:]) < 1)
def test_standard_svm_blobs_2d_class_weight():
    # no edges, reduce to crammer-singer svm
    X, Y = make_blobs(n_samples=210, centers=3, random_state=1,
                      cluster_std=3, shuffle=False)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X, Y = X[:170], Y[:170]
    X_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=int)) for x in X]

    pbl = GraphCRF(n_features=3, n_states=3, inference_method='unary')
    svm = OneSlackSSVM(pbl, check_constraints=False, C=1000)
    svm.fit(X_graphs, Y[:, np.newaxis])

    weights = 1. / np.bincount(Y)
    weights *= len(weights) / np.sum(weights)

    pbl_class_weight = GraphCRF(n_features=3, n_states=3,
                                class_weight=weights,
                                inference_method='unary')
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10,
                                    check_constraints=False,
                                    break_on_bad=False)
    svm_class_weight.fit(X_graphs, Y[:, np.newaxis])

    assert_greater(f1_score(Y, np.hstack(svm_class_weight.predict(X_graphs)),
                            average='macro'),
                   f1_score(Y, np.hstack(svm.predict(X_graphs)),
                            average='macro'))
def test_class_weights_rescale_C():
    # check that our crammer-singer implementation with class weights and
    # rescale_C=True is the same as LinearSVC's c-s class_weight
    # implementation
    raise SkipTest("class weight test needs update")
    from sklearn.svm import LinearSVC
    X, Y = make_blobs(n_samples=210, centers=3, random_state=1,
                      cluster_std=3, shuffle=False)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X, Y = X[:170], Y[:170]
    weights = len(Y) / (np.bincount(Y) * len(np.unique(Y)))
    pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
                                     class_weight=weights, rescale_C=True)
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10, tol=1e-5)
    svm_class_weight.fit(X, Y)

    try:
        linearsvm = LinearSVC(multi_class='crammer_singer',
                              fit_intercept=False, class_weight='balanced',
                              C=10)
        linearsvm.fit(X, Y)
        assert_array_almost_equal(svm_class_weight.w,
                                  linearsvm.coef_.ravel(), 3)
    except TypeError:
        # travis has a really old sklearn version that doesn't support
        # class_weight in LinearSVC
        pass
def msrc():
    models_basedir = 'models/msrc/'
    crf = EdgeCRF(n_states=24, n_features=2028, n_edge_features=4,
                  inference_method='gco')
    clf = OneSlackSSVM(crf, max_iter=10000, C=0.01, verbose=2,
                       tol=0.1, n_jobs=4, inference_cache=100)

    X, Y = load_msrc('train')
    Y = remove_areas(Y)

    start = time()
    clf.fit(X, Y)
    stop = time()

    np.savetxt(models_basedir + 'msrc_full.csv', clf.w)
    # pickle requires a binary file handle
    with open(models_basedir + 'msrc_full.pickle', 'wb') as f:
        pickle.dump(clf, f)

    X, Y = load_msrc('test')
    Y = remove_areas(Y)
    Y_pred = clf.predict(X)

    print('Error on test set: %f' % compute_error(Y, Y_pred))
    print('Score on test set: %f' % clf.score(X, Y))
    print('Norm of weight vector: |w|=%f' % np.linalg.norm(clf.w))
    print('Elapsed time: %f s' % (stop - start))

    return clf
def test_class_weights_rescale_C():
    # check that our crammer-singer implementation with class weights and
    # rescale_C=True is the same as LinearSVC's c-s class_weight
    # implementation
    from sklearn.svm import LinearSVC
    X, Y = make_blobs(n_samples=210, centers=3, random_state=1,
                      cluster_std=3, shuffle=False)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X, Y = X[:170], Y[:170]
    weights = 1. / np.bincount(Y)
    weights *= len(weights) / np.sum(weights)
    pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
                                     class_weight=weights, rescale_C=True)
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10, tol=1e-5)
    svm_class_weight.fit(X, Y)

    try:
        linearsvm = LinearSVC(multi_class='crammer_singer',
                              fit_intercept=False, class_weight='auto',
                              C=10)
        linearsvm.fit(X, Y)
        assert_array_almost_equal(svm_class_weight.w,
                                  linearsvm.coef_.ravel(), 3)
    except TypeError:
        # travis has a really old sklearn version that doesn't support
        # class_weight in LinearSVC
        pass
def test_one_slack_constraint_caching():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0,
                                       size_x=9)
    n_labels = len(np.unique(Y))
    crf = GridCRF(n_states=n_labels, inference_method='lp')
    clf = OneSlackSSVM(model=crf, max_iter=150, C=1,
                       check_constraints=True, break_on_bad=True,
                       inference_cache=50, inactive_window=0, verbose=10)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_equal(len(clf.inference_cache_), len(X))
    # there should be 11 constraints, which is fewer than the 94 iterations
    # that are done
    assert_equal(len(clf.inference_cache_[0]), 11)
    # check that we didn't change the behavior of how we construct the cache
    constraints_per_sample = [len(cache) for cache in clf.inference_cache_]
    assert_equal(np.max(constraints_per_sample), 19)
    assert_equal(np.min(constraints_per_sample), 11)
def test_binary_blocks_one_slack_graph():
    # testing cutting plane ssvm on easy binary dataset
    # generate graphs explicitly for each example
    X, Y = generate_blocks(n_samples=3)
    crf = GraphCRF(inference_method=inference_method)
    clf = OneSlackSSVM(model=crf, max_iter=100, C=1, check_constraints=True,
                       break_on_bad=True, n_jobs=1, tol=.1)
    x1, x2, x3 = X
    y1, y2, y3 = Y
    n_states = len(np.unique(Y))
    # delete some rows to make it more fun
    x1, y1 = x1[:, :-1], y1[:, :-1]
    x2, y2 = x2[:-1], y2[:-1]
    # generate graphs
    X_ = [x1, x2, x3]
    G = [make_grid_edges(x) for x in X_]

    # reshape / flatten x and y
    X_ = [x.reshape(-1, n_states) for x in X_]
    Y = [y.ravel() for y in [y1, y2, y3]]

    X = list(zip(X_, G))

    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    for y, y_pred in zip(Y, Y_pred):
        assert_array_equal(y, y_pred)
def test_binary_blocks_one_slack_graph():
    # testing cutting plane ssvm on easy binary dataset
    # generate graphs explicitly for each example
    for inference_method in ["dai", "lp"]:
        print("testing %s" % inference_method)
        X, Y = toy.generate_blocks(n_samples=3)
        crf = GraphCRF(inference_method=inference_method)
        clf = OneSlackSSVM(model=crf, max_iter=100, C=100, verbose=100,
                           check_constraints=True, break_on_bad=True,
                           n_jobs=1)
        x1, x2, x3 = X
        y1, y2, y3 = Y
        n_states = len(np.unique(Y))
        # delete some rows to make it more fun
        x1, y1 = x1[:, :-1], y1[:, :-1]
        x2, y2 = x2[:-1], y2[:-1]
        # generate graphs
        X_ = [x1, x2, x3]
        G = [make_grid_edges(x) for x in X_]

        # reshape / flatten x and y
        X_ = [x.reshape(-1, n_states) for x in X_]
        Y = [y.ravel() for y in [y1, y2, y3]]

        X = list(zip(X_, G))

        clf.fit(X, Y)
        Y_pred = clf.predict(X)
        for y, y_pred in zip(Y, Y_pred):
            assert_array_equal(y, y_pred)
def __init__(self, prob_estimator, C_ssvm=1., inference='ad3',
             inference_cache=50, tol=1., max_iter=200, n_jobs=1):
    """Called when initializing the classifier."""
    # self.C_logreg = C_logreg
    self.C_ssvm = C_ssvm
    self.inference = inference
    self.inference_cache = inference_cache
    self.tol = tol
    self.max_iter = max_iter
    self.n_jobs = n_jobs

    self.prob_estimator = prob_estimator
    self.crf = EdgeFeatureGraphCRF(inference_method=inference)
    self.ssvm = OneSlackSSVM(self.crf, inference_cache=inference_cache,
                             C=C_ssvm, tol=tol, max_iter=max_iter,
                             n_jobs=n_jobs)
def syntetic_test():
    # test model on different train set sizes & on different train sets
    results = np.zeros((18, 5))
    full_labeled = np.array([2, 4, 10, 25, 100])
    train_size = 400

    for dataset in range(1, 19):
        X, Y = load_syntetic(dataset)

        for j, nfull in enumerate(full_labeled):
            crf = EdgeCRF(n_states=10, n_features=10, n_edge_features=2,
                          inference_method='qpbo')
            clf = OneSlackSSVM(crf, max_iter=10000, C=0.01, verbose=0,
                               tol=0.1, n_jobs=4, inference_cache=100)

            x_train = X[:nfull]
            y_train = Y[:nfull]
            x_test = X[(train_size + 1):]
            y_test = Y[(train_size + 1):]

            try:
                clf.fit(x_train, y_train)
                y_pred = clf.predict(x_test)

                results[dataset - 1, j] = compute_error(y_test, y_pred)

                print('dataset=%d, nfull=%d, error=%f'
                      % (dataset, nfull, results[dataset - 1, j]))
            except ValueError:
                print('dataset=%d, nfull=%d: Failed' % (dataset, nfull))

    np.savetxt('results/syntetic/full_labeled.txt', results)
def losocv_CRF_prepro(xtrain, y, C=0.5, weight_shift=0, max_iter=1000,
                      fs=128):
    """Leave-one-subject-out cross validation for the CRF model, which
    requires special data handling. `y` should be a Pandas DataFrame."""
    epochs = 21600
    num_sub = 3
    # Indices of the subjects
    sub_indices = [
        np.arange(0, epochs),
        np.arange(epochs, epochs * 2),
        np.arange(epochs * 2, epochs * 3)
    ]
    res = []
    for i in range(len(sub_indices)):
        # For the ith iteration, train on the subjects other than the one
        # at index i
        train_index = np.concatenate(
            [sub_indices[(i + 1) % num_sub], sub_indices[(i + 2) % num_sub]])
        xtrain_ = xtrain[train_index]
        y_train = y.values[train_index]
        ytrain_ = y_train

        # The test subject is the one at index i
        test_index = sub_indices[i]
        xtest_ = xtrain[test_index]
        y_test = y.values[test_index]
        ytest_ = y_test

        # CRF model preprocessing
        ytrain_classes = np.reshape(y_train, (y_train.shape[0], ))
        # Reshape so that it works with the chain CRF (one chain per subject)
        xtrain_crf = np.reshape(xtrain_, (2, -1, xtrain_.shape[1]))
        ytrain_crf = np.reshape(ytrain_, (2, -1)) - 1

        # CRF model fitting:
        classes = np.unique(ytrain_)
        weights_crf = compute_class_weight("balanced", classes=classes,
                                           y=ytrain_classes)
        weights_crf[0] = weights_crf[0] + (2.5 * weight_shift)
        weights_crf[1] = weights_crf[1] + (1.5 * weight_shift)

        model = ChainCRF(class_weight=weights_crf)
        ssvm = OneSlackSSVM(model=model, C=C, max_iter=max_iter)
        ssvm.fit(xtrain_crf, ytrain_crf)

        # Test on the held-out subject
        xtest_crf = np.reshape(xtest_, (1, -1, xtest_.shape[1]))
        ytest_crf = np.reshape(ytest_, (1, -1)) - 1
        y_pred_crf = ssvm.predict(xtest_crf)
        y_pred_crf = np.asarray(y_pred_crf).reshape(-1) + 1

        resy = sklearn.metrics.balanced_accuracy_score(ytest_, y_pred_crf)
        print("Iteration, result:", i, resy)
        res.append(resy)
    return res
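# A minimal smoke-test sketch for losocv_CRF_prepro, assuming three subjects
# of 21600 epochs each with 10 features and labels in {1, 2, 3}. The random
# data below is a stand-in, not the original EEG/EMG features, and even with
# a small max_iter a full run is computationally heavy.
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
x_demo = rng.randn(21600 * 3, 10)
y_demo = pd.DataFrame(rng.randint(1, 4, size=(21600 * 3, 1)))
scores = losocv_CRF_prepro(x_demo, y_demo, C=0.5, max_iter=10)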
def CRF_pred(eeg1, eeg2, emg, y, eeg1test, eeg2test, emgtest, C=0.9,
             weight_shift=0, max_iter=1000, fs=128):
    # Use all three training subjects for fitting
    eeg1_train = eeg1.values
    eeg2_train = eeg2.values
    emg_train = emg.values
    y_train = y.values

    # Held-out test recordings
    eeg1_test = eeg1test.values
    eeg2_test = eeg2test.values
    emg_test = emgtest.values

    # CRF model preprocessing
    eeg1_ = process_EEG(eeg1_train)
    eeg2_ = process_EEG(eeg2_train)
    emg_ = process_EMG(emg_train)
    xtrain_ = np.concatenate((eeg1_, eeg2_, emg_), axis=1)
    ytrain_classes = np.reshape(y_train, (y_train.shape[0], ))
    ytrain_ = y_train

    eeg1_ = process_EEG(eeg1_test)
    eeg2_ = process_EEG(eeg2_test)
    emg_ = process_EMG(emg_test)
    xtest_ = np.concatenate((eeg1_, eeg2_, emg_), axis=1)

    # Reshape so that it works with the chain CRF (one chain per subject)
    xtrain_crf = np.reshape(xtrain_, (3, -1, xtrain_.shape[1]))
    ytrain_crf = np.reshape(ytrain_, (3, -1)) - 1

    # CRF model fitting:
    classes = np.unique(ytrain_)
    weights_crf = compute_class_weight("balanced", classes=classes,
                                       y=ytrain_classes)
    weights_crf[0] = weights_crf[0] + (2.5 * weight_shift)
    weights_crf[1] = weights_crf[1] + (1.5 * weight_shift)

    model = ChainCRF(class_weight=weights_crf)
    ssvm = OneSlackSSVM(model=model, C=C, max_iter=max_iter)
    ssvm.fit(xtrain_crf, ytrain_crf)

    # Predict on the two held-out test subjects
    xtest_crf = np.reshape(xtest_, (2, -1, xtest_.shape[1]))
    y_pred_crf = ssvm.predict(xtest_crf)
    y_pred_crf = np.asarray(y_pred_crf).reshape(-1) + 1

    return y_pred_crf
def __init__(self):
    # the model
    self.model = ChainCRF(directed=True)
    # the learner
    self.learner = OneSlackSSVM(model=self.model, C=.1, inference_cache=50,
                                tol=0.1, n_jobs=1)
def test(nfiles):
    X = []
    Y = []
    X_tst = []
    Y_tst = []
    ntrain = nfiles
    ntest = 5 * nfiles
    print("Training/testing with %d/%d files." % (ntrain, ntest))

    start = time.perf_counter()
    filename = '../maps/MAPS_AkPnCGdD_2/AkPnCGdD/MUS'
    # filename = '../maps/MAPS_AkPnCGdD_1/AkPnCGdD/ISOL/NO'
    files = dirs.get_files_with_extension(filename, '.mid')
    train_files = files[:ntrain]
    print("\t" + str(train_files))
    # test_files = files[ntrain:ntest + ntrain]  # for legit testing
    test_files = files[-ntest:]
    # list(...) forces the lazy map to run for its side effects on X and Y
    list(map(per_file, train_files, it.repeat(X, ntrain),
             it.repeat(Y, ntrain)))
    list(map(per_file, test_files, it.repeat(X_tst, ntest),
             it.repeat(Y_tst, ntest)))
    end = time.perf_counter()
    print("\tRead time: %f" % (end - start))
    print("\tnWindows train: " + str([X[i].shape[0] for i in range(len(X))]))

    start = time.perf_counter()
    crf = ChainCRF(n_states=2)
    clf = OneSlackSSVM(model=crf, C=100, n_jobs=-1, inference_cache=100,
                       tol=.1)
    clf.fit(np.array(X), np.array(Y))
    end = time.perf_counter()
    print("\tTrain time: %f" % (end - start))

    start = time.perf_counter()
    Y_pred = clf.predict(X_tst)
    comp = []
    for i in range(len(Y_tst)):
        for j in range(len(Y_tst[i])):
            comp.append((Y_tst[i][j], Y_pred[i][j]))
            print(Y_tst[i][j], end=' ')
        print()
        for j in range(len(Y_tst[i])):
            print(Y_pred[i][j], end=' ')
        print()
        print()
    print("\tTrue positives: %d" % comp.count((1, 1)))
    print("\tTrue negatives: %d" % comp.count((0, 0)))
    print("\tFalse positives: %d" % comp.count((0, 1)))
    print("\tFalse negatives: %d" % comp.count((1, 0)))
    end = time.perf_counter()
    print("\tTest time: %f" % (end - start))
def train(self, trajid_list, n_jobs=4):
    if self.poi_info is None:
        self.poi_info = self.dat_obj.calc_poi_info(trajid_list)

    # build POI_ID <--> POI_INDEX mapping for POIs used to train the CRF,
    # i.e. only POIs that occur in trajectories with len(traj) >= 2
    poi_set = {p for tid in trajid_list for p in self.dat_obj.traj_dict[tid]
               if len(self.dat_obj.traj_dict[tid]) >= 2}
    self.poi_list = sorted(poi_set)
    self.poi_id_dict, self.poi_id_rdict = dict(), dict()
    for idx, poi in enumerate(self.poi_list):
        self.poi_id_dict[poi] = idx
        self.poi_id_rdict[idx] = poi

    # generate training data
    train_traj_list = [self.dat_obj.traj_dict[k] for k in trajid_list
                       if len(self.dat_obj.traj_dict[k]) >= 2]
    node_features_list = Parallel(n_jobs=n_jobs)(
        delayed(calc_node_features)(tr[0], len(tr), self.poi_list,
                                    self.poi_info, self.dat_obj)
        for tr in train_traj_list)
    edge_features = calc_edge_features(trajid_list, self.poi_list,
                                       self.poi_info, self.dat_obj)

    # feature scaling: node features
    # should each example be flattened to one vector before scaling?
    self.fdim_node = node_features_list[0].shape
    X_node_all = np.vstack(node_features_list)
    X_node_all = self.scaler_node.fit_transform(X_node_all)
    X_node_all = X_node_all.reshape(-1, self.fdim_node[0],
                                    self.fdim_node[1])

    # feature scaling: edge features
    fdim_edge = edge_features.shape
    edge_features = self.scaler_edge.fit_transform(
        edge_features.reshape(fdim_edge[0] * fdim_edge[1], -1))
    self.edge_features = edge_features.reshape(fdim_edge)

    assert(len(train_traj_list) == X_node_all.shape[0])
    X_train = [(X_node_all[k, :, :], self.edge_features.copy(),
                (self.poi_id_dict[train_traj_list[k][0]],
                 len(train_traj_list[k])))
               for k in range(len(train_traj_list))]
    y_train = [np.array([self.poi_id_dict[k] for k in tr])
               for tr in train_traj_list]
    assert(len(X_train) == len(y_train))

    # train
    sm = MyModel(inference_train=self.inference_train,
                 inference_pred=self.inference_pred,
                 share_params=self.share_params,
                 multi_label=self.multi_label)
    if self.debug is True:
        print('C:', self.C)
    verbose = 1 if self.debug is True else 0
    self.osssvm = OneSlackSSVM(model=sm, C=self.C, n_jobs=n_jobs,
                               verbose=verbose)
    try:
        self.osssvm.fit(X_train, y_train, initialize=True)
        self.trained = True
        print('SSVM training finished.')
    # except ValueError:
    except Exception:
        self.trained = False
        sys.stderr.write('SSVM training FAILED.\n')
        # raise
    return self.trained
def train(X, y):
    X_train_directions = make_directions(X)
    Y_train_flat = [y.ravel()]
    inference = 'qpbo'
    # first, train on X with directions only:
    crf = EdgeFeatureGraphCRF(inference_method=inference)
    ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1, max_iter=10,
                        n_jobs=1, show_loss_every=1)
    ssvm.fit(X_train_directions, Y_train_flat, warm_start=False)
    return ssvm
def test_constraint_removal():
    digits = load_digits()
    X, y = digits.data, digits.target
    y = 2 * (y % 2) - 1  # even vs odd as +1 vs -1
    X = X / 16.
    pbl = BinaryClf(n_features=X.shape[1])
    clf_no_removal = OneSlackSSVM(model=pbl, max_iter=500, C=1,
                                  inactive_window=0, tol=0.01)
    clf_no_removal.fit(X, y)
    clf = OneSlackSSVM(model=pbl, max_iter=500, C=1, tol=0.01,
                       inactive_threshold=1e-8)
    clf.fit(X, y)

    # check that we learned something
    assert_greater(clf.score(X, y), .92)

    # results are mostly equal
    # if we decrease tol, they will get more similar
    assert_less(np.mean(clf.predict(X) != clf_no_removal.predict(X)), 0.02)

    # without removal, have as many constraints as iterations
    assert_equal(len(clf_no_removal.objective_curve_),
                 len(clf_no_removal.constraints_))

    # with removal, there are fewer constraints than iterations
    assert_less(len(clf.constraints_), len(clf.objective_curve_))
def test_one_slack_attractive_potentials():
    # test that submodular SSVM can learn the block dataset
    X, Y = generate_blocks(n_samples=10)
    crf = GridCRF(inference_method=inference_method)
    submodular_clf = OneSlackSSVM(model=crf, max_iter=200, C=1,
                                  check_constraints=True,
                                  negativity_constraint=[5],
                                  inference_cache=50)
    submodular_clf.fit(X, Y)
    Y_pred = submodular_clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_true(submodular_clf.w[5] < 0)
def test_multinomial_blocks_one_slack():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0)
    n_labels = len(np.unique(Y))
    crf = GridCRF(n_states=n_labels, inference_method=inference_method)
    clf = OneSlackSSVM(model=crf, max_iter=150, C=1,
                       check_constraints=True, break_on_bad=True, tol=.1,
                       inference_cache=50)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    assert_array_equal(Y, Y_pred)
def test_one_slack_attractive_potentials():
    # test that submodular SSVM can learn the block dataset
    X, Y = toy.generate_blocks(n_samples=10)
    crf = GridCRF()
    submodular_clf = OneSlackSSVM(model=crf, max_iter=200, C=100, verbose=1,
                                  check_constraints=True,
                                  positive_constraint=[5], n_jobs=-1)
    submodular_clf.fit(X, Y)
    Y_pred = submodular_clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_true(submodular_clf.w[5] < 0)  # don't ask me about signs
def syntetic_train_score_per_iter(result, only_weak=False, plot=True):
    w_history = result.data["w_history"]
    meta_data = result.meta
    n_full = meta_data["n_full"]
    n_train = meta_data["n_train"]
    n_inference_iter = meta_data["n_inference_iter"]
    dataset = meta_data["dataset"]
    C = meta_data["C"]
    latent_iter = meta_data["latent_iter"]
    max_iter = meta_data["max_iter"]
    inner_tol = meta_data["inner_tol"]
    outer_tol = meta_data["outer_tol"]
    alpha = meta_data["alpha"]
    min_changes = meta_data["min_changes"]
    initialize = meta_data["initialize"]

    crf = HCRF(n_states=10, n_features=10, n_edge_features=2, alpha=alpha,
               inference_method="gco", n_iter=n_inference_iter)
    base_clf = OneSlackSSVM(crf, max_iter=max_iter, C=C, verbose=0,
                            tol=inner_tol, n_jobs=4, inference_cache=100)
    clf = LatentSSVM(base_clf, latent_iter=latent_iter, verbose=2,
                     tol=outer_tol, min_changes=min_changes, n_jobs=4)

    X, Y = load_syntetic(dataset)
    Xtrain, Ytrain, Ytrain_full, Xtest, Ytest = \
        split_test_train(X, Y, n_full, n_train)

    if only_weak:
        Xtrain = [x for (i, x) in enumerate(Xtrain)
                  if not Ytrain[i].full_labeled]
        Ytrain_full = [y for (i, y) in enumerate(Ytrain_full)
                       if not Ytrain[i].full_labeled]

    base_clf.w = None
    clf.w_history_ = w_history
    clf.iter_done = w_history.shape[0]

    train_scores = []
    for score in clf.staged_score(Xtrain, Ytrain_full):
        train_scores.append(score)
    train_scores = np.array(train_scores)

    if plot:
        x = np.arange(0, train_scores.size)
        pl.rc("text", usetex=True)
        pl.rc("font", family="serif")

        pl.figure(figsize=(10, 10), dpi=96)
        pl.title("score on train set")
        pl.plot(x, train_scores)
        pl.scatter(x, train_scores)
        pl.xlabel("iteration")
        pl.xlim([-0.5, train_scores.size + 1])

    return train_scores
def test_multilabel_yeast_independent():
    yeast = fetch_mldata("yeast")
    X = yeast.data
    y = yeast.target.toarray().T.astype(int)
    # no edges for the moment
    edges = np.zeros((0, 2), dtype=int)
    pbl = MultiLabelProblem(n_features=X.shape[1], n_labels=y.shape[1],
                            edges=edges)
    ssvm = OneSlackSSVM(pbl, verbose=10)
    ssvm.fit(X, y)

    from IPython.core.debugger import Tracer
    Tracer()()
def test_multinomial_blocks_one_slack():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = toy.generate_blocks_multinomial(n_samples=10, noise=0.3, seed=0)
    n_labels = len(np.unique(Y))
    for inference_method in ['lp']:
        crf = GridCRF(n_states=n_labels, inference_method=inference_method)
        clf = OneSlackSSVM(model=crf, max_iter=50, C=100, verbose=100,
                           check_constraints=True, break_on_bad=True)
        clf.fit(X, Y)
        Y_pred = clf.predict(X)
        assert_array_equal(Y, Y_pred)
def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False,
             verbose=0, negativity_constraint=None, n_jobs=1,
             break_on_bad=False, show_loss_every=0, tol=1e-3,
             inference_cache=0, inactive_threshold=1e-5, inactive_window=50,
             logger=None, cache_tol='auto', switch_to=None):
    Pystruct_OneSlackSSVM.__init__(
        self, model, max_iter=max_iter, C=C,
        check_constraints=check_constraints, verbose=verbose,
        negativity_constraint=negativity_constraint, n_jobs=n_jobs,
        break_on_bad=break_on_bad, show_loss_every=show_loss_every, tol=tol,
        inference_cache=inference_cache,
        inactive_threshold=inactive_threshold,
        inactive_window=inactive_window, logger=logger, cache_tol=cache_tol,
        switch_to=switch_to)
def test_blobs_2d_one_slack():
    # make two gaussian blobs
    X, Y = make_blobs(n_samples=80, centers=2, random_state=1)
    Y = 2 * Y - 1
    # we have to add a constant 1 feature by hand :-/
    X = np.hstack([X, np.ones((X.shape[0], 1))])

    X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]

    pbl = BinarySVMModel(n_features=3)
    svm = OneSlackSSVM(pbl, verbose=30, C=1000)

    svm.fit(X_train, Y_train)
    assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_blobs_2d_one_slack():
    # make three gaussian blobs
    X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
    # we have to add a constant 1 feature by hand :-/
    X = np.hstack([X, np.ones((X.shape[0], 1))])

    X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]

    pbl = MultiClassClf(n_features=3, n_classes=3)
    svm = OneSlackSSVM(pbl, check_constraints=True, C=1000)

    svm.fit(X_train, Y_train)
    assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def set_params(self, trajid_list, w, n_jobs=4):
    if self.poi_info is None:
        self.poi_info = self.dat_obj.calc_poi_info(trajid_list)

    poi_set = {p for tid in trajid_list for p in self.dat_obj.traj_dict[tid]
               if len(self.dat_obj.traj_dict[tid]) >= 2}
    self.poi_list = sorted(poi_set)
    self.poi_id_dict, self.poi_id_rdict = dict(), dict()
    for idx, poi in enumerate(self.poi_list):
        self.poi_id_dict[poi] = idx
        self.poi_id_rdict[idx] = poi

    # generate training data
    train_traj_list = [self.dat_obj.traj_dict[k] for k in trajid_list
                       if len(self.dat_obj.traj_dict[k]) >= 2]
    node_features_list = Parallel(n_jobs=n_jobs)(
        delayed(calc_node_features)(tr[0], len(tr), self.poi_list,
                                    self.poi_info, self.dat_obj)
        for tr in train_traj_list)
    edge_features = calc_edge_features(trajid_list, self.poi_list,
                                       self.poi_info, self.dat_obj)

    # feature scaling: node features
    # should each example be flattened to one vector before scaling?
    self.fdim_node = node_features_list[0].shape
    X_node_all = np.vstack(node_features_list)
    X_node_all = self.scaler_node.fit_transform(X_node_all)
    X_node_all = X_node_all.reshape(-1, self.fdim_node[0],
                                    self.fdim_node[1])

    # feature scaling: edge features
    fdim_edge = edge_features.shape
    edge_features = self.scaler_edge.fit_transform(
        edge_features.reshape(fdim_edge[0] * fdim_edge[1], -1))
    self.edge_features = edge_features.reshape(fdim_edge)

    assert(len(train_traj_list) == X_node_all.shape[0])
    X_train = [(X_node_all[k, :, :], self.edge_features.copy(),
                (self.poi_id_dict[train_traj_list[k][0]],
                 len(train_traj_list[k])))
               for k in range(len(train_traj_list))]
    y_train = [np.array([self.poi_id_dict[k] for k in tr])
               for tr in train_traj_list]
    assert(len(X_train) == len(y_train))

    n_features = X_train[0][0].shape[1]
    n_states = len(np.unique(np.hstack([y.ravel() for y in y_train])))
    n_edge_features = X_train[0][1].shape[2]
    sm = MyModel(inference_train=self.inference_train,
                 inference_pred=self.inference_pred,
                 share_params=self.share_params,
                 multi_label=self.multi_label, n_states=n_states,
                 n_features=n_features, n_edge_features=n_edge_features,
                 debug=self.debug)
    self.learner = OneSlackSSVM(model=sm, C=self.C, n_jobs=1)
    self.learner.w = w
    self.trained = True
def test_with_crosses_bad_init():
    # use less perfect initialization
    rnd = np.random.RandomState(0)
    X, Y = generate_crosses(n_samples=20, noise=5, n_crosses=1, total_size=8)
    X_test, Y_test = X[10:], Y[10:]
    X, Y = X[:10], Y[:10]
    crf = LatentGridCRF(n_states_per_label=2)
    crf.initialize(X, Y)
    H_init = crf.init_latent(X, Y)

    mask = rnd.uniform(size=H_init.shape) > .7
    # integer division rounds masked entries down to the label's first
    # latent state
    H_init[mask] = 2 * (H_init[mask] // 2)

    one_slack_ssvm = OneSlackSSVM(crf, inactive_threshold=1e-8,
                                  cache_tol=.0001, inference_cache=50,
                                  C=100)
    clf = LatentSSVM(one_slack_ssvm)
    clf.fit(X, Y, H_init=H_init)
    Y_pred = clf.predict(X)

    assert_array_equal(np.array(Y_pred), Y)
    # test that score is not always 1
    assert_true(.98 < clf.score(X_test, Y_test) < 1)
def test_standard_svm_blobs_2d():
    # no edges, reduce to crammer-singer svm
    X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
    # we have to add a constant 1 feature by hand :-/
    X = np.hstack([X, np.ones((X.shape[0], 1))])

    X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
    X_train_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=int))
                      for x in X_train]
    X_test_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=int))
                     for x in X_test]

    pbl = GraphCRF(n_features=3, n_states=3)
    svm = OneSlackSSVM(pbl, verbose=10, check_constraints=True, C=1000)

    svm.fit(X_train_graphs, Y_train[:, np.newaxis])
    assert_array_equal(Y_test, np.hstack(svm.predict(X_test_graphs)))
def test_latent_node_boxes_standard_latent():
    # learn the "easy" 2x2 boxes dataset.
    # a 2x2 box is placed randomly in a 4x4 grid
    # we add a latent variable for each 2x2 patch
    # that should make the model fairly simple
    X, Y = make_simple_2x2(seed=1, n_samples=40)
    latent_crf = LatentNodeCRF(n_labels=2, n_hidden_states=2, n_features=1)
    one_slack = OneSlackSSVM(latent_crf)
    n_slack = NSlackSSVM(latent_crf)
    subgradient = SubgradientSSVM(latent_crf, max_iter=100)

    for base_svm in [one_slack, n_slack, subgradient]:
        base_svm.C = 10
        latent_svm = LatentSSVM(base_svm, latent_iter=10)

        # make edges for hidden states:
        edges = make_edges_2x2()
        G = [np.vstack([make_grid_edges(x), edges]) for x in X]

        # reshape / flatten x and y
        X_flat = [x.reshape(-1, 1) for x in X]
        Y_flat = [y.ravel() for y in Y]

        # each sample is (features, edges, n_hidden_nodes)
        X_ = list(zip(X_flat, G, [2 * 2 for x in X_flat]))
        latent_svm.fit(X_[:20], Y_flat[:20])

        assert_array_equal(latent_svm.predict(X_[:20]), Y_flat[:20])
        assert_equal(latent_svm.score(X_[:20], Y_flat[:20]), 1)

        # test that score is not always 1
        assert_true(.98 < latent_svm.score(X_[20:], Y_flat[20:]) < 1)
def do_comparison(X_train, y_train, X_test, y_test, dataset):
    # evaluate both svms on a given dataset, generate plots
    Cs = 10. ** np.arange(-4, 1)
    multisvm = MultiSVM()
    svm = OneSlackSSVM(MultiClassClf(), tol=0.01)

    accs_pystruct, times_pystruct = eval_on_data(X_train, y_train, X_test,
                                                 y_test, svm, Cs=Cs)
    accs_svmstruct, times_svmstruct = eval_on_data(X_train, y_train,
                                                   X_test, y_test,
                                                   multisvm, Cs=Cs)

    plot_curves(times_svmstruct, times_pystruct, Cs=Cs,
                title="learning time (s) %s" % dataset,
                filename="times_%s.pdf" % dataset)
    plot_curves(accs_svmstruct, accs_pystruct, Cs=Cs,
                title="accuracy %s" % dataset,
                filename="accs_%s.pdf" % dataset)
def main():
    print("Please be patient. Will take 5-20 minutes.")
    snakes = load_snakes()
    X_train, Y_train = snakes['X_train'], snakes['Y_train']

    X_train = [one_hot_colors(x) for x in X_train]
    Y_train_flat = [y_.ravel() for y_ in Y_train]

    X_train_directions, X_train_edge_features = prepare_data(X_train)

    # first, train on X with directions only:
    crf = EdgeFeatureGraphCRF(inference_method='qpbo')
    ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1,
                        switch_to='ad3', n_jobs=-1)
    ssvm.fit(X_train_directions, Y_train_flat)

    # Evaluate using confusion matrix.
    # Clearly the middle of the snake is the hardest part.
    X_test, Y_test = snakes['X_test'], snakes['Y_test']
    X_test = [one_hot_colors(x) for x in X_test]
    Y_test_flat = [y_.ravel() for y_ in Y_test]
    X_test_directions, X_test_edge_features = prepare_data(X_test)
    Y_pred = ssvm.predict(X_test_directions)
    print("Results using only directional features for edges")
    print("Test accuracy: %.3f"
          % accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred)))
    print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred)))

    # now, use more informative edge features:
    crf = EdgeFeatureGraphCRF(inference_method='qpbo')
    ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1,
                        switch_to='ad3', n_jobs=-1)
    ssvm.fit(X_train_edge_features, Y_train_flat)
    Y_pred2 = ssvm.predict(X_test_edge_features)
    print("Results using also input features for edges")
    print("Test accuracy: %.3f"
          % accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
    print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred2)))

    # plot stuff
    fig, axes = plt.subplots(2, 2)
    axes[0, 0].imshow(snakes['X_test'][0], interpolation='nearest')
    axes[0, 0].set_title('Input')
    y = Y_test[0].astype(int)
    bg = 2 * (y != 0)  # enhance contrast
    axes[0, 1].matshow(y + bg, cmap=plt.cm.Greys)
    axes[0, 1].set_title("Ground Truth")
    axes[1, 0].matshow(Y_pred[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
    axes[1, 0].set_title("Prediction w/o edge features")
    axes[1, 1].matshow(Y_pred2[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
    axes[1, 1].set_title("Prediction with edge features")
    for a in axes.ravel():
        a.set_xticks(())
        a.set_yticks(())
    plt.show()
    from IPython.core.debugger import Tracer
    Tracer()()
def synteticTrain(peer):
    # train model on a single set
    clf = OneSlackSSVM(peer, max_iter=10000, C=0.01, verbose=2,
                       tol=0.1, n_jobs=4, inference_cache=100)

    start = time()
    clf.fit()
    stop = time()

    models_basedir = peer.config.get("models.basedir")
    np.savetxt(models_basedir + 'syntetic_full.csv', clf.w[None],
               delimiter=' ')
    # with open(models_basedir + 'syntetic_full' + '.pickle', 'w') as f:
    #     pickle.dump(clf, f)

    peer.log('Elapsed time: %f s' % (stop - start))
    return clf
def test_svm_as_crf_pickling():
    iris = load_iris()
    X, y = iris.data, iris.target
    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
    Y = y.reshape(-1, 1)

    X_train, X_test, y_train, y_test = train_test_split(X_, Y,
                                                        random_state=1)
    _, file_name = mkstemp()

    pbl = GraphCRF(n_features=4, n_states=3, inference_method="unary")
    logger = SaveLogger(file_name)
    svm = OneSlackSSVM(pbl, check_constraints=True, C=1, n_jobs=1,
                       logger=logger)
    svm.fit(X_train, y_train)

    assert_less(0.97, svm.score(X_test, y_test))
    assert_less(0.97, logger.load().score(X_test, y_test))
def msrc_test():
    # test model on different train set sizes
    basedir = '../data/msrc/trainmasks/'
    models_basedir = 'models/msrc/'

    quality = []

    Xtest, Ytest = load_msrc('test')
    Ytest = remove_areas(Ytest)

    Xtrain, Ytrain = load_msrc('train')
    Ytrain = remove_areas(Ytrain)

    for n_train in [20, 40, 80, 160, 276]:
        crf = EdgeCRF(n_states=24, n_features=2028, n_edge_features=4,
                      inference_method='gco')
        clf = OneSlackSSVM(crf, max_iter=1000, C=0.01, verbose=0,
                           tol=0.1, n_jobs=4, inference_cache=100)

        if n_train != 276:
            train_mask = np.genfromtxt(basedir
                                       + 'trainMaskX%d.txt' % n_train)
            train_mask = train_mask[:277].astype(bool)
        else:
            train_mask = np.ones(276).astype(bool)

        curX = []
        curY = []
        for (s, x, y) in zip(train_mask, Xtrain, Ytrain):
            if s:
                curX.append(x)
                curY.append(y)

        start = time()
        clf.fit(curX, curY)
        stop = time()

        np.savetxt(models_basedir + 'test_model_%d.csv' % n_train, clf.w)
        # pickle requires a binary file handle
        with open(models_basedir + 'test_model_%d.pickle' % n_train,
                  'wb') as f:
            pickle.dump(clf, f)

        Ypred = clf.predict(Xtest)
        q = 1 - compute_error(Ytest, Ypred)

        print('n_train=%d, quality=%f, time=%f' % (n_train, q,
                                                   stop - start))
        quality.append(q)

    np.savetxt('results/msrc/msrc_full.txt', quality)
def train(x_train, y_train, x_test, y_test):
    x_train = np.asarray(x_train, dtype=float)
    y_train = np.asarray(y_train, dtype=np.int64)
    # x_test = np.asarray(x_test, dtype=float)
    # y_test = np.asarray(y_test, dtype=np.int64)
    x_test = x_train
    y_test = y_train

    from pystruct.learners import (NSlackSSVM, OneSlackSSVM,
                                   SubgradientSSVM, LatentSSVM,
                                   SubgradientLatentSSVM,
                                   PrimalDSStructuredSVM)
    from pystruct.models import MultiLabelClf, MultiClassClf

    clf = OneSlackSSVM(MultiLabelClf(), C=1, show_loss_every=1, verbose=1,
                       max_iter=1000)
    # print(x_train, y_train)
    # input()
    clf.fit(x_train, y_train)
    result = clf.predict(x_test)
    print('Result: \n', result)
    print('True label:\n', y_test)
    clf.score(x_test, y_test)
    print('\n')
    count = 0
    for i in range(len(result)):
        # print(np.sum(np.square(y_test[i] - result[i])))
        if np.sum(np.square(y_test[i] - result[i])) != 0:
            print('True label: ', y_test[i], 'Predict: ', result[i])
            count += 1
    print(count)
    translate_vector(x_test, y_test)
def syntetic():
    # train model on a single set
    models_basedir = 'models/syntetic/'
    crf = EdgeCRF(n_states=10, n_features=10, n_edge_features=2,
                  inference_method='gco')
    clf = OneSlackSSVM(crf, max_iter=10000, C=0.01, verbose=2,
                       tol=0.1, n_jobs=4, inference_cache=100)

    X, Y = load_syntetic(1)

    x_train, x_test, y_train, y_test = train_test_split(X, Y,
                                                        train_size=100,
                                                        random_state=179)

    start = time()
    clf.fit(x_train, y_train)
    stop = time()

    np.savetxt(models_basedir + 'syntetic_full.csv', clf.w)
    # pickle requires a binary file handle
    with open(models_basedir + 'syntetic_full.pickle', 'wb') as f:
        pickle.dump(clf, f)

    y_pred = clf.predict(x_test)

    print('Error on test set: %f' % compute_error(y_test, y_pred))
    print('Score on test set: %f' % clf.score(x_test, y_test))
    print('Score on train set: %f' % clf.score(x_train, y_train))
    print('Norm of weight vector: |w|=%f' % np.linalg.norm(clf.w))
    print('Elapsed time: %f s' % (stop - start))

    return clf
def test_standard_svm_blobs_2d():
    # no edges, reduce to crammer-singer svm
    X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
    # we have to add a constant 1 feature by hand :-/
    X = np.hstack([X, np.ones((X.shape[0], 1))])

    X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
    X_train_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=int))
                      for x in X_train]
    X_test_graphs = [(x[np.newaxis, :], np.empty((0, 2), dtype=int))
                     for x in X_test]

    pbl = GraphCRF(n_features=3, n_states=3, inference_method='unary')
    svm = OneSlackSSVM(pbl, check_constraints=True, C=1000)

    svm.fit(X_train_graphs, Y_train[:, np.newaxis])
    assert_array_equal(Y_test, np.hstack(svm.predict(X_test_graphs)))
class PystructSequenceLearner(SequenceLearner):
    def __init__(self):
        # the model
        self.model = ChainCRF(directed=True)
        # the learner
        self.learner = OneSlackSSVM(model=self.model, C=.1,
                                    inference_cache=50, tol=0.1, n_jobs=1)
        # self.learner = StructuredPerceptron(model=self.model, average=True)

    def fit(self, X, y):
        # Train linear chain CRF
        self.learner.fit(X, y)

    def predict(self, X):
        return self.learner.predict(X)
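# A minimal usage sketch for PystructSequenceLearner (the random sequences
# below are illustrative stand-ins, not data from the original project):
# ChainCRF expects X as a list of (n_nodes, n_features) arrays and y as a
# list of integer label arrays of matching length.
import numpy as np

rng = np.random.RandomState(0)
X_demo = [rng.randn(8, 5) for _ in range(10)]           # ten short chains
y_demo = [(x[:, 0] > 0).astype(int) for x in X_demo]    # two label states

learner = PystructSequenceLearner()
learner.fit(X_demo, y_demo)
y_hat = learner.predict(X_demo)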
def conditional_random_fields(X, y):
    """Train a single-node GraphCRF SSVM and report test performance."""
    X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=int)) for x in X]
    Y = y.reshape(-1, 1)

    X_train, X_test, y_train, y_test = train_test_split(X_, Y)

    pbl = GraphCRF()
    svm = OneSlackSSVM(pbl)

    svm.fit(X_train, y_train)
    y_pred = np.vstack(svm.predict(X_test))
    print("Score with pystruct crf svm: %f" % (np.mean(y_pred == y_test)))
    print(classification_report(y_test, y_pred))
    plot_confusion_matrix(y_test, y_pred)
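# Example invocation with toy data (assumes the module-level helpers such as
# plot_confusion_matrix are importable alongside conditional_random_fields):
from sklearn.datasets import load_iris

iris = load_iris()
conditional_random_fields(iris.data, iris.target)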
def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False,
             verbose=0, negativity_constraint=None,
             positivity_constraint=None, null_constraints=None,
             hard_constraints=None, n_jobs=1, break_on_bad=False,
             show_loss_every=0, tol=1e-3, inference_cache=0,
             inactive_threshold=1e-5, inactive_window=50, logger=None,
             cache_tol='auto', switch_to=None,
             generate_hard_constraints=None, initialize_constraints=None,
             qp_eps=1e-5):
    OneSlackSSVM.__init__(self, model, max_iter, C, check_constraints,
                          verbose, negativity_constraint,
                          positivity_constraint, hard_constraints, n_jobs,
                          break_on_bad, show_loss_every, tol,
                          inference_cache, inactive_threshold,
                          inactive_window, logger, cache_tol, switch_to)

    if (hard_constraints is None and positivity_constraint is None
            and negativity_constraint is None
            and generate_hard_constraints is None):
        self.hard_satisfied = True
    else:
        # there are hard constraints to satisfy
        self.hard_satisfied = False

    self.null_constraints = null_constraints
    self.generate_hard_constraints = generate_hard_constraints
    self.initialize_constraints = initialize_constraints
    self.cutting_constraints = []
    self._inference_calls = 0
    self.qp_eps = qp_eps
    self.converged = False
def test_one_slack_constraint_caching():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = toy.generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0,
                                           size_x=9)
    n_labels = len(np.unique(Y))
    crf = GridCRF(n_states=n_labels)
    clf = OneSlackSSVM(model=crf, max_iter=150, C=1,
                       check_constraints=True, break_on_bad=True,
                       inference_cache=50, inactive_window=0)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_equal(len(clf.inference_cache_), len(X))
    # there should be 21 constraints, which is fewer than the 94 iterations
    # that are done
    assert_equal(len(clf.inference_cache_[0]), 21)
    # check that we didn't change the behavior of how we construct the cache
    constraints_per_sample = [len(cache) for cache in clf.inference_cache_]
    assert_equal(np.max(constraints_per_sample), 21)
    assert_equal(np.min(constraints_per_sample), 21)
def train_seq(X, y, crf_params):
    X_ = [X[k] for k in sorted(X.keys())]
    y_ = [y[k] for k in sorted(y.keys())]
    # inverse-frequency class weights, normalized to sum to one
    class_sizes = np.bincount(np.hstack(y_))
    cw = 1. / class_sizes
    cw = cw / cw.sum()
    return OneSlackSSVM(model=ChainCRF(inference_method='max-product',
                                       class_weight=cw),
                        max_iter=100000, verbose=False,
                        **crf_params).fit(X_, y_)
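# Illustrative call to train_seq: X and y are dicts keyed by sequence id,
# and crf_params is forwarded to OneSlackSSVM (the parameter values here
# are assumptions, not tuned settings).
import numpy as np

rng = np.random.RandomState(1)
X_demo = {i: rng.randn(20, 4) for i in range(5)}        # five sequences
y_demo = {i: rng.randint(0, 3, size=20) for i in range(5)}
ssvm = train_seq(X_demo, y_demo, crf_params={'C': 0.1, 'tol': 0.01})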
def test_one_slack_constraint_caching():
    # testing cutting plane ssvm on easy multinomial dataset
    X, Y = toy.generate_blocks_multinomial(n_samples=10, noise=0.3, seed=0)
    n_labels = len(np.unique(Y))
    crf = GridCRF(n_states=n_labels, inference_method='lp')
    clf = OneSlackSSVM(model=crf, max_iter=50, C=100, verbose=100,
                       check_constraints=True, break_on_bad=True,
                       inference_cache=50)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    assert_array_equal(Y, Y_pred)
    assert_equal(len(clf.inference_cache_), len(X))
    # there should be 9 constraints, which is fewer than the 16 iterations
    # that are done
    assert_equal(len(clf.inference_cache_[0]), 9)
    # check that we didn't change the behavior of how we construct the cache
    constraints_per_sample = [len(cache) for cache in clf.inference_cache_]
    assert_equal(np.max(constraints_per_sample), 10)
    assert_equal(np.min(constraints_per_sample), 8)
def test_equal_class_weights():
    # test that equal class weight is the same as no class weight
    X, Y = make_blobs(n_samples=80, centers=3, random_state=42)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]

    pbl = MultiClassClf(n_features=3, n_classes=3)
    svm = OneSlackSSVM(pbl, C=10)

    svm.fit(X_train, Y_train)
    predict_no_class_weight = svm.predict(X_test)

    pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
                                     class_weight=np.ones(3))
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10)
    svm_class_weight.fit(X_train, Y_train)
    predict_class_weight = svm_class_weight.predict(X_test)

    assert_array_equal(predict_no_class_weight, predict_class_weight)
    assert_array_almost_equal(svm.w, svm_class_weight.w)
def train_structured_svm(observations, targets):
    """
    :param observations: our train dataset
    :param targets: multiple target variables
    :return: the structured svm model
    """
    # ideally you would specify which labels are connected; for now, we use
    # the fully connected label graph
    n_labels = len(targets[0])
    full = np.vstack([x for x in itertools.combinations(range(n_labels), 2)])
    # tree = chow_liu_tree(targets)

    # Choose the best model...
    full_model = MultiLabelClf(edges=full, inference_method='lp')
    # tree_model = MultiLabelClf(edges=tree, inference_method="max-product")
    full_ssvm = OneSlackSSVM(full_model, inference_cache=50, C=.1, tol=0.01)

    full_ssvm.fit(np.array(observations), np.array(targets))

    return full_ssvm
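# A hedged quick check for train_structured_svm: three binary labels per
# sample with random features (illustrative data only, not from the
# original project).
import numpy as np

rng = np.random.RandomState(42)
obs_demo = rng.randn(50, 6)
targets_demo = (rng.rand(50, 3) > 0.5).astype(int)
ssvm = train_structured_svm(obs_demo, targets_demo)
print(ssvm.predict(obs_demo[:5]))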
def test_one_slack_repellent_potentials():
    # test non-submodular problem with and without submodularity constraint
    # dataset is checkerboard
    X, Y = generate_checker()
    crf = GridCRF(inference_method=inference_method)
    clf = OneSlackSSVM(model=crf, max_iter=10, C=0.01,
                       check_constraints=True)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    # standard crf can predict perfectly
    assert_array_equal(Y, Y_pred)

    submodular_clf = OneSlackSSVM(model=crf, max_iter=10, C=0.01,
                                  check_constraints=True,
                                  negativity_constraint=[4, 5, 6])
    submodular_clf.fit(X, Y)
    Y_pred = submodular_clf.predict(X)
    assert_less(submodular_clf.score(X, Y), 0.99)
    # submodular crf cannot do better than unaries
    for i, x in enumerate(X):
        y_pred_unaries = crf.inference(x, np.array([1, 0, 0, 1, 0, 0, 0]))
        assert_array_equal(y_pred_unaries, Y_pred[i])
def test_class_weights():
    X, Y = make_blobs(n_samples=210, centers=3, random_state=1,
                      cluster_std=3, shuffle=False)
    X = np.hstack([X, np.ones((X.shape[0], 1))])
    X, Y = X[:170], Y[:170]

    pbl = MultiClassClf(n_features=3, n_classes=3)
    svm = OneSlackSSVM(pbl, C=10)
    svm.fit(X, Y)

    weights = 1. / np.bincount(Y)
    weights *= len(weights) / np.sum(weights)
    pbl_class_weight = MultiClassClf(n_features=3, n_classes=3,
                                     class_weight=weights)
    svm_class_weight = OneSlackSSVM(pbl_class_weight, C=10)
    svm_class_weight.fit(X, Y)

    assert_greater(f1_score(Y, svm_class_weight.predict(X),
                            average='macro'),
                   f1_score(Y, svm.predict(X), average='macro'))