# NOTE: these helpers assume the usual imports are in scope (numpy as np,
# joblib's Parallel/delayed, matplotlib.pyplot as plt, pystruct's learners,
# models and SaveLogger, and this project's dataset/helper modules such as
# NYUSegmentation, DataBunch, load_single_file, add_edges, add_edge_features,
# eval_on_sp); the import lines are not shown here.


def load_nyu(ds='train', n_sp=300, sp='rgb'):
    # trigger cache.....
    dataset = NYUSegmentation()
    file_names = dataset.get_split(ds)
    if ds == "test":
        reorder = np.array([2, 0, 3, 1])
    else:
        reorder = None
    # load image to generate superpixels
    result = Parallel(n_jobs=-1)(delayed(load_single_file)(dataset, f, n_sp, sp,
                                                           reorder=reorder)
                                 for f in file_names)
    X, Y, superpixels = zip(*result)
    return DataBunch(X, Y, file_names, superpixels)
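# Usage sketch (hedged: assumes the NYU dataset is available locally and that
# load_single_file / DataBunch come from this project's helper modules):
#   data = load_nyu('train', n_sp=500, sp='rgbd')
#   features, labels = data.X, data.Y            # per-image superpixel features / labels
#   names, sps = data.file_names, data.superpixels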
def train_svm(C=0.1, grid=False):
    ds = NYUSegmentation()
    data_train = load_nyu("train", n_sp=500, sp='rgbd')
    # class_weight='auto' is the legacy spelling of 'balanced' in older scikit-learn
    svm = LinearSVC(C=C, dual=False, class_weight='auto')

    # disabled: augment superpixel features with mean point-cloud normals
    #N_train = []
    #for f, sp in zip(data_train.file_names, data_train.superpixels):
    #    normals = ds.get_pointcloud_normals(f)[:, :, 3:]
    #    mean_normals = get_sp_normals(normals, sp)
    #    N_train.append(mean_normals * .1)
    #N_flat_train = np.vstack(N_train)

    X, y = np.vstack(data_train.X), np.hstack(data_train.Y)
    #X = np.hstack([X, N_flat_train])
    svm.fit(X, y)
    print(svm.score(X, y))
    eval_on_sp(ds, data_train, [svm.predict(x) for x in data_train.X],
               print_results=True)

    data_val = load_nyu("val", n_sp=500, sp='rgbd')
    #N_val = []
    #for f, sp in zip(data_val.file_names, data_val.superpixels):
    #    normals = ds.get_pointcloud_normals(f)[:, :, 3:]
    #    mean_normals = get_sp_normals(normals, sp)
    #    N_val.append(mean_normals * .1)
    eval_on_sp(ds, data_val, [svm.predict(x) for x in data_val.X],
               print_results=True)
def load_nyu_pixelwise(ds='train'):
    if ds == "test":
        reorder = np.array([2, 0, 3, 1])
    else:
        reorder = np.arange(4)
    # trigger cache.
    dataset = NYUSegmentation()
    file_names, X, Y = [], [], []
    for file_name in dataset.get_split(ds):
        print(file_name)
        file_names.append(file_name)
        gt = dataset.get_ground_truth(file_name)
        prediction = get_probabilities(file_name, dataset.directory)
        Y.append(gt)
        X.append(prediction[:, :, reorder])
    return DataBunchNoSP(X, Y, file_names)
def main(C=1):
    dataset = NYUSegmentation()
    # load training data
    data_train = load_nyu('train', n_sp=500, sp='rgbd')
    data_train = add_edges(data_train)
    data_train = add_edge_features(dataset, data_train, depth_diff=True,
                                   normal_angles=True)
    data_train = make_hierarchical_data(dataset, data_train)
    data_train = discard_void(dataset, data_train)

    n_states = 4.
    print("number of samples: %s" % len(data_train.X))
    class_weights = 1. / np.bincount(np.hstack(data_train.Y))
    class_weights *= n_states / np.sum(class_weights)
    #class_weights = np.ones(n_states)
    print(class_weights)
    #model = crfs.GraphCRF(n_states=n_states,
    #                      n_features=data_train.X[0][0].shape[1],
    #                      inference_method='qpbo', class_weight=class_weights)
    model = crfs.EdgeFeatureLatentNodeCRF(n_hidden_states=5,
                                          n_edge_features=5,
                                          inference_method='qpbo',
                                          class_weight=class_weights,
                                          symmetric_edge_features=[0, 1],
                                          latent_node_features=False,
                                          n_labels=4)
    experiment_name = "rgbd_normal_angles_fold1_strong_reweight%f" % C
    base_ssvm = learners.OneSlackSSVM(
        model, verbose=2, C=C, max_iter=100000, n_jobs=1, tol=0.001,
        show_loss_every=100, inference_cache=50, cache_tol='auto',
        logger=SaveLogger(experiment_name + ".pickle", save_every=100),
        inactive_threshold=1e-5, break_on_bad=False, inactive_window=50,
        switch_to=("ad3", {'branch_and_bound': True}))

    latent_logger = SaveLogger("lssvm_" + experiment_name + "_%d.pickle",
                               save_every=1)
    ssvm = learners.LatentSSVM(base_ssvm, logger=latent_logger, latent_iter=3)

    ssvm.fit(data_train.X, data_train.Y)
    print("fit finished!")
    return
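# Usage sketch (assumption: this training script is run directly, e.g. from an
# interactive session or a "__main__" guard not shown here):
#   main(C=1)   # trains the LatentSSVM; SaveLogger writes snapshots to
#               # "rgbd_normal_angles_fold1_strong_reweight<C>.pickle" and
#               # "lssvm_<experiment_name>_<iteration>.pickle"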
def eval_sp_prediction():
    dataset = NYUSegmentation()
    data = load_nyu('val', n_sp=500, sp='rgbd')
    predictions = [np.argmax(x, axis=-1) for x in data.X]
    #predictions = data.Y
    hamming, jaccard = eval_on_sp(dataset, data, predictions,
                                  print_results=True)
def eval_pixel_prediction():
    dataset = NYUSegmentation()
    data = load_nyu_pixelwise('val')
    predictions = [np.argmax(x, axis=-1) for x in data.X]
    hamming, jaccard = eval_on_pixels(dataset, data.Y, predictions,
                                      print_results=True)
def crazy_visual():
    dataset = NYUSegmentation()
    # load training data
    data = load_nyu(n_sp=500)
    data = add_edges(data)

    for x, image_name, superpixels, y in zip(data.X, data.file_names,
                                             data.superpixels, data.Y):
        print(image_name)
        if int(image_name) != 11:
            continue
        image = dataset.get_image(image_name)
        plt.figure(figsize=(20, 20))
        boundary_image = mark_boundaries(image, superpixels)
        plt.imshow(boundary_image)
        gridx, gridy = np.mgrid[:superpixels.shape[0], :superpixels.shape[1]]
        edges = x[1]
        points_normals = dataset.get_pointcloud_normals(image_name)
        centers2d = get_superpixel_centers(superpixels)
        centers3d = [np.bincount(superpixels.ravel(), weights=c.ravel())
                     for c in points_normals[:, :, :3].reshape(-1, 3).T]
        centers3d = (np.vstack(centers3d) / np.bincount(superpixels.ravel())).T
        sp_normals = get_sp_normals(points_normals[:, :, 3:], superpixels)
        offset = centers3d[edges[:, 0]] - centers3d[edges[:, 1]]
        offset = offset / np.sqrt(np.sum(offset ** 2, axis=1))[:, np.newaxis]
        #mean_normal = (sp_normals[edges[:, 0]] + sp_normals[edges[:, 1]]) / 2.
        mean_normal = sp_normals[edges[:, 0]]
        #edge_features = np.arccos(np.abs((offset * mean_normal).sum(axis=1))) * 2. / np.pi
        edge_features = 1 - np.abs((offset * mean_normal).sum(axis=1))
        # zero out edges where either endpoint has no normal estimate (nan normals)
        no_normals = (np.all(sp_normals[edges[:, 0]] == 0, axis=1)
                      + np.all(sp_normals[edges[:, 1]] == 0, axis=1))
        edge_features[no_normals] = 0

        if True:
            coords = points_normals[:, :, :3].reshape(-1, 3)
            perm = np.random.permutation(superpixels.max() + 1)
            mv.points3d(coords[:, 0], coords[:, 1], coords[:, 2],
                        perm[superpixels.ravel()], mode='point')
            #mv.points3d(centers3d[:, 0], centers3d[:, 1], centers3d[:, 2],
            #            scale_factor=.04)
            mv.quiver3d(centers3d[:, 0], centers3d[:, 1], centers3d[:, 2],
                        sp_normals[:, 0], sp_normals[:, 1], sp_normals[:, 2])
            mv.show()
            # drop into the debugger to inspect the 3d visualization
            from IPython.core.debugger import Tracer
            Tracer()()

        for i, edge in enumerate(edges):
            e0, e1 = edge
            #color = (dataset.colors[y[e0]] + dataset.colors[y[e1]]) / (2. * 255.)
            #f = edge_features[i]
            #if f < 0:
            #    e0, e1 = e1, e0
            #    f = -f
            #plt.arrow(centers[e0][0], centers[e0][1],
            #          centers[e1][0] - centers[e0][0],
            #          centers[e1][1] - centers[e0][1],
            #          width=f * 5)
            color = "black"
            plt.plot([centers2d[e0][0], centers2d[e1][0]],
                     [centers2d[e0][1], centers2d[e1][1]],
                     c=color, linewidth=edge_features[i] * 5)
        plt.scatter(centers2d[:, 0], centers2d[:, 1], s=100)
        plt.tight_layout()
        plt.xlim(0, superpixels.shape[1])
        plt.ylim(superpixels.shape[0], 0)
        plt.axis("off")
        plt.savefig("figures/normal_relative/%s.png" % image_name,
                    bbox_inches="tight")
        plt.close()
def main():
    argv = sys.argv
    print("loading %s ..." % argv[1])
    ssvm = SaveLogger(file_name=argv[1]).load()
    if hasattr(ssvm, 'problem'):
        ssvm.model = ssvm.problem
    print(ssvm)
    if hasattr(ssvm, 'base_ssvm'):
        ssvm = ssvm.base_ssvm
    print("Iterations: %d" % len(ssvm.objective_curve_))
    print("Objective: %f" % ssvm.objective_curve_[-1])
    inference_run = None
    if hasattr(ssvm, 'cached_constraint_'):
        inference_run = ~np.array(ssvm.cached_constraint_)
        print("Gap: %f" %
              (np.array(ssvm.primal_objective_curve_)[inference_run][-1] -
               ssvm.objective_curve_[-1]))

    if len(argv) <= 2:
        argv.append("acc")
    if len(argv) <= 3:
        dataset = 'nyu'
    else:
        dataset = argv[3]

    if argv[2] == 'acc':
        ssvm.n_jobs = 1

        for data_str, title in zip(["train", "val"],
                                   ["TRAINING SET", "VALIDATION SET"]):
            print(title)
            edge_type = "pairwise"

            if dataset == 'msrc':
                ds = MSRC21Dataset()
                data = msrc_helpers.load_data(data_str, which="piecewise_new")
                #data = add_kraehenbuehl_features(data, which="train_30px")
                data = msrc_helpers.add_kraehenbuehl_features(data, which="train")
            elif dataset == 'pascal':
                ds = PascalSegmentation()
                data = pascal_helpers.load_pascal(data_str, sp_type="cpmc")
                #data = pascal_helpers.load_pascal(data_str)
            elif dataset == 'nyu':
                ds = NYUSegmentation()
                data = nyu_helpers.load_nyu(data_str, n_sp=500, sp='rgbd')
            else:
                raise ValueError("Expected dataset to be 'nyu', 'pascal' or"
                                 " 'msrc', got %s." % dataset)

            if type(ssvm.model).__name__ == "LatentNodeCRF":
                print("making data hierarchical")
                data = pascal_helpers.make_cpmc_hierarchy(ds, data)
                #data = make_hierarchical_data(
                #    ds, data, lateral=True, latent=True, latent_lateral=False,
                #    add_edge_features=False)
            else:
                data = add_edges(data, edge_type)

            if type(ssvm.model).__name__ == 'EdgeFeatureGraphCRF':
                data = add_edge_features(ds, data, depth_diff=True,
                                         normal_angles=True)

            if type(ssvm.model).__name__ == "EdgeFeatureLatentNodeCRF":
                data = add_edge_features(ds, data)
                data = make_hierarchical_data(
                    ds, data, lateral=True, latent=True, latent_lateral=False,
                    add_edge_features=True)

            #ssvm.model.inference_method = "qpbo"
            Y_pred = ssvm.predict(data.X)

            if isinstance(ssvm.model, LatentNodeCRF):
                Y_pred = [ssvm.model.label_from_latent(h) for h in Y_pred]
            Y_flat = np.hstack(data.Y)

            print("superpixel accuracy: %.2f"
                  % (np.mean((np.hstack(Y_pred) == Y_flat)[Y_flat !=
                                                           ds.void_label]) * 100))

            if dataset == 'msrc':
                res = msrc_helpers.eval_on_pixels(data, Y_pred,
                                                  print_results=True)
                print("global: %.2f, average: %.2f" % (res['global'] * 100,
                                                       res['average'] * 100))
                #msrc_helpers.plot_confusion_matrix(res['confusion'])
            else:
                hamming, jaccard = eval_on_sp(ds, data, Y_pred,
                                              print_results=True)
                print("Jaccard: %.2f, Hamming: %.2f" % (jaccard.mean(),
                                                        hamming.mean()))

        plt.show()

    elif argv[2] == 'plot':
        data_str = 'val'
        if len(argv) <= 4:
            raise ValueError("Need a folder name for plotting.")
        if dataset == "msrc":
            ds = MSRC21Dataset()
            data = msrc_helpers.load_data(data_str, which="piecewise")
            data = add_edges(data, independent=False)
            data = msrc_helpers.add_kraehenbuehl_features(
                data, which="train_30px")
            data = msrc_helpers.add_kraehenbuehl_features(
                data, which="train")
        elif dataset == "pascal":
            ds = PascalSegmentation()
            data = pascal_helpers.load_pascal("val")
            data = add_edges(data)
        elif dataset == "nyu":
            ds = NYUSegmentation()
            data = nyu_helpers.load_nyu("test")
            data = add_edges(data)

        if type(ssvm.model).__name__ == 'EdgeFeatureGraphCRF':
            data = add_edge_features(ds, data, depth_diff=True,
                                     normal_angles=True)
        Y_pred = ssvm.predict(data.X)
        plot_results(ds, data, Y_pred, argv[4])
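# Usage sketch, inferred from the argv handling above (the script name is an
# assumption):
#   python eval_ssvm.py <ssvm_logger.pickle> [acc|plot] [nyu|pascal|msrc] [plot_folder]
# "acc" (the default) reports superpixel accuracies on the train and val
# splits; "plot" writes qualitative results for val (test for nyu) into
# plot_folder; the dataset argument defaults to "nyu".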
def main():
    argv = sys.argv
    print("loading %s ..." % argv[1])
    ssvm1 = SaveLogger(file_name=argv[1]).load()
    ssvm2 = SaveLogger(file_name=argv[2]).load()

    data_str = 'val'
    if len(argv) <= 3:
        raise ValueError("Need a folder name for plotting.")
    print("loading data...")
    data = load_nyu(data_str, n_sp=500)
    dataset = NYUSegmentation()
    print("done")

    data1 = add_edges(data, kind="pairwise")
    data2 = add_edges(data, kind="pairwise")
    data1 = add_edge_features(dataset, data1)
    data2 = add_edge_features(dataset, data2, depth_diff=True)
    Y_pred1 = ssvm1.predict(data1.X)
    Y_pred2 = ssvm2.predict(data2.X)
    folder = argv[3]

    if not os.path.exists(folder):
        os.mkdir(folder)

    np.random.seed(0)
    for image_name, superpixels, y_pred1, y_pred2 in zip(data.file_names,
                                                         data.superpixels,
                                                         Y_pred1, Y_pred2):
        if np.all(y_pred1 == y_pred2):
            continue
        gt = dataset.get_ground_truth(image_name)
        perf1 = eval_on_pixels(dataset, [gt], [y_pred1[superpixels]],
                               print_results=False)[0]
        perf1 = np.mean(perf1[np.isfinite(perf1)])

        perf2 = eval_on_pixels(dataset, [gt], [y_pred2[superpixels]],
                               print_results=False)[0]
        perf2 = np.mean(perf2[np.isfinite(perf2)])
        if np.abs(perf1 - perf2) < 2:
            continue

        image = dataset.get_image(image_name)
        fig, axes = plt.subplots(2, 3, figsize=(12, 6))
        axes[0, 0].imshow(image)
        axes[0, 0].imshow((y_pred1 != y_pred2)[superpixels], vmin=0, vmax=1,
                          alpha=.7)
        axes[0, 1].set_title("ground truth")
        axes[0, 1].imshow(image)
        axes[0, 1].imshow(gt, alpha=.7, cmap=dataset.cmap, vmin=0,
                          vmax=dataset.cmap.N)
        axes[1, 0].set_title("%.2f" % perf1)
        axes[1, 0].imshow(image)
        axes[1, 0].imshow(y_pred1[superpixels], vmin=0, vmax=dataset.cmap.N,
                          alpha=.7, cmap=dataset.cmap)

        axes[1, 1].set_title("%.2f" % perf2)
        axes[1, 1].imshow(image)
        axes[1, 1].imshow(y_pred2[superpixels], alpha=.7, cmap=dataset.cmap,
                          vmin=0, vmax=dataset.cmap.N)
        present_y = np.unique(np.hstack([y_pred1, y_pred2, np.unique(gt)]))
        present_y = np.array([y_ for y_ in present_y
                              if y_ != dataset.void_label])
        axes[0, 2].imshow(present_y[:, np.newaxis], interpolation='nearest',
                          cmap=dataset.cmap, vmin=0, vmax=dataset.cmap.N)
        for i, c in enumerate(present_y):
            axes[0, 2].text(1, i, dataset.classes[c])
        for ax in axes.ravel():
            ax.set_xticks(())
            ax.set_yticks(())
        axes[1, 2].set_visible(False)
        fig.savefig(folder + "/%s.png" % image_name, bbox_inches="tight")
        plt.close(fig)
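# Usage sketch, inferred from the argv handling above (the script name is an
# assumption):
#   python compare_two_models.py <ssvm1.pickle> <ssvm2.pickle> <output_folder>
# Writes side-by-side validation plots only for images where the two models
# disagree and their pixel scores differ by at least 2 (in the units returned
# by eval_on_pixels).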