def view_test(res1, res2):
    """Overlay the current and the best result on the snemi3d test dataset."""
    ds = meta.get_dataset('snemi3d_test')
    layers = [ds.inp(0), ds.inp(1), ds.seg(0), res1, res2]
    names = ['raw', 'pmap', 'ws', 'curr_res', 'best_res']
    volumina_n_layer(layers, names)
def test_aniso(view=False): pmap = vigra.readHDF5('./test_data/anisotropic/pmap.h5', 'data') ws_aniso_dt, n_labels_aniso = ws_anisotropic_distance_transform( pmap, 0.4, 10., 2.) check_consecutive(ws_aniso_dt) assert n_labels_aniso == ws_aniso_dt.max() + 1 print "Anisotropic distance transform watershed done" res_dt = [] res_gray = [] for n_threads in (1, 4): ws_dt, n_labels_dt = ws_distance_transform_2d_stacked( pmap, 0.4, 2., n_threads=n_threads) check_consecutive(ws_dt) assert n_labels_dt == ws_dt.max() + 1, "%i, %i" % (n_labels_dt, ws_dt.max() + 1) res_dt.append(n_labels_dt) print "Distance transform watershed done" ws_gray, n_labels_gray = ws_grayscale_distance_transform_2d_stacked( pmap, 0.1, 2., n_threads=n_threads) check_consecutive(ws_gray) assert n_labels_gray == ws_gray.max() + 1 res_gray.append(n_labels_gray) print "Grayscale distance transform watershed done" assert res_dt[0] == res_dt[1] assert res_gray[0] == res_gray[1] if view: raw = vigra.readHDF5('./test_data/anisotropic/raw.h5', 'data') volumina_n_layer([raw, pmap, ws_aniso_dt, ws_dt, ws_gray], ['raw', 'pmap', 'ws_aniso_dt', 'ws_dt', 'ws_gray'])
def view_random_slide(raw_path, bounding_box, output_shape, raw_key='data', use_misalign=False):
    """View the effect of the RandomSlide transformation next to a plain crop.

    Parameters:
        raw_path     -- path to the hdf5 file with the raw data
        bounding_box -- slicing applied to the raw dataset before the transform
        output_shape -- (y, x) shape the slide transform should produce
        raw_key      -- hdf5 dataset key (default 'data')
        use_misalign -- if True, configure RandomSlide via max_misalign
                        instead of the output shape
    """
    import h5py
    from volumina_viewer import volumina_n_layer
    # open read-only: this function only inspects the data
    with h5py.File(raw_path, 'r') as f:
        raw = f[raw_key][bounding_box].astype('float32')
    # symmetric in-plane margin between the raw shape and the output shape
    # (z stays uncropped: the target z-extent is len(raw) itself)
    diff = (np.array(raw.shape) - np.array(
        [len(raw), output_shape[0], output_shape[1]]).astype('uint32')) // 2
    if use_misalign:
        max_misalign = tuple(diff)
        trafo = vol.RandomSlide(max_misalign=max_misalign)
    else:
        trafo = vol.RandomSlide(output_shape)
    slided = trafo(raw)
    # crop the raw data by the same margin so both volumes align
    crop = tuple(
        slice(diff[i], raw.shape[i] - diff[i]) for i in range(len(diff)))
    cropped = raw[crop]
    assert cropped.shape == slided.shape, "%s, %s" % (str(cropped.shape), str(slided.shape))
    # fix: the second layer shows the slided volume ('slided', not 'slipped')
    volumina_n_layer([cropped, slided], ['cropped', 'slided'])
def main(): args = process_command_line() with open(args.bounding_box_file, 'r') as f: bb_str = f.readline() bounding_box = bb_str.split(' ') bounding_box = [int(x) for x in bounding_box] assert len(bounding_box) == 6, "Bounding box needs 6 coordinates!" print "Bounding box:", bounding_box shape = (bounding_box[1] - bounding_box[0], bounding_box[3] - bounding_box[2], bounding_box[5] - bounding_box[4] ) print "Resulting shape:", shape print "Reading in skeletons from", args.skeleton_path # get the skeleton coordinates first skeleton_coordinates = coordinates_from_json(args.skeleton_path, bounding_box) # perform dense reconstruction for all skeleton coordinates we have print "Projecting skeletons to dense segments" dense_skeletons = dense_reconstruction(args.prob_folder, skeleton_coordinates, args.rf_path, shape) # for debugging from volumina_viewer import volumina_n_layer probs = np.array(np.squeeze(vigra.readVolume(args.prob_folder+"/pmaps_z=000.tif"))) volumina_n_layer([probs, dense_skeletons.astype(np.uint32)])
def view_snap(snap_path):
    """View a training snapshot: raw data, affinities, groundtruth and mask.

    The label datasets only cover a sub-volume; they are embedded into
    zero-initialized volumes of the full raw shape at the stored offset.
    """
    # fix: open read-only — this function only inspects the snapshot, and
    # h5py's historic default mode ('a') could modify or lock the file
    with h5py.File(snap_path, 'r') as f:
        ds = f['volumes/labels/mask']
        attrs = ds.attrs
        shape = ds.shape
        offset = attrs['offset']
        resolution = attrs['resolution']
        # offset is stored in physical units -> convert to voxel coordinates
        offset = [off // resolution[i] for i, off in enumerate(offset)]
        print(offset)
        print(shape)
        # bounding box of the labeled sub-volume inside the full volume
        bb = tuple(slice(offset[i], offset[i] + s) for i, s in enumerate(shape))
        raw = f['volumes/raw'][:]
        full_shape = raw.shape
        aff_channels = f['volumes/labels/affinities'].shape[0]
        aff_shape = full_shape + (aff_channels, )
        aff_bb = bb + (slice(None), )
        aff = np.zeros(aff_shape, dtype='float32')
        # affinities are stored channel-first -> move channels to the last axis
        aff[aff_bb] = f['volumes/labels/affinities'][:].transpose((1, 2, 3, 0))
        gt = np.zeros(full_shape, dtype='uint32')
        gt[bb] = f['volumes/labels/neuron_ids'][:]
        mask = np.zeros(full_shape, dtype='uint32')
        mask[bb] = f['volumes/labels/mask'][:]
        volumina_n_layer([raw.astype('float32'), aff, gt, mask],
                         ['raw', 'affinities', 'gt', 'mask'])
def view_res(res):
    """Compare a result segmentation (path in `res`) against raw and groundtruth."""
    from volumina_viewer import volumina_n_layer
    raw = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/neuroproof_data/raw_test.h5",
        "data")
    segmentation = vigra.readHDF5(res, "data").astype(np.uint32)
    groundtruth = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/neuroproof_data/gt_test.h5",
        "data").astype(np.uint32)
    volumina_n_layer([raw, segmentation, groundtruth])
def make_superpix_isbi2013(superpix = True):
    """Compute an oversegmentation of the isbi2013 test block and save it.

    superpix -- if True, run the 2d distance-transform watershed per slice
                and stack the results; otherwise run the 3d variant.
    """
    path_probs = "/home/constantin/Work/data_ssd/data_150615/isbi2013/pixel_probs/test-probs-nn.h5"
    key_probs = "exported_data"
    path_raw = "/home/constantin/Work/data_ssd/data_150615/isbi2013/test-input.h5"
    key_raw = "data"
    probs = vigra.readHDF5(path_probs, key_probs)
    probs = np.squeeze(probs)
    probs = np.array(probs)
    # invert so that membranes have high values (cf. make_superpix_bock)
    probs = 1. - probs
    raw = vigra.readHDF5(path_raw, key_raw)
    #volumina_n_layer( (raw, probs) )
    #quit()
    if superpix:
        # use superpixel algorithm to segment the image
        # stack 2d segmented images
        segmentation = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) ,dtype = np.uint32)
        seeds = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) ,dtype = np.uint32)
        weights = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) ,dtype = np.uint32)
        # need offset to keep superpixel of the individual layers seperate!
        offset = 0
        for layer in range(probs.shape[2]):
            if layer != 0:
                offset = np.max(segmentation[:,:,layer-1])
            #segmentation[:,:,layer] = watershed_superpixel_vigra(probs[:,:,layer], offset)
            res_wsdt = watershed_distancetransform_2d(probs[:,:,layer], offset)
            segmentation[:,:,layer] = res_wsdt[0]
            seeds[:,:,layer] = res_wsdt[1]
            weights[:,:,layer] = res_wsdt[2]
        #segmentation[:,:,2] = watershed_distancetransform_2d( probs[:,:,2], 0 )
        volumina_n_layer( (probs, segmentation, seeds, weights) )
    else:
        # use supervoxel algorithm to segment the image
        segmentation = watershed_distancetransform_3d(probs)
        volumina_n_layer( (raw, probs, segmentation) )
    print "Number of superpixels:", segmentation.max()
    #quit()
    path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/superpixel/"
    name = "watershed_nn_dt_supervox_test"
    fpath = path + name + ".h5"
    vigra.impex.writeHDF5(segmentation, fpath, "superpixel" )
def view_isbi():
    """Compare the current multicut result on the isbi test set with the
    stored reference result. (The lmc results are not loaded here.)"""
    raw = vigra.readHDF5('./cache_isbi/isbi_test/inp0.h5', 'data')
    pmap = vigra.readHDF5('./cache_isbi/isbi_test/inp1.h5', 'data')
    overseg = vigra.readHDF5('./cache_isbi/isbi_test/seg0.h5', 'data')
    mc_result = vigra.readHDF5('./cache_isbi/isbi_test/mc_seg.h5', 'data')
    mc_reference = vigra.readHDF5('./data/isbi/mc_seg.h5', 'data')
    layers = [raw, pmap, overseg, mc_result, mc_reference]
    names = ['raw', 'pmap', 'seg', 'seg_mc', 'seg_ref_mc']
    volumina_n_layer(layers, names)
def gt_isbi2012():
    """Build background-free groundtruth for isbi2012, view and save it."""
    labels_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-labels.h5"
    raw_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-volume.h5"
    labels = vigra.readHDF5(labels_path, "labels")
    raw = vigra.readHDF5(raw_path, "data")
    labels = preprocess_for_bgsmoothing_isbi2012(labels)
    gt = smooth_background(labels).astype(np.uint32)
    volumina_n_layer( (raw, labels, gt) )
    gt_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/ground_truth_seg.h5"
    # fix: gt_path was computed but the result was never written
    # (gt_pedunculus saves its result the same way)
    vigra.writeHDF5(gt, gt_path, "gt")
def check_single_prediction():
    """Check the stored prediction against the raw test data and view both."""
    prediction_file = '../results/prediction.h5'
    prediction = vigra.readHDF5(prediction_file, 'data')
    with h5py.File(raw_file) as raw_f:
        raw = raw_f['data'][test_slice].astype('float32')
    # the prediction carries an extra (channel) axis
    assert raw.shape == prediction.shape[:-1], str(raw.shape) + " , " + str(prediction.shape)
    volumina_n_layer([raw, prediction])
def view_test_pmaps(new_pmaps):
    """View the old, 2d and new probability maps on the snemi3d test dataset."""
    ds = meta.get_dataset('snemi3d_test')
    pm_2d = vigra.readHDF5(
        '/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_icv2_test.h5',
        'data')
    data = [ds.inp(0), ds.inp(1), pm_2d] + list(new_pmaps)
    labels = ['raw', '3d_v2', '2d', '3d_v3_i1', '3d_v3_i2', '3d_v3_i3', 'ensemble']
    volumina_n_layer(data, labels)
def view_test():
    """View the snemi3d test raw data with the icv1 and ciresan pmaps."""
    raw = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/raw/test-input.h5",
        "data")
    pmap_icv1 = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_icv1_test.h5",
        "data")
    pmap_ciresan = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_ciresan_test.h5",
        "data")
    volumina_n_layer([raw, pmap_icv1, pmap_ciresan],
                     ["raw", "pmap-icv1", "pmap-ciresan"])
def view_res(sample):
    """View the multicut result for the given cremi sample next to its inputs."""
    in_file = './cremi_inputs/%s/test_files.json' % sample
    with open(in_file) as f:
        inputs = json.load(f)
    mc_path = os.path.join(inputs['cache'], 'MulticutSegmentation.h5')
    assert os.path.exists(mc_path)
    layers = [
        vigra.readHDF5(inputs['data'][0], 'data').astype('uint32'),  # raw
        vigra.readHDF5(inputs['data'][1], 'data'),                   # pmap
        vigra.readHDF5(inputs['seg'], 'data'),                       # overseg
        vigra.readHDF5(inputs['gt'], 'data'),                        # groundtruth
        vigra.readHDF5(mc_path, 'data'),                             # multicut result
    ]
    volumina_n_layer(layers, ['raw', 'pmap', 'seg', 'gt', 'mc'])
def project_gt_isbi2012():
    """Project the isbi2012 multicut groundtruth onto the original labels,
    view the result and write it out."""
    labels_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-labels.h5"
    gt_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/gt_mc.h5"
    raw_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-volume.h5"
    save_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/gt_mc_bkg.h5"
    labels = vigra.readHDF5(labels_path, "labels")
    raw = vigra.readHDF5(raw_path, "data")
    gt = project_gt(labels, vigra.readHDF5(gt_path, "gt"))
    volumina_n_layer( (raw, gt, labels) )
    vigra.writeHDF5(gt, save_path, "gt")
def check_mulit_predictions(*args):
    """Check several stored predictions against the raw test data and view them.

    NOTE(review): the typo in the name ("mulit") is kept — renaming would
    break existing callers.
    """
    for pred_path in args:
        assert os.path.exists(pred_path)
    predictions = [vigra.readHDF5(pred_path, 'data') for pred_path in args]
    with h5py.File(raw_file) as raw_f:
        raw = raw_f['data'][test_slice].astype('float32')
    for pred in predictions:
        # predictions carry an extra (channel) axis
        assert raw.shape == pred.shape[:-1], str(raw.shape) + " , " + str(pred.shape)
    volumina_n_layer([raw] + predictions)
def view_train():
    """View the snemi3d train raw data with both pmaps and the groundtruth."""
    raw = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/raw/train-input.h5",
        "data")
    pmap_icv1 = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_icv1_train.h5",
        "data")
    pmap_ciresan = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_ciresan_train.h5",
        "data")
    groundtruth = vigra.readHDF5(
        "/home/constantin/Work/neurodata_hdd/snemi3d_data/groundtruth/train-gt.h5",
        "data")
    volumina_n_layer([raw, pmap_icv1, pmap_ciresan, groundtruth],
                     ["raw", "pmap-icv1", "pmap-ciresan", "groundtruth"])
def gt_pedunculus():
    """Build background-free groundtruth for the pedunculus data, view and save it."""
    labels_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401_pedunculus_membrane_labeling.tif"
    raw_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"
    gt_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/ground_truth_seg.h5"
    labels = np.squeeze(vigra.readVolume(labels_path))
    # drop slice 6 (the same slice is removed in project_gt_pedunculus —
    # presumably defective; confirm against the raw data)
    labels = np.delete(labels, 6, axis = 2)
    raw = vigra.readHDF5(raw_path, "data")
    labels = preprocess_for_bgsmoothing_pedunculus(labels)
    gt = smooth_background(labels).astype(np.uint32)
    volumina_n_layer( (raw, labels, gt) )
    vigra.writeHDF5(gt, gt_path, "gt")
def watershed_supervoxel_vigra(probs):
    """Seeded supervoxel watershed on a 3d probability map (experimental).

    NOTE(review): the `quit()` after the hessian visualization makes
    everything below it unreachable — as written, this function only
    displays the hessian eigenvalues and never returns.
    """
    # compute seeds
    # try different filter for computing the best seeds (= minima)
    # best options so far:
    # for isotropic data (2x2x2 nm): Gaussian Smoothing with sigma = 4.5
    # for anisotropic data: Gaussian Smoothing with sigma = 2
    #sm_probs = np.array(np.abs( probs - 0.5*( np.max(probs) - np.min(probs) ) ), dtype = np.float32 )
    # Gaussian smoothing
    sm_probs = vigra.gaussianSmoothing(probs, (2.5,2.5,0.5) )
    hessian = vigra.filters.hessianOfGaussian(probs, sigma = (2.5,2.5,0.5) )
    hessian_ev = vigra.filters.tensorEigenvalues( hessian )
    print hessian_ev.shape
    # visualize the magnitude of the three hessian eigenvalue channels
    volumina_n_layer( [ probs, np.absolute(hessian_ev[:,:,:,0]), np.absolute(hessian_ev[:,:,:,1]), np.absolute(hessian_ev[:,:,:,2]) ] )
    quit()
    # --- unreachable below this point (see note in the docstring) ---
    # Difference of Gaussians
    #diff = vigra.gaussianSmoothing(probs,2) - sm_probs
    #volumina_single_layer(diff)
    # seeds = local minima of the smoothed probabilities
    seeds = vigra.analysis.extendedLocalMinima3D(sm_probs, neighborhood = 26)
    SEEDS = vigra.analysis.labelVolumeWithBackground(seeds)
    SEEDS = SEEDS.astype(np.uint32)
    #plot.figure()
    #plot.gray()
    #plot.imshow(SEEDS[:,:,25])
    #plot.show()
    seg_ws, maxRegionLabel = vigra.analysis.watersheds(sm_probs, neighborhood = 6, seeds = SEEDS)
    seg_ws = vigra.analysis.labelVolumeWithBackground(seg_ws)
    #volumina_double_layer(probs, seg_ws)
    return seg_ws
def project_gt_pedunculus():
    """Project the pedunculus multicut groundtruth onto the original labels,
    view the result and write it out."""
    labels_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401_pedunculus_membrane_labeling.tif"
    gt_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/gt_mc.h5"
    raw_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"
    save_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/gt_mc_bkg.h5"
    labels = np.squeeze(vigra.readVolume(labels_path))
    # drop slice 6 (same slice is removed in gt_pedunculus)
    labels = np.delete(labels, 6, axis = 2)
    raw = vigra.readHDF5(raw_path, "data")
    gt = project_gt(labels, vigra.readHDF5(gt_path, "gt"))
    volumina_n_layer( (raw, gt, labels) )
    vigra.writeHDF5(gt, save_path, "gt")
def test_iso(view=False): pmap = vigra.readHDF5('./test_data/isotropic/pmap.h5', 'data') ws_dt, n_labels_dt = ws_distance_transform(pmap, 0.4, 2.) assert ws_dt.min() == 0 assert ws_dt.max() + 1 == len(np.unique(ws_dt)) assert ws_dt.max() + 1 == n_labels_dt print "Wsdt done" ws_gray, n_labels_gray = ws_grayscale_distance_transform(pmap, 0.1, 2.) assert ws_gray.min() == 0 assert ws_gray.max() + 1 == len(np.unique(ws_gray)) assert ws_gray.max() + 1 == n_labels_gray print "Ws gray done" if view: raw = vigra.readHDF5('./test_data/isotropic/raw.h5', 'data') volumina_n_layer([raw, pmap, ws_dt, ws_gray], ['raw', 'pmap', 'ws_dt', 'ws_gray'])
def make_superpix_bock(): from wsDtSegmentation import wsDtSegmentation # sample c path_raw = "/home/constantin/Work/data_hdd/data_131115/Sample_B/raw_data/raw_data_norm_cut.h5" key_raw = "data" path_probs = "/home/constantin/Work/data_hdd/data_131115/Sample_B/google_probabilities/probs_xy_cut.h5" key_probs = "exported_data" raw = vigra.readHDF5(path_raw, key_raw) probs = vigra.readHDF5(path_probs, key_probs) probs = np.array(probs) # need to invert the probability maps (need membrane channel!) probs = 1. - probs #vigra.writeHDF5(probs[:,:,0],"tmp1.h5", "tmp") #vigra.writeHDF5(raw[:,:,0],"tmp2.h5", "tmp") #quit() #probs = vigra.readHDF5("tmp1.h5", "tmp") #raw = vigra.readHDF5("tmp2.h5", "tmp") #probs = np.expand_dims(probs, axis = 2) #raw = np.expand_dims(raw, axis = 2) print probs.shape print raw.shape # visualize the data #volumina_double_layer( raw, probs ) segmentation = np.zeros_like(probs) seeds = np.zeros_like(probs) weights = np.zeros_like(probs) # need offset to keep superpixel of the individual layers seperate! offset = 0 for layer in range(probs.shape[2]): if layer != 0: offset = np.max(segmentation[:,:,layer-1]) # syntax: wsDtSegmentation(probs, pmin, minMemSize, minSegSize, sigSeeds, sigWeights) res_wsdt = wsDtSegmentation(probs[:,:,layer], 0.1, 20, 100, 0.8, 1.) segmentation[:,:,layer] = res_wsdt[0] + offset seeds[:,:,layer] = res_wsdt[1] weights[:,:,layer] = res_wsdt[2] # visualize first layer #print "Nr of seeds:", np.sum(seeds != 0) #volumina_n_layer( [ # raw[:,:,layer], # probs[:,:,layer], # weights[:,:,layer], # seeds[:,:,layer].astype(np.uint32), # segmentation[:,:,layer].astype(np.uint32), # segmentation[:,:,layer].astype(np.uint32)] ) #quit() print "Number of superpixels:", segmentation.max() path_save = "/home/constantin/Work/data_hdd/data_131115/Sample_B/superpixel/wsdt_seg.h5" vigra.impex.writeHDF5(segmentation, path_save, "superpixel" ) # visualize whole stack volumina_n_layer( [raw, probs, segmentation.astype(np.uint32)] )
def gt_isbi2013(): labels_path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/ground_truth/ground-truth.h5" raw_path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/train-input.h5" gt_ignore = vigra.readHDF5(labels_path, "gt") raw = vigra.readHDF5(raw_path, "data") # smooth the gt to get rid of the ignorelabel gt = np.zeros_like( gt_ignore ) for z in range(gt_ignore.shape[2]): print "processing slice", z, "of", gt_ignore.shape[2] gt_ignore_z = gt_ignore[:,:,z] binary = np.zeros_like(gt_ignore_z) binary[gt_ignore_z != 0] = 255 close = vigra.filters.discClosing( binary.astype(np.uint8), 4 ) cc = vigra.analysis.labelImageWithBackground(close.astype(np.uint32), background_value = 255) # find the largest cc (except for zero) counts = np.bincount(cc.flatten()) counts_sort = np.sort(counts)[::-1] mask_myelin = np.zeros_like( cc ) for c in counts_sort: # magic threshold! if c > 4000: id = np.where(counts == c)[0][0] if id != 0: #print "Heureka!", c, id #print np.unique(cc)[id] mask_myelin[cc == id] = 1 else: break #volumina_n_layer( [ raw[:,:,z], gt_ignore_z.astype(np.uint32), mask_myelin ] ) #quit() derivative_filter = vigra.filters.gaussianGradientMagnitude( gt_ignore_z, 0.5 ) derivative_thresh = np.zeros_like( derivative_filter ) derivative_thresh[derivative_filter > 0.1] = 1. 
dt = vigra.filters.distanceTransform2D(derivative_thresh) dt = np.array(dt) dtInv = vigra.filters.distanceTransform2D(derivative_thresh, background = False) dtInv = np.array(dt) dtInv[dtInv >0 ] -= 1 dtSigned = dt.max() - dt + dtInv smoothed, maxRegionLabel = vigra.analysis.watersheds( dtSigned.astype(np.float32), neighborhood = 8, seeds = gt_ignore_z.astype(np.uint32) ) smoothed[ mask_myelin == 1] = 0 #volumina_n_layer( [ raw[:,:,z], gt_ignore_z.astype(np.uint32), smoothed.astype(np.uint32)] ) #quit() gt[:,:,z] = smoothed #gt = gt.transpose(1,0,2) gt_path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/ground_truth/ground-truth_nobg.h5" vigra.writeHDF5(gt, gt_path, "gt") volumina_n_layer( [ raw, gt_ignore.astype(np.uint32), gt.astype(np.uint32) ] )
# --- myelin projection script fragment ---
# NOTE(review): `raw`, `volumina_viewer` and the helper functions
# (get_myelin_cc, get_myelin_segments, project_onto_superpix) are defined
# outside this chunk.
myelin = vigra.readHDF5("/home/constantin/Work/data_ssd/data_150615/isbi2013/pixel_probs/myelin_probs_test3.h5", "exported_data")
myelin = np.array(myelin)
# connected components were computed once and cached in tmp.h5
#myelin_cc = get_myelin_cc(myelin)
#vigra.writeHDF5(myelin_cc, "tmp.h5", "tmp")
#volumina_viewer.volumina_n_layer( [raw, myelin, myelin_cc.astype(np.uint32)] )
myelin_cc = vigra.readHDF5("tmp.h5", "tmp")
# myelin segments were computed once and cached in my_segs.h5
#myelin_segments = get_myelin_segments(myelin_cc, myelin)
#vigra.writeHDF5(myelin_segments, "my_segs.h5", "tmp")
#volumina_viewer.volumina_n_layer( [raw, myelin_cc, myelin_segments.astype(np.uint32)] )
myelin_segments = vigra.readHDF5("my_segs.h5", "tmp")
# project the myelin segments onto the superpixels, save and view the result
superpix = vigra.readHDF5(
    "/home/constantin/Work/data_ssd/data_150615/isbi2013/superpixel/watershed-test_nn_dt.h5",
    "superpixel")
superpix_projected = project_onto_superpix(superpix, myelin_segments)
vigra.writeHDF5(
    superpix_projected,
    "/home/constantin/Work/data_ssd/data_150615/isbi2013/superpixel/myelin_test.h5",
    "superpixel")
volumina_viewer.volumina_n_layer( [raw, superpix.astype(np.uint32), superpix_projected.astype(np.uint32)] )
def view_edges(ds, seg_id, uv_ids, labels, labeled, with_defects=False):
    """Visualize edge labels (1 / 0 / unlabeled) as volumes over the segmentation.

    ds           -- dataset object providing seg / inp / gt / edge_indications
    seg_id       -- id of the segmentation within ds
    uv_ids       -- (n_edges, 2) node pairs per edge
    labels       -- per-edge labels (1. = cut, 0. = not cut)
    labeled      -- per-edge mask of edges that actually carry a label
    with_defects -- if True, the last edges are skip edges (defect handling)
                    and are rendered as a separate layer
    """
    assert uv_ids.shape[0] == labels.shape[0]
    assert labels.shape[0] == labeled.shape[0]
    from volumina_viewer import volumina_n_layer
    # encode labels for display: 1 -> 1, 0 -> 2, unlabeled -> 5
    labels_debug = np.zeros(labels.shape, dtype=np.uint32)
    labels_debug[labels == 1.] = 1
    labels_debug[labels == 0.] = 2
    labels_debug[np.logical_not(labeled)] = 5
    if with_defects:
        # skip edges are appended at the end; everything before
        # `skip_transition` is a normal edge
        skip_transition = labels_debug.shape[0] - get_skip_edges(ds, seg_id).shape[0]
        edge_indications = modified_edge_indications(ds, seg_id)[:skip_transition]
        # get uv ids and labels for the skip edges
        uv_skip = uv_ids[skip_transition:]
        labels_skip = labels_debug[skip_transition:]
        #get uv ids and labels for the normal edges
        uv_ids = uv_ids[:skip_transition]
        labels_debug = labels_debug[:skip_transition]
    else:
        edge_indications = ds.edge_indications(seg_id)
    assert edge_indications.shape[0] == labels_debug.shape[0], "%i, %i" % (
        edge_indications.shape[0], labels_debug.shape[0])
    # xy - and z - labels (indication 1 = in-plane, 0 = between planes)
    labels_xy = labels_debug[edge_indications == 1]
    labels_z = labels_debug[edge_indications == 0]
    uv_xy = uv_ids[edge_indications == 1]
    uv_z = uv_ids[edge_indications == 0]
    seg = ds.seg(seg_id)
    # rasterize the edge labels into volumes for the viewer
    edge_vol_xy = edges_to_volume_from_uvs_in_plane(ds, seg, uv_xy, labels_xy)
    edge_vol_z_dn = edges_to_volume_from_uvs_between_plane(
        ds, seg, uv_z, labels_z, True)
    edge_vol_z_up = edges_to_volume_from_uvs_between_plane(
        ds, seg, uv_z, labels_z, False)
    raw = ds.inp(0).astype('float32')
    gt = ds.gt()
    if with_defects:
        skip_ranges = get_skip_ranges(ds, seg_id)
        skip_starts = get_skip_starts(ds, seg_id)
        edge_vol_skip = edges_to_volumes_for_skip_edges(
            ds, seg, uv_skip, labels_skip, skip_starts, skip_ranges)
        volumina_n_layer([
            raw, seg, gt, edge_vol_z_dn, edge_vol_z_up, edge_vol_skip,
            edge_vol_xy
        ], [
            'raw', 'seg', 'groundtruth', 'labels_z_down', 'labels_z_up',
            'labels_skip', 'labels_xy'
        ])
    else:
        volumina_n_layer(
            [raw, seg, gt, edge_vol_z_dn, edge_vol_z_up, edge_vol_xy], [
                'raw', 'seg', 'groundtruth', 'labels_z_down',
                'labels_z_up', 'labels_xy',
            ])
# --- pixel random-forest training script fragment ---
# NOTE(review): `raw_train`, `lbl_train_p`, `learn_rf` and
# `predict_probability_map` are defined outside this chunk.
lbl_train_k = "data"
lbl_train = vigra.readHDF5(lbl_train_p, lbl_train_k).astype(np.uint32)
#print np.unique( lbl_train )
#print lbl_train[340,398,0]
# keep only the membrane label (id 168); everything else becomes background
mem_label = 168
lbl_train[np.where(lbl_train != mem_label)] = 0
#volumina_n_layer([raw_train, lbl_train])
# flatten to a (n_pixels, 1) feature matrix — raw intensity is the only feature
dat_train = raw_train.flatten()
lbl = lbl_train.flatten()
dat_train = np.expand_dims(dat_train, axis = 1)
print dat_train.shape
print lbl.shape
rf_pix = learn_rf( dat_train, lbl )
probs = predict_probability_map(rf_pix, dat_train)
# keep only the first class channel and restore the volume shape
# NOTE(review): hard-coded shape — assumes the 512x512x30 training block
probs = probs[:,0]
probs = probs.reshape( (512,512,30) )
vigra.writeHDF5(probs, "probs.h5", "probs")
volumina_n_layer([raw_train, probs, lbl_train])
def extract_subproblem(block_edge_id, block_uv):
    """Extract the stitching sub-problem for one block-edge.

    NOTE(review): closure — `blocking`, `overlap`, `seg`, `uv_ids`,
    `edges_between_blocks`, `global2new_nodes`, `reduced_graph` and
    `workflow_logger` come from the enclosing scope.
    Returns (subgraph, inner_edges) for the overlap region between the
    two blocks in `block_uv`.
    """
    block_u, block_v = block_uv
    # find the actual overlapping regions in block u and v and load them
    have_overlap, ovlp_begin_u, ovlp_end_u, ovlp_begin_v, ovlp_end_v = blocking.getLocalOverlaps(
        block_u, block_v, overlap)
    outer_block_u = blocking.getBlockWithHalo(block_u, overlap).outerBlock
    begin_u, end_u = outer_block_u.begin, outer_block_u.end
    # make sure that they are indeed overlapping
    if not have_overlap:
        v_block = blocking.getBlockWithHalo(block_v, overlap).outerBlock
        v_begin, v_end = v_block.begin, v_block.end
        raise RuntimeError(
            "No overlap found for blocks %i, %i with coords %s, %s and %s, %s"
            % (block_u, block_v, str(begin_u), str(end_u), str(v_begin), str(v_end)))
    # read the segmentation in the ovlp get the nodes and transform them to current node ids
    ovlp_begin = (np.array(ovlp_begin_u) + np.array(begin_u)).tolist()
    ovlp_end = (np.array(ovlp_end_u) + np.array(begin_u)).tolist()
    seg_ovlp = seg.read(ovlp_begin, ovlp_end)
    # debug views (disabled)
    if False and block_edge_id != 0:
        from volumina_viewer import volumina_n_layer
        rawp = PipelineParameter().inputs['data'][0]
        bb = np.s_[ovlp_begin[0]:ovlp_end[0],
                   ovlp_begin[1]:ovlp_end[1],
                   ovlp_begin[2]:ovlp_end[2], ]
        with h5py.File(rawp) as f:
            raw = f['data'][bb].astype('float32')
        volumina_n_layer([raw, seg_ovlp])
        quit()
    workflow_logger.debug(
        "BlockwiseMulticutStitchingSolver: Extracting problem from block-edge %i \
ovlp between block %i, %i with shape %s"
        % (block_edge_id, block_u, block_v, str(seg_ovlp.shape)))
    # map the segment ids in the overlap to the current (reduced) node ids
    reduced_nodes = np.unique(global2new_nodes[np.unique(seg_ovlp)])
    # add nodes from the outer edges that go between these blocks
    additional_edges = uv_ids[edges_between_blocks[block_edge_id]]
    assert additional_edges.ndim == 2
    additional_nodes = np.unique(additional_edges)
    reduced_nodes = np.unique(
        np.concatenate([reduced_nodes, additional_nodes]))
    # extract the subproblem
    inner_edges, _, subgraph = reduced_graph.extractSubgraphFromNodes(
        reduced_nodes)
    assert len(inner_edges) == subgraph.numberOfEdges
    workflow_logger.debug(
        "BlockwiseMulticutStitchingSolver: Extracted problem for block-edge %i has %i nodes and %i edges"
        % (block_edge_id, subgraph.numberOfNodes, subgraph.numberOfEdges))
    return subgraph, inner_edges
def node_overlaps_for_block_pair(block_edge_id, block_uv): block_u, block_v = block_uv # get the uv-ids connecting the two blocks and the paths to the block segmentations block_u_path = os.path.join(block_res_path, 'block%i_segmentation.h5' % block_u) block_v_path = os.path.join(block_res_path, 'block%i_segmentation.h5' % block_v) # find the actual overlapping regions in block u and v and load them have_overlap, ovlp_begin_u, ovlp_end_u, ovlp_begin_v, ovlp_end_v = blocking.getLocalOverlaps( block_u, block_v, overlap) if not have_overlap: u_block = blocking.getBlockWithHalo(block_u, overlap).outerBlock v_block = blocking.getBlockWithHalo(block_v, overlap).outerBlock u_begin, u_end = u_block.begin, u_block.end v_begin, v_end = v_block.begin, v_block.end raise RuntimeError( "No overlap found for blocks %i, %i with coords %s, %s and %s, %s" % (block_u, block_v, str(u_begin), str(u_end), str(v_begin), str(v_end))) overlap_bb_u = np.s_[ovlp_begin_u[0]:ovlp_end_u[0], ovlp_begin_u[1]:ovlp_end_u[1], ovlp_begin_u[2]:ovlp_end_u[2]] overlap_bb_v = np.s_[ovlp_begin_v[0]:ovlp_end_v[0], ovlp_begin_v[1]:ovlp_end_v[1], ovlp_begin_v[2]:ovlp_end_v[2]] with h5py.File(block_u_path) as f_u, \ h5py.File(block_v_path) as f_v: seg_u = f_u['data'][overlap_bb_u] seg_v = f_v['data'][overlap_bb_v] # debugging view if False: from volumina_viewer import volumina_n_layer u_block = blocking.getBlockWithHalo(block_u, overlap).outerBlock raw_path = PipelineParameter().inputs['data'][0] oseg_path = PipelineParameter().inputs['seg'] u_begin, u_end = u_block.begin, u_block.end with h5py.File(raw_path) as f_raw, h5py.File( oseg_path) as f_oseg: global_bb = np.s_[ovlp_begin_u[0] + u_begin[0]:ovlp_end_u[0] + u_begin[0], ovlp_begin_u[1] + u_begin[1]:ovlp_end_u[1] + u_begin[1], ovlp_begin_u[2] + u_begin[2]:ovlp_end_u[2] + u_begin[2]] raw = f_raw['data'][global_bb].astype('float32') oseg = f_oseg['data'][global_bb] volumina_n_layer([raw, oseg, seg_u, seg_v]) quit() nodes_u = np.unique(seg_u) nodes_v = 
np.unique(seg_v) # find the overlaps between the two segmentations # NOTE: nodes_u is not dense, I don't know if this is much of a performance issue, but I # really don't want to do all the mapping to make it dense # get the overlap counters overlap_counter_u = ngt.Overlap(nodes_u[-1], seg_u, seg_v) overlap_counter_v = ngt.Overlap(nodes_v[-1], seg_v, seg_u) # FIXME this looks very inefficient.... # if we ever want to put this into production, move to c++ overlaps_u = {} # calculate the symmetrical overlaps for node_u in nodes_u: ovlp_nodes, rel_ovlps = overlap_counter_u.overlapArraysNormalized( node_u) symmetrical_overlaps = np.zeros_like(rel_ovlps, dtype=rel_ovlps.dtype) for ii, node_v in enumerate(ovlp_nodes): ovlp_u = rel_ovlps[ii] ovlp_nodes_v, rel_ovlps_v = overlap_counter_v.overlapArraysNormalized( node_v) where_node_u = ovlp_nodes_v == node_u ovlp_v = rel_ovlps_v[where_node_u] # TODO for now we just average the relative overlaps, but maybe it would be a better idea to use min symmetrical_overlaps[ii] = (ovlp_u + ovlp_v) / 2. overlaps_u[node_u] = (ovlp_nodes, symmetrical_overlaps) return overlaps_u
def view_isbi_train():
    """Show raw, pmap, oversegmentation and groundtruth for the isbi train set."""
    layers, names = [], []
    for path, name in (('./cache_isbi/isbi_train/inp0.h5', 'raw'),
                       ('./cache_isbi/isbi_train/inp1.h5', 'pmap'),
                       ('./cache_isbi/isbi_train/seg0.h5', 'seg'),
                       ('./cache_isbi/isbi_train/gt.h5', 'gt')):
        layers.append(vigra.readHDF5(path, 'data'))
        names.append(name)
    volumina_n_layer(layers, names)
def view_train():
    """View the snemi3d train dataset together with an external pmap."""
    ds = meta.get_dataset('snemi3d_train')
    extra_pmap = vigra.readHDF5('/home/constantin/Downloads/traininf-cst-inv.h5',
                                'data')
    volumina_n_layer([ds.inp(0), ds.inp(1), extra_pmap, ds.seg(0), ds.gt()])
# --- agglomerative clustering script fragment ---
# NOTE(review): `path_seg` / `key_seg` and the clustering helpers are
# defined outside this chunk.
seg_vol = vigra.readHDF5(path_seg, key_seg)
path_img = "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_Probabilities.h5"
key_img = "exported_data"
vol = vigra.readHDF5(path_img, key_img)
# IMPORTANT only use the edge / membrane probability channel !
vol = vol[:,:,:,0]
dim2 = False
if dim2:
    # 2d clustering on the first slice only
    img = vigra.Image(vol[:,:,0])
    seg = vigra.Image(seg_vol[:,:,0]).astype(np.uint8)
    res = agglomerative_clustering_2d( img, seg )
    res = vigra.Image(res).astype(np.uint8)
    volumina_n_layer( (img, seg, res) )
else:
    # 3d clustering on the whole volume
    vol = vigra.Volume(vol)
    seg_vol = vigra.Volume(seg_vol)
    res = agglomerative_clustering_3d(vol, seg_vol )
#    res = vigra.Volume(res).astype(np.uint8)
#
#    volumina_n_layer( (vol, seg_vol, res) )
#
resB, _ = ragA.projectBaseGraphGt( segB ) # projected !! uvIds = ragA.uvIds() edgesA = resA[uvIds[:,0]] != resA[uvIds[:,1]] edgesB = resB[uvIds[:,0]] != resB[uvIds[:,1]] diff_edges = edgesA != edgesB return diff_edges, ragA if __name__ == '__main__': segA = vigra.readHDF5('/home/constantin/Work/home_hdd/results/cremi/competition_submissions/it1/', 'data') segB = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/round3/snemi_final_seglmc_myel_myelmerged.h5', 'data') oversegA = vigra.readHDF5('/home/constantin/Work/neurodata_hdd/snemi3d_data/watersheds/snemiTheUltimateMapWsdtSpecialTest_myel.h5', 'data') oversegB = vigra.readHDF5('/home/constantin/Work/neurodata_hdd/snemi3d_data/watersheds/snemiTheMapWsdtTestV2_myel.h5', 'data') print 'Calculating edge diffs' diff_edges, ragA = compareBySimpleProjection(segA, oversegA, segB) print 'Rendering edge diffs' edge_vol = render_edges(ragA, diff_edges) from volumina_viewer import volumina_n_layer raw = vigra.readHDF5('/home/constantin/Work/neurodata_hdd/snemi3d_data/raw/test-input.h5','data') volumina_n_layer([raw,oversegA,oversegB,segA,segB,edge_vol],['raw','overseg_LMC_new','overseg_LMC_best','seg_LMC_new','seg_LMC_best','edge_diff'])