def create_results_dict():
    """Collect Rand error for every (test, train, feature, cue) combination."""
    results = {}
    for test_set in ALL_VOLUME_IDS:
        results.setdefault(test_set, {})
        gt_path = get_paths("gala-evaluate", "train", SIZE, test_set,
                            "XX", "XX", "XX", "XX")["groundtruth"]
        gt = io.read_image_stack(gt_path)
        for train_set in ALL_VOLUME_IDS:
            results[test_set].setdefault(train_set, {})
            for feature in ALL_FEATURES:
                results[test_set][train_set].setdefault(feature, {})
                for cue in ALL_CUES:
                    seg_path = get_paths("gala-evaluate", "train", SIZE, test_set,
                                         cue, feature, "", train_set)["segmentation"]
                    try:
                        seg = io.read_image_stack(seg_path)
                    except IOError:
                        # this combination was never segmented; note it and move on
                        print " -- missing: test %s, train %s, %s, %s: %s" % (
                            test_set, train_set, feature, cue, seg_path)
                        continue
                    err = rand_error(seg, gt)
                    print "test %s, train %s, %s, %s: %f" % (
                        test_set, train_set, feature, cue, err)
                    results[test_set][train_set][feature][cue] = err
    return results
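# A minimal usage sketch: flatten the nested results dict into
# (test, train, feature, cue, error) rows sorted by Rand error, for quick
# inspection. "summarize_results" is a hypothetical helper, not part of the
# original module; it only assumes the dict layout built above.
def summarize_results(results):
    rows = []
    for test_set, by_train in results.items():
        for train_set, by_feature in by_train.items():
            for feature, by_cue in by_feature.items():
                for cue, err in by_cue.items():
                    rows.append((test_set, train_set, feature, cue, err))
    # lowest Rand error (best agreement with ground truth) first
    return sorted(rows, key=lambda r: r[-1])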
def add_gradient_channel(traintest, size, volume_id, cues_id, datatype, group):
    """Append a 2D-derivative channel of the last cue component to a cue volume."""
    last_component = cues_id.split(ID_DELIMITER)[-1]
    new_cues_id = cues_id + ID_DELIMITER + DERIVATIVE_ID + last_component
    src_paths = get_paths("XX", traintest, size, volume_id, cues_id,
                          "XX", "XX", "XX")
    dest_paths = get_paths("XX", traintest, size, volume_id, new_cues_id,
                           "XX", "XX", "XX")
    src_arr = imio.read_h5_stack(src_paths[datatype], group=group)
    dest_arr = add_2d_derivative_channel(src_arr, concat=True)
    write_h5(dest_arr, dest_paths[datatype], group)
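# Hedged sketch of add_2d_derivative_channel, which is defined elsewhere in
# this repo and not shown here. A plausible minimal version: compute a
# per-channel, per-plane 2D gradient magnitude with numpy and optionally
# concatenate it onto the input as new channels. The original may differ.
def _add_2d_derivative_channel_sketch(arr, concat=True):
    # arr: (planes, rows, cols) or (planes, rows, cols, channels)
    if arr.ndim == 3:
        arr = arr[..., np.newaxis]
    grads = []
    for c in range(arr.shape[-1]):
        # in-plane derivatives only (axes 1 and 2); planes stay independent
        gy, gx = np.gradient(arr[..., c], axis=(1, 2))
        grads.append(np.sqrt(gy ** 2 + gx ** 2))
    grad = np.stack(grads, axis=-1)
    return np.concatenate((arr, grad), axis=-1) if concat else grad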
def concatenate(traintest, size, volume_id, cues_id_1, cues_id_2, datatype, group):
    """Stack two cue volumes along the channel axis into one combined cue."""
    cues_id_new = cues_id_1 + ID_DELIMITER + cues_id_2
    src_paths_1 = get_paths("XX", traintest, size, volume_id, cues_id_1,
                            "XX", "XX", "XX")
    src_paths_2 = get_paths("XX", traintest, size, volume_id, cues_id_2,
                            "XX", "XX", "XX")
    dest_paths = get_paths("XX", traintest, size, volume_id, cues_id_new,
                           "XX", "XX", "XX")
    src_arr_1 = imio.read_h5_stack(src_paths_1[datatype], group=group)
    src_arr_2 = imio.read_h5_stack(src_paths_2[datatype], group=group)
    # promote single-channel volumes to 4D so channel concatenation works
    if src_arr_1.ndim < 4:
        src_arr_1 = src_arr_1[..., np.newaxis]
    if src_arr_2.ndim < 4:
        src_arr_2 = src_arr_2[..., np.newaxis]
    dest_arr = np.concatenate((src_arr_1, src_arr_2), axis=3)
    write_h5(dest_arr, dest_paths[datatype], group)
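# Hedged sketch of the write_h5 helper used throughout this module; the
# original lives elsewhere in the repo. The signature below is inferred from
# the call sites here -- write_h5(arr, path, group=H5_KEY, dry=False) -- and
# is an assumption, not the confirmed original.
def _write_h5_sketch(arr, path, group=H5_KEY, dry=False):
    if dry:
        print "dry run -- would write array of shape %s to %s" % (
            str(arr.shape), path)
        return
    with h5py.File(path, "w") as f:
        f.create_dataset(group, data=arr, compression="gzip")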
def main(args):
    ws_id = generate_ws_id(args.min_seed_size, args.connectivity,
                           args.smooth_thresh, args.keep_speckles,
                           args.override_skimage)
    paths = get_paths("XX", args.traintest, args.size, args.volume_id,
                      "idsia", "XX", "XX", "XX", ws_id)
    stack = read_h5(paths["hypercubes"], args.h5_key)
    if args.dry_run:
        # only process the first two planes on a dry run
        stack = stack[:2, :, :]
    if not args.dont_invert:
        stack[...] = invert_gray(stack)
    # watershed each plane independently, offsetting labels so they stay
    # unique across the whole volume
    ws = np.zeros(stack.shape)
    cur_max = 0
    for ii in range(stack.shape[0]):
        print ii
        ws[ii, :, :] = morpho.watershed(
            stack[ii, :, :],
            minimum_seed_size=args.min_seed_size,
            connectivity=args.connectivity,
            smooth_thresh=args.smooth_thresh,
            override_skimage=args.override_skimage) + cur_max
        cur_max = ws[ii, :, :].max()
    ws = ws.astype('int64')
    print "unique labels in ws:", np.unique(ws).size
    if not args.keep_speckles:
        ws = agglo.despeckle_watershed(ws)
        print "unique labels after despeckling:", np.unique(ws).size
    # relabel to consecutive positive integers starting at 1
    ws, _, _ = evaluate.relabel_from_one(ws)
    if ws.min() < 1:
        ws += (1 - ws.min())
    write_h5(ws, paths["watersheds"], dry=args.dry_run)
    if args.no_quarters or (args.size != "whole"):
        return
    # also write out the predefined sub-volume crops
    for (row_start, row_end, col_start, col_end,
         frame_start, frame_end, label, size) in fractions:
        path = generate_path(args.traintest, size, label, ws_id)
        write_h5(ws[frame_start:frame_end, row_start:row_end,
                    col_start:col_end], path)
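# Hedged sketch of invert_gray, which is defined elsewhere in the repo: flip
# grayscale intensities so that the structures watershed should trace become
# ridges rather than valleys (or vice versa). The dtype handling below is an
# assumption; the original implementation may differ.
def _invert_gray_sketch(stack):
    if np.issubdtype(stack.dtype, np.integer):
        return np.iinfo(stack.dtype).max - stack
    return stack.max() - stack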
def format(cues_id, features_id, exec_id, outfile_name):
    """Convert a whole-volume test segmentation from HDF5 to TIFF."""
    paths = get_paths("gala-segment", "test", "whole", "only",
                      cues_id, features_id, exec_id, "only")
    seg_path = os.path.join(paths["output_dir"],
                            paths["experiment_name"] + H5_SUFFIX)
    if len(outfile_name) < 1:
        # no name given: default to the experiment name with a .tif suffix
        out_path = os.path.join(paths["output_dir"],
                                paths["experiment_name"] + TIF_SUFFIX)
    else:
        out_path = os.path.join(paths["output_dir"], outfile_name)
    seg_h5 = h5py.File(seg_path, "r")
    # note: int16 overflows above 32767 labels; assumed safe for these volumes
    seg_np = np.array(seg_h5[H5_KEY]).astype(np.int16)
    seg_h5.close()
    print "saving to", out_path
    tifffile.imsave(out_path, seg_np)
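# Example invocation (hedged): the cue/feature/exec IDs below are
# illustrative placeholders, not IDs taken from the original experiments.
# An empty outfile_name falls back to experiment_name + TIF_SUFFIX as above.
#
#   format("idsia", "base", "gala-segment-0.1", "")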