def save_aggregations(output_filepath, aggregations, res_info):
    """Convert aggregation image matrices to 32-bit float and save them.

    Parameters
    ----------
    output_filepath : str
        Destination path. When *aggregations* is a list, ``#<i>`` is
        inserted before the extension for the i-th entry.
    aggregations : array or list of arrays
        Image matrix (or matrices) to convert and save.
    res_info : sequence
        ``(resx, resy, size_z, res_unit)`` metadata forwarded to
        ``save_image``.
    """
    # Normalize both cases to a list of (path, image) pairs so one loop
    # handles saving; isinstance() replaces the non-idiomatic
    # `type(...) is list` check (also accepts list subclasses).
    if isinstance(aggregations, list):
        base, ext = os.path.splitext(output_filepath)
        targets = [(base + '#' + str(i) + ext, agg)
                   for i, agg in enumerate(aggregations)]
    else:
        targets = [(output_filepath, aggregations)]
    for savepath, aggregation in targets:
        save_image(savepath, aggregation.astype(np.float32),
                   resx=res_info[0], resy=res_info[1],
                   size_z=res_info[2], res_unit=res_info[3],
                   compress=True)
def remove_small_regions(images_dir, model_size_xy_nm, model_size_z_nm):
    """Filter every image stack in *images_dir* in place.

    Each stack is read, passed through ``keep_regions_over_threshold``,
    and written back to its original path with resolution tags derived
    from the model voxel size (nm converted to microns for z).
    """
    xy_resolution = sizenm_to_dpum(model_size_xy_nm)
    z_size_um = model_size_z_nm / 1000
    unit = "micron"
    for stack_name in tqdm(os.listdir(images_dir)):
        stack_path = os.path.join(images_dir, stack_name)
        filtered = keep_regions_over_threshold(imread(stack_path))
        # Overwrite the source file with the filtered stack.
        save_image(stack_path, filtered,
                   resx=xy_resolution, resy=xy_resolution,
                   size_z=z_size_um, res_unit=unit, compress=True)
def model_predict(model, source_dir, predictions_dir, model_size_xy_nm, model_size_z_nm, tri_axis=False):
    """Run *model* over every image in *source_dir* and save predictions.

    With ``tri_axis=True`` the stack is additionally predicted along the
    x and y axes (via axis swaps), the three binarized predictions are
    OR-combined, and the per-axis intermediates are saved under a sibling
    ``xyz-split-predictions`` directory.

    Parameters
    ----------
    model : model object
        Must expose ``input_shape``; consumed by ``evaluate``.
    source_dir, predictions_dir : str
        Input image directory and output prediction directory.
    model_size_xy_nm, model_size_z_nm : number
        Voxel size in nm, used for the output resolution tags
        (z converted to microns).
    tri_axis : bool
        Enable 3-axis prediction and OR-fusion.
    """
    resxy = sizenm_to_dpum(model_size_xy_nm)
    size_z_um = model_size_z_nm / 1000
    res_unit = "micron"
    for filename in tqdm(os.listdir(source_dir)):
        input_path = os.path.join(source_dir, filename)
        input_image = imread(input_path)
        # Primary prediction along the native (z-first) axis order.
        output_label = evaluate(model, input_image)
        if tri_axis:
            z_depth = input_image.shape[0]
            # determine whether or not z axis deep enough, mirror along z if not
            # NOTE(review): model.input_shape[:0:-1][1] — presumably the model's
            # required z extent after reversing the shape tuple; confirm against
            # the model definition.
            if z_depth < model.input_shape[:0:-1][1]:
                print(f'\n(unknown): z axis depth too shallow for 3-axis prediction, mirroring stack')
                input_image = np.concatenate((input_image, input_image[::-1, :, :]), axis=0)
            # Predict with z<->x and z<->y swapped, then swap back so all three
            # labels share the original axis order.
            x_switch_label = np.swapaxes(evaluate(model, np.swapaxes(input_image, 0, 2)), 0, 2)
            y_switch_label = np.swapaxes(evaluate(model, np.swapaxes(input_image, 0, 1)), 0, 1)
            split_output_dir = os.path.join(predictions_dir, '../', 'xyz-split-predictions')
            if not os.path.exists(split_output_dir):
                os.makedirs(split_output_dir)
            # if we mirror to get a large enough z-axis, drop the extra slices after transposing back
            if z_depth < model.input_shape[:0:-1][1]:
                x_switch_label = x_switch_label[:z_depth, :, :]
                y_switch_label = y_switch_label[:z_depth, :, :]
            # Save the three per-axis predictions for inspection/debugging.
            z_output_path = os.path.join(split_output_dir, 'z_'+filename)
            save_image(z_output_path, output_label, resx=resxy, resy=resxy, size_z=size_z_um, res_unit=res_unit, compress=True)
            x_output_path = os.path.join(split_output_dir, 'x_'+filename)
            save_image(x_output_path, x_switch_label, resx=resxy, resy=resxy, size_z=size_z_um, res_unit=res_unit, compress=True)
            y_output_path = os.path.join(split_output_dir, 'y_'+filename)
            save_image(y_output_path, y_switch_label, resx=resxy, resy=resxy, size_z=size_z_um, res_unit=res_unit, compress=True)
            # gather and threshold
            #output_label = output_label/3 + x_switch_label/3 + y_switch_label/3 # averaging version
            # Binarize each axis prediction, then take the logical OR: a voxel
            # is positive if any of the three axis predictions marks it.
            output_label = to_binary(output_label)
            x_switch_label = to_binary(x_switch_label)
            y_switch_label = to_binary(y_switch_label)
            output_label = np.logical_or(output_label, np.logical_or(x_switch_label, y_switch_label))*1.
            output_label = to_binary(output_label)
        output_path = os.path.join(predictions_dir, filename)
        save_image(output_path, output_label, resx=resxy, resy=resxy, size_z=size_z_um, res_unit=res_unit, compress=True)
# NOTE(review): fragment of a larger routine — `annotation`, `draw`, `img`,
# `idx`, `out_stack` and `out_filename` are defined in enclosing code not
# visible here. Presumably rasterizes polyline annotations into a label
# stack, then binarizes and saves it — confirm against the full file.
for segment in annotation:
    # Draw each segment as connected line strokes between consecutive points.
    prev = segment[0]
    for i in range(1, len(segment)):
        point = segment[i]
        draw.line([tuple(prev), tuple(point)], fill=1, width=3)
        prev = point
#cv_contours.append(np.array(segment, dtype=np.int))
#cv2.drawContours(out_stack[0], np.array(cv_contours), -1, 1, 7, lineType=cv2.LINE_AA)
#img = img.resize((500, 500), resample=Image.BICUBIC)
# Store the rasterized slice back into the output stack.
out_image = np.asarray(img)
out_stack[idx] = out_image
# Hard-coded voxel size used for the output resolution tags.
model_size_xy_nm = 10
model_size_z_nm = 50
resxy = sizenm_to_dpum(model_size_xy_nm)
size_z_um = model_size_z_nm / 1000
res_unit = "micron"
# Threshold to a binary 0/255 uint8 stack before saving.
out_stack = np.where(out_stack >= 0.5, 255, 0).astype(np.uint8)
save_image(out_filename, out_stack, resx=resxy, resy=resxy, size_z=size_z_um, res_unit=res_unit, compress=True)
# ignore first index, which is the background largest_idx = np.argmax(occurrences[1:]) largest_intensity = intensities[1:][largest_idx] labelled_stack[labelled_stack != largest_intensity] = 0 labelled_stack[labelled_stack == largest_intensity] = 1 labelled_stack = labelled_stack.astype(np.uint8) * 255 return labelled_stack if __name__ == '__main__': source_dir = "../projects/nuclear/resources/images/raw-labels-stacks" dest_dir = "../projects/nuclear/resources/images/raw-labels-stacks-cc" for file in tqdm(os.listdir(source_dir)): input_path = os.path.join(source_dir, file) output_path = os.path.join(dest_dir, file) source_stack = imread(input_path) resinfo = get_tag_resolution(input_path) dest_stack = keep_largest_region(source_stack) save_image(output_path, dest_stack, resx=resinfo[0], resy=resinfo[1], size_z=resinfo[2], res_unit=resinfo[3], compress=True)
def create_tiff_stack_matching(source_images_dir, source_stacks_dir, label_images_dir, label_stacks_dir, size_z, clear_existing=False, compress=False):
    """Assemble per-slice images (and matching labels) into TIFF stacks.

    Slices named ``<stack>_<z>NNNN`` (or ``<stack>_NNNN``) are gathered
    from *source_images_dir* and *label_images_dir*, stacked in z order
    (missing slices are filled with zeros), and written as
    ``<stack>.tiff`` into *source_stacks_dir* / *label_stacks_dir*.

    NOTE: directory arguments are concatenated directly with filenames
    (``dir + name``), so they must include a trailing separator — this
    matches the existing call convention in this file.

    Parameters
    ----------
    source_images_dir, label_images_dir : str
        Directories holding per-slice source images and label images.
    source_stacks_dir, label_stacks_dir : str
        Output directories for the assembled stacks (created if absent).
    size_z : number
        z spacing forwarded to ``save_image``.
    clear_existing : bool
        Delete existing output directories first.
    compress : bool
        Forwarded to ``save_image``.

    Returns
    -------
    list
        ``[min_slice, max_slice]`` of the last stack processed
        (empty list if there were no stacks).
    """
    # Fix: both checks now use plain truthiness (was `clear_existing is True`
    # in the second, inconsistently with the first).
    if clear_existing and os.path.exists(source_stacks_dir):
        shutil.rmtree(source_stacks_dir)
    if clear_existing and os.path.exists(label_stacks_dir):
        shutil.rmtree(label_stacks_dir)
    if not os.path.exists(source_stacks_dir):
        os.makedirs(source_stacks_dir)
    if not os.path.exists(label_stacks_dir):
        os.makedirs(label_stacks_dir)
    stack_filenames, filenames, input_extension = get_stack_filenames(
        source_images_dir)
    image_range = []
    for stack_filename in tqdm(stack_filenames):
        image_stack = []
        label_stack = []
        source_image = []
        label_image = []
        mini = -1
        maxi = -1
        add_prefix_z = True
        resx_source = 1
        resy_source = 1
        res_unit_source = ""
        resx_label = 1
        resy_label = 1
        res_unit_label = ""
        # First pass: find the slice index range and a template image for
        # zero-filling missing slices.
        for filename in filenames:
            # Renamed from `file`/`slice` to avoid shadowing builtins.
            name_parts = filename.rsplit('_', 1)
            # find matching files
            if name_parts[0] == stack_filename:
                slice_id = name_parts[1]
                add_prefix_z = slice_id.lower().startswith("z")
                if add_prefix_z:
                    slice_id = slice_id[1:]
                i = int(slice_id)
                if i < mini or mini < 0:
                    mini = i
                if i > maxi:
                    maxi = i
                if len(source_image) == 0:
                    source_image = imread(source_images_dir + filename + input_extension)
                if len(label_image) == 0 and os.path.exists(label_images_dir + filename + input_extension):
                    label_image = imread(label_images_dir + filename + input_extension)
        # Guard: no slice matched this stack name — nothing sensible to
        # stack (previously range(-1, 0) produced a degenerate stack).
        if maxi < 0:
            continue
        image_range = [mini, maxi]
        # Second pass: read each slice in order, zero-filling gaps so the
        # stack depth equals maxi - mini + 1.
        for i in range(mini, maxi + 1):
            input_filename = stack_filename + '_'
            if add_prefix_z:
                input_filename += 'z'
            input_filename += f"{i:04d}" + input_extension
            input_path = source_images_dir + input_filename
            if os.path.exists(input_path):
                source_image = imread(input_path)
                resx_source, resy_source, _, res_unit_source = get_tag_resolution(
                    input_path)
            else:
                source_image = np.zeros_like(source_image)
            image_stack.append(source_image)
            input_path = label_images_dir + input_filename
            if os.path.exists(input_path):
                label_image = imread(input_path)
                resx_label, resy_label, _, res_unit_label = get_tag_resolution(
                    input_path)
            else:
                label_image = np.zeros_like(label_image)
            label_stack.append(label_image)
        # dtype kind "O" means ragged slice shapes — report, don't save.
        np_stack = np.array(image_stack)
        if np_stack.dtype.kind == "O":
            print("Stack error", stack_filename, "Type:", np_stack.dtype, "Shape:", np_stack.shape)
        else:
            save_image(source_stacks_dir + stack_filename + '.tiff', np_stack, resx=resx_source, resy=resy_source, size_z=size_z, res_unit=res_unit_source, compress=compress)
        np_stack = np.array(label_stack)
        if np_stack.dtype.kind == "O":
            print("Stack error", stack_filename, "Type:", np_stack.dtype, "Shape:", np_stack.shape)
        else:
            save_image(label_stacks_dir + stack_filename + '.tiff', np_stack, resx=resx_label, resy=resy_label, size_z=size_z, res_unit=res_unit_label, compress=compress)
    return image_range