def export_cut_objects(df_row, path_out, padding, use_mask=True, bg_color=None):
    """ Cut out and export every segmented object found in an image.

    Loads the annotation (``path_1``) and raw image (``path_2``) from the
    given table row, then saves one cropped PNG per labelled object.

    :param df_row: table row with 'path_1' (annotation) and 'path_2' (image)
    :param str path_out: directory for exporting cropped images
    :param int padding: padding around each segmented object
    :param bool use_mask: apply the object mask when cutting
    :param bg_color: background color filled outside the mask
    """
    annot, _ = tl_data.load_image_2d(df_row['path_1'])
    img, name = tl_data.load_image_2d(df_row['path_2'])
    # annotation and image must cover the same spatial extent
    if annot.shape[:2] != img.shape[:2]:
        raise ImageDimensionError('image sizes not match %r vs %r' % (annot.shape, img.shape))

    labels = np.unique(annot)
    # a single label means background only -> nothing to export
    if len(labels) == 1:
        return

    # skip labels[0], which is treated as the background label
    for lb in labels[1:]:
        obj_mask = (annot == lb)
        crop = tl_data.cut_object(img, obj_mask, padding, use_mask, bg_color)
        path_img = os.path.join(path_out, '%s_%i.png' % (name, lb))
        logging.debug('saving image "%s"', path_img)
        tl_data.io_imsave(path_img, crop)
def extract_ellipse_object(idx_row, path_images, path_out, norm_size):
    """ Cut an image region given by ellipse parameters and rescale it
    to a common size, so all exported images end up with the same shape.

    :param (int, row) idx_row: index and table row with ellipse parameters;
        the row must contain 'image_name' and the ``COLUMNS_ELLIPSE`` fields
    :param str path_images: path to the image folder
    :param str path_out: path to output folder
    :param (int, int) norm_size: output image size
    :raises FileNotFoundError: if no image matching 'image_name' exists
    """
    _, row = idx_row
    # select image with this name and any extension
    pattern = os.path.join(path_images, row['image_name'] + '.*')
    list_imgs = sorted(glob.glob(pattern))
    # fail with a clear message instead of a bare IndexError on empty glob
    if not list_imgs:
        raise FileNotFoundError('no image found for pattern "%s"' % pattern)
    path_img = list_imgs[0]
    img, _ = tl_data.load_image_2d(path_img)

    # create mask according to chosen ellipse
    ell_params = row[COLUMNS_ELLIPSE].tolist()
    mask = ell_fit.add_overlap_ellipse(np.zeros(img.shape[:2], dtype=int), ell_params, 1)

    # cut the particular image
    img_cut = tl_data.cut_object(img, mask, 0, use_mask=True, bg_color=None)

    # scaling according to the normal size
    img_norm = transform.resize(img_cut, norm_size)

    path_img = os.path.join(path_out, os.path.basename(path_img))
    tl_data.export_image(path_img, img_norm)