def export_figure(idx_row, df_slices_info, path_out):
    """ load image, segmentation and CSV with centres

    1) draw figure with image, centres and segmentation
    2) draw expert annotation
    3) export the figure

    :param (int, DF:row) idx_row: index and row from a DataFrame of paths
    :param DF df_slices_info: table with expert annotation, indexed by image name
    :param str path_out: path to output directory
    """
    _, row = idx_row
    img_name = os.path.splitext(os.path.basename(row['path_image']))[0]
    try:
        if img_name not in df_slices_info.index:
            logging.debug('missing image in annotation - "%s"', img_name)
            return
        img = tl_data.io_imread(row['path_image'])
        segm = tl_data.io_imread(row['path_segm'])
        df = pd.read_csv(row['path_centers'], index_col=0)
        centres = df[['X', 'Y']].values

        fig = figure_draw_img_centre_segm(None, img, centres, segm)
        row_slice = df_slices_info.loc[img_name]
        fig = figure_draw_annot_csv(fig, img, row_slice)
        tl_visu.figure_image_adjustment(fig, img.shape)
        fig.savefig(os.path.join(path_out, img_name + '.png'))
        plt.close(fig)
    except Exception:
        logging.error('failed for: %s', img_name)
        logging.error(traceback.format_exc())
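# Example (illustrative sketch, not part of the original pipeline): assuming a
# DataFrame `df_paths` with columns 'path_image', 'path_segm' and 'path_centers',
# and `df_slices_info` indexed by image name, the export can be driven per row:
#
#   for idx_row in df_paths.iterrows():
#       export_figure(idx_row, df_slices_info, path_out='./figures')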
def load_image_segm_center(idx_row, path_out=None, dict_relabel=None):
    """ by paths load the image and segmentation and, whether centers exist,
    load them as well; if the output path is given, draw a visualisation
    of the inputs

    :param (int, DF:row) idx_row:
    :param str path_out: path to output directory
    :param {} dict_relabel:
    :return (str, ndarray, ndarray, [[int, int]]):
    """
    idx, row_path = idx_row
    for k in ['path_image', 'path_segm', 'path_centers']:
        row_path[k] = tl_data.update_path(row_path[k])
        assert os.path.exists(row_path[k]), 'missing %s' % row_path[k]

    idx_name = get_idx_name(idx, row_path['path_image'])
    img_struc, img_gene = tl_data.load_img_double_band_split(
        row_path['path_image'], im_range=None)
    # img_rgb = np.array(Image.open(row_path['path_img']))
    img_rgb = tl_data.merge_image_channels(img_struc, img_gene)
    if np.max(img_rgb) > 1:
        img_rgb = img_rgb / float(np.max(img_rgb))

    seg_ext = os.path.splitext(os.path.basename(row_path['path_segm']))[-1]
    if seg_ext == '.npz':
        with np.load(row_path['path_segm']) as npzfile:
            segm = npzfile[npzfile.files[0]]
        if dict_relabel is not None:
            segm = seg_lbs.merge_probab_labeling_2d(segm, dict_relabel)
    else:
        segm = tl_data.io_imread(row_path['path_segm'])
        if dict_relabel is not None:
            segm = seg_lbs.relabel_by_dict(segm, dict_relabel)

    if row_path['path_centers'] is not None \
            and os.path.isfile(row_path['path_centers']):
        ext = os.path.splitext(os.path.basename(row_path['path_centers']))[-1]
        if ext == '.csv':
            centers = tl_data.load_landmarks_csv(row_path['path_centers'])
            centers = tl_data.swap_coord_x_y(centers)
        elif ext == '.png':
            centers = tl_data.io_imread(row_path['path_centers'])
            # relabel loaded segm into the relevant labels
            centers = np.array(LUT_ANNOT_CENTER_RELABEL)[centers]
        else:
            logging.warning('not supported file format %s', ext)
            centers = None
    else:
        centers = None

    if is_drawing(path_out):
        export_visual_input_image_segm(path_out, idx_name, img_rgb, segm,
                                       centers)
    return idx_name, img_rgb, segm, centers
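# Example (illustrative sketch; `df_paths` is an assumed DataFrame with columns
# 'path_image', 'path_segm' and 'path_centers', not a name from this module):
#
#   for idx_row in df_paths.iterrows():
#       name, img_rgb, segm, centers = load_image_segm_center(
#           idx_row, path_out='./results', dict_relabel=None)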
def load_image(path_img, img_type=TYPE_LOAD_IMAGE):
    """ load image from the given path according to the given specification

    :param str path_img:
    :param str img_type:
    :return ndarray:
    """
    path_img = os.path.abspath(os.path.expanduser(path_img))
    assert os.path.isfile(path_img), 'missing: "%s"' % path_img

    if img_type == 'segm':
        img = tl_data.io_imread(path_img)
    elif img_type == '2d_struct':
        img, _ = tl_data.load_img_double_band_split(path_img)
        assert img.ndim == 2, 'image is expected to be a single (2D) channel'
    else:
        logging.error('not supported loading img_type: %s', img_type)
        img = tl_data.io_imread(path_img)

    logging.debug('image shape: %s, value range %f - %f',
                  repr(img.shape), img.min(), img.max())
    return img
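# Example (sketch; the paths are placeholders and TYPE_LOAD_IMAGE is assumed to
# be a module-level constant):
#
#   segm = load_image('segms/img_01.png', img_type='segm')
#   gray = load_image('images/img_01.tif', img_type='2d_struct')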
def cluster_points_draw_export(dict_row, params, path_out=None):
    """ cluster points into centers and export visualisations

    :param {} dict_row:
    :param {str: ...} params:
    :param str path_out:
    :return {}:
    """
    assert all(n in dict_row for n in ['path_points', 'path_image', 'path_segm']), \
        'missing some required fields: %s' % repr(dict_row)
    name = os.path.splitext(os.path.basename(dict_row['path_points']))[0]
    points = tl_data.load_landmarks_csv(dict_row['path_points'])
    if len(points) == 0:
        logging.debug('no points to cluster for "%s"', name)
    points = tl_data.swap_coord_x_y(points)

    centres, clust_labels = cluster_center_candidates(
        points, max_dist=params['DBSCAN_max_dist'],
        min_samples=params['DBSCAN_min_samples'])
    path_csv = os.path.join(path_out, FOLDER_CENTER, name + '.csv')
    tl_data.save_landmarks_csv(path_csv, tl_data.swap_coord_x_y(centres))

    path_visu = os.path.join(path_out, FOLDER_CLUSTER_VISUAL)
    img, segm = None, None
    if dict_row['path_image'] is not None \
            and os.path.isfile(dict_row['path_image']):
        img = tl_data.io_imread(dict_row['path_image'])
    if dict_row['path_segm'] is not None \
            and os.path.isfile(dict_row['path_segm']):
        segm = tl_data.io_imread(dict_row['path_segm'])

    export_draw_image_centers_clusters(path_visu, name, img, centres, points,
                                       clust_labels, segm)
    dict_row.update({
        'image': name,
        'path_centers': path_csv,
        'nb_centres': len(centres),
    })
    return dict_row
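# Example (illustrative only; the DBSCAN parameter values and paths below are
# assumptions, not project defaults):
#
#   params = {'DBSCAN_max_dist': 25, 'DBSCAN_min_samples': 1}
#   row = {'path_points': 'points/img_01.csv',
#          'path_image': 'images/img_01.png',
#          'path_segm': 'segms/img_01.png'}
#   row = cluster_points_draw_export(row, params, path_out='./results')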
def main(path_annot, path_out, nb_comp=5):
    """ load annotated segmentations, compute the Ray shape descriptors
    and export the fitted CDF shape models

    :param str path_annot: glob pattern to annotated segmentations
    :param str path_out: path to output directory
    :param int nb_comp: number of components of the mixture model
    """
    list_paths = sorted(glob.glob(path_annot))
    print('nb images:', len(list_paths),
          'SAMPLES:', [os.path.basename(p) for p in list_paths[:5]])
    list_segms = []
    for path_seg in list_paths:
        seg = tl_data.io_imread(path_seg)
        list_segms.append(seg)

    list_rays, _ = tl_rg.compute_object_shapes(list_segms, ray_step=RAY_STEP,
                                               interp_order='spline',
                                               smooth_coef=1)
    logging.info('nb eggs: %i, nb rays: %i', len(list_rays), len(list_rays[0]))

    x_axis = np.linspace(0, 360, len(list_rays[0]), endpoint=False)
    df = pd.DataFrame(np.array(list_rays), columns=x_axis.astype(int))
    path_csv = os.path.join(path_out, NAME_CSV_RAY_ALL)
    logging.info('exporting all Rays: %s', path_csv)
    df.to_csv(path_csv)

    # SINGLE MODEL
    model, list_cdf = tl_rg.transform_rays_model_cdf_mixture(list_rays, 1)
    cdf = np.array(list_cdf)
    # path_model = os.path.join(path_out, NAME_NPZ_MODEL_SINGLE)
    # logging.info('exporting model: %s', path_model)
    # np.savez(path_model, name='cdf', cdfs=cdf, mix_model=model)
    path_model = os.path.join(path_out, NAME_PKL_MODEL_SINGLE)
    logging.info('exporting model: %s', path_model)
    with open(path_model, 'wb') as fp:
        pickle.dump({'name': 'cdf', 'cdfs': cdf, 'mix_model': model}, fp)

    # MIXTURE MODEL
    model, list_mean_cdf = tl_rg.transform_rays_model_sets_mean_cdf_mixture(
        list_rays, nb_comp)
    # path_model = os.path.join(path_out, NAME_NPZ_MODEL_MIXTURE)
    # logging.info('exporting model: %s', path_model)
    # np.savez(path_model, name='set_cdfs', cdfs=list_mean_cdf,
    #          mix_model=model)
    path_model = os.path.join(path_out, NAME_PKL_MODEL_MIXTURE)
    logging.info('exporting model: %s', path_model)
    with open(path_model, 'wb') as fp:
        pickle.dump({'name': 'set_cdfs', 'cdfs': list_mean_cdf,
                     'mix_model': model}, fp)

    logging.info('Done')
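# Example (hypothetical usage; the glob pattern and output folder are placeholders,
# and the NAME_PKL_* constants are assumed to be defined at module level):
#
#   main('data/annot_eggs/*.png', './results', nb_comp=5)
#   with open(os.path.join('./results', NAME_PKL_MODEL_MIXTURE), 'rb') as fp:
#       model_dict = pickle.load(fp)
#   print(model_dict['name'], len(model_dict['cdfs']))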
def load_sample_image(name_img=IMAGE_LENNA):
    """ load sample image

    :param str name_img:
    :return ndarray:

    >>> img = load_sample_image(IMAGE_LENNA)
    >>> img.shape
    (512, 512, 3)
    """
    path_img = get_image_path(name_img)
    assert os.path.exists(path_img), 'missing: "%s"' % path_img
    logging.debug('image (%s): %s', os.path.exists(path_img), path_img)
    img = tl_data.io_imread(path_img)
    return img
def load_correct_segm(path_img):
    """ load segmentation and correct it with simple morphological operations

    :param str path_img:
    :return (ndarray, ndarray):
    """
    assert os.path.isfile(path_img), 'missing: %s' % path_img
    logging.debug('loading image: %s', path_img)
    img = tl_data.io_imread(path_img)
    seg = (img > 0)
    seg = morphology.binary_opening(seg, selem=morphology.disk(25))
    seg = morphology.remove_small_objects(seg)
    seg_lb = measure.label(seg)
    seg_lb[seg == 0] = 0
    return seg, seg_lb
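# Example (sketch; the path is a placeholder):
#
#   seg_bin, seg_lb = load_correct_segm('segms/img_01.png')
#   print('nb objects:', seg_lb.max())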
def dir_images_frequent_colors(paths_img, ratio_treshold=1e-3):
    """ go through all images and estimate the most frequent colours

    :param [str] paths_img: paths to images
    :param float ratio_treshold: ratio of pixels of a colour required
        for the colour to be considered important
    :return {(int, int, int): int}: colour -> pixel count
    """
    logging.debug('passing %i images', len(paths_img))
    dict_colors = dict()
    for path_im in paths_img:
        img = tl_data.io_imread(path_im)
        local_dict_colors = image_frequent_colors(img, ratio_treshold)
        for clr in local_dict_colors:
            if clr not in dict_colors:
                dict_colors[clr] = 0
            dict_colors[clr] += local_dict_colors[clr]
    logging.info('img folder colours: %s', repr(dict_colors))
    return dict_colors
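# Example (sketch; the glob pattern is a placeholder):
#
#   paths_img = sorted(glob.glob('images/*.png'))
#   dict_colors = dir_images_frequent_colors(paths_img, ratio_treshold=1e-3)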
def perform_quantize_image(path_image, list_colors, method='color'):
    """ perform the quantization together with loading and exporting

    :param str path_image:
    :param [(int, int, int)] list_colors: list of possible colours
    :param str method: quantisation method, 'color' or 'position'
    """
    logging.debug('quantize img: "%s"', path_image)
    im = tl_data.io_imread(path_image)
    assert im.ndim == 3, 'not valid color image of dims %s' % repr(im.shape)
    im = im[:, :, :3]
    # im = io.imread(path_image)[:, :, :3]
    if method == 'color':
        im_q = seg_annot.quantize_image_nearest_color(im, list_colors)
    elif method == 'position':
        im_q = seg_annot.quantize_image_nearest_pixel(im, list_colors)
    else:
        logging.error('not implemented method "%s"', method)
        # nothing was quantised, so there is nothing to export
        return
    path_image = os.path.splitext(path_image)[0] + '.png'
    tl_data.io_imsave(path_image, im_q.astype(np.uint8))
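# Example (sketch; the colour list is an assumption for a hypothetical
# three-class annotation, not a value used by this project):
#
#   list_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
#   perform_quantize_image('annot/img_01.png', list_colors, method='color')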