def cluster_points_draw_export(dict_row, params, path_out=None):
    """ cluster points into centers and export visualisations

    :param {str: ...} dict_row: row with paths to points, image and segmentation
    :param {str: ...} params: clustering parameters
    :param str path_out: output directory
    :return {str: ...}: updated row with path to exported centers
    """
    assert all(n in dict_row for n in ['path_points', 'path_image', 'path_segm']), \
        'missing some required fields: %s' % repr(dict_row)
    name = os.path.splitext(os.path.basename(dict_row['path_points']))[0]
    points = tl_io.load_landmarks_csv(dict_row['path_points'])
    if len(points) == 0:
        logging.debug('no points to cluster for "%s"', name)
    points = tl_io.swap_coord_x_y(points)

    centres, clust_labels = cluster_center_candidates(
        points, max_dist=params['DBSCAN_max_dist'],
        min_samples=params['DBSCAN_min_samples'])
    path_csv = os.path.join(path_out, FOLDER_CENTER, name + '.csv')
    tl_io.save_landmarks_csv(path_csv, tl_io.swap_coord_x_y(centres))

    path_visu = os.path.join(path_out, FOLDER_CLUSTER_VISUAL)
    img, segm = None, None
    if dict_row['path_image'] is not None and os.path.isfile(dict_row['path_image']):
        img = np.array(Image.open(dict_row['path_image']))
    if dict_row['path_segm'] is not None and os.path.isfile(dict_row['path_segm']):
        segm = np.array(Image.open(dict_row['path_segm']))

    export_draw_image_centers_clusters(path_visu, name, img, centres,
                                       points, clust_labels, segm)
    dict_row.update({
        'image': name,
        'path_centers': path_csv,
        'nb_centres': len(centres),
    })
    return dict_row
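

# NOTE: illustrative sketch only, not used by the pipeline. It shows how the
# `cluster_center_candidates` step above could be realised with scikit-learn's
# DBSCAN; the argument names mirror params['DBSCAN_max_dist'] and
# params['DBSCAN_min_samples'], and averaging each cluster into a single centre
# is an assumption made here for illustration.
def _example_cluster_center_candidates(points, max_dist=10., min_samples=3):
    """ group candidate points into centres with DBSCAN (label -1 means noise) """
    import numpy as np
    from sklearn.cluster import DBSCAN
    points = np.asarray(points)
    if len(points) == 0:
        return np.empty((0, 2)), np.array([])
    labels = DBSCAN(eps=max_dist, min_samples=min_samples).fit_predict(points)
    # take the mean position of each real cluster as its centre, drop noise (-1)
    centres = [points[labels == lb].mean(axis=0)
               for lb in np.unique(labels) if lb >= 0]
    return np.array(centres), labels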
def detect_center_candidates(name, image, segm, centers_gt, slic, points,
                             features, feature_names, params, path_out, classif):
    """ classify center candidates on loaded or computed data
    and, if annotation is available, validate the results

    :param str name: image name
    :param ndarray image: input image
    :param ndarray segm: segmentation
    :param centers_gt: ground-truth centers (if any)
    :param ndarray slic: superpixel segmentation
    :param [(int, int)] points: candidate points
    :param features: features per candidate point
    :param [str] feature_names: names of used features
    :param {str: ...} params: parameters
    :param str path_out: output path
    :param obj classif: trained classifier
    :return {str: ...}:
    """
    labels = classif.predict(features)
    # proba = classif.predict_proba(features)

    candidates = np.asarray(points)[np.asarray(labels) == 1]

    path_points = os.path.join(path_out, FOLDER_POINTS)
    path_visu = os.path.join(path_out, FOLDER_POINTS_VISU)

    path_csv = os.path.join(path_points, name + '.csv')
    tl_io.save_landmarks_csv(path_csv, tl_io.swap_coord_x_y(candidates))
    export_show_image_points_labels(path_visu, name, image, segm, points,
                                    labels, slic, centers_gt)

    dict_centers = {'image': name, 'path_points': path_csv}
    if centers_gt is not None:
        dict_centers = compute_statistic_centers(dict_centers, image, segm,
                                                 centers_gt, slic, points,
                                                 labels, params, path_visu)
    return dict_centers
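

# NOTE: illustrative sketch only. The detection above boils down to predicting a
# binary label per candidate point and keeping the positives; this standalone
# example uses a scikit-learn RandomForest as an assumed classifier (the pipeline
# may use any estimator exposing `predict`) and dummy data just to show the
# filtering step.
def _example_filter_positive_candidates():
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    rnd = np.random.RandomState(0)
    points = rnd.randint(0, 100, size=(50, 2))     # candidate (y, x) positions
    features = rnd.rand(50, 8)                     # one feature vector per point
    labels_train = rnd.randint(0, 2, size=50)      # dummy annotation
    classif = RandomForestClassifier(n_estimators=10, random_state=0)
    classif.fit(features, labels_train)
    labels = classif.predict(features)
    # keep only points classified as centre candidates (label == 1)
    candidates = points[np.asarray(labels) == 1]
    return candidates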
def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
    """ prepare inputs (segmentation, centres) for a single image
    and run the configured segmentation methods on it

    :param (int, {str: ...}) idx_row: index and row with paths to image and centres
    :param {str: ...} params: segmentation parameters
    :param bool debug_export: whether to export debugging data
    :return str: image name
    """
    _, row_path = idx_row
    for k in dict(row_path):
        if isinstance(k, str) and k.startswith('path_'):
            row_path[k] = tl_data.update_path(row_path[k], absolute=True)
    logging.debug('segmenting image: "%s"', row_path['path_image'])
    name = os.path.splitext(os.path.basename(row_path['path_image']))[0]

    img = load_image(row_path['path_image'])
    # make the image like RGB
    img_rgb = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    seg = load_image(row_path['path_segm'], 'segm')
    assert img_rgb.shape[:2] == seg.shape, \
        'image %s and segm %s do not match' \
        % (repr(img_rgb.shape[:2]), repr(seg.shape))
    if not os.path.isfile(row_path['path_centers']):
        logging.warning('no center was detected for "%s"', name)
        return name
    centers = tl_data.load_landmarks_csv(row_path['path_centers'])
    centers = tl_data.swap_coord_x_y(centers)
    if len(centers) == 0:
        logging.warning('no center was detected for "%s"', name)
        return name
    # img = seg / float(seg.max())
    slic = seg_spx.segment_slic_img2d(img_rgb, sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])

    path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
    export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)

    seg_simple = simplify_segm_3cls(seg)
    path_segm = os.path.join(params['path_exp'], 'simple', name + '.png')
    export_draw_image_segm(path_segm, seg_simple - 1.)

    dict_segment = create_dict_segmentation(params, slic, seg, img, centers)

    image_name = name + '.png'
    centre_name = name + '.csv'
    # iterate over segmentation methods and perform segmentation on this image
    for method in dict_segment:
        (fn, args) = dict_segment[method]
        logging.debug(' -> %s on "%s"', method, name)
        path_dir = os.path.join(params['path_exp'], method)
        path_segm = os.path.join(path_dir, image_name)
        path_centre = os.path.join(path_dir + DIR_CENTRE_POSIX, centre_name)
        path_fig = os.path.join(path_dir + DIR_VISUAL_POSIX, image_name)
        path_debug = os.path.join(path_dir + DIR_DEBUG_POSIX, name)
        # assuming that segmentation may fail
        try:
            t = time.time()
            if debug_export and 'rg2sp' in method:
                os.mkdir(path_debug)
                segm_obj, centers, dict_export = fn(*args, debug_export=path_debug)
            else:
                segm_obj, centers, dict_export = fn(*args)

            # also export ellipse params here or inside the segm fn
            if dict_export is not None:
                for k in dict_export:
                    export_partial(k, dict_export[k], path_dir, name)

            logging.info('running time of %s on image "%s" is %d s',
                         repr(fn.__name__), image_name, time.time() - t)
            Image.fromarray(segm_obj.astype(np.uint8)).save(path_segm)
            export_draw_image_segm(path_fig, img_rgb, seg, segm_obj, centers)
            # export also centers
            centers = tl_data.swap_coord_x_y(centers)
            tl_data.save_landmarks_csv(path_centre, centers)
        except Exception:
            logging.error('segmentation failed for "%s" via %s with \n %s',
                          name, method, traceback.format_exc())

    return name
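

# NOTE: illustrative sketch only. The superpixel step above (`segment_slic_img2d`)
# is conceptually a SLIC over-segmentation; this standalone example calls
# scikit-image's `slic` directly, with `n_segments` / `compactness` playing a role
# similar to the pipeline's 'slic_size' / 'slic_regul' (the exact mapping between
# superpixel size and segment count is an assumption here).
def _example_slic_superpixels():
    import numpy as np
    from skimage import data
    from skimage.segmentation import slic
    img_rgb = data.astronaut()                     # any RGB image
    segments = slic(img_rgb, n_segments=200, compactness=10.)
    # `segments` assigns one superpixel label to every pixel
    return np.unique(segments).size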
def dataset_load_images_segms_compute_features(params, df_paths,
                                               nb_jobs=NB_THREADS):
    """ create the whole dataset: load input data, compute features
    and label each point as a positive or negative center candidate

    :param {str: any} params: parameters
    :param df_paths: data frame with paths to images, segmentations and centers
    :param int nb_jobs: number of parallel jobs
    :return: loaded images, segmentations, superpixels, points, centers,
        features, labels and feature names
    """
    dict_imgs, dict_segms, dict_center = dict(), dict(), dict()
    logging.info('loading input data (images, segmentation and centers)')
    path_show_in = os.path.join(params['path_expt'], FOLDER_INPUT)
    tqdm_bar = tqdm.tqdm(total=len(df_paths), desc='loading input data')
    wrapper_load_data = partial(load_image_segm_center, path_out=path_show_in,
                                dict_relabel=params['dict_relabel'])
    pool = mproc.Pool(nb_jobs)
    for name, img, seg, center in pool.imap(wrapper_load_data,
                                            df_paths.iterrows()):
        dict_imgs[name] = img
        dict_segms[name] = seg
        dict_center[name] = center
        tqdm_bar.update()
    pool.close()
    pool.join()

    dict_slics, dict_points, dict_features = dict(), dict(), dict()
    logging.info('estimate candidate points and compute features')
    tqdm_bar = tqdm.tqdm(total=len(dict_imgs),
                         desc='estimate candidates & features')
    gene_name_img_seg = ((name, dict_imgs[name], dict_segms[name])
                         for name in dict_imgs)
    wrapper_points_features = partial(wrapper_estim_points_compute_features,
                                      params=params)
    feature_names = None
    pool = mproc.Pool(nb_jobs)
    for name, slic, points, features, feature_names \
            in pool.imap_unordered(wrapper_points_features, gene_name_img_seg):
        dict_slics[name] = slic
        dict_points[name] = points
        dict_features[name] = features
        tqdm_bar.update()
    pool.close()
    pool.join()
    logging.debug('computed features:\n %s', repr(feature_names))

    dict_labels = dict()
    logging.info('assign labels according to close distance to center')
    path_points_train = os.path.join(params['path_expt'], FOLDER_POINTS_TRAIN)
    tqdm_bar = tqdm.tqdm(total=len(dict_center), desc='labels assignment')
    for name in dict_center:
        dict_labels[name] = label_close_points(dict_center[name],
                                               dict_points[name], params)
        points = np.asarray(dict_points[name])[np.asarray(dict_labels[name]) == 1]
        path_csv = os.path.join(path_points_train, name + '.csv')
        tl_io.save_landmarks_csv(path_csv, points)
        tqdm_bar.update()

    return dict_imgs, dict_segms, dict_slics, dict_points, dict_center, \
        dict_features, dict_labels, feature_names
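

# NOTE: illustrative sketch only. The dataset assembly above repeatedly applies
# the pattern "functools.partial + multiprocessing.Pool.imap + tqdm progress bar";
# this self-contained example shows the same pattern on dummy data (the worker
# and its arguments are made up purely for illustration).
def _example_parallel_map(nb_jobs=2):
    import multiprocessing as mproc
    from functools import partial
    import tqdm
    items = list(range(20))
    wrapper = partial(pow, 2)                      # fix the base argument of pow(base, exp)
    results = {}
    tqdm_bar = tqdm.tqdm(total=len(items), desc='dummy processing')
    pool = mproc.Pool(nb_jobs)
    # imap preserves input order, so zipping with the inputs is safe
    for item, value in zip(items, pool.imap(wrapper, items)):
        results[item] = value
        tqdm_bar.update()
    pool.close()
    pool.join()
    return results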