def _add_annotation(
    annotation,
    bbox,
    theta,
    species_name,
    viewpoint,
    interest,
    decrease,
    width,
    height,
    part_type=None,
):
    # Transformation matrix
    R = vt.rotation_around_bbox_mat3x3(theta, bbox)
    # Get vertices of the annotation polygon
    verts = vt.verts_from_bbox(bbox, close=True)
    # Rotate and transform vertices
    xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
    trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
    new_verts = np.round(trans_pts).astype(int).T.tolist()
    x_points = [pt[0] for pt in new_verts]
    y_points = [pt[1] for pt in new_verts]
    xmin = int(min(x_points) * decrease)
    xmax = int(max(x_points) * decrease)
    ymin = int(min(y_points) * decrease)
    ymax = int(max(y_points) * decrease)
    # Bounds check
    xmin = max(xmin, 0)
    ymin = max(ymin, 0)
    xmax = min(xmax, width - 1)
    ymax = min(ymax, height - 1)
    # Get info
    info = {}
    w_ = xmax - xmin
    h_ = ymax - ymin
    # Skip annotations that are too small after rescaling
    if w_ < min_annot_size:
        return
    if h_ < min_annot_size:
        return
    if viewpoint != -1 and viewpoint is not None:
        info['pose'] = viewpoint
    if interest is not None:
        info['interest'] = '1' if interest else '0'
    if part_type is not None:
        species_name = '%s+%s' % (species_name, part_type)
    area = w_ * h_
    logger.info(
        '\t\tAdding %r with area %0.04f pixels^2'
        % (
            species_name,
            area,
        )
    )
    annotation.add_object(species_name, (xmax, xmin, ymax, ymin), **info)

def align(bbox, theta, width, height):
    # Transformation matrix
    R = vt.rotation_around_bbox_mat3x3(theta, bbox)
    # Get vertices of the annotation polygon
    verts = vt.verts_from_bbox(bbox, close=True)
    # Rotate and transform vertices
    xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
    trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
    new_verts = np.round(trans_pts).astype(int).T.tolist()
    x_points = [pt[0] for pt in new_verts]
    y_points = [pt[1] for pt in new_verts]
    xmin = int(min(x_points))
    xmax = int(max(x_points))
    ymin = int(min(y_points))
    ymax = int(max(y_points))
    # Bounds check
    xmin = max(xmin, 0)
    ymin = max(ymin, 0)
    xmax = min(xmax, width - 1)
    ymax = min(ymax, height - 1)
    xtl = xmin
    ytl = ymin
    w = xmax - xmin
    h = ymax - ymin
    return (xtl, ytl, w, h)

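# Minimal standalone sketch of the geometry align() relies on, assuming
# vt.rotation_around_bbox_mat3x3 rotates about the bbox center: rotate the four
# corners of an (xtl, ytl, w, h) bbox by theta, re-fit an axis-aligned box, and
# clamp it to the image. The helper name _align_plain is illustrative only and
# is not part of the codebase.
def _align_plain(bbox, theta, width, height):
    import numpy as np

    xtl, ytl, w, h = bbox
    cx, cy = xtl + w / 2.0, ytl + h / 2.0
    corners = np.array(
        [[xtl, ytl], [xtl + w, ytl], [xtl + w, ytl + h], [xtl, ytl + h]],
        dtype=float,
    )
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s], [s, c]])
    # Rotate the corners about the bbox center
    rotated = R.dot((corners - [cx, cy]).T).T + [cx, cy]
    # Re-fit an axis-aligned box and clamp to the image bounds
    xmin = max(int(rotated[:, 0].min()), 0)
    ymin = max(int(rotated[:, 1].min()), 0)
    xmax = min(int(rotated[:, 0].max()), width - 1)
    ymax = min(int(rotated[:, 1].max()), height - 1)
    return (xmin, ymin, xmax - xmin, ymax - ymin)
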
def closest_point_on_bbox(p, bbox):
    """
    Example1:
        >>> # ENABLE_DOCTEST
        >>> from vtool.geometry import *  # NOQA
        >>> p_list = np.array([[19, 7], [7, 14], [14, 11], [8, 7], [23, 21]], dtype=float)
        >>> bbox = np.array([10, 10, 10, 10], dtype=float)
        >>> [closest_point_on_bbox(p, bbox) for p in p_list]
    """
    import vtool as vt

    verts = np.array(vt.verts_from_bbox(bbox, close=True))
    new_pts = closest_point_on_verts(p, verts)
    return new_pts

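# Hedged sketch of the projection that closest_point_on_verts presumably
# performs on the closed polygon above: project the point onto each edge,
# clamp the projection to the segment, and keep the nearest candidate. The
# name _closest_point_on_verts_plain is illustrative, not the vtool
# implementation.
def _closest_point_on_verts_plain(p, verts):
    import numpy as np

    p = np.asarray(p, dtype=float)
    best, best_dist = None, np.inf
    for a, b in zip(verts[:-1], verts[1:]):
        a = np.asarray(a, dtype=float)
        b = np.asarray(b, dtype=float)
        ab = b - a
        denom = ab.dot(ab)
        # Parameter t in [0, 1] along the segment a -> b
        t = 0.0 if denom == 0 else np.clip((p - a).dot(ab) / denom, 0.0, 1.0)
        candidate = a + t * ab
        dist = np.linalg.norm(p - candidate)
        if dist < best_dist:
            best, best_dist = candidate, dist
    return best
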
def show_multiple_chips(ibs, aid_list, in_image=True, fnum=0, sel_aids=[],
                        subtitle='', annote=False, **kwargs):
    """
    CommandLine:
        python -m ibeis.viz.viz_name --test-show_multiple_chips --show --no-inimage
        python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=6435,9861,137,6563,9167,12547,9332,12598,13285 --no-inimage --notitle
        python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=137,6563,12547,9332,12598,13285 --no-inimage --notitle --adjust=.05
        python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=6563,9332,13285,12598 --no-inimage --notitle --adjust=.05 --rc=1,4
        python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db PZ_Master0 --aids=1288 --no-inimage --notitle --adjust=.05
        python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db PZ_Master0 --aids=4020,4839 --no-inimage --notitle --adjust=.05
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db NNP_Master3 --aids=6524,6540,6571,6751 --no-inimage --notitle --adjust=.05 --diskshow
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST -a default:index=0:4 --show --aids=1 --doboth --show --no-inimage
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --show --no-inimage
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --rc=2,1 --show --no-inimage
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --rc=2,1 --show --notitle --trydrawline --no-draw_lbls
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1,2 --doboth --show --notitle --trydrawline
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1,2,3,4,5 --doboth --rc=2,5 --show --chrlbl --trydrawline --qualtitle --no-figtitle --notitle --doboth --doboth --show
        python -m ibeis.viz.viz_name --test-show_multiple_chips --db NNP_Master3 --aids=15419 --doboth --rc=2,1 --show --notitle --trydrawline --no-draw_lbls

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_name import *  # NOQA
        >>> import ibeis
        >>> ibs, aid_list, in_image = testdata_multichips()
        >>> if True:
        >>>     import matplotlib as mpl
        >>>     from ibeis.scripts.thesis import TMP_RC
        >>>     mpl.rcParams.update(TMP_RC)
        >>> fnum = 0
        >>> sel_aids = []
        >>> subtitle = ''
        >>> annote = False
        >>> fig = show_multiple_chips(ibs, aid_list, in_image, fnum, sel_aids, subtitle, annote)
        >>> ut.quit_if_noshow()
        >>> fig.canvas.draw()
        >>> ut.show_if_requested()
    """
    fnum = pt.ensure_fnum(fnum)
    nAids = len(aid_list)
    if nAids == 0:
        fig = df2.figure(fnum=fnum, pnum=(1, 1, 1), **kwargs)
        df2.imshow_null(fnum=fnum, **kwargs)
        return fig
    # Trigger computation of all chips in parallel
    ibsfuncs.ensure_annotation_data(ibs, aid_list,
                                    chips=(not in_image or annote),
                                    feats=annote)
    print('[viz_name] * annot_vuuid=%r' % ((ibs.get_annot_visual_uuids(aid_list),)))
    print('[viz_name] * aid_list=%r' % ((aid_list,)))
    DOBOTH = ut.get_argflag('--doboth')
    rc = ut.get_argval('--rc', type_=list, default=None)
    if rc is None:
        nRows, nCols = ph.get_square_row_cols(nAids * (2 if DOBOTH else 1))
    else:
        nRows, nCols = rc
    notitle = ut.get_argflag('--notitle')
    draw_lbls = not ut.get_argflag('--no-draw_lbls')
    show_chip_kw = dict(annote=annote, in_image=in_image, notitle=notitle,
                        draw_lbls=draw_lbls)
    #print('[viz_name] * r=%r, c=%r' % (nRows, nCols))
    #gs2 = gridspec.GridSpec(nRows, nCols)
    pnum_ = df2.get_pnum_func(nRows, nCols)
    fig = df2.figure(fnum=fnum, pnum=pnum_(0), **kwargs)
    fig.clf()

    ax_list1 = []
    for px, aid in enumerate(aid_list):
        print('px = %r' % (px,))
        _fig, _ax1 = viz_chip.show_chip(ibs, aid=aid, pnum=pnum_(px), **show_chip_kw)
        print('other_aids = %r' % (ibs.get_annot_contact_aids(aid),))
        ax = df2.gca()
        ax_list1.append(_ax1)
        if aid in sel_aids:
            df2.draw_border(ax, df2.GREEN, 4)
        if ut.get_argflag('--chrlbl') and not DOBOTH:
            ax.set_xlabel('(' + chr(ord('a') - 1 + px) + ')')
        elif ut.get_argflag('--numlbl') and not DOBOTH:
            ax.set_xlabel('(' + str(px + 1) + ')')
        #plot_aid3(ibs, aid)

    # HACK to show in image and not in image
    if DOBOTH:
        #ut.embed()
        #ph.get_plotdat_dict(ax_list1[1])
        #ph.get_plotdat_dict(ax_list2[1])
        ax_list2 = []

        show_chip_kw['in_image'] = not show_chip_kw['in_image']
        start = px + 1
        for px, aid in enumerate(aid_list, start=start):
            _fig, _ax2 = viz_chip.show_chip(ibs, aid=aid, pnum=pnum_(px), **show_chip_kw)
            ax = df2.gca()
            ax_list2.append(_ax2)
            if ut.get_argflag('--chrlbl'):
                ax.set_xlabel('(' + chr(ord('a') - start + px) + ')')
            elif ut.get_argflag('--numlbl'):
                ax.set_xlabel('(' + str(px - start + 1) + ')')
            if ut.get_argflag('--qualtitle'):
                qualtext = ibs.get_annot_quality_texts(aid)
                ax.set_title(qualtext)
            if aid in sel_aids:
                df2.draw_border(ax, df2.GREEN, 4)

        if in_image:
            ax_list1, ax_list2 = ax_list2, ax_list1

        if ut.get_argflag('--trydrawline'):
            # Unfinished
            #ut.embed()
            # Draw lines between corresponding axes
            # References:
            # http://stackoverflow.com/questions/17543359/drawing-lines-between-two-plots-in-matplotlib
            import matplotlib as mpl
            import vtool as vt  # !!!
            #http://matplotlib.org/users/transforms_tutorial.html
            #invTransFigure_fn1 = fig.transFigure.inverted().transform
            #invTransFigure_fn2 = fig.transFigure.inverted().transform
            #print(ax_list1)
            #print(ax_list2)
            assert len(ax_list1) == len(ax_list2)
            for ax1, ax2 in zip(ax_list1, ax_list2):
                #_ = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                #bbox1 = (0, 0, _.width * fig.dpi, _.height * fig.dpi)

                # returns in figure coordinates
                #bbox1 = df2.get_axis_bbox(ax=ax1)
                #if bbox1[-1] < 0:
                #    # Weird bug
                #    bbox1 = bbox1[1]
                print('--')
                print('ax1 = %r' % (ax1,))
                print('ax2 = %r' % (ax2,))
                chipshape = ph.get_plotdat(ax1, 'chipshape')
                #_bbox1 = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
                #bbox1 = (0, 0, _bbox1.width * fig.dpi, _bbox1.height * fig.dpi)
                bbox1 = (0, 0, chipshape[1], chipshape[0])

                aid_ = ph.get_plotdat(ax2, 'aid')
                aid_list_ = ph.get_plotdat(ax2, 'aid_list')
                index = aid_list_.index(aid_)
                annotation_bbox_list = ph.get_plotdat(ax2, 'annotation_bbox_list')
                bbox2 = annotation_bbox_list[index]

                print('bbox1 = %r' % (bbox1,))
                print('bbox2 = %r' % (bbox2,))

                vert_list1 = np.array(vt.verts_from_bbox(bbox1))
                vert_list2 = np.array(vt.verts_from_bbox(bbox2))

                print('vert_list1 = %r' % (vert_list1,))
                print('vert_list2 = %r' % (vert_list2,))

                #for vx in [0, 1, 2, 3]:
                for vx in [0, 1]:
                    vert1 = vert_list1[vx].tolist()
                    vert2 = vert_list2[vx].tolist()
                    print(' ***')
                    print(' * vert1 = %r' % (vert1,))
                    print(' * vert2 = %r' % (vert2,))

                    coordsA = coordsB = 'data'
                    #coords = 'axes points'
                    #'axes fraction'
                    #'axes pixels'
                    #coordsA = 'axes pixels'
                    #coordsB = 'data'
                    #'figure fraction'
                    #'figure pixels'
                    #'figure pixels'
                    #'figure points'
                    #'polar'
                    #'offset points'

                    con = mpl.patches.ConnectionPatch(
                        xyA=vert1, xyB=vert2,
                        coordsA=coordsA, coordsB=coordsB,
                        axesA=ax1, axesB=ax2,
                        linewidth=1, color='k')
                    #, arrowstyle="-")

                    #ut.embed()
                    #con.set_zorder(None)
                    ax1.add_artist(con)
                    #ax2.add_artist(con)

                    #ut.embed()

                    #verts2.T[1] -= bbox2[-1]
                    #bottom_left1, bottom_right1 = verts1[1:3].tolist()
                    #bottom_left2, bottom_right2 = verts2[1:3].tolist()

                ##transAxes1 = ax1.transData.inverted()
                #transAxes1_fn = ax1.transData.transform
                #transAxes2_fn = ax2.transData.transform

                #transAxes1_fn = ut.identity
                #transAxes2_fn = ut.identity

                #coord_bl1 = transFigure.transform(transAxes1.transform(bottom_left1))
                #coord_br1 = transFigure.transform(transAxes1.transform(bottom_right1))
                #coord_bl1 = invTransFigure_fn1(transAxes1_fn(bottom_left1))
                #print('bottom_left2 = %r' % (bottom_left2,))
                #coord_bl1 = (5, 5)
                #coord_bl2 = invTransFigure_fn2(transAxes2_fn(bottom_left2))
                #print('coord_bl2 = %r' % (coord_bl2,))

                #coord_br1 = invTransFigure_fn1(transAxes1_fn(bottom_right1))
                #coord_br2 = invTransFigure_fn2(transAxes2_fn(bottom_right2))
                ##print('coord_bl1 = %r' % (coord_bl1,))

                #line_coords1 = np.vstack([coord_bl1, coord_bl2])
                #line_coords2 = np.vstack([coord_br1, coord_br2])
                #print('line_coords1 = %r' % (line_coords1,))

                #line1 = mpl.lines.Line2D((line_coords1[0]), (line_coords1[1]), transform=fig.transFigure)
                #line2 = mpl.lines.Line2D((line_coords2[0]), (line_coords2[1]), transform=fig.transFigure)

                #xs1, ys1 = line_coords1.T
                #xs2, ys2 = line_coords2.T

                #linekw = dict(transform=fig.transFigure)
                #linekw = dict()

                #print('xs1 = %r' % (xs1,))
                #print('ys1 = %r' % (ys1,))

                #line1 = mpl.lines.Line2D(xs1, ys1, **linekw)
                #line2 = mpl.lines.Line2D(xs2, ys2, **linekw)  # NOQA

                #shrinkA=5, shrinkB=5, mutation_scale=20, fc="w")

                #ax2.add_artist(con)

                #fig.lines.append(line1)
                #fig.lines.append(line2)
            pass
    return fig

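# The --trydrawline branch above is explicitly marked as unfinished. The sketch
# below is a minimal, self-contained matplotlib example (independent of
# ibeis/plottool, with an illustrative helper name) showing how ConnectionPatch
# draws a line between data points in two different axes, which is the effect
# that branch is working toward.
def _connection_patch_demo():
    import matplotlib.pyplot as plt
    from matplotlib.patches import ConnectionPatch

    fig, (ax1, ax2) = plt.subplots(1, 2)
    xy1, xy2 = (0.2, 0.2), (0.8, 0.8)
    ax1.plot(*xy1, 'o')
    ax2.plot(*xy2, 'o')
    con = ConnectionPatch(xyA=xy1, xyB=xy2, coordsA='data', coordsB='data',
                          axesA=ax1, axesB=ax2, linewidth=1, color='k')
    # Adding the artist to one axes is enough; the patch spans both axes
    ax1.add_artist(con)
    return fig
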
def _add_annotation_or_part(
    image_index,
    annot_index,
    annot_uuid,
    bbox,
    theta,
    species_name,
    viewpoint,
    interest,
    annot_name,
    decrease,
    width,
    height,
    individuals,
    part_index=None,
    part_uuid=None,
):
    is_part = part_index is not None

    # Rotate the bbox vertices and rescale them into the output image
    R = vt.rotation_around_bbox_mat3x3(theta, bbox)
    verts = vt.verts_from_bbox(bbox, close=True)
    xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
    trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
    new_verts = np.round(trans_pts).astype(int).T.tolist()

    x_points = [int(np.around(pt[0] * decrease)) for pt in new_verts]
    y_points = [int(np.around(pt[1] * decrease)) for pt in new_verts]
    segmentation = ut.flatten(list(zip(x_points, y_points)))

    xmin = max(min(x_points), 0)
    ymin = max(min(y_points), 0)
    xmax = min(max(x_points), width - 1)
    ymax = min(max(y_points), height - 1)

    w = xmax - xmin
    h = ymax - ymin
    area = w * h

    xtl_, ytl_, w_, h_ = bbox
    xtl_ *= decrease
    ytl_ *= decrease
    w_ *= decrease
    h_ *= decrease

    annot_part = {
        'bbox': [xtl_, ytl_, w_, h_],
        'theta': theta,
        'viewpoint': viewpoint,
        'segmentation': [segmentation],
        'segmentation_bbox': [xmin, ymin, w, h],
        'area': area,
        'iscrowd': 0,
        'id': part_index if is_part else annot_index,
        'image_id': image_index,
        'category_id': category_dict[species_name],
        'uuid': str(part_uuid if is_part else annot_uuid),
        'individual_ids': individuals,
    }
    if is_part:
        annot_part['annot_id'] = annot_index
    else:
        annot_part['isinterest'] = int(interest)
        annot_part['name'] = annot_name

    return annot_part, area

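# The COCO 'segmentation' field stores a polygon as a flat [x0, y0, x1, y1, ...]
# list. A minimal illustration of the ut.flatten(list(zip(...))) step above,
# using only the standard library and made-up coordinates:
from itertools import chain

x_points = [10, 110, 110, 10, 10]
y_points = [20, 20, 220, 220, 20]
segmentation = list(chain.from_iterable(zip(x_points, y_points)))
# segmentation == [10, 20, 110, 20, 110, 220, 10, 220, 10, 20]
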
def export_to_coco(ibs, species_list, species_mapping={}, target_size=2400,
                   use_maximum_linear_dimension=True,
                   use_existing_train_test=True, gid_list=None,
                   include_reviews=False, require_named=True,
                   output_images=True, **kwargs):
    """Create a COCO dataset for training models."""
    from datetime import date
    import datetime
    import random
    import json

    print('Received species_mapping = %r' % (species_mapping, ))
    print('Using species_list = %r' % (species_list, ))

    current_year = int(date.today().year)
    datadir = abspath(join(ibs.get_cachedir(), 'coco'))
    annotdir = join(datadir, 'annotations')
    imagedir = join(datadir, 'images')
    image_dir_dict = {
        'train': join(imagedir, 'train%s' % (current_year, )),
        'val': join(imagedir, 'val%s' % (current_year, )),
        'test': join(imagedir, 'test%s' % (current_year, )),
    }

    ut.delete(datadir)
    ut.ensuredir(datadir)
    ut.ensuredir(annotdir)
    ut.ensuredir(imagedir)
    for dataset in image_dir_dict:
        ut.ensuredir(image_dir_dict[dataset])

    info = {
        'description': 'Wild Me %s Dataset' % (ibs.dbname, ),
        # 'url': 'http://www.greatgrevysrally.com',
        'url': 'http://www.wildme.org',
        'version': '1.0',
        'year': current_year,
        'contributor': 'Wild Me, Jason Parham <*****@*****.**>',
        'date_created': datetime.datetime.utcnow().isoformat(' '),
        'ibeis_database_name': ibs.get_db_name(),
        'ibeis_database_uuid': str(ibs.get_db_init_uuid()),
    }

    licenses = [
        {
            'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/',
            'id': 3,
            'name': 'Attribution-NonCommercial-NoDerivs License',
        },
    ]

    assert len(species_list) == len(
        set(species_list)
    ), 'Cannot have duplicate species in species_list'
    category_dict = {}
    categories = []
    for index, species in enumerate(sorted(species_list)):
        species = species_mapping.get(species, species)
        categories.append({
            'id': index,
            'name': species,
            'supercategory': 'animal',
        })
        category_dict[species] = index

    output_dict = {}
    for dataset in ['train', 'val', 'test']:
        output_dict[dataset] = {
            'info': info,
            'licenses': licenses,
            'categories': categories,
            'images': [],
            'annotations': [],
        }

    # Get all gids and process them
    if gid_list is None:
        aid_list = ibs.get_valid_aids()
        species_list_ = ibs.get_annot_species(aid_list)
        flag_list = [
            species_mapping.get(species_, species_) in species_list
            for species_ in species_list_
        ]
        aid_list = ut.compress(aid_list, flag_list)
        if require_named:
            nid_list = ibs.get_annot_nids(aid_list)
            flag_list = [nid >= 0 for nid in nid_list]
            aid_list = ut.compress(aid_list, flag_list)
        gid_list = sorted(list(set(ibs.get_annot_gids(aid_list))))

    # Make a preliminary train / test split as imagesets or use the existing ones
    if not use_existing_train_test:
        ibs.imageset_train_test_split(**kwargs)

    train_gid_set = set(general_get_imageset_gids(ibs, 'TRAIN_SET', **kwargs))
    test_gid_set = set(general_get_imageset_gids(ibs, 'TEST_SET', **kwargs))

    image_index = 1
    annot_index = 1
    aid_dict = {}

    print('Exporting %d images' % (len(gid_list), ))
    for gid in gid_list:
        if gid in test_gid_set:
            dataset = 'test'
        elif gid in train_gid_set:
            state = random.uniform(0.0, 1.0)
            if state <= 0.75:
                dataset = 'train'
            else:
                dataset = 'val'
        else:
            raise AssertionError(
                'All gids must be either in the TRAIN_SET or TEST_SET imagesets'
            )

        width, height = ibs.get_image_sizes(gid)
        if target_size is None:
            decrease = 1.0
        else:
            condition = width > height if use_maximum_linear_dimension else width < height
            if condition:
                ratio = height / width
                decrease = target_size / width
                width = target_size
                height = int(target_size * ratio)
            else:
                ratio = width / height
                decrease = target_size / height
                height = target_size
                width = int(target_size * ratio)

        image_path = ibs.get_image_paths(gid)
        image_filename = '%012d.jpg' % (image_index, )
        image_filepath = join(image_dir_dict[dataset], image_filename)

        if output_images:
            _image = ibs.get_images(gid)
            _image = vt.resize(_image, (width, height))
            vt.imwrite(image_filepath, _image)

        output_dict[dataset]['images'].append({
            'license': 3,
            # 'file_name': image_filename,
            'file_name': basename(ibs.get_image_uris_original(gid)),
            'coco_url': None,
            'height': height,
            'width': width,
            'date_captured': ibs.get_image_datetime_str(gid).replace('/', '-'),
            'flickr_url': None,
            'id': image_index,
            'ibeis_image_uuid': str(ibs.get_image_uuids(gid)),
        })

        print('Copying:\n%r\n%r\n%r\n\n' % (
            image_path,
            image_filepath,
            (width, height),
        ))

        aid_list = ibs.get_image_aids(gid)
        bbox_list = ibs.get_annot_bboxes(aid_list)
        theta_list = ibs.get_annot_thetas(aid_list)
        species_name_list = ibs.get_annot_species_texts(aid_list)
        viewpoint_list = ibs.get_annot_viewpoints(aid_list)
        nid_list = ibs.get_annot_nids(aid_list)

        seen = 0
        zipped = zip(aid_list, bbox_list, theta_list, species_name_list,
                     viewpoint_list, nid_list)
        for aid, bbox, theta, species_name, viewpoint, nid in zipped:
            species_name = species_mapping.get(species_name, species_name)
            if species_name is None:
                continue
            if species_name not in species_list:
                continue
            if require_named and nid < 0:
                continue

            # Transformation matrix
            R = vt.rotation_around_bbox_mat3x3(theta, bbox)
            verts = vt.verts_from_bbox(bbox, close=True)
            xyz_pts = vt.add_homogenous_coordinate(np.array(verts).T)
            trans_pts = vt.remove_homogenous_coordinate(R.dot(xyz_pts))
            new_verts = np.round(trans_pts).astype(int).T.tolist()
            x_points = [int(np.around(pt[0] * decrease)) for pt in new_verts]
            y_points = [int(np.around(pt[1] * decrease)) for pt in new_verts]
            segmentation = ut.flatten(list(zip(x_points, y_points)))
            xmin = max(min(x_points), 0)
            ymin = max(min(y_points), 0)
            xmax = min(max(x_points), width - 1)
            ymax = min(max(y_points), height - 1)
            w = xmax - xmin
            h = ymax - ymin
            area = w * h

            # individuals = ibs.get_name_aids(ibs.get_annot_nids(aid))

            reviews = ibs.get_review_rowids_from_single([aid])[0]
            user_list = ibs.get_review_identity(reviews)
            aid_tuple_list = ibs.get_review_aid_tuple(reviews)
            decision_list = ibs.get_review_decision_str(reviews)

            ids = []
            decisions = []
            zipped = zip(user_list, aid_tuple_list, decision_list)
            for user, aid_tuple, decision in zipped:
                if 'user:web' not in user:
                    continue
                match = list(set(aid_tuple) - set([aid]))
                assert len(match) == 1
                ids.append(match[0])
                decisions.append(decision.lower())

            xtl_, ytl_, w_, h_ = bbox
            xtl_ *= decrease
            ytl_ *= decrease
            w_ *= decrease
            h_ *= decrease

            annot = {
                'bbox': [xtl_, ytl_, w_, h_],
                'theta': theta,
                'viewpoint': viewpoint,
                'segmentation': [segmentation],
                'segmentation_bbox': [xmin, ymin, w, h],
                'area': area,
                'iscrowd': 0,
                'image_id': image_index,
                'category_id': category_dict[species_name],
                'id': annot_index,
                'ibeis_annot_uuid': str(ibs.get_annot_uuids(aid)),
                'ibeis_annot_name': str(ibs.get_annot_name_texts(aid)),
                # 'individual_ids': individuals,
            }
            if include_reviews:
                annot['review_ids'] = list(zip(ids, decisions))

            output_dict[dataset]['annotations'].append(annot)
            seen += 1

            print('\t\tAdding %r with area %0.04f pixels^2' % (
                species_name,
                area,
            ))

            aid_dict[aid] = annot_index
            annot_index += 1

        # assert seen > 0
        image_index += 1

    for dataset in output_dict:
        annots = output_dict[dataset]['annotations']
        for index in range(len(annots)):
            annot = annots[index]

            # Map internal aids to external annot index
            # individual_ids = annot['individual_ids']
            # individual_ids_ = []
            # for individual_id in individual_ids:
            #     if individual_id not in aid_dict:
            #         continue
            #     individual_id_ = aid_dict[individual_id]
            #     individual_ids_.append(individual_id_)
            # annot['individual_ids'] = individual_ids_

            # Map reviews
            if include_reviews:
                review_ids = annot['review_ids']
                review_ids_ = []
                for review in review_ids:
                    review_id, review_decision = review
                    if review_id not in aid_dict:
                        continue
                    review_id_ = aid_dict[review_id]
                    review_ = (review_id_, review_decision, )
                    review_ids_.append(review_)
                annot['review_ids'] = review_ids_

            # Store
            output_dict[dataset]['annotations'][index] = annot

    for dataset in output_dict:
        json_filename = 'instances_%s%s.json' % (dataset, current_year, )
        json_filepath = join(annotdir, json_filename)
        with open(json_filepath, 'w') as json_file:
            json.dump(output_dict[dataset], json_file)

    print('...completed')
    return datadir

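# Hypothetical usage sketch for export_to_coco. The database name and species
# value below are illustrative only; in this codebase the function is normally
# reached as an injected ibs method, so the direct call is just for clarity.
#
#   import ibeis
#   ibs = ibeis.opendb('PZ_MTEST')
#   datadir = export_to_coco(ibs, ['zebra_plains'], target_size=2400,
#                            include_reviews=False, output_images=True)
#   # datadir then holds images/{train,val,test}<year>/ and
#   # annotations/instances_<split><year>.json
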