def api_root():
    """Render the API index page listing all registered routes grouped by HTTP method.

    Walks the Flask URL map, strips the implicit HEAD/OPTIONS methods from
    ``/api/`` routes, groups the remaining (method, url) pairs by method, and
    renders the 'api' template together with the app's authorization info.
    """
    rules = current_app.url_map.iter_rules()
    rule_dict = {}
    for rule in rules:
        # Copy before subtracting: ``rule.methods`` is shared with the live
        # URL map, and an in-place ``-=`` would mutate the routing table.
        methods = set(rule.methods)
        url = str(rule)
        if '/api/' in url:
            # HEAD/OPTIONS are added implicitly by Flask; hide them here.
            methods -= {'HEAD', 'OPTIONS'}
        if len(methods) == 0:
            continue
        if len(methods) > 1:
            # Debug trace: a rule that still advertises several methods.
            print('methods = %r' % (methods, ))
        method = next(iter(methods))
        rule_dict.setdefault(method, []).append((method, url, ))
    for method in rule_dict:
        rule_dict[method].sort()
    url = '%s/api/core/dbname/' % (current_app.server_url, )
    app_auth = controller_inject.get_url_authorization(url)
    return appf.template(None, 'api',
                         app_url=url,
                         app_name=controller_inject.GLOBAL_APP_NAME,
                         app_secret=controller_inject.GLOBAL_APP_SECRET,
                         app_auth=app_auth,
                         rule_list=rule_dict)
def experiments_voting(**kwargs):
    """Initialize the voting experiment, load its data, and render the page.

    The template receives the merged global + local namespace, mirroring the
    other ``experiments_*`` views in this module.
    """
    experiments_voting_initialize()
    voting_data()
    embed = dict(globals(), **locals())
    return appf.template('experiments', 'voting', **embed)
def group_review():
    """Render the group-review page for a set of candidate annotations.

    Candidates come from one of two sources:
      * ``?prefill=<nonempty>`` — annotations the CNN species/viewpoint
        validator flagged, grouped by predicted viewpoint and ordered so the
        smallest groups are reviewed first; or
      * ``?aid_list=[1,2,3]`` — an explicit, comma-separated aid list.
    Otherwise an empty candidate set is rendered.
    """
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            # Group flagged annotations by the predicted viewpoint (column 3).
            grouped_dict = ut.group_items(bad_viewpoint_list,
                                          ut.get_list_column(bad_viewpoint_list, 3))
            # Materialize explicitly: in Python 3, dict ``.values()`` is a
            # view and ``map`` is a lazy iterator — ``ut.sortedby`` needs
            # concrete, re-iterable sequences of matching length.
            grouped_list = list(grouped_dict.values())
            regrouped_items = ut.flatten(
                ut.sortedby(grouped_list, list(map(len, grouped_list))))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [
                bad_viewpoint[0]
                for bad_viewpoint in bad_viewpoint_list
            ]
    elif request.args.get('aid_list', None) is not None:
        aid_list = request.args.get('aid_list', '')
        if len(aid_list) > 0:
            # Accept a bracketed, comma-separated literal like "[1, 2, 3]".
            aid_list = aid_list.replace('[', '')
            aid_list = aid_list.replace(']', '')
            aid_list = aid_list.strip().split(',')
            candidate_aid_list = [int(aid_.strip()) for aid_ in aid_list]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''
    return appf.template(None, 'group_review',
                         candidate_aid_list=candidate_aid_list,
                         mode_list=appf.VALID_TURK_MODES)
def error404(exception=None):
    """Render the 404 page, logging the exception and the active traceback."""
    import traceback
    exception_str = str(exception)
    traceback_str = str(traceback.format_exc())
    # Echo both strings to the server log for post-mortem debugging.
    for message in (exception_str, traceback_str):
        print('[web] %r' % (message, ))
    return appf.template(None, '404',
                         exception_str=exception_str,
                         traceback_str=traceback_str)
def image_view_api(gid=None, thumbnail=False, fresh=False, **kwargs):
    r"""
    Returns the base64 encoded image of image <gid>

    RESTful:
        Method: GET
        URL:    /image/view/<gid>/
    """
    # Delegate the actual encoding to the shared AJAX helper.
    src_kwargs = dict(thumbnail=thumbnail, fresh=fresh)
    src_kwargs.update(kwargs)
    encoded = routes_ajax.image_src(gid, **src_kwargs)
    return appf.template(None, 'single', encoded=encoded)
def view_imagesets():
    """Render the imagesets overview table.

    With ``?imgsetid=1,2,...`` only those imagesets are shown (``filtered``);
    otherwise every valid imageset is listed.  For each imageset the view
    reports image/annotation counts, per-stage review progress, and the
    start time, sorted chronologically.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid = request.args.get('imgsetid', '')
    if len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [
            None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_)
            for imgsetid_ in imgsetid_list
        ]
    else:
        imgsetid_list = ibs.get_valid_imgsetids()
        filtered = False
    start_time_posix_list = ibs.get_imageset_start_time_posix(imgsetid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(start_time_posix)
        if start_time_posix is not None else 'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [
        ibs.get_valid_gids(imgsetid=imgsetid_)
        for imgsetid_ in imgsetid_list
    ]
    aids_list = [
        ut.flatten(ibs.get_image_aids(gid_list))
        for gid_list in gids_list
    ]
    # Per-stage review flags for every image / annotation in each imageset.
    images_reviewed_list = [
        appf.imageset_image_processed(ibs, gid_list)
        for gid_list in gids_list
    ]
    annots_reviewed_viewpoint_list = [
        appf.imageset_annot_viewpoint_processed(ibs, aid_list)
        for aid_list in aids_list
    ]
    annots_reviewed_quality_list = [
        appf.imageset_annot_quality_processed(ibs, aid_list)
        for aid_list in aids_list
    ]
    image_processed_list = [
        images_reviewed.count(True)
        for images_reviewed in images_reviewed_list
    ]
    annot_processed_viewpoint_list = [
        annots_reviewed.count(True)
        for annots_reviewed in annots_reviewed_viewpoint_list
    ]
    annot_processed_quality_list = [
        annots_reviewed.count(True)
        for annots_reviewed in annots_reviewed_quality_list
    ]
    # An imageset is fully reviewed only if every image and every annotation
    # (viewpoint + quality) has been processed.
    reviewed_list = [
        all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality)
        for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in
        zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list)
    ]
    # ``zip`` is a lazy iterator on Python 3 (no ``.sort``); materialize it.
    imageset_list = list(zip(
        imgsetid_list,
        ibs.get_imageset_text(imgsetid_list),
        ibs.get_imageset_num_gids(imgsetid_list),
        image_processed_list,
        ibs.get_imageset_num_aids(imgsetid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    ))
    # Sort by start time (column 7); imagesets with an unknown start time
    # sort first, matching Python 2's ``None < int`` ordering.
    imageset_list.sort(key=lambda t: (t[7] is not None, t[7] if t[7] is not None else 0))
    return appf.template('view', 'imagesets',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         imageset_list=imageset_list,
                         num_imagesets=len(imageset_list))
def turk_detection_dynamic():
    """Render the single-image detection turking interface for ``?gid=<gid>``.

    Embeds the image (base64) together with its existing annotations —
    expressed as percentages of the image dimensions — so the front-end
    bounding-box editor can prefill them.
    """
    ibs = current_app.ibs
    gid = request.args.get('gid', None)
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ibs.get_images(gid)
    image_src = appf.embed_image_html(image)
    # Get annotations
    width, height = ibs.get_image_sizes(gid)
    aid_list = ibs.get_image_aids(gid)
    annot_bbox_list = ibs.get_annot_bboxes(aid_list)
    annot_thetas_list = ibs.get_annot_thetas(aid_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    # Get annotation bounding boxes
    annotation_list = []
    for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
        temp = {}
        # Convert (x, y, w, h) pixels to percentages of the image size, the
        # coordinate system the front-end bbox editor works in.
        temp['left'] = 100.0 * (annot_bbox[0] / width)
        temp['top'] = 100.0 * (annot_bbox[1] / height)
        temp['width'] = 100.0 * (annot_bbox[2] / width)
        temp['height'] = 100.0 * (annot_bbox[3] / height)
        temp['label'] = species
        temp['id'] = aid
        temp['theta'] = float(annot_theta)
        annotation_list.append(temp)
    if len(species_list) > 0:
        species = max(set(species_list), key=species_list.count)  # Get most common species
    elif appf.default_species(ibs) is not None:
        species = appf.default_species(ibs)
    else:
        species = KEY_DEFAULTS[SPECIES_KEY]
    # NOTE(review): the query parameter is named ``imgsetid`` but is filled
    # with the image's ``gid`` — confirm against the ``submit_detection``
    # handler before relying on this.
    callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), gid, )
    return appf.template('turk', 'detection_dynamic',
                         gid=gid,
                         refer_aid=None,
                         species=species,
                         image_path=gpath,
                         image_src=image_src,
                         annotation_list=annotation_list,
                         callback_url=callback_url,
                         callback_method='POST',
                         EMBEDDED_CSS=None,
                         EMBEDDED_JAVASCRIPT=None,
                         __wrapper__=False)
def review_graph_match_html(ibs, review_pair, cm_dict, query_config_dict,
                            _internal_state, callback_url,
                            callback_method='POST',
                            view_orientation='vertical',
                            include_jquery=False):
    r"""
    Build the HTML page for reviewing a single query-match pair.

    Args:
        ibs (ibeis.IBEISController):  image analysis api
        review_pair (dict): pair of annot uuids
        cm_dict (dict):
        query_config_dict (dict):
        _internal_state (?):
        callback_url (?):
        callback_method (unicode): (default = u'POST')
        view_orientation (unicode): (default = u'vertical')
        include_jquery (bool): (default = False)

    CommandLine:
        python -m ibeis.web.apis_query review_graph_match_html --show

        ibeis --web
        python -m ibeis.web.apis_query review_graph_match_html --show --domain=localhost

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.web.apis_query import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web('testdb1')  # , domain='http://52.33.105.88')
        >>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')[0:2]
        >>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuids/', type_='get', aid_list=aids)
        >>> quuid_list = uuid_list[0:1]
        >>> duuid_list = uuid_list
        >>> query_config_dict = {
        >>>    # 'pipeline_root' : 'BC_DTW'
        >>> }
        >>> data = dict(
        >>>     query_annot_uuid_list=quuid_list, database_annot_uuid_list=duuid_list,
        >>>     query_config_dict=query_config_dict,
        >>> )
        >>> jobid = web_ibs.send_ibeis_request('/api/engine/query/graph/', **data)
        >>> print('jobid = %r' % (jobid,))
        >>> status_response = web_ibs.wait_for_results(jobid)
        >>> result_response = web_ibs.read_engine_results(jobid)
        >>> inference_result = result_response['json_result']
        >>> print('inference_result = %r' % (inference_result,))
        >>> auuid2_cm = inference_result['cm_dict']
        >>> quuid = quuid_list[0]
        >>> class_dict = auuid2_cm[str(quuid)]
        >>> # Get information in frontend
        >>> #ibs = ibeis.opendb('testdb1')
        >>> #cm = match_obj = ibeis.ChipMatch.from_dict(class_dict, ibs=ibs)
        >>> #match_obj.print_rawinfostr()
        >>> # Make the dictionary a bit more managable
        >>> #match_obj.compress_top_feature_matches(num=2)
        >>> #class_dict = match_obj.to_dict(ibs=ibs)
        >>> cm_dict = class_dict
        >>> # Package for review
        >>> review_pair = {'annot_uuid_1': quuid, 'annot_uuid_2': duuid_list[1]}
        >>> callback_method = u'POST'
        >>> view_orientation = u'vertical'
        >>> include_jquery = False
        >>> kw = dict(
        >>>     review_pair=review_pair,
        >>>     cm_dict=cm_dict,
        >>>     query_config_dict=query_config_dict,
        >>>     _internal_state=None,
        >>>     callback_url = None,
        >>> )
        >>> html_str = web_ibs.send_ibeis_request('/api/review/query/graph/', type_='get', **kw)
        >>> web_ibs.terminate2()
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.render_html(html_str)
        >>> ut.show_if_requested()

    Example2:
        >>> # DISABLE_DOCTEST
        >>> # This starts off using web to get information, but finishes the rest in python
        >>> from ibeis.web.apis_query import *  # NOQA
        >>> import ibeis
        >>> ut.exec_funckw(review_graph_match_html, globals())
        >>> web_ibs = ibeis.opendb_bg_web('testdb1')  # , domain='http://52.33.105.88')
        >>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')[0:2]
        >>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuids/', type_='get', aid_list=aids)
        >>> quuid_list = uuid_list[0:1]
        >>> duuid_list = uuid_list
        >>> query_config_dict = {
        >>>    # 'pipeline_root' : 'BC_DTW'
        >>> }
        >>> data = dict(
        >>>     query_annot_uuid_list=quuid_list, database_annot_uuid_list=duuid_list,
        >>>     query_config_dict=query_config_dict,
        >>> )
        >>> jobid = web_ibs.send_ibeis_request('/api/engine/query/graph/', **data)
        >>> status_response = web_ibs.wait_for_results(jobid)
        >>> result_response = web_ibs.read_engine_results(jobid)
        >>> web_ibs.terminate2()
        >>> # NOW WORK IN THE FRONTEND
        >>> inference_result = result_response['json_result']
        >>> auuid2_cm = inference_result['cm_dict']
        >>> quuid = quuid_list[0]
        >>> class_dict = auuid2_cm[str(quuid)]
        >>> # Get information in frontend
        >>> ibs = ibeis.opendb('testdb1')
        >>> cm = ibeis.ChipMatch.from_dict(class_dict, ibs=ibs)
        >>> cm.print_rawinfostr()
        >>> # Make the dictionary a bit more managable
        >>> cm.compress_top_feature_matches(num=1)
        >>> cm.print_rawinfostr()
        >>> class_dict = cm.to_dict(ibs=ibs)
        >>> cm_dict = class_dict
        >>> # Package for review ( CANT CALL DIRECTLY BECAUSE OF OUT OF CONTEXT )
        >>> review_pair = {'annot_uuid_1': quuid, 'annot_uuid_2': duuid_list[1]}
        >>> x = review_graph_match_html(ibs, review_pair, cm_dict,
        >>>                             query_config_dict, _internal_state=None,
        >>>                             callback_url=None)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.render_html(html_str)
        >>> ut.show_if_requested()
    """
    from ibeis.algo.hots import chip_match
    # from ibeis.algo.hots.query_request import QueryRequest
    # ``proot`` may be spelled either 'pipeline_root' or 'proot' in the
    # config; the second get keeps the first as its fallback.
    proot = query_config_dict.get('pipeline_root', 'vsmany')
    proot = query_config_dict.get('proot', proot)
    if proot.upper() == 'BC_DTW':
        cls = chip_match.AnnotMatch  # ibs.depc_annot.requestclass_dict['BC_DTW']
    else:
        cls = chip_match.ChipMatch
    # Normalize the orientation; anything unrecognized falls back to horizontal.
    view_orientation = view_orientation.lower()
    if view_orientation not in ['vertical', 'horizontal']:
        view_orientation = 'horizontal'
    # unpack info
    try:
        annot_uuid_1 = review_pair['annot_uuid_1']
        annot_uuid_2 = review_pair['annot_uuid_2']
    except Exception:
        #??? HACK
        # FIXME: some callers apparently wrap the pair dict in a list.
        print('[!!!!] review_pair = %r' % (review_pair,))
        review_pair = review_pair[0]
        annot_uuid_1 = review_pair['annot_uuid_1']
        annot_uuid_2 = review_pair['annot_uuid_2']
    aid_1 = ibs.get_annot_aids_from_uuid(annot_uuid_1)
    aid_2 = ibs.get_annot_aids_from_uuid(annot_uuid_2)
    cm = cls.from_dict(cm_dict, ibs=ibs)
    qreq_ = ibs.new_query_request([aid_1], [aid_2],
                                  cfgdict=query_config_dict)
    # Get score
    idx = cm.daid2_idx[aid_2]
    match_score = cm.name_score_list[idx]
    #match_score = cm.aid2_score[aid_2]
    # Render both the annotated-match and the clean (no match lines) images.
    image_matches = make_review_image(aid_2, cm, qreq_,
                                      view_orientation=view_orientation)
    image_matches_src = appf.embed_image_html(image_matches)
    image_clean = make_review_image(aid_2, cm, qreq_,
                                    view_orientation=view_orientation,
                                    draw_matches=False)
    image_clean_src = appf.embed_image_html(image_clean)
    # Dead debug toggle: the True branch resolves static assets relative to
    # the installed apis_query module instead of this file.
    if False:
        from ibeis.web import apis_query
        root_path = dirname(abspath(apis_query.__file__))
    else:
        root_path = dirname(abspath(__file__))
    css_file_list = [
        ['css', 'style.css'],
        ['include', 'bootstrap', 'css', 'bootstrap.css'],
    ]
    json_file_list = [
        ['javascript', 'script.js'],
        ['include', 'bootstrap', 'js', 'bootstrap.js'],
    ]
    if include_jquery:
        json_file_list = [
            ['javascript', 'jquery.min.js'],
        ] + json_file_list
    # Inline every CSS/JS dependency directly into the page so the returned
    # HTML is self-contained.  (The ``with ... as css_file`` shadows the loop
    # variable, which is fine because it is not reused afterwards.)
    EMBEDDED_CSS = ''
    EMBEDDED_JAVASCRIPT = ''
    css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
    json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
    for css_file in css_file_list:
        css_filepath_list = [root_path, 'static'] + css_file
        with open(join(*css_filepath_list)) as css_file:
            EMBEDDED_CSS += css_template_fmtstr % (css_file.read(), )
    for json_file in json_file_list:
        json_filepath_list = [root_path, 'static'] + json_file
        with open(join(*json_filepath_list)) as json_file:
            EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_file.read(), )
    return appf.template('turk', 'query_match_insert',
                         match_score=match_score,
                         image_clean_src=image_clean_src,
                         image_matches_src=image_matches_src,
                         annot_uuid_1=str(annot_uuid_1),
                         annot_uuid_2=str(annot_uuid_2),
                         view_orientation=view_orientation,
                         callback_url=callback_url,
                         callback_method=callback_method,
                         EMBEDDED_CSS=EMBEDDED_CSS,
                         EMBEDDED_JAVASCRIPT=EMBEDDED_JAVASCRIPT)
def experiments_interest(dbtag1='demo-jasonp', dbtag2='demo-chuck', **kwargs):
    """Compare 'interest' annotations between two reviewers' demo databases.

    Walks the sorted image-UUID lists of both databases in a merge-style
    lockstep.  For images present in both, annotations are matched by
    bounding-box overlap and agreement on the per-annotation 'interest'
    flag is tallied, both per image (``stats``) and globally
    (``stats_global``).  Renders the 'experiments/interest' template with
    the merged namespace.
    """
    from uuid import UUID
    from ibeis.other.detectfuncs import general_overlap, general_parse_gt
    dbtag1 = str(dbtag1)
    dbtag2 = str(dbtag2)
    ibs1 = experiment_init_db(dbtag1)
    ibs2 = experiment_init_db(dbtag2)
    dbdir1 = ibs1.dbdir
    dbdir2 = ibs2.dbdir
    gid_list1 = ibs1.get_valid_gids(reviewed=1)
    gid_list2 = ibs2.get_valid_gids(reviewed=1)
    gt_dict1 = general_parse_gt(ibs1, gid_list1)
    gt_dict2 = general_parse_gt(ibs2, gid_list2)
    # Sort by string form so both lists walk in the same order.
    uuid_list1 = sorted(map(str, ibs1.get_image_uuids(gid_list1)))
    uuid_list2 = sorted(map(str, ibs2.get_image_uuids(gid_list2)))
    gid_pair_list = []
    index1, index2 = 0, 0
    stats_global = {
        'disagree_interest1': 0,
        'disagree_interest2': 0,
        'annot1': 0,
        'annot2': 0,
    }
    while index1 < len(uuid_list1) or index2 < len(uuid_list2):
        # ``None`` marks an exhausted list.
        uuid1 = UUID(uuid_list1[index1]) if index1 < len(uuid_list1) else None
        uuid2 = UUID(uuid_list2[index2]) if index2 < len(uuid_list2) else None
        if uuid1 is None and uuid2 is None:
            break
        gid1 = ibs1.get_image_gids_from_uuid(uuid1)
        gid2 = ibs2.get_image_gids_from_uuid(uuid2)
        print('%s %s' % (index1, index2, ))
        stats = None
        if uuid1 is not None and uuid2 is not None:
            if uuid1 == uuid2:
                # Same image in both databases: match annotations by overlap.
                gt_list1 = gt_dict1[uuid1]
                gt_list2 = gt_dict2[uuid2]
                stats_global['annot1'] += len(gt_list1)
                stats_global['annot2'] += len(gt_list2)
                overlap = general_overlap(gt_list1, gt_list2)
                if 0 in overlap.shape:
                    index_list1 = []
                    index_list2 = []
                else:
                    # Best partner of each annotation, in both directions.
                    index_list1 = np.argmax(overlap, axis=1)
                    index_list2 = np.argmax(overlap, axis=0)
                pair_list1 = set(enumerate(index_list1))
                pair_list2 = set(enumerate(index_list2))
                # Flip so both pair sets are oriented (index1, index2).
                pair_list2 = set([_[::-1] for _ in pair_list2])
                pair_union = pair_list1 | pair_list2
                # Mutual best matches agree in both directions.
                pair_intersect = pair_list1 & pair_list2
                pair_diff_sym = pair_list1 ^ pair_list2
                pair_diff1 = pair_list1 - pair_list2
                pair_diff2 = pair_list2 - pair_list1
                message_list = []
                if len(gt_list1) > 0 and len(gt_list2) == 0:
                    message_list.append('Jason has annotations, Chuck none')
                if len(gt_list1) == 0 and len(gt_list2) > 0:
                    message_list.append('Chuck has annotations, Jason none')
                if len(pair_diff1) > 0 and len(pair_diff2) == 0:
                    message_list.append('Jason has additional annotations')
                if len(pair_diff1) == 0 and len(pair_diff2) > 0:
                    message_list.append('Chuck has additional annotations')
                if len(pair_diff1) > 0 and len(pair_diff2) > 0:
                    message_list.append('Assignment mismatch')
                # Count interest-flag disagreements on mutually matched pairs.
                disagree = 0
                for index1_, index2_ in pair_intersect:
                    gt1 = gt_list1[index1_]
                    gt2 = gt_list2[index2_]
                    interest1 = gt1['interest']
                    interest2 = gt2['interest']
                    if interest1 != interest2:
                        disagree += 1
                    if interest1 > interest2:
                        stats_global['disagree_interest1'] += 1
                    if interest2 > interest1:
                        stats_global['disagree_interest2'] += 1
                if disagree > 0:
                    message_list.append('Interest mismatch')
                stats = {
                    'num_annot1': len(gt_list1),
                    'num_annot2': len(gt_list2),
                    'num_interest1': len([_ for _ in gt_list1 if _['interest']]),
                    'num_interest2': len([_ for _ in gt_list2 if _['interest']]),
                    'conflict': len(message_list) > 0,
                    'message': '<br/>'.join(message_list),
                }
            else:
                # UUIDs differ: only emit (and advance past) the smaller side
                # this iteration — a standard sorted-merge step.
                if uuid1 < uuid2:
                    gid2 = None
                else:
                    gid1 = None
        gid_pair_list.append((gid1, gid2, stats))
        # Advance only the side(s) actually emitted this iteration.
        # NOTE(review): when one list is exhausted, the lookup on a None
        # uuid is presumed to return None so that side stops advancing —
        # confirm get_image_gids_from_uuid(None) behavior.
        if gid1 is not None:
            index1 += 1
        if gid2 is not None:
            index2 += 1
    embed = dict(globals(), **locals())
    return appf.template('experiments', 'interest', **embed)
def review_detection_html(ibs, image_uuid, result_list, callback_url,
                          callback_method='POST', include_jquery=False,
                          config=None):
    """
    Return the detection review interface for a particular image UUID and a
    list of results for that image.

    Args:
        image_uuid (UUID): the UUID of the image you want to review detections for
        result_list (list of dict): list of detection results returned by the detector
        callback_url (str): URL that the review form will submit to (action) when
            the user is complete with their review
        callback_method (str): HTTP method the review form will submit to (method).
            Defaults to 'POST'
        include_jquery (bool): if True, embed jQuery before the other JavaScript
            dependencies.  Defaults to False
        config (dict or None): overrides for the interface feature flags below

    Returns:
        template (html): json response with the detection web interface in html

    RESTful:
        Method: GET
        URL:    /api/review/detect/cnn/yolo/
    """
    ibs.web_check_uuids(image_uuid_list=[image_uuid])
    gid = ibs.get_image_gids_from_uuid(image_uuid)
    if gid is None:
        return 'INVALID IMAGE UUID'
    # Interface feature flags; entries in ``config`` override these defaults.
    default_config = {
        'autointerest'            : False,
        'interest_bypass'         : False,
        'metadata'                : True,
        'metadata_viewpoint'      : False,
        'metadata_quality'        : False,
        'metadata_flags'          : True,
        'metadata_flags_aoi'      : True,
        'metadata_flags_multiple' : False,
        'metadata_species'        : True,
        'metadata_label'          : True,
        'metadata_quickhelp'      : True,
        'parts'                   : False,
        'modes_rectangle'         : True,
        'modes_diagonal'          : True,
        'modes_diagonal2'         : True,
        'staged'                  : False,
    }
    if config is not None:
        default_config.update(config)
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ibs.get_images(gid)
    image_src = appf.embed_image_html(image)
    width, height = ibs.get_image_sizes(gid)
    if width <= 0 or width is None or height <= 0 or height is None:
        vals = (image_uuid, width, height, )
        raise IOError('Image %r for review has either no width or no height (w = %s, h = %s)' % vals)
    annotation_list = []
    for result in result_list:
        # Bucket raw quality into {0: unknown, 1: poor/ok, 2: good}.
        quality = result.get('quality', None)
        if quality in [-1, None]:
            quality = 0
        elif quality <= 2:
            quality = 1
        elif quality > 2:
            quality = 2
        # Prefer the 3-component viewpoint; fall back to converting the
        # single 'viewpoint' value when no component is supplied.
        viewpoint1 = result.get('viewpoint1', None)
        viewpoint2 = result.get('viewpoint2', None)
        viewpoint3 = result.get('viewpoint3', None)
        if viewpoint1 is None and viewpoint2 is None and viewpoint3 is None:
            viewpoint = result.get('viewpoint', None)
            viewpoint1, viewpoint2, viewpoint3 = appf.convert_viewpoint_to_tuple(viewpoint)
        # Coordinates are sent to the front-end as percentages of the image.
        annotation_list.append({
            'id'         : result.get('id', None),
            'left'       : 100.0 * (result.get('left', result['xtl']) / width),
            'top'        : 100.0 * (result.get('top', result['ytl']) / height),
            'width'      : 100.0 * (result['width'] / width),
            'height'     : 100.0 * (result['height'] / height),
            'species'    : result.get('species', result['class']),
            'theta'      : result.get('theta', 0.0),
            'viewpoint1' : viewpoint1,
            'viewpoint2' : viewpoint2,
            'viewpoint3' : viewpoint3,
            'quality'    : quality,
            'multiple'   : 'true' if result.get('multiple', None) == 1 else 'false',
            'interest'   : 'true' if result.get('interest', None) == 1 else 'false',
        })
    species = KEY_DEFAULTS[SPECIES_KEY]
    root_path = dirname(abspath(__file__))
    css_file_list = [
        ['include', 'jquery-ui', 'jquery-ui.min.css'],
        ['include', 'jquery.ui.rotatable', 'jquery.ui.rotatable.css'],
        ['css', 'style.css'],
    ]
    json_file_list = [
        ['include', 'jquery-ui', 'jquery-ui.min.js'],
        ['include', 'jquery.ui.rotatable', 'jquery.ui.rotatable.min.js'],
        ['include', 'bbox_annotator_percent.js'],
        ['javascript', 'script.js'],
        ['javascript', 'turk-detection.js'],
    ]
    if include_jquery:
        json_file_list = [
            ['javascript', 'jquery.min.js'],
        ] + json_file_list
    # Inline all CSS/JS so the returned HTML is self-contained.
    EMBEDDED_CSS = ''
    EMBEDDED_JAVASCRIPT = ''
    css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
    json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
    for css_file in css_file_list:
        css_filepath_list = [root_path, 'static'] + css_file
        with open(join(*css_filepath_list)) as css_file:
            EMBEDDED_CSS += css_template_fmtstr % (css_file.read(), )
    for json_file in json_file_list:
        json_filepath_list = [root_path, 'static'] + json_file
        with open(join(*json_filepath_list)) as json_file:
            EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_file.read(), )
    # Build the species dropdown, sorted by display ("nice") name.
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_list = list(zip(species_nice_list, species_text_list))
    species_list = [ ('Unspecified', const.UNKNOWN) ] + species_list
    # Collect mapping of species to parts
    aid_list = ibs.get_valid_aids()
    part_species_rowid_list = ibs.get_annot_species_rowids(aid_list)
    part_species_text_list = ibs.get_species_texts(part_species_rowid_list)
    part_rowids_list = ibs.get_annot_part_rowids(aid_list)
    part_types_list = map(ibs.get_part_types, part_rowids_list)
    zipped = list(zip(part_species_text_list, part_types_list))
    species_part_dict = {
        const.UNKNOWN: set([])
    }
    for part_species_text, part_type_list in zipped:
        if part_species_text not in species_part_dict:
            species_part_dict[part_species_text] = set([const.UNKNOWN])
        for part_type in part_type_list:
            species_part_dict[part_species_text].add(part_type)
            # UNKNOWN accumulates every part type seen for any species.
            species_part_dict[const.UNKNOWN].add(part_type)
    # Add any images that did not get added because they aren't assigned any annotations
    for species_text in species_text_list:
        if species_text not in species_part_dict:
            species_part_dict[species_text] = set([const.UNKNOWN])
    for key in species_part_dict:
        species_part_dict[key] = sorted(list(species_part_dict[key]))
    species_part_dict_json = json.dumps(species_part_dict)
    orientation_flag = '0'
    if species is not None and 'zebra' in species:
        orientation_flag = '1'
    # Per-user UI settings, read back from cookies ('1' means enabled).
    settings_key_list = [
        ('ia-detection-setting-orientation', orientation_flag),
        ('ia-detection-setting-parts-assignments', '1'),
        ('ia-detection-setting-toggle-annotations', '1'),
        ('ia-detection-setting-toggle-parts', '0'),
        ('ia-detection-setting-parts-show', '0'),
        ('ia-detection-setting-parts-hide', '0'),
    ]
    settings = {
        settings_key: request.cookies.get(settings_key, settings_default) == '1'
        for (settings_key, settings_default) in settings_key_list
    }
    return appf.template('turk', 'detection_insert',
                         gid=gid,
                         refer_aid=None,
                         species=species,
                         image_path=gpath,
                         image_src=image_src,
                         config=default_config,
                         settings=settings,
                         annotation_list=annotation_list,
                         species_list=species_list,
                         species_part_dict_json=species_part_dict_json,
                         callback_url=callback_url,
                         callback_method=callback_method,
                         EMBEDDED_CSS=EMBEDDED_CSS,
                         EMBEDDED_JAVASCRIPT=EMBEDDED_JAVASCRIPT)
def view_experiments(**kwargs):
    """Render the experiments landing page (extra kwargs are ignored)."""
    return appf.template('experiments')
def upload():
    """Render the file-upload page."""
    return appf.template(None, 'upload')
def view_images():
    """Render a paginated table of images.

    Filtering: ``?gid=1,2,...`` shows specific images, ``?imgsetid=1,2,...``
    shows the images of specific imagesets, otherwise all valid images are
    listed.  ``?page=N`` (1-based) selects the page; rows are sorted by image
    unixtime.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [
            None if gid_ == 'None' or gid_ == '' else int(gid_)
            for gid_ in gid_list
        ]
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [
            None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_)
            for imgsetid_ in imgsetid_list
        ]
        # BUGFIX: pass the parsed per-imageset id (imgsetid_), not the raw
        # query-string value (imgsetid), to get_valid_gids.
        gid_list = ut.flatten([
            ibs.get_valid_gids(imgsetid=imgsetid_)
            for imgsetid_ in imgsetid_list
        ])
    else:
        gid_list = ibs.get_valid_gids()
        filtered = False
    # Page
    page_start = min(len(gid_list), (page - 1) * appf.PAGE_SIZE)
    page_end = min(len(gid_list), page * appf.PAGE_SIZE)
    page_total = int(math.ceil(len(gid_list) / appf.PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(gid_list) else page + 1
    gid_list = gid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
    image_unixtime_list = ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(image_unixtime)
        if image_unixtime is not None else 'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # ``zip`` is a lazy iterator on Python 3 (no ``.sort``); materialize it.
    image_list = list(zip(
        gid_list,
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_image_imgsetids(gid_list) ],
        ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        ibs.get_image_gps(gid_list),
        ibs.get_image_party_tag(gid_list),
        ibs.get_image_contributor_tag(gid_list),
        ibs.get_image_notes(gid_list),
        appf.imageset_image_processed(ibs, gid_list),
    ))
    # Sort by unixtime (column 3); unknown times sort first, matching
    # Python 2's ``None < int`` ordering.
    image_list.sort(key=lambda t: (t[3] is not None, t[3] if t[3] is not None else 0))
    return appf.template('view', 'images',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         image_list=image_list,
                         num_images=len(image_list),
                         page=page,
                         page_start=page_start,
                         page_end=page_end,
                         page_total=page_total,
                         page_previous=page_previous,
                         page_next=page_next)
def review_detection_html(ibs, image_uuid, result_list, callback_url,
                          callback_method='POST', include_jquery=False):
    """
    Returns the detection review interface for a particular image UUID and a
    list of results for that image.

    NOTE(review): this appears to be a second, simpler definition of
    ``review_detection_html`` (no ``config`` parameter); if both live in the
    same module the later definition wins — confirm which one is intended.

    Args:
        image_uuid (UUID): the UUID of the image you want to review detections for
        result_list (list of dict): list of detection results returned by the detector
        callback_url (str): URL that the review form will submit to (action) when
            the user is complete with their review
        callback_method (str): HTTP method the review form will submit to (method).
            Defaults to 'POST'
        include_jquery (bool): if True, embed jQuery before the other JavaScript
            dependencies.  Defaults to False

    Returns:
        template (html): json response with the detection web interface in html

    RESTful:
        Method: GET
        URL:    /api/review/detect/cnn/yolo/
    """
    ibs.web_check_uuids(image_uuid_list=[image_uuid])
    gid = ibs.get_image_gids_from_uuid(image_uuid)
    if gid is None:
        return 'INVALID IMAGE UUID'
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ibs.get_images(gid)
    image_src = appf.embed_image_html(image)
    width, height = ibs.get_image_sizes(gid)
    if width <= 0 or width is None or height <= 0 or height is None:
        vals = (
            image_uuid,
            width,
            height,
        )
        raise IOError(
            'Image %r for review has either no width or no height (w = %s, h = %s)'
            % vals)
    # Express each detection as percentages of the image dimensions, the
    # coordinate system the front-end bbox editor works in.
    annotation_list = []
    for result in result_list:
        annotation_list.append({
            'left': 100.0 * (result['xtl'] / width),
            'top': 100.0 * (result['ytl'] / height),
            'width': 100.0 * (result['width'] / width),
            'height': 100.0 * (result['height'] / height),
            'label': result['class'],
            'id': None,
            'theta': result.get('theta', 0.0),
        })
    species = KEY_DEFAULTS[SPECIES_KEY]
    root_path = dirname(abspath(__file__))
    css_file_list = [
        ['include', 'jquery-ui', 'jquery-ui.min.css'],
        ['include', 'jquery.ui.rotatable', 'jquery.ui.rotatable.css'],
        ['css', 'style.css'],
    ]
    json_file_list = [
        ['include', 'jquery-ui', 'jquery-ui.min.js'],
        ['include', 'jquery.ui.rotatable', 'jquery.ui.rotatable.min.js'],
        ['include', 'bbox_annotator_percent.js'],
        ['javascript', 'script.js'],
        ['javascript', 'turk-detection.js'],
    ]
    if include_jquery:
        json_file_list = [
            ['javascript', 'jquery.min.js'],
        ] + json_file_list
    # Inline all CSS/JS so the returned HTML is self-contained.  (The
    # ``with ... as css_file`` shadows the loop variable, which is fine
    # because it is not reused afterwards.)
    EMBEDDED_CSS = ''
    EMBEDDED_JAVASCRIPT = ''
    css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
    json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
    for css_file in css_file_list:
        css_filepath_list = [root_path, 'static'] + css_file
        with open(join(*css_filepath_list)) as css_file:
            EMBEDDED_CSS += css_template_fmtstr % (css_file.read(), )
    for json_file in json_file_list:
        json_filepath_list = [root_path, 'static'] + json_file
        with open(join(*json_filepath_list)) as json_file:
            EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_file.read(), )
    return appf.template('turk', 'detection_insert',
                         gid=gid,
                         refer_aid=None,
                         species=species,
                         image_path=gpath,
                         image_src=image_src,
                         annotation_list=annotation_list,
                         callback_url=callback_url,
                         callback_method=callback_method,
                         EMBEDDED_CSS=EMBEDDED_CSS,
                         EMBEDDED_JAVASCRIPT=EMBEDDED_JAVASCRIPT)
def turk_quality():
    """
    Render the annotation-quality turking page for the next unreviewed annot.

    PZ Needs Tags:
        17242
        14468
        14427
        15946
        14771
        14084
        4102
        6074
        3409

    GZ Needs Tags;
        1302

    CommandLine:
        python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
        >>> valid_views = ['primary', 'primary1', 'primary-1']
        >>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    # Pick the next annotation to review (None when the set is finished).
    tup = appf.get_turk_annot_args(appf.imageset_annot_quality_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    value = ibs.get_annot_qualities(aid)
    # Normalize the stored quality for the form: -1 (unset) shows as no
    # selection, and the legacy value 0 is promoted to 1.
    if value == -1:
        value = None
    if value == 0:
        value = 1
    review = 'review' in request.args.keys()
    finished = aid is None
    # NOTE(review): cookies are strings, so ``== 0`` against the int default
    # looks like it can never be True once the cookie exists — confirm how
    # 'quality_instructions_seen' is written.
    display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    return appf.template('turk', 'quality',
                         imgsetid=imgsetid,
                         src_ag=src_ag,
                         dst_ag=dst_ag,
                         gid=gid,
                         aid=aid,
                         value=value,
                         image_path=gpath,
                         image_src=image_src,
                         previous=previous,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
def turk_additional():
    """
    Serve the "additional metadata" turking page (sex / age buckets) for the
    next unreviewed annotation in the requested image set.
    """
    ibs = current_app.ibs
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = appf.imageset_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        # Choose a random annotation that has not been reviewed yet
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        value_sex += 2  # shift into the form's radio-button values
    else:
        value_sex = None
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    # Map the (min, max) month estimate onto the form's discrete age buckets.
    # BUGFIX: these previously compared ints with `is` (e.g. `is -1`, `is 3`),
    # which only works by accident of CPython's small-int cache; use ==.
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    # BUGFIX: test for None before comparing, so `None > 36` is never evaluated
    elif value_age_min == 36 and (value_age_max is None or value_age_max > 36):
        value_age = 7
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    name_aid_list = None
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        # Show this name's sibling annotations, best quality first
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    else:
        name_aid_combined_list = []
    return appf.template('turk', 'additional', imgsetid=imgsetid, gid=gid,
                         aid=aid, value_sex=value_sex, value_age=value_age,
                         image_path=gpath,
                         name_aid_combined_list=name_aid_combined_list,
                         image_src=image_src, previous=previous,
                         imagesettext=imagesettext, progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
def root():
    """Serve the web interface's landing page (no template sub-page)."""
    landing_page = appf.template(None)
    return landing_page
def turk_viewpoint():
    """
    Serve the viewpoint-review turking page for the next unreviewed
    annotation, with a species selector.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
        species = ibs.get_annot_species_texts(aid)
    else:
        gid = None
        gpath = None
        image_src = None
        species = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Build the species drop-down, sorted by display name, with the current
    # annotation's species preselected
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [ species == species_ for species_ in species_text_list ]
    # BUGFIX: zip() is an iterator on Python 3; materialize before the list
    # concatenation below (list + zip raised TypeError)
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
    return appf.template('turk', 'viewpoint', imgsetid=imgsetid, src_ag=src_ag,
                         dst_ag=dst_ag, gid=gid, aid=aid, value=value,
                         image_path=gpath, image_src=image_src,
                         previous=previous, species_list=species_list,
                         imagesettext=imagesettext, progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
def turk_detection():
    """
    Serve the detection-turking page: pick the next unreviewed image in the
    image set and package its existing annotations for the bounding-box editor.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)

    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    reviewed_list = appf.imageset_image_processed(ibs, gid_list)
    # BUGFIX: guard against an empty image set; turk_additional guards the
    # identical expression, this route previously raised ZeroDivisionError
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        # Choose a random image that has not been reviewed yet
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            # gid = gid_list_[0]
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    display_instructions = request.cookies.get('detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        image = ibs.get_images(gid)
        image_src = appf.embed_image_html(image)
        # Get annotations
        width, height = ibs.get_image_sizes(gid)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        # Convert annotation bounding boxes into percent coordinates for the editor
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
            temp = {}
            temp['left'] = 100.0 * (annot_bbox[0] / width)
            temp['top'] = 100.0 * (annot_bbox[1] / height)
            temp['width'] = 100.0 * (annot_bbox[2] / width)
            temp['height'] = 100.0 * (annot_bbox[3] / height)
            temp['label'] = species
            temp['id'] = aid
            temp['theta'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif appf.default_species(ibs) is not None:
            species = appf.default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), imgsetid, )
    return appf.template('turk', 'detection', imgsetid=imgsetid, gid=gid,
                         refer_aid=refer_aid, species=species,
                         image_path=gpath, image_src=image_src,
                         previous=previous, imagesettext=imagesettext,
                         progress=progress, finished=finished,
                         annotation_list=annotation_list,
                         display_instructions=display_instructions,
                         display_species_examples=display_species_examples,
                         callback_url=callback_url, callback_method='POST',
                         EMBEDDED_CSS=None, EMBEDDED_JAVASCRIPT=None,
                         review=review)
def turk():
    """Serve the turking landing page for an optional image set."""
    raw_imgsetid = request.args.get('imgsetid', '')
    if raw_imgsetid in ('None', ''):
        imgsetid = None
    else:
        imgsetid = int(raw_imgsetid)
    return appf.template('turk', None, imgsetid=imgsetid)
def view_names():
    """
    Paginated listing of names with their annotations, optionally filtered by
    the ``nid`` / ``aid`` / ``gid`` / ``imgsetid`` query parameters.
    """
    ibs = current_app.ibs
    filtered = True
    aid_list = []
    imgsetid_list = []
    gid_list = []
    nid = request.args.get('nid', '')
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(nid) > 0:
        nid_list = nid.strip().split(',')
        nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
    # NOTE(review): when only ?nid= is supplied, the else branch below still
    # replaces nid_list with all valid nids — confirm this is intended
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    else:
        nid_list = ibs.get_valid_nids()
        filtered = False
    # Page
    appf.PAGE_SIZE_ = int(appf.PAGE_SIZE / 5)
    page_start = min(len(nid_list), (page - 1) * appf.PAGE_SIZE_)
    page_end = min(len(nid_list), page * appf.PAGE_SIZE_)
    page_total = int(math.ceil(len(nid_list) / appf.PAGE_SIZE_))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(nid_list) else page + 1
    nid_list = nid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
    aids_list = ibs.get_name_aids(nid_list)
    # BUGFIX: materialize the per-name zip() so the rows survive reuse on Python 3
    annotations_list = [ list(zip(
        aid_list_,
        ibs.get_annot_gids(aid_list_),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list_) ],
        ibs.get_annot_image_names(aid_list_),
        ibs.get_annot_names(aid_list_),
        ibs.get_annot_exemplar_flags(aid_list_),
        ibs.get_annot_species_texts(aid_list_),
        ibs.get_annot_yaw_texts(aid_list_),
        ibs.get_annot_quality_texts(aid_list_),
        ibs.get_annot_sex_texts(aid_list_),
        ibs.get_annot_age_months_est(aid_list_),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(appf.imageset_annot_viewpoint_processed(ibs, aid_list_), appf.imageset_annot_quality_processed(ibs, aid_list_)) ],
    )) for aid_list_ in aids_list ]
    # BUGFIX: zip() is an iterator on Python 3 and has no .sort(); materialize
    # first, then sort by name rowid
    name_list = list(zip(nid_list, annotations_list))
    name_list.sort(key=lambda t: t[0])
    return appf.template('view', 'names',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         aid_list=aid_list,
                         aid_list_str=','.join(map(str, aid_list)),
                         num_aids=len(aid_list),
                         nid_list=nid_list,
                         nid_list_str=','.join(map(str, nid_list)),
                         num_nids=len(nid_list),
                         name_list=name_list,
                         num_names=len(name_list),
                         page=page,
                         page_start=page_start,
                         page_end=page_end,
                         page_total=page_total,
                         page_previous=page_previous,
                         page_next=page_next)
def demo(*args, **kwargs):
    """Render the demo page, exposing module globals plus this call's locals
    (``args`` / ``kwargs``) as the template context."""
    # locals() at this point contains only args/kwargs; globals() fills in the rest
    context = dict(globals(), **locals())
    return appf.template(None, 'demo', **context)
def review_detection_html(ibs, image_uuid, result_list, callback_url, callback_method='POST', include_jquery=False):
    """
    Returns the detection review interface for a particular image UUID and a
    list of results for that image.

    Args:
        image_uuid (UUID): the UUID of the image you want to review detections for
        result_list (list of dict): list of detection results returned by the detector
        callback_url (str): URL that the review form will submit to (action) when
            the user is complete with their review
        callback_method (str): HTTP method the review form will submit to (method).
            Defaults to 'POST'

    Returns:
        template (html): json response with the detection web interface in html

    RESTful:
        Method: GET
        URL:    /api/review/detect/cnn/yolo/
    """
    ibs.web_check_uuids(image_uuid_list=[image_uuid])
    gid = ibs.get_image_gids_from_uuid(image_uuid)
    if gid is None:
        return 'INVALID IMAGE UUID'
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ibs.get_images(gid)
    image_src = appf.embed_image_html(image)
    width, height = ibs.get_image_sizes(gid)
    # BUGFIX: test for None before the numeric comparison; the old order
    # evaluated `None <= 0` first (TypeError on Python 3)
    if width is None or width <= 0 or height is None or height <= 0:
        vals = (image_uuid, width, height, )
        raise IOError('Image %r for review has either no width or no height (w = %s, h = %s)' % vals)
    # Convert detector results into percent coordinates for the bbox editor
    annotation_list = []
    for result in result_list:
        annotation_list.append({
            'left'   : 100.0 * (result['xtl'] / width),
            'top'    : 100.0 * (result['ytl'] / height),
            'width'  : 100.0 * (result['width'] / width),
            'height' : 100.0 * (result['height'] / height),
            'label'  : result['class'],
            'id'     : None,
            'theta'  : result.get('theta', 0.0),
        })
    species = KEY_DEFAULTS[SPECIES_KEY]
    root_path = dirname(abspath(__file__))
    css_file_list = [
        ['include', 'jquery-ui', 'jquery-ui.min.css'],
        ['include', 'jquery.ui.rotatable', 'jquery.ui.rotatable.css'],
        ['css', 'style.css'],
    ]
    json_file_list = [
        ['include', 'jquery-ui', 'jquery-ui.min.js'],
        ['include', 'jquery.ui.rotatable', 'jquery.ui.rotatable.min.js'],
        ['include', 'bbox_annotator_percent.js'],
        ['javascript', 'script.js'],
        ['javascript', 'turk-detection.js'],
    ]
    if include_jquery:
        json_file_list = [
            ['javascript', 'jquery.min.js'],
        ] + json_file_list
    # Inline the static CSS/JS so the returned HTML is self-contained
    EMBEDDED_CSS = ''
    EMBEDDED_JAVASCRIPT = ''
    css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
    json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
    for css_file in css_file_list:
        css_filepath_list = [root_path, 'static'] + css_file
        # BUGFIX: use a distinct name for the file handle instead of shadowing
        # the loop variable
        with open(join(*css_filepath_list)) as css_fh:
            EMBEDDED_CSS += css_template_fmtstr % (css_fh.read(), )
    for json_file in json_file_list:
        json_filepath_list = [root_path, 'static'] + json_file
        with open(join(*json_filepath_list)) as json_fh:
            EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_fh.read(), )
    return appf.template('turk', 'detection_insert', gid=gid, refer_aid=None,
                         species=species, image_path=gpath,
                         image_src=image_src, annotation_list=annotation_list,
                         callback_url=callback_url,
                         callback_method=callback_method,
                         EMBEDDED_CSS=EMBEDDED_CSS,
                         EMBEDDED_JAVASCRIPT=EMBEDDED_JAVASCRIPT)
def review_graph_match_html(ibs, review_pair, cm_dict, query_config_dict,
                            _internal_state, callback_url,
                            callback_method='POST',
                            view_orientation='vertical',
                            include_jquery=False):
    r"""
    Build the pairwise match-review HTML for a candidate annotation pair.

    Args:
        ibs (ibeis.IBEISController): image analysis api
        review_pair (dict): pair of annot uuids
        cm_dict (dict):
        query_config_dict (dict):
        _internal_state (?):
        callback_url (?):
        callback_method (unicode): (default = u'POST')
        view_orientation (unicode): (default = u'vertical')
        include_jquery (bool): (default = False)

    CommandLine:
        python -m ibeis.web.apis_query review_graph_match_html --show

        ibeis --web
        python -m ibeis.web.apis_query review_graph_match_html --show --domain=localhost

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.web.apis_query import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web('testdb1')  # , domain='http://52.33.105.88')
        >>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')[0:2]
        >>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuids/', type_='get', aid_list=aids)
        >>> quuid_list = uuid_list[0:1]
        >>> duuid_list = uuid_list
        >>> query_config_dict = {
        >>>    # 'pipeline_root' : 'BC_DTW'
        >>> }
        >>> data = dict(
        >>>     query_annot_uuid_list=quuid_list, database_annot_uuid_list=duuid_list,
        >>>     query_config_dict=query_config_dict,
        >>> )
        >>> jobid = web_ibs.send_ibeis_request('/api/engine/query/graph/', **data)
        >>> print('jobid = %r' % (jobid,))
        >>> status_response = web_ibs.wait_for_results(jobid)
        >>> result_response = web_ibs.read_engine_results(jobid)
        >>> inference_result = result_response['json_result']
        >>> print('inference_result = %r' % (inference_result,))
        >>> auuid2_cm = inference_result['cm_dict']
        >>> quuid = quuid_list[0]
        >>> class_dict = auuid2_cm[str(quuid)]
        >>> # Get information in frontend
        >>> #ibs = ibeis.opendb('testdb1')
        >>> #cm = match_obj = ibeis.ChipMatch.from_dict(class_dict, ibs=ibs)
        >>> #match_obj.print_rawinfostr()
        >>> # Make the dictionary a bit more managable
        >>> #match_obj.compress_top_feature_matches(num=2)
        >>> #class_dict = match_obj.to_dict(ibs=ibs)
        >>> cm_dict = class_dict
        >>> # Package for review
        >>> review_pair = {'annot_uuid_1': quuid, 'annot_uuid_2': duuid_list[1]}
        >>> callback_method = u'POST'
        >>> view_orientation = u'vertical'
        >>> include_jquery = False
        >>> kw = dict(
        >>>     review_pair=review_pair,
        >>>     cm_dict=cm_dict,
        >>>     query_config_dict=query_config_dict,
        >>>     _internal_state=None,
        >>>     callback_url = None,
        >>> )
        >>> html_str = web_ibs.send_ibeis_request('/api/review/query/graph/', type_='get', **kw)
        >>> web_ibs.terminate2()
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.render_html(html_str)
        >>> ut.show_if_requested()

    Example2:
        >>> # DISABLE_DOCTEST
        >>> # This starts off using web to get information, but finishes the rest in python
        >>> from ibeis.web.apis_query import *  # NOQA
        >>> import ibeis
        >>> ut.exec_funckw(review_graph_match_html, globals())
        >>> web_ibs = ibeis.opendb_bg_web('testdb1')  # , domain='http://52.33.105.88')
        >>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')[0:2]
        >>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuids/', type_='get', aid_list=aids)
        >>> quuid_list = uuid_list[0:1]
        >>> duuid_list = uuid_list
        >>> query_config_dict = {
        >>>    # 'pipeline_root' : 'BC_DTW'
        >>> }
        >>> data = dict(
        >>>     query_annot_uuid_list=quuid_list, database_annot_uuid_list=duuid_list,
        >>>     query_config_dict=query_config_dict,
        >>> )
        >>> jobid = web_ibs.send_ibeis_request('/api/engine/query/graph/', **data)
        >>> status_response = web_ibs.wait_for_results(jobid)
        >>> result_response = web_ibs.read_engine_results(jobid)
        >>> web_ibs.terminate2()
        >>> # NOW WORK IN THE FRONTEND
        >>> inference_result = result_response['json_result']
        >>> auuid2_cm = inference_result['cm_dict']
        >>> quuid = quuid_list[0]
        >>> class_dict = auuid2_cm[str(quuid)]
        >>> # Get information in frontend
        >>> ibs = ibeis.opendb('testdb1')
        >>> cm = ibeis.ChipMatch.from_dict(class_dict, ibs=ibs)
        >>> cm.print_rawinfostr()
        >>> # Make the dictionary a bit more managable
        >>> cm.compress_top_feature_matches(num=1)
        >>> cm.print_rawinfostr()
        >>> class_dict = cm.to_dict(ibs=ibs)
        >>> cm_dict = class_dict
        >>> # Package for review ( CANT CALL DIRECTLY BECAUSE OF OUT OF CONTEXT )
        >>> review_pair = {'annot_uuid_1': quuid, 'annot_uuid_2': duuid_list[1]}
        >>> x = review_graph_match_html(ibs, review_pair, cm_dict,
        >>>                             query_config_dict, _internal_state=None,
        >>>                             callback_url=None)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.render_html(html_str)
        >>> ut.show_if_requested()
    """
    from ibeis.algo.hots import chip_match
    # from ibeis.algo.hots.query_request import QueryRequest
    # Select the ChipMatch class that matches the requested pipeline
    proot = query_config_dict.get('pipeline_root', 'vsmany')
    proot = query_config_dict.get('proot', proot)
    if proot.upper() == 'BC_DTW':
        cls = chip_match.AnnotMatch  # ibs.depc_annot.requestclass_dict['BC_DTW']
    else:
        cls = chip_match.ChipMatch
    view_orientation = view_orientation.lower()
    if view_orientation not in ['vertical', 'horizontal']:
        view_orientation = 'horizontal'
    # unpack info
    try:
        annot_uuid_1 = review_pair['annot_uuid_1']
        annot_uuid_2 = review_pair['annot_uuid_2']
    except Exception:
        #??? HACK
        # FIXME: some callers wrap the pair in a list; unwrap and retry
        print('[!!!!] review_pair = %r' % (review_pair, ))
        review_pair = review_pair[0]
        annot_uuid_1 = review_pair['annot_uuid_1']
        annot_uuid_2 = review_pair['annot_uuid_2']
    aid_1 = ibs.get_annot_aids_from_uuid(annot_uuid_1)
    aid_2 = ibs.get_annot_aids_from_uuid(annot_uuid_2)
    cm = cls.from_dict(cm_dict, ibs=ibs)
    qreq_ = ibs.new_query_request([aid_1], [aid_2], cfgdict=query_config_dict)
    # Get score
    idx = cm.daid2_idx[aid_2]
    match_score = cm.name_score_list[idx]
    #match_score = cm.aid2_score[aid_2]
    # Render both the annotated and the clean match images
    image_matches = make_review_image(aid_2, cm, qreq_, view_orientation=view_orientation)
    image_matches_src = appf.embed_image_html(image_matches)
    image_clean = make_review_image(aid_2, cm, qreq_, view_orientation=view_orientation, draw_matches=False)
    image_clean_src = appf.embed_image_html(image_clean)
    # BUGFIX: removed the dead `if False:` branch that re-derived root_path
    # from ibeis.web.apis_query — this module's own location was always used
    root_path = dirname(abspath(__file__))
    css_file_list = [
        ['css', 'style.css'],
        ['include', 'bootstrap', 'css', 'bootstrap.css'],
    ]
    json_file_list = [
        ['javascript', 'script.js'],
        ['include', 'bootstrap', 'js', 'bootstrap.js'],
    ]
    if include_jquery:
        json_file_list = [
            ['javascript', 'jquery.min.js'],
        ] + json_file_list
    # Inline the static CSS/JS so the returned HTML is self-contained
    EMBEDDED_CSS = ''
    EMBEDDED_JAVASCRIPT = ''
    css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
    json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
    for css_file in css_file_list:
        css_filepath_list = [root_path, 'static'] + css_file
        with open(join(*css_filepath_list)) as css_fh:
            EMBEDDED_CSS += css_template_fmtstr % (css_fh.read(), )
    for json_file in json_file_list:
        json_filepath_list = [root_path, 'static'] + json_file
        with open(join(*json_filepath_list)) as json_fh:
            EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_fh.read(), )
    return appf.template('turk', 'query_match_insert',
                         match_score=match_score,
                         image_clean_src=image_clean_src,
                         image_matches_src=image_matches_src,
                         annot_uuid_1=str(annot_uuid_1),
                         annot_uuid_2=str(annot_uuid_2),
                         view_orientation=view_orientation,
                         callback_url=callback_url,
                         callback_method=callback_method,
                         EMBEDDED_CSS=EMBEDDED_CSS,
                         EMBEDDED_JAVASCRIPT=EMBEDDED_JAVASCRIPT)
def view():
    """
    Dashboard view: per-day sighting statistics, a Petersen-Lincoln
    population estimate, GPS markers/tracks, demographics, and database
    counts, all handed to the 'view' template.
    """
    def _date_list(gid_list):
        # Map image rowids to 'YYYY/MM/DD' strings ('UNKNOWN' when untimed)
        unixtime_list = ibs.get_image_unixtime(gid_list)
        datetime_list = [ ut.unixtime_to_datetimestr(unixtime) if unixtime is not None else 'UNKNOWN' for unixtime in unixtime_list ]
        datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
        date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
        return date_list

    ibs = current_app.ibs
    aid_list = ibs.filter_aids_count()
    gid_list = ibs.get_annot_gids(aid_list)
    nid_list = ibs.get_annot_name_rowids(aid_list)
    date_list = _date_list(gid_list)

    # date_taken_dict[date] = [num images total that day, num annotated images]
    gid_list_unique = list(set(gid_list))
    date_list_unique = _date_list(gid_list_unique)
    date_taken_dict = {}
    for gid, date in zip(gid_list_unique, date_list_unique):
        if date not in date_taken_dict:
            date_taken_dict[date] = [0, 0]
        date_taken_dict[date][1] += 1

    gid_list_all = ibs.get_valid_gids()
    date_list_all = _date_list(gid_list_all)
    for gid, date in zip(gid_list_all, date_list_all):
        if date in date_taken_dict:
            date_taken_dict[date][0] += 1

    # Walk annotations in order building the cumulative discovery curve and
    # per-day counters:
    # date_seen_dict[date] = [annots, names seen today, new names, recaptures]
    value = 0
    label_list = []
    value_list = []
    index_list = []
    seen_set = set()
    current_seen_set = set()
    previous_seen_set = set()
    last_date = None
    date_seen_dict = {}
    for index, (aid, nid, date) in enumerate(zip(aid_list, nid_list, date_list)):
        index_list.append(index + 1)
        # Add to counters
        if date not in date_seen_dict:
            date_seen_dict[date] = [0, 0, 0, 0]
        date_seen_dict[date][0] += 1
        if nid not in current_seen_set:
            current_seen_set.add(nid)
            date_seen_dict[date][1] += 1
            if nid in previous_seen_set:
                date_seen_dict[date][3] += 1
        if nid not in seen_set:
            seen_set.add(nid)
            value += 1
            date_seen_dict[date][2] += 1
        # Add to register
        value_list.append(value)
        # Reset step (per day)
        if date != last_date and date != 'UNKNOWN':
            last_date = date
            previous_seen_set = set(current_seen_set)
            current_seen_set = set()
            label_list.append(date)
        else:
            label_list.append('')

    # Commented-out scipy curve-fitting of the discovery curve was removed;
    # the template still receives an empty prediction list.
    prediction_list = []

    date_seen_dict.pop('UNKNOWN', None)
    bar_label_list = sorted(date_seen_dict.keys())
    bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
    bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
    bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
    bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
    bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
    bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]

    # Counts
    imgsetid_list = ibs.get_valid_imgsetids()
    gid_list = ibs.get_valid_gids()
    aid_list = ibs.get_valid_aids()
    nid_list = ibs.get_valid_nids()
    contrib_list = ibs.get_valid_contrib_rowids()
    aid_list_count = ibs.filter_aids_count()
    nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
    nid_list_count = list(set(nid_list_count_dup))

    # Calculate the Petersen-Lincoln index from the last two days
    try:
        c1 = bar_value_list4[-2]
        c2 = bar_value_list4[-1]
        c3 = bar_value_list6[-1]
        pl_index = int(math.ceil( (c1 * c2) / c3 ))
        pl_error_num = float(c1 * c1 * c2 * (c2 - c3))
        pl_error_dom = float(c3 ** 3)
        pl_error = int(math.ceil( 1.96 * math.sqrt(pl_error_num / pl_error_dom) ))
    except (IndexError, ZeroDivisionError):
        # Fewer than two days of data, or zero recaptured (k = 0)
        pl_index = 0
        pl_error = 0

    # Get the markers (BUGFIX: materialize map() so this also works on Python 3)
    gid_list_markers = ibs.get_annot_gids(aid_list_count)
    gps_list_markers = list(map(list, ibs.get_image_gps(gid_list_markers)))
    gps_list_markers_all = list(map(list, ibs.get_image_gps(gid_list)))
    # Get the tracks (duplicate manual grouping code was removed in favor of
    # the controller helper)
    __nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
    gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
    # Drop unknown GPS positions, which are encoded as (-1, -1)
    gps_list_markers = [ gps for gps in gps_list_markers if tuple(gps) != (-1, -1, ) ]
    gps_list_markers_all = [ gps for gps in gps_list_markers_all if tuple(gps) != (-1, -1, ) ]
    gps_list_tracks = [
        [ gps for gps in gps_list_track if tuple(gps) != (-1, -1, ) ]
        for gps_list_track in gps_list_tracks
    ]

    valid_aids = ibs.get_valid_aids()
    valid_gids = ibs.get_valid_gids()
    valid_aids_ = ibs.filter_aids_custom(valid_aids)
    valid_gids_ = ibs.filter_gids_custom(valid_gids)
    used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
    used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))

    # Get Age and sex (By Name); age_list[sex] = [infant, juvenile, adult]
    name_sex_list = ibs.get_name_sex(nid_list_count)
    name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
    name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
    age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    age_unreviewed = 0
    age_ambiguous = 0
    for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            # Invalid name: cannot have more than one age
            age_ambiguous += 1
            continue
        min_age = None
        max_age = None
        if len(min_ages) > 0:
            min_age = min_ages[0]
        if len(max_ages) > 0:
            max_age = max_ages[0]
        # Histogram (BUGFIX: compare ints with ==, not `is`, and test for
        # None before numeric comparison)
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
            # Unreviewed name: age has not been specified yet
            age_unreviewed += 1
            continue
        if sex not in [0, 1]:
            sex = 2
            # continue
        # NOTE(review): max_age may still be None in the first branch below;
        # on Python 3 `None < 12` raises TypeError — confirm inputs when porting
        if (min_age is None or min_age < 12) and max_age < 12:
            age_list[sex][0] += 1
        elif 12 <= min_age < 36 and 12 <= max_age < 36:
            age_list[sex][1] += 1
        elif 36 <= min_age and (max_age is None or 36 <= max_age):
            age_list[sex][2] += 1

    dbinfo_str = dbinfo()
    return appf.template('view',
                         line_index_list=index_list,
                         line_label_list=label_list,
                         line_value_list=value_list,
                         prediction_list=prediction_list,
                         pl_index=pl_index,
                         pl_error=pl_error,
                         gps_list_markers=gps_list_markers,
                         gps_list_markers_all=gps_list_markers_all,
                         gps_list_tracks=gps_list_tracks,
                         bar_label_list=bar_label_list,
                         bar_value_list1=bar_value_list1,
                         bar_value_list2=bar_value_list2,
                         bar_value_list3=bar_value_list3,
                         bar_value_list4=bar_value_list4,
                         bar_value_list5=bar_value_list5,
                         bar_value_list6=bar_value_list6,
                         age_list=age_list,
                         age_ambiguous=age_ambiguous,
                         age_unreviewed=age_unreviewed,
                         dbinfo_str=dbinfo_str,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         contrib_list=contrib_list,
                         contrib_list_str=','.join(map(str, contrib_list)),
                         num_contribs=len(contrib_list),
                         gid_list_count=valid_gids_,
                         gid_list_count_str=','.join(map(str, valid_gids_)),
                         num_gids_count=len(valid_gids_),
                         aid_list=aid_list,
                         aid_list_str=','.join(map(str, aid_list)),
                         num_aids=len(aid_list),
                         aid_list_count=valid_aids_,
                         aid_list_count_str=','.join(map(str, valid_aids_)),
                         num_aids_count=len(valid_aids_),
                         nid_list=nid_list,
                         nid_list_str=','.join(map(str, nid_list)),
                         num_nids=len(nid_list),
                         nid_list_count=nid_list_count,
                         nid_list_count_str=','.join(map(str, nid_list_count)),
                         num_nids_count=len(nid_list_count),
                         used_gids=used_gids,
                         num_used_gids=len(used_gids),
                         used_contribs=used_contrib_tags,
                         num_used_contribs=len(used_contrib_tags))