def MAKE_BIG_DB():
    workdir = sysres.get_workdir()
    dbname = 'testdb_big'
    dbdir = join(workdir, dbname)
    utool.delete(dbdir)

    main_locals = ibeis.main(dbdir=dbdir, gui=False)
    ibs = main_locals['ibs']    # IBEIS Control
    gpath_list = grabdata.get_test_gpaths(ndata=1)  # (overwritten below by the big image dir listing)

    imgdir = get_big_imgdir(workdir)
    gname_list = utool.list_images(imgdir, recursive=True)
    gpath_list = [join(imgdir, gname) for gname in gname_list]
    assert all(map(exists, gpath_list)), 'some images do not exist'

    #nImages = len(gpath_list)
    #with utool.Timer('Add %d Images' % nImages):
    gid_list = ibs.add_images(gpath_list)

    #with utool.Timer('Convert %d Images to annotations' % nImages):
    aid_list = ibsfuncs.use_images_as_annotations(ibs, gid_list)

    #with utool.Timer('Compute %d chips' % nImages):
    cid_list = ibs.add_chips(aid_list)

    #with utool.Timer('Compute %d features' % nImages):
    fid_list = ibs.add_feats(cid_list)

    #with utool.Timer('Getting %d nFeats' % nImages):
    nFeats_list = ibs.get_num_feats(fid_list)

    print('Total number of features in the database: %r' % sum(nFeats_list))
    return locals()
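# Editor's addition: the commented-out timers above show how each stage of the
# image -> annotation -> chip -> feature pipeline was profiled. utool.Timer is
# a context manager, so any stage can be re-wrapped like this (a sketch; the
# label text is illustrative):
def _timed_add_images(ibs, gpath_list):
    with utool.Timer('Add %d Images' % len(gpath_list)):
        return ibs.add_images(gpath_list)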
def get_test_gpaths(ndata=None, names=None, **kwargs):
    """ DEPRECATE """
    # Read ndata from args or command line
    ndata_arg = ut.get_argval(
        '--ndata', type_=int, default=None,
        help_='use --ndata to specify bigger data')
    if ndata_arg is not None:
        ndata = ndata_arg
    imgdir = get_testdata_dir(**kwargs)
    gpath_list = sorted(list(ut.list_images(imgdir, full=True, recursive=True)))
    # Get only the gpaths of certain names
    if names is not None:
        gpath_list = [gpath for gpath in gpath_list
                      if ut.basename_noext(gpath) in names]
    # Get some number of test images
    if ndata is not None:
        gpath_cycle = cycle(gpath_list)
        if six.PY2:
            gpath_list = [gpath_cycle.next() for _ in range(ndata)]
        else:
            gpath_list = [next(gpath_cycle) for _ in range(ndata)]
    return gpath_list
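# Usage sketch (editor's addition, not from the original source): because the
# function cycles gpath_list, ndata may exceed the number of unique test
# images on disk. Assumes the utool testdata directory has been downloaded.
def _demo_get_test_gpaths():
    gpaths = get_test_gpaths(ndata=5)
    assert len(gpaths) == 5, 'cycle() pads the list up to ndata'
    return gpaths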
def list_ingestable_images(img_dir, fullpath=True, recursive=True):
    ignore_list = ['_hsdb', '.hs_internals', '_ibeis_cache', '_ibsdb']
    gpath_list = utool.list_images(img_dir, fullpath=fullpath,
                                   recursive=recursive,
                                   ignore_list=ignore_list)
    # Ensure in unix format (materialize the map for Python 3)
    gpath_list = list(map(utool.unixpath, gpath_list))
    return gpath_list
def list_images(img_dir):
    """ lists images that are not in an internal cache """
    import utool as ut  # NOQA
    ignore_list = ['_hsdb', '.hs_internals', '_ibeis_cache', '_ibsdb']
    gpath_list = ut.list_images(img_dir, fullpath=True, recursive=True,
                                ignore_list=ignore_list)
    return gpath_list
def extract_zipfile_images(ibs, ingestable):
    import utool as ut  # NOQA
    zipfile_list = ut.glob(ingestable.img_dir, '*.zip', recursive=True)
    if len(zipfile_list) > 0:
        print('Found zipfile_list = %r' % (zipfile_list,))
        # NOTE: unzipped_file_base_dir was undefined in the original snippet;
        # this definition is an assumption for illustration.
        unzipped_file_base_dir = join(ingestable.img_dir, '_unzipped')
        ut.ensuredir(unzipped_file_base_dir)
        for zipfile_fpath in zipfile_list:
            # The original called relpath twice; one call suffices to get the
            # zipfile's path relative to the ingest image directory.
            unzipped_file_relpath = dirname(relpath(realpath(zipfile_fpath),
                                                    realpath(ingestable.img_dir)))
            unzipped_file_dir = join(unzipped_file_base_dir, unzipped_file_relpath)
            ut.ensuredir(unzipped_file_dir)
            ut.unzip_file(zipfile_fpath, output_dir=unzipped_file_dir, overwrite=False)
        # List from the base dir so every extracted archive is included (the
        # original listed only the last unzipped_file_dir)
        gpath_list = ut.list_images(unzipped_file_base_dir, fullpath=True,
                                    recursive=True)
    else:
        gpath_list = []
    return gpath_list
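# Usage sketch (editor's addition): the ingestable argument only needs an
# img_dir attribute, so a namedtuple can stand in here; the path and the ibs
# handle are hypothetical.
from collections import namedtuple

def _demo_extract_zipfile_images(ibs):
    Ingestable = namedtuple('Ingestable', ['img_dir'])
    ingestable = Ingestable(img_dir='/data/raw_uploads')
    return extract_zipfile_images(ibs, ingestable)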
def import_images_from_dir(back, dir_=None, size_filter=None, refresh=True):
    """ File -> Import Images From Directory """
    print('[back] import_images_from_dir')
    if dir_ is None:
        dir_ = guitool.select_directory('Select directory with images in it')
    #printDBG('[back] dir=%r' % dir_)
    if dir_ is None:
        return
    gpath_list = utool.list_images(dir_, fullpath=True, recursive=True)
    if size_filter is not None:
        raise NotImplementedError('Can someone implement the size filter?')
    gid_list = back.ibs.add_images(gpath_list)
    if refresh:
        back.ibs.update_special_encounters()
        back.front.update_tables([gh.IMAGE_TABLE, gh.ENCOUNTER_TABLE])
    return gid_list
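# Sketch of the missing size filter (editor's addition, answering the
# NotImplementedError above). Assumes size_filter is a (min_w, min_h) tuple
# and that Pillow is available; neither assumption comes from the original.
def _filter_gpaths_by_size(gpath_list, size_filter):
    from PIL import Image
    min_w, min_h = size_filter
    keep_list = []
    for gpath in gpath_list:
        # Pillow reads only the header here, so this stays cheap
        with Image.open(gpath) as img:
            w, h = img.size
        if w >= min_w and h >= min_h:
            keep_list.append(gpath)
    return keep_list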
def test_pygist():
    print('[pygist] Testing pygist')
    # Ensure you have test data
    print('[pygist] Ensuring testdata')
    datafile = utool.grab_file_url(TEST_MODEL_URL, appname='utool')
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir, fullpath=True)  # test image paths
    outdir = utool.get_app_resource_dir('pygist')  # where to put results
    # Run pygist on test images
    print('[pygist] Running tests')
    test_results = pygist.test(imgpaths, outdir=outdir, datafile=datafile)
    # Print results (the original asserted target_results against itself,
    # which was a tautology)
    target_results = [-1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1]
    assert test_results == target_results, 'results do not match'
    print('test_results = %r' % (test_results,))
    print(utool.list_str(list(izip(imgpaths, test_results))))
    return locals()
def imagesDropped(ibswgt, url_list):
    from os.path import isdir
    print('[drop_event] url_list=%r' % (url_list,))
    # Materialize the filters so len() and extend() work on Python 3
    gpath_list = list(filter(utool.matches_image, url_list))
    dir_list = list(filter(isdir, url_list))
    if len(dir_list) > 0:
        ans = guitool.user_option(ibswgt, title='Non-Images dropped',
                                  msg='Recursively import from directories?')
        if ans == 'Yes':
            gpath_list.extend(
                map(utool.unixpath,
                    utool.flatten([utool.list_images(dir_, fullpath=True, recursive=True)
                                   for dir_ in dir_list])))
        else:
            return
    print('[drop_event] gpath_list=%r' % (gpath_list,))
    if len(gpath_list) > 0:
        ibswgt.back.import_images_from_file(gpath_list=gpath_list)
def get_test_gpaths(ndata=None, names=None, **kwargs):
    # Read ndata from args or command line
    ndata_arg = utool.get_argval('--ndata', type_=int, default=None,
                                 help_='use --ndata to specify bigger data')
    if ndata_arg is not None:
        ndata = ndata_arg
    imgdir = get_testdata_dir(**kwargs)
    gpath_list = sorted(list(utool.list_images(imgdir, full=True, recursive=True)))
    # Get only the gpaths of certain names
    if names is not None:
        gpath_list = [gpath for gpath in gpath_list
                      if utool.basename_noext(gpath) in names]
    # Get some number of test images
    if ndata is not None:
        gpath_cycle = cycle(gpath_list)
        if six.PY2:
            gpath_list = [gpath_cycle.next() for _ in range(ndata)]
        else:
            gpath_list = [next(gpath_cycle) for _ in range(ndata)]
    return gpath_list
def image_upload_zip(**kwargs):
    r"""
    Returns the gid_list for image files submitted in a ZIP archive.  The
    image archive should be flat (no folders will be scanned for images) and
    must be smaller than 100 MB.  The archive can submit multiple images,
    ideally in JPEG format to save space.  Duplicate image uploads will result
    in the duplicate images receiving the same gid based on the hashed pixel
    values.

    Args:
        image_zip_archive (binary): the POST variable containing the binary
            (multi-form) image archive data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid_list (list of rowids): the list of gids corresponding to the
            images submitted.  The gids correspond to the image names sorted
            in lexicographical order.

    RESTful:
        Method: POST
        URL:    /api/image/zip
    """
    ibs = current_app.ibs
    # Get image archive
    image_archive = request.files.get('image_zip_archive', None)
    if image_archive is None:
        raise IOError('Image archive not given')

    # If the directory already exists, delete it
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')

    modifier = 1
    upload_path = '%s' % (current_time)
    while exists(upload_path):
        upload_path = '%s_%04d' % (current_time, modifier)
        modifier += 1

    upload_path = join(uploads_path, upload_path)
    ut.ensuredir(upload_path)

    # Extract the content
    try:
        with zipfile.ZipFile(image_archive, 'r') as zfile:
            zfile.extractall(upload_path)
    except Exception:
        ut.remove_dirs(upload_path)
        raise IOError('Image archive extraction failed')

    """
    test to ensure Directory and utool do the same thing

    from wbia.detecttools.directory import Directory
    upload_path = ut.truepath('~/Pictures')
    gpath_list1 = sorted(ut.list_images(upload_path, recursive=False, full=True))
    direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    gpath_list = direct.files()
    gpath_list = sorted(gpath_list)
    assert gpath_list1 == gpath_list
    """

    gpath_list = sorted(ut.list_images(upload_path, recursive=False, full=True))
    # direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    # gpath_list = direct.files()
    # gpath_list = sorted(gpath_list)
    gid_list = ibs.add_images(gpath_list, **kwargs)
    return gid_list
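# Client-side usage sketch (editor's addition, not from the original source).
# The endpoint path /api/image/zip and the image_zip_archive field name come
# from the docstring above; the host below is a hypothetical local deployment.
import requests

def _upload_zip_demo(zip_fpath, host='http://localhost:5000'):
    with open(zip_fpath, 'rb') as file_:
        resp = requests.post(host + '/api/image/zip',
                             files={'image_zip_archive': file_})
    resp.raise_for_status()
    return resp.json()  # expected to carry the gid_list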
# DUPLICATE CODE, DELETE
from __future__ import absolute_import, division, print_function
from plottool import interact_multi_image
from plottool import draw_func2 as df2
import utool
#import ibeis


def test_interact_multimage(imgpaths):
    print("len: ", len(imgpaths))
    # Use a comprehension so each image gets its own list (the original
    # [[]] * n aliased a single empty list)
    bboxes_list = [[] for _ in imgpaths]
    bboxes_list[0] = [(-200, -100, 400, 400)]
    print(bboxes_list)
    iteract_obj = interact_multi_image.MultiImageInteraction(imgpaths,
                                                             nPerPage=4,
                                                             bboxes_list=bboxes_list)
    # def test_interact_multimage(imgpaths, gid_list=None, aids_list=None, bboxes_list=None):
    #     img_list = imread_many(imgpaths)
    #     iteract_obj = interact_multi_image.MultiImageInteraction(
    #         img_list + img_list, gid_list, aids_list, bboxes_list, nPerPage=6)
    return iteract_obj


if __name__ == '__main__':
    TEST_IMAGES_URL = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir, fullpath=True, recursive=False)  # test image paths
    iteract_obj = test_interact_multimage(imgpaths)
    exec(df2.present())
    # Build parameters
    bbox_list = [dummy_bbox(img), dummy_bbox(img, (-.25, -.25), .1)]
    showkw = {
        'title': 'test axis title',
        # The list of bounding boxes to be drawn on the image
        'bbox_list': bbox_list,
        'theta_list': [tau * .7, tau * .9],
        'sel_list': [True, False],
        'label_list': ['test label', 'lbl2'],
    }
    # Print the keyword arguments to illustrate their format
    print('showkw = ' + utool.dict_str(showkw))
    # Display the image in figure-num 42, using a 1x1 axis grid in the first
    # axis. Pass showkw as keyword arguments.
    viz_image2.show_image(img, fnum=42, pnum=(1, 1, 1), **showkw)
    df2.set_figtitle('Test figure title')


if __name__ == '__main__':
    TEST_IMAGES_URL = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir, fullpath=True, recursive=False)  # test image paths
    # Get one image filepath to load and display
    img_fpath = imgpaths[0]
    # Run Test
    test_viz_image(img_fpath)
    # Magic exec which displays or puts you into IPython with --cmd flag
    exec(df2.present())
def ingest_oxford_style_db(dbdir, dryrun=False):
    """
    Ingest either the Oxford or the Paris buildings dataset

    Args:
        dbdir (str):

    CommandLine:
        python -m ibeis.dbio.ingest_database --exec-ingest_oxford_style_db --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.dbio.ingest_database import *  # NOQA
        >>> dbdir = '/raid/work/Oxford'
        >>> dryrun = True
        >>> ingest_oxford_style_db(dbdir)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()

    Ignore:
        >>> from ibeis.dbio.ingest_database import *  # NOQA
        >>> import ibeis
        >>> dbdir = '/raid/work/Oxford'
        >>> dbdir = '/raid/work/Paris'
        >>> #>>> ibeis.dbio.convert_db.ingest_oxford_style_db(dbdir)
    """
    from PIL import Image
    print('Loading Oxford Style Images from: ' + dbdir)

    def _parse_oxsty_gtfname(gt_fname):
        """ parse gtfname for: (gt_name, quality_lbl, num) """
        # num is an id, not a number of annots
        gt_format = '{}_{:d}_{:D}.txt'
        name, num, quality = parse.parse(gt_format, gt_fname)
        return (name, num, quality)

    def _read_oxsty_gtfile(gt_fpath, name, quality, img_dpath, ignore_list):
        oxsty_annot_info_list = []
        # read the individual ground truth file
        with open(gt_fpath, 'r') as file:
            line_list = file.read().splitlines()
            for line in line_list:
                if line == '':
                    continue
                fields = line.split(' ')
                gname = fields[0].replace('oxc1_', '') + '.jpg'
                # >:( Because PARIS just cant keep paths consistent
                if gname.find('paris_') >= 0:
                    paris_hack = gname[6:gname.rfind('_')]
                    gname = join(paris_hack, gname)
                if gname in ignore_list:
                    continue
                if len(fields) > 1:  # if it has a bbox
                    bbox = [int(round(float(x))) for x in fields[1:]]
                else:
                    # Get annotation width / height
                    gpath = join(img_dpath, gname)
                    (w, h) = Image.open(gpath).size
                    bbox = [0, 0, w, h]
                oxsty_annot_info = (gname, bbox)
                oxsty_annot_info_list.append(oxsty_annot_info)
        return oxsty_annot_info_list

    gt_dpath = ut.existing_subpath(dbdir, ['oxford_style_gt',
                                           'gt_files_170407',
                                           'oxford_groundtruth'])
    img_dpath = ut.existing_subpath(dbdir, ['oxbuild_images', 'images'])

    corrupted_file_fpath = join(gt_dpath, 'corrupted_files.txt')
    ignore_list = []
    # Check for corrupted files (looking at you, Paris Buildings Dataset)
    if ut.checkpath(corrupted_file_fpath):
        ignore_list = ut.read_from(corrupted_file_fpath).splitlines()

    gname_list = ut.list_images(img_dpath, ignore_list=ignore_list,
                                recursive=True, full=False)
    # just in case utool broke
    for ignore in ignore_list:
        assert ignore not in gname_list

    # Read the Oxford Style Groundtruth files
    print('Loading Oxford Style Names and Annots')
    gt_fname_list = os.listdir(gt_dpath)
    num_gt_files = len(gt_fname_list)
    query_annots = []
    gname2_annots_raw = ut.ddict(list)
    name_set = set([])
    print(' * num_gt_files = %d ' % num_gt_files)
    #
    # Iterate over each groundtruth file
    for gtx, gt_fname in enumerate(ut.ProgIter(gt_fname_list,
                                               'parsed oxsty gtfile: ')):
        if gt_fname == 'corrupted_files.txt':
            continue
        # Get name, quality, and num from fname
        (name, num, quality) = _parse_oxsty_gtfname(gt_fname)
        gt_fpath = join(gt_dpath, gt_fname)
        name_set.add(name)
        oxsty_annot_info_sublist = _read_oxsty_gtfile(
            gt_fpath, name, quality, img_dpath, ignore_list)
        if quality == 'query':
            for (gname, bbox) in oxsty_annot_info_sublist:
                query_annots.append((gname, bbox, name, num))
        else:
            for (gname, bbox) in oxsty_annot_info_sublist:
                gname2_annots_raw[gname].append((name, bbox, quality))
    print(' * num_query images = %d ' % len(query_annots))
    #
    # Remove duplicates img.jpg : (*1.txt, *2.txt, ...) -> (*.txt)
    gname2_annots = ut.ddict(list)
    multinamed_gname_list = []
    # .items() instead of the Python-2-only .iteritems()
    for gname, val in gname2_annots_raw.items():
        val_repr = list(map(repr, val))
        unique_reprs = set(val_repr)
        unique_indexes = [val_repr.index(urep) for urep in unique_reprs]
        for ux in unique_indexes:
            gname2_annots[gname].append(val[ux])
        if len(gname2_annots[gname]) > 1:
            multinamed_gname_list.append(gname)
    # print some statistics
    query_gname_list = [tup[0] for tup in query_annots]
    gname_with_groundtruth_list = list(gname2_annots.keys())
    gname_with_groundtruth_set = set(gname_with_groundtruth_list)
    gname_set = set(gname_list)
    query_gname_set = set(query_gname_list)
    gname_without_groundtruth_list = list(gname_set - gname_with_groundtruth_set)
    print(' * num_images = %d ' % len(gname_list))
    print(' * images with groundtruth    = %d ' % len(gname_with_groundtruth_list))
    print(' * images without groundtruth = %d ' % len(gname_without_groundtruth_list))
    print(' * images with multi-groundtruth = %d ' % len(multinamed_gname_list))
    # Make sure all queries have ground truth and there are no duplicate queries
    # assert len(query_gname_list) == len(query_gname_set.intersection(gname_with_groundtruth_list))
    assert len(query_gname_list) == len(set(query_gname_list))
    #=======================================================
    # Build IBEIS database
    if not dryrun:
        ibs = ibeis.opendb(dbdir, allow_newdir=True)
        ibs.cfg.other_cfg.auto_localize = False
        print('adding to table: ')
        # Add images to ibeis
        gpath_list = [join(img_dpath, gname).replace('\\', '/')
                      for gname in gname_list]
        gid_list = ibs.add_images(gpath_list)

        # 1) Add Query Annotations
        qgname_list, qbbox_list, qname_list, qid_list = zip(*query_annots)
        # get image ids of queries
        qgid_list = [gid_list[gname_list.index(gname)] for gname in qgname_list]
        qnote_list = ['query'] * len(qgid_list)

        # 2) Add nonquery database annots
        dgname_list = list(gname2_annots.keys())  # NOQA
        dgid_list = []
        dname_list = []
        dbbox_list = []
        dnote_list = []
        for gname in gname2_annots.keys():
            gid = gid_list[gname_list.index(gname)]
            annots = gname2_annots[gname]
            for name, bbox, quality in annots:
                dgid_list.append(gid)
                dbbox_list.append(bbox)
                dname_list.append(name)
                dnote_list.append(quality)

        # 3) Add distractors: TODO: 100k
        ugid_list = [gid_list[gname_list.index(gname)]
                     for gname in gname_without_groundtruth_list]
        ubbox_list = [[0, 0, w, h] for (w, h) in ibs.get_image_sizes(ugid_list)]
        unote_list = ['distractor'] * len(ugid_list)

        # TODO: Annotation consistency in terms of duplicate bounding boxes
        qaid_list = ibs.add_annots(qgid_list, bbox_list=qbbox_list,
                                   name_list=qname_list, notes_list=qnote_list)
        daid_list = ibs.add_annots(dgid_list, bbox_list=dbbox_list,
                                   name_list=dname_list, notes_list=dnote_list)
        uaid_list = ibs.add_annots(ugid_list, bbox_list=ubbox_list,
                                   notes_list=unote_list)
        print('Added %d query annotations' % len(qaid_list))
        print('Added %d database annotations' % len(daid_list))
        print('Added %d distractor annotations' % len(uaid_list))

    update = False
    if update:
        # TODO: integrate this into the normal ingest pipeline for 'Oxford'
        ibs = ibeis.opendb(dbdir)
        aid_list = ibs.get_valid_aids()
        notes_list = ibs.get_annot_notes(aid_list)
        _dict = {
            'ok':   ibs.const.QUAL_OK,
            'good': ibs.const.QUAL_GOOD,
            'junk': ibs.const.QUAL_JUNK,
            #'distractor': ibs.const.QUAL_JUNK
        }
        qual_text_list = [_dict.get(note, ibs.const.QUAL_UNKNOWN)
                          for note in notes_list]
        ibs.set_annot_quality_texts(aid_list, qual_text_list)
        ibs._overwrite_all_annot_species_to('building')
        tags_list = [[note] if note in ['query', 'distractor'] else []
                     for note in notes_list]
        from ibeis import tag_funcs
        tag_funcs.append_annot_case_tags(ibs, aid_list, tags_list)
        #ibs._set
        # tags_ = ibs.get_annot_case_tags(aid_list)
        # pass
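# Worked example of the ground-truth filename convention parsed by
# _parse_oxsty_gtfname above (editor's addition; the filename follows the
# standard Oxford Buildings ground-truth naming, e.g. all_souls_1_query.txt).
import parse

def _demo_parse_oxsty_gtfname():
    name, num, quality = parse.parse('{}_{:d}_{:D}.txt', 'all_souls_1_query.txt')
    assert (name, num, quality) == ('all_souls', 1, 'query')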
def test_pyrf():
    r"""
    CommandLine:
        python run_tests.py --test-test_pyrf

    Example:
        >>> # ENABLE_DOCTEST
        >>> from run_tests import *  # NOQA
        >>> result = test_pyrf()
        >>> print(result)
    """
    #=================================
    # Initialization
    #=================================
    category = 'zebra_plains'

    #detect_config = {
    #    'save_detection_images': True,
    #    'percentage_top': 0.40,
    #}

    testdata_dir = ut.unixpath('~/code/pyrf/results')
    # assert ut.checkpath(testdata_dir)
    if ut.get_argflag('--vd'):
        print(ut.ls(testdata_dir))

    # Create detector
    detector = Random_Forest_Detector()

    test_path = ut.grab_zipped_url(TEST_DATA_DETECT_URL, appname='utool')
    models_path = ut.grab_zipped_url(TEST_DATA_MODEL_URL, appname='utool')
    trees_path = join(models_path, category)
    detect_path = join(test_path, category, 'detect')
    ut.ensuredir(detect_path)
    ut.ensuredir(test_path)
    ut.ensuredir(trees_path)

    #=================================
    # Load Input Images
    #=================================

    # Get input images
    big_gpath_list = ut.list_images(test_path, fullpath=True, recursive=False)
    print(big_gpath_list)
    # Resize images to standard size
    if ut.get_argflag('--small'):
        big_gpath_list = big_gpath_list[0:8]
    #big_gpath_list = big_gpath_list[0:8]
    output_dir = join(test_path, 'resized')
    std_gpath_list = resize_imagelist_to_sqrtarea(big_gpath_list,
                                                  sqrt_area=800,
                                                  output_dir=output_dir,
                                                  checkexists=True)
    dst_gpath_list = [join(detect_path, split(gpath)[1])
                      for gpath in std_gpath_list]
    #ut.view_directory(test_path)
    #ut.view_directory('.')
    print(std_gpath_list)
    num_images = len(std_gpath_list)
    #assert num_images == 16, 'the test has diverged!'
    print('Testing on %r images' % num_images)

    #=================================
    # Load Pretrained Forests
    #=================================

    # Load forest, so we don't have to reload every time
    trees_fpath_list = ut.ls(trees_path, '*.txt')
    #forest = detector.load(trees_path, category + '-')
    forest = detector.forest(trees_fpath_list)
    #detector.set_detect_params(**detect_config)
    results_list1 = []

    #=================================
    # Detect using Random Forest
    #=================================

    with ut.Timer('[test_pyrf] for loop detector.detect') as t1:
        if not ut.get_argflag('--skip1'):
            results_list1 = detector.detect(forest, std_gpath_list,
                                            output_gpath_list=dst_gpath_list)
            #for ix, (img_fpath, dst_fpath) in enumerate(zip(std_gpath_list, dst_gpath_list)):
            #    #img_fname = split(img_fpath)[1]
            #    #dst_fpath = join(detect_path, img_fname)
            #    #print(' * img_fpath = %r' % img_fpath)
            #    #print(' * dst_fpath = %r' % dst_fpath)
            #    with ut.Timer('[test_pyrf] detector.detect ix=%r' % (ix,)):
            #        results = detector.detect(forest, img_fpath, dst_fpath)
            #    results_list1.append(results)
            #    print('num results = %r' % len(results))
        #else:
        #    print('...skipped')

    #with ut.Timer('[test_pyrf] detector.detect_many') as t2:
    #    results_list2 = detector.detect_many(forest, std_gpath_list,
    #                                         dst_gpath_list, use_openmp=True)
    detector.free_forest(forest)

    print('')
    print('+ --------------')
    print('| total time1: %r' % t1.ellapsed)
    #print('| total time2: %r' % t2.ellapsed)
    print('|')
    print('| num results1 = %r' % (list(map(len, results_list1))))
    #print('| num results2 = %r' % (list(map(len, results_list2))))
    #assert results_list2 == results_list1
    return locals()
def image_upload_zip(**kwargs):
    r"""
    Returns the gid_list for image files submitted in a ZIP archive.  The
    image archive should be flat (no folders will be scanned for images) and
    must be smaller than 100 MB.  The archive can submit multiple images,
    ideally in JPEG format to save space.  Duplicate image uploads will result
    in the duplicate images receiving the same gid based on the hashed pixel
    values.

    Args:
        image_zip_archive (binary): the POST variable containing the binary
            (multi-form) image archive data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid_list (list of rowids): the list of gids corresponding to the
            images submitted.  The gids correspond to the image names sorted
            in lexicographical order.

    RESTful:
        Method: POST
        URL:    /api/image/zip
    """
    ibs = current_app.ibs
    # Get image archive
    image_archive = request.files.get('image_zip_archive', None)
    if image_archive is None:
        raise IOError('Image archive not given')

    # If the directory already exists, delete it
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')

    modifier = 1
    upload_path = '%s' % (current_time)
    while exists(upload_path):
        upload_path = '%s_%04d' % (current_time, modifier)
        modifier += 1

    upload_path = join(uploads_path, upload_path)
    ut.ensuredir(upload_path)

    # Extract the content
    try:
        with zipfile.ZipFile(image_archive, 'r') as zfile:
            zfile.extractall(upload_path)
    except Exception:
        ut.remove_dirs(upload_path)
        raise IOError('Image archive extraction failed')

    """
    test to ensure Directory and utool do the same thing

    from detecttools.directory import Directory
    upload_path = ut.truepath('~/Pictures')
    gpath_list1 = sorted(ut.list_images(upload_path, recursive=False, full=True))
    direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    gpath_list = direct.files()
    gpath_list = sorted(gpath_list)
    assert gpath_list1 == gpath_list
    """

    gpath_list = sorted(ut.list_images(upload_path, recursive=False, full=True))
    #direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    #gpath_list = direct.files()
    #gpath_list = sorted(gpath_list)
    gid_list = ibs.add_images(gpath_list, **kwargs)
    return gid_list
    nRows, nCols = ph.get_square_row_cols(nImgs)
    print('[viz*] r=%r, c=%r' % (nRows, nCols))
    #gs2 = gridspec.GridSpec(nRows, nCols)
    pnum_ = df2.get_pnum_func(nRows, nCols)
    fig = df2.figure(fnum=fnum, pnum=pnum_(0))
    fig.clf()
    for px, img in enumerate(img_list):
        title = 'test title'
        bbox_list = [dummy_bbox(img), dummy_bbox(img, (-.25, -.25), .1)]
        theta_list = [tau * .7, tau * .9]
        sel_list = [True, False]
        label_list = ['test label', 'lbl2']
        viz_image2.show_image(img, bbox_list=bbox_list, title=title,
                              sel_list=sel_list, label_list=label_list,
                              theta_list=theta_list, fnum=fnum, pnum=pnum_(px))


if __name__ == '__main__':
    TEST_IMAGES_URL = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir, fullpath=True)  # test image paths
    test_viz_image(imgpaths)
    exec(df2.present())
def test_pyrf():
    category = 'zebra_plains'

    detect_config = {
        'save_detection_images': True,
        'save_scales': True,
        'percentage_top': 0.40,
    }

    #=================================
    # Train / Detect Initialization
    #=================================

    testdata_dir = utool.unixpath('~/code/pyrf/results')
    # assert utool.checkpath(testdata_dir)
    if utool.get_argflag('--vd'):
        print(utool.ls(testdata_dir))

    # Create detector
    detector = Random_Forest_Detector()

    test_path = utool.grab_zipped_url(TEST_DATA_DETECT_URL, appname='utool')
    models_path = utool.grab_zipped_url(TEST_DATA_MODEL_URL, appname='utool')
    trees_path = join(models_path, category)

    results_path = join(utool.unixpath('~/code/pyrf/results'), category)
    # detect_path = join(results_path, 'detect')
    trees_path = join(results_path, 'trees')

    detect_path = join(test_path, category, 'detect')
    utool.ensuredir(detect_path)
    utool.ensuredir(test_path)
    utool.ensuredir(trees_path)

    #=================================
    # Detect using Random Forest
    #=================================

    # Get input images
    from vtool import image
    big_gpath_list = utool.list_images(test_path, fullpath=True, recursive=False)
    print(big_gpath_list)
    # Resize images to standard size
    if utool.get_argflag('--small'):
        big_gpath_list = big_gpath_list[0:8]
    #big_gpath_list = big_gpath_list[0:8]
    output_dir = join(test_path, 'resized')
    std_gpath_list = image.resize_imagelist_to_sqrtarea(big_gpath_list,
                                                        sqrt_area=800,
                                                        output_dir=output_dir,
                                                        checkexists=True)
    dst_gpath_list = [join(detect_path, split(gpath)[1])
                      for gpath in std_gpath_list]
    #utool.view_directory(test_path)
    #utool.view_directory('.')
    print(std_gpath_list)
    num_images = len(std_gpath_list)
    #assert num_images == 16, 'the test has diverged!'
    print('Testing on %r images' % num_images)

    # Load forest, so we don't have to reload every time
    forest = detector.load(trees_path, category + '-', num_trees=25)
    detector.set_detect_params(**detect_config)

    results_list1 = []
    with utool.Timer('[test_pyrf] for loop detector.detect') as t1:
        if not utool.get_argflag('--skip1'):
            for ix, (img_fpath, dst_fpath) in enumerate(zip(std_gpath_list,
                                                            dst_gpath_list)):
                #img_fname = split(img_fpath)[1]
                #dst_fpath = join(detect_path, img_fname)
                #print(' * img_fpath = %r' % img_fpath)
                #print(' * dst_fpath = %r' % dst_fpath)
                with utool.Timer('[test_pyrf] detector.detect ix=%r' % (ix,)):
                    results = detector.detect(forest, img_fpath, dst_fpath)
                results_list1.append(results)
                print('num results = %r' % len(results))
        else:
            print('...skipped')

    # with utool.Timer('[test_pyrf] detector.detect_many') as t2:
    #     results_list2 = detector.detect_many(forest, std_gpath_list,
    #                                          dst_gpath_list, use_openmp=True)

    print('')
    print('+ --------------')
    print('| total time1: %r' % t1.ellapsed)
    # print('| total time2: %r' % t2.ellapsed)
    print('|')
    print('| num results1 = %r' % (list(map(len, results_list1))))
    # print('| num results2 = %r' % (list(map(len, results_list2))))
    #assert results_list2 == results_list1
    return locals()
    # gs2 = gridspec.GridSpec(nRows, nCols)
    pnum_ = df2.get_pnum_func(nRows, nCols)
    fig = df2.figure(fnum=fnum, pnum=pnum_(0))
    fig.clf()
    for px, img in enumerate(img_list):
        title = 'test title'
        bbox_list = [dummy_bbox(img), dummy_bbox(img, (-0.25, -0.25), 0.1)]
        theta_list = [tau * 0.7, tau * 0.9]
        sel_list = [True, False]
        label_list = ['test label', 'lbl2']
        viz_image2.show_image(
            img,
            bbox_list=bbox_list,
            title=title,
            sel_list=sel_list,
            label_list=label_list,
            theta_list=theta_list,
            fnum=fnum,
            pnum=pnum_(px),
        )


if __name__ == '__main__':
    TEST_IMAGES_URL = 'https://wildbookiarepository.azureedge.net/data/testdata.zip'
    test_image_dir = utool.grab_zipped_url(TEST_IMAGES_URL, appname='utool')
    imgpaths = utool.list_images(test_image_dir, fullpath=True)  # test image paths
    _test_viz_image(imgpaths)
    exec(df2.present())