def test_learn_weights(self):
    dataset = Dataset('full_pascal_val')
    train_dataset = Dataset('full_pascal_train')
    dataset.images = dataset.images[:20]
    train_dataset.images = train_dataset.images[:20]
    dp = DatasetPolicy(dataset, train_dataset, self.weights_dataset_name, **self.config)
    weights = dp.learn_weights()
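# A minimal sketch of running just this test in isolation. The module and
# TestCase class names ('test_dataset_policy', 'TestDatasetPolicy') are
# assumptions for illustration; substitute the actual names used in this file.
#
#   python -m unittest test_dataset_policy.TestDatasetPolicy.test_learn_weights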
def main(): parser = argparse.ArgumentParser(description="Execute different functions of our system") parser.add_argument("--first_n", type=int, help="only take the first N images in the datasets") parser.add_argument( "--name", help="name for this run", default="default", choices=["default", "nolateral", "nohal", "halfsize"] ) parser.add_argument("--force", action="store_true", default=False, help="force overwrite") args = parser.parse_args() print (args) # configuration class class config(object): pass cfg = config() cfg.testname = "../ctfdet/data/finalRL/%s2_test" # object model cfg.bottomup = False # use complete search cfg.resize = 1.0 # resize the input image cfg.hallucinate = True # use HOGs up to 4 pixels cfg.initr = 1 # initial radious of the CtF search cfg.ratio = 1 # radious at the next levels cfg.deform = True # use deformation cfg.usemrf = True # use lateral constraints if args.name == "default": cfg # sticking with the default params elif args.name == "nolateral": cfg.usemrf = False elif args.name == "nohal": cfg.hallucinate = False elif args.name == "halfsize": cfg.resize = 0.5 # f**k it, do both test_datasets = ["val", "test", "train"] for test_dataset in test_datasets: # Load the dataset dataset = Dataset("full_pascal_" + test_dataset) if args.first_n: dataset.images = dataset.images[: args.first_n] # create directory for storing cached detections dirname = "./temp_data" if os.path.exists("/u/sergeyk"): dirname = "/u/vis/x1/sergeyk/object_detection" dirname = dirname + "/ctfdets/%s" % (args.name) ut.makedirs(dirname) num_images = len(dataset.images) for img_ind in range(comm_rank, num_images, comm_size): # check for existing det image = dataset.images[img_ind] filename = os.path.join(dirname, image.name + ".npy") if os.path.exists(filename) and not args.force: # table = np.load(filename)[()] continue # read the image imname = dataset.get_image_filename(img_ind) img = util2.myimread(imname, resize=cfg.resize) # compute the hog pyramid f = pyrHOG2.pyrHOG( img, interv=10, savedir="", notsave=True, notload=True, hallucinate=cfg.hallucinate, cformat=True ) # for each class all_dets = [] for ccls in dataset.classes: t = time.time() cls_ind = dataset.get_ind(ccls) print "%s Img %d/%d Class: %s" % (test_dataset, img_ind + 1, num_images, ccls) # load the class model m = util2.load("%s%d.model" % (cfg.testname % ccls, 7)) res = [] t1 = time.time() # for each aspect for clm, m in enumerate(m): # scan the image with left and right models res.append( pyrHOG2RL.detectflip( f, m, None, hallucinate=cfg.hallucinate, initr=cfg.initr, ratio=cfg.ratio, deform=cfg.deform, bottomup=cfg.bottomup, usemrf=cfg.usemrf, small=False, cl=clm, ) ) fuse = [] numhog = 0 # fuse the detections for mix in res: tr = mix[0] fuse += mix[1] numhog += mix[3] rfuse = tr.rank(fuse, maxnum=300) nfuse = tr.cluster(rfuse, ovr=0.3, inclusion=False) # print "Number of computed HOGs:",numhog time_elapsed = time.time() - t print "Elapsed time: %.3f s" % time_elapsed bboxes = [nf["bbox"] for nf in nfuse] scores = [nf["scr"] for nf in nfuse] assert len(bboxes) == len(scores) if len(bboxes) > 0: arr = np.zeros((len(bboxes), 7)) arr[:, :4] = BoundingBox.convert_arr_from_corners(np.array(bboxes)) arr[:, 4] = scores arr[:, 5] = time_elapsed arr[:, 6] = cls_ind all_dets.append(arr) cols = ["x", "y", "w", "h", "score", "time", "cls_ind"] if len(all_dets) > 0: all_dets = np.concatenate(all_dets, 0) else: all_dets = np.array([]) table = Table(all_dets, cols) np.save(filename, table)
def main(): parser = argparse.ArgumentParser(description="Run experiments with the timely detection system.") parser.add_argument( "--test_dataset", choices=["val", "test", "trainval"], default="val", help="""Dataset to use for testing. Run on val until final runs. The training dataset is inferred (val->train; test->trainval; trainval->trainval).""", ) parser.add_argument("--first_n", type=int, help="only take the first N images in the test dataset") parser.add_argument("--first_n_train", type=int, help="only take the first N images in the train dataset") parser.add_argument( "--config", help="""Config file name that specifies the experiments to run. Give name such that the file is configs/#{name}.json or configs/#{name}/ In the latter case, all files within the directory will be loaded.""", ) parser.add_argument("--suffix", help="Overwrites the suffix in the config(s).") parser.add_argument("--bounds10", action="store_true", default=False, help="set bounds to [0,10]") parser.add_argument("--bounds515", action="store_true", default=False, help="set bounds to [5,15]") parser.add_argument("--force", action="store_true", default=False, help="force overwrite") parser.add_argument("--wholeset_prs", action="store_true", default=False, help="evaluate in the final p-r regime") parser.add_argument( "--no_apvst", action="store_true", default=False, help="do NOT evaluate in the ap vs. time regime" ) parser.add_argument( "--det_configs", action="store_true", default=False, help="output detector statistics to det_configs" ) parser.add_argument("--inverse_prior", action="store_true", default=False, help="use inverse prior class values") args = parser.parse_args() print(args) # If config file is not given, just run one experiment using default config if not args.config: configs = [DatasetPolicy.default_config] else: configs = load_configs(args.config) # Load the dataset dataset = Dataset("full_pascal_" + args.test_dataset) if args.first_n: dataset.images = dataset.images[: args.first_n] # Infer train_dataset if args.test_dataset == "test": train_dataset = Dataset("full_pascal_trainval") elif args.test_dataset == "val": train_dataset = Dataset("full_pascal_train") elif args.test_dataset == "trainval": train_dataset = Dataset("full_pascal_trainval") else: None # impossible by argparse settings # Only need to set training dataset values; evaluation gets it from there if args.inverse_prior: train_dataset.set_values("inverse_prior") # TODO: hack if args.first_n_train: train_dataset.images = train_dataset.images[: args.first_n_train] # In both the above cases, we use the val dataset for weights weights_dataset_name = "full_pascal_val" dets_tables = [] dets_tables_whole = [] clses_tables_whole = [] all_bounds = [] plot_infos = [] for config_f in configs: if args.suffix: config_f["suffix"] = args.suffix if args.bounds10: config_f["bounds"] = [0, 10] if args.bounds515: config_f["bounds"] = [5, 15] assert not (args.bounds10 and args.bounds515) if args.inverse_prior: config_f["suffix"] += "_inverse_prior" config_f["values"] = "inverse_prior" dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f) ev = Evaluation(dp) all_bounds.append(dp.bounds) plot_infos.append(dict((k, config_f[k]) for k in ("label", "line", "color") if k in config_f)) # output the det configs first if args.det_configs: dp.output_det_statistics() # evaluate in the AP vs. 
Time regime, unless told not to if not args.no_apvst: dets_table = ev.evaluate_vs_t(None, None, force=args.force) # dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force) if comm_rank == 0: dets_tables.append(dets_table) # dets_tables_whole.append(dets_table_whole) # clses_tables_whole.append(clses_table_whole) # optionally, evaluate in the standard PR regime if args.wholeset_prs: ev.evaluate_detections_whole(None, force=args.force) # and plot the comparison if multiple config files were given if not args.no_apvst and len(configs) > 1 and comm_rank == 0: # filename of the final plot is the config file name dirname = config.get_evals_dir(dataset.get_name()) filename = args.config if args.inverse_prior: filename += "_inverse_prior" # det avg ff = opjoin(dirname, "%s_det_avg.png" % filename) ff_nl = opjoin(dirname, "%s_det_avg_nl.png" % filename) # make sure directory exists ut.makedirs(os.path.dirname(ff)) Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos) Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos) if False: # det whole ff = opjoin(dirname, "%s_det_whole.png" % filename) ff_nl = opjoin(dirname, "%s_det_whole_nl.png" % filename) Evaluation.plot_ap_vs_t( dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos ) Evaluation.plot_ap_vs_t( dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos ) # cls whole ff = opjoin(dirname, "%s_cls_whole.png" % filename) ff_nl = opjoin(dirname, "%s_cls_whole_nl.png" % filename) Evaluation.plot_ap_vs_t( clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos ) Evaluation.plot_ap_vs_t( clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos )
def main():
    parser = argparse.ArgumentParser(
        description='Execute different functions of our system')
    parser.add_argument('--first_n', type=int,
                        help='only take the first N images in the datasets')
    parser.add_argument('--name', help='name for this run', default='default',
                        choices=['default', 'nolateral', 'nohal', 'halfsize'])
    parser.add_argument('--force', action='store_true', default=False,
                        help='force overwrite')
    args = parser.parse_args()
    print(args)

    # configuration class
    class config(object):
        pass
    cfg = config()
    cfg.testname = "../ctfdet/data/finalRL/%s2_test"  # object model
    cfg.bottomup = False     # use complete search
    cfg.resize = 1.0         # resize the input image
    cfg.hallucinate = True   # use HOGs up to 4 pixels
    cfg.initr = 1            # initial radius of the CtF search
    cfg.ratio = 1            # radius at the next levels
    cfg.deform = True        # use deformation
    cfg.usemrf = True        # use lateral constraints

    if args.name == 'default':
        pass  # stick with the default params
    elif args.name == 'nolateral':
        cfg.usemrf = False
    elif args.name == 'nohal':
        cfg.hallucinate = False
    elif args.name == 'halfsize':
        cfg.resize = 0.5

    # run on all splits
    test_datasets = ['val', 'test', 'train']
    for test_dataset in test_datasets:
        # Load the dataset
        dataset = Dataset('full_pascal_' + test_dataset)
        if args.first_n:
            dataset.images = dataset.images[:args.first_n]

        # create directory for storing cached detections
        dirname = './temp_data'
        if os.path.exists('/u/sergeyk'):
            dirname = '/u/vis/x1/sergeyk/object_detection'
        dirname = dirname + '/ctfdets/%s' % (args.name)
        ut.makedirs(dirname)

        num_images = len(dataset.images)
        for img_ind in range(comm_rank, num_images, comm_size):
            # check for an existing detection cache for this image
            image = dataset.images[img_ind]
            filename = os.path.join(dirname, image.name + '.npy')
            if os.path.exists(filename) and not args.force:
                #table = np.load(filename)[()]
                continue

            # read the image
            imname = dataset.get_image_filename(img_ind)
            img = util2.myimread(imname, resize=cfg.resize)
            # compute the HOG pyramid
            f = pyrHOG2.pyrHOG(img, interv=10, savedir="", notsave=True,
                               notload=True, hallucinate=cfg.hallucinate,
                               cformat=True)

            # run the detector for each class
            all_dets = []
            for ccls in dataset.classes:
                t = time.time()
                cls_ind = dataset.get_ind(ccls)
                print "%s Img %d/%d Class: %s" % (test_dataset, img_ind + 1, num_images, ccls)
                # load the class model
                models = util2.load("%s%d.model" % (cfg.testname % ccls, 7))
                res = []
                t1 = time.time()
                # for each aspect
                for clm, m in enumerate(models):
                    # scan the image with left and right models
                    res.append(pyrHOG2RL.detectflip(
                        f, m, None, hallucinate=cfg.hallucinate,
                        initr=cfg.initr, ratio=cfg.ratio, deform=cfg.deform,
                        bottomup=cfg.bottomup, usemrf=cfg.usemrf,
                        small=False, cl=clm))
                fuse = []
                numhog = 0
                # fuse the detections across aspects
                for mix in res:
                    tr = mix[0]
                    fuse += mix[1]
                    numhog += mix[3]
                rfuse = tr.rank(fuse, maxnum=300)
                nfuse = tr.cluster(rfuse, ovr=0.3, inclusion=False)
                #print "Number of computed HOGs:", numhog
                time_elapsed = time.time() - t
                print "Elapsed time: %.3f s" % time_elapsed

                bboxes = [nf['bbox'] for nf in nfuse]
                scores = [nf['scr'] for nf in nfuse]
                assert len(bboxes) == len(scores)
                if len(bboxes) > 0:
                    arr = np.zeros((len(bboxes), 7))
                    arr[:, :4] = BoundingBox.convert_arr_from_corners(np.array(bboxes))
                    arr[:, 4] = scores
                    arr[:, 5] = time_elapsed
                    arr[:, 6] = cls_ind
                    all_dets.append(arr)

            # assemble all detections for this image into one Table and cache it
            cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind']
            if len(all_dets) > 0:
                all_dets = np.concatenate(all_dets, 0)
            else:
                all_dets = np.array([])
            table = Table(all_dets, cols)
            np.save(filename, table)
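# Example invocations, as a sketch: the script filename 'run_ctfdet.py' is an
# assumption for illustration. Detections are cached per image under
# ctfdets/<name>, so re-running skips finished images unless --force is given.
#
#   python run_ctfdet.py --name default --first_n 10
#   python run_ctfdet.py --name nolateral --force
#
# The per-image loop is striped over MPI ranks (comm_rank/comm_size), so the
# same command can also be launched under mpirun to parallelize across images.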
def main(): parser = argparse.ArgumentParser( description="Run experiments with the timely detection system.") parser.add_argument('--test_dataset', choices=['val','test','trainval'], default='val', help="""Dataset to use for testing. Run on val until final runs. The training dataset is inferred (val->train; test->trainval; trainval->trainval).""") parser.add_argument('--first_n', type=int, help='only take the first N images in the test dataset') parser.add_argument('--first_n_train', type=int, help='only take the first N images in the train dataset') parser.add_argument('--config', help="""Config file name that specifies the experiments to run. Give name such that the file is configs/#{name}.json or configs/#{name}/ In the latter case, all files within the directory will be loaded.""") parser.add_argument('--suffix', help="Overwrites the suffix in the config(s).") parser.add_argument('--bounds10', action='store_true', default=False, help='set bounds to [0,10]') parser.add_argument('--bounds515', action='store_true', default=False, help='set bounds to [5,15]') parser.add_argument('--force', action='store_true', default=False, help='force overwrite') parser.add_argument('--wholeset_prs', action='store_true', default=False, help='evaluate in the final p-r regime') parser.add_argument('--no_apvst', action='store_true', default=False, help='do NOT evaluate in the ap vs. time regime') parser.add_argument('--det_configs', action='store_true', default=False, help='output detector statistics to det_configs') parser.add_argument('--inverse_prior', action='store_true', default=False, help='use inverse prior class values') args = parser.parse_args() print(args) # If config file is not given, just run one experiment using default config if not args.config: configs = [DatasetPolicy.default_config] else: configs = load_configs(args.config) # Load the dataset dataset = Dataset('full_pascal_'+args.test_dataset) if args.first_n: dataset.images = dataset.images[:args.first_n] # Infer train_dataset if args.test_dataset=='test': train_dataset = Dataset('full_pascal_trainval') elif args.test_dataset=='val': train_dataset = Dataset('full_pascal_train') elif args.test_dataset=='trainval': train_dataset = Dataset('full_pascal_trainval') else: None # impossible by argparse settings # Only need to set training dataset values; evaluation gets it from there if args.inverse_prior: train_dataset.set_values('inverse_prior') # TODO: hack if args.first_n_train: train_dataset.images = train_dataset.images[:args.first_n_train] # In both the above cases, we use the val dataset for weights weights_dataset_name = 'full_pascal_val' dets_tables = [] dets_tables_whole = [] clses_tables_whole = [] all_bounds = [] plot_infos = [] for config_f in configs: if args.suffix: config_f['suffix'] = args.suffix if args.bounds10: config_f['bounds'] = [0,10] if args.bounds515: config_f['bounds'] = [5,15] assert(not (args.bounds10 and args.bounds515)) if args.inverse_prior: config_f['suffix'] += '_inverse_prior' config_f['values'] = 'inverse_prior' dp = DatasetPolicy(dataset, train_dataset, weights_dataset_name, **config_f) ev = Evaluation(dp) all_bounds.append(dp.bounds) plot_infos.append(dict((k,config_f[k]) for k in ('label','line','color') if k in config_f)) # output the det configs first if args.det_configs: dp.output_det_statistics() # evaluate in the AP vs. 
Time regime, unless told not to if not args.no_apvst: dets_table = ev.evaluate_vs_t(None,None,force=args.force) #dets_table_whole,clses_table_whole = ev.evaluate_vs_t_whole(None,None,force=args.force) if comm_rank==0: dets_tables.append(dets_table) #dets_tables_whole.append(dets_table_whole) #clses_tables_whole.append(clses_table_whole) # optionally, evaluate in the standard PR regime if args.wholeset_prs: ev.evaluate_detections_whole(None,force=args.force) # and plot the comparison if multiple config files were given if not args.no_apvst and len(configs)>1 and comm_rank==0: # filename of the final plot is the config file name dirname = config.get_evals_dir(dataset.get_name()) filename = args.config if args.inverse_prior: filename += '_inverse_prior' # det avg ff = opjoin(dirname, '%s_det_avg.png'%filename) ff_nl = opjoin(dirname, '%s_det_avg_nl.png'%filename) # make sure directory exists ut.makedirs(os.path.dirname(ff)) Evaluation.plot_ap_vs_t(dets_tables, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos) Evaluation.plot_ap_vs_t(dets_tables, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos) if False: # det whole ff = opjoin(dirname, '%s_det_whole.png'%filename) ff_nl = opjoin(dirname, '%s_det_whole_nl.png'%filename) Evaluation.plot_ap_vs_t(dets_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos) Evaluation.plot_ap_vs_t(dets_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos) # cls whole ff = opjoin(dirname, '%s_cls_whole.png'%filename) ff_nl = opjoin(dirname, '%s_cls_whole_nl.png'%filename) Evaluation.plot_ap_vs_t(clses_tables_whole, ff, all_bounds, with_legend=True, force=True, plot_infos=plot_infos) Evaluation.plot_ap_vs_t(clses_tables_whole, ff_nl, all_bounds, with_legend=False, force=True, plot_infos=plot_infos)
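# A sketch of a configs/#{name}.json entry. Only keys that this script itself
# reads or overrides are shown ('suffix', 'bounds', 'values', and the plotting
# keys 'label', 'line', 'color'); the remaining policy settings default to
# DatasetPolicy.default_config, so treat the values below as illustrative
# assumptions rather than the complete schema.
#
#   {
#     "suffix": "my_experiment",
#     "bounds": [0, 20],
#     "label": "my experiment",
#     "line": "-",
#     "color": "blue"
#   }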
def main():
    parser = argparse.ArgumentParser(
        description='Execute different functions of our system')
    parser.add_argument('mode', choices=[
        'window_stats', 'evaluate_metaparams', 'evaluate_jw',
        'evaluate_get_pos_windows', 'train_svm',
        'extract_sift', 'extract_assignments', 'extract_codebook',
        'evaluate_jw_grid', 'final_metaparams',
        'assemble_dpm_dets', 'ctfdet', 'assemble_ctf_dets'])
    parser.add_argument('--test_dataset', choices=['val', 'test', 'train'],
                        default='test',
                        help='dataset to use for testing. the training dataset '
                             'is automatically inferred (val->train and test->trainval).')
    parser.add_argument('--first_n', type=int,
                        help='only take the first N images in the datasets')
    parser.add_argument('--bounds', type=str,
                        help='the start_time and deadline_time for the ImagePolicy '
                             'and corresponding evaluation. ex: (1,5)')
    parser.add_argument('--name', help='name for this run')
    parser.add_argument('--priors', default='random',
                        help='list of choices for the policy for selecting the next action. '
                             'choose from random, oracle, fixed_order, no_smooth, backoff. '
                             'ex: --priors=random,oracle,no_smooth')
    parser.add_argument('--compare_evals', action='store_true', default=False,
                        help='plot all the priors modes given on the same plot')
    parser.add_argument('--detector', choices=['perfect', 'perfect_with_noise', 'dpm', 'ctf'],
                        default='perfect', help='detector type')
    parser.add_argument('--force', action='store_true', default=False,
                        help='force overwrite')
    parser.add_argument('--gist', action='store_true', default=False,
                        help='use GIST as one of the actions')
    parser.add_argument('--clear_tmp', action='store_true', default=False,
                        help='clear the cached windows folder before running')
    parser.add_argument('--feature_type', choices=['sift', 'dsift'], default='dsift',
                        help='use this feature type')
    parser.add_argument('--kernel', choices=['chi2', 'rbf'], default='chi2',
                        help='kernel to train svm on')
    args = parser.parse_args()
    if args.priors:
        args.priors = args.priors.split(',')
    if args.bounds:
        args.bounds = [float(x) for x in re.findall(r'\d+', args.bounds)]
        assert len(args.bounds) == 2
    print(args)

    # Load the dataset
    dataset = Dataset('full_pascal_' + args.test_dataset)
    if args.first_n:
        dataset.images = dataset.images[:args.first_n]

    # Infer train_dataset
    if args.test_dataset == 'test':
        train_dataset = Dataset('full_pascal_trainval')
    elif args.test_dataset == 'val':
        train_dataset = Dataset('full_pascal_train')
    else:
        print("Impossible, setting train_dataset to dataset")
        train_dataset = dataset

    # Create window generator
    sw = SlidingWindows(dataset, train_dataset)

    if args.clear_tmp:
        dirname = config.get_sliding_windows_cached_dir(train_dataset.get_name())
        shutil.rmtree(dirname)
        dirname = config.get_sliding_windows_cached_dir(dataset.get_name())
        shutil.rmtree(dirname)

    if args.mode == 'assemble_dpm_dets':
        policy = DatasetPolicy(dataset, train_dataset, sw)
        dets = policy.load_ext_detections(dataset, suffix='dpm_may25')

    if args.mode == 'assemble_ctf_dets':
        policy = DatasetPolicy(dataset, train_dataset, sw)
        dets = policy.load_ext_detections(dataset, 'ctf', 'ctf_default')
        dets = policy.load_ext_detections(dataset, 'ctf', 'ctf_nohal')
        dets = policy.load_ext_detections(dataset, 'ctf', 'ctf_halfsize')

    if args.mode == 'evaluate_get_pos_windows':
        evaluate_get_pos_windows(train_dataset)
        return

    if args.mode == 'window_stats':
        "Compute and plot the statistics of ground truth window parameters."
        results = SlidingWindows.get_dataset_window_stats(train_dataset, plot=True)

    if args.mode == 'ctfdet':
        """Run Pedersoli's detector on the dataset and assemble into one Table."""
        run_pedersoli(dataset)

    if args.mode == 'evaluate_jw':
        """
        Evaluate the jumping window approach by producing plots of recall
        vs. #windows.
        """
        # TODO hack: both sw and jw should subclass something like WindowGenerator
        jw = JumpingWindowsDetector(use_scale=True)
        sw.jw = jw
        #classes = dataset.classes
        classes = ['car']
        # classes = ['bicycle', 'car', 'horse', 'sofa',
        #            'bird', 'chair', 'motorbike', 'train',
        #            'boat', 'cow', 'person', 'tvmonitor',
        #            'bottle', 'diningtable', 'pottedplant',
        #            'bus', 'dog', 'sheep']
        for cls_idx in range(comm_rank, len(classes), comm_size):
            #for cls in dataset.classes:
            cls = classes[cls_idx]
            dirname = config.get_jumping_windows_dir(dataset.get_name())
            filename = os.path.join(dirname, '%s' % cls)
            sw.evaluate_recall(cls, filename, metaparams=None, mode='jw', plot=True)

    if args.mode == 'evaluate_jw_grid':
        """
        Evaluate the jumping window approach by producing plots of recall
        vs. #windows.
        """
        sw = SlidingWindows(dataset, train_dataset)
        jw = JumpingWindowsDetectorGrid()
        sw.jw = jw
        for cls in dataset.classes:
            dirname = config.get_jumping_windows_dir(dataset.get_name())
            filename = os.path.join(dirname, '%s' % cls)
            if os.path.isfile(config.data_dir + 'JumpingWindows/' + cls):
                sw.evaluate_recall(cls, filename, metaparams=None, mode='jw', plot=True)

    if args.mode == 'train_svm':
        randomize = not os.path.exists('/home/tobibaum')
        d = Dataset('full_pascal_train')
        dtest = Dataset('full_pascal_val')
        e = Extractor()
        classes = config.pascal_classes
        num_words = 3000
        iters = 5
        feature_type = 'dsift'
        codebook_samples = 15
        num_pos = 'max'
        testsize = 'max'
        if args.first_n:
            num_pos = args.first_n
            testsize = 1.5 * num_pos
        kernel = args.kernel

        if comm_rank == 0:
            ut.makedirs(config.data_dir + 'features/' + feature_type + '/times/')
            ut.makedirs(config.data_dir + 'features/' + feature_type + '/codebooks/times/')
            ut.makedirs(config.data_dir + 'features/' + feature_type + '/svms/train_times/')

        for cls_idx in range(comm_rank, len(classes), comm_size):
            #for cls in classes:
            cls = classes[cls_idx]
            codebook = e.get_codebook(d, feature_type)
            pos_arr = d.get_pos_windows(cls)
            neg_arr = d.get_neg_windows(pos_arr.shape[0], cls, max_overlap=0)

            # subsample to matching numbers of positive and negative windows
            if not num_pos == 'max':
                if not randomize:
                    pos_arr = pos_arr[:num_pos]
                    neg_arr = neg_arr[:num_pos]
                else:
                    rand = np.random.random_integers(0, pos_arr.shape[0] - 1, size=num_pos)
                    pos_arr = pos_arr[rand]
                    rand = np.random.random_integers(0, neg_arr.shape[0] - 1, size=num_pos)
                    neg_arr = neg_arr[rand]
            pos_table = Table(pos_arr, ['x', 'y', 'w', 'h', 'img_ind'])
            neg_table = Table(neg_arr, pos_table.cols)
            train_with_hard_negatives(d, dtest, num_words, codebook_samples, codebook,
                                      cls, pos_table, neg_table, feature_type,
                                      iterations=iters, kernel=kernel, L=2,
                                      testsize=testsize, randomize=randomize)

    if args.mode == 'evaluate_metaparams':
        """
        Grid search over metaparams values for get_windows_new, with the AUC
        of recall vs. # windows evaluation.
        """
        sw.grid_search_over_metaparams()
        return

    if args.mode == 'final_metaparams':
        dirname = config.get_sliding_windows_metaparams_dir(train_dataset.get_name())
        # currently these are the best auc/complexity params
        best_params_for_classes = [
            (62, 15, 12, 'importance', 0),   # aeroplane
            (83, 15, 12, 'importance', 0),   # bicycle
            (62, 15, 12, 'importance', 0),   # bird
            (62, 15, 12, 'importance', 0),   # boat
            (125, 12, 12, 'importance', 0),  # bottle
            (83, 12, 9, 'importance', 0),    # bus
            (125, 15, 9, 'importance', 0),   # car
            (125, 12, 12, 'linear', 0),      # cat
            (125, 15, 9, 'importance', 0),   # chair
            (125, 9, 6, 'importance', 0),    # cow
            (125, 15, 6, 'linear', 0),       # diningtable
            (62, 15, 12, 'importance', 0),   # dog
            (83, 15, 6, 'importance', 0),    # horse
            (83, 12, 6, 'importance', 0),    # motorbike
            (83, 15, 12, 'importance', 0),   # person
            (83, 15, 6, 'importance', 0),    # pottedplant
            (83, 15, 12, 'importance', 0),   # sheep
            (83, 9, 6, 'importance', 0),     # sofa
            (62, 12, 6, 'importance', 0),    # train
            (62, 12, 12, 'importance', 0),   # tvmonitor
            (125, 9, 12, 'importance', 0)    # all
        ]
        # ACTUALLY THEY ARE ALL THE SAME!
        cheap_params = (62, 9, 6, 'importance', 0)
        for i in range(comm_rank, dataset.num_classes(), comm_size):
            cls = dataset.classes[i]
            best_params = best_params_for_classes[i]
            samples, num_scales, num_ratios, mode, priority = best_params
            #samples,num_scales,num_ratios,mode,priority,cls = cheap_params

            metaparams = {
                'samples_per_500px': samples,
                'num_scales': num_scales,
                'num_ratios': num_ratios,
                'mode': mode,
                'priority': 0}
            filename = '%s_%d_%d_%d_%s_%d' % (
                cls,
                metaparams['samples_per_500px'],
                metaparams['num_scales'],
                metaparams['num_ratios'],
                metaparams['mode'],
                metaparams['priority'])
            filename = os.path.join(dirname, filename)
            tables = sw.evaluate_recall(cls, filename, metaparams, 'sw', plot=True, force=False)

            metaparams = {
                'samples_per_500px': samples,
                'num_scales': num_scales,
                'num_ratios': num_ratios,
                'mode': mode,
                'priority': 1}
            filename = '%s_%d_%d_%d_%s_%d' % (
                cls,
                metaparams['samples_per_500px'],
                metaparams['num_scales'],
                metaparams['num_ratios'],
                metaparams['mode'],
                metaparams['priority'])
            filename = os.path.join(dirname, filename)
            tables = sw.evaluate_recall(cls, filename, metaparams, 'sw', plot=True, force=False)
        return

    if args.mode == 'extract_sift':
        e = Extractor()
        e.extract_all(['sift'], ['full_pascal_trainval', 'full_pascal_test'], 0, 0)

    if args.mode == 'extract_assignments':
        e = Extractor()
        feature_type = 'sift'
        for image_set in ['full_pascal_trainval', 'full_pascal_test']:
            d = Dataset(image_set)
            codebook = e.get_codebook(d, feature_type)
            print 'codebook loaded'
            for img_ind in range(comm_rank, len(d.images), comm_size):
                img = d.images[img_ind]
                #for img in d.images:
                e.get_assignments(np.array([0, 0, img.size[0], img.size[1]]),
                                  feature_type, codebook, img)

    if args.mode == 'extract_codebook':
        d = Dataset('full_pascal_trainval')
        e = Extractor()
        codebook = e.get_codebook(d, args.feature_type)