Example #1
def get_classifier_crossval(cls=None):
  dirname = join(res_dir, 'classify_scores')
  makedirs(dirname) 
  if cls == None:
    return join(dirname, 'crossval.txt')
  else:
    return join(dirname, 'crossval_%s.txt'%cls)
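Note: most of the helpers in these examples call the project's own makedirs (imported into config.py), not os.makedirs directly. Judging from how it is used both for its side effect (as above) and for its return value (e.g. dirname = makedirs(join(...)) in later examples), it creates the directory if it is missing and returns the path. A minimal sketch of such a helper, offered only as an assumption about the project's util module:

import os

def makedirs(dirname):
    # Create the directory (and any missing parents) if it does not exist yet,
    # then return the path so the call can be chained inside join(...).
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    return dirname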
Example #2
File: config.py Project: raldam/timely
def get_classifier_crossval(cls=None):
    dirname = join(res_dir, 'classify_scores')
    makedirs(dirname)
    if cls == None:
        return join(dirname, 'crossval.txt')
    else:
        return join(dirname, 'crossval_%s.txt' % cls)
Example #3
def get_classifier_svm_name(cls, C, gamma, current_fold):
  dirname = join(res_dir, 'classify_svm')
  makedirs(dirname) 
  if current_fold == -1: 
    filename = join(dirname, '%s_%f_%f'%(cls, C, gamma))
  else:
    filename = join(dirname, '%s_%f_%f_%d'%(cls, C, gamma, current_fold)) 
  return filename
Example #4
File: config.py Project: raldam/timely
def get_classifier_svm_name(cls, C, gamma, current_fold):
    dirname = join(res_dir, 'classify_svm')
    makedirs(dirname)
    if current_fold == -1:
        filename = join(dirname, '%s_%f_%f' % (cls, C, gamma))
    else:
        filename = join(dirname, '%s_%f_%f_%d' % (cls, C, gamma, current_fold))
    return filename
Example #5
def get_gist_fastinf_table_name(dataset, cls):
  savedir = makedirs(join(get_gist_svm_dirname(dataset), 'classif'))
  if cls == None:
    savefile = join(savedir,'cls_gt')
  else:
    savefile = join(savedir,'cls_gt_%s'%cls)
  return savefile
Example #6
File: config.py Project: raldam/timely
def get_gist_fastinf_table_name(dataset, cls):
    savedir = makedirs(join(get_gist_svm_dirname(dataset), 'classif'))
    if cls == None:
        savefile = join(savedir, 'cls_gt')
    else:
        savefile = join(savedir, 'cls_gt_%s' % cls)
    return savefile
Example #7
def get_ext_dets_filename(dataset, suffix):
  if dataset == None:
    # TODO:
    """
    Just load eeeevery image 
    """
    dataset_name = 'full_pascal_full'
  else:
    dataset_name = dataset.name # NOTE does not depend on # images
  dirname = makedirs(join(res_dir,'ext_dets'))  
  return join(dirname, '%s_%s.npy'%(dataset_name,suffix))
Example #8
File: config.py Project: raldam/timely
def get_ext_dets_filename(dataset, suffix):
    if dataset == None:
        # TODO:
        """
        Just load eeeevery image
        """
        dataset_name = 'full_pascal_full'
    else:
        dataset_name = dataset.name  # NOTE does not depend on # images
    dirname = makedirs(join(res_dir, 'ext_dets'))
    return join(dirname, '%s_%s.npy' % (dataset_name, suffix))
Example #9
File: config.py Project: raldam/timely
def get_codebook_path(feature):
    dirname = join(data_dir, feature, 'codebooks')
    makedirs(dirname)
    return join(dirname, 'codebook')
Example #10
File: config.py Project: raldam/timely
def get_ext_dets_foldname(dataset):
    dirname = makedirs(join(res_dir, 'ext_dets', dataset.name))
    return dirname
Example #11
def get_gist_svm_dirname(dataset):
  return makedirs(join(res_dir, 'gist_svm', dataset.name))
Example #12
File: config.py Project: raldam/timely
def get_evals_dp_dir(dp, train=False):
    dirname = get_evals_dir(dp.dataset.get_name())
    if train:
        dirname = get_evals_dir(dp.train_dataset.get_name())
    return makedirs(join(dirname, dp.get_config_name()))
Example #13
File: config.py Project: raldam/timely
def get_dp_weights_filename(dp):
    dirname = get_evals_dir(dp.weights_dataset_name)
    dirname = makedirs(join(dirname, dp.get_config_name()))
    return join(dirname, 'weights.txt')
Example #14
'''
Created on Nov 17, 2011

@author: Tobias Baumgartner
'''

from common_imports import *
from common_mpi import *

import scipy.io
import synthetic.util as ut

folder = config.VOC_dir + 'JPEGImages/gist/'
gist_save = os.path.join(config.res_dir,'gist_features/')
ut.makedirs(gist_save)
files = os.listdir(folder)

mat = {}
for file_idx in range(len(files)):
  infile = files[file_idx]
  mat[infile[:-4]] = np.transpose(scipy.io.loadmat(os.path.join(folder,infile))['g'])

cPickle.dump(mat, open(os.path.join(gist_save, 'features'),'w'))
Example #15
'''
Created on Nov 17, 2011

@author: Tobias Baumgartner
'''

from common_imports import *
from common_mpi import *

import scipy.io
import synthetic.util as ut

folder = config.VOC_dir + 'JPEGImages/gist/'
gist_save = os.path.join(config.res_dir, 'gist_features/')
ut.makedirs(gist_save)
files = os.listdir(folder)

mat = {}
for file_idx in range(len(files)):
    infile = files[file_idx]
    mat[infile[:-4]] = np.transpose(
        scipy.io.loadmat(os.path.join(folder, infile))['g'])

cPickle.dump(mat, open(os.path.join(gist_save, 'features'), 'w'))
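The script above stores all GIST descriptors in a single pickled dict keyed by the image name without its 4-character extension. A minimal sketch of reading that file back, assuming the same gist_save path and Python 2 cPickle:

import os
import cPickle

# Hypothetical reload of the features written above.
with open(os.path.join(gist_save, 'features'), 'r') as f:
    gist_features = cPickle.load(f)
# gist_features[name] is then the transposed GIST vector 'g' for that image.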
Example #16
def get_classifier_score_name(img):
  dirname = join(res_dir, 'classify_scores')
  makedirs(dirname) 
  return join(dirname, img.name[:-4])
Example #17
def get_dp_weights_filename(dp):
  dirname = get_evals_dir(dp.weights_dataset_name)
  dirname = makedirs(join(dirname, dp.get_config_name()))
  return join(dirname,'weights.txt')
Example #18
def get_classifier_dataset_dirname(classifier, dataset):
  return makedirs(join(get_classifier_dirname(classifier), dataset.name))
Example #19
def get_classifier_featvect_name(img):
  dirname = join(res_dir, 'classify_featvects')
  makedirs(dirname) 
  return join(dirname, img.name[:-4])
Example #20
def get_classifier_dirname(classifier):
  name = classifier.name+'_svm'
  if len(classifier.suffix) >= 1:
    name += '_'+classifier.suffix
  return makedirs(join(res_dir,name))
Example #21
def get_cached_dataset_filename(name):
  assert(name in pascal_paths)
  dirname = makedirs(join(res_dir,'cached_datasets'))
  return join(dirname, str(VOCyear)+'_'+name+'.pickle')
Example #22
def get_ext_dets_foldname(dataset):
  dirname = makedirs(join(res_dir,'ext_dets',dataset.name))
  return dirname
Example #23
File: config.py Project: raldam/timely
def get_fastinf_data_file(dataset, suffix):
    dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
    return join(dirname, 'data.txt')
Example #24
def get_assignments_path(feature, image):
  dirname = join(data_dir, feature, 'assignments/')
  makedirs(dirname)
  return join(dirname, image.name[0:-4])
Example #25
File: config.py Project: raldam/timely
def get_fastinf_cache_file(dataset, suffix, m='0', r2='', s=0):
    dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
    filename = append_settings('cache.pickle', m, r2)
    return join(dirname, filename + '_Is_' + str(s))
Example #26
File: config.py Project: raldam/timely
def get_ext_test_support_dir():
    return makedirs(join(test_support_dir, 'ext_cls_test'))
Example #27
def train_with_hard_negatives(d, dtest, cbwords, cbsamps, codebook, cls, pos_table, neg_table, feature_type,
                              iterations, kernel='chi2', L=2,
                              testsize='max', randomize=False):
  """ 
    An iterative training with hard negatives
    -input: 
      d - training Dataset
      dtest - test Dataset
      codebook - dsift codebook for pyramid (recommended size 3000)
      cls - the class to be trained
      pos_table - Table with cols [x,y,w,h,img_ind]
      neg_table - same
      kernel - 'linear', 'rbf', 'chi2'
      iterations - number of rounds of training
      L - levels for pyramids    
      testsize - size of initial test 
  """
  # Featurize and pyramidize the input
  e = Extractor()
  M = codebook.shape[0]
  pyr_size = M*1./3.*(4**(L+1)-1)

  print 'get pos train pyrs'
  pos_pyrs = get_pyr(d,e,pos_table,pyr_size,L,codebook,feature_type,cls)
  print 'get neg train pyrs'
  neg_pyrs = get_pyr(d,e,neg_table,pyr_size,L,codebook,feature_type,cls)
    
  print 'built all positive pyramids'
  
  classification = np.asarray([1]*pos_table.arr.shape[0] + [-1]*neg_table.arr.shape[0])
  
  filename = config.data_dir + 'features/' + feature_type + '/svms/' + kernel + \
        '/'+ cls
  ut.makedirs(config.data_dir + 'features/' + feature_type + '/svms/' + kernel)
  
    
  # with that we now determined our whole dataset D  
  #D = np.concatenate((pos_pyrs, neg_pyrs, pos_test_pyr, neg_test_pyr))
  #Y = np.concatenate((classification, test_classification))
  #idx_train = np.arange(pos_pyrs.shape[0] + neg_pyrs.shape[0])
  #idx_test = np.arange(pos_test_pyr.shape[0] + neg_test_pyr.shape[0]) + idx_train.size
  train_pyrs = np.concatenate((pos_pyrs,neg_pyrs))  
  for i in range(iterations):
    # train new model - according to the hard-negative mining algorithm by Felzenszwalb et al.,
    # "Object Detection with Discriminatively Trained Part-Based Models"
    
    [test_pyrs, test_classification] = get_test_windows(testsize,dtest,e,\
                                          pyr_size,L,codebook,feature_type,cls,\
                                          pos_table.cols, randomize=randomize)
    print 'node',comm_rank,'training in round', i, 'with', np.sum(classification==1),'pos and',\
      np.sum(classification == -1),'negs'
    print 'testing', test_pyrs.shape[0], 'new samples'
    print time.strftime('%m-%d %H:%M')
    
    # get new test samples
    
    
    # ----------1-------------
    model = train_svm(train_pyrs, classification, kernel)
  
    testY = svm_predict(np.concatenate((train_pyrs,test_pyrs)), model)
    result = testY
    print result
    
    res_train = testY[:train_pyrs.shape[0]]
    res_test = testY[train_pyrs.shape[0]:]
          
    # ----------3-------------        
    # remove easy samples from train-set
    idx_tr_list = []
    for s_ind in range(res_train.shape[0]):
      if res_train[s_ind]*classification[s_ind] <= 1:
        idx_tr_list.append(s_ind)
    indices = np.array(idx_tr_list, dtype=int)
    train_pyrs = train_pyrs[indices]
    classification = classification[indices]
    
    # ----------4-------------
    idx_hn_list = []
    new_hards = False
    for s_ind in range(res_test.shape[0]):
      if res_test[s_ind]*test_classification[s_ind] < 1:
        new_hards = True
        idx_hn_list.append(s_ind)    

    nu_train_idc = np.array(idx_hn_list, dtype=int)
    train_pyrs = np.vstack((train_pyrs, test_pyrs[nu_train_idc]))
    classification = np.concatenate((classification, test_classification[nu_train_idc]))
        
    test_result = result[-test_pyrs.shape[0]:]
    # predicted sign vs. ground-truth label on the new test windows
    fn = np.sum(np.multiply(test_result < 0, np.transpose(np.matrix(test_classification == 1))))
    tp = np.sum(np.multiply(test_result > 0, np.transpose(np.matrix(test_classification == 1))))
    fp = np.sum(np.multiply(test_result > 0, np.transpose(np.matrix(test_classification == -1))))
    
    # save these to the training file
    prec = tp/float(tp+fp)
    rec = tp/float(tp+fn)
    print 'tp, fp:',tp,fp
    print 'prec, rec:', prec,rec
    with open(filename + '_train', "a") as myfile:
      myfile.write(str(prec) + ' ' + str(rec)+'\n')    
  
    # ----------2-------------
    if not new_hards:
      # no new hard negatives from the test set, so we quit
      break
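The function above follows the usual hard-negative mining recipe: train an SVM, drop training samples classified with a comfortable margin, score fresh candidate windows, and add the misclassified ones back as hard examples until none remain. A stripped-down sketch of that control flow, with train_svm, svm_predict, and draw_candidates as hypothetical stand-ins for the project's helpers:

import numpy as np

def hard_negative_mining(train_X, train_y, draw_candidates, train_svm, svm_predict, iterations=5):
    model = None
    for _ in range(iterations):
        # 1. Train on the current (hard) training set.
        model = train_svm(train_X, train_y)

        # 2. Keep only training samples inside or violating the margin (score * label <= 1).
        scores = svm_predict(train_X, model)
        keep = scores * train_y <= 1
        train_X, train_y = train_X[keep], train_y[keep]

        # 3. Score fresh candidate windows and collect the misclassified ones.
        cand_X, cand_y = draw_candidates()
        cand_scores = svm_predict(cand_X, model)
        hard = cand_scores * cand_y < 1
        if not np.any(hard):
            break  # no new hard examples: stop, as in the termination check above
        train_X = np.vstack((train_X, cand_X[hard]))
        train_y = np.concatenate((train_y, cand_y[hard]))
    return model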
Example #28
def get_dp_features_images_dirname(dp):
  dirname = get_evals_dir(dp.weights_dataset_name)
  dirname = makedirs(join(dirname, dp.get_config_name()))
  return makedirs(join(dirname,'features_images'))
Example #29
def get_evals_dir(dataset_name):
  return makedirs(join(evals_dir,dataset_name))
Example #30
File: config.py Project: raldam/timely
def get_classifier_dirname(classifier):
    name = classifier.name + '_svm'
    if len(classifier.suffix) >= 1:
        name += '_' + classifier.suffix
    return makedirs(join(res_dir, name))
Example #31
File: config.py Project: raldam/timely
def get_dp_features_images_dirname(dp):
    dirname = get_evals_dir(dp.weights_dataset_name)
    dirname = makedirs(join(dirname, dp.get_config_name()))
    return makedirs(join(dirname, 'features_images'))
Example #32
File: config.py Project: raldam/timely
def get_classifier_featvect_name(img):
    dirname = join(res_dir, 'classify_featvects')
    makedirs(dirname)
    return join(dirname, img.name[:-4])
Example #33
File: config.py Project: raldam/timely
def get_cached_dataset_filename(name):
    assert (name in pascal_paths)
    dirname = makedirs(join(res_dir, 'cached_datasets'))
    return join(dirname, str(VOCyear) + '_' + name + '.pickle')
Example #34
def get_evals_dp_dir(dp,train=False):
  dirname = get_evals_dir(dp.dataset.get_name())
  if train:
    dirname = get_evals_dir(dp.train_dataset.get_name())
  return makedirs(join(dirname, dp.get_config_name()))
Example #35
def get_codebook_path(feature):
  dirname = join(data_dir, feature, 'codebooks')
  makedirs(dirname)
  return join(dirname, 'codebook')
Example #36
def get_ext_test_support_dir():
  return makedirs(join(test_support_dir, 'ext_cls_test'))
Example #37
File: config.py Project: raldam/timely
def get_gist_svm_dirname(dataset):
    return makedirs(join(res_dir, 'gist_svm', dataset.name))
Example #38
def get_mrf_bound_filename(dataset, suffix):
  dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
  return join(dirname, 'bounds.txt')
Example #39
def get_fastinf_data_file(dataset, suffix):
  dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
  return join(dirname, 'data.txt')
Example #40
def get_fastinf_res_file(dataset, suffix, m='0', r2=''):
  dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
  return join(dirname, append_settings('res.txt',m,r2))
Example #41
File: config.py Project: raldam/timely
def get_classifier_dataset_dirname(classifier, dataset):
    return makedirs(join(get_classifier_dirname(classifier), dataset.name))
Example #42
def get_fastinf_cache_file(dataset,suffix, m='0', r2='', s=0):
  dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
  filename = append_settings('cache.pickle',m,r2)
  return join(dirname, filename+'_Is_'+str(s))
Example #43
File: config.py Project: raldam/timely
def get_classifier_score_name(img):
    dirname = join(res_dir, 'classify_scores')
    makedirs(dirname)
    return join(dirname, img.name[:-4])
Example #44
def get_dataset_stats_dir(dataset):
  return makedirs(join(res_dir,'dataset_stats',dataset.name))
Example #45
File: config.py Project: raldam/timely
def get_assignments_path(feature, image):
    dirname = join(data_dir, feature, 'assignments/')
    makedirs(dirname)
    return join(dirname, image.name[0:-4])
Example #46
def get_dets_configs_dir(dataset):
  return makedirs(join(res_dir,'det_configs',dataset.name))
Example #47
File: config.py Project: raldam/timely
def get_mrf_bound_filename(dataset, suffix):
    dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
    return join(dirname, 'bounds.txt')
Example #48
def get_sliding_windows_dir(dataset_name):
  return makedirs(join(res_dir, 'sliding_windows_%s'%dataset_name))
Example #49
File: config.py Project: raldam/timely
def get_fastinf_res_file(dataset, suffix, m='0', r2=''):
    dirname = makedirs(join(fastinf_dir, dataset.name, suffix))
    return join(dirname, append_settings('res.txt', m, r2))
Example #50
File: config.py Project: raldam/timely
def get_sliding_windows_cached_dir(dataset_name):
    sliding_windows_dir = join(temp_res_dir,
                               'sliding_windows_%s' % dataset_name)
    return makedirs(join(sliding_windows_dir, 'cached'))
Example #51
File: config.py Project: raldam/timely
def get_dataset_stats_dir(dataset):
    return makedirs(join(res_dir, 'dataset_stats', dataset.name))
Example #52
File: config.py Project: raldam/timely
def get_sliding_windows_params_dir(dataset_name):
    return makedirs(join(get_sliding_windows_dir(dataset_name), 'params'))
Example #53
  codebook_samples = 15
  num_pos = 'max'
  testsize = 'max' 
  kernel = 'chi2'
  
#  num_pos = 3
#  testsize = 4
  
  # For my local testings
  classes = ['dog']
  #classes = ['bicycle','bird','boat','bottle','bus','car','cat']
#  testsize = 1
#  num_pos = 1

  if comm_rank == 0:
    ut.makedirs(config.data_dir + 'features/' + feature_type + '/times/')
    ut.makedirs(config.data_dir + 'features/' + feature_type + '/codebooks/times/')
    ut.makedirs(config.data_dir + 'features/' + feature_type + '/svms/train_times/')
    
  for cls_idx in range(comm_rank, len(classes), comm_size): 
  #for cls in classes:
    cls = classes[cls_idx]
    codebook = e.get_codebook(d, feature_type)
    pos_arr = d.get_pos_windows(cls)[10:]
    
    neg_arr = d.get_neg_windows(pos_arr.shape[0], cls, max_overlap=0)
    
    if not num_pos == 'max':    
      if not randomize:
        pos_arr = pos_arr[:num_pos]
        neg_arr = neg_arr[:num_pos]
Example #54
File: config.py Project: raldam/timely
def get_jumping_windows_dir(dataset_name):
    return makedirs(join(res_dir, 'jumping_windows_%s' % dataset_name))
Example #55
def main():
    parser = argparse.ArgumentParser(
        description='Execute different functions of our system')
    parser.add_argument('--first_n',
                        type=int,
                        help='only take the first N images in the datasets')
    parser.add_argument('--name',
                        help='name for this run',
                        default='default',
                        choices=['default', 'nolateral', 'nohal', 'halfsize'])
    parser.add_argument('--force',
                        action='store_true',
                        default=False,
                        help='force overwrite')

    args = parser.parse_args()
    print(args)

    #configuration class
    class config(object):
        pass

    cfg = config()
    cfg.testname = "../ctfdet/data/finalRL/%s2_test"  #object model
    cfg.bottomup = False  #use complete search
    cfg.resize = 1.0  #resize the input image
    cfg.hallucinate = True  #use HOGs up to 4 pixels
    cfg.initr = 1  #initial radius of the CtF search
    cfg.ratio = 1  #radius at the next levels
    cfg.deform = True  #use deformation
    cfg.usemrf = True  #use lateral constraints

    if args.name == 'default':
        pass  # sticking with the default params
    elif args.name == 'nolateral':
        cfg.usemrf = False
    elif args.name == 'nohal':
        cfg.hallucinate = False
    elif args.name == 'halfsize':
        cfg.resize = 0.5

    # f**k it, do both
    test_datasets = ['val', 'test', 'train']
    for test_dataset in test_datasets:
        # Load the dataset
        dataset = Dataset('full_pascal_' + test_dataset)
        if args.first_n:
            dataset.images = dataset.images[:args.first_n]

        # create directory for storing cached detections
        dirname = './temp_data'
        if os.path.exists('/u/sergeyk'):
            dirname = '/u/vis/x1/sergeyk/object_detection'
        dirname = dirname + '/ctfdets/%s' % (args.name)
        ut.makedirs(dirname)

        num_images = len(dataset.images)
        for img_ind in range(comm_rank, num_images, comm_size):
            # check for existing det
            image = dataset.images[img_ind]
            filename = os.path.join(dirname, image.name + '.npy')
            if os.path.exists(filename) and not args.force:
                #table = np.load(filename)[()]
                continue

            #read the image
            imname = dataset.get_image_filename(img_ind)
            img = util2.myimread(imname, resize=cfg.resize)
            #compute the hog pyramid
            f = pyrHOG2.pyrHOG(img,
                               interv=10,
                               savedir="",
                               notsave=True,
                               notload=True,
                               hallucinate=cfg.hallucinate,
                               cformat=True)

            #for each class
            all_dets = []
            for ccls in dataset.classes:
                t = time.time()
                cls_ind = dataset.get_ind(ccls)
                print "%s Img %d/%d Class: %s" % (test_dataset, img_ind + 1,
                                                  num_images, ccls)
                #load the class model
                m = util2.load("%s%d.model" % (cfg.testname % ccls, 7))
                res = []
                t1 = time.time()
                #for each aspect
                for clm, m in enumerate(m):
                    #scan the image with left and right models
                    res.append(
                        pyrHOG2RL.detectflip(f,
                                             m,
                                             None,
                                             hallucinate=cfg.hallucinate,
                                             initr=cfg.initr,
                                             ratio=cfg.ratio,
                                             deform=cfg.deform,
                                             bottomup=cfg.bottomup,
                                             usemrf=cfg.usemrf,
                                             small=False,
                                             cl=clm))
                fuse = []
                numhog = 0
                #fuse the detections
                for mix in res:
                    tr = mix[0]
                    fuse += mix[1]
                    numhog += mix[3]
                rfuse = tr.rank(fuse, maxnum=300)
                nfuse = tr.cluster(rfuse, ovr=0.3, inclusion=False)
                #print "Number of computed HOGs:",numhog
                time_elapsed = time.time() - t
                print "Elapsed time: %.3f s" % time_elapsed

                bboxes = [nf['bbox'] for nf in nfuse]
                scores = [nf['scr'] for nf in nfuse]
                assert (len(bboxes) == len(scores))
                if len(bboxes) > 0:
                    arr = np.zeros((len(bboxes), 7))
                    arr[:, :4] = BoundingBox.convert_arr_from_corners(
                        np.array(bboxes))
                    arr[:, 4] = scores
                    arr[:, 5] = time_elapsed
                    arr[:, 6] = cls_ind
                    all_dets.append(arr)
            cols = ['x', 'y', 'w', 'h', 'score', 'time', 'cls_ind']
            if len(all_dets) > 0:
                all_dets = np.concatenate(all_dets, 0)
            else:
                all_dets = np.array([])
            table = Table(all_dets, cols)
            np.save(filename, table)
Example #56
File: config.py Project: raldam/timely
def get_evals_dir(dataset_name):
    return makedirs(join(evals_dir, dataset_name))
Example #57
def main():
    parser = argparse.ArgumentParser(description="Execute different functions of our system")
    parser.add_argument("--first_n", type=int, help="only take the first N images in the datasets")
    parser.add_argument(
        "--name", help="name for this run", default="default", choices=["default", "nolateral", "nohal", "halfsize"]
    )
    parser.add_argument("--force", action="store_true", default=False, help="force overwrite")

    args = parser.parse_args()
    print (args)

    # configuration class
    class config(object):
        pass

    cfg = config()
    cfg.testname = "../ctfdet/data/finalRL/%s2_test"  # object model
    cfg.bottomup = False  # use complete search
    cfg.resize = 1.0  # resize the input image
    cfg.hallucinate = True  # use HOGs up to 4 pixels
    cfg.initr = 1  # initial radius of the CtF search
    cfg.ratio = 1  # radius at the next levels
    cfg.deform = True  # use deformation
    cfg.usemrf = True  # use lateral constraints

    if args.name == "default":
        pass  # sticking with the default params
    elif args.name == "nolateral":
        cfg.usemrf = False
    elif args.name == "nohal":
        cfg.hallucinate = False
    elif args.name == "halfsize":
        cfg.resize = 0.5

    # f**k it, do both
    test_datasets = ["val", "test", "train"]
    for test_dataset in test_datasets:
        # Load the dataset
        dataset = Dataset("full_pascal_" + test_dataset)
        if args.first_n:
            dataset.images = dataset.images[: args.first_n]

        # create directory for storing cached detections
        dirname = "./temp_data"
        if os.path.exists("/u/sergeyk"):
            dirname = "/u/vis/x1/sergeyk/object_detection"
        dirname = dirname + "/ctfdets/%s" % (args.name)
        ut.makedirs(dirname)

        num_images = len(dataset.images)
        for img_ind in range(comm_rank, num_images, comm_size):
            # check for existing det
            image = dataset.images[img_ind]
            filename = os.path.join(dirname, image.name + ".npy")
            if os.path.exists(filename) and not args.force:
                # table = np.load(filename)[()]
                continue

            # read the image
            imname = dataset.get_image_filename(img_ind)
            img = util2.myimread(imname, resize=cfg.resize)
            # compute the hog pyramid
            f = pyrHOG2.pyrHOG(
                img, interv=10, savedir="", notsave=True, notload=True, hallucinate=cfg.hallucinate, cformat=True
            )

            # for each class
            all_dets = []
            for ccls in dataset.classes:
                t = time.time()
                cls_ind = dataset.get_ind(ccls)
                print "%s Img %d/%d Class: %s" % (test_dataset, img_ind + 1, num_images, ccls)
                # load the class model
                m = util2.load("%s%d.model" % (cfg.testname % ccls, 7))
                res = []
                t1 = time.time()
                # for each aspect
                for clm, m in enumerate(m):
                    # scan the image with left and right models
                    res.append(
                        pyrHOG2RL.detectflip(
                            f,
                            m,
                            None,
                            hallucinate=cfg.hallucinate,
                            initr=cfg.initr,
                            ratio=cfg.ratio,
                            deform=cfg.deform,
                            bottomup=cfg.bottomup,
                            usemrf=cfg.usemrf,
                            small=False,
                            cl=clm,
                        )
                    )
                fuse = []
                numhog = 0
                # fuse the detections
                for mix in res:
                    tr = mix[0]
                    fuse += mix[1]
                    numhog += mix[3]
                rfuse = tr.rank(fuse, maxnum=300)
                nfuse = tr.cluster(rfuse, ovr=0.3, inclusion=False)
                # print "Number of computed HOGs:",numhog
                time_elapsed = time.time() - t
                print "Elapsed time: %.3f s" % time_elapsed

                bboxes = [nf["bbox"] for nf in nfuse]
                scores = [nf["scr"] for nf in nfuse]
                assert len(bboxes) == len(scores)
                if len(bboxes) > 0:
                    arr = np.zeros((len(bboxes), 7))
                    arr[:, :4] = BoundingBox.convert_arr_from_corners(np.array(bboxes))
                    arr[:, 4] = scores
                    arr[:, 5] = time_elapsed
                    arr[:, 6] = cls_ind
                    all_dets.append(arr)
            cols = ["x", "y", "w", "h", "score", "time", "cls_ind"]
            if len(all_dets) > 0:
                all_dets = np.concatenate(all_dets, 0)
            else:
                all_dets = np.array([])
            table = Table(all_dets, cols)
            np.save(filename, table)
Example #58
def get_sliding_windows_params_dir(dataset_name):
  return makedirs(join(get_sliding_windows_dir(dataset_name), 'params'))