Example #1
  def __init__(self, dataset, model_name, num_obs_vars, m='0', r1='1', lbp_iters=3000):
    # TODO: experiment with different fastinf settings

    self.dataset = dataset
    self.fd = FastinfDiscretizer(self.dataset, model_name)
    self.res_fname = config.get_fastinf_res_file(dataset,model_name,m,r1)

    # TODO: experiment with different amounts of smoothing
    # amount of smoothing is correlated with fastinf slowness, values [0,1)
    self.smoothing = 0.5
    self.cache_fname = config.get_fastinf_cache_file(dataset,model_name,m,r1,self.smoothing)

    if opexists(self.cache_fname):
      with open(self.cache_fname) as f:
        print("Loading fastinf cache from file")
        self.cache = cPickle.load(f)
    else:
      self.cache = {}
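    # Build the fastinf command line; flag meanings are inferred from the
    # arguments passed below: -i the model/results file, -Is the smoothing
    # amount, -Imm the maximum number of LBP iterations.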
    self.cmd = config.fastinf_bin + " -i %s -m 0 -Is %f -Imm %d" % (self.res_fname, self.smoothing, lbp_iters)
    self.num_obs_vars = num_obs_vars
    self.tt = ut.TicToc().tic()
    self.process = pexpect.spawn(self.cmd)
    self.blacklist = []
  
    marginals = self.get_marginals()
    print("FastinfModel: Computed initial marginals in %.3f sec"%self.tt.qtoc())
Example #2
def run_fastinf_different_settings(d, ms, rs, suffixs, num_bins=5):
  
  settings = list(itertools.product(suffixs, ms, rs))
  table_gt = d.get_cls_ground_truth().arr.astype(int)
  print 'run with a total of %d settings'%len(settings)
  
  for setindx in range(comm_rank, len(settings), comm_size):
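    # Round-robin work split: each MPI rank processes every comm_size-th setting.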
    second_table = None
    setin = settings[setindx]
    suffix = setin[0]
    m = str(setin[1])
    r1 = str(setin[2])
    
    print 'node %d runs %s, m=%s, r1=%s'%(comm_rank, suffix, m, r1)

    filename = config.get_fastinf_mrf_file(d, suffix)
    data_filename = config.get_fastinf_data_file(d, suffix)
    
    if suffix == 'perfect':      
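      # 'perfect' observations: the class ground truth itself is used as the
      # observed half of the table, i.e. an oracle classifier.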
      table = np.hstack((table_gt, table_gt))
      bounds = np.tile(np.linspace(0, 1, num_bins+1),(table_gt.shape[1],1))
      print bounds
      
    elif suffix == 'GIST':
      table = gist_classify_dataset(d)   
      bounds, discr_table = discretize_table(table, num_bins)  
      table = np.hstack((table_gt, discr_table))
      
    elif suffix == 'CSC':
      filename_csc = os.path.join(config.get_ext_dets_foldname(d),'table')
      print filename_csc
      if not os.path.exists(filename_csc):
        raise RuntimeWarning('The csc classification could not be loaded from %s'%filename_csc)
      orig_table = cPickle.load(open(filename_csc,'r'))
      if isinstance(orig_table, Table):
        orig_table = orig_table.arr[:,:-1]
      bounds, discr_table = discretize_table(orig_table, num_bins)
      table = np.hstack((table_gt, discr_table))
      
    elif suffix == 'CSC_regions':
      rm = RegionModel("1big_2small", 0.5)
      detector = 'csc_default'
      from synthetic.dataset_policy import DatasetPolicy
      orig_table = DatasetPolicy.load_ext_detections(d, detector)            
      gt = d.get_det_gt().copy()
      # augment the gt with a constant score of 1 per row so it matches the detection table format (results in less code)
      gt.cols.append('score')
      gt.arr = np.hstack((gt.arr, np.ones((gt.shape[0], 1))))  
      table_gt_region = create_regioned_table(rm, gt, d.images, len(d.classes))
      # At this point we need to split them for the different regions
      orig_table_region = create_regioned_table(rm, orig_table, d.images, len(d.classes))
      
      bounds, discr_table_region = discretize_table(orig_table_region, num_bins)
      table = np.hstack((table_gt_region, discr_table_region))
      
    elif suffix == 'GIST_CSC':
      filename_csc = os.path.join(config.get_ext_dets_foldname(d),'table')
      if not os.path.exists(filename_csc):
        raise RuntimeWarning('The csc classification could not be loaded from %s'%filename_csc)
      orig_table = cPickle.load(open(filename_csc,'r'))
      if isinstance(orig_table, Table):
        orig_table = orig_table.arr[:,:-1]
      bounds, discr_table = discretize_table(orig_table, num_bins)      
      table = np.hstack((table_gt, discr_table))
      store_bound(d, 'CSC', bounds)
      
      second_table = gist_classify_dataset(d)      
      sec_bounds, second_table = discretize_table(second_table, num_bins)      
      store_bound(d, 'GIST', sec_bounds)
      
      full_bound = np.hstack((sec_bounds, bounds))
      store_bound(d, 'GIST_CSC', full_bound)
    
    if suffix != 'GIST_CSC':
      store_bound(d, suffix, bounds)
    
    print 'set up table on %d, write out mrf for %s, m=%s, r1=%s'%(comm_rank, suffix, m, r1)   
      
    write_out_mrf(table, num_bins, filename, data_filename, second_table=second_table)
    
    add_sets = ['-m', m]
    if r1 != '':
      add_sets += ['-r1', r1]
          
    if second_table is not None:
      sec_bound_file = '%s_secbounds' % filename
      for s in add_sets:
        sec_bound_file += '_' + s
      np.savetxt(sec_bound_file, sec_bounds)
      
    print '%d start running lbp for %s, m=%s, r1=%s'%(comm_rank, suffix, m, r1)
    
    filename_out = config.get_fastinf_res_file(d, suffix, m, r1)
    execute_lbp(filename, data_filename, filename_out, add_settings=add_sets)
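A hedged invocation sketch for run_fastinf_different_settings; the dataset constructor, the candidate m/r1 values, and the launch command are illustrative assumptions. The function is meant to run under MPI so that comm_rank and comm_size partition the settings across processes.

# Hypothetical driver -- dataset name and setting values are assumptions.
d = Dataset('full_pascal_trainval')              # illustrative dataset
ms = [0, 1]                                      # values forwarded to fastinf's -m flag
rs = ['', 1]                                     # values for -r1; '' omits the flag
suffixs = ['perfect', 'GIST', 'CSC', 'GIST_CSC'] # observation sources handled above
run_fastinf_different_settings(d, ms, rs, suffixs, num_bins=5)
# e.g. launched as: mpirun -n 4 python run_fastinf.py   (script name hypothetical)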