Example #1
def rematch(C, Q, dbsift):
  start = time.time()
  rdr = reader.get_reader(C.params['descriptor'])
  q = rdr.load_file(Q.siftpath)
  db = rdr.load_file(dbsift)
  flann = pyflann.FLANN()
  results, dists = flann.nn(db['vec'], q['vec'], 1, algorithm='linear')
  results, dists, q = np.array(results), np.array(dists), np.array(q)
  idx = np.argsort(dists)
  results = results[idx]
  dists = dists[idx]
  q = q[idx]
  count = 1000
  matches = []
  closed = set()
  for i in range(len(results)):
    if results[i] not in closed and dists[i] < 40000:
      closed.add(results[i])
      atom = {'db': db[results[i]]['geom'].copy(),
              'query': q[i]['geom'].copy()}
      matches.append(atom)
      count -= 1
    if count == 0:
      break
  INFO_TIMING("rematch took %f" % (time.time() - start))
  return matches
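Note: the FLANN call at the heart of rematch can be exercised on its own. A minimal sketch with random descriptors standing in for the loaded SIFT files (the shapes and the pyflann install are assumptions, not part of the example above):

import numpy as np
import pyflann

db_vec = np.random.rand(1000, 128).astype(np.float32)  # database descriptors
q_vec = np.random.rand(50, 128).astype(np.float32)     # query descriptors
flann = pyflann.FLANN()
# one nearest database row per query row, plus the distance
# (squared L2 under the default metric, hence thresholds like 40000 above)
results, dists = flann.nn(db_vec, q_vec, 1, algorithm='linear')
print(results.shape, dists.shape)  # (50,) (50,)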
Example #2
    def predict(self, infer_config, material):
        """predict"""
        infer_reader = reader.get_reader(self.name, 'infer', infer_config, material=material)
        feature_list = []
        for infer_iter, data in enumerate(infer_reader()):
            inputs      = [items[0] for items in data]
            winds       = [items[1] for items in data]
            feat_info   = [items[2] for items in data]
            feature_T   = feat_info[0][0]
            feature_N   = feat_info[0][1]

            inputs = np.array(inputs)
            pred_bmn, pred_sta, pred_end = self.infer(inputs)

            if infer_iter == 0:
                sum_pred_bmn = np.zeros((2, feature_N, feature_T))
                sum_pred_sta = np.zeros((feature_T, ))
                sum_pred_end = np.zeros((feature_T, ))
                sum_pred_cnt = np.zeros((feature_T, ))

            for idx, sub_wind in enumerate(winds):
                sum_pred_bmn[:, :, sub_wind[0]: sub_wind[1]] += pred_bmn[idx]
                sum_pred_sta[sub_wind[0]: sub_wind[1]] += pred_sta[idx]
                sum_pred_end[sub_wind[0]: sub_wind[1]] += pred_end[idx]
                sum_pred_cnt[sub_wind[0]: sub_wind[1]] += np.ones((sub_wind[1] - sub_wind[0], ))

        pred_bmn = sum_pred_bmn / sum_pred_cnt
        pred_sta = sum_pred_sta / sum_pred_cnt
        pred_end = sum_pred_end / sum_pred_cnt

        score_result = self.generate_props(pred_bmn, pred_sta, pred_end)
        results = process_proposal(score_result, self.min_frame_thread, self.nms_thread, self.min_pred_score)

        return results
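Note: the accumulation loop in predict averages predictions over overlapping temporal windows. The same idea reduced to a toy numpy sketch (the window positions and values are made up):

import numpy as np

feature_T = 10
sum_pred = np.zeros(feature_T)
sum_cnt = np.zeros(feature_T)
windows = [(0, 6), (4, 10)]           # overlapping [start, end) windows
preds = [np.ones(6), 2 * np.ones(6)]  # one prediction per window

for (s, e), p in zip(windows, preds):
    sum_pred[s:e] += p
    sum_cnt[s:e] += 1

avg_pred = sum_pred / sum_cnt  # positions 4-5 average both windows: 1.5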
Example #3
def rematch(C, Q, dbsift):
    start = time.time()
    rdr = reader.get_reader(C.params['descriptor'])
    q = rdr.load_file(Q.siftpath)
    db = rdr.load_file(dbsift)
    flann = pyflann.FLANN()
    results, dists = flann.nn(db['vec'], q['vec'], 1, algorithm='linear')
    results, dists, q = np.array(results), np.array(dists), np.array(q)
    idx = np.argsort(dists)
    results = results[idx]
    dists = dists[idx]
    q = q[idx]
    count = 1000
    matches = []
    closed = set()
    for i in range(0, len(results)):
        if results[i] not in closed and dists[i] < 40000:
            closed.add(results[i])
            atom = {
                'db': db[results[i]]['geom'].copy(),
                'query': q[i]['geom'].copy()
            }
            matches.append(atom)
            count -= 1
        if count == 0:
            break
    INFO_TIMING("rematch took %f" % (time.time() - start))
    return matches
Example #4
  def map_tags_hybrid3(self, pixelmap, C):
    tags = self.map_tags_camera()
    accepted = []
    outside = []
    bad = []
    obs = self.source.get_loc_dict()
    for (tag, (_, pixel)) in tags:
      location = geom.picknearestll(pixelmap, tag)
      error = tag.xydistance(location)
      if error < 2.0:
        accepted.append((tag, (_, pixel)))
      elif error < 15.0 or not geom.contains(pixel, self.image.size):
        outside.append((tag, (_, pixel)))
      else:
        bad.append((tag, (999, pixel)))

    cell = util.getclosestcell(self.lat, self.lon, C.dbdir)[0]
    cellpath = os.path.join(C.dbdir, cell)
    pm = reader.get_reader(C.params['descriptor'])\
      .load_PointToViewsMap(cellpath, C.infodir)

    for (tag, (_, pixel)) in outside:
      vis, t = pm.hasView(C, tag.lat, tag.lon,\
        self.lat, self.lon, self.yaw, 25)
      emv = tag.emIsVisible(self.source, C, 25)
      if (vis or emv):
        if geom.norm_compatible(tag, self):
          accepted.append((tag, (_, pixel)))
        else:
          bad.append((tag, (12, pixel)))
      else:
        bad.append((tag, (15, pixel)))
    return accepted, bad
Example #5
    def predict(self, infer_config, material):
        """predict"""
        infer_reader = reader.get_reader(self.name,
                                         'infer',
                                         infer_config,
                                         material=material)
        results = []
        for infer_iter, data in enumerate(infer_reader()):
            video_id = [[items[-2], items[-1]] for items in data]
            input1 = [items[0] for items in data]
            input1_arr, input1_lod = self.pre_process(input1)
            output1, output2 = self.infer(input1_arr, input1_lod)

            predictions_id = output1
            predictions_iou = output2
            for i in range(len(predictions_id)):
                topk_inds = predictions_id[i].argsort()[0 - self.topk:]
                topk_inds = topk_inds[::-1]
                preds_id = predictions_id[i][topk_inds]
                preds_iou = predictions_iou[i][0]
                results.append((video_id[i], preds_id.tolist(),
                                topk_inds.tolist(), preds_iou.tolist()))

        predict_result = get_action_result(results, self.label_map_file,
                                           self.fps, self.cls_thread,
                                           self.iou_thread, self.nms_id,
                                           self.nms_thread, self.frame_offset)
        return predict_result
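Note: the top-k selection above relies on an argsort idiom worth spelling out; predictions_id[i].argsort()[0 - self.topk:] is just the last topk indices of an ascending sort. Standalone:

import numpy as np

scores = np.array([0.1, 0.7, 0.3, 0.9, 0.5])
topk = 3
topk_inds = scores.argsort()[-topk:]  # indices of the 3 largest, ascending
topk_inds = topk_inds[::-1]           # descending by score: [3, 1, 4]
print(scores[topk_inds])              # [0.9 0.7 0.5]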
Example #6
def infer(args):
    # parse config
    config = parse_config(args.config)
    infer_config = merge_configs(config, 'infer', vars(args))
    print_configs(infer_config, "Infer")
    infer_model = models.get_model(args.model_name,
                                   infer_config,
                                   mode='infer',
                                   is_videotag=True)
    infer_model.build_input(use_dataloader=False)
    infer_model.build_model()
    infer_feeds = infer_model.feeds()
    infer_outputs = infer_model.outputs()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    filelist = args.filelist or infer_config.INFER.filelist
    filepath = args.video_path or infer_config.INFER.get('filepath', '')
    if filepath != '':
        assert os.path.exists(filepath), "{} not exist.".format(filepath)
    else:
        assert os.path.exists(filelist), "{} not exist.".format(filelist)

    # get infer reader
    infer_reader = get_reader(args.model_name.upper(), 'infer', infer_config)

    if args.weights:
        assert os.path.exists(
            args.weights), "Given weight dir {} not exist.".format(
                args.weights)
    # if no weight files specified, download weights from paddle
    weights = args.weights or infer_model.get_weights()

    infer_model.load_test_weights(exe, weights, fluid.default_main_program())

    infer_feeder = fluid.DataFeeder(place=place, feed_list=infer_feeds)
    fetch_list = infer_model.fetches()

    infer_metrics = get_metrics(args.model_name.upper(), 'infer', infer_config)
    infer_metrics.reset()

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)

    for infer_iter, data in enumerate(infer_reader()):
        data_feed_in = [items[:-1] for items in data]
        video_id = [items[-1] for items in data]
        bs = len(video_id)
        feature_outs = exe.run(fetch_list=fetch_list,
                               feed=infer_feeder.feed(data_feed_in))
        for i in range(bs):
            filename = video_id[i].split('/')[-1][:-4]
            np.save(os.path.join(args.save_dir, filename + '.npy'),
                    feature_outs[0][i])  #shape: seg_num*feature_dim

    logger.info("Feature extraction End~")
Example #7
 def predict(self, infer_config):
     """predict"""
     infer_reader = reader.get_reader(self.name, 'infer', infer_config)
     feature_list = []
     for infer_iter, data in enumerate(infer_reader()):
         inputs = [items[:-1] for items in data]
         inputs = np.array(inputs)
         output = self.infer(inputs)
         feature_list.append(np.squeeze(output))
     feature_list = np.vstack(feature_list)
     return feature_list
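Note: each batch output is squeezed and the batches stacked into one feature matrix. The accumulation pattern in isolation (the shapes are illustrative only):

import numpy as np

feature_list = []
for _ in range(3):                    # stand-in for batches from the reader
    output = np.random.rand(1, 4, 8)  # e.g. a batch of 4 eight-dim features
    feature_list.append(np.squeeze(output))
features = np.vstack(feature_list)    # shape (12, 8)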
Example #8
def test(args):
    # parse config
    config = parse_config(args.config)
    test_config = merge_configs(config, 'test', vars(args))
    print_configs(test_config, "Test")
    use_dali = test_config['TEST'].get('use_dali', False)

    # build model
    test_model = models.get_model(args.model_name, test_config, mode='test')
    test_model.build_input(use_dataloader=False)
    test_model.build_model()
    test_feeds = test_model.feeds()
    test_fetch_list = test_model.fetches()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    if args.weights:
        assert os.path.exists(
            args.weights), "Given weight dir {} not exist.".format(
                args.weights)
    weights = args.weights or test_model.get_weights()

    logger.info('load test weights from {}'.format(weights))

    test_model.load_test_weights(exe, weights, fluid.default_main_program())

    # get reader and metrics
    test_reader = get_reader(args.model_name.upper(), 'test', test_config)
    test_metrics = get_metrics(args.model_name.upper(), 'test', test_config)

    test_feeder = fluid.DataFeeder(place=place, feed_list=test_feeds)

    epoch_period = []
    for test_iter, data in enumerate(test_reader()):
        cur_time = time.time()
        test_outs = exe.run(fetch_list=test_fetch_list,
                            feed=test_feeder.feed(data))
        period = time.time() - cur_time
        epoch_period.append(period)
        test_metrics.accumulate(test_outs)

        # metric here
        if args.log_interval > 0 and test_iter % args.log_interval == 0:
            info_str = '[EVAL] Batch {}'.format(test_iter)
            test_metrics.calculate_and_log_out(test_outs, info_str)

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    test_metrics.finalize_and_log_out("[EVAL] eval finished. ", args.save_dir)
Example #9
 def predict(self, infer_config):
     """predict"""
     infer_reader = reader.get_reader(self.name, 'infer', infer_config)
     feature_list = []
     pcm_list = []
     for infer_iter, data in enumerate(infer_reader()):
         inputs = np.array(data, dtype='float32')
         output = self.infer(inputs)
         feature_list.append(np.squeeze(output))
         pcm_list.append(inputs)
     feature_values = np.vstack(feature_list)
     pcm_values = np.vstack(pcm_list)
     return feature_values, pcm_values
Example #10
 def ddFetch(self, featurefile, view, info=0):
   "Fetches pixels for view using ddObject"
   if info == 0:
       info = eval(open(os.path.join(self.infodir, view + '.info')).read())
   else:
       info = eval(open(info).read())
   pixels = set()
   reader = get_reader(os.path.basename(featurefile))
   for x,y,foo,bar in reader.load_file(featurefile)['geom']:
     pixels.add((int(x), int(y)))
      print(str(int(x)) + ', ' + str(int(y)))
   data = ddGetAllPixels(pixels, info['id'], keep_None=True)
   assert len(data) == len(pixels)
   return data
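Note: the loop in ddFetch collapses sub-pixel feature coordinates into a set of integer pixels. In isolation (the 4-column geom layout of (x, y, scale, orientation) is assumed from the unpacking above):

import numpy as np

geom = np.array([[10.4, 20.7, 1.0, 0.5],
                 [10.6, 20.9, 2.0, 1.1]])
pixels = {(int(x), int(y)) for x, y, _, _ in geom}
print(pixels)  # {(10, 20)} -- nearby features map to the same pixel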
Example #11
def infer(args):
    # parse config
    config = parse_config(args.config)
    infer_config = merge_configs(config, 'infer', vars(args))
    print_configs(infer_config, "Infer")
    inference_model = args.inference_model 
    model_filename = 'EDVR_model.pdmodel'
    params_filename = 'EDVR_params.pdparams'
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    [inference_program, feed_list, fetch_list] = fluid.io.load_inference_model(
        dirname=inference_model,
        model_filename=model_filename,
        params_filename=params_filename,
        executor=exe)

    infer_reader = get_reader(args.model_name.upper(), 'infer', infer_config)
    #infer_metrics = get_metrics(args.model_name.upper(), 'infer', infer_config)
    #infer_metrics.reset()

    periods = []
    cur_time = time.time()
    for infer_iter, data in enumerate(infer_reader()):
        if args.model_name == 'EDVR':
            data_feed_in = [items[0] for items in data]
            video_info = [items[1:] for items in data]
            infer_outs = exe.run(inference_program,
                                 fetch_list=fetch_list,
                                 feed={feed_list[0]:np.array(data_feed_in)})
            infer_result_list = [item for item in infer_outs]
            videonames = [item[0] for item in video_info]
            framenames = [item[1] for item in video_info]
            for i in range(len(infer_result_list)):
                img_i = get_img(infer_result_list[i])
                save_img(img_i, 'img' + videonames[i] + framenames[i])

        prev_time = cur_time
        cur_time = time.time()
        period = cur_time - prev_time
        periods.append(period)

        #infer_metrics.accumulate(infer_result_list)

        if args.log_interval > 0 and infer_iter % args.log_interval == 0:
            logger.info('Processed {} samples'.format(infer_iter + 1))

    logger.info('[INFER] infer finished. average time: {}'.format(np.mean(periods)))

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
Example #12
 def ddFetch(self, featurefile, view, info=0):
     "Fetches pixels for view using ddObject"
     if info == 0:
         info = eval(
             open(os.path.join(self.infodir, view + '.info')).read())
     else:
         info = eval(open(info).read())
     pixels = set()
     reader = get_reader(os.path.basename(featurefile))
     for x, y, foo, bar in reader.load_file(featurefile)['geom']:
         pixels.add((int(x), int(y)))
          print(str(int(x)) + ', ' + str(int(y)))
     data = ddGetAllPixels(pixels, info['id'], keep_None=True)
     assert len(data) == len(pixels)
     return data
Example #13
  def map_tags_lookup(self, C):
    tags = self.map_tags_camera()
    cell = util.getclosestcell(self.lat, self.lon, C.dbdir)[0]
    cellpath = os.path.join(C.dbdir, cell)

    pm = reader.get_reader(C.params['descriptor'])\
      .load_PointToViewsMap(cellpath, C.infodir)

    accepted, bad = [], []

    for (tag, (_, pixel)) in tags:
      vis, t = pm.hasView(C, tag.lat, tag.lon, self.lat, self.lon, self.yaw, 20)
      if vis:
        accepted.append((tag, (_, pixel)))
      else:
        bad.append((tag, (999, pixel)))
    return accepted + bad
Example #14
 def __init__(self, C, Q, cell, outfile, barrier=None):
     assert len(C.params) == len(PARAMS_DEFAULT)
     threading.Thread.__init__(self)
     self.qpath = Q.siftpath
     if type(cell) is list:
         self.cellpath = [os.path.join(C.dbdir, c) for c in cell]
     else:
         self.cellpath = os.path.join(C.dbdir, cell)
     self.infodir = C.infodir
     self.celldir = C.dbdir
     self.outfile = outfile
     self.params = C.params
     self.criteria = C.criteria
     self.barrier = barrier
     self.dump = self.outfile + ('-detailed%s.npy' % DETAIL_VERSION)
     pyflann.set_distance_type(C.params['distance_type'])
     self.reader = reader.get_reader(C.params['descriptor'])
Example #15
 def __init__(self, C, Q, cell, outfile, barrier=None):
   assert len(C.params) == len(PARAMS_DEFAULT)
   threading.Thread.__init__(self)
   self.qpath = Q.siftpath
   if type(cell) is list:
     self.cellpath = [os.path.join(C.dbdir, c) for c in cell]
   else:
     self.cellpath = os.path.join(C.dbdir, cell)
   self.infodir = C.infodir
   self.celldir = C.dbdir
   self.outfile = outfile
   self.params = C.params
   self.criteria = C.criteria
   self.barrier = barrier
   self.dump = self.outfile + ('-detailed%s.npy' % DETAIL_VERSION)
   pyflann.set_distance_type(C.params['distance_type'])
   self.reader = reader.get_reader(C.params['descriptor'])
Example #16
  def map_tags_hybrid2(self, pixelmap, C):
    """Tags are filtered by using the image's pt cloud
       when source tag is visible in the db image. Otherwise,
       a combination of earthmine occlusion queries and
       database occlusion queries are performed."""
    tags = self.map_tags_camera(self.lat, self.lon)
    accepted = []
    outside = []
    bad = []
    THRESHOLD = 15
    outside = tags # XXX
#    for (tag, (_, pixel)) in tags:
#      location = pixelmap[geom.picknearest(pixelmap, *pixel)]
#      if location is None:
#        if not geom.contains(pixel, self.image.size):
#          outside.append((tag, (_, pixel)))
#        else:
#          bad.append((tag, (999, pixel)))
#      else:
#        dist = tag.xydistance(location)
#        if dist < THRESHOLD:
#          accepted.append((tag, (_, pixel)))
#        elif not geom.contains(pixel, self.image.size):
#          outside.append((tag, (_, pixel)))
#        else:
#          bad.append((tag, (999, pixel)))

    cell = util.getclosestcell(self.lat, self.lon, C.dbdir)[0]
    cellpath = os.path.join(C.dbdir, cell)
    pm = reader.get_reader(C.params['descriptor'])\
      .load_PointToViewsMap(cellpath, C.infodir)

    for (tag, (_, pixel)) in outside:
      vis, t = pm.hasView(C, tag.lat, tag.lon, self.lat, self.lon, self.yaw, 30)
      emv = tag.emIsVisible(self.source, C, 30)
      if (vis or emv):
        if geom.norm_compatible(tag, self):
          accepted.append((tag, (_, pixel)))
        else:
          bad.append((tag, (12, pixel)))
      else:
        bad.append((tag, (17, pixel)))

    return accepted + bad
Example #17
  def map_tags_hybrid(self, pixelmap, C, elat, elon):
    """Uses tag projection from estimated lat, lon.
       Tags are filtered by using the image's pt cloud
       when source tag is visible in the db image. Otherwise,
       3d occlusion detection is performed."""
    THRESHOLD = 15.0 # meters
    tags = self.map_tags_camera(elat, elon)
    accepted = []
    outside = []
    bad = []
    for (tag, (_, pixel)) in tags:
      location = pixelmap[geom.picknearest(pixelmap, *pixel)]
      if location is None:
        if not geom.contains(pixel, self.image.size):
          outside.append((tag, (_, pixel)))
        else:
          bad.append((tag, (999, pixel)))
      else:
        dist = tag.xydistance(location)
        if dist < THRESHOLD:
          accepted.append((tag, (_, pixel)))
        elif not geom.contains(pixel, self.image.size):
          outside.append((tag, (_, pixel)))
        else:
          bad.append((tag, (999, pixel)))

    # use ocs method for tags outside db image
    cell = util.getclosestcell(self.lat, self.lon, C.dbdir)[0]
    cellpath = os.path.join(C.dbdir, cell)
    tree3d = reader.get_reader(C.params['descriptor']).load_tree3d(cellpath, C)
    for (tag, (d, pixel)) in outside:
#      if tag.isVisible2(self.source, tree3d, elat, elon):
#        accepted.append((tag, (_, pixel)))
#      else:
#        bad.append((tag, (15, pixel)))
        accepted.append((tag, (d, pixel)))

    return accepted + bad
Example #18
def highresSift(C, Q, dbmatch):

    # timing info
    start = time.time()

    # set sift paths
    qname = Q.name
    dbsift = os.path.join(C.hiresdir, dbmatch + 'sift.txt')
    qsift = os.path.join(C.querydir, 'hires', qname + 'sift.txt')

    # high res rematch parameters
    maxmatch, maxangle, maxdist = 1, np.pi/3, 10**7
    maxratio = C.pose_param['maxratio']

    # rematch file
    filename = qname + ';' + dbmatch + ';maxratio=' + str(maxratio) + \
                    ';maxmatch=' + str(maxmatch) + ';maxdist=' + \
                    str(maxdist/1000) + 'k;maxangle=' + \
                    str(int(round(180/np.pi*maxangle))) + '.npz'
    hrRematchFile = os.path.join( C.querydir, 'hires', 'siftmatch', filename )

    ### HIGH RES REMATCH ###
    matches = {}
    if not os.path.isdir(os.path.dirname(hrRematchFile)): os.mkdir(os.path.dirname(hrRematchFile))
    if os.path.isfile(hrRematchFile): # load nearest neighbor data
        print('Loading high-res sift matches...')
        match_data = np.load(hrRematchFile)
        matches['nmat'] = len(match_data['q2d'])
        matches['numq'] = len(match_data['qidx'])-1
        matches['qidx'] = match_data['qidx']
        matches['q2d'] = match_data['q2d']
        matches['qprm'] = match_data['qprm']
        matches['d2d'] = match_data['d2d']
        matches['dprm'] = match_data['dprm']
        matches['nnd'] = match_data['nnd']
    else: # generate nearest neighbor data and save
        print('Generating high-res sift matches...')
        rdr = reader.get_reader('sift')
        q = rdr.load_file(qsift)
        db = rdr.load_file(dbsift)
        flann = pyflann.FLANN()
        results, dists = flann.nn(db['vec'], q['vec'], 1+maxmatch, algorithm='linear')
        matches = {'qidx': np.array([]), \
                   'd2d': np.zeros([0,2]), \
                   'dprm': np.zeros([0,2]), \
                   'q2d': np.zeros([0,2]), \
                   'qprm': np.zeros([0,2]), \
                   'nnd': np.array([]) }
        nmat, numq = 0, 0
        for i in range(len(results)):
            grads = np.mod(np.array([q[i]['geom'][3]-db[results[i][k]]['geom'][3] for k in range(maxmatch)]),2*np.pi)
            dist = np.float_(np.array(dists[i]))
            high_dist = dist[-1]
            ratios = dist[:maxmatch]/high_dist
            idx = np.nonzero( np.logical_and( ratios<maxratio, \
                np.logical_and( np.array(dists[i][:maxmatch])<maxdist , \
                np.logical_or( grads<maxangle , grads-2*np.pi>-maxangle ) ) ) )[0]
#            idx = np.nonzero( np.ones(len(ratios)) )[0]
            qadd = len(idx)
            if qadd == 0:
                continue
            q2d = np.tile(np.array([q[i]['geom'][1],q[i]['geom'][0]]),[qadd,1])
            qprm = np.tile(np.array(q[i]['geom'][2:].copy()),[qadd,1])
            d2d = np.array( [ [ db[results[i][k]]['geom'][1] , db[results[i][k]]['geom'][0] ] for k in idx ] )
            dprm = np.array( [ db[results[i][k]]['geom'][2:].copy() for k in idx ] )
            nnd = np.array( [ dists[i][k] for k in idx ] )
            matches['d2d'] = np.append(matches['d2d'],d2d,axis=0)
            matches['dprm'] = np.append(matches['dprm'],dprm,axis=0)
            matches['q2d'] = np.append(matches['q2d'],q2d,axis=0)
            matches['qprm'] = np.append(matches['qprm'],qprm,axis=0)
            matches['nnd'] = np.append(matches['nnd'],nnd,axis=0)
            matches['qidx'] = np.append(matches['qidx'],nmat)
            nmat += qadd
            numq += 1
        matches['qidx'] = np.append(matches['qidx'],nmat)
        matches['nmat'] = nmat
        matches['numq'] = numq
        print('High res rematch took %.1f seconds.' % (time.time() - start))
        np.savez( hrRematchFile, qidx=matches['qidx'], d2d=matches['d2d'], dprm=matches['dprm'],
                  q2d=matches['q2d'], qprm=matches['qprm'], nnd=matches['nnd'] )
        # END OF RUNNING NN SEARCH OR LOADING NN FILE
    # fill in other keys
    nmat = matches['nmat']
    numq = matches['numq']
    matches['dray'] = np.zeros([nmat,3])
    matches['qray'] = np.zeros([nmat,3])
    matches['w3d'] = np.zeros([nmat,3])
    matches['ddep'] = np.zeros(nmat)
    matches['plane'] = np.int_( -1 * np.ones(nmat) )
    matches['weight'] = np.zeros(nmat)
    matches['imask'] = np.bool_(np.zeros(nmat))
    matches['hvrf'] = 0
    # Print rematch statistics
    print('Number of query features matched: %.0f' % numq)
    print('Total number of feature matches: %.0f' % nmat)
    print('Average number of database matches considered = %.1f' % (float(nmat) / numq))
    return matches
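Note: the idx filter in highresSift is a SIFT ratio test combined with absolute-distance and gradient-angle cuts. The ratio-test core on toy data (with maxmatch=1 two neighbors are fetched, and the second acts as the ratio denominator):

import numpy as np

dists = np.array([[100.0, 400.0]])  # nearest and 2nd-nearest distance per query
maxratio, maxdist = 0.8, 300.0
ratios = dists[:, 0] / dists[:, 1]
keep = np.logical_and(ratios < maxratio, dists[:, 0] < maxdist)
print(keep)  # [ True]: 100/400 = 0.25 < 0.8 and 100 < 300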
Example #19
    def open(self, featurefile, info=0):
        """Returns map of (x,y) => {'lat':lat, lon, alt}"""
        name = os.path.basename(featurefile)[:-4]  # gps coords, angle
        view = os.path.basename(featurefile)[:-8]  # + descriptor type
        if info == 0:
            cached = os.path.join(self.datastore, name) + '.npy'
        else:
            cached = os.path.join(self.datastore, 'highres', name) + '.npy'
        if not os.path.exists(cached):
            INFO("*** fetching pixel data from earthmine ***")
            data = self.ddFetch(featurefile, view, info)
            save_atomic(lambda d: np.save(d, data), cached)
        return np.load(cached).item()


if __name__ == '__main__':
    mapper = PixelMap(
        '/home/ericl/shiraz/Research/collected_images/earthmine-fa10.1/37.871955,-122.270829'
    )
    superdir = '/home/ericl/shiraz/Research/cells/g=100,r=d=236.6/'
    for dir in os.listdir(superdir):
        dir = os.path.join(superdir, dir)
        if os.path.isdir(dir):
            for f in get_reader('sift').get_feature_files_in_dir(dir):
                try:
                    mapper.open(f)
                except Exception:
                    pass  # best-effort cache warm-up; skip files that fail to load

# vim: et sw=2
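Note: open() caches a Python dict inside a .npy file. The round trip it depends on, minus the project's save_atomic helper (not shown here); allow_pickle=True is required on NumPy >= 1.16.3:

import numpy as np

data = {(0, 0): {'lat': 37.871955, 'lon': -122.270829, 'alt': 5.0}}
np.save('pixels.npy', data)  # the dict is pickled inside a 0-d object array
loaded = np.load('pixels.npy', allow_pickle=True).item()
assert loaded == data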
Example #20
def match(C, Q):
    if C.shuffle_cells:
      C._dbdir = None
    if C.override_cells:
      INFO('override cells')
      cells_in_range = [(c,0) for c in C.override_cells]
    else:
      # compute closest cells
      closest_cells = util.getclosestcells(Q.query_lat, Q.query_lon, C.dbdir)
      if C.restrict_cells:
        closest_cells = [c for c in closest_cells if c[0] in C.restrict_cells]
      cells_in_range = [(cell, dist)
        for cell, dist in closest_cells[0:C.ncells]
          if dist < C.cellradius + C.ambiguity + C.matchdistance]
    INFO('Using %d cells' % len(cells_in_range))
    if C.shuffle_cells:
      import reader
      sr = reader.get_reader('sift')
      supercell = sr.get_supercelldir(
        C.dbdir,
        [c for (c,d) in cells_in_range],
        C.overlap_method)
      C._dbdir = supercell

    if not cells_in_range:
        raise LocationOutOfRangeError

    # cache for fuzz runs
    if C.cacheEnable:
        key = derive_key(C, cells_in_range, Q.siftname)
        if key in cache:
            print('cache hit')
            return cache[key]
        else:
            print('cache miss')

    # compute output file paths for the cells

    cellpath = [c for c,d in cells_in_range]
    listofimages = []
    if C.one_big_cell:
      INFO('Using 1 big cell (%d union)' % len(cells_in_range))
      outputFilePaths = [os.path.join(C.matchdir, Q.siftname + ',' + getcellid(cellpath) + ".res")]
      #listofimages = lexiconrank.addImagetoList(listofimages, C.dbdir + cellpath)
      cellpath = [cellpath]
    else:
      outputFilePaths = []
      for cell, dist in cells_in_range:
          if ',' in cell:
            latcell, loncell = cell.split(',')
            latcell = float(latcell)
            loncell = float(loncell)
          else:
            latcell, loncell = 0,0
          actualdist = info.distance(Q.query_lat, Q.query_lon, latcell, loncell)
          outputFilePath = os.path.join(C.matchdir, Q.siftname + ',' + cell + ',' + str(actualdist)  + ".res")
          outputFilePaths.append(outputFilePath)
          #listofimages = lexiconrank.addImagetoList(listofimages, C.dbdir + cell)

    # start query
    query.run_parallel(C, Q, cellpath, outputFilePaths, estimate_threads_avail())
    #d, lexiconmatchedimg = lexiconrank.returnTopMatch_random(C.dbdump, listofimages, Q.jpgpath)

    # combine results
    if C.spatial_comb:
      comb_matches = corr.combine_spatial(outputFilePaths)
    else:
      print(outputFilePaths)
      comb_matches = corr.combine_matches(outputFilePaths)

    #geometric consistency reranking
    if C.disable_filter_step:
      imm = condense2(sorted(comb_matches.items(), key=lambda x: len(x[1]), reverse=True))
      rsc_ok = True
    else:
      imm, rsc_ok = rerank_ransac(comb_matches, C, Q)

    if C.weight_by_coverage:
      #print 1
      ranked = weight_by_coverage(C, Q, imm)
    elif C.weight_by_distance:
      #print 2
      ranked = weight_by_distance(C, Q, imm)
    else:
      #print 3
      ranked = distance_sort(C, Q, imm)

    # top 1
    stats = check_topn_img(C, Q, ranked, 1)

    # return statistics and top result
    matchedimg = ranked[0][0]
    matches = comb_matches[matchedimg + 'sift.txt']
    if C.cacheEnable:
        cache[key] = (stats, matchedimg, matches, ranked)
    if C.match_callback:
        C.match_callback(C, Q, stats, matchedimg, ranked, cells_in_range, rsc_ok)

    # compute homography and draw images maybe
    if MultiprocessExecution.pool:
      MultiprocessExecution.pool.apply_async(compute_hom, [C.pickleable(), Q, ranked, comb_matches])
    else:
      compute_hom(C, Q, ranked, comb_matches)

    ### Query Pose Estimation ###
    match = any(check_img(C, Q, ranked[0]))
    if (C.solve_pose and match and Q.name not in C.pose_remove) or C.pose_param['solve_bad']:
        #computePose.draw_dbimage(C, Q, matchedimg, match)
        if MultiprocessExecution.pool:
            MultiprocessExecution.pool.apply_async(computePose.estimate_pose, [C.pickleable(), Q, matchedimg, match])
        else:
            computePose.estimate_pose(C, Q, matchedimg, match)

    # done
    return stats, matchedimg, matches, ranked
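Note: the fuzz-run cache in match is plain memoization over a key derived from the cells and the query name. The idiom stripped of everything else (derive_key here is a hypothetical stand-in, not the project's implementation):

cache = {}

def derive_key(cells, siftname):  # hypothetical stand-in
    return (tuple(sorted(cells)), siftname)

def cached_match(cells, siftname, compute):
    key = derive_key(cells, siftname)
    if key in cache:
        return cache[key]  # cache hit
    result = compute()     # cache miss: run the full query
    cache[key] = result
    return result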
Example #21
def train(args):
    # get the GPU place for this process
    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)
    print(place)

    with fluid.dygraph.guard(place):
        # multi-GPU parallel context
        strategy = fluid.dygraph.parallel.prepare_context()
        print('strategy', strategy)

        # parse config
        config = parse_config(args.config)
        train_config = merge_configs(config, 'train', vars(args))
        valid_config = merge_configs(config, 'valid', vars(args))
        print_configs(train_config, 'Train')
        print(train_config)

        # if args.fix_random_seed:
        #     startup.random_seed = 1000
        #     train_prog.random_seed = 1000

        train_model = Tpn_Model(
            None, cfg=train_config, mode='train'
        )  # models.get_model(args.model_name, train_config, mode='train')

        valid_model = Tpn_Model(
            None
        )  # models.get_model(args.model_name, valid_config, mode='valid')
        train_model.build_input()
        train_dataloader = train_model.dataloader()
        opt = train_model.optimizer()

        # load weights
        weight, _ = fluid.load_dygraph('./ckpt/k400_tpn_r50f32s2')
        model_weights = train_model.state_dict()
        model_weights.update(
            {k: v
             for k, v in weight.items() if k in model_weights})
        train_model.load_dict(model_weights)
        print('load model success')

        # wrap the model for data-parallel training
        train_model = fluid.dygraph.parallel.DataParallel(
            train_model, strategy)

        log_interval = args.log_interval
        is_profiler = args.is_profiler
        profiler_path = args.profiler_path
        trainer_id = 0
        fix_random_seed = args.fix_random_seed
        save_dir = args.save_dir
        save_model_name = args.model_name

        # if args.resume:
        #     # if resume weights is given, load resume weights directly
        #     assert os.path.exists(args.resume + '.pdparams'), \
        #         "Given resume weight dir {}.pdparams not exist.".format(args.resume)
        #     fluid.load(train_prog, model_path=args.resume, executor=exe)
        # else:
        #     # if not in resume mode, load pretrain weights
        #     if args.pretrain:
        #         assert os.path.exists(args.pretrain), \
        #             "Given pretrain weight dir {} not exist.".format(args.pretrain)
        #     pretrain = args.pretrain or train_model.get_pretrain_weights()
        #     if pretrain:
        #         train_model.load_pretrain_params(exe, pretrain, train_prog, place)

        # get reader
        bs_denominator = 1
        if args.use_gpu:
            # check number of GPUs
            gpus = os.getenv("CUDA_VISIBLE_DEVICES", "")
            if gpus == "":
                pass
            else:
                gpus = gpus.split(",")
                num_gpus = len(gpus)
                assert num_gpus == train_config.TRAIN.num_gpus, \
                    "num_gpus({}) set by CUDA_VISIBLE_DEVICES " \
                    "shoud be the same as that " \
                    "set in {}({})".format(
                        num_gpus, args.config, train_config.TRAIN.num_gpus)
            bs_denominator = train_config.TRAIN.num_gpus

        train_config.TRAIN.batch_size = int(train_config.TRAIN.batch_size /
                                            bs_denominator)
        valid_config.VALID.batch_size = int(valid_config.VALID.batch_size /
                                            bs_denominator)
        train_reader = get_reader(args.model_name.upper(), 'train',
                                  train_config)
        valid_reader = get_reader(args.model_name.upper(), 'valid',
                                  valid_config)

        # get metrics
        train_metrics = get_metrics(args.model_name.upper(), 'train',
                                    train_config)
        valid_metrics = get_metrics(args.model_name.upper(), 'valid',
                                    valid_config)

        epochs = args.epoch  #or train_model.epoch_num()

        print()

        train_dataloader.set_sample_list_generator(train_reader, places=place)
        # valid_dataloader.set_sample_list_generator(valid_reader, places=exe_places)

        # multi-GPU data loading: each process must read different data
        train_dataloader = fluid.contrib.reader.distributed_batch_reader(
            train_dataloader)

        train_model.train()

        for epoch in range(epochs):
            log_lr_and_step()
            train_iter = 0
            epoch_periods = []
            cur_time = time.time()
            for data in train_dataloader():
                train_outs = train_model(data)
                losses, _, _ = train_outs
                log_vars = OrderedDict()
                for loss_name, loss_value in losses.items():
                    # print(loss_name, ':', loss_value.numpy())
                    log_vars[loss_name] = fluid.layers.reduce_mean(loss_value)
                    # print(loss_name, ':', log_vars[loss_name].numpy())

                loss = sum(_value for _key, _value in log_vars.items()
                           if 'loss' in _key)
                # print('total loss', loss.numpy())

                train_outs = [
                    loss.numpy(), train_outs[1].numpy(), train_outs[2].numpy()
                ]

                # print(train_outs[0])
                # print(train_outs[1].shape)
                # print(train_outs[2].shape)

                # classification results (commented out)
                # prob = softmax(train_outs[1].squeeze())
                #
                # idx = np.argsort(-prob)
                # #print('idx', idx)
                # for i in range(0, 5):
                #     print('{:.3f} -> {}'.format(prob[idx[i]], [idx[i]]),train_outs[2])

                avg_loss = loss
                # multi-GPU training requires scaling the loss and aggregating parameter gradients across devices
                #avg_loss = train_model.scale_loss(avg_loss)

                avg_loss.backward()
                # multi-GPU
                #train_model.apply_collective_grads()

                opt.minimize(avg_loss)
                train_model.clear_gradients()
                period = time.time() - cur_time
                epoch_periods.append(period)
                timeStamp = time.time()
                localTime = time.localtime(timeStamp)
                strTime = time.strftime("%Y-%m-%d %H:%M:%S", localTime)

                if log_interval > 0 and (train_iter % log_interval == 0):
                    train_metrics.calculate_and_log_out(train_outs, \
                                                        info='[TRAIN {}] Epoch {}, iter {}, time {}, '.format(strTime,
                                                                                                              epoch,
                                                                                                              train_iter,
                                                                                                              period))

                    # print('[TRAIN {}] Epoch {}, iter {}, time {}, total_loss {}, loss_cls {},loss_aux {}'.
                    #       format(strTime, epoch, train_iter, period, loss.numpy(),
                    #              log_vars['loss_cls'].numpy(), log_vars['loss_aux'].numpy()
                    #              ))
                train_iter += 1
                cur_time = time.time()

                # NOTE: profiler tools, used for benchmark
                if is_profiler and epoch == 0 and train_iter == log_interval:
                    profiler.start_profiler("All")
                elif is_profiler and epoch == 0 and train_iter == log_interval + 5:
                    profiler.stop_profiler("total", profiler_path)
                    return
            if len(epoch_periods) < 1:
                logger.info(
                    'No iteration was executed, please check the data reader')
                sys.exit(1)

            logger.info(
                '[TRAIN] Epoch {} training finished, average time: {}'.format(
                    epoch, np.mean(epoch_periods[1:])))

            # if trainer_id == 0:
            #     save_model(exe, train_prog, save_dir, save_model_name,
            #                "_epoch{}".format(epoch))
            # if compiled_test_prog and valid_interval > 0 and (
            #         epoch + 1) % valid_interval == 0:
            #     test_with_dataloader(exe, compiled_test_prog, test_dataloader,
            #                          test_fetch_list, test_metrics, log_interval,
            #                          save_model_name)

        if trainer_id == 0:
            # save_model(exe, train_prog, save_dir, save_model_name)
            fluid.save_dygraph(train_model.state_dict(),
                               "{}/{}".format(save_dir, save_model_name))
            fluid.save_dygraph(opt.state_dict(),
                               "{}/{}}".format(save_dir, save_model_name))
        # when fixing the random seed for debugging
        if fix_random_seed:
            cards = os.environ.get('CUDA_VISIBLE_DEVICES')
            gpu_num = len(cards.split(","))
            print("kpis\ttrain_cost_card{}\t{}".format(gpu_num, loss))
            print("kpis\ttrain_speed_card{}\t{}".format(
                gpu_num, np.mean(epoch_periods)))
Example #22
def get_menu():
    '''
    Returns the list of lines from the menu
    '''
    return jsonify(get_reader())
Example #23
def infer(args):
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        # parse config
        config = parse_config(args.config)
        infer_config = merge_configs(config, 'infer', vars(args))
        print_configs(infer_config, "Infer")

        infer_model = Tpn_Model(None, cfg=infer_config, mode='test')
        infer_model.build_input(use_dataloader=False)

        infer_model.eval()

        filelist = args.filelist or infer_config.INFER.filelist
        filepath = args.video_path or infer_config.INFER.get('filepath', '')
        if filepath != '':
            assert os.path.exists(filepath), "{} not exist.".format(filepath)
        else:
            assert os.path.exists(filelist), "{} not exist.".format(filelist)

        # get infer reader
        infer_reader = get_reader(args.model_name.upper(), 'infer', infer_config)

        # if args.weights:
        # assert os.path.exists(
        #     args.weights), "Given weight dir {} not exist.".format(args.weights)
        weight, _ = fluid.load_dygraph(args.weights)
        model_weights = infer_model.state_dict()
        model_weights.update({k: v for k, v in weight.items()
                              if k in model_weights})
        infer_model.load_dict(model_weights)
        print('load model success')

        infer_metrics = get_metrics(args.model_name.upper(), 'infer', infer_config)
        infer_metrics.reset()

        periods = []
        cur_time = time.time()

        for infer_iter, data in enumerate(infer_reader()):
            # print(infer_iter, data)
            data_feed_in = [items[:-1] for items in data]
            video_id = [items[-1] for items in data]
            input = fluid.dygraph.to_variable(data_feed_in[0][0])
            #print(input.numpy().shape)
            input = fluid.layers.unsqueeze(input, 0)

            # x = np.load('/home/j/Desktop/TPN-master/save_data/x.npy')
            # x = fluid.dygraph.to_variable(x)

            data_feed_in = input

            # print('input shape', data_feed_in[0].shape)

            infer_outs = infer_model(data_feed_in)

            # print("infer_outs")
            # print(infer_outs)
            # print(infer_outs[1].numpy().shape)
            # infer_outs = [infer_outs[1].numpy(),video_id]

            pred = softmax(infer_outs.numpy())

            # classification results (commented out)
            # prob = softmax(infer_outs.numpy().squeeze())
            #
            # idx = np.argsort(-prob)
            # #print('idx', idx)
            # for i in range(0, 5):
            #     print('{:.3f} -> {}'.format(prob[idx[i]], [idx[i]]))

            infer_result_list = [pred, video_id]  # [item[1].numpy() for item in infer_outs] + [video_id]

            prev_time = cur_time
            cur_time = time.time()
            period = cur_time - prev_time
            periods.append(period)

            infer_metrics.accumulate(infer_result_list)

            if args.log_interval > 0 and infer_iter % args.log_interval == 0:
                logger.info('Processed {} samples'.format((infer_iter + 1) * len(
                    video_id)))

        logger.info('[INFER] infer finished. average time: {}'.format(
            np.mean(periods)))

        if not os.path.isdir(args.save_dir):
            os.makedirs(args.save_dir)

        infer_metrics.finalize_and_log_out(savedir=args.save_dir)
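Note: the softmax applied to infer_outs is defined elsewhere in the repo; a numerically stable numpy version consistent with how it is called here (an assumption, not the repo's own helper):

import numpy as np

def softmax(x):
    # subtract the row max before exponentiating to avoid overflow
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)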
Example #24
import flask
import json

import annotator
import article
import config
import reader
import writer
import numpy as np

application = flask.Flask(__name__)

anne = annotator.Annotator(reader.get_reader(config.reader)(**config.reader_params),
                           writer.get_writer(config.writer)(**config.writer_params))

valid_users = np.loadtxt('usernames.txt', delimiter=',', dtype='str')

"""
Display the main page.
"""
@application.route('/', methods=['GET'])
def index():
    return flask.render_template('index.html')

"""
Start the program.
"""
@application.route('/start/', methods=['GET', 'POST'])
def start():
    userid = flask.request.form['userid']
    if userid not in valid_users:
Example #25
def main():
    """
    Video classification model of 3000 Chinese tags.
    videotag_extractor_predictor (as videotag_TSN_AttentionLSTM)
    two stages in our model:
        1. extract features from the input video (mp4 format) using the extractor
        2. predict classification results from the extracted features using the predictor
    we implement this using two name scopes, i.e. extractor_scope and predictor_scope.
    """

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    extractor_config = parse_config(args.extractor_config)
    extractor_infer_config = merge_configs(extractor_config, 'infer',
                                           vars(args))
    extractor_start_time = time.time()
    extractor_scope = fluid.Scope()
    with fluid.scope_guard(extractor_scope):
        extractor_startup_prog = fluid.Program()
        extractor_main_prog = fluid.Program()
        with fluid.program_guard(extractor_main_prog, extractor_startup_prog):
            with fluid.unique_name.guard():
                # build model
                extractor_model = models.get_model(args.extractor_name,
                                                   extractor_infer_config,
                                                   mode='infer',
                                                   is_videotag=True)
                extractor_model.build_input(use_dataloader=False)
                extractor_model.build_model()
                extractor_feeds = extractor_model.feeds()
                extractor_fetch_list = extractor_model.fetches()

                place = fluid.CUDAPlace(
                    0) if args.use_gpu else fluid.CPUPlace()
                exe = fluid.Executor(place)

                exe.run(extractor_startup_prog)

                logger.info('load extractor weights from {}'.format(
                    args.extractor_weights))

                extractor_model.load_pretrain_params(exe,
                                                     args.extractor_weights,
                                                     extractor_main_prog)

                # get reader and metrics
                extractor_reader = get_reader(args.extractor_name, 'infer',
                                              extractor_infer_config)
                extractor_feeder = fluid.DataFeeder(place=place,
                                                    feed_list=extractor_feeds)

                feature_list = []
                file_list = []
                for idx, data in enumerate(extractor_reader()):
                    file_id = [item[-1] for item in data]
                    feed_data = [item[:-1] for item in data]
                    feature_out = exe.run(
                        fetch_list=extractor_fetch_list,
                        feed=extractor_feeder.feed(feed_data))
                    feature_list.append(feature_out[0])  #get out from list
                    file_list.append(file_id)
                    logger.info(
                        '========[Stage 1 Sample {} ] Extractor finished======'
                        .format(idx))
        extractor_end_time = time.time()
        print('extractor_time', extractor_end_time - extractor_start_time)

    predictor_config = parse_config(args.predictor_config)
    predictor_infer_config = merge_configs(predictor_config, 'infer',
                                           vars(args))

    # get Predictor input from Extractor output
    predictor_feed_list = []
    for i in range(len(feature_list)):
        feature_out = feature_list[i]
        if args.predictor_name == "AttentionCluster":
            extractor_seg_num = extractor_infer_config.INFER.seg_num
            predictor_seg_num = predictor_infer_config.MODEL.seg_num
            idxs = []
            stride = float(extractor_seg_num) / predictor_seg_num
            for j in range(predictor_seg_num):
                pos = (j + np.random.random()) * stride
                idxs.append(min(extractor_seg_num - 1, int(pos)))
            extractor_feature = feature_out[:, idxs, :].astype(
                float)  # get from bs dim
        else:
            extractor_feature = feature_out.astype(float)
        predictor_feed_data = [extractor_feature]
        predictor_feed_list.append((predictor_feed_data, file_list[i]))

    predictor_start_time = time.time()
    predictor_scope = fluid.Scope()
    with fluid.scope_guard(predictor_scope):
        predictor_startup_prog = fluid.Program()
        predictor_main_prog = fluid.Program()
        with fluid.program_guard(predictor_main_prog, predictor_startup_prog):
            with fluid.unique_name.guard():
                # parse config
                predictor_model = models.get_model(args.predictor_name,
                                                   predictor_infer_config,
                                                   mode='infer')
                predictor_model.build_input(use_dataloader=False)
                predictor_model.build_model()
                predictor_feeds = predictor_model.feeds()

                exe.run(predictor_startup_prog)

                logger.info('load predictor weights from {}'.format(
                    args.predictor_weights))
                predictor_model.load_test_weights(exe, args.predictor_weights,
                                                  predictor_main_prog)

                predictor_feeder = fluid.DataFeeder(place=place,
                                                    feed_list=predictor_feeds)
                predictor_fetch_list = predictor_model.fetches()
                predictor_metrics = get_metrics(args.predictor_name.upper(),
                                                'infer',
                                                predictor_infer_config)
                predictor_metrics.reset()

                for idx, data in enumerate(predictor_feed_list):
                    file_id = data[1]
                    predictor_feed_data = data[0]
                    final_outs = exe.run(
                        fetch_list=predictor_fetch_list,
                        feed=predictor_feeder.feed(predictor_feed_data))
                    logger.info(
                        '=======[Stage 2 Sample {} ] Predictor finished========'
                        .format(idx))
                    final_result_list = [item
                                         for item in final_outs] + [file_id]

                    predictor_metrics.accumulate(final_result_list)
                predictor_metrics.finalize_and_log_out(
                    savedir=args.save_dir, label_file=args.label_file)
    predictor_end_time = time.time()
    print('predictor_time', predictor_end_time - predictor_start_time)
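Note: the AttentionCluster branch above resamples extractor segments with a jittered stride. The index computation on its own (the segment counts are made up):

import numpy as np

extractor_seg_num, predictor_seg_num = 300, 100
stride = float(extractor_seg_num) / predictor_seg_num
idxs = []
for j in range(predictor_seg_num):
    pos = (j + np.random.random()) * stride  # random jitter within each stride
    idxs.append(min(extractor_seg_num - 1, int(pos)))
# idxs holds one roughly evenly spaced segment index per predictor slot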
Example #26
import flask
import writer

import annotator
import config
import reader
import numpy as np
from functools import reduce
from agreement_statistics import get_stat

application = flask.Flask(__name__)

anne = annotator.Annotator(
    reader.get_reader(config.reader)(**config.reader_params),
    writer.get_writer(config.writer)(**config.writer_params))
"""
Display the main page.
"""


@application.route('/', methods=['GET'])
def index():
    return flask.render_template('index.html')


"""

Only go to this if there are no more articles to be annotated.
"""

Example #27
      pixmap = self.cached[featurefile] = self.open(featurefile)
    return pixmap[x,y]
  
  def open(self, featurefile, info=0):
    """Returns map of (x,y) => {'lat':lat, lon, alt}"""
    name = os.path.basename(featurefile)[:-4] # gps coords, angle
    view = os.path.basename(featurefile)[:-8] # + descriptor type
    if info == 0:
        cached = os.path.join(self.datastore, name) + '.npy'
    else:
        cached = os.path.join(self.datastore, 'highres', name) + '.npy'
    if not os.path.exists(cached):
      INFO("*** fetching pixel data from earthmine ***")
      data = self.ddFetch(featurefile, view, info)
      save_atomic(lambda d: np.save(d, data), cached)
    return np.load(cached).item()

if __name__ == '__main__':
  mapper = PixelMap('/home/ericl/shiraz/Research/collected_images/earthmine-fa10.1/37.871955,-122.270829')
  superdir = '/home/ericl/shiraz/Research/cells/g=100,r=d=236.6/'
  for dir in os.listdir(superdir):
    dir = os.path.join(superdir, dir)
    if os.path.isdir(dir):
      for f in get_reader('sift').get_feature_files_in_dir(dir):
        try:
          mapper.open(f)
        except Exception:
          pass  # best-effort cache warm-up; skip files that fail to load

# vim: et sw=2
Example #28
def test(args):
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    # place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)
    with fluid.dygraph.guard(place):
        # parse config
        config = parse_config(args.config)
        test_config = merge_configs(config, 'test', vars(args))
        print_configs(test_config, "Test")
        use_dali = test_config['TEST'].get('use_dali', False)

        # build model
        test_model = Tpn_Model(None, cfg=test_config, mode='eval')
        test_model.build_input(use_dataloader=False)

        # test_dataloader = test_model.dataloader()

        #if args.weights:
        # assert os.path.exists(
        #     args.weights), "Given weight dir {} not exist.".format(args.weights)
        weight, _ = fluid.load_dygraph(args.weights)
        model_weights = test_model.state_dict()
        model_weights.update(
            {k: v
             for k, v in weight.items() if k in model_weights})
        test_model.load_dict(model_weights)
        print('load model success')

        # get reader and metrics
        test_reader = get_reader(args.model_name.upper(), 'test', test_config)
        test_metrics = get_metrics(args.model_name.upper(), 'test',
                                   test_config)

        test_model.eval()

        epoch_period = []
        for test_iter, data in enumerate(test_reader()):
            cur_time = time.time()
            video_id = [items[-1] for items in data]

            # print(len(data))
            # print(len(data[0]),len(data[1]),len(data[2]),len(data[3]))
            # print( data[0][0].shape)
            # print(data[0][1])

            input_data = []
            for i in range(len(data)):
                input_data.append(fluid.dygraph.to_variable(data[i][0]))

            # print(len(input_data))
            # print(input_data[0].shape)

            data_feed_in = fluid.layers.stack(input_data, 0)
            #print('data_feed_in.shape',data_feed_in.shape)

            #input = fluid.dygraph.to_variable(data)

            test_outs = test_model(data_feed_in)

            # print(test_outs.shape)
            # print('video_id',np.stack(video_id, axis=0))

            test_outs = [test_outs.numpy(), np.stack(video_id, axis=0)]

            period = time.time() - cur_time
            epoch_period.append(period)
            test_metrics.accumulate(test_outs)

            # metric here
            if args.log_interval > 0 and test_iter % args.log_interval == 0:
                info_str = '[EVAL] Batch {}'.format(test_iter)
                test_metrics.calculate_and_log_out(test_outs, info_str)

        if not os.path.isdir(args.save_dir):
            os.makedirs(args.save_dir)
        test_metrics.finalize_and_log_out("[EVAL] eval finished. ",
                                          args.save_dir)
Example #29
def train(args):
    # parse config
    config = parse_config(args.config)
    train_config = merge_configs(config, 'train', vars(args))
    valid_config = merge_configs(config, 'valid', vars(args))
    print_configs(train_config, 'Train')
    train_model = models.get_model(args.model_name, train_config, mode='train')
    valid_model = models.get_model(args.model_name, valid_config, mode='valid')

    # build model
    startup = fluid.Program()
    train_prog = fluid.Program()
    if args.fix_random_seed:
        startup.random_seed = 1000
        train_prog.random_seed = 1000
    with fluid.program_guard(train_prog, startup):
        with fluid.unique_name.guard():
            train_model.build_input(use_dataloader=True)
            train_model.build_model()
            # for the input, has the form [data1, data2,..., label], so train_feeds[-1] is label
            train_feeds = train_model.feeds()
            train_fetch_list = train_model.fetches()
            train_loss = train_fetch_list[0]
            optimizer = train_model.optimizer()
            optimizer.minimize(train_loss)
            train_dataloader = train_model.dataloader()

    valid_prog = fluid.Program()
    with fluid.program_guard(valid_prog, startup):
        with fluid.unique_name.guard():
            valid_model.build_input(use_dataloader=True)
            valid_model.build_model()
            valid_feeds = valid_model.feeds()
            valid_fetch_list = valid_model.fetches()
            valid_dataloader = valid_model.dataloader()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup)

    if args.resume:
        # if resume weights is given, load resume weights directly
        assert os.path.exists(args.resume + '.pdparams'), \
                "Given resume weight dir {}.pdparams not exist.".format(args.resume)
        fluid.load(train_prog, model_path=args.resume, executor=exe)
    else:
        # if not in resume mode, load pretrain weights
        if args.pretrain:
            assert os.path.exists(args.pretrain), \
                    "Given pretrain weight dir {} not exist.".format(args.pretrain)
        pretrain = args.pretrain or train_model.get_pretrain_weights()
        if pretrain:
            train_model.load_pretrain_params(exe, pretrain, train_prog, place)

    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = True
    if args.model_name in ['CTCN']:
        build_strategy.enable_sequential_execution = True

    exec_strategy = fluid.ExecutionStrategy()

    compiled_train_prog = fluid.compiler.CompiledProgram(
        train_prog).with_data_parallel(loss_name=train_loss.name,
                                       build_strategy=build_strategy,
                                       exec_strategy=exec_strategy)
    compiled_valid_prog = fluid.compiler.CompiledProgram(
        valid_prog).with_data_parallel(share_vars_from=compiled_train_prog,
                                       build_strategy=build_strategy,
                                       exec_strategy=exec_strategy)

    # get reader
    bs_denominator = 1
    if args.use_gpu:
        # check number of GPUs
        gpus = os.getenv("CUDA_VISIBLE_DEVICES", "")
        if gpus == "":
            pass
        else:
            gpus = gpus.split(",")
            num_gpus = len(gpus)
            assert num_gpus == train_config.TRAIN.num_gpus, \
                   "num_gpus({}) set by CUDA_VISIBLE_DEVICES " \
                   "shoud be the same as that " \
                   "set in {}({})".format(
                   num_gpus, args.config, train_config.TRAIN.num_gpus)
        bs_denominator = train_config.TRAIN.num_gpus

    train_config.TRAIN.batch_size = int(train_config.TRAIN.batch_size /
                                        bs_denominator)
    valid_config.VALID.batch_size = int(valid_config.VALID.batch_size /
                                        bs_denominator)
    train_reader = get_reader(args.model_name.upper(), 'train', train_config)
    valid_reader = get_reader(args.model_name.upper(), 'valid', valid_config)

    # get metrics
    train_metrics = get_metrics(args.model_name.upper(), 'train', train_config)
    valid_metrics = get_metrics(args.model_name.upper(), 'valid', valid_config)

    epochs = args.epoch or train_model.epoch_num()

    exe_places = fluid.cuda_places() if args.use_gpu else fluid.cpu_places()
    train_dataloader.set_sample_list_generator(train_reader, places=exe_places)
    valid_dataloader.set_sample_list_generator(valid_reader, places=exe_places)

    train_with_dataloader(exe,
                          train_prog,
                          compiled_train_prog,
                          train_dataloader,
                          train_fetch_list,
                          train_metrics,
                          epochs=epochs,
                          log_interval=args.log_interval,
                          valid_interval=args.valid_interval,
                          save_dir=args.save_dir,
                          save_model_name=args.model_name,
                          fix_random_seed=args.fix_random_seed,
                          compiled_test_prog=compiled_valid_prog,
                          test_dataloader=valid_dataloader,
                          test_fetch_list=valid_fetch_list,
                          test_metrics=valid_metrics,
                          is_profiler=args.is_profiler,
                          profiler_path=args.profiler_path)
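
The batch_size arithmetic above divides the configured global batch size by
the GPU count, because with_data_parallel feeds each device batch_size
samples per step. A minimal sketch of that logic as a standalone helper
(per_device_batch_size is hypothetical, for illustration only):

import os

def per_device_batch_size(global_batch_size, use_gpu, configured_num_gpus):
    # mirror the bs_denominator logic: on GPU, the configured batch size
    # is global and is split evenly across the devices
    if not use_gpu:
        return global_batch_size
    gpus = os.getenv("CUDA_VISIBLE_DEVICES", "")
    if gpus:
        assert len(gpus.split(",")) == configured_num_gpus
    return global_batch_size // configured_num_gpus
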
Example #30
0
def highresSift(C, Q, dbmatch):

    # timing info
    start = time.time()

    # set sift paths
    qname = Q.name
    dbsift = os.path.join(C.hiresdir, dbmatch + 'sift.txt')
    qsift = os.path.join(C.querydir, 'hires', qname + 'sift.txt')

    # high res rematch parameters
    maxmatch, maxangle, maxdist = 1, np.pi / 3, 10**7
    maxratio = C.pose_param['maxratio']

    # rematch file
    filename = qname + ';' + dbmatch + ';maxratio=' + str(maxratio) + \
                    ';maxmatch=' + str(maxmatch) + ';maxdist=' + \
                    str(maxdist/1000) + 'k;maxangle=' + \
                    str(int(round(180/np.pi*maxangle))) + '.npz'
    hrRematchFile = os.path.join(C.querydir, 'hires', 'siftmatch', filename)

    ### HIGH RES REMATCH ###
    matches = {}
    if not os.path.isdir(os.path.dirname(hrRematchFile)):
        os.mkdir(os.path.dirname(hrRematchFile))
    if os.path.isfile(hrRematchFile):  # load nearest neighbor data
        print('Loading high-res sift matches...')
        match_data = np.load(hrRematchFile)
        matches['nmat'] = len(match_data['q2d'])
        matches['numq'] = len(match_data['qidx']) - 1
        matches['qidx'] = match_data['qidx']
        matches['q2d'] = match_data['q2d']
        matches['qprm'] = match_data['qprm']
        matches['d2d'] = match_data['d2d']
        matches['dprm'] = match_data['dprm']
        matches['nnd'] = match_data['nnd']
    else:  # generate nearest neighbor data and save
        print('Generating high-res sift matches...')
        rdr = reader.get_reader('sift')
        q = rdr.load_file(qsift)
        db = rdr.load_file(dbsift)
        flann = pyflann.FLANN()
        results, dists = flann.nn(db['vec'],
                                  q['vec'],
                                  1 + maxmatch,
                                  algorithm='linear')
        matches = {'qidx': np.array([]),
                   'd2d': np.zeros([0, 2]),
                   'dprm': np.zeros([0, 2]),
                   'q2d': np.zeros([0, 2]),
                   'qprm': np.zeros([0, 2]),
                   'nnd': np.array([])}
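        # Note on the layout built below: 'qidx' stores cumulative row
        # offsets, CSR style -- the matches contributed by the j-th
        # accepted query feature occupy rows qidx[j]:qidx[j+1] of the
        # q2d/qprm/d2d/dprm/nnd arrays.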
        nmat, numq = 0, 0
        for i in range(len(results)):
            grads = np.mod(
                np.array([
                    q[i]['geom'][3] - db[results[i][k]]['geom'][3]
                    for k in range(maxmatch)
                ]), 2 * np.pi)
            dist = np.float_(np.array(dists[i]))
            high_dist = dist[-1]
            ratios = dist[:maxmatch] / high_dist
            idx = np.nonzero(np.logical_and(
                ratios < maxratio,
                np.logical_and(
                    np.array(dists[i][:maxmatch]) < maxdist,
                    np.logical_or(grads < maxangle,
                                  grads - 2 * np.pi > -maxangle))))[0]
            qadd = len(idx)
            if qadd == 0:
                continue
            q2d = np.tile(np.array([q[i]['geom'][1], q[i]['geom'][0]]),
                          [qadd, 1])
            qprm = np.tile(np.array(q[i]['geom'][2:].copy()), [qadd, 1])
            d2d = np.array(
                [[db[results[i][k]]['geom'][1], db[results[i][k]]['geom'][0]]
                 for k in idx])
            dprm = np.array(
                [db[results[i][k]]['geom'][2:].copy() for k in idx])
            nnd = np.array([dists[i][k] for k in idx])
            matches['d2d'] = np.append(matches['d2d'], d2d, axis=0)
            matches['dprm'] = np.append(matches['dprm'], dprm, axis=0)
            matches['q2d'] = np.append(matches['q2d'], q2d, axis=0)
            matches['qprm'] = np.append(matches['qprm'], qprm, axis=0)
            matches['nnd'] = np.append(matches['nnd'], nnd, axis=0)
            matches['qidx'] = np.append(matches['qidx'], nmat)
            nmat += qadd
            numq += 1
        matches['qidx'] = np.append(matches['qidx'], nmat)
        matches['nmat'] = nmat
        matches['numq'] = numq
        print('High res rematch took %.1f seconds.' % (time.time() - start))
        np.savez(hrRematchFile,
                 qidx=matches['qidx'],
                 d2d=matches['d2d'],
                 dprm=matches['dprm'],
                 q2d=matches['q2d'],
                 qprm=matches['qprm'],
                 nnd=matches['nnd'])
        # END OF RUNNING NN SEARCH OR LOADING NN FILE
    # fill in other keys
    nmat = matches['nmat']
    numq = matches['numq']
    matches['dray'] = np.zeros([nmat, 3])
    matches['qray'] = np.zeros([nmat, 3])
    matches['w3d'] = np.zeros([nmat, 3])
    matches['ddep'] = np.zeros(nmat)
    matches['plane'] = np.int_(-1 * np.ones(nmat))
    matches['weight'] = np.zeros(nmat)
    matches['imask'] = np.bool_(np.zeros(nmat))
    matches['hvrf'] = 0
    # Print rematch statistics
    print('Number of query features matched: %.0f' % numq)
    print('Total number of feature matches: %.0f' % nmat)
    print('Average number of database matches considered = %.1f' %
          (float(nmat) / numq))
    return matches
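
The per-feature filter in highresSift is essentially a Lowe-style ratio
test: flann returns 1 + maxmatch neighbors per query feature, and a
candidate is kept only when its distance is well below the worst returned
neighbor's distance (the angle and absolute-distance constraints are
omitted here). A minimal sketch of just the ratio step (ratio_test is a
hypothetical helper for illustration):

import numpy as np

def ratio_test(neighbor_dists, maxratio):
    # neighbor_dists holds maxmatch + 1 ascending distances for one query
    # feature; the last entry serves as the "background" distance
    dists = np.asarray(neighbor_dists, dtype=float)
    ratios = dists[:-1] / dists[-1]
    return np.nonzero(ratios < maxratio)[0]
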
Example #31
0
def infer(args):
    # parse config
    config = parse_config(args.config)
    infer_config = merge_configs(config, 'infer', vars(args))
    print_configs(infer_config, "Infer")
    infer_model = models.get_model(args.model_name, infer_config, mode='infer')
    infer_model.build_input(use_dataloader=False)
    infer_model.build_model()
    infer_feeds = infer_model.feeds()
    infer_outputs = infer_model.outputs()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    filelist = args.filelist or infer_config.INFER.filelist
    filepath = args.video_path or infer_config.INFER.get('filepath', '')
    if filepath != '':
        assert os.path.exists(filepath), "{} not exist.".format(filepath)
    else:
        assert os.path.exists(filelist), "{} not exist.".format(filelist)

    # get infer reader
    infer_reader = get_reader(args.model_name.upper(), 'infer', infer_config)

    if args.weights:
        assert os.path.exists(
            args.weights), "Given weight dir {} not exist.".format(
                args.weights)
    # if no weight files specified, download weights from paddle
    weights = args.weights or infer_model.get_weights()

    infer_model.load_test_weights(exe, weights, fluid.default_main_program())

    infer_feeder = fluid.DataFeeder(place=place, feed_list=infer_feeds)
    fetch_list = infer_model.fetches()

    infer_metrics = get_metrics(args.model_name.upper(), 'infer', infer_config)
    infer_metrics.reset()

    periods = []
    cur_time = time.time()
    for infer_iter, data in enumerate(infer_reader()):
        data_feed_in = [items[:-1] for items in data]
        video_id = [items[-1] for items in data]
        infer_outs = exe.run(fetch_list=fetch_list,
                             feed=infer_feeder.feed(data_feed_in))
        infer_result_list = [item for item in infer_outs] + [video_id]

        prev_time = cur_time
        cur_time = time.time()
        period = cur_time - prev_time
        periods.append(period)

        infer_metrics.accumulate(infer_result_list)

        if args.log_interval > 0 and infer_iter % args.log_interval == 0:
            logger.info('Processed {} samples'.format(
                (infer_iter + 1) * len(video_id)))

    logger.info('[INFER] infer finished. average time: {}'.format(
        np.mean(periods)))

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)

    infer_metrics.finalize_and_log_out(savedir=args.save_dir,
                                       label_file=args.label_file)
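
The loop above pins down the contract an infer reader from get_reader must
satisfy: calling it yields batches, and each batch is a list of tuples
whose leading items are feed fields and whose last item is the video id.
A minimal dummy reader honoring that contract (make_dummy_reader and its
shapes are assumptions for illustration, not part of the library):

import numpy as np

def make_dummy_reader(num_batches=2, batch_size=4, feat_dim=8):
    def reader():
        for b in range(num_batches):
            batch = []
            for i in range(batch_size):
                feat = np.random.rand(feat_dim).astype('float32')
                # items[:-1] are feed fields, items[-1] is the video id
                batch.append((feat, 'video_%d_%d' % (b, i)))
            yield batch
    return reader
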
Example #32
0
def train(args):
    # implement distributed training by fleet
    use_fleet = True
    if use_fleet:
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        args.num_trainers = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
        args.trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        print('num_trainers:', args.num_trainers, 'trainer_id:', args.trainer_id)

    if args.trainer_id == 0:
        if not os.path.exists(args.save_dir):
            os.makedirs(args.save_dir)

    # parse config
    config = parse_config(args.config)
    train_config = merge_configs(config, 'train', vars(args))
    print_configs(train_config, 'Train')
    train_model = models.get_model(args.model_name, train_config, mode='train')

    # build model
    startup = fluid.Program()
    train_prog = fluid.Program()
    if args.fix_random_seed:
        startup.random_seed = 1000
        train_prog.random_seed = 1000
    with fluid.program_guard(train_prog, startup):
        with fluid.unique_name.guard():
            train_model.build_input(use_dataloader=True)
            train_model.build_model()
            # the input has the form [data1, data2, ..., label], so train_feeds[-1] is the label
            train_feeds = train_model.feeds()
            train_fetch_list = train_model.fetches()
            train_loss = train_fetch_list[0]
            optimizer = train_model.optimizer()

            if use_fleet:
                optimizer = fleet.distributed_optimizer(optimizer)

            optimizer.minimize(train_loss)
            train_dataloader = train_model.dataloader()

    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup)

    if args.resume:
        # if resume weights is given, load resume weights directly
        assert os.path.exists(args.resume + '.pdparams'), \
                "Given resume weight dir {}.pdparams not exist.".format(args.resume)
        fluid.load(train_prog, model_path=args.resume, executor=exe)
    else:
        # if not in resume mode, load pretrain weights
        if args.pretrain:
            assert os.path.exists(args.pretrain), \
                    "Given pretrain weight dir {} not exist.".format(args.pretrain)
        pretrain = args.pretrain or train_model.get_pretrain_weights()
        if pretrain:
            train_model.load_pretrain_params(exe, pretrain, train_prog, place)

    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = True
    if args.model_name in ['CTCN']:
        build_strategy.enable_sequential_execution = True

    exec_strategy = fluid.ExecutionStrategy()

    if use_fleet:
        compiled_train_prog = fleet.main_program
    else:
        compiled_train_prog = fluid.compiler.CompiledProgram(
            train_prog).with_data_parallel(loss_name=train_loss.name,
                                           build_strategy=build_strategy,
                                           exec_strategy=exec_strategy)
    # get reader
    bs_denominator = 1
    if args.use_gpu:
        # check number of GPUs
        gpus = os.getenv("CUDA_VISIBLE_DEVICES", "")
        if gpus == "":
            pass
        else:
            gpus = gpus.split(",")
            num_gpus = len(gpus)
            assert num_gpus == train_config.TRAIN.num_gpus, \
                   "num_gpus({}) set by CUDA_VISIBLE_DEVICES " \
                   "shoud be the same as that " \
                   "set in {}({})".format(
                   num_gpus, args.config, train_config.TRAIN.num_gpus)
        bs_denominator = train_config.TRAIN.num_gpus

    train_config.TRAIN.batch_size = int(train_config.TRAIN.batch_size /
                                        bs_denominator)
    train_reader = get_reader(args.model_name.upper(), 'train', train_config)

    # get metrics
    train_metrics = get_metrics(args.model_name.upper(), 'train', train_config)

    epochs = args.epoch or train_model.epoch_num()

    # each fleet trainer drives a single device, so bind the reader to it
    train_dataloader.set_batch_generator(train_reader, places=place)

    train_with_dataloader(exe,
                          train_prog,
                          compiled_train_prog,
                          train_dataloader,
                          train_fetch_list,
                          train_metrics,
                          epochs=epochs,
                          log_interval=args.log_interval,
                          save_dir=args.save_dir,
                          num_trainers=args.num_trainers,
                          trainer_id=args.trainer_id,
                          save_model_name=args.model_name,
                          fix_random_seed=args.fix_random_seed,
                          is_profiler=args.is_profiler,
                          profiler_path=args.profiler_path)
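
The trainer count and rank consulted at the top of this function come from
environment variables exported by Paddle's distributed launcher. A minimal
sketch reading them the same way (fleet_rank_info is a hypothetical helper
for illustration):

import os

def fleet_rank_info():
    # PADDLE_TRAINERS_NUM / PADDLE_TRAINER_ID are set by the launcher;
    # default to a single local trainer when running standalone
    num_trainers = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
    trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
    return num_trainers, trainer_id
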