Example #1
        for i,kp in enumerate(kp1):
            pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
            cv2.circle(kp_on_img1, pt, 1, (0, 255, 0), -1, lineType=16)
        
        # metrics
        print('** %d **'%img_key)
        f.write('** %d **\n'%img_key)

        rep, N1, N2, M = bench_tools.rep(new_size, H,
                kp0, kp1, cst.THRESH_OVERLAP)
        print('rep: %.3f - N1: %d - N2: %d - M: %d'%(rep,N1,N2,len(M)))
        f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n' %(rep,N1,N2,len(M)))


        (ms, N1, N2, M_len, M_d_len, inter) = bench_tools.ms(new_size, H,
                kp0, kp1, 
                des0, des1,
                cst.THRESH_OVERLAP, cst.THRESH_DESC)
        print('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d'
                %(ms,N1,N2,M_len, M_d_len, inter))
        f.write('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n'
            %(ms, N1, N2, M_len, M_d_len, inter))


        if cst.DEBUG:
            good = []
            matches = matcher.knnMatch(des0, des1,k=2)
            for i,(m,n) in enumerate(matches):
                if m.distance < 0.8*n.distance:
                    good.append(m)

            match_des_img = cv2.drawMatches(img0, kp0, img1, kp1, good, None,
                    flags=2)
Example #2
def main(config):

    # Build Networks
    tf.reset_default_graph()

    photo_ph = tf.placeholder(
        tf.float32,
        [1, None, None, 1])  # input grayscale image, normalized by 0~1
    is_training = tf.constant(False)  # Always False in testing

    ops = build_networks(config, photo_ph, is_training)

    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    sess.run(tf.global_variables_initializer())

    # load model
    saver = tf.train.Saver()
    print('Load trained models...')

    if os.path.isdir(config.model):
        checkpoint = tf.train.latest_checkpoint(config.model)
        model_dir = config.model
    else:
        checkpoint = config.model
        model_dir = os.path.dirname(config.model)

    if checkpoint is not None:
        print('Checkpoint', os.path.basename(checkpoint))
        print("[{}] Resuming...".format(time.asctime()))
        saver.restore(sess, checkpoint)
    else:
        raise ValueError('Cannot load model from {}'.format(model_dir))
    print('Done.')

    avg_elapsed_time = 0

    ##########################################################################

    new_size = (config.w, config.h)

    # setup output dir
    res_dir = os.path.join('res/lfnet/', config.trials)
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    # write human readable logs
    f = open(os.path.join(res_dir, 'log.txt'), 'w')
    f.write('lfnet\n')
    f.write('data: %s\n' % cst.DATA)
    f.write('thresh_overlap: %d\n' % cst.THRESH_OVERLAP)
    f.write('thresh_desc: %d\n' % cst.THRESH_DESC)

    # feature matcher handler for visualization
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    matcher = cv2.FlannBasedMatcher(index_params, search_params)
    norm = 'L2'

    global_start_time = time.time()
    for scene_name in cst.SCENE_LIST:
        duration = time.time() - global_start_time
        print('*** %s *** %d:%02d' %
              (scene_name, duration / 60, duration % 60))
        f.write('*** %s *** %d:%02d\n' %
                (scene_name, duration / 60, duration % 60))

        # get 1st img, (resize it), convert to BW
        img0_fn = os.path.join(cst.DATA_DIR, scene_name, '%d.ppm' % 1)
        print('img_fn: %s' % img0_fn)
        img0 = cv2.imread(img0_fn)
        old_size0 = (img0.shape[1], img0.shape[0])
        if config.resize == 1:
            img0 = cv2.resize(img0, new_size, interpolation=cv2.INTER_LINEAR)
        height, width = img0.shape[:2]
        if img0.ndim == 3 and img0.shape[-1] == 3:
            img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
        img0_bw = img0.copy()
        img0 = img0[None, ..., None].astype(
            np.float32) / 255.0  # normalize 0-1
        assert img0.ndim == 4  # [1,H,W,1]

        # detect and describe
        outs = sess.run({
            'kpts': ops['kpts'],
            'feats': ops['feats']
        },
                        feed_dict={photo_ph: img0})
        pts0 = outs['kpts'].T
        des0 = outs['feats']

        if config.save2txt:
            out_dir = os.path.join(res_dir, scene_name)
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            pts_fn = os.path.join(out_dir, '%d_kp.txt' % 1)
            np.savetxt(pts_fn, pts0)

        # convert to cv2 kp for prototype conformity
        kp0 = []
        for pt in pts0.T:
            kp = cv2.KeyPoint(x=pt[0],
                              y=pt[1],
                              _size=2,
                              _angle=0,
                              _response=0,
                              _octave=0,
                              _class_id=0)
            kp0.append(kp)

        # draw kp on img
        kp_on_img0 = np.tile(np.expand_dims(img0_bw, 2), (1, 1, 3))
        for i, kp in enumerate(kp0):
            pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
            cv2.circle(kp_on_img0, pt, 1, (0, 255, 0), -1, lineType=16)

        for img_key in range(2, cst.MAX_IMG_NUM + 1):

            # get 2nd img
            img1_fn = os.path.join(cst.DATA_DIR, scene_name,
                                   '%d.ppm' % img_key)
            #print('img_fn: %s'%img1_fn)
            img1 = cv2.imread(img1_fn)
            H = np.loadtxt(
                os.path.join(cst.DATA_DIR, scene_name, 'H_1_%d' % img_key))
            # correct H with new size
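            # H (from file H_1_<i>) maps 1.ppm coordinates to <i>.ppm
            # coordinates at the original resolutions. After resizing both
            # images to new_size, compose: resized img0 -> original img0
            # (divide by s1x, s1y), apply H, then original img1 -> resized
            # img1 (multiply by six, siy):
            # H_new = diag(six, siy, 1) . H . diag(1/s1x, 1/s1y, 1)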
            if config.resize == 1:
                s1x = 1.0 * new_size[0] / old_size0[0]
                s1y = 1.0 * new_size[1] / old_size0[1]
                six = 1.0 * new_size[0] / img1.shape[1]
                siy = 1.0 * new_size[1] / img1.shape[0]
                #print('s1x - s1y - six - siy', s1x, s1y, six, siy)
                H = np.diag((six, siy,
                             1)).dot(H.dot(np.diag((1.0 / s1x, 1.0 / s1y, 1))))
                img1 = cv2.resize(img1,
                                  new_size,
                                  interpolation=cv2.INTER_LINEAR)

            if img1.ndim == 3 and img1.shape[-1] == 3:
                img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            img1_bw = img1.copy()
            img1 = img1[None, ..., None].astype(
                np.float32) / 255.0  # normalize 0-1
            assert img1.ndim == 4  # [1,H,W,1]

            # detect and describe
            outs = sess.run({
                'kpts': ops['kpts'],
                'feats': ops['feats']
            },
                            feed_dict={photo_ph: img1})
            pts1 = outs['kpts'].T
            des1 = outs['feats']

            if config.save2txt:
                pts_fn = os.path.join(out_dir, '%d_kp.txt' % img_key)
                np.savetxt(pts_fn, pts1)
                continue

            # convert to cv2 kp for prototype conformity
            kp1 = []
            for pt in pts1.T:
                kp = cv2.KeyPoint(x=pt[0],
                                  y=pt[1],
                                  _size=2,
                                  _angle=0,
                                  _response=0,
                                  _octave=0,
                                  _class_id=0)
                kp1.append(kp)

            # draw kp
            kp_on_img1 = np.tile(np.expand_dims(img1_bw, 2), (1, 1, 3))
            for i, kp in enumerate(kp1):
                pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
                cv2.circle(kp_on_img1, pt, 1, (0, 255, 0), -1, lineType=16)

            # metrics
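            # rep(): detector repeatability -- kp0 warped by H and matched to
            # kp1 under cst.THRESH_OVERLAP (N1/N2 = surviving keypoint counts,
            # M = correspondences). ms(): matching score, which additionally
            # requires the descriptor nearest neighbour (L2) to agree within
            # cst.THRESH_DESC. (Interpretation of bench_tools' return values;
            # bench_tools itself is not shown in these examples.)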
            print('** %d **' % img_key)
            f.write('** %d **\n' % img_key)

            rep, N1, N2, M = bench_tools.rep(new_size, H, kp0, kp1,
                                             cst.THRESH_OVERLAP)
            print('rep: %.3f - N1: %d - N2: %d - M: %d' %
                  (rep, N1, N2, len(M)))
            f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n' %
                    (rep, N1, N2, len(M)))

            (ms, N1, N2, M_len, M_d_len,
             inter) = bench_tools.ms(new_size, H, kp0, kp1, des0, des1,
                                     cst.THRESH_OVERLAP, cst.THRESH_DESC, norm)
            print('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d' %
                  (ms, N1, N2, M_len, M_d_len, inter))
            f.write('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n' %
                    (ms, N1, N2, M_len, M_d_len, inter))

            if cst.DEBUG:
                good = []
                matches = matcher.knnMatch(des0, des1, k=2)
                for i, (m, n) in enumerate(matches):
                    if m.distance < 0.8 * n.distance:
                        good.append(m)

                match_des_img = cv2.drawMatches(img0_bw,
                                                kp0,
                                                img1_bw,
                                                kp1,
                                                good,
                                                None,
                                                flags=2)
                cv2.imshow('match_des', match_des_img)
                cv2.imshow('kp_on', np.hstack((kp_on_img0, kp_on_img1)))
                cv2.waitKey(0)

    f.close()
    print('Done.')
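
bench_tools itself is not included in these examples. As a rough reference, a minimal repeatability sketch in the spirit of bench_tools.rep, assuming a plain pixel-distance overlap test and one-directional nearest-neighbour matching (the real THRESH_OVERLAP criterion and keypoint filtering may differ), could look like:

import numpy as np

def rep_sketch(size, H, kp0, kp1, thresh):
    """Repeatability sketch: a kp0 point counts as repeated if, after
    warping by H, some kp1 point lies within `thresh` pixels."""
    w, h = size
    pts0 = np.array([kp.pt for kp in kp0], dtype=np.float64)  # [N0, 2]
    pts1 = np.array([kp.pt for kp in kp1], dtype=np.float64)  # [N1, 2]

    # warp image-0 keypoints into image-1 with the homography
    ones = np.ones((pts0.shape[0], 1))
    warped = (H @ np.hstack([pts0, ones]).T).T
    warped = warped[:, :2] / warped[:, 2:3]

    # keep only warped points that still land inside the second image
    keep = ((warped[:, 0] >= 0) & (warped[:, 0] < w) &
            (warped[:, 1] >= 0) & (warped[:, 1] < h))
    warped = warped[keep]

    N1, N2 = len(warped), len(pts1)
    if N1 == 0 or N2 == 0:
        return 0.0, N1, N2, []

    # nearest-neighbour matching under the pixel threshold
    d = np.linalg.norm(warped[:, None, :] - pts1[None, :, :], axis=2)
    M = [(i, j) for i, j in enumerate(d.argmin(axis=1)) if d[i, j] < thresh]
    return len(M) / min(N1, N2), N1, N2, M

Calling rep_sketch(new_size, H, kp0, kp1, cst.THRESH_OVERLAP) would then mirror the (rep, N1, N2, M) tuple unpacked in the loop above.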
Example #3
File: hpatch.py Project: zebrajack/elf
def main(config):

   
    # Build Networks
    tf.reset_default_graph()

    # detector
    photo_ph = tf.placeholder(tf.float32, [1, config.h, config.w, 1]) # input grayscale image, normalized by 0~1
    is_training = tf.constant(False) # Always False in testing

    ops = build_networks(config, photo_ph, is_training)
    lfnet_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    
    # proxy-descriptor
    img_col_op = tf.placeholder(dtype=tf.float32, shape=[1, config.h, config.w, 3])
    feats_op, _ = model_des.model_grad(img_col_op, is_training=False)

    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True 

    sess = tf.Session(config=tfconfig)
    sess.run(tf.global_variables_initializer())

    # load lfnet model
    saver = tf.train.Saver(lfnet_var_list)
    print('Load trained models...')
    if os.path.isdir(config.model):
        checkpoint = tf.train.latest_checkpoint(config.model)
        model_dir = config.model
    else:
        checkpoint = config.model
        model_dir = os.path.dirname(config.model)
    if checkpoint is not None:
        print('Checkpoint', os.path.basename(checkpoint))
        print("[{}] Resuming...".format(time.asctime()))
        saver.restore(sess, checkpoint)
    else:
        raise ValueError('Cannot load model from {}'.format(model_dir))    
    print('Done.')
       

    # load vgg proxy-descriptor    
    init_weights.restore_vgg(sess, '%s/vgg/data.ckpt'%cst.WEIGHT_DIR)


    avg_elapsed_time = 0
    ##########################################################################

    new_size = (config.w, config.h)

    # setup output dir
    res_dir = os.path.join('res/elf-lfnet/', config.trials)
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
   
    # write human readable logs
    f = open(os.path.join(res_dir, 'log.txt'), 'w')
    f.write('grad_block: %d - grad_name: %s\n'%(config.grad_block, config.grad_name))
    f.write('feat_block: %d - feat_name: %s\n'%(config.feat_block, config.feat_name))
    f.write('thr_k_size: %d - otsu_sigma: %d\n'%(config.thr_k_size, config.thr_sigma))
    f.write('noise_k_size: %d - noise_sigma: %d\n'%(config.noise_k_size, config.noise_sigma))
    global_start_time = time.time()
    
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)
    matcher = cv2.FlannBasedMatcher(index_params,search_params)
    norm = 'L2'

    for scene_name in cst.SCENE_LIST:
        duration = time.time() - global_start_time
        print('*** %s *** %d:%02d'%(scene_name, duration/60, duration%60))
        f.write('*** %s *** %d:%02d\n'%(scene_name, duration/60, duration%60))

        
        # get 1st img, (resize it), convert to BW
        img0_fn = os.path.join(cst.DATA_DIR, scene_name,'%d.ppm'%1)
        img0 = cv2.imread(img0_fn)
        old_size0 = (img0.shape[1], img0.shape[0])
        if config.resize==1:
            img0 = cv2.resize(img0, new_size, interpolation=cv2.INTER_LINEAR)
        if img0.ndim == 3 and img0.shape[-1] == 3:
            img0_bw = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
        kp_on_img0 = img0_bw.copy()


        # detection
        patch = tools_elf.preproc(img0_bw)
        block = config.grad_block
        grad_name = config.grad_name
        grad = sess.run(ops['grads_dict']['block-%d'%block][grad_name],
                feed_dict={photo_ph: patch})[0]
        grad = np.squeeze(grad)
        pts_fail, pts0 = tools_elf.postproc(grad, config.noise_k_size, 
                config.noise_sigma, config.thr_k_size, config.thr_sigma, 
                config.nms_dist, config.border_remove, config.max_num_feat)
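        # tools_elf.postproc (not shown here) presumably turns the raw gradient
        # map into keypoints: Gaussian denoising (noise_k_size / noise_sigma),
        # thresholding (thr_k_size / thr_sigma), NMS within nms_dist, dropping
        # points closer than border_remove px to the image edge, and keeping at
        # most max_num_feat points; pts_fail flags an empty detection.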
            
        if pts_fail:
            print('detection failed on the whole scene')
            for img_key in range(2, cst.MAX_IMG_NUM+1):
                f.write('** %d **\n'%img_key)
                # ms
                print('min_conf: %.5f - my_raw_ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d'
                        %(-1, 0, 0, 0, 0, 0, 0))
                f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n'%(0,0,0,0))
                f.write('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n'
                        %(0, 0, 0, 0, 0, 0))
            print('goto next scene')
            continue # goto next scene

        # convert to cv2 kp for prototype homogeneity
        kp0 = []
        for pt in pts0.T:
            kp = cv2.KeyPoint(x=pt[0],y=pt[1], _size=4,
                _angle=0, _response=0, _octave=0, _class_id=0)
            kp0.append(kp) 

        # draw kp on img
        kp_on_img0 = np.tile(np.expand_dims(kp_on_img0,2), (1,1,3))
        for i,kp in enumerate(kp0):
            pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
            cv2.circle(kp_on_img0, pt, 1, (0, 255, 0), -1, lineType=16)


        # descriptor
        patch = tools_elf.preproc(img0) 
        des_coarse = sess.run(feats_op[config.feat_name],
                feed_dict={img_col_op: patch})[0,:,:,:]
        des0 = tools_elf.SuperPoint_interpolate(
                pts0, des_coarse, new_size[0], new_size[1])
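        # SuperPoint_interpolate presumably samples the dense VGG feature map
        # bilinearly at each keypoint location (SuperPoint-style descriptor
        # sampling), yielding one descriptor per point in pts0.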


        for img_key in range(2,cst.MAX_IMG_NUM+1):
            # get 2nd img
            img1_fn = os.path.join(cst.DATA_DIR, scene_name,'%d.ppm'%img_key)
            img1 = cv2.imread(img1_fn)
            H = np.loadtxt(os.path.join(cst.DATA_DIR, scene_name, 'H_1_%d'%img_key))
            
            if config.resize==1:
                s1x = 1.0*new_size[0]/old_size0[0]
                s1y = 1.0*new_size[1]/old_size0[1]
                six = 1.0*new_size[0]/img1.shape[1]
                siy = 1.0*new_size[1]/img1.shape[0]
                #print('s1x - s1y - six - siy', s1x, s1y, six, siy)
                H = np.diag((six,siy,1)).dot(H.dot( np.diag((1.0/s1x, 1.0/s1y, 1)) ) )
                img1 = cv2.resize(img1, new_size, interpolation=cv2.INTER_LINEAR)
            if img1.ndim == 3 and img1.shape[-1] == 3:
                img1_bw = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            kp_on_img1 = img1_bw.copy()

            # detection
            patch = tools_elf.preproc(img1_bw)
            block = config.grad_block
            grad_name = config.grad_name
            grad = sess.run(ops['grads_dict']['block-%d'%block][grad_name],
                    feed_dict={photo_ph: patch})[0]
            grad = np.squeeze(grad)
            pts_fail, pts1 = tools_elf.postproc(grad, config.noise_k_size, 
                    config.noise_sigma, config.thr_k_size, config.thr_sigma, 
                    config.nms_dist, config.border_remove, config.max_num_feat)
            
            if pts_fail:
                f.write('** %d **\n'%img_key)
                # ms
                print('min_conf: %.5f - my_raw_ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d'
                        %(-1, 0, 0, 0, 0, 0, 0))
                f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n'%(0,0,0,0))
                f.write('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n'
                        %(0, 0, 0, 0, 0, 0))
                continue # go to next img


            # convert to cv2 kp for prototype homogeneity
            kp1 = []
            for pt in pts1.T:
                kp = cv2.KeyPoint(x=pt[0],y=pt[1], _size=4,
                    _angle=0, _response=0, _octave=0, _class_id=0)
                kp1.append(kp)
            
            # draw kp on img
            kp_on_img1 = np.tile(np.expand_dims(kp_on_img1,2), (1,1,3))
            for i,kp in enumerate(kp1):
                pt = (int(round(kp.pt[0])), int(round(kp.pt[1])))
                cv2.circle(kp_on_img1, pt, 1, (0, 255, 0), -1, lineType=16)


            # descriptor
            patch = tools_elf.preproc(img1) 
            des_coarse = sess.run(feats_op[config.feat_name],
                    feed_dict={img_col_op: patch})[0,:,:,:]
            des1 = tools_elf.SuperPoint_interpolate(
                    pts1, des_coarse, new_size[0], new_size[1])


            # metrics
            print('** %d **'%img_key)
            f.write('** %d **\n'%img_key)
            
            rep, N1, N2, M = bench_tools.rep(new_size, H, kp0, kp1, 
                    cst.THRESH_OVERLAP)
            print('rep: %.3f - N1: %d - N2: %d - M: %d'%(rep,N1,N2,len(M)))
            f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n' %(rep,N1,N2,len(M)))
            
            (ms, N1, N2, M_len, M_d_len, inter) = bench_tools.ms(new_size, H, 
                    kp0, kp1, des0, des1, cst.THRESH_OVERLAP, cst.THRESH_DESC, norm)
            print('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d'
                    %(ms,N1,N2,M_len, M_d_len, inter))
            f.write('ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n'
                %(ms, N1, N2, M_len, M_d_len, inter))
            
            if cst.DEBUG:
                good = []
                matches = matcher.knnMatch(des0, des1,k=2)
                for i,(m,n) in enumerate(matches):
                    if m.distance < 0.8*n.distance:
                        good.append(m)
            
                match_des_img = cv2.drawMatches(img0, kp0, img1, kp1, good, None, 
                        flags=2)
                cv2.imshow('match_des', match_des_img)
                cv2.imshow('kp_on', np.hstack((kp_on_img0, kp_on_img1)))
                cv2.waitKey(0)


    f.close()
    print('Done.')
Example #4
                        pts1, des_coarse, new_size[0], new_size[1])

                    # metrics
                    print('** %d **' % img_key)
                    f.write('** %d **\n' % img_key)

                    rep, N1, N2, M = bench_tools.rep(new_size, H, kp0, kp1,
                                                     cst.THRESH_OVERLAP)
                    print('rep: %.3f - N1: %d - N2: %d - M: %d' %
                          (rep, N1, N2, len(M)))
                    f.write('rep:%.3f - N1:%d - N2:%d - M:%d\n' %
                            (rep, N1, N2, len(M)))

                    (ms, N1, N2, M_len, M_d_len,
                     inter) = bench_tools.ms(new_size, H, kp0, kp1, des0, des1,
                                             cst.THRESH_OVERLAP,
                                             cst.THRESH_DESC, norm)
                    print(
                        'ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d' %
                        (ms, N1, N2, M_len, M_d_len, inter))
                    f.write(
                        'ms:%.3f - N1:%d - N2:%d - M:%d - M_d:%d - inter:%d\n'
                        % (ms, N1, N2, M_len, M_d_len, inter))

                    if cst.DEBUG:
                        good = []
                        matches = matcher.knnMatch(des0, des1, k=2)
                        for i, (m, n) in enumerate(matches):
                            if m.distance < 0.8 * n.distance:
                                good.append(m)