Example #1
def compute(image_file_path: str, config: Dict, model: Any) -> np.ndarray:
    """Computes descriptors from keypoints saved in a file."""

    img = cv2.imread(image_file_path, 0)
    img = io_utils.smart_scale(
        img, config['max_size'],
        prevent_upscaling=True) if config['max_size'] is not None else img

    # Infer the path to the corresponding csv file for the keypoints.
    collection_name, set_name, image_name, _ = io_utils.get_path_components(
        image_file_path)

    # find path to keypoints file
    keypoints_file_path = io_utils.build_output_path(
        config['output_dir'],
        collection_name,
        set_name,
        'keypoints',
        config['detector_name'],
        image_name,
        max_size=config['max_size'],
        max_num_keypoints=config['max_num_keypoints'])

    if not os.path.isfile(keypoints_file_path):
        print('Could not find keypoints in path: {}.\nSkip.'.format(
            keypoints_file_path))
        return None

    # Load keypoints from csv file as numpy array.
    kpts_numpy = io_utils.get_keypoints_from_csv(keypoints_file_path)

    # Convert numpy array to a list of cv2.KeyPoint.
    kpts_cv2 = io_utils.numpy_to_cv2_kp(kpts_numpy)

    # Create image patches for each keypoint.
    patches = create_patches(img, kpts_cv2, 42)

    # Save patches in tmp dir
    path_to_desc = os.path.join(config['tmp_dir_doap'], 'descriptors.csv')
    path_to_patches = os.path.join(config['tmp_dir_doap'], 'patches.csv')
    io_utils.save_patches_list(patches, path_to_patches)

    # Compute descriptors in MATLAB. Save the result in the tmp dir.
    # TODO: file paths for vlfeat, matconvnet and the model must be parameters
    subprocess.check_call([
        'matlab', '-nosplash', '-r',
        "use_doap_with_file('vlfeat-0.9.21', 'matconvnet-1.0-beta25', 'HPatches_ST_LM_128d.mat', '.', '{}', '{}');quit"
        .format(path_to_patches, path_to_desc)
    ])

    # Load matlab results and return.
    desc = np.loadtxt(path_to_desc, delimiter=',')

    return desc
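
A minimal usage sketch for the DOAP variant above, assuming hypothetical paths and values; the config keys mirror exactly the ones the function reads, and `model` is unused here because the descriptors are produced by the MATLAB call.

# Usage sketch (hypothetical paths and values; tmp_dir_doap is assumed to exist).
config = {
    'max_size': 800,
    'output_dir': 'output',
    'detector_name': 'sift',
    'max_num_keypoints': 1000,
    'tmp_dir_doap': '/tmp/doap',
}
desc = compute('data/collection/set/image.ppm', config, model=None)
if desc is not None:
    print(desc.shape)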
Example #2
def compute(image_file_path: str, config: Dict, model: Any) -> np.ndarray:
    """Computes descriptors from keypoints saved in a file."""
    # Load image and scale appropriately. The image is later used to create
    # patches, which in turn are used to compute the descriptors.
    img = cv2.imread(image_file_path, 0)
    img = io_utils.smart_scale(
        img, config['max_size'],
        prevent_upscaling=True) if config['max_size'] is not None else img

    # Infer the path to the corresponding csv file for the keypoints.
    collection_name, set_name, image_name, _ = io_utils.get_path_components(
        image_file_path)

    # find path to keypoints file
    keypoints_file_path = io_utils.build_output_path(
        config['output_dir'],
        collection_name,
        set_name,
        'keypoints',
        config['detector_name'],
        image_name,
        max_size=config['max_size'],
        max_num_keypoints=config['max_num_keypoints'])

    if not os.path.isfile(keypoints_file_path):
        print('Could not find keypoints in path: {}.\nSkip.'.format(
            keypoints_file_path))
        return None

    # Load keypoints from csv file as numpy array.
    kpts_numpy = io_utils.get_keypoints_from_csv(keypoints_file_path)

    # Convert numpy array to a list of cv2.KeyPoint.
    kpts_cv2 = io_utils.numpy_to_cv2_kp(kpts_numpy)

    # Create image patches for each keypoint
    patches = rectify_patches(img, kpts_cv2, 32, 3)

    # Compute descriptors.
    desc = compute_descriptors(model, patches, use_gpu=False)

    return desc
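
Unlike Example #1, this variant computes the descriptors in-process, so a pre-loaded descriptor model is required. A minimal sketch under that assumption; `load_model` is a hypothetical placeholder, since the actual model construction is project-specific and not shown here.

# Usage sketch (hypothetical values; `load_model` is a placeholder).
config = {
    'max_size': 800,
    'output_dir': 'output',
    'detector_name': 'sift',
    'max_num_keypoints': 1000,
}
model = load_model()  # Hypothetical: any model accepted by compute_descriptors.
desc = compute('data/collection/set/image.ppm', config, model)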
Example #3
def main(args):
    # Get necessary directories
    output_dir = args.output_dir
    image_dir = args.image_dir
    data_dir = args.data_dir
    tensorflow_dir = os.path.join(data_dir, args.tensorflow_dir)
    stats_dir = os.path.join(data_dir, args.stats_dir)

    train_name = args.train_name
    stats_name = args.stats_name
    save_feature_name = args.save_feature

    # Create list of file names.
    file_list = args.file_list.split(' ')

    # Parameters
    alpha = args.alpha
    patch_size = args.patch_size
    batch_size = args.batch_size
    descriptor_dim = args.descriptor_dim

    stats_path = os.path.join(stats_dir, 'stats_{}.pkl'.format(stats_name))

    print('Loading training stats:')
    with open(stats_path, 'rb') as src:
        mean, std = pickle.load(src, encoding='utf-8')
    print('Training stats loaded:\nMean: {}\nStd: {}'.format(mean, std))

    CNNConfig = {
        'patch_size': patch_size,
        'descriptor_dim': descriptor_dim,
        'batch_size': batch_size,
        'alpha': alpha,
        'train_flag': False
    }

    cnn_model = patch_cnn.PatchCNN(CNNConfig)

    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        try:
            saver.restore(
                sess,
                os.path.join(tensorflow_dir,
                             '{}_model.ckpt'.format(train_name)))
            print('Model restored.')
        except Exception:
            print('No model found. Exit.')
            exit()

        for file_path in file_list:
            collection_name, set_name, file_base, extension = io_utils.get_path_components(
                file_path)
            file_name = '{}.{}'.format(file_base, extension)

            save_dir = os.path.join(output_dir, save_feature_name,
                                    collection_name, set_name)
            io_utils.create_dir(save_dir)  # Create folder if it does not exist

            output_list = []
            if extension in ['.ppm', '.pgm', '.png', '.jpg']:
                save_file = file_base + '.mat'
                save_name = os.path.join(save_dir, save_file)

                # Read image.
                img, ratio = read_image_from_name(file_path)
                if img.shape[2] == 1:
                    img = np.repeat(img, 3, axis=2)

                # Build image pyramid.
                pyramid = pyramid_gaussian(img,
                                           max_layer=4,
                                           downscale=np.sqrt(2))

                # Predict transformation for each pyramid level.
                for (j, resized) in enumerate(pyramid):
                    fetch = {'o1': cnn_model.o1}

                    resized = np.asarray(resized)
                    resized = (resized - mean) / std
                    resized = resized.reshape(
                        (1, resized.shape[0], resized.shape[1],
                         resized.shape[2]))

                    result = sess.run(fetch,
                                      feed_dict={cnn_model.patch: resized})
                    result_mat = result['o1'].reshape(
                        (result['o1'].shape[1], result['o1'].shape[2],
                         result['o1'].shape[3]))
                    output_list.append(result_mat)

                sio.savemat(save_name, {'output_list': output_list})
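
A possible argparse entry point for main() above; every flag mirrors an attribute that main() reads from `args`. Types and defaults are assumptions, not taken from the original script.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir', required=True)
    parser.add_argument('--image_dir', required=True)
    parser.add_argument('--data_dir', required=True)
    # tensorflow_dir and stats_dir are joined onto data_dir, so they are
    # relative subdirectory names.
    parser.add_argument('--tensorflow_dir', default='tensorflow_model')
    parser.add_argument('--stats_dir', default='stats')
    parser.add_argument('--train_name', required=True)
    parser.add_argument('--stats_name', required=True)
    parser.add_argument('--save_feature', default='covariant_points')
    parser.add_argument('--file_list', required=True)  # Space-separated image paths.
    parser.add_argument('--alpha', type=float, default=1.0)
    parser.add_argument('--patch_size', type=int, default=32)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--descriptor_dim', type=int, default=128)
    main(parser.parse_args())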
Example #4
def compute(image_file_path: str, config: Dict, model: Any) -> np.ndarray:
    """
    Computes the descriptors for all keypoints in a keypoint .csv file.
    1) Check if keypoint file exisits.
    2) Load and scale image.
    2b) Save image in tmp dir.
    3) Load .csv file mit keypoints as cv2.KeyPoint list.
    4) ...and convert  it with correct format for LIFT.
    5) Save keypoint list to text in `tmp` dir as .txt
    6) Compute orientation
    7) compute descriptors
    8) Load descriptors as numpy
    """
    # Build paths
    lift_path = os.path.join(config['root_dir_lift'], 'tf-lift')
    path_tmp_img = os.path.join(config['tmp_dir_lift'], 'tmp_img.png')
    path_tmp_kpts = os.path.join(config['tmp_dir_lift'], 'tmp_kpts.txt')
    path_tmp_ori = os.path.join(config['tmp_dir_lift'], 'tmp_ori.txt')
    path_tmp_desc = os.path.join(config['tmp_dir_lift'], 'tmp_desc.h5')

    # Infer the path to the corresponding csv file for the keypoints.
    collection_name, set_name, image_name, _ = io_utils.get_path_components(
        image_file_path)

    # find path to keypoints file
    keypoints_file_path = io_utils.build_output_path(
        config['output_dir'],
        collection_name,
        set_name,
        'keypoints',
        config['detector_name'],
        image_name,
        max_size=config['max_size'],
        max_num_keypoints=config['max_num_keypoints'])

    # 1) Check if keypoint file exists.
    if not os.path.isfile(keypoints_file_path):
        print('Could not find keypoints in path: {}.\nSkip.'.format(
            keypoints_file_path))
        return None

    # 2) Load and scale image
    img = cv2.imread(image_file_path, 0)
    img = io_utils.smart_scale(
        img, config['max_size'], prevent_upscaling=config['prevent_upscaling']
    ) if config['max_size'] is not None else img

    # 2b) Write image in tmp dir
    cv2.imwrite(path_tmp_img, img)

    # 3) Load the .csv file with keypoints as a cv2.KeyPoint list.
    kpts_numpy = io_utils.get_keypoints_from_csv(keypoints_file_path)

    # Convert numpy array to a list of cv2.KeyPoint.
    kpts_cv2 = io_utils.numpy_to_cv2_kp(kpts_numpy)

    # 4) Convert to LIFT format
    kpts_lift = opencv_kp_list_2_kp_list(kpts_cv2)

    # 5) Save keypoint list as .txt file for LIFT
    saveKpListToTxt(kpts_lift, None, path_tmp_kpts)

    try:
        # 6) Orientation
        subprocess.check_call([
            'python', 'main.py', '--subtask=ori',
            '--test_img_file={}'.format(path_tmp_img),
            '--test_out_file={}'.format(path_tmp_ori),
            '--test_kp_file={}'.format(path_tmp_kpts)
        ],
                              cwd=lift_path)

        # 7) Descriptors
        subprocess.check_call([
            'python', 'main.py', '--subtask=desc',
            '--test_img_file={}'.format(path_tmp_img),
            '--test_out_file={}'.format(path_tmp_desc),
            '--test_kp_file={}'.format(path_tmp_ori)
        ],
                              cwd=lift_path)
    except Exception:
        print(
            'Could not compute descriptors for image {} at max_size {}. Skip.'.
            format(image_file_path, config['max_size']))
        return None

    # 8) Load the descriptors as a numpy array.
    with h5py.File(path_tmp_desc, 'r') as f:
        descriptors = np.array(f['descriptors'])

    return descriptors
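
A usage sketch for the LIFT pipeline above, with hypothetical paths; the keys below are exactly the ones this function reads, and `model` is unused because both LIFT subtasks run in a subprocess.

# Usage sketch (hypothetical paths; root_dir_lift must contain the tf-lift checkout).
config = {
    'root_dir_lift': '/opt/lift',
    'tmp_dir_lift': '/tmp/lift',
    'output_dir': 'output',
    'detector_name': 'lift',
    'max_size': 1200,
    'max_num_keypoints': 1000,
    'prevent_upscaling': True,
}
desc = compute('data/collection/set/image.ppm', config, model=None)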
Example #5
def detect_bulk(file_list: List[str], config: Dict) -> None:
    """Computes keypoints for all files in `file_list`. Additionally for each
    file, create an image with the corresponding keypoints drawn into it.
    All results will be saved within the `tmp` folder for this module.

    Arguments:
        file_list {List[str]} -- List of all images for which to compute keypoints.
        config {Dict} -- General configuration object. See config_run_detectors.py
        for more information.

    Returns:
        None -- All results here are saved within the `tmp` dir specified within
        the `config` object.
    """

    try:
        # Create feature map for each image in 'covariant_points' folder
        subprocess.check_call([
            'python',
            'patch_network_point_test.py',
            '--save_feature',
            'covariant_points',
            '--output_dir',
            config['tmp_dir_tcovdet'],
            '--file_list',
            ' '.join(file_list),
        ])

    except Exception as e:
        print('TCovDet: Covariant feature map creation failed.')
        print(e)
        raise e

    try:
        collection_names = []
        set_names = []
        image_names = []
        for file_path in file_list:
            collection_name, set_name, file_base, extension = io_utils.get_path_components(
                file_path)
            collection_names.append(collection_name)
            set_names.append(set_name)
            image_names.append(file_base)

        # The path to the MATLAB config file
        matlab_config_path = os.path.join(config['tmp_dir_tcovdet'],
                                          'filelist.mat')
        # Where to save the keypoints
        dir_output = os.path.join(config['tmp_dir_tcovdet'], 'feature_points')
        io_utils.create_dir(dir_output)  # Create it on the fly if it does not exist

        # Where the .mat files of the covariant step lie
        dir_data = os.path.join(config['tmp_dir_tcovdet'], 'covariant_points')

        # Set the maximal number of keypoints to find
        point_number = 1000 if config['max_num_keypoints'] is None else config[
            'max_num_keypoints']

        savemat(
            matlab_config_path, {
                'file_list': file_list,
                'collection_names': collection_names,
                'set_names': set_names,
                'image_names': image_names,
                'dir_output': dir_output,
                'dir_data': dir_data,
                'point_number': point_number
            })
        subprocess.check_call([
            'matlab', '-nosplash', '-r',
            "point_extractor('vlfeat-0.9.21', '{}');quit".format(
                matlab_config_path)
        ])

    except Exception as e:
        print('TCovDet: Keypoint feature map creation failed.')
        print(e)
        raise e

    # Load each created .mat file, extract the keypoints (columns 2 and 5),
    # create list of cv2.KeyPoint.
    # Then load the image, scale it, draw the keypoints in it and save everything
    for i in tqdm(range(len(file_list))):
        file = file_list[i]
        mat_path = os.path.join(config['tmp_dir_tcovdet'], 'feature_points',
                                collection_names[i], set_names[i],
                                image_names[i] + '.mat')
        mat = loadmat(mat_path)
        kpts_numpy = mat['feature'][:, [2, 5]]  # numpy array
        scores = mat['score']

        if len(kpts_numpy):
            kpts_cv2 = [
                cv2.KeyPoint(x[0], x[1], 1.0, _response=scores[idx])
                for idx, x in enumerate(kpts_numpy)
            ]  # list of cv2.KeyPoint

            img = cv2.imread(file, 0)
            if (img.shape[0] * img.shape[1]) > (1024 * 768):
                ratio = (1024 * 768 /
                         float(img.shape[0] * img.shape[1]))**(0.5)
                img = cv2.resize(
                    img,
                    (int(img.shape[1] * ratio), int(img.shape[0] * ratio)),
                    interpolation=cv2.INTER_CUBIC)

            img_kp = io_utils.draw_keypoints(img, kpts_cv2, config)

            # Save everything.
            io_utils.save_detector_output(file, config['detector_name'],
                                          config, kpts_cv2, img_kp, None)
        else:
            print('Warning: Did not find any keypoints!')
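
A usage sketch for detect_bulk, with hypothetical values. Besides the keys read directly above, `config` must also satisfy io_utils.draw_keypoints and io_utils.save_detector_output, which are not shown here.

# Usage sketch (hypothetical paths and values).
file_list = ['data/collection/set/img1.ppm', 'data/collection/set/img2.ppm']
config = {
    'tmp_dir_tcovdet': '/tmp/tcovdet',
    'max_num_keypoints': 1000,
    'detector_name': 'tcovdet',
}
detect_bulk(file_list, config)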