import itertools

import numpy as np

# local module providing find_correspondences, nms and sift_descriptors
import utils


def compute_statistics(pores, detections):
    """Computes true detection rate (TDR), false detection rate (FDR) and the corresponding F-score for the given groundtruth and detections.

  Args:
    pores: pore groundtruth coordinates in format [N, P, 2].
      Second dimension must be np.arrays.
    detections: detection coordinates in the same format as pores, ie
      [N, D, 2] and second dimension np.arrays.

  Returs:
    f_score: F-score for the given detections and groundtruth.
    tdr: TDR for the given detections and groundtruth.
    fdr: FDR for the given detections and groundtruth.
  """
    # find correspondences between detections and pores
    total_pores = 0
    total_dets = 0
    true_dets = 0
    for i in range(len(pores)):
        # update totals
        total_pores += len(pores[i])
        total_dets += len(detections[i])
        true_dets += len(utils.find_correspondences(pores[i], detections[i]))

    # compute tdr, fdr and f score
    eps = 1e-5
    tdr = true_dets / (total_pores + eps)
    fdr = (total_dets - true_dets) / (total_dets + eps)
    f_score = 2 * (tdr * (1 - fdr)) / (tdr + (1 - fdr))

    return f_score, tdr, fdr
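# All of the snippets here lean on utils.find_correspondences, which is not
# shown in this file. Judging by how the tests below consume its output
# (tuples (i, j, d) with at most one match per index on each side), a
# minimal sketch could be the bidirectional nearest-neighbor matcher below.
# The exact distance metric, tie-breaking and ratio-check details are
# assumptions; the real implementation also takes pts1/pts2,
# euclidean_weight and transf arguments that are not modeled here.
def find_correspondences_sketch(descs1, descs2, thr=None):
    descs1 = np.asarray(descs1, dtype=np.float64)
    descs2 = np.asarray(descs2, dtype=np.float64)

    # pairwise squared euclidean distances, shape [N1, N2]
    dists = np.sum((descs1[:, None, :] - descs2[None, :, :])**2, axis=-1)

    nn12 = np.argmin(dists, axis=1)  # best descs2 match per descs1 row
    nn21 = np.argmin(dists, axis=0)  # best descs1 match per descs2 row

    pairs = []
    for i, j in enumerate(nn12):
        # keep only mutual nearest neighbors
        if nn21[j] != i:
            continue

        # SIFT-style distance ratio check against the second-best match
        if thr is not None and len(dists[i]) > 1:
            best, second = np.sort(dists[i])[:2]
            if second > 0 and best / second >= thr:
                continue

        pairs.append((i, j, dists[i, j]))

    return pairs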
def zero_recovery():
  # every instance on one side has the same best match on the
  # other side (the appended all-zeros instance), so only a
  # single bidirectional pair should survive
  instances1 = []
  for i in range(32):
    instance = np.zeros(32, dtype=np.float32)
    instance[i] = 1
    instances1.append(instance)

  instances2 = [instance + 1 for instance in instances1]
  instances2.append(np.zeros(32, dtype=np.float32))

  # find correspondences
  instances1 = np.array(instances1)
  instances2 = np.array(instances2)
  pairs = utils.find_correspondences(instances1, instances2)

  # exactly one bidirectional pair must survive
  if len(pairs) != 1:
    return False
  # and it must match the all-zeros instance appended to instances2
  if pairs[0][1] != len(instances2) - 1:
    return False
  # at distance 1 (one-hot vector vs all-zeros vector)
  if pairs[0][2] != 1:
    return False

  return True
def basic(descs1, descs2, pts1=None, pts2=None, thr=None):
    '''
    Finds bidirectional correspondences between descriptors
    descs1 and descs2. If thr is provided, discards correspondences
    that fail a distance ratio check with threshold thr; in this
    case, returns correspondences satisfying SIFT's criterion.

    Args:
      descs1: [N, M] array of N descriptors of dimension M each.
      descs2: [N, M] array of N descriptors of dimension M each.
      pts1: sentinel argument for matching function signature
        standardization.
      pts2: sentinel argument for matching function signature
        standardization.
      thr: distance ratio check threshold.

    Returns:
      number of found bidirectional correspondences. If thr is not
        None, the number of bidirectional correspondences that
        satisfy a distance ratio check.
    '''
    if len(descs1) == 0 or len(descs2) == 0:
        return 0

    return len(utils.find_correspondences(descs1, descs2, thr=thr))
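# A hypothetical call to basic with random descriptors, purely for
# illustration; the descriptor counts and dimension are made up.
descs_query = np.random.random((50, 128)).astype(np.float32)
descs_gallery = np.random.random((60, 128)).astype(np.float32)

# number of bidirectional correspondences passing the 0.8 ratio check
print(basic(descs_query, descs_gallery, thr=0.8))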
def spatial(descs1, descs2, pts1, pts2, thr=None):
    '''
    Computes the matching score proposed by Pamplona Segundo &
    Lemes (Pore-based ridge reconstruction for fingerprint
    recognition, 2015) using bidirectional correspondences
    between descriptors descs1 and descs2.
    If thr is provided, correspondences that fail a distance
    ratio check with threshold thr are discarded.

    Args:
      descs1: [N, M] array of N descriptors of dimension M each.
      descs2: [N, M] array of N descriptors of dimension M each.
      pts1: [N, 2] array of coordinates from which each descriptor
        of descs1 was computed.
      pts2: [N, 2] array of coordinates from which each descriptor
        of descs2 was computed.
      thr: distance ratio check threshold.

    Returns:
      matching score between descs1 and descs2.
    '''
    if len(descs1) == 0 or len(descs2) == 0:
        return 0

    pairs = utils.find_correspondences(descs1, descs2, thr=thr)

    pts1 = np.array(pts1)
    pts2 = np.array(pts2)
    score = 0
    for pair1, pair2 in itertools.combinations(pairs, 2):
        d1 = np.linalg.norm(pts1[pair1[0]] - pts1[pair2[0]])
        d2 = np.linalg.norm(pts2[pair1[1]] - pts2[pair2[1]])
        score += 1 / (1 + abs(d1 - d2))

    return score
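# Each pair of correspondences contributes 1 / (1 + |d1 - d2|) to the
# score, so pairs whose within-image distances agree across the two point
# sets (geometrically consistent matches) contribute close to 1 each.
# A hypothetical call, with descriptor and coordinate shapes made up:
descs_a = np.random.random((40, 128)).astype(np.float32)
descs_b = np.random.random((40, 128)).astype(np.float32)
pts_a = np.random.randint(0, 300, size=(40, 2))
pts_b = np.random.randint(0, 300, size=(40, 2))

print('spatial matching score:', spatial(descs_a, descs_b, pts_a, pts_b, thr=0.8))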
def perfect_recovery():
  # every instance has a single perfect match
  instances = []
  for i in range(32):
    instance = np.zeros(32, dtype=np.float32)
    instance[i] = 1
    instances.append(instance)

  # find correspondences
  instances = np.array(instances)
  pairs = utils.find_correspondences(instances, instances)

  # check correctness
  for i, j, d in pairs:
    if i != j or d != 0:
      return False

  return True
def random_recovery():
  # get random instances
  instances1 = np.random.random((100, 32))
  instances2 = np.random.random((100, 32))

  # find correspondences
  pairs = utils.find_correspondences(instances1, instances2)

  # check uniqueness
  seen_indices1 = set()
  seen_indices2 = set()
  for i, j, _ in pairs:
    if i in seen_indices1 or j in seen_indices2:
      return False

    seen_indices1.add(i)
    seen_indices2.add(j)

  return True
def validate(pores_by_image, detections_by_image):
  # find correspondences between detections and pores
  total_pores = 0
  total_dets = 0
  true_dets = 0
  for i, pores in enumerate(pores_by_image):
    dets = detections_by_image[i]

    # update totals
    total_pores += len(pores)
    total_dets += len(dets)
    true_dets += len(utils.find_correspondences(pores, dets))

  # compute tdr, fdr and f score
  eps = 1e-12
  tdr = true_dets / (total_pores + eps)
  fdr = (total_dets - true_dets) / (total_dets + eps)
  f_score = 2 * (tdr * (1 - fdr)) / (tdr + 1 - fdr)

  print('TDR = {}'.format(tdr))
  print('FDR = {}'.format(fdr))
  print('F score = {}'.format(f_score))
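# A hypothetical call to validate, with each image's ground truth pores
# and detections given as [P, 2] np.arrays (coordinates made up):
pores_by_image = [np.array([[10, 12], [40, 41]]), np.array([[5, 5]])]
detections_by_image = [np.array([[11, 12], [80, 80]]), np.array([[5, 6]])]

validate(pores_by_image, detections_by_image)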
def by_images(sess, pred_op, patches_pl, dataset, discard=False):
    '''
    Computes detection parameters that optimize the keypoint detection
    F-score in the dataset with a grid search. This differs from
    by_patches because images are post-processed with thresholding and
    NMS. Parameters of both methods are included in the grid search.

    Args:
      sess: tf session with loaded pred_op variables.
      pred_op: tf op for detection probability prediction.
      patches_pl: patch input placeholder for pred_op op.
      dataset: dataset to perform the grid search on.
      discard: whether to only consider keypoints in the area in which
        the method is capable of detecting them.

    Returns:
      best_f_score: value of the best found F-score.
      best_fdr: corresponding value of the False Detection Rate.
      best_tdr: corresponding value of the True Detection Rate.
      best_inter_thr: NMS intersection threshold that achieves the found
        F-score.
      best_prob_thr: probability threshold that achieves the found
        F-score.
    '''
    patch_size = dataset.patch_size
    half_patch_size = patch_size // 2
    preds = []
    pores = []
    print('Predicting pores...')
    for _ in range(dataset.num_images):
        # get next image and corresponding image label
        (img, *_), (label, *_) = dataset.next_image_batch(1)

        # predict for each image
        pred = sess.run(pred_op,
                        feed_dict={
                            patches_pl: np.reshape(img,
                                                   (-1, ) + img.shape + (1, ))
                        })

        # put predictions in image format
        pred = np.array(pred).reshape(img.shape[0] - patch_size + 1,
                                      img.shape[1] - patch_size + 1)

        # treat borders lost in convolution
        if discard:
            label = label[half_patch_size:-half_patch_size,
                          half_patch_size:-half_patch_size]
        else:
            pred = np.pad(pred, ((half_patch_size, half_patch_size),
                                 (half_patch_size, half_patch_size)),
                          'constant')

        # add image prediction to predictions
        preds.append(pred)

        # turn pore label image into list of pore coordinates
        pores.append(np.argwhere(label))
    print('Done.')

    # validate over thresholds
    inter_thrs = np.arange(0.7, 0, -0.1)
    prob_thrs = np.arange(0.9, 0, -0.1)
    best_f_score = 0
    best_tdr = None
    best_fdr = None
    best_inter_thr = None
    best_prob_thr = None

    # grid search over probability and nms intersection thresholds
    for prob_thr in prob_thrs:
        # threshold predictions and put them in nms-ready format
        coords = []
        probs = []
        for i in range(dataset.num_images):
            img_preds = preds[i]
            pick = img_preds > prob_thr
            coords.append(np.argwhere(pick))
            probs.append(img_preds[pick])

        for inter_thr in inter_thrs:
            # filter detections with nms
            dets = []
            for i in range(dataset.num_images):
                det, _ = utils.nms(coords[i], probs[i], 7, inter_thr)
                dets.append(det)

            # find correspondences between detections and pores
            total_pores = 0
            total_dets = 0
            true_dets = 0
            for i in range(dataset.num_images):
                # update totals
                total_pores += len(pores[i])
                total_dets += len(dets[i])
                true_dets += len(utils.find_correspondences(pores[i], dets[i]))

            # compute tdr, fdr and f score
            eps = 1e-5
            tdr = true_dets / (total_pores + eps)
            fdr = (total_dets - true_dets) / (total_dets + eps)
            f_score = 2 * (tdr * (1 - fdr)) / (tdr + (1 - fdr))

            # update best parameters
            if f_score > best_f_score:
                best_f_score = f_score
                best_tdr = tdr
                best_fdr = fdr
                best_inter_thr = inter_thr
                best_prob_thr = prob_thr

    return best_f_score, best_tdr, best_fdr, best_inter_thr, best_prob_thr
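# by_images also relies on utils.nms(coords, probs, bb_size, inter_thr),
# not shown in this file. A minimal greedy sketch consistent with that
# call follows; treating each detection as a bb_size x bb_size box
# centered at its coordinate and suppressing by intersection over union
# are assumptions about the real implementation.
def nms_sketch(coords, probs, bb_size, inter_thr):
    if len(coords) == 0:
        return coords, probs

    coords = np.asarray(coords)
    probs = np.asarray(probs)
    order = np.argsort(-probs)  # highest probability first

    kept = []
    for idx in order:
        suppressed = False
        for kept_idx in kept:
            # overlap of two axis-aligned bb_size x bb_size boxes
            delta = np.abs(coords[idx] - coords[kept_idx])
            inter = np.prod(np.maximum(bb_size - delta, 0))
            union = 2 * bb_size * bb_size - inter
            if inter / union > inter_thr:
                suppressed = True
                break
        if not suppressed:
            kept.append(idx)

    return coords[kept], probs[kept]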
def iterative(img1,
              pts1,
              img2,
              pts2,
              descs1=None,
              descs2=None,
              euclidean_lambda=500,
              weighted=False,
              max_iter=10):
  '''
  Iteratively aligns image 'img1' to 'img2', using
  Horn's absolute orientation method to minimize
  the mean squared error between keypoint
  correspondences in sets 'pts1' and 'pts2'.
  Correspondences between keypoints are found
  with the following metric:

    d(u, v) = ||SIFT(u) - SIFT(v)||^2 + (\lambda * ||u - v||^2) / MSE

  where '\lambda' is a user-specified weight and
  'MSE' is the mean squared error from the
  previous alignment. For the first iteration,
  MSE = Inf.

  Args:
    img1: np array with image to align.
    pts1: np array with img1 keypoints, one
      keypoint coordinate per row.
    img2: np array with image to align to.
    pts2: same as pts1, but for img2.
    descs1: precomputed descriptors for img1.
    descs2: same as descs1, but for img2.
    euclidean_lambda: \lambda in the above equation.
    weighted: whether to weight correspondences by
      confidence in Horn's method; confidence
      decreases linearly with the correspondence's
      descriptor distance.
    max_iter: maximum number of iterations.

  Returns:
    A, b, s: the found alignment. For further
      information, read the _horn() documentation.
  '''
  # initialize before first alignment
  mse = np.inf
  euclidean_weight = -1
  A = np.identity(2)
  s = 1
  b = np.array([0, 0])

  # precompute sift descriptors, if not given
  if descs1 is None:
    descs1 = utils.sift_descriptors(img1, pts1, scale=8)
  if descs2 is None:
    descs2 = utils.sift_descriptors(img2, pts2, scale=8)

  # iteratively align
  for _ in range(max_iter):
    # convergence criterion: euclidean_weight was computed from the
    # previous mse, so mse * euclidean_weight approaches
    # euclidean_lambda when the mse stops changing between iterations
    if np.isclose(mse * euclidean_weight, euclidean_lambda):
      break

    # compute weight of correspondences' euclidean distance
    euclidean_weight = euclidean_lambda / (mse + 1e-5)

    # find correspondences
    pairs = utils.find_correspondences(
        descs1,
        descs2,
        pts1=pts1,
        pts2=pts2,
        euclidean_weight=euclidean_weight,
        transf=lambda x: _transf(x, A, s, b),
        thr=0.8)

    # end alignment if no further correspondences are found
    if len(pairs) <= 1:
      break

    # make correspondence aligned array
    if weighted:
      max_dist = np.max(np.asarray(pairs)[:, 2])
      w = []
      L = []
      R = []
      for pair in pairs:
        L.append(pts1[pair[0]])
        R.append(pts2[pair[1]])
        w.append((max_dist - pair[2]) / max_dist)
    else:
      w = None
      L = []
      R = []
      for pair in pairs:
        L.append(pts1[pair[0]])
        R.append(pts2[pair[1]])

    # find alignment transformation
    A, b, s = _horn(L, R, weights=w)

    # compute alignment mse
    L = np.array(L)
    R = np.array(R)
    error = R - (s * np.dot(L, A.T) + b)
    dists = np.sum(error * error, axis=1)
    mse = np.mean(dists)

    # filter points and corresponding descriptors
    # that are out of the images overlap
    pts1_ = []
    descs1_ = []
    for i, pt in enumerate(pts1):
      t_pt = _transf(pt, A, s, b)
      if _inside(img2, t_pt):
        pts1_.append(pt)
        descs1_.append(descs1[i])
    pts1 = pts1_
    descs1 = np.array(descs1_)

    # same for second set
    pts2_ = []
    descs2_ = []
    for i, pt in enumerate(pts2):
      t_pt = _inv_transf(pt, A, s, b)
      if _inside(img1, t_pt):
        pts2_.append(pt)
        descs2_.append(descs2[i])
    pts2 = pts2_
    descs2 = np.array(descs2_)

  return A, b, s
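# iterative uses the helpers _transf, _inv_transf, _inside and _horn,
# defined elsewhere. From the mse computation above
# (error = R - (s * np.dot(L, A.T) + b)), the transform helpers plausibly
# look like the sketches below; the inverse assumes A is a rotation, as
# Horn's method produces.
def _transf_sketch(pt, A, s, b):
    # forward similarity transform: t(x) = s * A x + b
    return s * np.dot(A, pt) + b


def _inv_transf_sketch(pt, A, s, b):
    # inverse transform; A orthogonal implies A^-1 = A^T
    return np.dot(A.T, pt - b) / s


def _inside_sketch(img, pt):
    # checks that a (row, col) coordinate falls inside the image bounds
    return 0 <= pt[0] < img.shape[0] and 0 <= pt[1] < img.shape[1]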