def PairDiscovery(img1Name,
                  img1Dir,
                  img2Name,
                  img2Dir,
                  model,
                  transform,
                  tolerance,
                  margin,
                  scaleImgRef,
                  scaleList,
                  eta=1e-7,
                  featLayer='conv4',
                  scoreType='ALL',
                  RefFeat=None,
                  flip=False):
    """Match a reference image against a target image and score the pair.

    Extracts conv features from the reference image, matches them against
    multi-scale features of the target image, then geometrically verifies
    the matches with the scoring strategy selected by ``scoreType``.

    Args:
        img1Name, img1Dir: file name and directory of the reference image.
        img2Name, img2Dir: file name and directory of the target image.
        model, transform: feature-extraction network and its preprocessing.
        tolerance: match tolerance in feature-grid units.
        margin: margin passed to feature extraction / matching.
        scaleImgRef: scale applied to the reference image.
        scaleList: list of scales tried on the target image.
        eta: small constant for numerical stability (default 1e-7).
        featLayer: 'conv4' (256 channels) or anything else (512 channels).
        scoreType: 'Identity', 'Hough' or 'Affine' — selects the verifier.
        RefFeat: optional feature cache forwarded to ``outils.MatchPair``.
        flip: whether to also try the horizontally flipped target.

    Returns:
        (bestScore, inlier) from the verifier, or ``(0., {})`` when fewer
        than 3 tentative matches are found.

    Raises:
        ValueError: if ``scoreType`` is not one of the supported values.
    """
    # Avoid the shared-mutable-default pitfall: a fresh dict per call unless
    # the caller supplies its own cache (MatchPair may populate it).
    if RefFeat is None:
        RefFeat = {}

    # Select the geometric-verification scorer. The original code left
    # ScorePos unbound for any other value — including the default 'ALL' —
    # which raised a NameError only after the expensive matching step.
    # Fail fast with a clear message instead.
    if scoreType == 'Identity':
        ScorePos = outils.ScorePosIdentity
    elif scoreType == 'Hough':
        ScorePos = outils.ScorePosHough
    elif scoreType == 'Affine':
        ScorePos = outils.ScorePosAffine
    else:
        raise ValueError(
            "scoreType must be 'Identity', 'Hough' or 'Affine', "
            "got {!r}".format(scoreType))

    # Network geometry constants: receptive-field stride and minimum size.
    strideNet = 16
    minNet = 15

    featChannel = 256 if featLayer == 'conv4' else 512

    img1Path = os.path.join(img1Dir, img1Name)

    I1 = Image.open(img1Path).convert('RGB')

    feat1, pilImg1W, pilImg1H, feat1W, feat1H, list1W, list1H, img1Bbox = outils.FeatImgRef(
        I1, scaleImgRef, minNet, strideNet, margin, transform, model,
        featChannel, eta)
    # Tolerance is defined in the reference image's feature grid; rescale it.
    toleranceRef = tolerance / scaleImgRef

    match1, match2, similarity, gridSize = outils.MatchPair(
        minNet, strideNet, model, transform, scaleList, feat1, feat1W, feat1H,
        img2Dir, img2Name, list1W, list1H, featChannel, tolerance, margin, eta,
        RefFeat, flip)

    # Geometric verification needs at least 3 correspondences.
    if len(match2) < 3:
        return 0., {}

    bestScore, inlier = ScorePos(match1, match2, similarity, gridSize,
                                 toleranceRef)

    return bestScore, inlier
# --- Example #2: alternative PairDiscovery variant (precomputed query features) ---
def PairDiscovery(featQuery, img2Path, model, transform, tolerance, minFeatCC,
                  margin, scaleImgRef, scaleList, houghInitial, nbSamplePoint,
                  nbIter, saveQuality, computeSaliencyCoef, out1, out2):
    """Match precomputed query features against a target image.

    Runs multi-scale matching, RANSAC geometric verification, backward
    verification and connected-component filtering, and optionally writes
    RGBA visualizations of the matched regions to ``out1``/``out2``.

    Args:
        featQuery: tuple ``(feat1, I1, pilImg1W, pilImg1H, feat1W, feat1H,
            list1W, list1H, img1Bbox)`` — precomputed reference features.
        img2Path: path of the target image.
        model, transform: feature-extraction network and its preprocessing.
        tolerance: match tolerance in feature-grid units.
        minFeatCC: minimum connected-component size kept after matching.
        margin: unused here; kept for signature compatibility with callers.
        scaleImgRef: scale applied to the reference image (rescales tolerance).
        scaleList: list of scales tried on the target image.
        houghInitial: if True, restrict RANSAC sampling to the Hough-voted
            match set; otherwise sample from all tentative matches.
        nbSamplePoint: minimum / sampled number of points per RANSAC draw.
        nbIter: number of RANSAC iterations.
        saveQuality: maximum output-image dimension for the saved masks.
        computeSaliencyCoef: unused here; kept for signature compatibility.
        out1, out2: output paths for the masked RGBA images (falsy disables
            saving).

    Returns:
        float: final matching score (0. on any early failure).
    """
    # Network geometry constants: receptive-field stride and minimum size.
    strideNet = 16
    minNet = 15

    featChannel = 256

    feat1, I1, pilImg1W, pilImg1H, feat1W, feat1H, list1W, list1H, img1Bbox = featQuery
    vote = outils.VoteMatrix(tolerance)
    # Tolerance is defined in the reference image's feature grid; rescale it.
    toleranceRef = tolerance / scaleImgRef
    I2 = Image.open(img2Path).convert('RGB')
    pilImg2W, pilImg2H = I2.size

    match1, match2, similarity, matchSetT = outils.MatchPair(
        minNet, strideNet, model, transform, scaleList, feat1, feat1W, feat1H,
        I2, list1W, list1H, featChannel, tolerance, vote)
    # Either keep the Hough-consistent subset or use every tentative match.
    matchSetT = matchSetT if houghInitial else range(len(match1))

    # Not enough matches to even draw one RANSAC sample.
    if len(matchSetT) < nbSamplePoint:
        if out1:
            SkipIteration(I1, I2, saveQuality, out1, out2)
        return 0.

    bestParams, bestScore, inlier = outils.RANSAC(nbIter, match1, match2,
                                                  matchSetT, similarity,
                                                  toleranceRef, nbSamplePoint)

    if len(bestParams) == 0:
        if out1:
            # Fixed: the original passed an undefined name `index` here
            # (NameError); use (out1, out2) like every sibling call site.
            SkipIteration(I1, I2, saveQuality, out1, out2)
        return 0.

    feat2W, feat2H = outils.FeatSizeImgTarget(bestParams, feat1W, feat1H)

    # Reject degenerate or absurdly large estimated target feature maps.
    if feat2W == 0 or feat2H == 0 or feat2W >= 1000 or feat2H >= 1000:
        if out1:
            SkipIteration(I1, I2, saveQuality, out1, out2)
        return 0.

    match1, match2, score = outils.BackwardVerification(
        feat2W, feat2H, feat1W, feat1H, inlier)

    # Full-resolution masks, initialized to a dim (100/255) background.
    finalMask1 = np.ones(
        (img1Bbox[3] - img1Bbox[1], img1Bbox[2] - img1Bbox[0]),
        dtype=np.uint8) * 100
    finalMask2 = np.ones((pilImg2H, pilImg2W), dtype=np.uint8) * 100

    # Feature-grid-resolution masks for connected-component analysis.
    mask2 = np.zeros((feat2W, feat2H))
    mask1 = np.zeros((feat1W, feat1H))

    # Drop matches that do not belong to a large-enough connected component.
    match1, match2, score = outils.KeepOnlyLargeCC(match1, match2, mask1,
                                                   mask2, minFeatCC, score)
    if len(match1) == 0:
        if out1:
            SkipIteration(I1, I2, saveQuality, out1, out2)
        return 0.

    _, score = outils.GetCC(match1, match2, mask1, mask2, score)
    sumScore = np.sum(score)
    # Normalize by the number of reference features.
    finalScore = sumScore / float(feat1.size()[1])

    if out1:
        # Visualization: dilate the match sets, paint the matched cells,
        # upscale the masks to image resolution and save RGBA outputs.
        match1, match2 = np.array(outils.ExtendRemove(match1)), np.array(
            outils.ExtendRemove(match2))

        mask1[match1[:, 0], match1[:, 1]] = 1
        mask2[match2[:, 0], match2[:, 1]] = 1

        mask1 = imresize(mask1,
                         (finalMask1.shape[0], finalMask1.shape[1])) / 128 > 0
        mask2 = imresize(mask2,
                         (finalMask2.shape[0], finalMask2.shape[1])) / 128 > 0

        finalMask1[mask1] = 255
        finalMask2[mask2] = 255

        I1RGBA = cv2.cvtColor(np.array(I1), cv2.COLOR_RGBA2BGRA)
        I2RGBA = cv2.cvtColor(np.array(I2), cv2.COLOR_RGBA2BGRA)

        mask1Index = finalMask1 > 0
        mask2Index = finalMask2 > 0

        # Paste the cropped reference mask back into full-image coordinates.
        mask1 = np.ones((I1RGBA.shape[0], I1RGBA.shape[1])) * 100
        mask1[img1Bbox[1]:img1Bbox[3], img1Bbox[0]:img1Bbox[2]] = finalMask1
        mask2 = finalMask2

        mask1, mask2 = mask1.astype(np.uint8), mask2.astype(np.uint8)
        I1RGBA[:, :, 3] = mask1
        I2RGBA[:, :, 3] = mask2

        # Downscale so the largest side does not exceed saveQuality.
        ratio1 = max(
            max(I1RGBA.shape[0], I1RGBA.shape[1]) / float(saveQuality), 1)
        I1RGBA = imresize(
            I1RGBA,
            (int(I1RGBA.shape[0] / ratio1), int(I1RGBA.shape[1] / ratio1)))

        ratio2 = max(I2RGBA.shape[0], I2RGBA.shape[1]) / float(saveQuality)
        I2RGBA = imresize(
            I2RGBA,
            (int(I2RGBA.shape[0] / ratio2), int(I2RGBA.shape[1] / ratio2)))

        # Soften mask edges before writing.
        I1RGBA[:, :, 3] = BlurMask(I1RGBA[:, :, 3]).astype(np.uint8)
        I2RGBA[:, :, 3] = BlurMask(I2RGBA[:, :, 3]).astype(np.uint8)

        cv2.imwrite(out1, I1RGBA)
        cv2.imwrite(out2, I2RGBA)

    return finalScore