Code Example #1
# Imports assumed from the surrounding project: skimage for I/O and color
# conversion, plus the course's student module; evaluate_correspondence is
# assumed to live in a project-local helpers module.
from skimage import io, img_as_float32
from skimage.transform import rescale
from skimage.color import rgb2gray

import student
from helpers import evaluate_correspondence


def memfunc():

    # Note: these files default to Notre Dame, unless otherwise specified
    image1_file = "../data/NotreDame/NotreDame1.jpg"
    image2_file = "../data/NotreDame/NotreDame2.jpg"
    eval_file = "../data/NotreDame/NotreDameEval.mat"

    scale_factor = 0.5
    feature_width = 16

    image1 = img_as_float32(
        rescale(rgb2gray(io.imread(image1_file)), scale_factor))
    image2 = img_as_float32(
        rescale(rgb2gray(io.imread(image2_file)), scale_factor))

    (x1, y1) = student.get_interest_points(image1, feature_width)
    (x2, y2) = student.get_interest_points(image2, feature_width)

    image1_features = student.get_features(image1, x1, y1, feature_width)
    image2_features = student.get_features(image2, x2, y2, feature_width)

    matches, confidences = student.match_features(image1_features,
                                                  image2_features)

    evaluate_correspondence(image1, image2, eval_file, scale_factor, x1, y1,
                            x2, y2, matches, confidences, 0)
Code Example #2
# Imports assumed as in Code Example #1.
from skimage import io, img_as_float32
from skimage.transform import rescale
from skimage.color import rgb2gray

import student
from helpers import evaluate_correspondence


def memfunc():
    # Note: these files default to Notre Dame, unless otherwise specified
    # image1_file = "../data/NotreDame/NotreDame1.jpg"
    # image2_file = "../data/NotreDame/NotreDame2.jpg"
    # eval_file = "../data/NotreDame/NotreDameEval.mat"

    image1_file = "../data/EpiscopalGaudi/EGaudi_1.jpg"
    image2_file = "../data/EpiscopalGaudi/EGaudi_1.jpg"
    eval_file = "../data/EpiscopalGaudi/EGaudiEval.mat"

    #image1_file = "../data/MountRushmore/Mount_Rushmore1.jpg"
    #image2_file = "../data/MountRushmore/Mount_Rushmore2.jpg"
    #eval_file = "../data/MountRushmore/MountRushmoreEval.mat"

    scale_factor = 0.5
    feature_width = 16

    image1 = img_as_float32(
        rescale(rgb2gray(io.imread(image1_file)), scale_factor))
    image2 = img_as_float32(
        rescale(rgb2gray(io.imread(image2_file)), scale_factor))

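    # The six extra positional arguments after feature_width are tuning knobs of
    # this project's particular get_interest_points signature (0.06 is plausibly
    # the Harris corner constant k, and the final argument a cap on the number of
    # points); this reading is an assumption, as the signature isn't shown.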
    (x1, y1) = student.get_interest_points(image1, feature_width, 0.4, 0.06, 0,
                                           0, 0, 400)
    (x2, y2) = student.get_interest_points(image2, feature_width, 5, 0.06, 0,
                                           0, 0, 500)

    image1_features = student.get_features(image1, x1, y1, feature_width)
    image2_features = student.get_features(image2, x2, y2, feature_width)

    matches, confidences = student.match_features(image1_features,
                                                  image2_features)

    evaluate_correspondence(image1, image2, eval_file, scale_factor, x1, y1,
                            x2, y2, matches, confidences, 0)
Code Example #3
def find_matches(student, image1, image2, eval_file, feature_width=16):
    # feature_width is taken as a parameter (default 16 pixels, matching the
    # other examples) so it is not left as an undefined free variable;
    # eval_file is accepted for interface compatibility but unused here.

    (x1, y1) = student.get_interest_points(image1, feature_width)
    (x2, y2) = student.get_interest_points(image2, feature_width)

    image1_features = student.get_features(image1, x1, y1, feature_width)
    image2_features = student.get_features(image2, x2, y2, feature_width)

    matches, confidences = student.match_features(image1_features,
                                                  image2_features)

    return x1, y1, x2, y2, matches, confidences
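
A hypothetical usage of find_matches, reusing the loading pipeline from Code Example #1 (the student module, file paths, and scale factor are assumptions carried over from the other examples):

import student
from skimage import io, img_as_float32
from skimage.transform import rescale
from skimage.color import rgb2gray

scale_factor = 0.5
image1 = img_as_float32(
    rescale(rgb2gray(io.imread("../data/NotreDame/NotreDame1.jpg")), scale_factor))
image2 = img_as_float32(
    rescale(rgb2gray(io.imread("../data/NotreDame/NotreDame2.jpg")), scale_factor))

x1, y1, x2, y2, matches, confidences = find_matches(
    student, image1, image2, "../data/NotreDame/NotreDameEval.mat")
print("Found " + str(matches.shape[0]) + " matches")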
Code Example #4
# Imports assumed for this script: argparse, numpy, matplotlib, and skimage,
# plus the course's student module; load_data, cheat_interest_points, and
# evaluate_correspondence are assumed to live in a project-local helpers module.
import argparse

import matplotlib.pyplot as plt
import numpy as np
from skimage.transform import rescale
from skimage.color import rgb2gray

import student
from helpers import cheat_interest_points, evaluate_correspondence, load_data


def main():
    """
    Reads in the data, matches features between the requested image pair, and
    evaluates the resulting correspondences.

    Command line usage: python main.py -p | --pair <image pair name>

    -p | --pair - flag - required. Specifies which image pair to match

    """

    # create the command line parser
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-p",
        "--pair",
        required=True,
        help=
        "Either notre_dame, mt_rushmore, or e_gaudi. Specifies which image pair to match"
    )

    args = parser.parse_args()

    print(args)

    # (1) Load in the data
    image1_color, image2_color, eval_file = load_data(args.pair)

    # You don't have to work with grayscale images. Matching with color
    # information might be helpful. If you choose to work with RGB images, just
    # comment out these two lines

    image1 = rgb2gray(image1_color)
    # Our own rgb2gray coefficients, which match the Rec. ITU-R BT.601-7 (NTSC)
    # luminance conversion - only minor performance improvements, and could be
    # confusing to students:
    # image1 = image1[:,:,0] * 0.2989 + image1[:,:,1] * 0.5870 + image1[:,:,2] * 0.1140
    image2 = rgb2gray(image2_color)
    # image2 = image2[:,:,0] * 0.2989 + image2[:,:,1] * 0.5870 + image2[:,:,2] * 0.1140
    # Make images smaller to speed up the algorithm. This parameter
    # gets passed into the evaluation code, so don't resize the images
    # except by changing this parameter - we will evaluate your code using
    # scale_factor = 0.5, so be aware of this
    scale_factor = 0.5

    # Bilinear rescaling
    image1 = np.float32(rescale(image1, scale_factor))
    image2 = np.float32(rescale(image2, scale_factor))

    # width and height of each local feature, in pixels
    feature_width = 16

    # (2) Find distinctive points in each image. See Szeliski 4.1.1
    # !!! You will need to implement get_interest_points. !!!

    print("Getting interest points...")

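    # The extra positional arguments after feature_width (2 / 2.5 and 0.06) are
    # tuning parameters of this project's get_interest_points signature - 0.06
    # is plausibly the Harris corner constant k; this reading is an assumption,
    # as the signature isn't shown.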
    (x1, y1) = student.get_interest_points(image1, feature_width, 2, 0.06)
    (x2, y2) = student.get_interest_points(image2, feature_width, 2.5, 0.06)

    # For development and debugging of get_features and match_features, you will
    # likely want to use the TA ground truth points. To do so, comment out the
    # preceding two lines and uncomment the following line. Note that the ground
    # truth points for Mt. Rushmore will not produce good results, so you'll have
    # to use your own function for that image pair.

    # (x1, y1, x2, y2) = cheat_interest_points(eval_file, scale_factor)

    # the next lines visualize your corners; comment them out if you don't want the plots!

    plt.imshow(image1, cmap="gray")
    plt.scatter(x1, y1, s=20, facecolors='none', edgecolors='b')
    plt.scatter(x1, y1, alpha=0.5, s=0.5)
    plt.show()

    plt.imshow(image2, cmap="gray")
    plt.scatter(x2, y2, alpha=0.9, s=3)
    plt.show()

    print("Done!")

    # 3) Create feature vectors at each interest point. Szeliski 4.1.2
    # !!! You will need to implement get_features. !!!

    print("Getting features...")

    image1_features = student.get_features(image1, x1, y1, feature_width)
    image2_features = student.get_features(image2, x2, y2, feature_width)

    print("Done!")

    # 4) Match features. Szeliski 4.1.3
    # !!! You will need to implement match_features !!!

    print("Matching features...")

    matches, confidences = student.match_features(image1_features,
                                                  image2_features)

    print("Done!")

    # 5) Evaluation and visualization

    # The last thing to do is to check how your code performs on the image pairs
    # we've provided. The evaluate_correspondence function below will print out
    # the accuracy of your feature matching for your 50 most confident matches,
    # 100 most confident matches, and all your matches. It will then visualize
    # the matches by drawing green lines between points for correct matches and
    # red lines for incorrect matches. The visualizer will show the top
    # num_pts_to_visualize most confident matches, so feel free to change the
    # parameter to whatever you like.

    print("Matches: " + str(matches.shape[0]))

    num_pts_to_visualize = 50

    evaluate_correspondence(image1_color, image2_color, eval_file,
                            scale_factor, x1, y1, x2, y2, matches, confidences,
                            num_pts_to_visualize, args.pair + '_matches.jpg')
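
Since the comments above point at Szeliski 4.1.3 for match_features, here is a minimal sketch of the nearest-neighbor distance ratio (NNDR) test it is typically built on. This is an illustrative implementation, not the course solution; it assumes features are row vectors and each feature set has at least two entries:

import numpy as np

def match_features_sketch(features1, features2, ratio_threshold=0.8):
    # Pairwise Euclidean distances between the two feature sets.
    dists = np.sqrt(np.maximum(
        np.sum(features1**2, axis=1, keepdims=True)
        - 2 * features1 @ features2.T
        + np.sum(features2**2, axis=1), 0))
    # Nearest and second-nearest neighbors in image 2 for each feature in image 1.
    order = np.argsort(dists, axis=1)
    nearest, second = order[:, 0], order[:, 1]
    rows = np.arange(dists.shape[0])
    # NNDR: keep a match only if its best distance is clearly smaller than the
    # second best; use the inverse ratio as the confidence.
    ratios = dists[rows, nearest] / (dists[rows, second] + 1e-10)
    keep = ratios < ratio_threshold
    matches = np.stack([rows[keep], nearest[keep]], axis=1)
    confidences = 1.0 / (ratios[keep] + 1e-10)
    return matches, confidences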
Code Example #5
File: main.py Project: xieyushansun/cs143_projects
# Imports assumed as in Code Example #4, plus the project's visualize module
# referenced in the comments below.
import argparse

import numpy as np
from skimage.transform import rescale
from skimage.color import rgb2gray

import student
import visualize
from helpers import cheat_interest_points, evaluate_correspondence, load_data


def main():
    """
    Reads in the data, matches features between the requested image pair, and
    evaluates and visualizes the resulting correspondences.

    Command line usage: python main.py [-a | --average_accuracy] -p | --pair <image pair name>

    -a | --average_accuracy - flag - if specified, will compute your solution's
    average accuracy on the (1) Notre Dame, (2) Mt. Rushmore, and (3) Episcopal
    Gaudi image pairs

    -p | --pair - flag - required. Specifies which image pair to match

    """

    # create the command line parser
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-a",
        "--average_accuracy",
        action="store_true",  # a boolean flag, as described in the docstring
        help=
        "Include this flag to compute the average accuracy of your matching.")
    parser.add_argument(
        "-p",
        "--pair",
        required=True,
        help=
        "Either notre_dame, mt_rushmore, or e_gaudi. Specifies which image pair to match"
    )

    args = parser.parse_args()

    # (1) Load in the data
    image1, image2, eval_file = load_data(args.pair)

    # You don't have to work with grayscale images. Matching with color
    # information might be helpful. If you choose to work with RGB images, just
    # comment out these two lines
    image1 = rgb2gray(image1)
    image2 = rgb2gray(image2)

    # Make images smaller to speed up the algorithm. This parameter
    # gets passed into the evaluation code, so don't resize the images
    # except by changing this parameter - we will evaluate your code using
    # scale_factor = 0.5, so be aware of this
    scale_factor = 0.5

    # Bilinear rescaling
    image1 = np.float32(rescale(image1, scale_factor))
    image2 = np.float32(rescale(image2, scale_factor))

    # width and height of each local feature, in pixels
    feature_width = 16

    # (2) Find distinctive points in each image. See Szeliski 4.1.1
    # !!! You will need to implement get_interest_points. !!!

    print("Getting interest points...")

    # For development and debugging of get_features and match_features, you will
    # likely want to use the TA ground truth points. To do so, uncomment the
    # following line and comment out the two get_interest_points calls below it.

    #(x1, y1, x2, y2) = cheat_interest_points(eval_file, scale_factor)

    (x1, y1) = student.get_interest_points(image1, feature_width)
    (x2, y2) = student.get_interest_points(image2, feature_width)

    # if you want to view your corners uncomment these next lines!

    # plt.imshow(image1, cmap="gray")
    # plt.scatter(x1, y1, alpha=0.9, s=3)
    # plt.show()

    # plt.imshow(image2, cmap="gray")
    # plt.scatter(x2, y2, alpha=0.9, s=3)
    # plt.show()

    print("Done!")

    # 3) Create feature vectors at each interest point. Szeliski 4.1.2
    # !!! You will need to implement get_features. !!!

    print("Getting features...")
    image1_features = student.get_features(image1, x1, y1, feature_width)
    image2_features = student.get_features(image2, x2, y2, feature_width)

    print("Done!")

    # 4) Match features. Szeliski 4.1.3
    # !!! You will need to implement match_features !!!

    print("Matching features...")
    matches, confidences = student.match_features(image1_features,
                                                  image2_features)

    if len(matches.shape) == 1:
        print("No matches!")
        return

    print("Done!")

    # 5) Visualization

    # You might want to do some preprocessing of your interest points and matches
    # before visualizing (e.g. only visualizing 100 interest points). Once you
    # start detecting hundreds of interest points, the visualization can become
    # crowded. You may also want to threshold based on confidence

    # visualize.show_correspondences produces a figure that shows your matches
    # overlaid on the image pairs. evaluate_correspondence computes some statistics
    # about the quality of your matches, then shows the same figure. If you want to
    # just see the figure, you can uncomment the call to visualize.show_correspondences

    num_pts_to_visualize = matches.shape[0]
    print("Matches: " + str(num_pts_to_visualize))
    # visualize.show_correspondences(image1, image2, x1, y1, x2, y2, matches, filename=args.pair + "_matches.jpg")

    ## 6) Evaluation
    # This evaluation function will only work for the Notre Dame, Episcopal
    # Gaudi, and Mount Rushmore image pairs. Comment out this function if you
    # are not testing on those image pairs. Only those pairs have ground truth
    # available.
    #
    # It also only evaluates your top 100 matches by the confidences
    # that you provide.
    #
    # Within evaluate_correspondence(), we sort your matches in descending order
    #
    num_pts_to_evaluate = matches.shape[0]

    evaluate_correspondence(image1, image2, eval_file, scale_factor, x1, y1,
                            x2, y2, matches, confidences, num_pts_to_evaluate)

    return
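
The interest-point step above cites Szeliski 4.1.1; a minimal Harris corner sketch of what get_interest_points might look like is given below. This is an assumption-laden illustration, not the course's implementation (k = 0.06 mirrors the constant seen in the calls above, and the threshold and window sizes are arbitrary choices):

import numpy as np
from scipy.ndimage import gaussian_filter, maximum_filter

def get_interest_points_sketch(image, feature_width, sigma=1.0, k=0.06):
    # Image gradients via finite differences (axis 0 is y, axis 1 is x).
    dy, dx = np.gradient(image)
    # Entries of the second-moment matrix, smoothed with a Gaussian window.
    sxx = gaussian_filter(dx * dx, sigma)
    syy = gaussian_filter(dy * dy, sigma)
    sxy = gaussian_filter(dx * dy, sigma)
    # Harris response: det(M) - k * trace(M)^2.
    response = sxx * syy - sxy ** 2 - k * (sxx + syy) ** 2
    # Zero out the border, where a full feature window won't fit.
    border = feature_width // 2
    response[:border, :] = 0
    response[-border:, :] = 0
    response[:, :border] = 0
    response[:, -border:] = 0
    # Keep local maxima above a small fraction of the strongest response.
    local_max = response == maximum_filter(response, size=3)
    ys, xs = np.where(local_max & (response > 0.01 * response.max()))
    return xs, ys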
Code Example #6
File: test.py Project: QingYuAnWayne/homework_set_2
from skimage import io, filters, feature, img_as_float32
from skimage.transform import rescale
from skimage.color import rgb2gray
import helpers
import student

feature_width = 16
scale_factor = 0.5
image1_file = "../data/NotreDame/NotreDame1.jpg"
image1_color = img_as_float32(io.imread(image1_file))
image1 = rgb2gray(image1_color)
image2_file = "../data/NotreDame/NotreDame2.jpg"
image2_color = img_as_float32(io.imread(image2_file))
image2 = rgb2gray(image2_color)
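# Note: the absolute path below is machine-specific; a relative path like the
# "../data/NotreDame/..." ones above would be more portable.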
x1, y1, x2, y2 = helpers.cheat_interest_points(
    '/Users/qingyuan/Desktop/大三下/CV/homework_set_2/data/NotreDame/NotreDameEval.mat',
    scale_factor)
feature1 = student.get_features(image1, x1, y1, feature_width)
feature2 = student.get_features(image2, x2, y2, feature_width)
match, confidence = student.match_features(feature1, feature2)
print("test success!")