Code Example #1
File: ar.py  Project: mishra39/16720_ComputerVision
import cv2
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

def ar_func(ind, ar_src, cv_cover, cv_cov_vid, opts):
    # Crop the source frame vertically and resize it to the cover height.
    im = ar_src[44:311, :, :]
    resize_im = cv2.resize(im, (im.shape[1], cv_cover.shape[0]))
    # Match the cover against the video frame and keep matched keypoints only.
    matches, locs1, locs2 = matchPics(cv_cover, cv_cov_vid, opts)
    locs1 = locs1[matches[:, 0], 0:2]
    locs2 = locs2[matches[:, 1], 0:2]
    bestH2to1, inliers = computeH_ransac(locs1, locs2, opts)
    # Center-crop the resized frame to the cover width before compositing.
    half_w = cv_cover.shape[1] // 2
    center = resize_im.shape[1] // 2
    cropped_im = resize_im[:, center - half_w:center + half_w, :]
    composite_img = compositeH(bestH2to1, cropped_im, cv_cov_vid)
    return composite_img
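All of these examples finish by calling a compositeH(H2to1, template, img) helper from the homework's planarH module, which none of them show. As a rough sketch of what such a helper might do, assuming it simply warps the template with cv2.warpPerspective and pastes it into the frame wherever the warped mask is set (an assumption for illustration, not code taken from these projects):

import cv2
import numpy as np

def compositeH(H2to1, template, img):
    # Sketch only: warp the template into the target frame with the estimated
    # homography, then overwrite the frame wherever the warped mask is nonzero.
    h, w = img.shape[:2]
    mask = np.full(template.shape[:2], 255, dtype=np.uint8)
    warped_mask = cv2.warpPerspective(mask, H2to1, (w, h))
    warped_template = cv2.warpPerspective(template, H2to1, (w, h))
    composite = img.copy()
    composite[warped_mask > 0] = warped_template[warped_mask > 0]
    return composite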
Code Example #2
import cv2
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

def func_result(cv_cover, frame, ar_f, opts):
    matches, locs1, locs2 = matchPics(cv_cover, frame, opts)
    x1 = locs1[matches[:, 0], 0:2]
    x2 = locs2[matches[:, 1], 0:2]

    H2to1, inliers = computeH_ransac(x1, x2, opts)
    # Crop the AR frame vertically, then resize it so its height matches the
    # cover while preserving the aspect ratio.
    ar_f = ar_f[45:310, :, :]
    cover_width = cv_cover.shape[1]
    width = int(ar_f.shape[1] / ar_f.shape[0] * cv_cover.shape[0])

    resized_ar = cv2.resize(ar_f, (width, cv_cover.shape[0]),
                            interpolation=cv2.INTER_AREA)
    h, w, d = resized_ar.shape
    # Center-crop the resized frame to the cover width.
    cropped_ar = resized_ar[:,
                            int(w / 2) - int(cover_width / 2):int(w / 2) +
                            int(cover_width / 2), :]

    result = compositeH(H2to1, cropped_ar, frame)

    return result
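Both examples above unpack computeH_ransac(locs1, locs2, opts) into a best homography and an inlier set, but the estimator itself is defined elsewhere. A minimal RANSAC sketch, assuming opts exposes max_iters and inlier_tol fields and using a plain DLT solve (both assumptions for illustration, not taken from these projects):

import numpy as np

def computeH(x1, x2):
    # Direct linear transform: solve for H such that x1 ~ H @ x2 (homogeneous).
    A = []
    for (u, v), (x, y) in zip(x1, x2):
        A.append([-x, -y, -1, 0, 0, 0, u * x, u * y, u])
        A.append([0, 0, 0, -x, -y, -1, v * x, v * y, v])
    _, _, Vt = np.linalg.svd(np.asarray(A, dtype=float))
    return Vt[-1].reshape(3, 3)

def computeH_ransac(locs1, locs2, opts):
    # Sketch only: sample 4 correspondences per iteration and keep the
    # homography with the largest inlier count.
    n = locs1.shape[0]
    locs2_h = np.hstack([locs2, np.ones((n, 1))])
    best_H, best_inliers = np.eye(3), np.zeros(n, dtype=bool)
    for _ in range(opts.max_iters):  # assumed option name
        idx = np.random.choice(n, 4, replace=False)
        H = computeH(locs1[idx], locs2[idx])
        proj = (H @ locs2_h.T).T
        proj = proj[:, :2] / proj[:, 2:3]
        inliers = np.linalg.norm(proj - locs1, axis=1) < opts.inlier_tol  # assumed option name
        if inliers.sum() > best_inliers.sum():
            best_H, best_inliers = H, inliers
    return best_H, best_inliers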
Code Example #3
import numpy as np
import cv2
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

def createVideoFiles(args):
    opts, numframes, ar_source, book, cv_cover, bookdim = args

    for i in range(numframes):
        bookframe = book[i, :, :, :]
        pandaframe = ar_source[i, :, :]
        # Crop the panda frame to the region of interest and resize it to the
        # book-cover dimensions.
        panda_crop = pandaframe[40:320, 208:431, :]
        panda = cv2.resize(panda_crop, bookdim, interpolation=cv2.INTER_AREA)

        matches, locs1, locs2 = matchPics(cv_cover, bookframe, opts)

        # plotMatches(cv_cover, bookframe, matches, locs1, locs2)
        # Keep the matched keypoints and swap (row, col) to (x, y).
        locs1 = locs1[matches[:, 0]]
        locs2 = locs2[matches[:, 1]]
        locs1[:, [1, 0]] = locs1[:, [0, 1]]
        locs2[:, [1, 0]] = locs2[:, [0, 1]]

        H2to1, inliers = computeH_ransac(locs1, locs2, opts)

        image = compositeH(H2to1, panda, bookframe)
        np.save("../test1/img{}.npy".format(i), image)
Code Example #4
import cv2
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

def createvideo(panda, book_frames, edge_crop, x_start, x_end, cv_cover,
                opts, ar_avi):
    # opts and the video writer ar_avi are passed in rather than read from
    # globals, and the loop runs over the available frames.
    for idx in range(len(book_frames)):
        panda_img = panda[idx]
        # crop the panda frame
        panda_img = panda_img[edge_crop:-edge_crop, x_start:x_end]
        panda_img = cv2.resize(panda_img,
                               (cv_cover.shape[1], cv_cover.shape[0]))
        # book frame
        book_img = book_frames[idx]

        matches, locs1, locs2 = matchPics(book_img, cv_cover, opts)
        pair1 = locs1[matches[:, 0]]
        pair2 = locs2[matches[:, 1]]
        homography, inliers = computeH_ransac(pair1, pair2, opts)
        # panda_warped = cv2.warpPerspective(panda_img, homography,
        #                                    (book_img.shape[1], book_img.shape[0]))
        # mask = compositeH(homography, book_img, panda_warped)
        # Composite the resized panda (template) onto the book frame.
        merge_frame = compositeH(homography, panda_img, book_img)

        # merge frame
        # merge_frame = (1 - mask) * book_img + mask * panda_warped
        # print(merge_frame.shape)
        ar_avi.write(merge_frame)
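The loop above writes each composited frame to ar_avi, a video writer the snippet never creates. A possible setup, with the output path, codec, frame rate, and frame size chosen here purely for illustration:

import cv2

# Hypothetical writer for the loop above; the path, codec, fps and frame size
# are illustrative choices, not taken from the project.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_h, frame_w = book_frames[0].shape[:2]
ar_avi = cv2.VideoWriter('../result/ar.avi', fourcc, 25, (frame_w, frame_h))
createvideo(panda, book_frames, edge_crop, x_start, x_end, cv_cover, opts, ar_avi)
ar_avi.release()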
Code Example #5
File: panorama.py  Project: alexwume/CV
import cv2
import imutils
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac

#Write script for Q2.2.4
opts = get_opts()
#read in images
img_left = cv2.imread('../data/pano_left.jpg')
img_right = cv2.imread('../data/pano_right.jpg')
print(img_left.shape)
print(img_right.shape)
img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2RGB)
img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2RGB)

matches, locs1, locs2 = matchPics(img_left, img_right, opts)
pair1 = locs1[matches[:, 0]]
pair2 = locs2[matches[:, 1]]
# matchPics returns (row, col); swap to (x, y) before estimating the homography.
pair1[:, [0, 1]] = pair1[:, [1, 0]]
pair2[:, [0, 1]] = pair2[:, [1, 0]]
homography, inliers = computeH_ransac(pair1, pair2, opts)
right_warped = cv2.warpPerspective(img_right, homography,
                                   (img_left.shape[1], img_left.shape[0]))

images = []
images.append(img_left)
images.append(img_right)

stitcher = cv2.createStitcher() if imutils.is_cv3() else cv2.Stitcher_create()
(status, stitched) = stitcher.stitch(images)

if status == 0:
    # write the output stitched image to disk
    # display the output stitched image to our screen
    cv2.imshow("Stitched", stitched)
    cv2.waitKey(0)
Code Example #6
import numpy as np
import cv2
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac, compositeH
from helper import plotMatches
from matplotlib import pyplot as plt

#Import necessary functions

#Write script for Q2.2.4
opts = get_opts()

cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')

matches, locs1, locs2 = matchPics(cv_cover, cv_desk, opts)

plotMatches(cv_cover, cv_desk, matches, locs1, locs2)
locs1 = locs1[matches[:, 0]]
locs2 = locs2[matches[:, 1]]
locs1[:, [1, 0]] = locs1[:, [0, 1]]
locs2[:, [1, 0]] = locs2[:, [0, 1]]

print(np.shape(locs1))

H2to1, inliers = computeH_ransac(locs1, locs2, opts)

template = cv2.resize(hp_cover, (cv_cover.shape[1], cv_cover.shape[0]),
                      interpolation=cv2.INTER_AREA)

image = compositeH(H2to1, template, cv_desk)

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))  # OpenCV loads BGR; matplotlib expects RGB
plt.axis('off')
plt.show()
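None of the snippets show matchPics itself, which in the handout is a BRIEF-descriptor pipeline. Purely to illustrate the (matches, locs1, locs2) interface these scripts consume, a swapped-in matcher built on OpenCV's ORB features (not the handout's method) could look like this; locs are returned as (row, col) to mirror the coordinate swaps seen above:

import cv2
import numpy as np

def match_pics_orb(im1, im2, n_features=1000):
    # Illustration only: ORB keypoints + brute-force Hamming matching, shaped
    # like the handout's matchPics output.
    orb = cv2.ORB_create(nfeatures=n_features)
    g1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    g2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
    kp1, des1 = orb.detectAndCompute(g1, None)
    kp2, des2 = orb.detectAndCompute(g2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    raw = bf.match(des1, des2)
    locs1 = np.array([kp.pt[::-1] for kp in kp1])  # (row, col)
    locs2 = np.array([kp.pt[::-1] for kp in kp2])
    matches = np.array([[m.queryIdx, m.trainIdx] for m in raw])
    return matches, locs1, locs2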
Code Example #7
import cv2
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac, compositeH
from helper import plotMatches

opts = get_opts()
cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
#print(cv_desk.shape)
hp_cover = cv2.imread('../data/hp_cover.jpg')
#print(hp_cover.shape)

matches1, locs1, locs2 = matchPics(cv_desk, cv_cover, opts)  # match between the first two images

# Swap (row, col) keypoint coordinates to (x, y).
locs1[:, [0, 1]] = locs1[:, [1, 0]]
locs2[:, [0, 1]] = locs2[:, [1, 0]]

plotMatches(cv_desk, cv_cover, matches1, locs1, locs2)

print('locs1m1 shape:', locs1.shape)
print('locs2m1 shape:', locs2.shape)
print('matches:', matches1.shape)

bestH2to1, inliers = computeH_ransac(locs1[matches1[:, 0]], locs2[matches1[:, 1]], opts)

#print('Best :', bestH2to1)
dim = (cv_cover.shape[1], cv_cover.shape[0])
hp_cover = cv2.resize(hp_cover, dim)

composite_img = compositeH(bestH2to1, hp_cover, cv_desk)
print("Shape of composite image:", composite_img.shape)

cv2.imwrite('../data/warp_exp1.jpg', composite_img)
Code Example #8
import numpy as np
import cv2
from opts import get_opts

#Import necessary functions
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

#Write script for Q2.2.4
opts = get_opts()
cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')

matches, locs1, locs2 = matchPics(np.transpose(cv_cover, (1, 0, 2)),
                                  np.transpose(cv_desk, (1, 0, 2)), opts)
bestH2to1, inliers = computeH_ransac(matches, locs1, locs2, opts)
composite_img = compositeH(bestH2to1, hp_cover, cv_desk, cv_cover)
Code Example #9
File: ar.py  Project: dbxwong/Computer-Vision
import numpy as np
import cv2
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

cv_cover = cv2.imread('../data/cv_cover.jpg')

for frame in range(book_source.shape[0]):
    # assign each video frame to variable: book_frame
    print(frame)  # print frame number to track processing
    book_frame = book_source[frame]

    # compute matches
    matches, locs1, locs2 = matchPics(book_frame, cv_cover, opts)

    # keep matched keypoints only
    matched_locs1 = np.array([locs1[i] for i in matches[:, 0]])
    matched_locs2 = np.array([locs2[i] for i in matches[:, 1]])

    # compute RANSAC homography (the project notes its own computeH_ransac was
    # buggy and unable to produce output)
    bestH2to1, inliers = computeH_ransac(matched_locs1, matched_locs2, opts)
    # bestH2to1, inliers = cv2.findHomography(matched_locs1, matched_locs2, cv2.RANSAC, 5.0)  # for debugging

    # crop and resize the panda frame to the cover's aspect ratio and size
    width_newPanda = int(cv_cover.shape[1] / cv_cover.shape[0] *
                         panda_source.shape[1])  # w x h
    crop = int(panda_source.shape[2] - width_newPanda) // 2
    cropped_panda = panda_source[frame][:, crop:crop + width_newPanda, :]
    resized_panda = cv2.resize(cropped_panda,
                               (cv_cover.shape[1], cv_cover.shape[0]))

    # compose composite pic
    compositeFrame = compositeH(bestH2to1, resized_panda, book_frame)

    # write composite pic into output video
Code Example #10
import numpy as np
import cv2
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac
from helper import plotMatches
import matplotlib.pyplot as plt

matches = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
#Write script for Q2.2.4
opts = get_opts()
#read in images
cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')
hp_cover = cv2.cvtColor(hp_cover,cv2.COLOR_BGR2RGB)
cv_desk = cv2.cvtColor(cv_desk,cv2.COLOR_BGR2RGB)

x_desk = np.array([[240, 163, 577, 498],[194,493,485,190]]).T
x_cov = np.array([[0,0,349,349],[0,439,439,0]]).T


homography, inliers = computeH_ransac(x_desk, x_cov, opts)
print(homography)
x_desk_homo = np.array([[240, 163, 577, 498],[194,493,485,190],[1,1,1,1]],dtype=float)
x_cov_homo = np.array([[0,0,349,349],[0,439,439,0],[1,1,1,1]], dtype=float)
# projected_x_cover = homography @ x_cov_homo
# projected_x_cover = projected_x_cover * (1 / projected_x_cover[2, :])
# print(projected_x_cover[:2,:])

hp_warped = cv2.warpPerspective(cv_cover,homography,(cv_desk.shape[1],cv_desk.shape[0]))
plt.imshow(hp_warped)
plt.show()
Code Example #11
import numpy as np
import cv2
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac
from matplotlib import pyplot as plt


# Write script for Q4.2x
def generate_panorama(img1, img2, H2to1):
    H, W = img1.shape[:2]
    warpped_img2 = cv2.warpPerspective(img2, H2to1, (W + 667, H))

    img1 = np.hstack((img1, np.zeros((H, 667, 3)))).astype('uint8')
    panorama = np.maximum(img1, warpped_img2)
    plt.imshow(cv2.cvtColor(panorama, cv2.COLOR_BGR2RGB))
    plt.show()
    return panorama


opts = get_opts()
pano_left = cv2.imread('pano_left.jpg')
pano_right = cv2.imread('pano_right.jpg')

matches, locs1, locs2 = matchPics(pano_left, pano_right, opts)
x1 = locs1[matches[:, 0], :]
x2 = locs2[matches[:, 1], :]
x1[:, [0, 1]] = x1[:, [1, 0]]
x2[:, [0, 1]] = x2[:, [1, 0]]
bestH2to1, inliers = computeH_ransac(x1, x2, opts)

panorama = generate_panorama(pano_left, pano_right, bestH2to1)
Code Example #12
File: HarryPotterize.py  Project: caosen469/CMU16720
import numpy as np
import cv2
import skimage.io 
import skimage.color
from opts import get_opts

#Import necessary functions
from matchPics import matchPics
from planarH import computeH_ransac
from helper import plotMatches

#Write script for Q2.2.4
opts = get_opts()

# Read cv_cover.jpg, cv_desk.png and hp_cover.jpg
img1 = cv2.imread('D:/Academic/CMU/Course/2020Fall/CV/Homework/HW2_Handout/HW2_Handout/data\cv_cover.jpg',1)
img2 = cv2.imread('D:/Academic/CMU/Course/2020Fall/CV/Homework/HW2_Handout/HW2_Handout/data\cv_desk.png',1)
img3 = cv2.imread('D:/Academic/CMU/Course/2020Fall/CV/Homework/HW2_Handout/HW2_Handout/data\hp_cover.jpg',1)

matches, locs1, locs2 = matchPics(img1, img2, opts)
locs1 = locs1[matches[:, 0], :]
locs2 = locs2[matches[:, 1], :]
plotMatches(img1, img2, matches, locs1, locs2)

result = computeH_ransac(locs2, locs1, opts)
Code Example #13
import cv2
from opts import get_opts

#Import necessary functions
from matchPics import matchPics
from planarH import computeH_ransac
from planarH import compositeH
from helper import plotMatches

#Write script for Q2.2.4
opts = get_opts()

cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')

matches, locs1, locs2 = matchPics(cv_cover, cv_desk, opts)

plotMatches(cv_cover, cv_desk, matches, locs1, locs2)

x1 = locs1[matches[:,0], 0:2]
x2 = locs2[matches[:,1], 0:2]

H, inliers = computeH_ransac(x1, x2, opts)

hp_cover = cv2.resize(hp_cover, (cv_cover.shape[1],cv_cover.shape[0]))
result = compositeH(H, hp_cover, cv_desk)

cv2.imwrite('hp_result.jpeg',result)