Example #1
def ar_func(ind, ar_src, cv_cover, cv_cov_vid, opts):
    # Crop the black bands from the source frame, then resize it to the cover height
    im = ar_src[44:311, :, :]
    resized_im = cv2.resize(im, (im.shape[1], cv_cover.shape[0]))

    # Match the cover template against the current video frame and fit a homography
    matches, locs1, locs2 = matchPics(cv_cover, cv_cov_vid, opts)
    locs1 = locs1[matches[:, 0], 0:2]
    locs2 = locs2[matches[:, 1], 0:2]
    bestH2to1, inliers = computeH_ransac(locs1, locs2, opts)

    # Keep the central region at the cover's width and composite it onto the frame
    half_w = int(cv_cover.shape[1] / 2)
    center = int(resized_im.shape[1] / 2)
    cropped_im = resized_im[:, center - half_w:center + half_w, :]
    composite_img = compositeH(bestH2to1, cropped_im, cv_cov_vid)
    return composite_img
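None of the examples defines compositeH itself. Below is a minimal sketch of what such a helper could look like, assuming the passed homography already maps template coordinates into the frame; the assignment's H2to1 convention may instead require inverting H first, so treat this as an illustration rather than the course implementation.

import numpy as np
import cv2

def compositeH_sketch(H, template, img):
    # Warp the template into the frame with H, then paste it over the frame
    # wherever the warped template's mask is non-zero.
    h, w = img.shape[:2]
    mask = np.full(template.shape[:2], 255, dtype=np.uint8)
    warped_mask = cv2.warpPerspective(mask, H, (w, h))
    warped_template = cv2.warpPerspective(template, H, (w, h))
    composite = img.copy()
    composite[warped_mask > 0] = warped_template[warped_mask > 0]
    return composite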
Example #2
def func_result(cv_cover, frame, ar_f, opts):
    matches, locs1, locs2 = matchPics(cv_cover, frame, opts)
    x1 = locs1[matches[:, 0], 0:2]
    x2 = locs2[matches[:, 1], 0:2]

    H2to1, inliers = computeH_ransac(x1, x2, opts)
    ar_f = ar_f[45:310, :, :]
    cover_width = cv_cover.shape[1]
    # scale to the cover height while preserving the frame's aspect ratio
    # (multiply before int() so the ratio is not truncated to an integer)
    width = int(ar_f.shape[1] / ar_f.shape[0] * cv_cover.shape[0])

    resized_ar = cv2.resize(ar_f, (width, cv_cover.shape[0]),
                            interpolation=cv2.INTER_AREA)
    h, w, d = resized_ar.shape
    cropped_ar = resized_ar[:,
                            int(w / 2) - int(cover_width / 2):int(w / 2) +
                            int(cover_width / 2), :]

    result = compositeH(H2to1, cropped_ar, frame)

    return result
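computeH_ransac is likewise never shown in these snippets. The sketch below illustrates the RANSAC idea under assumed conventions (Nx2 point arrays, H mapping the second point set onto the first, a boolean inlier mask as the second return value); it borrows cv2.findHomography for the minimal four-point fit, whereas the course code presumably implements its own DLT, and max_iters/tol stand in for whatever the opts object carries.

import numpy as np
import cv2

def computeH_ransac_sketch(x1, x2, max_iters=500, tol=2.0):
    # x1, x2: Nx2 arrays of matched points; returns (H, inlier_mask),
    # where H maps x2 points onto x1 points.
    x1 = np.asarray(x1, dtype=np.float64)
    x2 = np.asarray(x2, dtype=np.float64)
    n = x1.shape[0]
    x2_h = np.hstack([x2, np.ones((n, 1))])             # homogeneous coordinates
    best_H, best_inliers = np.eye(3), np.zeros(n, dtype=bool)
    for _ in range(max_iters):
        idx = np.random.choice(n, 4, replace=False)     # minimal sample
        H, _ = cv2.findHomography(x2[idx], x1[idx], 0)  # exact four-point fit
        if H is None:
            continue
        proj = x2_h @ H.T
        proj = proj[:, :2] / proj[:, 2:3]               # back to inhomogeneous coordinates
        inliers = np.linalg.norm(proj - x1, axis=1) < tol
        if inliers.sum() > best_inliers.sum():
            best_H, best_inliers = H, inliers
    return best_H, best_inliers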
Example #3
def createVideoFiles(args):
    opts, numframes, ar_source, book, cv_cover, bookdim = args

    for i in range(numframes):
        bookframe = book[i, :, :, :]
        pandaframe = ar_source[i, :, :, :]
        panda_crop = pandaframe[40:320, 208:431, :]
        panda = cv2.resize(panda_crop, bookdim, interpolation=cv2.INTER_AREA)

        matches, locs1, locs2 = matchPics(cv_cover, bookframe, opts)

        # plotMatches(cv_cover, bookframe, matches, locs1, locs2)
        # swap the (row, col) locations returned by matchPics to (x, y) order
        locs1 = locs1[matches[:, 0]]
        locs2 = locs2[matches[:, 1]]
        locs1[:, [1, 0]] = locs1[:, [0, 1]]
        locs2[:, [1, 0]] = locs2[:, [0, 1]]

        H2to1, inliers = computeH_ransac(locs1, locs2, opts)

        image = compositeH(H2to1, panda, bookframe)
        np.save("../test1/img{}.npy".format(i), image)
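Example #3 only dumps each composite frame to a .npy file. A possible follow-up step (the output path, codec and fps are assumptions) would stitch those files back into a video:

import glob
import numpy as np
import cv2

frame_paths = sorted(glob.glob('../test1/img*.npy'),
                     key=lambda p: int(p.split('img')[-1].split('.npy')[0]))
first = np.load(frame_paths[0])
h, w = first.shape[:2]
writer = cv2.VideoWriter('../test1/ar.avi', cv2.VideoWriter_fourcc(*'MJPG'),
                         25, (w, h))
for path in frame_paths:
    writer.write(np.load(path).astype(np.uint8))  # VideoWriter expects uint8 BGR frames
writer.release()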
Example #4
def createvideo(panda, book_frames, edge_crop, x_start, x_end, cv_cover):
    # frame_no, opts and ar_avi (the output cv2.VideoWriter) are assumed to be defined at module level
    for idx in range(frame_no):
        panda_img = panda[idx]
        # crop the panda frame
        panda_img = panda_img[edge_crop:-edge_crop, x_start:x_end]
        panda_img = cv2.resize(panda_img,
                               (cv_cover.shape[1], cv_cover.shape[0]))
        #book_frame
        book_img = book_frames[idx]

        matches, locs1, locs2 = matchPics(book_img, cv_cover, opts)
        pair1 = locs1[matches[:, 0]]
        pair2 = locs2[matches[:, 1]]
        homography, inliers = computeH_ransac(pair1, pair2, opts)
        # panda_warped = cv2.warpPerspective(panda_img,homography,(book_img.shape[1],book_img.shape[0]))
        # mask = compositeH(homography, book_img, panda_warped)
        merge_frame = compositeH(homography, book_img, panda_img)

        #merge frame
        # merge_frame = (1- mask)*(book_img) + mask * panda_warped
        # print(merge_frame.shape)
        ar_avi.write(merge_frame)
Example #5
#Import necessary functions
import numpy as np
import cv2
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac, compositeH
from helper import plotMatches
from matplotlib import pyplot as plt

#Write script for Q2.2.4
opts = get_opts()

cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')

matches, locs1, locs2 = matchPics(cv_cover, cv_desk, opts)

plotMatches(cv_cover, cv_desk, matches, locs1, locs2)
locs1 = locs1[matches[:, 0]]
locs2 = locs2[matches[:, 1]]
locs1[:, [1, 0]] = locs1[:, [0, 1]]
locs2[:, [1, 0]] = locs2[:, [0, 1]]

print(np.shape(locs1))

H2to1, inliers = computeH_ransac(locs1, locs2, opts)

template = cv2.resize(hp_cover, (cv_cover.shape[1], cv_cover.shape[0]),
                      interpolation=cv2.INTER_AREA)

image = compositeH(H2to1, template, cv_desk)

plt.imshow(image)
plt.axis('off')
plt.show()
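matchPics comes from the course code and is not reproduced in any of these examples. A rough sketch of a comparable FAST-corner + BRIEF pipeline built on scikit-image is shown below; the function name and the sigma/ratio defaults are assumptions, not the course's actual interface.

from skimage.color import rgb2gray
from skimage.feature import corner_fast, corner_peaks, BRIEF, match_descriptors

def matchPics_sketch(I1, I2, sigma=0.15, ratio=0.7):
    # Detect FAST corners, describe them with BRIEF, and match with a ratio test.
    # The returned locations are in (row, col) order, which is why several of the
    # scripts above swap the two columns before estimating a homography.
    g1, g2 = rgb2gray(I1), rgb2gray(I2)
    locs1 = corner_peaks(corner_fast(g1, threshold=sigma), min_distance=1)
    locs2 = corner_peaks(corner_fast(g2, threshold=sigma), min_distance=1)
    extractor = BRIEF()
    extractor.extract(g1, locs1)
    desc1, locs1 = extractor.descriptors, locs1[extractor.mask]
    extractor.extract(g2, locs2)
    desc2, locs2 = extractor.descriptors, locs2[extractor.mask]
    matches = match_descriptors(desc1, desc2, cross_check=True, max_ratio=ratio)
    return matches, locs1, locs2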
Example #6
#print(cv_desk.shape)
hp_cover = cv2.imread('../data/hp_cover.jpg')
#print(hp_cover.shape)

matches1, locs1, locs2 = matchPics(cv_desk, cv_cover, opts)  # match between the first two images

locs1[:, [0, 1]] = locs1[:, [1, 0]]
locs2[:, [0, 1]] = locs2[:, [1, 0]]

plotMatches(cv_desk, cv_cover, matches1, locs1, locs2)

print('locs1m1 Shape :', locs1.shape)
print('locs2m1 Shape :', locs2.shape)

print('matches :', matches1.shape)

bestH2to1, inliers = computeH_ransac(locs1[matches1[:, 0]], locs2[matches1[:, 1]], opts)

#print('Best :', bestH2to1)
dim = (cv_cover.shape[1], cv_cover.shape[0])
hp_cover = cv2.resize(hp_cover, dim)

composite_img = compositeH(bestH2to1, hp_cover, cv_desk)
print("Shape of composite image:", composite_img.shape)

cv2.imwrite('../data/warp_exp1.jpg', composite_img)
Example #7
import numpy as np
import cv2
import skimage.io
import skimage.color
from opts import get_opts
from matplotlib import pyplot as plt

import pdb
#Import necessary functions
from matchPics import matchPics
from planarH import computeH_ransac
from planarH import compositeH

#Write script for Q2.2.4
opts = get_opts()

#Read required images
cv_cover = cv2.imread('../data/cv_cover.jpg')  #use PIL
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')

matches, locs1, locs2 = matchPics(cv_cover, cv_desk, opts)

hp_cover_resize = cv2.resize(hp_cover, (cv_cover.shape[1], cv_cover.shape[0]))

locs1 = locs1[matches[:, 0], 0:2]
locs2 = locs2[matches[:, 1], 0:2]
bestH2to1, inliers = computeH_ransac(locs1, locs2, opts)
composite_img = compositeH(bestH2to1, hp_cover_resize, cv_desk)
cv2.imwrite('../result/final_norm_harry_poterize_debug.jpg', composite_img)
Example #8
# im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
# im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
# im3 = cv2.cvtColor(im3, cv2.COLOR_BGR2GRAY)

# to resize im3 is better.
im3 = cv2.resize(im3, (im1.shape[1], im1.shape[0]))
# im1 = cv2.resize(im1, (im3.shape[1], im3.shape[0]))

locs1, desc1 = briefLite(im1)
locs2, desc2 = briefLite(im2)

matches = briefMatch(desc1, desc2)

print(im1.shape)
print(im2.shape)
print(im3.shape)

# plotMatches(im1,im2,matches,locs1,locs2)
num_iter = 5000  # use an integer count; 5e3 is a float, which would break a range() loop inside a typical ransacH
tol = 3
bestH = ransacH(matches, locs1, locs2, num_iter=num_iter, tol=tol)
print('H:', bestH)

final_img = compositeH(bestH, im2, im3)
# final_img = cv2.warpPerspective(im3, bestH, (im2.shape[1],im2.shape[0]))

res = final_img
cv2.imshow("iter=%d_tol=%d" % (num_iter, tol), res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #9
import numpy as np
import cv2
from opts import get_opts

#Import necessary functions
from matchPics import matchPics
from planarH import computeH_ransac, compositeH
from loadVid import loadVid

#Write script for Q3.1
opts = get_opts()
cv_cover = cv2.imread('../data/cv_cover.jpg')  # 440, 350, 3
ar_source = loadVid('../data/ar_source.mov')
np.save("ar_source.npy", ar_source)
book = loadVid('../data/book.mov')
np.save("book.npy", book)
# ar_source = np.load('ar_source.npy', allow_pickle=True)  # 511, 360, 640, 3
# book = np.load('book.npy', allow_pickle=True)  # 641, 480, 640, 3

#Processing the video one frame at a time
# VideoWriter expects the frame size as (width, height)
cap = cv2.VideoWriter('ar.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 15.0, (book.shape[2], book.shape[1]))
for frame_num in range(ar_source.shape[0]):
    print(frame_num)
    frame_source = ar_source[frame_num]
    frame_book = book[frame_num]
    matches, locs1, locs2 = matchPics(np.transpose(cv_cover, (1, 0, 2)), np.transpose(frame_book, (1, 0, 2)), opts)
    bestH2to1, inliers = computeH_ransac(matches, locs1, locs2, opts)
    frame_source = frame_source[48:-48, 145:495]  # crop the black bars from the top and bottom of the ar_source video
    composite_img = compositeH(bestH2to1, frame_source, frame_book, cv_cover)
    # cv2.imwrite('ar_final/frame_{}.png'.format(frame_num), composite_img)
    cap.write(composite_img)
cap.release()
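loadVid is another course helper that none of the examples shows. A minimal sketch of what it might do is to read every frame into a single (num_frames, H, W, 3) uint8 array with cv2.VideoCapture:

import numpy as np
import cv2

def loadVid_sketch(path):
    # Read all frames of a video into one (num_frames, H, W, 3) array.
    cap = cv2.VideoCapture(path)
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frames.append(frame)
    cap.release()
    return np.stack(frames)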
Example #10
import cv2
from opts import get_opts

#Import necessary functions
from matchPics import matchPics
from planarH import computeH_ransac
from planarH import compositeH
from helper import plotMatches

#Write script for Q2.2.4
opts = get_opts()

cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')

matches, locs1, locs2 = matchPics(cv_cover, cv_desk, opts)

plotMatches(cv_cover, cv_desk, matches, locs1, locs2)

x1 = locs1[matches[:, 0], 0:2]
x2 = locs2[matches[:, 1], 0:2]

H, inliers = computeH_ransac(x1, x2, opts)

hp_cover = cv2.resize(hp_cover, (cv_cover.shape[1], cv_cover.shape[0]))
result = compositeH(H, hp_cover, cv_desk)

cv2.imwrite('hp_result.jpeg', result)
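For reference, the four-point estimate used inside a RANSAC loop can be written as a direct linear transform. The unnormalized sketch below is an illustration only; the course's own homography routine is not shown here, and a coordinate-normalized version is usually preferred for numerical stability.

import numpy as np

def computeH_sketch(x1, x2):
    # Estimate H such that x1 ~ H @ x2 (in homogeneous coordinates) from N >= 4 point pairs.
    A = []
    for (x, y), (u, v) in zip(np.asarray(x1, float), np.asarray(x2, float)):
        A.append([u, v, 1, 0, 0, 0, -x * u, -x * v, -x])
        A.append([0, 0, 0, u, v, 1, -y * u, -y * v, -y])
    _, _, Vt = np.linalg.svd(np.asarray(A))
    H = Vt[-1].reshape(3, 3)   # null-space vector of A gives the homography entries
    return H / H[2, 2]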
Example #11
    aspect_ratio = cv_cover.shape[1] / cv_cover.shape[0]  # w/h
    #print('Aspect ratio :', aspect_ratio)

    video_cover = ar[i, :, :, :]
    video_cover = video_cover[44:-44, :]  # chop off the black bands at the top and bottom

    H, W, C = video_cover.shape
    #print('Shape of ar:', video_cover.shape)
    w = W / 2
    # width that preserves the cover's aspect ratio at the cropped frame's height
    width_ar = H * cv_cover.shape[1] / cv_cover.shape[0]
    video_cover = video_cover[:, int(w - width_ar / 2):int(w + width_ar / 2)]  # height stays fixed
    video_cover = cv2.resize(video_cover, dim)

    composite_img = compositeH(bestH2to1, video_cover, book[i, :, :, :])
    composite_list.append(composite_img)

       

def make_video(images, outimg=None, fps=5, size=None,
               is_color=True, format="XVID",outvid="../data/output_3.mov"):
    
    from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize
    fourcc = VideoWriter_fourcc(*format)
    vid = None
    for img in images:
        if vid is None:
            if size is None:
                size = img.shape[1], img.shape[0]
            vid = VideoWriter(outvid, fourcc, float(fps), size, is_color)
        # write each frame, resizing it to the writer's frame size if necessary
        if size[0] != img.shape[1] or size[1] != img.shape[0]:
            img = resize(img, size)
        vid.write(img)
    vid.release()
    return vid
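A possible call for the composite frames collected in the loop above (the fps value is an assumption):

make_video(composite_list, fps=25, outvid='../data/output_3.mov')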
Example #12
# imgA and imgB are the two input images to be stitched; they are assumed to be loaded earlier
img_left = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
img_right = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

# find the keypoints and descriptors with ORB - alternative to SIFT
orb = cv2.ORB_create(
    nfeatures=5000
)  # an ablation over the number of features doesn't yield much visual difference between 1000 and 5000
locs1, des1 = orb.detectAndCompute(img_left, None)
locs2, des2 = orb.detectAndCompute(img_right, None)

# match features - https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
bfmatch = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bfmatch.match(des1, des2)
sort_matches = sorted(matches, key=lambda x: x.distance)

matched_locs1 = np.array(
    [locs1[match.queryIdx].pt for match in sort_matches[:700]])
matched_locs2 = np.array(
    [locs2[match.trainIdx].pt for match in sort_matches[:700]]
)  # an ablation over the number of top matches doesn't yield much visual difference between 300 and 700

# compute homography - https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.html
H, mask = cv2.findHomography(matched_locs1, matched_locs2, cv2.RANSAC, 5.0)

# compose composite image
composite_pano = compositeH(H, img_left, img_right)
composite_pano = cv2.cvtColor(composite_pano, cv2.COLOR_GRAY2RGB)

plt.imshow(composite_pano)
plt.show()
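A quick way to eyeball the homography estimated in Example #12 (an assumed check, not part of the snippet) is to warp img_left into the right image's frame and blend the two:

h, w = img_right.shape[:2]
warped_left = cv2.warpPerspective(img_left, H, (w, h))   # H maps left coordinates into the right image
overlay = cv2.addWeighted(img_right, 0.5, warped_left, 0.5, 0)
plt.imshow(overlay, cmap='gray')
plt.show()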
Example #13
#Import necessary functions
import cv2
import matplotlib.pyplot as plt
from opts import get_opts
from matchPics import matchPics
from planarH import computeH_ransac
from planarH import compositeH

#Write script for Q2.2.4
opts = get_opts()
#read in images
cv_cover = cv2.imread('../data/cv_cover.jpg')
cv_desk = cv2.imread('../data/cv_desk.png')
hp_cover = cv2.imread('../data/hp_cover.jpg')
hp_cover = cv2.cvtColor(hp_cover, cv2.COLOR_BGR2RGB)
cv_desk = cv2.cvtColor(cv_desk, cv2.COLOR_BGR2RGB)
hp_cover = cv2.resize(hp_cover, (cv_cover.shape[1], cv_cover.shape[0]))
# print(hp_cover.shape)
# print(cv_cover.shape)

matches, locs1, locs2 = matchPics(cv_desk, cv_cover, opts)
pair1 = locs1[matches[:, 0]]
pair2 = locs2[matches[:, 1]]
homography, inliers = computeH_ransac(pair1, pair2, opts)
# hp_warped = cv2.warpPerspective(hp_cover,homography,(cv_desk.shape[1],cv_desk.shape[0]))
# print(hp_warped)
# mask = compositeH(homography, cv_desk, hp_warped)
final_img = compositeH(homography, cv_desk, hp_cover)
# plt.imshow((1- mask)*(cv_desk)/255 + mask * hp_warped / 255)
plt.imshow(final_img)
plt.show()
Example #14
# Write script for Q3.1
import os
import cv2
from opts import get_opts
from loadVid import loadVid
from matchPics import matchPics
from planarH import computeH_ransac, compositeH

opts = get_opts()
ar_frames = loadVid('../data/ar_source.mov')
book_frames = loadVid('../data/book.mov')
cv_cover = cv2.imread('../data/cv_cover.jpg')
f, H, W = ar_frames.shape[:3]
ar_frames = ar_frames[:, 44:-44, 200:430, :]

if not os.path.exists('../result'):
    os.mkdir('../result')

fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter('../result/ar.avi', fourcc, 25,
                         (book_frames.shape[2], book_frames.shape[1]), True)

for i in range(f):
    book = book_frames[i]
    ar = ar_frames[i]

    matches, locs1, locs2 = matchPics(book, cv_cover, opts)
    if matches.shape[0] >= 4:
        x1 = locs1[matches[:, 0], :]
        x2 = locs2[matches[:, 1], :]
        x1[:, [0, 1]] = x1[:, [1, 0]]
        x2[:, [0, 1]] = x2[:, [1, 0]]
        bestH2to1, inliers = computeH_ransac(x1, x2, opts)

        ar = cv2.resize(ar, (345, 444))
        composite_img = compositeH(bestH2to1, ar, book)
        writer.write(composite_img)

writer.release()