Example #1
# import the necessary packages (this helper assumes the pyimagesearch
# Stitcher used throughout these examples)
from pyimagesearch.panorama import Stitcher
import imutils
import cv2

def stitch_images(imageA, imageB):

    ''' Stitch two images together.

        Args:
            imageA: first image
            imageB: second image

        Returns:
            result: the two images stitched into a panorama
    '''
    # resize both images to a width of 400 pixels for faster processing
    # (imutils.resize keeps the aspect ratio, so only width is needed)
    imageA = imutils.resize(imageA, width=400)
    imageB = imutils.resize(imageB, width=400)

    # stitch the images together to create a panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
    # show the images (commented out when running on a Pi over SSH)
    #cv2.imshow("Image A", imageA)
    #cv2.imshow("Image B", imageB)
    #cv2.imshow("Keypoint Matches", vis)
    #cv2.imshow("Result", result)
    #cv2.waitKey(0)
    return result
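A minimal usage sketch for this helper, assuming two overlapping images on disk (the file names here are hypothetical):

# hypothetical input paths; any two overlapping images will do
imageA = cv2.imread("left.jpg")
imageB = cv2.imread("right.jpg")
panorama = stitch_images(imageA, imageB)
cv2.imwrite("panorama.jpg", panorama)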
Example #2
# import the necessary packages
from pyimagesearch.panorama import Stitcher
from matplotlib import pyplot as plt
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
	help="path to the first image")
ap.add_argument("-s", "--second", required=True,
	help="path to the second image")
args = vars(ap.parse_args())

# load the two images (the resize step below is left commented out)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
#imageA = imutils.resize(imageA, width=4000)
#imageB = imutils.resize(imageB, width=4000)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
cv2.imwrite('mosaic.jpg', result)

Example #3

# import the necessary packages (this fragment assumes the helpers from
# the pyimagesearch real-time stitching example)
from pyimagesearch.basicmotiondetector import BasicMotionDetector
from pyimagesearch.panorama import Stitcher
from imutils.video import VideoStream
import numpy as np
import datetime
import imutils
import time
import cv2

# initialize the video streams and allow them to warmup
print("[INFO] starting cameras...")
leftStream = VideoStream(src=1).start()
#rightStream = VideoStream(usePiCamera=True).start()
rightStream = VideoStream(src=0).start()
time.sleep(2.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
motion = BasicMotionDetector(minArea=500)
total = 0

# loop over frames from the video streams
while True:
	# grab the frames from their respective video streams
	left = leftStream.read()
	right = rightStream.read()

	# resize the frames
	left = imutils.resize(left, width=400)
	right = imutils.resize(right, width=400)

	# stitch the frames together to form the panorama
	# IMPORTANT: you might have to change this line of code
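	# (the example is truncated here; a minimal continuation, following the
	# stitch call used in the other examples on this page, might look like:)
	result = stitcher.stitch([left, right])

	# the stitcher returns None when no homography could be computed
	if result is None:
		print("[INFO] homography could not be computed")
		break

	# display the panorama and update the frame count
	total += 1
	cv2.imshow("Result", result)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break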
Example #4
ap.add_argument("-f", "--first", required=True,
	help="path to the first image")
ap.add_argument("-s", "--second", required=True,
	help="path to the second image")
args = vars(ap.parse_args())

# load the two images (the resize steps below are left commented out)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
#imageA = cv2.resize(imageA, (imageA.shape[0],imageA.shape[1]//2,))
#imageB = cv2.resize(imageB, (imageB.shape[0],imageB.shape[1]//2))
#imageA = imutils.resize(imageA, width=600)
#imageB = imutils.resize(imageB, width=600)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True, ratio=0.75, reprojThresh=4.0)

# show the images
cv2.imshow("Image A", imutils.resize(imageA, width=800))
cv2.imshow("Image B", imutils.resize(imageB, width=800))
cv2.imshow("Keypoint Matches", imutils.resize(vis, width=1280))
cv2.imshow("Result", imutils.resize(result, width=1280))
cv2.waitKey(0)
cv2.destroyAllWindows()

outdir = os.path.split(args["first"])[0]
filename = os.path.split(args["first"])[-1]
cv2.imwrite(os.path.sep.join([outdir, "stitched.png"]), result)
Example #5
from pyimagesearch.panorama import Stitcher
from imutils.video import VideoStream
import numpy as np
import datetime
import imutils
import time
import cv2

# initialize the video streams and allow them to warmup
print("[INFO] starting cameras...")
#leftStream = VideoStream(src=0).start()
#rightStream = VideoStream(usePiCamera=True).start()
#time.sleep(2.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
#motion = BasicMotionDetector(minArea=500)
total = 0
vidcap_left = cv2.VideoCapture('/Users/jayesh/Documents/Jayesh/left.mp4')
vidcap_right = cv2.VideoCapture('/Users/jayesh/Documents/Jayesh/right.mp4')
# loop over frames from the video streams
while True:
    # grab the frames from their respective video streams
    #left = leftStream.read()
    #right = rightStream.read()
    # resize the frames

    success_left, image_left = vidcap_left.read()
    success_right, image_right = vidcap_right.read()
    if not success_left or not success_right:
        print("frames are finished")
Example #6
# USAGE
# python realtime_stitching.py

# import the necessary packages
from __future__ import print_function
from pyimagesearch.panorama import Stitcher

import datetime
import time
import cv2
import socket
import numpy

stitcher = Stitcher()
sock = socket.socket()
cap_1 = cv2.VideoCapture(0)

Self_IP = "192.168.0.110"
Self_PORT = 8005

Pre_IP = "192.168.0.111"
Pre_PORT = 8002

sock.connect((Pre_IP, Pre_PORT))


def recvall(sock, count):
    buf = b''
    while count:
        newbuf = sock.recv(count)
        if not newbuf: return None
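        # (the function is cut off here; the standard completion of a
        # recvall loop accumulates chunks until `count` bytes arrive:)
        buf += newbuf
        count -= len(newbuf)
    return buf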
Example #7

		# if 0, save the resulting image

	# load the two images and resize them to have a width of 400 pixels
	# (for faster processing)
	if (i == 2 or (nbrVerticalImages == 2 and i == 0)):
		n = j - 1
		first = file + "/" + str(n) + "." + formatImage
		imageA = cv2.imread(first)
		second = file + "/" + str(j) + "." + formatImage
		imageB = cv2.imread(second)

		imageA = imutils.resize(imageA, width=400)
		imageB = imutils.resize(imageB, width=400)

		# stitch the images together to create a panorama
		stitcher = Stitcher()
		(result, vis) = stitcher.stitch_horizontal([imageA, imageB], showMatches=True)

	elif i != 1:
		imageA = result
		second = file + "/" + str(j) + "." + formatImage
		imageB = cv2.imread(second)

		imageB = imutils.resize(imageB, width=400)

		# stitch the images together to create a panorama
		stitcher = Stitcher()
		(result, vis) = stitcher.stitch_horizontal([imageA, imageB], showMatches=True)

	if interm:
		# show the images
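		# (the example is cut off here; mirroring the display code in the
		# other examples, the block presumably continues along these lines:)
		cv2.imshow("Image A", imageA)
		cv2.imshow("Image B", imageB)
		cv2.imshow("Keypoint Matches", vis)
		cv2.imshow("Result", result)
		cv2.waitKey(0)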
Example #8

from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
	help="path to the first image")
ap.add_argument("-s", "--second", required=True,
	help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
Example #9
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2
import numpy as np

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="path to the first image")
ap.add_argument("-s",
                "--second",
                required=True,
                help="path to the second image")
args = vars(ap.parse_args())

imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
imageA = imutils.resize(imageA, width=500)
imageB = imutils.resize(imageB, width=500)

# stitch the images together to create a panorama
stitcher = Stitcher()
result = stitcher.stitch([imageA, imageB])

# show the images
#cv2.imshow("Image A", imageA)
#cv2.imshow("Image B", imageB)
cv2.imshow("Result", result)
cv2.imshow("Stacked", np.hstack((imageA, imageB)))
cv2.imwrite("Result.jpg", result)
cv2.waitKey(0)
Example #10
# import the necessary packages
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2

# define basic parameter
IM_NUM = 17
result = []
stitcher = Stitcher()

for i in range(IM_NUM, 14, -1):
    # build the zero-padded file name (e.g. "0017.jpg")
    im_name = "{:04d}.jpg".format(i)

    im_path = "/auto/extra/b02902015/py-faster-rcnn/video_image/Compress/" + im_name
    image = cv2.imread(im_path)
    cv2.imshow(im_name, image)
    if (i == IM_NUM):
        result = image
        continue
    # note: `diraction` is not a parameter of the standard pyimagesearch
    # Stitcher, so this example assumes a customized stitch() signature
    result = stitcher.stitch([result, image], showMatches=True, diraction=0)

cv2.imshow("Result", result)
cv2.waitKey(0)
Example #11
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2
import numpy as np

img1 = cv2.imread("images/femfel1.png")
img2 = cv2.imread("images/femfel2.png")

stitcher = Stitcher()
(result, vis) = stitcher.stitch([img1, img2], showMatches=True)

img_diff = cv2.absdiff(result, img1)

kernel = np.ones((2, 2), np.uint8)
img_eroded = cv2.erode(img_diff, kernel, iterations=1)

gray = cv2.cvtColor(img_eroded, cv2.COLOR_BGR2GRAY)
retval, dest = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY)

# note: the three-value return is the OpenCV 3.x API; OpenCV 4.x
# returns only (contours, hierarchy)
image, contours, hierarchy = cv2.findContours(dest, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_NONE)

for c in contours:
    (x, y, w, h) = cv2.boundingRect(c)
    cv2.rectangle(img_eroded, (x, y), (x + w, y + h), (0, 0, 255), 1)

# Initiate ORB detector
orb = cv2.ORB_create()

# find the keypoints and descriptors with ORB
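# (the example is cut off here; the standard OpenCV calls for this step,
# plus a brute-force Hamming match for inspection, would be:)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# match descriptors and draw the strongest matches
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
vis_orb = cv2.drawMatches(img1, kp1, img2, kp2, matches[:20], None, flags=2)
cv2.imshow("ORB Matches", vis_orb)
cv2.waitKey(0)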
Example #12

# import the necessary packages (this fragment also relies on the
# pyimagesearch helpers and imutils.video used in the earlier examples)
from pyimagesearch.basicmotiondetector import BasicMotionDetector
from pyimagesearch.panorama import Stitcher
from imutils.video import VideoStream
import imutils
import time
import cv2

import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

# initialize the video streams and allow them to warm up
print("[INFO] starting cameras...")
leftStream = VideoStream(src=0).start()
rightStream = VideoStream(src=1).start()
time.sleep(1.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
motion = BasicMotionDetector(minArea=500)
total = 0

ImgStitcher = rospy.Publisher('ImgStitcher', Image, queue_size=10)
rospy.init_node('ImgPublisher')
rate = rospy.Rate(10)  # 10hz

# loop over frames from the video streams
while not rospy.is_shutdown():
    # grab the frames from their respective video streams
    left = leftStream.read()
    right = rightStream.read()

    # resize the frames
    left = imutils.resize(left, width=400)
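    # (the example is truncated here; a plausible continuation resizes the
    # right frame, stitches the pair, and publishes the panorama on the
    # ROS topic; `bridge` is created inline only for this sketch and would
    # normally be built once before the loop:)
    right = imutils.resize(right, width=400)
    result = stitcher.stitch([left, right])

    if result is not None:
        bridge = CvBridge()
        ImgStitcher.publish(bridge.cv2_to_imgmsg(result, "bgr8"))
    rate.sleep()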
Example #13

# import the necessary packages (this fragment starts mid-file; the parser
# below is reconstructed from the args[...] keys used in the code, and the
# flag names are assumptions)
from pyimagesearch.panorama import Stitcher
import argparse
import time
import os
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--video1", required=True, help="path to the first video")
ap.add_argument("-b", "--video2", required=True, help="path to the second video")
ap.add_argument("-o", "--output", required=True, help="path to the output video file")
args = vars(ap.parse_args())

# grab a reference to the video files
vs1 = cv2.VideoCapture(args["video1"])
vs2 = cv2.VideoCapture(args["video2"])

# allow the camera or video file to warm up
time.sleep(2.0)

#image = image_org.copy()
#image = imutils.resize(image_org, width=800)

outdir = os.path.split(args["video1"])[0]

# stitch the images together to create a panorama
stitcher = Stitcher()
fc1 = 0
fc2 = 0

fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 30, (2560, 960), True)

while True:
  # grab the current frame from left video
  result1, image1 = vs1.read()
  
  # if we are viewing a video and we did not grab a frame,
  # then we have reached the end of the video
  if not result1:
    break
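  # grab the current frame from the right video and stop when either
  # stream is exhausted
  result2, image2 = vs2.read()
  if not result2:
    break

  # (the example is truncated here; a plausible continuation stitches the
  # two frames, resizes the panorama to the writer's frame size, and
  # appends it to the output video:)
  result = stitcher.stitch([image1, image2])
  if result is not None:
    writer.write(cv2.resize(result, (2560, 960)))

# release the video files and the writer
vs1.release()
vs2.release()
writer.release()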