Ejemplo n.º 1
0
def partialform(first, second):
    """Stitch the images at paths *first* and *second* and save the panorama.

    The stitched result is written as 'stitched_.png' into the directory
    containing *first*. Both inputs are resized to an 800px width first.
    """
    import os  # local import: only needed for the output-path handling

    imageA = cv2.imread(first)
    imageB = cv2.imread(second)
    # fail fast: cv2.imread returns None (no exception) on a bad path
    if imageA is None or imageB is None:
        raise IOError("could not read input images: %r, %r" % (first, second))

    imageA = imutils.resize(imageA, width=800)
    imageB = imutils.resize(imageB, width=800)

    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # show inputs, the keypoint-match visualization, and the panorama
    displayImg(imageA, True)
    displayImg(imageB, True)
    displayImg(vis, True)
    displayImg(result, True)

    # FIX: use os.path instead of hand-splitting on '\\' — the original was
    # Windows-only and wrote to '\stitched_.png' when *first* had no directory
    cv2.imwrite(os.path.join(os.path.dirname(first), 'stitched_.png'), result)
def originFunc():
    """Continuously grab frames from two cameras, stitch each pair into a
    panorama, and display the result until `q` is pressed or the homography
    cannot be computed.
    """
    # initialize the video streams and allow them to warm up
    print("[INFO] starting cameras...")
    leftStream = VideoStream(src=1).start()
    rightStream = VideoStream(src=0).start()
    time.sleep(2.0)

    stitcher = Stitcher()

    try:
        # loop over frames from the video streams
        while True:
            # grab the frames from their respective video streams
            left = leftStream.read()
            right = rightStream.read()

            # resize the frames
            left = imutils.resize(left, width=400)
            right = imutils.resize(right, width=400)

            # stitch the frames together to form the panorama
            # IMPORTANT: frames must be supplied in left-to-right order;
            # swap them if your cameras are oriented differently
            result = stitcher.stitch([left, right])

            # no homography could be computed — give up
            if result is None:
                print("[INFO] homography could not be computed")
                break

            # show the output images
            cv2.imshow("Result", result)
            cv2.imshow("Left Frame", left)
            cv2.imshow("Right Frame", right)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
    finally:
        # FIX: cleanup now runs even if a read/resize/stitch raises
        # (also removed an unused `total` counter)
        print("[INFO] cleaning up...")
        cv2.destroyAllWindows()
        leftStream.stop()
        rightStream.stop()
Ejemplo n.º 3
0
# import the necessary packages
from panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images; cv2.imread returns None on failure instead of
# raising, so fail fast with a clear message rather than crashing in resize
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])
if imageA is None or imageB is None:
    raise SystemExit("[ERROR] could not read one of the input images")

# resize both to a width of 400 pixels (for faster processing)
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama; stitch returns None
# when no homography can be computed
stitcher = Stitcher()
result = stitcher.stitch([imageA, imageB])
if result is None:
    raise SystemExit("[ERROR] homography could not be computed")

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Result", result)
cv2.waitKey(0)
Ejemplo n.º 4
0
hB, wB = imageB.shape[:2]
hC, wC = imageC.shape[:2]
hD, wD = imageD.shape[:2]

# Resize every image to a 400px width, preserving aspect ratio.
# FIX: cv2.resize's third positional parameter is `dst`, not the
# interpolation flag — the original passed INTER_CUBIC as an (invalid)
# destination buffer. The flag must be given as a keyword argument.
# (hA/wA/imageA come from code above this excerpt.)
imageA = cv2.resize(imageA, (400, int(400 * hA / wA)),
                    interpolation=cv2.INTER_CUBIC)
imageB = cv2.resize(imageB, (400, int(400 * hB / wB)),
                    interpolation=cv2.INTER_CUBIC)
imageC = cv2.resize(imageC, (400, int(400 * hC / wC)),
                    interpolation=cv2.INTER_CUBIC)
imageD = cv2.resize(imageD, (400, int(400 * hD / wD)),
                    interpolation=cv2.INTER_CUBIC)

# original image order is C, D, A, B; detectKeyPoint recovers the
# correct left-to-right ordering
images = [imageB, imageA, imageD, imageC]

stitcher = Stitcher()
order_set = stitcher.detectKeyPoint(images)  # correct order of the images

# stitch in pairs, then stitch the two partial panoramas together
result = stitcher.stitch(
    images[order_set[0]],
    images[order_set[1]])
result2 = stitcher.stitch(images[order_set[2]], images[order_set[3]])

result3 = stitcher.stitch(result, result2)

cv2.imshow("Result", result)    # first pairwise stitch
cv2.imshow("Result2", result2)  # second pairwise stitch

cv2.imshow("Result3", result3)  # final panorama from the two halves

cv2.waitKey(0)
Ejemplo n.º 5
0
    # read the image paths (one per line); `with` guarantees the file is
    # closed — the original leaked the handle
    with open(input_file_list, 'r') as fp:
        _images = [each.rstrip('\r\n') for each in fp.readlines()]

    begin_time = datetime.datetime.now()

    # FIX: this was a Python 2 `print x` statement — a syntax error under
    # Python 3, which the rest of this code already targets (print(e) below)
    for x in _images:
        print(x)

    try:
        images = []
        for _image in _images:
            images.append(load_image(_image))

        # Stitch the first two images. firstTime=True presumably makes the
        # stitcher compute fresh keypoints/features — confirm in Stitcher.
        stitcher = Stitcher()
        result, kps, features, deg = stitcher.stitch([images[0], images[1]],
                                                     firstTime=True)

        # fold each remaining image into the running panorama, reusing the
        # keypoints/features/rotation returned by the previous step
        for idx in range(2, len(images)):
            stitcher = Stitcher()
            result, kps, features, deg = stitcher.stitch([result, images[idx]],
                                                         firstTime=False,
                                                         l_ori_kps=kps,
                                                         l_features=features,
                                                         l_deg=deg)

        cv2.imwrite(output_file, result)

    except Exception as e:
        # NOTE(review): broad catch that only prints — failures are
        # swallowed silently; consider logging and re-raising
        print(e)
Ejemplo n.º 6
0
# loop over frames from the video streams
# NOTE: `leftStream`, `rightStream`, `stitcher`, `total` and the imports are
# defined earlier in the file, outside this excerpt; the loop body also
# continues past the end of this excerpt.
while True:
    # grab the frames from their respective video streams
    left = leftStream.read()
    right = rightStream.read()

    # resize the frames
    left = imutils.resize(left, width=640, height=480)
    right = imutils.resize(right, width=640, height=480)

    # stitch the frames together to form the panorama
    # IMPORTANT: you might have to change this line of code
    # depending on how your cameras are oriented; frames
    # should be supplied in left-to-right order
    result = stitcher.stitch([left, right])

    # stitch() returns None when no homography could be computed
    if result is None:
        print("[INFO] homography could not be computed")
        break

    # increment the total number of frames read and draw the
    # timestamp onto the bottom-left corner of the panorama
    total += 1
    timestamp = datetime.datetime.now()
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(result, ts, (10, result.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the output images
Ejemplo n.º 7
0
stitcher = Stitcher()

# Preset input sequences for the different test scenarios.  The lists are
# generated rather than written out long-hand; the resulting paths are
# identical to the originals.
perspective = ['test_images/halfdome-%02d.png' % i for i in range(6)]
perspectiveReverse = ['test_images/halfdome-%02d.png' % i
                      for i in range(5, -1, -1)]
affine = ['test_images/t%d.jpg' % i for i in range(1, 11)]
diagonalLeftRight = ['test_images/b%d.jpg' % i for i in range(4)]
diagonalRightLeft = ['test_images/b%d.jpg' % i for i in range(3, -1, -1)]

# a minimal two-image case
two = ['test_images/halfdome-03.png', 'test_images/halfdome-02.png']

# run the stitcher on the affine sequence and wait for a key press
res = stitcher.stitch(affine)
cv2.waitKey(0)
Ejemplo n.º 8
0
    return count, nameArray


# count the input files (countFiles is defined earlier in the file)
fileCount, names = countFiles()

# take the files four at a time and stitch each group into one panorama
# NOTE(review): assumes fileCount is a multiple of 4 — names[i + 3] raises
# IndexError otherwise; cv2.imread also returns None for unreadable files.
# The loop body continues past the end of this excerpt.
for i in range(0, fileCount, 4):
    imageA = cv2.imread(names[i])
    imageB = cv2.imread(names[i + 1])
    imageC = cv2.imread(names[i + 2])
    imageD = cv2.imread(names[i + 3])

    # stitch the first pair into a partial panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # stitch the second pair into a partial panorama
    stitcher = Stitcher()
    (result2, vis2) = stitcher.stitch([imageC, imageD], showMatches=True)

    imageE = result
    imageF = result2
    # stitch the two partial panoramas into the final result
    stitcher = Stitcher()
    (result3, vis3) = stitcher.stitch([imageE, imageF], showMatches=True)

    # generate unique ids and a timestamp used to name the output file
    anomalyId = str(uuid.uuid4())
    notificationId = str(uuid.uuid4())
    dateTime = time.strftime("%Y%m%d-%H%M%S")
Ejemplo n.º 9
0
        # time the registration of this level (the matching end-time code is
        # outside this excerpt)
        start_time = time.time()
        for img_idx in range(img_cnt):
            # img_idx = 8
            # Build the tile paths: the fixed image appears to be an
            # "HE"-stained tile and the floating image an IHC tile at the same
            # pyramid level/index — directories come from loop indices defined
            # earlier in the file (TODO confirm naming scheme).
            fix_img_name = os.path.join(data_in_dir,sub_dir[ihc_idx],"HE","level"+str(lv_idx),str(img_idx)+".jpg")
            float_img_name = os.path.join(data_in_dir, sub_dir[ihc_idx], sub_dir_IHC[ihc_idx], "level" + str(lv_idx),str(img_idx) + ".jpg")
            print(float_img_name)
            print(fix_img_name)
            Img_fix_col = Image.open(fix_img_name)
            Img_float_col = Image.open(float_img_name)
            # Img_fix = sp.misc.fromimage(Img_fix_col,True)  # flatten is True, means we convert images into graylevel images.
            # Img_float = sp.misc.fromimage(Img_float_col,True)
            Img_fix = np.array(Img_fix_col)  # PIL image -> NumPy array
            Img_float = np.array(Img_float_col)
            stitcher = Stitcher()

            # register the floating image onto the fixed one
            (result, vis) = stitcher.stitch([Img_fix, Img_float], showMatches=True)
            # cv2.imwrite("ImageA.jpg", cv2.cvtColor(Img_fix, cv2.COLOR_BGR2RGB))
            # cv2.imwrite("ImageB.jpg", cv2.cvtColor(Img_float, cv2.COLOR_BGR2RGB))
            # cv2.imwrite("matches.jpg", cv2.cvtColor(vis, cv2.COLOR_BGR2RGB))
            # cv2.imwrite("Result.jpg", cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
            # print("OK")

            '''
            if the rotation angle is confirmed to be 0, we can use below method to distill offset.
            '''
            matches, ptsA, ptsB, H, status = stitcher.returnMatchCoord([Img_fix, Img_float]) # here, already with RANSAC algorithm to match key points
            matched_ptsA = []
            matched_ptsB = []
            slops = []
            offsets = []
            # the body of this loop continues past the end of this excerpt
            for m_idx, m in enumerate(ptsA):
Ejemplo n.º 10
0
    return count, nameArray


# count the input files (countFiles is defined earlier in the file)
fileCount, names = countFiles()


# take the files four at a time; stitch each pair, then paste the two
# partial panoramas side by side onto one canvas
# NOTE(review): assumes fileCount is a multiple of 4 — names[i+3] raises
# IndexError otherwise. The loop body continues past this excerpt.
for i in range(0,fileCount,4):
    imageA = cv2.imread(names[i])
    imageB = cv2.imread(names[i+1])
    imageC = cv2.imread(names[i+2])
    imageD = cv2.imread(names[i+3])

    # stitch the first pair into a partial panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    # stitch the second pair into a partial panorama
    stitcher = Stitcher()
    (result2, vis2) = stitcher.stitch([imageC, imageD], showMatches=True)

    imageE = result
    imageF = result2

    # merge the two results onto a single canvas

    height, width, channels = imageE.shape
    # black canvas twice the panorama width (zero-filled uint8)
    blank_image = np.zeros((height,width*2,channels), np.uint8)
    
    # left half of the canvas receives the first partial panorama
    blank_image[:,0:width] = imageE    
# import the necessary packages
from panorama import Stitcher
import argparse
import imutils
import cv2

# parse the two required image paths from the command line
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
                help="path to the first image")
ap.add_argument("-s", "--second", required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# read both inputs and shrink them to a 400px width for faster processing
imageA = imutils.resize(cv2.imread(args["first"]), width=400)
imageB = imutils.resize(cv2.imread(args["second"]), width=400)

# run the stitcher, also requesting the keypoint-match visualization
stitcher = Stitcher()
result, vis = stitcher.stitch([imageA, imageB], showMatches=True)

# display the inputs, the match visualization, and the panorama
for title, img in (("Image A", imageA), ("Image B", imageB),
                   ("Keypoint Matches", vis), ("Result", result)):
    cv2.imshow(title, img)
cv2.waitKey(0)
Ejemplo n.º 12
0
    #right = rightStream.read()
    #print("left: ", left)
    #print("right: ", right)
    # resize the frames
    #left = imutils.resize(left, width=400)
    #right = imutils.resize(right, width=400)

    leftImage = leftVideo.read()[1]
    rightImage = rightVideo.read()[1]
    print("left:", leftImage)
    print("right:", rightImage)
    cv2.imwrite("left.jpg", leftImage)
    cv2.imwrite("right.jpg", rightImage)

    print("getting")
    result = stitcher.stitch([leftImage, leftImage])
    print("got")

    # stitch the frames together to form the panorama
    # IMPORTANT: you might have to change this line of code
    # depending on how your cameras are oriented; frames
    # should be supplied in left-to-right order
    print("getting result")
    #result = stitcher.stitch([left, right])
    print("got result")

    # no homograpy could be computed
    if result is None:
        print("[INFO] homography could not be computed")
        break
import cv2
import imutils
from panorama import Stitcher
import numpy as np


def sharpen(img):
    """Return *img* convolved with a 3x3 sharpening kernel (same depth)."""
    kernel = np.array([[-1, -1, -1],
                       [-1, 9, -1],
                       [-1, -1, -1]])
    return cv2.filter2D(img, -1, kernel)


# load two consecutive frames and sharpen them before matching
a = sharpen(cv2.imread("frames/002.jpg"))
b = sharpen(cv2.imread("frames/003.jpg"))

# stitch the pair, keeping the keypoint-match visualization
stitcher = Stitcher()
result, vis = stitcher.stitch([a, b], showMatches=True)

# preview the match visualization, then the panorama (any key advances)
for img in (vis, result):
    cv2.imshow("a", img)
    cv2.waitKey(0)

# persist both outputs
cv2.imwrite("vis.jpg", vis)
cv2.imwrite("res.jpg", result)
def main(args):
    """ROS panorama node: stitch left/center/right camera images and publish.

    The two homographies are computed once from the first complete raw image
    triple and then reused for every later frame. `Pano` selects the active
    branch: 0 stitches the raw images, 1 stitches the segmented images.
    """
    Pano = 1
    rospy.init_node('panorama', anonymous=True)
    ic = image_converter()
    stitcher = Stitcher()
    # transformation matrices, computed lazily once images are available
    M_left_center = None
    M_right_center = None

    while True:
        try:
            # if ic.left_image is not None and ic.center_image is not None and ic.right_image is not None and M_left_center is None and M_center_right is None:
            #     print("Cheguei aqui!")
            #     (M_left_center, M_center_right) = stitcher.transformationsCalculator([ic.left_image, ic.center_image, ic.right_image], ratio=0.8, reprojThresh=4.0)    and ic.left_image_seg is not None and ic.center_image_seg is not None and ic.right_image_seg is not None
            # compute the homographies once, as soon as a full raw triple
            # is available
            if ic.left_image is not None and ic.center_image is not None and ic.right_image is not None:

                if M_left_center is None and M_right_center is None:
                    (M_left_center,
                     M_right_center) = stitcher.transformationsCalculator(
                         [ic.left_image, ic.center_image, ic.right_image],
                         ratio=0.8,
                         reprojThresh=4.0)

            # raw-image branch (only active when Pano == 0)
            if ic.left_image is not None and ic.center_image is not None and ic.right_image is not None:
                if Pano == 0:

                    if M_left_center is not None and M_right_center is not None:

                        result = stitcher.stitch(
                            [ic.left_image, ic.center_image, ic.right_image],
                            M_left_center,
                            M_right_center,
                            ratio=0.8,
                            reprojThresh=4.0)

                        if result is None:
                            print(
                                "There was an error in the stitching procedure"
                            )
                        else:
                            pub_panorama(result)
                            ic.clean_images()
                    else:
                        print(
                            "Nao foram calculadas as matrizes de transformacao"
                        )
            else:
                continue

            # segmented-image branch (only active when Pano == 1)
            if ic.left_image_seg is not None and ic.center_image_seg is not None and ic.right_image_seg is not None:
                if Pano == 1:

                    if M_left_center is not None and M_right_center is not None:

                        result = stitcher.stitch([
                            ic.left_image_seg, ic.center_image_seg,
                            ic.right_image_seg
                        ],
                                                 M_left_center,
                                                 M_right_center,
                                                 ratio=0.8,
                                                 reprojThresh=4.0)

                        if result is None:
                            print(
                                "There was an error in the stitching procedure"
                            )
                        else:
                            pub_panorama_seg(result)
                            ic.clean_images()
                    else:
                        print(
                            "Nao foram calculadas as matrizes de transformacao"
                        )

            else:
                continue
        except KeyboardInterrupt:
            # NOTE(review): this handler only prints — the loop keeps running
            # after Ctrl-C; presumably a break/return is missing. Confirm.
            print("Shutting down")
Ejemplo n.º 15
0
import cv2 as cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="path to the first image")
ap.add_argument("-s",
                "--second",
                required=True,
                help="path to the second image")
args = vars(ap.parse_args())
# NOTE(review): the parsed paths are never used below — the input files are
# hard-coded; presumably this should read args["first"]/args["second"].

# load the two (hard-coded) images and resize them to a 400px width
imageB = cv2.imread("bryce_right_01.png")
imageA = cv2.imread("bryce_right_02.png")
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)

cv2.waitKey(0)
# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)
# FIX: removed a stray bare `image` expression here — the name is undefined
# in this script and raised NameError before anything was displayed

# show the images, the keypoint matches, and the panorama
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
Ejemplo n.º 16
0
import cv2
import numpy as np
from panorama import Stitcher
import imutils

# read the two input images
kep1 = cv2.imread("1.jpg")
kep2 = cv2.imread("2.jpg")

# scale both to a common height of 400 px
kep1 = imutils.resize(kep1, height=400)
kep2 = imutils.resize(kep2, height=400)

# stitch them, also collecting the keypoint-match visualization
stitcher = Stitcher()
panorama, kpontok = stitcher.stitch([kep1, kep2], showMatches=True)
panorama = imutils.resize(panorama, height=400)

# display everything and save the panorama to disk
cv2.imshow("első kép", kep1)
cv2.imshow("második kép", kep2)
cv2.imshow("Kulcspontok", kpontok)
cv2.imshow("Panoramakep", panorama)
cv2.imwrite("Panoramakep.png", panorama)
cv2.waitKey(0)
Ejemplo n.º 17
0
    def open_stream(self):
        """Receive length-prefixed JPEG frames over the client connection,
        save/display them, and stop when the client finishes or the operator
        presses q/x in the pygame window.
        """
        total_frame = 0
        # start collecting images from the stream
        print('Iniciando streaming de la camara en: ', self.client_address)
        e1 = cv2.getTickCount()

        # fetch the streamed images one by one
        try:
            myfont = pygame.font.SysFont("monospace", 15)
            screen = pygame.display.set_mode((200, 200), 0, 24)
            label = myfont.render(
                "Presione q o x para finalizar\n el programa.", 1,
                (255, 255, 0))
            screen.blit(label, (0, 0))
            pygame.display.flip()
            # NOTE(review): this capture is never opened or read, yet `cam`
            # itself (not a frame) is passed to imutils.resize below — that
            # call will fail; confirm the intended second image source.
            cam = cv2.VideoCapture()
            while self.corriendo_programa:
                # Read the length of the image as a 32-bit unsigned int. If the
                # length is zero, quit the loop
                image_len = struct.unpack(
                    '<L', self.connection.read(struct.calcsize('<L')))[0]
                if not image_len:
                    print('Finalizado por Cliente')
                    break
                # Construct a stream to hold the image data and read the image
                # data from the connection
                image_stream = io.BytesIO()
                image_stream.write(self.connection.read(image_len))

                image_stream.seek(0)

                jpg = image_stream.read()
                # FIX: np.fromstring is deprecated for binary input;
                # np.frombuffer is the supported zero-copy equivalent
                image = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8),
                                     cv2.IMREAD_COLOR)
                image = cv2.rectangle(image, (0, 120), (318, 238),
                                      (30, 230, 30), 1)

                # image = cv2.flip(image, -1)

                # save the frame to disk
                cv2.imwrite(
                    'streamtest_img/frame{:>05}.jpg'.format(total_frame),
                    image)

                # resize, stitch, and show the result
                imageA = imutils.resize(image, width=400)
                imageB = imutils.resize(cam, width=400)

                stitcher = Stitcher()
                (result, vis) = stitcher.stitch([imageA, imageB],
                                                showMatches=True)
                cv2.imshow('Computer Vision', result)

                total_frame += 1
                screen.blit(
                    myfont.render(("Total Frames: " + str(total_frame)), 1,
                                  (255, 255, 0), (0, 0, 0)), (60, 0))
                # q or x in the pygame window stops the stream
                for event in pygame.event.get():
                    if event.type == KEYDOWN:
                        key_input = pygame.key.get_pressed()
                        if key_input[pygame.K_x] or key_input[pygame.K_q]:
                            print("Deteniendo el stream")
                            self.corriendo_programa = False
                            break

            e2 = cv2.getTickCount()
            # compute the total streaming duration
            time0 = (e2 - e1) / cv2.getTickFrequency()
            print("Duracion del streaming:", time0)
            print('Total cuadros   : ', total_frame)
        finally:

            pygame.quit()
            self.connection.close()
            self.server_socket.close()
            cv2.destroyAllWindows()
            os.system("pause")
Ejemplo n.º 18
0
    # create the output directory for this sequence; if it already exists the
    # OSError is just reported and processing continues
    try:
        os.mkdir('Resultats Stitch/' + sequence)
    except OSError as error:
        print(error)

    # stitch consecutive image pairs (index, index+1) of the sequence;
    # `sequence`, `index`, and `stitcher` are defined outside this excerpt,
    # and the loop (including its exit) continues past the end of it
    while True:
        path1 = 'SYS809_projet2021_sequences/' + sequence + 'A-' + str(
            index).zfill(2) + '.jpg'
        path2 = 'SYS809_projet2021_sequences/' + sequence + 'A-' + str(
            index + 1).zfill(2) + '.jpg'
        image1 = cv2.imread(path1)
        image2 = cv2.imread(path2)

        # cv2.imread returns None for a missing file
        if image2 is not None:
            # stitch forward (image1 left, image2 right) with the
            # keypoint-match visualization
            (result, vis) = stitcher.stitch([image1, image2],
                                            ratio=0.8,
                                            showMatches=True)
            # stack the visualization above the panorama on one canvas
            (hA, wA) = vis.shape[:2]
            (hB, wB) = result.shape[:2]
            total = np.zeros((hA + hB, max(wA, wB), 3), dtype="uint8")
            total[0:hA, 0:wA] = vis
            total[hA:, 0:wA] = result

            cv2.imwrite(
                'Resultats Stitch/' + sequence + '/' + str(index).zfill(2) +
                '-' + str(index + 1).zfill(2) + '.jpg', total)

            # repeat with the pair order reversed
            (result, vis) = stitcher.stitch([image2, image1],
                                            ratio=0.8,
                                            showMatches=True)
            (hA, wA) = vis.shape[:2]