Example #1
def stitchWithFeature():
    Stitcher.featureMethod = "surf"  # "sift","surf" or "orb"
    Stitcher.isGPUAvailable = True
    Stitcher.searchRatio = 0.75  # 0.75 is a common ratio-test value for matches
    Stitcher.offsetCaculate = "mode"  # "mode" or "ransac"
    Stitcher.offsetEvaluate = 3  # for "mode": the required number of matches (e.g. 40); for "ransac": the corresponding value (e.g. 4.0)
    Stitcher.roiRatio = 0.2  # ROI length for stitching in the first direction
    Stitcher.fuseMethod = "fadeInAndFadeOut"
    stitcher = Stitcher()
    startTime = time.time()

    # method = "featureSearchIncre";  Stitcher.direction = 1;  Stitcher.directIncre = 0;
    # projectAddress = "images\\iron"
    # outputAddress = "result\\" + method + "\\iron" + str.capitalize(Stitcher.fuseMethod) + "\\"
    # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 50, stitcher.calculateOffsetForFeatureSearchIncre,
    #                         startNum=1, fileExtension="jpg", outputfileExtension="jpg")

    # method = "featureSearchIncre"; Stitcher.direction = 1;  Stitcher.directIncre = 1;
    # projectAddress = "images\\dendriticCrystal"
    # outputAddress = "result\\" + method + "\\dendriticCrystal" + str.capitalize(Stitcher.fuseMethod) + "\\"
    # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 11, stitcher.calculateOffsetForFeatureSearchIncre,
    #                         startNum=1, fileExtension="jpg", outputfileExtension="jpg")

    # Stitcher.featureMethod = "surf"; Stitcher.searchRatio = 0.95; Stitcher.offsetEvaluate = 3;
    # method = "featureSearchIncre";  Stitcher.direction = 1;  Stitcher.directIncre = 1;
    # Stitcher.isEnhance = True;  # Stitcher.isClahe = True;
    # Stitcher.fuseMethod = "notFuse"
    # projectAddress = "images\\superalloyTurbineblade"
    # outputAddress = "result\\" + method + "\\superalloyTurbineblade" + str.capitalize(Stitcher.fuseMethod) + "\\"
    # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 1, stitcher.calculateOffsetForFeatureSearchIncre,
    #                         startNum=1, fileExtension="jpg", outputfileExtension="jpg")

    method = "featureSearchIncre"
    Stitcher.direction = 4
    Stitcher.directIncre = 0
    projectAddress = "images\\zirconLarge"
    outputAddress = "result\\" + method + "\\zirconLarge" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        97,
        stitcher.calculateOffsetForFeatureSearchIncre,
        startNum=2,
        fileExtension="jpg",
        outputfileExtension="png")

    # method = "featureSearch"; Stitcher.direction = 4;  Stitcher.directIncre = 0;
    # projectAddress = "images\\zirconLargeResized_4_INTER_AREA"
    # outputAddress = "result\\" + method + "\\zirconLargeResized_4_INTER_AREA" + str.capitalize(Stitcher.fuseMethod) + "\\"
    # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 97, stitcher.calculateOffsetForFeatureSearch,
    #                         startNum=1, fileExtension="jpg", outputfileExtension="jpg")

    # method = "featureSearch"; Stitcher.direction = 4;  Stitcher.directIncre = 0;
    # projectAddress = "images\\zirconSmall"
    # outputAddress = "result\\" + method + "\\zirconSmall" + str.capitalize(Stitcher.fuseMethod) + "\\"
    # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 194, stitcher.calculateOffsetForFeatureSearch,
    #                         startNum=1, fileExtension="jpg", outputfileExtension="jpg")
    endTime = time.time()
    print("Time Consuming = " + str(endTime - startTime))
Example #2
    def __init__(self):

        bpy.ops.wm.open_mainfile(filepath="blend_files/player2.blend")

        # has to be saved in the blender file
        # check to make sure it's not set to the default
        self.animation_name = bpy.data.scenes[0].name

        self.stitcher = Stitcher(self.animation_name)

        config = ConfigParser()
        config.read('config.ini')

        result = config.read('animation_config/' + self.animation_name +
                             ".cfg")
        if ''.join(result) == '':
            sys.stderr.write("** could not read " + self.animation_name +
                             ".cfg")
            self.animations = {}
        else:
            self.animations = config._sections['animations']

        self.ignored_actions = config.get('config', 'ignored_actions')

        self.directions = config._sections['directions']
        use_antialiasing = eval(config.get('output', 'use_antialiasing'))
        self.frame_step = int(config.get('output', 'frame_step'))

        self.grip = bpy.data.objects['Grip']
        self.grip.rotation_mode = 'XYZ'
        self.target = bpy.data.objects['Armature']
        bpy.data.scenes[0].render.use_antialiasing = use_antialiasing
Example #3
    def imageStiching(self, img1, img2):
        # read the two images to stitch
        imageA = cv2.imread(img1)
        imageB = cv2.imread(img2)
        # cv2.imshow("Image A", imageA)
        # cv2.imshow("Image B", imageB)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # stitch the images together
        stitcher = Stitcher()
        return stitcher.stitch([imageA, imageB])
Example #4
def stitchWithPhase():
    method = "phaseCorrelate"
    Stitcher.fuseMethod = "notFuse"
    stitcher = Stitcher()
    projectAddress = "images\\zirconSmall"
    outputAddress = "result\\" + method + "\\zirconSmall" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitch(projectAddress,
                            outputAddress,
                            51,
                            stitcher.calculateOffsetForPhaseCorrleate,
                            startNum=43,
                            fileExtension="jpg",
                            outputfileExtension="jpg")
    Stitcher.phase.shutdown()
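Example #4's calculateOffsetForPhaseCorrleate is internal to the Stitcher class. For reference, here is a minimal sketch of estimating a purely translational offset with OpenCV's phase correlation (the wrapper function itself is hypothetical):

import cv2
import numpy as np

def estimate_offset_by_phase_correlation(image_a, image_b):
    # cv2.phaseCorrelate expects single-channel floating-point input.
    a = np.float32(cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY))
    b = np.float32(cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY))
    (dx, dy), response = cv2.phaseCorrelate(a, b)
    # (dx, dy) is the sub-pixel shift of image_b relative to image_a;
    # response indicates how confident the correlation peak is.
    return dx, dy, response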
Example #6
from Stitcher import Stitcher
import cv2

# read the images to stitch
imageA = cv2.imread("image/left_01.png")
imageB = cv2.imread("image/right_01.png")

# stitch the images into a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB])

# show all the images
cv2.imshow("Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
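Examples #3 and #6 use the classic two-image Stitcher whose stitch() returns the panorama plus a keypoint-match visualisation. Its internals are not shown on this page; below is a minimal sketch of the usual pipeline (detect, ratio-test match, RANSAC homography, warp). ORB is used here to stay within stock OpenCV; the real class may well use SIFT or SURF.

import cv2
import numpy as np

def stitch_pair(image_a, image_b, ratio=0.75, reproj_thresh=4.0):
    # 1) Detect keypoints and descriptors in both images.
    orb = cv2.ORB_create()
    kps_a, desc_a = orb.detectAndCompute(image_a, None)
    kps_b, desc_b = orb.detectAndCompute(image_b, None)

    # 2) Match descriptors and keep pairs passing Lowe's ratio test.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    raw = matcher.knnMatch(desc_b, desc_a, k=2)
    good = [m[0] for m in raw
            if len(m) == 2 and m[0].distance < ratio * m[1].distance]
    if len(good) < 4:
        return None  # not enough matches to estimate a homography

    # 3) Estimate the homography mapping image_b into image_a's frame.
    pts_b = np.float32([kps_b[m.queryIdx].pt for m in good])
    pts_a = np.float32([kps_a[m.trainIdx].pt for m in good])
    H, _ = cv2.findHomography(pts_b, pts_a, cv2.RANSAC, reproj_thresh)

    # 4) Warp image_b and paste image_a onto the left of the canvas.
    result = cv2.warpPerspective(
        image_b, H, (image_a.shape[1] + image_b.shape[1], image_a.shape[0]))
    result[0:image_a.shape[0], 0:image_a.shape[1]] = image_a
    return result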
Example #7
class RenderScript():

    def __init__(self):

        bpy.ops.wm.open_mainfile(filepath="blend_files/player2.blend")

        # has to be saved in the blender file
        # check to make sure it's not set to the default
        self.animation_name = bpy.data.scenes[0].name

        self.stitcher = Stitcher(self.animation_name)

        config = ConfigParser()
        config.read('config.ini')

        result = config.read('animation_config/' + self.animation_name + ".cfg")
        if ''.join(result) == '':
            sys.stderr.write("** could not read " + self.animation_name + ".cfg")
            self.animations = {}
        else:
            self.animations = config._sections['animations']

        self.ignored_actions = config.get('config', 'ignored_actions')

        self.directions = config._sections['directions']
        use_antialiasing = eval(config.get('output', 'use_antialiasing'))
        self.frame_step = int(config.get('output', 'frame_step'))

        self.grip = bpy.data.objects['Grip']
        self.grip.rotation_mode = 'XYZ'
        self.target = bpy.data.objects['Armature']
        bpy.data.scenes[0].render.use_antialiasing=use_antialiasing


    def render_position(self, action, direction, rotation, grip):

        grip.rotation_euler[2] = rotation
        filepath = 'tmp/' + action + '_' + direction + '_'
        bpy.data.scenes[0].render.filepath = filepath
        bpy.ops.render.render(animation=True)


    def main(self):

        start_time = time.time()
        for action in bpy.data.actions.keys():
            self.render(action)

        self.stitcher.save()
        shutil.rmtree('tmp')

        end = time.time()
        return end - start_time

    def render(self, action):

        if action in self.ignored_actions:
            return

        start, finish = bpy.data.actions[action].frame_range
        self.target.animation_data.action = bpy.data.actions[action]

        bpy.data.scenes[0].frame_start = start
        bpy.data.scenes[0].frame_end = finish - 1 # finish frame should be dupe of start frame
        bpy.data.scenes[0].frame_step = self.frame_step

        if self.animations[action]:
            keys = self.animations[action].split(",")
        else:
            keys = self.directions.keys()

        for key in keys:
            self.render_position(action, key, eval(self.directions[key]), self.grip)
            self.stitcher.add_to_sheet(action, key)
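A note on the config handling in this class: eval() executes arbitrary text from the .cfg file, and ignored_actions stays a raw string, so `action in self.ignored_actions` is a substring test rather than an exact-name test. A safer sketch using ConfigParser's typed getters (assuming the same keys exist):

# Safer equivalents for the eval() calls above, same config keys assumed.
use_antialiasing = config.getboolean('output', 'use_antialiasing')
frame_step = config.getint('output', 'frame_step')
# Split the comma-separated list so membership tests match whole action names.
ignored_actions = [a.strip()
                   for a in config.get('config', 'ignored_actions').split(',')]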
Example #10
print('start')
device1_cap = cv2.VideoCapture(device1_url)  # open the first video stream
device2_cap = cv2.VideoCapture(device2_url)  # open the second video stream

while device1_cap.isOpened() or device2_cap.isOpened():
    print('success')
    device1_ret = device2_ret = False
    if device1_cap.isOpened():
        device1_ret, device1_frame = device1_cap.read()
        if device1_ret:
            cv2.imshow('frame1', device1_frame)
    if device2_cap.isOpened():
        device2_ret, device2_frame = device2_cap.read()
        if device2_ret:
            cv2.imshow('frame2', device2_frame)

    if device1_ret and device2_ret:
        # stitch the two frames into a panorama
        stitcher = Stitcher()
        (result, vis) = stitcher.stitch([device1_frame, device2_frame],
                                        showMatches=True)
        print("panoramic mode")
        cv2.imshow("Result", result)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break


print('end')
device1_cap.release()
device2_cap.release()

cv2.destroyAllWindows()
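For fixed cameras the homography between the two views does not change from frame to frame, so re-running feature matching on every frame (as stitcher.stitch does above) is wasted work. A hedged sketch of the caching pattern; estimate_homography and warp_with_homography are hypothetical stand-ins for whatever the Stitcher exposes:

import cv2

def stitch_stream_pair(cap_a, cap_b, estimate_homography, warp_with_homography):
    H = None
    while cap_a.isOpened() and cap_b.isOpened():
        ret_a, frame_a = cap_a.read()
        ret_b, frame_b = cap_b.read()
        if not (ret_a and ret_b):
            break
        if H is None:
            # Match features once on the first frame pair, then reuse H.
            H = estimate_homography(frame_a, frame_b)
        cv2.imshow('Result', warp_with_homography(frame_a, frame_b, H))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

Examples #11 and #14 below follow this pattern, passing precomputed homographies into the stitcher.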
Example #11
File: main.py  Project: pan-long/cs4243
def main():
    stitcher = Stitcher()
    if config_scale:
        background = cv2.imread('images/background_scaled.jpg')
    else:
        background = cv2.imread('images/background.jpg')

    transformer = Transformer(config_scale)

    cap_left = cv2.VideoCapture(videos_path + videos[0])
    cap_mid = cv2.VideoCapture(videos_path + videos[1])
    cap_right = cv2.VideoCapture(videos_path + videos[2])

    # OpenCV 2.x-era API; in OpenCV 3+ these are cv2.CAP_PROP_FRAME_WIDTH etc.
    frame_width = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    frame_count = int(cap_mid.get(cv.CV_CAP_PROP_FRAME_COUNT))

    init_points = {'C0': (71, 1153),
                   'R0': (80, 761), 'R1': (80, 1033), 'R2': (95, 1127), 'R3': (54, 1156), 'R4': (65, 1185),
                   'R5': (61, 1204), 'R6': (56, 1217), 'R7': (69, 1213), 'R8': (67, 1253), 'R9': (75, 1281),
                   'R10': (92, 1347),
                   'B0': (71, 1409), 'B1': (72, 1016), 'B2': (47, 1051), 'B3': (58, 1117), 'B4': (74, 1139),
                   'B5': (123, 1156), 'B6': (61, 1177), 'B7': (48, 1198), 'B8': (102, 1353)}

    points = init_points.values()
    tracker = Tracker(background, config_scale, init_points.values())

    # cap_left.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_mid.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    # cap_right.set(cv.CV_CAP_PROP_POS_FRAMES, 1400)
    for fr in range(frame_count):
        print(fr)
        status_left, frame_left = cap_left.read()
        status_mid, frame_mid = cap_mid.read()
        status_right, frame_right = cap_right.read()

        # integer division so cv2.resize gets an integer size under Python 3
        scaled_size = (frame_width // image_down_scale_factor, frame_height // image_down_scale_factor)
        frame_left = cv2.resize(frame_left, scaled_size)
        frame_mid = cv2.resize(frame_mid, scaled_size)
        frame_right = cv2.resize(frame_right, scaled_size)

        # Adjust the brightness difference.
        frame_mid = cv2.convertScaleAbs(frame_mid, alpha=0.92)

        if status_left and status_mid and status_right:
            warped_left_mid = stitcher.stitch(frame_mid, frame_left, H_left_mid)
            warped_left_mid_right = stitcher.stitch(warped_left_mid, frame_right, H_mid_right)
            warped_left_mid_right_cropped = crop_img(warped_left_mid_right)

            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.waitKey(0)

            points = tracker.tracking(warped_left_mid_right_cropped)
            for i in range(len(points)):
                cv2.circle(warped_left_mid_right_cropped, (points[i][1], points[i][0]), 3, (0, 0, 255), -1)

            height, width = warped_left_mid_right_cropped.shape[:2]
            warped_left_mid_right_cropped = cv2.resize(warped_left_mid_right_cropped, (width // 2, height // 2))
            cv2.imshow('Objects', warped_left_mid_right_cropped)
            cv2.waitKey(1)

            # background = transformer.transform(points)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.imshow('Objects', background)
            # cv2.waitKey(30)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cap_left.release()
    cap_mid.release()
    cap_right.release()
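crop_img above comes from the surrounding project and is not shown. A minimal stand-in that trims the black border left by warpPerspective (purely an assumption about its behaviour):

import cv2
import numpy as np

def crop_img(image):
    # Bounding box of all non-black pixels.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    coords = cv2.findNonZero((gray > 0).astype(np.uint8))
    if coords is None:
        return image  # entirely black; nothing to crop
    x, y, w, h = cv2.boundingRect(coords)
    return image[y:y + h, x:x + w]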
Example #12
def stitchWithFeature():
    Stitcher.featureMethod = "surf"  # "sift","surf" or "orb"
    Stitcher.isGPUAvailable = True
    Stitcher.searchRatio = 0.75  # 0.75 is a common ratio-test value for matches
    Stitcher.offsetCaculate = "mode"  # "mode" or "ransac"
    Stitcher.offsetEvaluate = 3  # for "mode": the required number of matches (e.g. 3); for "ransac": the corresponding value (e.g. 3.0)
    Stitcher.roiRatio = 0.2  # ROI length for stitching in the first direction
    Stitcher.fuseMethod = "fadeInAndFadeOut"  # "notFuse", "average", "maximum", "minimum", "fadeInAndFadeOut", "trigonometric", "multiBandBlending"
    stitcher = Stitcher()

    Stitcher.direction = 1
    Stitcher.directIncre = 0
    projectAddress = "demoImages\\iron"
    outputAddress = "result\\iron" + str.capitalize(Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        1,
        stitcher.calculateOffsetForFeatureSearchIncre,
        startNum=1,
        fileExtension="jpg",
        outputfileExtension="jpg")

    Stitcher.direction = 1
    Stitcher.directIncre = 1
    projectAddress = "demoImages\\dendriticCrystal"
    outputAddress = "result\\dendriticCrystal" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        1,
        stitcher.calculateOffsetForFeatureSearchIncre,
        startNum=1,
        fileExtension="jpg",
        outputfileExtension="jpg")

    Stitcher.direction = 4
    Stitcher.directIncre = 0
    projectAddress = "demoImages\\zirconBSE"
    outputAddress = "result\\zirconBSE" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        1,
        stitcher.calculateOffsetForFeatureSearch,
        startNum=1,
        fileExtension="jpg",
        outputfileExtension="jpg")

    Stitcher.direction = 4
    Stitcher.directIncre = 0
    projectAddress = "demoImages\\zirconCL"
    outputAddress = "result\\zirconCL" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        1,
        stitcher.calculateOffsetForFeatureSearch,
        startNum=1,
        fileExtension="jpg",
        outputfileExtension="jpg")

    Stitcher.direction = 4
    Stitcher.directIncre = 0
    projectAddress = "demoImages\\zirconREM"
    outputAddress = "result\\zirconREM" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        1,
        stitcher.calculateOffsetForFeatureSearch,
        startNum=1,
        fileExtension="jpg",
        outputfileExtension="jpg")

    Stitcher.direction = 4
    Stitcher.directIncre = 0
    projectAddress = "demoImages\\zirconTEM"
    outputAddress = "result\\zirconTEM" + str.capitalize(
        Stitcher.fuseMethod) + "\\"
    stitcher.imageSetStitchWithMutiple(
        projectAddress,
        outputAddress,
        1,
        stitcher.calculateOffsetForFeatureSearch,
        startNum=1,
        fileExtension="jpg",
        outputfileExtension="jpg")
Example #13
from Stitcher import Stitcher
import cv2
from os import listdir
from os.path import isfile, join

mypath = 'images'
# f = []
files = [join(mypath, f) for f in listdir(mypath) if isfile(join(mypath, f))]
files = sorted(files)
# print(files)
stitcher = Stitcher()

j = files[-1]

im1 = cv2.imread(j)
# cv2.imshow('i',im1)

for i in reversed(range(len(files) - 1)):
    print(i)
    im2 = cv2.imread(files[i])
    # cv2.imshow('n',im2)

    result = stitcher.stitch(im1, im2)

    im1 = result

    # im1= result[~np.all(result == 0, axis=2)]
# result = stitcher.stitch(im1,im2)
# im1= result[~np.all(result == 0, axis=2)]

# result = stitcher.stitch(im1,im2)
Example #14
# initialize video streams
# (VideoStream, calibrator and matcher come from the surrounding project)
import time
import cv2

no_of_streams = 2
vss = [VideoStream(calibrator, src=1), VideoStream(calibrator, src=0)]
calibrator.calculate_optimal_camera_matrix(vss[0].read().shape[1],
                                           vss[0].read().shape[0])

# initialize homographies
homographies = []
for i in range(no_of_streams - 1):
    homographies.append(matcher.match(vss[i+1].frame, vss[i].frame))

vss_frames_list = []
for i in range(no_of_streams):
    vss_frames_list.append(vss[i].read())

stitcher = Stitcher(vss_frames_list, homographies)

vss[0].start()
time.sleep(1)
vss[1].start()
time.sleep(1)

while True:
    frame1 = vss[0].frame
    # print frame1
    frame2 = vss[1].frame

    stitcher.set_images([frame1, frame2])
    stitcher.leftshift()

    cv2.imshow('Result', stitcher.result)
Example #15
import argparse

import cv2
import imutils
from Stitcher import Stitcher

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="path to the first image")
ap.add_argument("-s",
                "--second",
                required=True,
                help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])

imageA = cv2.imread("IMG_0090.jpg")
imageB = cv2.imread("IMG_0091.jpg")
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
Example #16
import cv2
import imutils
from Stitcher import Stitcher
import copy
import numpy as np

if __name__ == "__main__":
    imageA = cv2.imread("images/random_forest2.jpg")
    imageB = cv2.imread("images/random_forest1_lowexposure.jpg")
    # imageA = cv2.imread("images/boat1.jpg")
    # imageB = cv2.imread("images/boat2.jpg")
    imageA = imutils.resize(imageA, width=2000)
    imageB = imutils.resize(imageB, width=2000)
    # stitch the images together to create a panorama
    stitcher = Stitcher()
    (result, H, vis) = stitcher.stitch(imageB, imageA, showMatches=True)
    # get the warped image that we use for blending
    wrapped_image = cv2.warpPerspective(
        imageB, H, (imageA.shape[1] + imageB.shape[1], imageB.shape[0]))
    src = copy.deepcopy(wrapped_image)
    # convert it to grayscale image
    im_bw = cv2.cvtColor(wrapped_image, cv2.COLOR_RGB2GRAY)
    # threshold the image
    ret, thresh_im = cv2.threshold(im_bw, 0, 255, 0)
    # calculate the contours from the black and white image
    _, contours, hierarchy = cv2.findContours(im_bw, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
    # approximate the contour by polygon
    # this polygon is used as mask for the blending
    epsilon = 0.1 * cv2.arcLength(contours[0], True)
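The snippet is cut off after computing epsilon. A plausible continuation, offered only as a sketch of how such a polygon is usually turned into a blend mask (not the original author's code; it reuses the cv2 and numpy imports above):

# approximate the contour by a polygon and rasterise it as a blend mask
approx = cv2.approxPolyDP(contours[0], epsilon, True)
mask = np.zeros(im_bw.shape, dtype=np.uint8)
cv2.fillPoly(mask, [approx], 255)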
Example #17
from Stitcher import Stitcher

# .pos file specifying the names of the tile images
position_list_filename = '/home/sihao/SchultzBox/Sihao/Imaging/050618Grid_stack/050618aaPositionList.pos'

# Create Stitcher object
s = Stitcher(position_list_filename)

# Stitch and save
s.stitch()
s.save('stitched.png')
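Example #17 drives the whole stitch from a .pos position list (the kind produced by e.g. Micro-Manager). This Stitcher's internals are not shown; below is a minimal sketch of the naive placement step, pasting each tile at its stage coordinates (all names and the coordinate convention are assumptions):

import numpy as np

def place_tiles(tiles, positions):
    # tiles: list of (h, w, 3) arrays; positions: list of (x, y) pixel offsets.
    tile_h, tile_w = tiles[0].shape[:2]
    max_x = max(int(x) for x, y in positions)
    max_y = max(int(y) for x, y in positions)
    canvas = np.zeros((max_y + tile_h, max_x + tile_w, 3), dtype=np.uint8)
    for tile, (x, y) in zip(tiles, positions):
        canvas[int(y):int(y) + tile_h, int(x):int(x) + tile_w] = tile
    return canvas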