Example #1
def warp_flow(img, flow):
    """Warps img backwards along a dense optical-flow field."""
    h, w = flow.shape[:2]
    flow = -flow  # remap needs source coordinates, so invert the flow
    flow[:, :, 0] += np.arange(w)                 # add the x coordinate grid
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]  # add the y coordinate grid
    res = cv.remap(img, flow, None, cv.INTER_LINEAR)
    return res
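A minimal usage sketch (the frame file names are placeholders; any dense float32 flow field of shape (h, w, 2) works):

import cv2 as cv
import numpy as np

# two consecutive grayscale frames (placeholder file names)
prev = cv.imread("frame0.png", cv.IMREAD_GRAYSCALE)
curr = cv.imread("frame1.png", cv.IMREAD_GRAYSCALE)

# dense Farneback flow: one float32 (dx, dy) vector per pixel
flow = cv.calcOpticalFlowFarneback(prev, curr, None,
                                   0.5, 3, 15, 3, 5, 1.2, 0)

warped = warp_flow(curr, flow)  # warp curr back toward prev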
Example #2
def show_remap(cameras, map1, map2):
    """Computes undistortion and rectification maps and remaps camera
    outputs to show a stereo undistorted image."""

    stream_prefix = 'Camera'

    stream_windows = initialize_windows(stream_prefix, cameras)
    dst_windows = initialize_windows('dst', cameras)

    focus_window(stream_windows[0])

    keypress = -1

    while keypress == -1:

        for cam, swin, dwin, m1, m2 in zip(cameras, stream_windows,
                                           dst_windows, map1, map2):
            cap = cam.stream.read()
            cap = cv2.cvtColor(cap, cv2.COLOR_BGR2GRAY)

            dst = cv2.remap(cap, m1, m2, cv2.INTER_LINEAR)

            cv2.imshow(swin, cap)
            cv2.imshow(dwin, dst)

        keypress = cv2.waitKey(5)

    return
Example #3
def right():

    s = cv2.FileStorage()
    s.open("resources/calibration/11-09-0a-0f-05-09.yaml",
           cv2.FILE_STORAGE_READ)
    camMat = s.getNode("CameraMatrix").mat()
    distCoef = s.getNode("DistortionCoeffs").mat()
    w = int(s.getNode("Width").real())
    h = int(s.getNode("Height").real())
    img = cv2.imread("resources/video/record-2018-07-25-13-51-49_0_Moment.jpg")

    nk = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(camMat,
                                                                distCoef,
                                                                (1920, 1080),
                                                                np.eye(3),
                                                                balance=0)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(camMat, distCoef,
                                                     np.eye(3), nk,
                                                     (1920, 1080),
                                                     cv2.CV_16SC2)
    img = cv2.remap(img, map1, map2, cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_CONSTANT)
    scale_percent = 100  # percentage of the original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_LANCZOS4)
    return resized
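For reference, a calibration file with the nodes read above could be produced with the same FileStorage API; a sketch with placeholder values, not real calibration data:

import numpy as np
import cv2

s = cv2.FileStorage("calibration.yaml", cv2.FILE_STORAGE_WRITE)
s.write("CameraMatrix", np.eye(3))             # placeholder intrinsics
s.write("DistortionCoeffs", np.zeros((4, 1)))  # fisheye model: 4 coefficients
s.write("Width", 1920)
s.write("Height", 1080)
s.release()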
Example #4
def depth(lmap, rmap, lImg, rImg, f):
    l_images = cv2.imread(lImg)
    #cv2.imshow('left image',l_images)
    r_images = cv2.imread(rImg)
    #cv2.imshow('right image',r_images)

    # stereo rectification remap
    img1_rectified = cv2.remap(l_images, lmap['lm1'], lmap['lm2'],
                               cv2.INTER_LINEAR)
    # left_map1 & left_map2 are remap maps 1/2; linear interpolation
    img2_rectified = cv2.remap(r_images, rmap['rm1'], rmap['rm2'],
                               cv2.INTER_LINEAR)

    imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
    imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)

    cv2.imshow("left", imgL)
    cv2.imshow("right", imgR)
    # build the disparity map
    window_size = 3
    minDisp = 0
    numDisp = 128 - minDisp
    stereo = cv2.StereoSGBM_create(minDisparity=minDisp,
                                   numDisparities=numDisp,
                                   blockSize=5,
                                   P1=8 * 3 * window_size**2,
                                   P2=32 * 3 * window_size**2,
                                   disp12MaxDiff=1,
                                   uniquenessRatio=10,
                                   speckleWindowSize=50,
                                   speckleRange=16)
    #stereo = cv2.StereoBM_create(numDisparities=0, blockSize=5)
    #disparity = stereo.compute(imgL, imgR)
    #disparity = stereo.compute(imgGrayL, imgGrayR).astype(np.float32) / 16
    #disp = cv2.normalize(disp, disp, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    #disparity = (disparity - minDisp) / numDisp
    #threeD_disp = cv2.reprojectImageTo3D(disparity,lmap['Q'],0)
    #f.write(str(threeD_disp))
    #cv2.imshow('3d',threeD_disp)
    # dump the remap parameters to check for problems
    f.write(str(lmap))
    f.write(str(rmap))
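The commented-out lines hint at the intended next step; a sketch of computing and displaying the disparity from the matcher created above (not part of the original function):

disparity = stereo.compute(imgL, imgR).astype(np.float32) / 16  # SGBM output is fixed-point * 16
disp = cv2.normalize(disparity, None, alpha=0, beta=255,
                     norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
cv2.imshow('disparity', disp)
cv2.waitKey(0)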
Example #5
    def getUndistortImage(self):
        rat, img = self.capture.read()
        if rat:
            # new camera matrix; rescale nk[0, 0] / nk[1, 1] here to zoom the view
            nk = self.camMat.copy()
            img = cv2.resize(img, (self.w, self.h),
                             interpolation=cv2.INTER_AREA)
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(
                self.camMat, self.distCoef, np.eye(3), nk, (self.w, self.h),
                cv2.CV_16SC2)
            img = cv2.remap(img, map1, map2, cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_CONSTANT)
        return rat, img
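Since the maps depend only on the calibration and not on the frame, they could be computed once instead of on every call; a sketch assuming the same attributes as above, with the map computation moved to __init__:

# in __init__, after camMat, distCoef, w and h are loaded:
self.map1, self.map2 = cv2.fisheye.initUndistortRectifyMap(
    self.camMat, self.distCoef, np.eye(3), self.camMat.copy(),
    (self.w, self.h), cv2.CV_16SC2)

# per frame, only the cheap lookup remains:
img = cv2.remap(img, self.map1, self.map2, cv2.INTER_LINEAR,
                borderMode=cv2.BORDER_CONSTANT)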
Example #6
def show_rectified(cameras, rect, proj, map1, map2):
    """Computes undistortion and rectification maps and remaps camera
    outputs to show a stereo undistorted image."""

    udists = [np.empty(cameras[0].size, np.uint8) for __ in range(len(cameras))]

    stream_prefix = 'Camera'

    stream_windows = initialize_windows(stream_prefix, cameras)

    focus_window(stream_windows[0])

    keypress = -1

    while keypress == -1:

        for i, (camera, m1, m2) in enumerate(zip(cameras, map1, map2)):
            cap = camera.stream.read()
            cap = cv2.cvtColor(cap, cv2.COLOR_BGR2GRAY)
            cv2.imshow(stream_windows[i], cap)

            cv2.remap(cap, m1, m2, cv2.INTER_LINEAR, udists[i])

        udist_join = np.zeros(
            (cameras[0].size[0], cameras[0].size[1] * len(udists)), np.uint8)

        x_orig = 0

        for udist in udists:
            udist_join[:udist.shape[0], x_orig:x_orig + udist.shape[1]] = udist
            x_orig += udist.shape[1]

        cv2.imshow('Stereo Images', udist_join)
        # cv2.imshow('Stereo Images 0', udists[0])
        # cv2.imshow('Stereo Images 1', udists[1])

        keypress = cv2.waitKey(5)

    return
Example #7
def getTexture(imagePath):
    #posmap_pred = generatePositionMap(image, model)
    img = cv2.imread(imagePath)
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    posmap_path = imagePath.replace('jpg', 'npy')
    posmap = np.load(posmap_path)
    texture = cv2.remap(img,
                        posmap[:, :, :2].astype(np.float32),
                        None,
                        interpolation=cv2.INTER_LINEAR,
                        borderMode=cv2.BORDER_CONSTANT,
                        borderValue=(0))
    texture_path = 'temp.png'
    cv2.imwrite(texture_path, texture)
    return texture
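cv2.remap treats the first map as per-pixel source coordinates, output[y, x] = img[map[y, x, 1], map[y, x, 0]], which is what lets a position map act as a texture lookup table. A toy sketch with an identity map, which must reproduce the input:

import numpy as np
import cv2

img = np.random.randint(0, 255, (4, 4, 3), np.uint8)
xs, ys = np.meshgrid(np.arange(4, dtype=np.float32),
                     np.arange(4, dtype=np.float32))
identity_map = np.dstack([xs, ys])  # map[y, x] = (x, y)
out = cv2.remap(img, identity_map, None, interpolation=cv2.INTER_NEAREST)
assert (out == img).all()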
Example #8
    def remap(self, params):
        rvec = params[:3]
        tvec = params[3:6]
        image_points, _ = cv.projectPoints(
            self.objpoints, rvec, tvec, self.K, np.zeros(5))

        img_gray = cv.cvtColor(self.input_image, cv.COLOR_BGR2GRAY)
        x, y = img_gray.shape
        image_height_coords = image_points[:, 0, 0].reshape(
            (y, x)).astype(np.float32).T
        image_width_coords = image_points[:, 0, 1].reshape(
            (y, x)).astype(np.float32).T

        remapped = cv.remap(img_gray, image_height_coords,
                            image_width_coords, cv.INTER_CUBIC, None, cv.BORDER_REPLICATE)
        plt.imshow(remapped)
        plt.show()
Example #9
def rear():

    s = cv2.FileStorage()
    s.open("resources/calibration/04-1e-1b-08-02-07.yaml",
           cv2.FILE_STORAGE_READ)
    camMat = s.getNode("CameraMatrix").mat()
    distCoef = s.getNode("DistortionCoeffs").mat()
    w = int(s.getNode("Width").real())
    h = int(s.getNode("Height").real())
    img = cv2.imread("resources/video/record-2018-07-25-13-51-49_3_Moment.jpg")
    nk = camMat.copy()
    nk[1, 1] /= 2  # halve the vertical focal length in the new camera matrix
    img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(camMat, distCoef,
                                                     np.eye(3), nk, (w, h),
                                                     cv2.CV_16SC2)
    img = cv2.remap(img, map1, map2, cv2.INTER_LINEAR,
                    borderMode=cv2.BORDER_CONSTANT)
    scale_percent = 50  # percentage of the original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    return resized
Example #10
def generateModel(imagePath, model='saved_models/256_256_resfcn256_weight'):
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    posmap_pred = generatePositionMap(image, model)

    triangles = np.loadtxt('indices/triangles.txt').astype(np.int32)
    uv_coords = generate_uv_coords()

    texture = cv2.remap(image,
                        posmap_pred[:, :, :2].astype(np.float32),
                        None,
                        interpolation=cv2.INTER_LINEAR,
                        borderMode=cv2.BORDER_CONSTANT,
                        borderValue=(0))
    kpt = get_landmarks(posmap_pred)
    vertices = get_vertices(posmap_pred)
    new_vertices = frontalize(vertices)
    new_vertices[:, 1] = 255 - new_vertices[:, 1]
    colors = get_colors(image, vertices)
    path_texture = 'gui_models/' + str(len(glob.glob('gui_models/*.obj')) +
                                       1) + '_tex.obj'
    write_obj_with_texture(path_texture, new_vertices, triangles, texture,
                           uv_coords / 256.0)
    return path_texture
Example #11
    def stereo_sgbm(self):

        voice = Voice()
        voice.completed.set()

        Left_Stereo_Map = (config.left_map1, config.left_map2)
        Right_Stereo_Map = (config.right_map1, config.right_map2)

        cv.namedWindow("Two Camera")
        # cv.namedWindow('depth')
        # cv.createTrackbar("windowsize", "depth", 1, 25, lambda x: None)
        # cv.createTrackbar("max disp", "depth", 247, 256, lambda x: None)
        leftCam = cv.VideoCapture(self.left_camera_id + cv.CAP_DSHOW)
        rightCam = cv.VideoCapture(self.right_camera_id + cv.CAP_DSHOW)

        leftCam.set(cv.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
        leftCam.set(cv.CAP_PROP_FRAME_WIDTH, self.frameWidth)
        rightCam.set(cv.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
        rightCam.set(cv.CAP_PROP_FRAME_WIDTH, self.frameWidth)

        # leftCam.set(cv.CAP_PROP_FPS, 3)
        # rightCam.set(cv.CAP_PROP_FPS, 3)

        # leftCam.set(cv.CAP_PROP_BUFFERSIZE, 3)
        _, fl = leftCam.read()
        # window_size = cv.getTrackbarPos("windowsize", "depth")
        window_size = 1
        # max_disp = cv.getTrackbarPos("max disp", "depth")
        max_disp = 247
        min_disp = 0
        num_disp = max_disp - min_disp
        # P1/P2 smoothness penalties: 8/32 * channels * window^2
        # (len(fl.shape) == 3 for a colour frame, standing in for the channel count)
        p1_var = 8 * len(fl.shape) * window_size * window_size
        p2_var = 32 * len(fl.shape) * window_size * window_size
        stereo = cv.StereoSGBM_create(minDisparity=min_disp,
                                      numDisparities=num_disp,
                                      blockSize=window_size,
                                      uniquenessRatio=10,
                                      speckleWindowSize=100,
                                      speckleRange=1,
                                      disp12MaxDiff=10,
                                      P1=p1_var,
                                      P2=p2_var)
        # Used for the filtered image
        # Create another stereo for right this time
        stereoR = cv.ximgproc.createRightMatcher(stereo)

        # WLS FILTER Parameters
        lmbda = 80000
        sigma = 2.0

        wls_filter = cv.ximgproc.createDisparityWLSFilter(matcher_left=stereo)
        wls_filter.setLambda(lmbda)
        wls_filter.setSigmaColor(sigma)
        shot = True
        if not (leftCam.isOpened() and rightCam.isOpened()):
            exit(1)
        while True:
            retvalOfRight, rightFrame = rightCam.read()
            retvalOfLeft, leftFrame = leftCam.read()
            if not (retvalOfRight and retvalOfLeft):
                print("read fail")
                break
            key = cv.waitKey(1)
            twoFrame = cv.hconcat([rightFrame, leftFrame])
            cv.imshow("Two Camera", twoFrame)
            if key & 0xFF == ord('q'):
                print("結束")
                break
            # elif key & 0xFF == ord('s'):
            elif shot:
                frameL = leftFrame
                frameR = rightFrame
                shot = False
            else:
                time.sleep(0.1)
                shot = True
                continue

            remapped_left_side = cv.remap(frameL, Left_Stereo_Map[0],
                                          Left_Stereo_Map[1],
                                          cv.INTER_LANCZOS4,
                                          borderMode=cv.BORDER_CONSTANT,
                                          borderValue=0)
            remapped_right_side = cv.remap(frameR, Right_Stereo_Map[0],
                                           Right_Stereo_Map[1],
                                           cv.INTER_LANCZOS4,
                                           borderMode=cv.BORDER_CONSTANT,
                                           borderValue=0)

            grayR = cv.cvtColor(remapped_right_side, cv.COLOR_BGR2GRAY)
            grayL = cv.cvtColor(remapped_left_side, cv.COLOR_BGR2GRAY)
            grayR = cv.equalizeHist(grayR)
            grayL = cv.equalizeHist(grayL)

            # cv.imshow('grayR', grayR)
            # cv.imshow('grayL', grayR)

            disp = stereo.compute(grayL, grayR)
            dispL = np.int16(disp)

            dispR = stereoR.compute(grayR, grayL)

            dispR = np.int16(dispR)
            # cv.imshow('dispR', dispR)
            filteredImg = wls_filter.filter(dispL, grayL, None, dispR)
            # cv.imshow('filteredImg', filteredImg)

            filteredImg = cv.normalize(src=filteredImg,
                                       dst=filteredImg,
                                       alpha=0,
                                       beta=255,
                                       norm_type=cv.NORM_MINMAX)
            filteredImg = np.uint8(filteredImg)

            # contours, hierarchy = cv.findContours(filteredImg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE )
            # print(len(contours[0]))
            # cv.drawContours(grayL, [cnt], 0, (0,255,0), 3)
            # cv.drawContours(filteredImg, contours, 1, (0,0,255), 20)

            self.filter_disp = (
                (filteredImg.astype(np.float32) / 16) - min_disp) / num_disp
            # filt_Color = cv.applyColorMap(filteredImg, cv.COLORMAP_RAINBOW )
            # cv.imshow('depth', filt_Color)

            cv.imshow('depth', filteredImg)
            left = cv.line(remapped_left_side, (0, 0), (0, self.frameHeight),
                           (0, 0, 0), 1)

            self.detect()

            if len(self.sentences) == 1:
                if voice.completed.is_set():
                    voice.completed.clear()
                    voice.say(self.sentences.pop())

            cv.imshow('calc', left)

        rightCam.release()
        leftCam.release()
        cv.destroyAllWindows()
        voice.terminate()
Example #12
    def stereo_bm(self):
        Left_Stereo_Map = (config.left_map1, config.left_map2)
        Right_Stereo_Map = (config.right_map1, config.right_map2)
        cv.namedWindow("Test Two Camera")
        cv.namedWindow('depth')
        cv.createTrackbar("numDisparities", "depth", 9, 100, lambda x: None)
        cv.createTrackbar("filterCap", "depth", 62, 63, lambda x: None)
        cv.createTrackbar("filterSize", "depth", 255, 255, lambda x: None)
        cv.createTrackbar("SADWindowSize", "depth", 0, 255, lambda x: None)
        leftCam = cv.VideoCapture(self.left_camera_id + cv.CAP_DSHOW)
        rightCam = cv.VideoCapture(self.right_camera_id + cv.CAP_DSHOW)

        leftCam.set(cv.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
        leftCam.set(cv.CAP_PROP_FRAME_WIDTH, self.frameWidth)
        rightCam.set(cv.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
        rightCam.set(cv.CAP_PROP_FRAME_WIDTH, self.frameWidth)
        if not (leftCam.isOpened() and rightCam.isOpened()):
            exit(1)
        while True:
            # Start Reading Camera images
            retvalOfRight, rightFrame = rightCam.read()
            retvalOfLeft, leftFrame = leftCam.read()
            # ret, frame = capture.read()
            if not (retvalOfRight and retvalOfLeft):
                print("read fail")
                break
            key = cv.waitKey(1)
            twoFrame = cv.hconcat([rightFrame, leftFrame])
            cv.imshow("Test Two Camera", twoFrame)
            if key & 0xFF == ord('q'):
                print("結束")
                break
            # elif key & 0xFF == ord('s'):
            frameL = leftFrame
            frameR = rightFrame
            cv.imshow("left", frameL)
            cv.imshow("right", frameR)

            Left_nice = cv.remap(frameL, Left_Stereo_Map[0],
                                 Left_Stereo_Map[1], cv.INTER_LANCZOS4,
                                 borderMode=cv.BORDER_CONSTANT, borderValue=0)
            Right_nice = cv.remap(frameR, Right_Stereo_Map[0],
                                  Right_Stereo_Map[1], cv.INTER_LANCZOS4,
                                  borderMode=cv.BORDER_CONSTANT, borderValue=0)

            grayR = cv.cvtColor(Right_nice, cv.COLOR_BGR2GRAY)
            grayL = cv.cvtColor(Left_nice, cv.COLOR_BGR2GRAY)
            numDisparities = cv.getTrackbarPos("numDisparities", "depth")
            if numDisparities < 1:
                numDisparities = 1
            filterCap = cv.getTrackbarPos("filterCap", "depth")
            filterSize = cv.getTrackbarPos("filterSize", "depth")
            if filterSize % 2 == 0:
                filterSize += 1
            if filterSize < 5:
                filterSize = 5
            if filterCap < 1:
                filterCap = 1
            SADWindowSize = cv.getTrackbarPos("SADWindowSize", "depth")
            if SADWindowSize % 2 == 0:
                SADWindowSize += 1
            if SADWindowSize < 5:
                SADWindowSize = 5

            stereo = cv.StereoBM_create(numDisparities=16 * numDisparities,
                                        blockSize=SADWindowSize)
            stereo.setPreFilterCap(filterCap)
            stereo.setPreFilterSize(filterSize)
            disparity = stereo.compute(grayL, grayR)
            disparity = disparity.astype(np.float32) / 16  # BM output is fixed-point, scaled by 16
            disp = cv.normalize(disparity, None, alpha=1, beta=255,
                                norm_type=cv.NORM_MINMAX, dtype=cv.CV_8UC1)
            cv.imshow("depth", disp)
Example #13
    if ret:
        objpoints.append(objp)
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)  # keep the subpixel-refined corners
        # Draw and display the corners
        cv.drawChessboardCorners(img, (11, 8), corners2, ret)

        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
            objpoints, imgpoints, gray.shape[::-1], None, None)

        h, w = img.shape[:2]
        newcameramtx, roi = cv.getOptimalNewCameraMatrix(
            mtx, dist, (w, h), 1, (w, h))
        # print(roi)
        mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                                (w, h), cv.CV_32FC1)
        dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)

        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]

        npz = np.load('./output.npz')
        print(npz.files)

        cv.imwrite('calibresult.png', dst)

        plt_img = img[:, :, ::-1]
        plt.figure(fname)
        plt.imshow(plt_img)
        plt.show()
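A common follow-up, as in the OpenCV calibration tutorial, is to report the mean reprojection error over all views; a sketch using the variables above:

mean_error = 0
for i in range(len(objpoints)):
    imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                     mtx, dist)
    error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2) / len(imgpoints2)
    mean_error += error
print("total error: {}".format(mean_error / len(objpoints)))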
Example #14
    def _map_projection_data_generator(src_image, label_lanes, label_h_samples,
                                       net_input_img_size, x_anchors,
                                       y_anchors, max_lane_count, H, map_x,
                                       map_y, groundSize):
        # transform image by perspective matrix
        height, width = src_image.shape[:2]
        if width != 1280 or height != 720:
            src_image = cv2.resize(src_image, (1280, 720))

        gImg = cv2.remap(src_image,
                         map_x,
                         map_y,
                         interpolation=cv2.INTER_NEAREST,
                         borderValue=(125, 125, 125))
        imgf = np.float32(gImg) * (1.0 / 255.0)

        # create label for class
        class_list = {'background': [0, 1], 'lane_marking': [1, 0]}
        class_count = len(class_list)  # [background, road]

        # create labels for slice-id mapping from ground x and y anchors
        #   [y anchors,
        #    x anchors,
        #    class count + x offset]
        #
        class_count = 2
        offset_dim = 1
        instance_label_dim = 1
        label = np.zeros((y_anchors, x_anchors,
                          class_count + offset_dim + instance_label_dim),
                         dtype=np.float32)
        acc_count = np.zeros((y_anchors, x_anchors, class_count + offset_dim),
                             dtype=np.float32)
        class_idx = 0
        x_offset_idx = class_count
        instance_label_idx = class_count + offset_dim

        # init values
        label[:, :,
              class_idx:class_idx + class_count] = class_list['background']
        label[:, :, x_offset_idx] = 0.0001

        # transform "h_samples" & "lanes" to desired format
        anchor_scale_x = float(x_anchors) / float(groundSize[1])
        anchor_scale_y = float(y_anchors) / float(groundSize[0])

        # calculate anchor offsets
        for laneIdx in range(min(len(label_lanes), max_lane_count)):
            lane_data = label_lanes[laneIdx]

            prev_gx = None
            prev_gy = None
            prev_ax = None
            prev_ay = None
            for idx in range(len(lane_data)):
                dy = label_h_samples[idx]
                dx = lane_data[idx]

                if (dx < 0):
                    continue

                # do perspective transform at dx, dy
                gx, gy, gz = np.matmul(H, [[dx], [dy], [1.0]])
                if gz > 0:
                    continue

                # convert to anchor (grid) coordinates
                gx = int(gx / gz)
                gy = int(gy / gz)
                if gx < 0 or gy < 0 or gx >= (groundSize[1] -
                                              1) or gy >= (groundSize[0] - 1):
                    continue

                ax = int(gx * anchor_scale_x)
                ay = int(gy * anchor_scale_y)

                if ax < 0 or ay < 0 or ax >= (x_anchors -
                                              1) or ay >= (y_anchors - 1):
                    continue

                instance_label_value = (laneIdx + 1.0) * 50
                label[ay][ax][class_idx:class_idx +
                              class_count] = class_list['lane_marking']

                # interpolate along the line to pad label data in the perspective-transformed coordinates
                if prev_gx is None:
                    prev_gx = gx
                    prev_gy = gy
                    prev_ax = ax
                    prev_ay = ay
                else:
                    if abs(ay - prev_ay) <= 1:
                        if acc_count[ay][ax][x_offset_idx] > 0:
                            continue
                        offset = gx - (ax / anchor_scale_x)
                        label[ay][ax][x_offset_idx] += math.log(offset +
                                                                0.0001)
                        label[ay][ax][
                            instance_label_idx] = instance_label_value
                        acc_count[ay][ax][x_offset_idx] = 1
                    else:
                        gA = np.array([prev_gx, prev_gy])
                        gB = np.array([gx, gy])
                        gLen = float(np.linalg.norm(gA - gB))

                        gV = (gA - gB) / gLen

                        inter_len = min(max(int(abs(prev_gy - gy)), 1), 10)
                        for dy in range(inter_len):
                            gC = gB + gV * (float(dy) /
                                            float(inter_len)) * gLen

                            ax = np.int32(gC[0] * anchor_scale_x)
                            ay = np.int32(gC[1] * anchor_scale_y)

                            if acc_count[ay][ax][x_offset_idx] > 0:
                                continue

                            offset = gC[0] - (ax / anchor_scale_x)
                            label[ay][ax][x_offset_idx] += math.log(offset +
                                                                    0.0001)
                            label[ay][ax][class_idx:class_idx +
                                          class_count] = class_list[
                                              'lane_marking']
                            label[ay][ax][
                                instance_label_idx] = instance_label_value
                            acc_count[ay][ax][x_offset_idx] = 1

                    prev_gx = gx
                    prev_gy = gy
                    prev_ax = ax
                    prev_ay = ay

        return (imgf, label)
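map_x and map_y arrive precomputed here, but given the convention used above (H maps image coordinates to ground coordinates), they could be built by sending every ground pixel through the inverse homography. A sketch with hypothetical ground-plane dimensions groundH and groundW:

import numpy as np

gy, gx = np.mgrid[0:groundH, 0:groundW].astype(np.float32)
pts = np.stack([gx.ravel(), gy.ravel(), np.ones(gx.size, np.float32)])
src = np.linalg.inv(H) @ pts  # back-project each ground pixel into the image
map_x = (src[0] / src[2]).reshape(groundH, groundW).astype(np.float32)
map_y = (src[1] / src[2]).reshape(groundH, groundW).astype(np.float32)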
print("dist畸变值:\n",dist   )   # 畸变系数   distortion cofficients = (k_1,k_2,p_1,p_2,k_3)
print("rvecs旋转(向量)外参:\n",rvecs)   # 旋转向量  # 外参数
print("tvecs平移(向量)外参:\n",tvecs  )  # 平移向量  # 外参数
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (u, v), 0, (u, v))
print('newcameramtx外参',newcameramtx)
#打开摄像机
camera=cv2.VideoCapture(0)
while True:
    grabbed, frame = camera.read()
    h1, w1 = frame.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (u, v), 0, (u, v))
    # correct the distortion
    dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    #dst2 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                             (w1, h1), cv2.CV_32FC1)
    dst2 = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
    # crop the image and output the undistorted result
    x, y, w1, h1 = roi
    dst1 = dst1[y:y + h1, x:x + w1]

    #cv2.imshow('frame',dst2)
    #cv2.imshow('dst1',dst1)
    cv2.imshow('dst2', dst2)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to save a frame and quit
        cv2.imwrite("../u4/frame.jpg", dst1)
        break

camera.release()
cv2.destroyAllWindows()
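dst1 (cv2.undistort) and dst2 (initUndistortRectifyMap plus remap) should agree up to rounding, since the OpenCV docs describe undistort as exactly that combination; precomputing the maps only pays off when they are reused across frames. A quick sanity check, a sketch to run right after dst2 is computed and before dst1 is cropped:

diff = cv2.absdiff(dst1, dst2)
print("max pixel difference:", diff.max())  # expected to be near zero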

Example #16
#     if e == cv2.EVENT_LBUTTONDOWN:
#         print(threeD[y][x])

# cv2.setMouseCallback("depth", callbackFunc, None)

while True:
    ret, frame_vga = videoIn.read()
    #ret2, frame2 = camera2.read()
    frame1 = frame_vga
    frame2 = frame_vga

    if not ret:
        break

    # rectify both images using the precomputed correction maps
    img1_rectified = cv2.remap(frame1, left_map1, left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(frame2, right_map1, right_map2,
                               cv2.INTER_LINEAR)

    # convert to grayscale in preparation for StereoBM
    imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
    imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)
    #imgGrayL = cv2.equalizeHist(imgL)
    #imgGrayR = cv2.equalizeHist(imgR)

    # smooth with a Gaussian filter
    imgGrayL = cv2.GaussianBlur(imgL, (5, 5), 0)
    imgGrayR = cv2.GaussianBlur(imgR, (5, 5), 0)

    # two trackbars for tuning parameters and inspecting the effect
    # num = cv2.getTrackbarPos("num", "depth")
Example #17
def testing(test_inds, folder='posmap_output',
            model='saved_models/MSE_model_10_300W', generate_models=False):
    mode = 'my_computer'
    if len(sys.argv) > 1 and sys.argv[1] == 'HiPerGator':
        mode = sys.argv[1]

    network = resfcn256()

    # Load the masks used to weight the loss calculation
    face_mask = cv2.imread('masks/uv_face_mask.png', cv2.IMREAD_GRAYSCALE)
    weight_mask = cv2.imread("masks/uv_weight_mask.png", cv2.IMREAD_GRAYSCALE)
    face_mask = np.array(face_mask).astype('float32')
    weight_mask = np.array(weight_mask).astype('float32')
    face_mask = face_mask / 255.0
    weight_mask = weight_mask / 16.0
    mask_comb = face_mask * weight_mask
    temp = np.arange(256 * 256 * 3)
    temp = temp.reshape(1, 256, 256, 3).astype('float32')
    temp[0, :, :, 0] = mask_comb
    temp[0, :, :, 1] = mask_comb
    temp[0, :, :, 2] = mask_comb

    files = glob.glob('model_output/*')  # clear output from a previous run
    for f in files:
        os.remove(f)

    #test_inds, img_paths, posmap_paths = train(mode)
    img_paths = glob.glob(folder + '/image?????.jpg')
    posmap_paths = glob.glob(folder + '/image?????.npy')
    #img_paths = glob.glob('data_input/*.jpg')
    #test_inds = random.sample(range(0,len(img_paths)), num_samples)
    #test_inds = dict.fromkeys(test_inds, True)


    inp = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
    out = network(inp, is_training=False)
    ground_truth = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
    sess = tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(allow_growth=True)))


    #tf.train.Saver(network.vars).restore(sess, 'saved_models/256_256_resfcn256_weight')
    tf.train.Saver(network.vars).restore(sess, model)

    test_imgs = np.arange(len(test_inds.keys()) * 256 * 256 * 3)
    test_imgs = test_imgs.reshape(len(test_inds.keys()), 256, 256,
                                  3).astype('float32')
    test_posmaps = np.arange(len(test_inds.keys()) * 256 * 256 * 3)
    test_posmaps = test_posmaps.reshape(len(test_inds.keys()), 256, 256,
                                        3).astype('float32')
    test_imgs_filenames = []

    ind = 0

    for key in test_inds:
        filenames = img_paths[key].replace('\\', '/')
        filenames_2 = filenames.split('/')
        filenames_3 = filenames_2[1].split('.')
        test_imgs_filenames.append(filenames_3[0])
        temp_img = cv2.imread(img_paths[key])
        temp_img = cv2.cvtColor(temp_img, cv2.COLOR_BGR2RGB)

        temp_posmap = np.load(posmap_paths[key]) / (256.0 * 1.1)
        #test_imgs.append(temp_img/256.0)
        #test_posmaps.append((np.load(posmap_paths[key]))/(256.0*1.1))
        test_imgs[ind] = temp_img / 256.0
        test_posmaps[ind] = temp_posmap
        ind += 1
    

    posmaps_pred = sess.run(out, feed_dict={inp: test_imgs})
    posmaps_pred_loss = np.array(posmaps_pred)
    #loss = tf.reduce_mean(tf.square(posmaps_pred - test_posmaps)*temp)
    loss = np.mean(np.square(posmaps_pred_loss - test_posmaps) * temp)
    #loss = tf.metrics.mean_squared_error(new1, new2, weights=temp, name = 'MSE')
    posmaps_pred = posmaps_pred * (256.0 * 1.1)
    print("Loss: " + str(loss))


    triangles = np.loadtxt('indices/triangles.txt').astype(np.int32)
    uv_coords = generate_uv_coords()


    if model != 'saved_models/256_256_resfcn256_weight' and generate_models:
        for i in range(len(posmaps_pred)):
            texture = cv2.remap(test_imgs[i],
                                posmaps_pred[i][:, :, :2].astype(np.float32),
                                None,
                                interpolation=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_CONSTANT,
                                borderValue=(0))
            kpt = get_landmarks(posmaps_pred[i])
            vertices = get_vertices(posmaps_pred[i])
            new_vertices = frontalize(vertices)
            new_vertices[:, 1] = 255 - new_vertices[:, 1]

            path_texture = 'model_output/' + test_imgs_filenames[i] + '_tex.obj'
            write_obj_with_texture(path_texture, new_vertices, triangles,
                                   texture, uv_coords / 256.0)