def unwarp(img, nx, ny, mtx, dist):
    img_size = (img.shape[1], img.shape[0])

    # Source and destination points for the bird's-eye transform, tuned by
    # hand for 1280x720 road images (an earlier corner/offset estimate was
    # superseded by these hard-coded values)
    src = np.float32([(575, 464), (707, 464), (258, 682), (1049, 682)])
    dst = np.float32([(450, 0), (img_size[0] - 450, 0),
                      (450, img_size[1]), (img_size[0] - 450, img_size[1])])

    undist = cv2.undistort(img, mtx, dist, None, mtx)

    # Given src and dst points, calculate the perspective transform matrix
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Warp the image using OpenCV warpPerspective()
    warped = cv2.warpPerspective(undist, M, img_size,flags=cv2.INTER_LINEAR)

    # Return the resulting image and matrix
    return warped, M, Minv
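A minimal usage sketch of the function above. The camera matrix and distortion coefficients are placeholders; in practice they come from a prior cv2.calibrateCamera() run, and the image path is hypothetical:

import cv2
import numpy as np

mtx = np.eye(3, dtype=np.float64)     # placeholder camera matrix
dist = np.zeros(5, dtype=np.float64)  # placeholder distortion coefficients

img = cv2.imread('test_images/straight_lines1.jpg')  # hypothetical path
warped, M, Minv = unwarp(img, nx=9, ny=6, mtx=mtx, dist=dist)

# Minv maps bird's-eye pixels back into the original camera view
restored = cv2.warpPerspective(warped, Minv, (img.shape[1], img.shape[0]))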
Code example #2
def detect_cards(im, numcontures = 10):
    origin = im
    contours = find_contures(im, numcontures)

    warps = []
    diffs = []

    for card in contours:
        peri = cv2.arcLength(card,True)
        conture = cv2.approxPolyDP(card,0.02*peri,True)

        if len(conture) != 4:
            continue

        approx = rectify(conture)
        cv2.drawContours(origin, [card], 0, (0, 255, 255), 5)
        # show_image(origin, "card")
        h_vertical = np.array([ [0,0],[im_width,0], [im_width,im_height],[0,im_height] ], np.float32)
        h_horizontal = np.array([ [0,im_height], [0,0], [im_width,0],[im_width,im_height] ], np.float32)

        transform = cv2.getPerspectiveTransform(approx,h_vertical)
        warp = cv2.warpPerspective(im,transform,(im_width,im_height))
        show_image(warp, "vertical")

        in_database(warp, diffs, card, transform)

        transform = cv2.getPerspectiveTransform(approx,h_horizontal)
        warp = cv2.warpPerspective(im,transform,(im_width,im_height))
        #show_image(warp, "horizontal")

        #in_database(warp, diffs, card)

    return diffs
Code example #3
def simplePerspectiveTransform(img, quad, shape=None,
                               interpolation=cv2.INTER_LINEAR,
                               inverse=False):
    p = sortCorners(quad).astype(np.float32)
    if shape is not None:
        height, width = shape
    else:
        # get output image size from avg. quad edge length
        width = int(round(0.5 * (np.linalg.norm(p[0] - p[1]) +
                                 np.linalg.norm(p[3] - p[2]))))
        height = int(round(0.5 * (np.linalg.norm(p[1] - p[2]) +
                                  np.linalg.norm(p[0] - p[3]))))

    dst = np.float32([[0,     0],
                      [width, 0],
                      [width, height],
                      [0,     height]])

    if inverse:
        s0, s1 = img.shape[:2]
        dst /= ((width / s1), (height / s0))
        H = cv2.getPerspectiveTransform(dst, p)
    else:
        H = cv2.getPerspectiveTransform(p, dst)

    return cv2.warpPerspective(img, H, (width, height), flags=interpolation)
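simplePerspectiveTransform relies on an external sortCorners helper. A minimal sketch of one, assuming the [top-left, top-right, bottom-right, bottom-left] order that the dst array above expects (this uses the common coordinate sum/difference trick and is not the original helper):

import numpy as np

def sortCorners(quad):
    # order an arbitrary 4-point quad as [tl, tr, br, bl]
    quad = np.asarray(quad, dtype=np.float32).reshape(4, 2)
    out = np.zeros((4, 2), np.float32)
    s = quad.sum(axis=1)               # x + y
    d = np.diff(quad, axis=1).ravel()  # y - x
    out[0] = quad[np.argmin(s)]        # top-left: smallest x + y
    out[2] = quad[np.argmax(s)]        # bottom-right: largest x + y
    out[1] = quad[np.argmin(d)]        # top-right: smallest y - x
    out[3] = quad[np.argmax(d)]        # bottom-left: largest y - x
    return out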
Code example #4
def actual_width_in_mm(lb, lt, rb, rt, cxr, cxl):
    """
    * Function Name:actual_width_in_mm()
    * Input:	    co-ordinates of left bottom, left top, right bottom, right top,
                    right contour centroid, left contour centroid
    * Output:		returns actual width of the door
    * Logic:		It takes the actual depth and using filters the black noise spaces are made white
                    The 20 pixels of the area of left and right edges are processed.
                    the minimum value in them is found and the depth is that value.
                    Using pixel knowledge we find the angle and then using cosine rule
                    we find the actual width of the door.
    * Example Call:	actual_width_in_mm(lb, lt, rb, rt, cxr, cxl)
    """
    a = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
    a /= 30.0
    a = a.astype(np.uint8)
    ret, mask = cv2.threshold(a, 1, 255, cv2.THRESH_BINARY_INV)
    ad = a + mask
    pts1 = np.float32([[lt[0] - 30, lt[1]], [lt[0], lt[1]], [lb[0] - 30, lb[1]], [lb[0], lb[1]]])
    pts2 = np.float32([[0, 0], [30, 0], [0, lb[1] - lt[1]], [30, lb[1] - lt[1]]])
    m = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(ad, m, (30, lb[1] - lt[1]))
    left_depth = np.amin(dst) * 30
    pts1 = np.float32([[rt[0], rt[1]], [rt[0] + 30, rt[1]], [rb[0], rb[1]], [rb[0] + 30, rb[1]]])
    pts2 = np.float32([[0, 0], [30, 0], [0, rb[1] - rt[1]], [30, rb[1] - rt[1]]])
    m = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(ad, m, (30, rb[1] - rt[1]))
    right_depth = np.amin(dst) * 30
    pixel_width = cxr - cxl
    angle = (pixel_width / 640.0) * (57 / 180.0) * math.pi
    width = (left_depth * left_depth) + (right_depth * right_depth) - (2 * left_depth * right_depth * math.cos(angle))
    width = math.sqrt(width)
    return width
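The final step is the law of cosines: two depth rays of lengths l and r separated by an angle theta span a chord of length sqrt(l*l + r*r - 2*l*r*cos(theta)), where theta comes from spreading the Kinect's roughly 57-degree horizontal field of view over its 640 pixel columns. A quick numeric sanity check with illustrative values:

import math

l, r = 2000.0, 2000.0                           # both door edges 2 m deep
theta = (300 / 640.0) * (57 / 180.0) * math.pi  # a 300-pixel span, about 26.7 degrees
print(math.sqrt(l*l + r*r - 2*l*r*math.cos(theta)))  # ~924 mm door width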
Code example #5
def Video_filter(cap,i ):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if(i==2):# bottom camera
        ch = frame.shape
        pts1 = np.float32([[112,275],[637,284],[149,345],[587,357]])
        pts2 = np.float32([[0,0],[484,0],[0,162],[484,162]])
        M = cv2.getPerspectiveTransform(pts1,pts2)
        frame = cv2.warpPerspective(frame,M,(484,162))
    elif(i==1): # top camera
        ch = frame.shape
        pts1 = np.float32([[2,242],[610,260],[58,258],[490,274]])
        pts2 = np.float32([[0,0],[484,0],[0,20],[484,20]])
        M = cv2.getPerspectiveTransform(pts1,pts2)
        frame= cv2.warpPerspective(frame,M,(484,20))
        cv2.imwrite('frame.jpg',frame)
    
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # note: despite the original "blue" labels, hue 0-20 is a red/orange range
    lower_orange = np.array([0, 30, 60])
    upper_orange = np.array([20, 150, 255])
    mask = cv2.inRange(hsv, lower_orange, upper_orange)
   
    kernel = np.ones((7,7),np.uint8)
    # dilate then erode (a morphological closing) to fill small gaps in the mask
    dilated = cv2.dilate(mask,kernel,iterations = 2)
    closed = cv2.erode(dilated,kernel,iterations = 2)
    ret3,th3 = cv2.threshold(closed,100,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    edges = cv2.Canny(th3,150,255)
    cv2.imshow('edge%d'%i,edges)
    cv2.imshow('colour%d'%i,frame)
    return edges
Code example #6
def warp(img):
    img = cv2.undistort(img, mtx, dist, None, mtx)
    preprocess_image = np.zeros_like(img[:,:,0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))

    c_binary = color_thresh(img, sthresh=(60, 255), vthresh=(50, 255), lthresh=(75, 255))
    preprocess_image[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255

    img_size = (img.shape[1], img.shape[0])

    # map the trapezium view of the lane to a bird's-eye rectangle
    bot_width = .76     # trapezium bottom width, as a fraction of image width
    mid_width = .08     # trapezium top width, as a fraction of image width
    height_pct = .62    # trapezium top edge, as a fraction of image height
    bottom_trim = .935  # trim the bottom to ignore the hood of the car
    src = np.float32([
        [img.shape[1]*(.5-mid_width/2), img.shape[0]*height_pct],
        [img.shape[1]*(.5+mid_width/2), img.shape[0]*height_pct],
        [img.shape[1]*(.5+bot_width/2), img.shape[0]*bottom_trim],
        [img.shape[1]*(.5-bot_width/2), img.shape[0]*bottom_trim],
    ])
    # map to a rectangle
    offset = img.shape[1]*.25
    dst = np.float32([
        [offset, 0],
        [img.shape[1]-offset, 0],
        [img.shape[1]-offset, img.shape[0]],
        [offset, img.shape[0]]
    ])
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocess_image, M, img_size, flags=cv2.INTER_LINEAR)
    return warped, M, Minv
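A sketch of how the returned Minv is typically consumed afterwards (an assumption about the surrounding pipeline, not code from this project): paint the fitted lane on a blank bird's-eye canvas, warp it back with Minv, and blend it over the original frame.

import cv2
import numpy as np

warped, M, Minv = warp(img)  # img: the current frame
lane_canvas = np.zeros((warped.shape[0], warped.shape[1], 3), np.uint8)
# ... draw the fitted lane polygon onto lane_canvas here ...
unwarped = cv2.warpPerspective(lane_canvas, Minv, (img.shape[1], img.shape[0]))
result = cv2.addWeighted(img, 1.0, unwarped, 0.3, 0.0)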
Code example #7
    def homography(self):
        if self._homography is None:
            b = self.opts['border']
            if self.quad is not None:

                if self.refQuad is not None:
                    dst = self.refQuad.astype(np.float32)
                else:
                    sy, sx = self._newBorders
                    dst = np.float32([
                        [b,  b],
                        [sx - b, b],
                        [sx - b, sy - b],
                        [b,  sy - b]])

                self._homography = cv2.getPerspectiveTransform(
                    self.quad.astype(np.float32), dst)
            else:
                try:
                    # GET HOMOGRAPHY FROM REFERENCE IMAGE USING PATTERN
                    # RECOGNITION
                    self._Hinv = h = self.pattern.findHomography(self.img)[0]
                    H = self.pattern.invertHomography(h)
                except Exception as e:
                    print(e)
                    if perspCorrectionViaQuad:
                        # PROPRIETARY FALLBACK METHOD
                        quad = perspCorrectionViaQuad(
                            self.img, self.ref, border=b)
                        sy, sx = self.ref.shape
                        dst = np.float32([
                            [b,  b],
                            [sx - b, b],
                            [sx - b, sy - b],
                            [b,  sy - b]])

                        H = cv2.getPerspectiveTransform(
                            quad.astype(np.float32), dst)

                    else:
                        raise e

# #                 #test fit quality:
#                 if abs(decompHomography(H)[-1]) > self.opts['maxShear']:
#                     #shear too big
#

                self._homography = H

                sy, sx = self.opts['new_size']
                ssy, ssx = self.ref.shape[:2]
                if sx is None:
                    sx = ssx
                if sy is None:
                    sy = ssy
                self._newBorders = (sy, sx)

        return self._homography
Code example #8
File: raycast.py Project: JJones30/Apatite-to-Zircon
def skew(image):
    im_x = image.shape[1]
    im_y = image.shape[0]
    src = np.array([(0,0), (im_x, 0), (im_x, im_y), (0, im_y)], np.float32)
    dst = np.array([(0,0), (im_x, im_y), (im_x,2*im_y), (0, im_y)], np.float32)
    affine = cv2.getPerspectiveTransform(src, dst)
    reverse_affine = cv2.getPerspectiveTransform(dst, src)
    image = cv2.warpPerspective(image, affine, (im_x, 2*im_y), borderMode=cv2.BORDER_CONSTANT, borderValue=255, flags=cv2.INTER_NEAREST)
    return image, reverse_affine
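Despite the variable names, the returned matrix is a full 3x3 homography (here it encodes a vertical shear), so points found in the skewed image can be mapped back with cv2.perspectiveTransform, which expects float32 input of shape (1, N, 2). A usage sketch with a placeholder image:

import cv2
import numpy as np

image = np.full((480, 640), 255, np.uint8)  # placeholder white image
skewed, reverse_affine = skew(image)
pts = np.float32([[[100, 250], [40, 80]]])  # shape (1, N, 2)
back = cv2.perspectiveTransform(pts, reverse_affine)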
Code example #9
def perspective(frame, pitch=0):
    if pitch == 0:
        pts1 = np.float32([[0,0],[13,478],[COLS,ROWS],[COLS,0]])
        pts2 = np.float32([[0,0],[0,ROWS],[COLS,ROWS],[COLS,0]])
        M = cv2.getPerspectiveTransform(pts1,pts2)
        dst = cv2.warpPerspective(frame,M,(COLS,ROWS))
        return dst
    else:
        pts1 = np.float32([[5,5],[5,475],[COLS,ROWS],[COLS,0]])
        pts2 = np.float32([[0,0],[0,ROWS],[COLS,ROWS],[COLS,0]])
        M = cv2.getPerspectiveTransform(pts1,pts2)
        dst = cv2.warpPerspective(frame,M,(COLS,ROWS))
        return dst           
Code example #10
File: data.py Project: Bruslan/MV3D-1
 def transform_image(self, img, bbox_src, bbox_dst):
     projected_pts_src = box3d_to_rgb_box(bbox_src)
     # print('projected_pts_src dim: ', projected_pts_src.shape)
     projected_pts_src = projected_pts_src.squeeze()
     projected_pts_dst = box3d_to_rgb_box(bbox_dst)
     # print('projected_pts_dst dim: ', projected_pts_dst.shape)
     projected_pts_dst = projected_pts_dst.squeeze()
     M1 = cv2.getPerspectiveTransform(np.float32(projected_pts_src[2:6]),
                                      np.float32(projected_pts_dst[2:6]))
     M2 = cv2.getPerspectiveTransform(np.float32(projected_pts_src[:4]),
                                      np.float32(projected_pts_dst[:4]))
     M = (M1 + M2) / 2
     rows, cols = img.shape[:2]
     new_img = cv2.warpPerspective(img, M, (cols, rows))
     return new_img
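Element-wise averaging of two homographies, (M1 + M2) / 2, is a heuristic and is not itself guaranteed to be a best-fitting homography. A sketch of an alternative, an assumption rather than this project's code, is a single least-squares fit over all eight corner correspondences with cv2.findHomography:

import cv2
import numpy as np

def fit_single_homography(pts_src, pts_dst):
    # one least-squares fit over all correspondences instead of
    # averaging two exact 4-point transforms
    src = np.float32(pts_src).reshape(-1, 1, 2)
    dst = np.float32(pts_dst).reshape(-1, 1, 2)
    M, _ = cv2.findHomography(src, dst, 0)  # method 0: plain least squares
    return M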
Code example #11
File: Simulator.py Project: djnugent/Precland
    def simulate_target(self,thetaX,thetaY,thetaZ, aX, aY, aZ, cX, cY, cZ, camera_height, camera_width, fov):
        img_width = self.target_width
        img_height = self.target_height

        #point maps
        corners = np.float32([[-img_width/2,img_height/2],[img_width/2 ,img_height/2],[-img_width/2,-img_height/2],[img_width/2, -img_height/2]])
        newCorners = np.float32([[0,0],[0,0],[0,0],[0,0]])


        #calculate projection for four corners of image
        for i in range(0,len(corners)):

            #shift to world
            x = corners[i][0] + cX - img_width/2.0
            y = corners[i][1] + cY - img_height/2.0


            #calculate perspective and position
            x , y = self.project_3D_to_2D(thetaX,thetaY,thetaZ, aY, aX, aZ, y, x, cZ,camera_height,camera_width,fov)

            #shift to camera
            x , y = shift_to_image((x,y),camera_width,camera_height)
            newCorners[i] = x,y


        #project image
        M = cv2.getPerspectiveTransform(corners,newCorners)
        sim = cv2.warpPerspective(self.target,M,(self.camera_width,self.camera_height),borderValue=self.backgroundColor)

        return sim
Code example #12
File: mosaic.py Project: davidraleigh/dronedeploy
def four_point_transform(img, RotY, RotX):
    height, width = img.shape[:2]

    # the from pts to be warped
    src_rect = create_src_rect(height, width)
    # the rotated destination pts
    dst_rect = pitch_roll_pts(height, width, RotY, RotX)
    (tl, tr, br, bl) = dst_rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # compute the perspective transform matrix and then apply it
    # M, mask = cv2.findHomography(src_rect, dst_rect, cv2.RANSAC, 5.0) return same result for M
    M = cv2.getPerspectiveTransform(src_rect, dst_rect)
    warped = cv2.warpPerspective(img, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
Code example #13
def getCards(im, numcards=4):
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (1, 1), 1000)
    flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY)

    contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:numcards]

    for card in contours:
        peri = cv2.arcLength(card, True)
        approx = rectify(cv2.approxPolyDP(card, 0.02 * peri, True))

        # box = np.int0(approx)
        # cv2.drawContours(im,[box],0,(255,255,0),6)
        # imx = cv2.resize(im,(1000,600))
        # cv2.imshow('a',imx)

        h = np.array([[0, 0], [449, 0], [449, 449], [0, 449]], np.float32)

        transform = cv2.getPerspectiveTransform(approx, h)
        warp = cv2.warpPerspective(im, transform, (450, 450))

        yield warp
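getCards is a generator, so the warped 450x450 card crops are consumed lazily. A minimal usage sketch (file names are hypothetical; rectify() is assumed to order the corners to match h):

import cv2

im = cv2.imread('cards.jpg')  # hypothetical input photo
for i, card in enumerate(getCards(im, numcards=4)):
    cv2.imwrite('card_%02d.png' % i, card)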
Code example #14
def main():
    global countClicks, coordinates, copyimage

    cv2.resizeWindow(windowname, 700, 700)

    while (countClicks < 4):
        preseedKey = cv2.waitKey(1)
        cv2.imshow(windowname, image)

        if preseedKey & 0xFF == 27:
            break

    pointone = np.float32(
        [[coordinates[0], coordinates[1]],
         [coordinates[2], coordinates[3]],
         [coordinates[4], coordinates[5]],
         [coordinates[6], coordinates[7]]])
    pointtwo = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])

    perspective = cv2.getPerspectiveTransform(pointone, pointtwo)
    output = cv2.warpPerspective(copyimage, perspective, (310, 310))

    cv2.imshow("Output Image", output)
    cv2.waitKey(0)

    cv2.destroyAllWindows()
Code example #15
File: transformer.py Project: WenchenLi/imageToText
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    return warped
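A usage sketch for the function above (the corner values are hypothetical; order_points is the external helper that returns [tl, tr, br, bl], e.g. the sum/difference ordering shown in code example #26 below):

import cv2
import numpy as np

image = cv2.imread('receipt.jpg')  # hypothetical document photo
pts = np.array([(73, 239), (356, 195), (389, 462), (63, 513)], dtype="float32")
scanned = four_point_transform(image, pts)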
Code example #16
File: main.py Project: frederikhermans/imageframer
    def extract(self, img, output_shape, corners=None, hints=None):
        """Extract a frame from `img`.

        This function always corrects for perspective distortion and may
        correct for radial distortion."""
        if img.dtype != np.uint8:
            raise ValueError('Can only operate on uint8.')
        if corners is None:
            corners = self.locate(img, hints=hints)

        if self.calibration_profile is not None and \
           undistort.should_undistort(img, corners, self.calibration_profile):
            img, corners = undistort.undistort(img, corners,
                                               self.calibration_profile)
            corners = self.locate(img, hints=hints)

        # Crop image to corners (speeds up the perspective transform)
        img, corners = crop_to_corners(img, corners)

        # Compute perspective transform
        corners = np.fliplr(corners).astype(np.float32)
        dst_corners = np.array(output_shape) * ((0, 0), (1, 0), (1, 1), (0, 1))
        dst_corners = np.fliplr(dst_corners).astype(np.float32)
        m = cv2.getPerspectiveTransform(corners, dst_corners)

        return cv2.warpPerspective(img, m, output_shape,
                                   flags=cv2.INTER_NEAREST)
Code example #17
def windowToFieldCoordinates(basePoint, x1, y1, x2, y2, x3, y3, x4, y4, maxWidth=0, maxHeight=0):
    (xp, yp) = basePoint
    src = np.array([
        [x1, y1],
        [x2, y2],
        [x3, y3],
        [x4, y4]], dtype = "float32")

    # these defaults should match the aspect ratio of the real field width/height
    maxWidth = (x4-x1) if maxWidth == 0 else maxWidth
    maxHeight = (y1-y2) if maxHeight == 0 else maxHeight

    # make a destination rectangle with the width and height of above (starts at 0,0)
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # find the transformation matrix for our transforms
    transformationMatrix = cv2.getPerspectiveTransform(src, dst)

    # cv2.perspectiveTransform expects a float32 array of shape (1, N, 2);
    # the point only needs to appear once
    original = np.array([[(xp, yp)]], dtype=np.float32)

    # map the original (mouse) coordinates to field coordinates using the matrix
    transformed = cv2.perspectiveTransform(original, transformationMatrix)[0][0]

    return transformed
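For reference, cv2.perspectiveTransform operates on point arrays rather than images and wants float32 input shaped (1, N, 2) or (N, 1, 2); a standalone check with the identity homography:

import cv2
import numpy as np

M = np.eye(3, dtype=np.float32)          # identity homography
pts = np.float32([[[10, 20]]])           # one point, shape (1, 1, 2)
print(cv2.perspectiveTransform(pts, M))  # [[[10. 20.]]]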
Code example #18
def rot(img,angel,shape,max_angel):
    """ 使图像轻微的畸变

        img 输入图像
        factor 畸变的参数
        size 为图片的目标尺寸

    """
    size_o = [shape[1],shape[0]]

    size = (shape[1]+ int(shape[0]*cos((float(max_angel )/180) * 3.14)),shape[0])


    interval = abs(int(sin((float(angel) / 180) * 3.14) * shape[0]))

    pts1 = np.float32([[0,0]         ,[0,size_o[1]],[size_o[0],0],[size_o[0],size_o[1]]])
    if(angel>0):

        pts2 = np.float32([[interval,0],[0,size[1]  ],[size[0],0  ],[size[0]-interval,size_o[1]]])
    else:
        pts2 = np.float32([[0,0],[interval,size[1]  ],[size[0]-interval,0  ],[size[0],size_o[1]]])

    M  = cv2.getPerspectiveTransform(pts1,pts2)
    dst = cv2.warpPerspective(img,M,size)

    return dst
Code example #19
 def _calculate_hat_points(self, marker_points):
     perspective_matrix = cv2.getPerspectiveTransform(
         self._square_definition, marker_points
     )
     hat_points = cv2.perspectiveTransform(self._hat_definition, perspective_matrix)
     hat_points.shape = 6, 2
     return hat_points
Code example #20
def transformImage(img, corners, boardSize):
    size = img.shape[1],img.shape[0]

    rcCorners = corners.reshape(boardSize[0], boardSize[1], 2)

    outerPoints = getOuterPoints(rcCorners)
    tl,tr,bl,br = outerPoints

    patternSize = np.array([
        np.sqrt(((tr - tl)**2).sum(0)),
        np.sqrt(((bl - tl)**2).sum(0)),
    ])
   
    inQuad = np.array(outerPoints, np.float32)

    outQuad = np.array([
        tl,
        tl + np.array([patternSize[0],0.0]),
        tl + np.array([0.0,patternSize[1]]),
        tl + patternSize,
    ],np.float32)

    transform = cv2.getPerspectiveTransform(inQuad, outQuad)
    transformed = cv2.warpPerspective(img, transform, size)

    return transformed
Code example #21
File: glyph.py Project: picopter/picopterx
def add_substitute_quad(image, substitute_quad, dst):
 
    # dst (zero-set) and src points
    dst = order_points(dst)
    if dst.shape[0] < 4:
        return
     
    (tl, tr, br, bl) = dst
    min_x = min(int(tl[0]), int(bl[0]))
    min_y = min(int(tl[1]), int(tr[1]))
 
    for point in dst:
        point[0] = point[0] - min_x
        point[1] = point[1] - min_y
 
    (max_width,max_height) = max_width_height(dst)
    src = topdown_points(max_width, max_height)
 
    # warp perspective (with white border)
    substitute_quad = cv2.resize(substitute_quad, (max_width,max_height))
 
    warped = np.zeros((max_height,max_width, 3), np.uint8)
    warped[:,:,:] = 255
 
    matrix = cv2.getPerspectiveTransform(src, dst)
    cv2.warpPerspective(substitute_quad, matrix, (max_width,max_height), warped, borderMode=cv2.BORDER_TRANSPARENT)
 
    # add substitute quad
    image[min_y:min_y + max_height, min_x:min_x + max_width] = warped
 
    return image
Code example #22
File: opencv.py Project: JamesTonthat/sdaps
def transform_using_corners(img, paper_width, paper_height):
    surf = to_a1_surf(convert_to_monochrome(img))

    matrix, res = _fallback_matrix(surf.get_width(), surf.get_height(), paper_width, paper_height)

    top_left = image.find_corner_marker(surf, matrix, 1)
    top_right = image.find_corner_marker(surf, matrix, 2)
    bottom_right = image.find_corner_marker(surf, matrix, 3)
    bottom_left = image.find_corner_marker(surf, matrix, 4)

    scale = 1 * res

    x0 = scale * defs.corner_mark_left
    y0 = scale * defs.corner_mark_top
    x1 = scale * (paper_width - defs.corner_mark_right)
    y1 = scale * (paper_height - defs.corner_mark_bottom)

    width, height = int(scale * paper_width), int(scale * paper_height)
    # Increase width to be a multiple of 4
    if width % 4:
        width = width + 4 - width % 4

    transform_matrix = cv2.getPerspectiveTransform(
        np.array((top_left, top_right, bottom_right, bottom_left), dtype=np.float32),
        np.array(((x0, y0), (x1, y0), (x1, y1), (x0, y1)), dtype=np.float32))

    transformed = cv2.warpPerspective(img, transform_matrix, dsize=(width, height))

    return transformed
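The multiple-of-4 padding above likely exists to satisfy the row-stride alignment of the downstream monochrome surface. An equivalent branch-free form (plain arithmetic, not from the original source):

def pad_to_multiple_of_4(width):
    # round up to the next multiple of 4; equivalent to the if-block above
    return width + (-width) % 4

assert pad_to_multiple_of_4(637) == 640
assert pad_to_multiple_of_4(640) == 640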
Code example #23
File: vision.py Project: debasmitdas/learn_play
    def _project_other_roi(self):
        warped_in = np.float32([np.array(self._other_roi_points)])
        project_out = np.float32([[0, 0],
                                  [self._side_other_roi, 0],
                                  [self._side_other_roi, self._side_other_roi],
                                  [0, self._side_other_roi]])
        M = cv2.getPerspectiveTransform(warped_in, project_out)
        self.subLock.acquire(True)
        local_image = deepcopy(self._np_image)
        self.subLock.release()
        self._other_projected = cv2.warpPerspective(local_image,
                                                    M,
                                                    (self._side_other_roi,
                                                     self._side_other_roi))
        # Finds red colors in HSV space
        hsv = cv2.cvtColor(self._other_projected, cv2.COLOR_BGR2HSV)

        self._inrange_colour = cv2.inRange(hsv, self._low_colour,
                                           self._high_colour)

        cv.ShowImage('Colour', cv.fromarray(self._inrange_colour))
        # the following can probably be optimized
        red_cnt = 0
        for x in range(self._side_other_roi):
            for y in range(self._side_other_roi):
                if red_cnt > self._inrange_colour_thresh:  # Speed tweak
                    return True
                else:
                    if self._inrange_colour[x, y] == 255:
                        red_cnt += 1
        return False
Code example #24
File: warp.py Project: shinh/IkaLog
    def reset(self):
        # input source
        w = 1280
        h = 720

        self.pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
        self.M = cv2.getPerspectiveTransform(self.pts2, self.pts2)
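Mapping a quad onto itself yields the identity homography, so reset() makes self.M a no-op warp until a real calibration replaces it. A quick standalone check:

import cv2
import numpy as np

pts = np.float32([[0, 0], [1280, 0], [1280, 720], [0, 720]])
M = cv2.getPerspectiveTransform(pts, pts)
print(np.allclose(M, np.eye(3)))  # True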
Code example #25
def getCards(im, numcards=4):
  gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
  blur = cv2.GaussianBlur(gray,(1,1),1000)
  flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY) 
       
  contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) # Get the contours of the image, each consisting of a few hundred points.

  contours = sorted(contours, key=cv2.contourArea,reverse=True)[:numcards] # Sort them by size and keep only the largest contours ( [:numcards] ).

  for card in contours:
    peri = cv2.arcLength(card,True)
    squareContour = cv2.approxPolyDP(card,0.02*peri,True) # Approximate the contour of hundreds of points with one of only 4 points, a rectangle

    if squareContour.shape[0] != 4: # Catch the error and show the user where it goes wrong.
        print("Contour found with a point count other than 4! Points: %d" % squareContour.shape[0])
        box = np.int0(squareContour)
        cv2.drawContours(im,[box],0,(255,255,0),6)
        imx = cv2.resize(im,(1000,600))
        cv2.imshow("foute contour" ,imx)
        cv2.waitKey(0)
        continue

    approx = rectify(squareContour) # Warp the contour into a 450x450 pixel image

    h = np.array([ [0,0],[449,0],[449,449],[0,449] ],np.float32)

    transform = cv2.getPerspectiveTransform(approx,h)
    warp = cv2.warpPerspective(im,transform,(450,450))

    yield warp
Code example #26
File: simple_demo.py Project: simutoni/ocr_examples
def crop_image(frame, found_contour):
    pts = found_contour.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    # top-left
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # top-right
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    # width
    (tl, tr, br, bl) = rect
    width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))

    # height
    height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    max_width = max(int(width_a), int(width_b))
    max_height = max(int(height_a), int(height_b))

    # final points
    dst = np.array([[0, 0], [max_width - 1, 0], [max_width - 1, max_height - 1], [0, max_height - 1]], dtype="float32")

    # apply perspective transform
    M = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(frame, M, (max_width, max_height))
    return warp
Code example #27
File: vision.py Project: Team3309/Vision2016
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of contour with corrected perspective, a new binary image to test against,
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
Code example #28
def main():
#============================================Read video in==========================================================
	directory = os.path.dirname(__file__)
	vidLoc = os.path.join(directory, "../video-image/video.mp4")

	vid = cv2.VideoCapture()
	vid.open(vidLoc)

	vid.set(1,479)
	
#=====================================================Corners and Warping==================================================
	corners = numpy.array([[141,81],[179,663],[1125,527],[1083,86]], numpy.float32)
	
	topLength = corners[3][0] - corners[0][0]
	botLength = corners[2][0] - corners[1][0]

	leftLength = corners[1][1] - corners[0][1]
	rightLength = corners[2][1] - corners[3][1]
	
	avgTop = int((topLength + botLength)/2)
	avgLeft = int((leftLength + rightLength)/2)

	write = cv2.VideoWriter("write.mpg", cv.CV_FOURCC("M", "J", "P", "G"), int(vid.get(5)), (avgTop,avgLeft))

	newcorners = numpy.array([[0,0], [0, avgLeft], [avgTop, avgLeft], [avgTop,0]],numpy.float32)

	dsize = (avgTop, avgLeft)
#====================================================Warp all frames=====================================================
	for x in range(2820):
		debug, im = vid.read()
		transformMatrix = cv2.getPerspectiveTransform(corners, newcorners)

		image = cv2.warpPerspective(im, transformMatrix, dsize)
		
		write.write(image)
Code example #29
def four_point_transform(image, pts):
	
	rect = order_points(pts)
	(tl, tr, br, bl) = rect
 
	# width of new image will be max of dist between (br and bl) and (tr and tl)
	widthmaxA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
	widthmaxB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
	maxWidth = max(int(widthmaxA), int(widthmaxB))
 
	# height of new image will be max of dist between (tr and br) and (tl and bl)
	heightmaxA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
	heightmaxB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
	maxHeight = max(int(heightmaxA), int(heightmaxB))
 
	# Specifying points in the order of tl,tr,br,bl
	dst = np.array([
		[0, 0],
		[maxWidth - 1, 0],
		[maxWidth - 1, maxHeight - 1],
		[0, maxHeight - 1]], dtype = "float32")
 
	
	M = cv2.getPerspectiveTransform(rect, dst)
	warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
 
	
	return warped
Code example #30
File: deda_anonmask_create.py Project: elypter/deda
 def restorePerspective(self):
     _,_, angle = cv2.minAreaRect(self._getMagentaMarkers())
     angle = angle%90 if angle%90<45 else angle%90-90
     print("Skew correction: rotating by %+f°"%angle)
     self.im = rotateImage(self.im, angle)
     
     inputPoints = self._getMagentaMarkers()
     outputPoints = np.array([(x*self.dpi, y*self.dpi) for x,y in [
         (0+EDGE_MARGIN, 0+EDGE_MARGIN),
         (0+EDGE_MARGIN, TESTPAGE_SIZE[1]-EDGE_MARGIN),
         (TESTPAGE_SIZE[0]-EDGE_MARGIN, 0+EDGE_MARGIN),
         (TESTPAGE_SIZE[0]-EDGE_MARGIN, TESTPAGE_SIZE[1]-EDGE_MARGIN),
     ]],dtype=np.float32)
     """
     inputPoints = np.array([
         (np.min(cEdges[:,0]),np.min(cEdges[:,1])),
         (np.min(cEdges[:,0]),np.max(cEdges[:,1])),
         (np.max(cEdges[:,0]),np.min(cEdges[:,1])),
         (np.max(cEdges[:,0]),np.max(cEdges[:,1])),
     ], dtype=np.float32)
     outputPoints = np.array([(x*self.dpi, y*self.dpi) for x,y in [
         (0+EDGE_MARGIN, 0+EDGE_MARGIN-MARKER_SIZE),
         (0+EDGE_MARGIN, TESTPAGE_SIZE[1]-EDGE_MARGIN),
         (TESTPAGE_SIZE[0]-EDGE_MARGIN+MARKER_SIZE, 0+EDGE_MARGIN-MARKER_SIZE),
         (TESTPAGE_SIZE[0]-EDGE_MARGIN+MARKER_SIZE, TESTPAGE_SIZE[1]-EDGE_MARGIN),
     ]],dtype=np.float32)
     """
     l = cv2.getPerspectiveTransform(inputPoints,outputPoints)
     print("Perspective Transform")
     for (x1,y1),(x2,y2) in zip(inputPoints,outputPoints):
         print("\tMapping %d,%d -> %d,%d"%(x1,y1,x2,y2))
     testpageSizePx = (
         int(TESTPAGE_SIZE[0]*self.dpi), int(TESTPAGE_SIZE[1]*self.dpi))
     self.im = cv2.warpPerspective(self.im,l,testpageSizePx)
Code example #31
    interestingContoursApprox = []
    interestingContoursROI = []
    minArea = 20.0
    # Use a polygon approximation to identify possible glyph regions, and check for a minimum area
    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        if len(approx) == 4:  # Approximation has four edges, considered a square
            area = cv2.contourArea(cnt)
            if area > minArea:
                cv2.drawContours(source, [cnt], 0, (0, 0, 255), 4)
                interestingContoursApprox.append(approx)
                #print "ROI Approx",approx
                approx = rectify(approx)
                h = np.array([[0, 0], [249, 0], [249, 249], [0, 249]],
                             np.float32)
                retval = cv2.getPerspectiveTransform(approx, h)
                warp = cv2.warpPerspective(sourceClone, retval, (250, 250))
                interestingContoursROI.append(warp)
                M = cv2.moments(cnt)
                #print "MOMENTS ", M

    interestingContoursOTSU = []
    validGlyhpsFound = []
    for roi in interestingContoursROI:
        roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        roi_blur = cv2.GaussianBlur(
            roi_gray, (5, 5),
            0)  # 5x5 gaussian blur to preprocess before canny
        # OTSU details http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
        ret, otsu = cv2.threshold(roi_blur, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
Code example #32
File: driveCNN.py Project: teamsoulless/thunderhill
from main import preprocessImages, customLoss
from Preprocess import perspectiveTransform
from matplotlib import image as mpimg
import cv2
import functools

sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None

img = mpimg.imread('./simulator/data/data/IMG/center_2016_12_01_13_30_48_287.jpg')
h, w = img.shape[:2]
src = np.float32([[w/2 - 57, h/2], [w/2 + 57, h/2], [w+140,h], [-140,h]])
dst = np.float32([[w/4,0], [w*3/4,0], [w*3/4,h], [w/4,h]])
M = cv2.getPerspectiveTransform(src, dst)
invM = cv2.getPerspectiveTransform(dst, src)
transform = functools.partial(perspectiveTransform, M = M)

def staticVar(**kwargs):
    """This function allows to define C-Like
    static variables for function in python using"""
    def decorate(func):
        for k in kwargs:
            setattr(func, k, kwargs[k])
        return func
    return decorate

#static Var to store the previous steering angles
@staticVar(angles = list(np.zeros(5)))
@sio.on('telemetry')
Code example #33
def detect():
    global corners
    change = False
    firstFrame = None
    finalFrame = None
    for i in range(50):
        (grabbed, f1) = camera.read()
    initialFrame = imutils.resize(f1, width=500)
    (h1,w1) = initialFrame.shape[:2]
    cv2.imshow('initial before loop',initialFrame)
    counter = 0
    while True:
        counter+=1
        (grabbed, frame) = camera.read()
        text = "Unoccupied"

        if not grabbed:
            break

        frame = imutils.resize(frame, width=500)
        #frame = imutils.resize(frame,width=320,height=320)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        if firstFrame is None:
            firstFrame = gray
            continue
        if(len(corners)==4 and corners[3] is not None):
            M = cv2.getPerspectiveTransform(np.float32(corners), np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
            frame = cv2.warpPerspective(frame, M, (320, 320))
            frameDelta = cv2.absdiff(firstFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            thresh = cv2.dilate(thresh, None, iterations=2)
            (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)

            for c in cnts:
                if cv2.contourArea(c) < min_area:
                    continue
                (x, y, w, h) = cv2.boundingRect(c)
                (x,y,w,h) = (x-corners[0][0],y-corners[1][1],w,h)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                text = "Occupied"
                change = True

        '''cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)'''

        cv2.imshow("Security Feed", frame)
        cv2.setMouseCallback('Security Feed',mousePos)
        if cnt >4:
            cv2.setMouseCallback('Security Feed',None)
        #cv2.imshow("Thresh", thresh)
        #cv2.imshow("Frame Delta", frameDelta)
        key = cv2.waitKey(1) & 0xFF
        '''if acc==False and counter%50==0:
            temp,corners = a.chess_corners_HSV(initialFrame)
            M = cv2.getPerspectiveTransform(np.float32(corners), np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
            temp = cv2.warpPerspective(initialFrame,M, (320, 320))
            cv2.imshow('initFrame',temp)
            time.sleep(3)
            ans = input("Is this the right transformation?")
            if ans=='y' or ans=='Y':
                acc = True
                '''
        if text == "Unoccupied" and change == True:
            time.sleep(3)
            (ret, finalFrame) = camera.read()
            break

        if key == ord("q"):
            break

    '''rows = finalFrame.shape[0]
    cols = finalFrame.shape[1]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1)
    finalFrame = cv2.warpAffine(finalFrame, M, (cols, rows))
    rows = initialFrame.shape[0]
    cols = initialFrame.shape[1]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1)
    initialFrame = cv2.warpAffine(initialFrame, M, (cols, rows))
    cv2.imshow('img', initialFrame)
    #cv2.imshow('im', finalFrame)
    print(a.determine_move(initialFrame,finalFrame))
    # finalFrame = adjust_gamma(finalFrame,5)
    finalFrame = a.chess_corners_HSV(finalFrame)
    #initialFrame = a.chess_corners(initialFrame)
    cv2.imshow('final', finalFrame)
    cv2.imshow('img', initialFrame)'''
    
    print('Checkpoint 1')
    cv2.imshow('initial after loop',initialFrame)
    finalFrame = imutils.resize(finalFrame, width=500)
    #initialFrame,corners = a.chess_corners_HSV(initialFrame)
    #finalFrame,corners1 = a.chess_corners_HSV(finalFrame,corners)
    M = cv2.getPerspectiveTransform(np.float32(corners), np.float32([(0, 0), (320, 0), (0, 320), (320, 320)]))
    initialFrame = cv2.warpPerspective(initialFrame, M, (320, 320))
    finalFrame = cv2.warpPerspective(finalFrame, M, (320, 320))
    '''for i in range(4):
        cv2.circle(finalFrame,(corners[i][0],corners[i][1]), 3, (0,0,255), -1)'''
    #finalFrame = a.chess_corners_HSV(finalFrame,corners)[0]
    rows = finalFrame.shape[0]
    cols = finalFrame.shape[1]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1)
    finalFrame = cv2.warpAffine(finalFrame, M, (cols, rows))
    rows = initialFrame.shape[0]
    cols = initialFrame.shape[1]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1)
    initialFrame = cv2.warpAffine(initialFrame, M, (cols, rows))
    #cv2.imshow('final',finalFrame)
    '''initialFrame = cv2.cvtColor(initialFrame, cv2.COLOR_BGR2GRAY)
    finalFrame = cv2.cvtColor(finalFrame,cv2.COLOR_BGR2GRAY)
    frameDelta = cv2.absdiff(initialFrame, finalFrame)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)'''
    #cv2.imshow('thresh',thresh)
    rows = thresh.shape[0]
    cols = thresh.shape[1]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1)
    thresh = cv2.warpAffine(thresh, M, (cols, rows))
    cv2.imshow('initial',initialFrame)
    cv2.imshow('final',finalFrame)
    #cv2.imshow('thresh',thresh)
    '''initial = imutils.resize(initialFrame, width=500)
    initial = cv2.cvtColor(initial, cv2.COLOR_BGR2GRAY)
    initial = cv2.GaussianBlur(initial, (21, 21), 0)
    final = imutils.resize(finalFrame, width=500)
    final = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)
    final = cv2.GaussianBlur(final, (21, 21), 0)
    frameDelta = cv2.absdiff(initial, final)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)'''
    '''initialFrame = cv2.cvtColor(initialFrame, cv2.COLOR_BGR2GRAY)
    finalFrame = cv2.cvtColor(finalFrame, cv2.COLOR_BGR2GRAY)
    thresh = cv2.absdiff(initialFrame,finalFrame)'''
    clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8,8))
    initialFrame1 = cv2.cvtColor(initialFrame, cv2.COLOR_BGR2GRAY)
    initialFrame1 = cv2.GaussianBlur(initialFrame1, (21, 21), 0)
    finalFrame1 = cv2.cvtColor(finalFrame, cv2.COLOR_BGR2GRAY)
    finalFrame1 = cv2.GaussianBlur(finalFrame1, (21, 21), 0)
    frameDelta = cv2.absdiff(initialFrame1, finalFrame1)
    initialFrame1 = clahe.apply(initialFrame1)
    finalFrame1 = clahe.apply(finalFrame1)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    #res = a.determine_move4(initialFrame1,finalFrame1)
    #print(a.determine_move3(thresh))
    return a.determine_move3(thresh)
Code example #34
        rect = cv2.minAreaRect(contour)
        points = cv2.boxPoints(rect)
        points = np.array(points, np.float32)
        #
        height, width = img2.shape
        points, h, w = hf.get_right_points_order(points)
        # print("points")
        # print(points)

        points = np.array([points[0], points[1], points[2], points[3]],
                          np.float32)

        dst = np.array([[w - 1, 0], [w - 1, h - 1], [0, h - 1], [0, 0]],
                       np.float32)
        M = cv2.getPerspectiveTransform(points, dst)

        card = cv2.warpPerspective(imgColour, M, (w, h))

        height, width, i = card.shape
        colorCard = card
        # cv2.imshow('Card ' + str(result + 1), card)
        # cv2.waitKey(3000)

        #Canny
        edges = cv2.Canny(card, 100, 200)
        # print(edges)

        # load the image, convert it to grayscale, and blur it slightly
        gray = cv2.cvtColor(card, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(card, (3, 3), 0)
Code example #35
File: main.py Project: ychaim/Final
                                        (orcImageWidthPx, orcImageHeightPx))
    transmtx = imgTransform.getTransformationMatrix1(img_data.plate_corners,
                                                     cropSize[0], cropSize[1])

    projectPoints = copy.deepcopy(img_data.plate_corners)
    projectPoints = np.array(projectPoints, np.float32)
    img_data.color_deskewed = np.zeros((cropSize[0], cropSize[1]),
                                       dtype=img_data.colorImg.dtype)
    cols1, rows1 = img_data.color_deskewed.shape
    deskewed_points = []
    deskewed_points.append((0, 0))
    deskewed_points.append((cols1, 0))
    deskewed_points.append((cols1, rows1))
    deskewed_points.append((0, rows1))
    deskewed_points = np.array(deskewed_points, np.float32)
    color_transmtx = cv2.getPerspectiveTransform(projectPoints,
                                                 deskewed_points)

    img_data.color_deskewed = cv2.warpPerspective(
        img_data.colorImg, color_transmtx,
        (img_data.color_deskewed.shape[0], img_data.color_deskewed.shape[1]))
    cv2.imshow("3gray", img_data.crop_gray)

    if len(img_data.color_deskewed.shape) > 2:
        img_data.crop_gray = cv2.cvtColor(img_data.color_deskewed,
                                          cv2.COLOR_BGR2GRAY)
    else:
        img_data.crop_gray = copy.deepcopy(img_data.color_deskewed)
    cv2.imshow("4gray", img_data.crop_gray)

    newLines = []
    for i in range(0, len(img_data.textLines)):
Code example #36
    def run(self):

        while True:
            # Only run loop if we have an image
            if self.imgRcvd:

                # step 1: undistort image

                #Define region of interest for cropping
                height = self.latestImage.shape[0]
                width = self.latestImage.shape[1]
                """ Gazebo Conde 
             self.vertices = np.array( [[
                        [2.75*width/5, 3*height/5],
                        [2.25*width/5, 3*height/5],
                        [.5*width/5, height], 
                        [4.5*width/5, height]
                    ]], dtype=np.int32 )
             """
                """ Raspicam """
                """"original
             self.vertices = np.array( [[
                        [2.75*width/5, 3*height/5],
                        [2.25*width/5, 3*height/5],
                        [.5*width/5, height], 
                        [4.5*width/5, height]
                    ]], dtype=np.int32 )
             """
                self.vertices = np.array(
                    [[[3.75 * width / 5, 2 * height / 5],
                      [1.25 * width / 5, 2 * height / 5],
                      [.05 * width / 5, height], [4.95 * width / 5, height]]],
                    dtype=np.int32)

                self.maskedImage = ld.region_of_interest(
                    self.latestImage, self.vertices)

                # step 2: perspective transform
                self.warpedImage, _, _ = ld.perspective_transform(
                    self.maskedImage, self.corners)

                # step 3: detect binary lane markings
                #self.binaryImage,  self.channelImage = ld.HLS_sobel(self.warpedImage)
                self.binaryImage = ld.binary_thresh(self.warpedImage,
                                                    self.boundaries,
                                                    'HSV')  #RGB or HSV

                # step 4: fit polynomials
                if self.global_fit is not None:
                    ploty, fitx, fit = ld.fast_fit_polynomials(
                        self.binaryImage, self.global_fit)
                else:
                    ploty, fitx, fit = ld.fit_polynomials(
                        self.warpedImage, self.binaryImage)

                self.global_fit = fit

                if self.binaryImage is not None:

                    data = cv2.cvtColor(self.binaryImage, cv2.COLOR_GRAY2BGR)

                    r1, g1, b1 = 255, 255, 255  # Original value
                    r2, g2, b2 = 255, 0, 0  # Value that we want to replace it with

                    red, green, blue = data[:, :, 0], data[:, :, 1], data[:, :,
                                                                          2]
                    mask = (red == r1) & (green == g1) & (blue == b1)
                    data[:, :, :3][mask] = [b2, g2, r2]

                    output = cv2.bitwise_and(
                        self.warpedImage,
                        self.warpedImage,
                        mask=self.binaryImage)  #Returns an RGB image

                    _, src, dst = ld.perspective_transform(
                        self.latestImage, self.corners)
                    Minv = cv2.getPerspectiveTransform(dst, src)

                    newwarp = cv2.warpPerspective(
                        data, Minv,
                        (self.latestImage.shape[1], self.latestImage.shape[0]))

                    self.maskedRGBImage = cv2.addWeighted(
                        newwarp, 1, self.latestImage, 2.0, 0)

                if fitx.shape[0] > 1:

                    # step 5: draw lane
                    self.processedImage = ld.render_lane(
                        self.maskedRGBImage, self.corners, ploty, fitx)

                    # step 6: print curvature
                    #self.curv = get_curvature(ploty, fitx)

                    # step 6: Calculate Setpoint
                    pts = np.vstack((fitx, ploty)).astype(np.float64).T
                    self.avg = ld.movingAverage(self.avg, pts[-1][0], N=3)
                    self.intersectionPoint = np.array([self.avg])

                    # Draw the Setpoint onto the warped blank image
                    radius = 5
                    cv2.circle(self.processedImage,
                               (int(self.intersectionPoint[0]),
                                self.processedImage.shape[0] - radius),
                               radius, (255, 255, 0), -1)

                    # step 6: Adjust Motors
                    self.flag = control.adjustMotorSpeed(
                        self.latestImage, self.intersectionPoint, self.speed,
                        self.cmdVelocityPub, self.cmdVelocityStampedPub,
                        self.flag)
                else:
                    self.processedImage = self.latestImage

                # Publish Processed Image
                self.outputImage = self.processedImage
                self.publish(self.outputImage, self.bridge, self.image_pub)
Code example #37
def getPoly_core(boxes, labels, mapper, linkmap):
    # configs
    num_cp = 5
    max_len_ratio = 0.7
    expand_ratio = 1.45
    max_r = 2.0
    step_r = 0.2

    polys = []
    for k, box in enumerate(boxes):
        # size filter for small instance
        w = int(np.linalg.norm(box[0] - box[1]) + 1)
        h = int(np.linalg.norm(box[1] - box[2]) + 1)
        if w < 30 or h < 30:
            polys.append(None)
            continue

        # warp image
        tar = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
        M = cv2.getPerspectiveTransform(box, tar)
        word_label = cv2.warpPerspective(labels,
                                         M, (w, h),
                                         flags=cv2.INTER_NEAREST)
        try:
            Minv = np.linalg.inv(M)
        except:
            polys.append(None)
            continue

        # binarization for selected label
        cur_label = mapper[k]
        word_label[word_label != cur_label] = 0
        word_label[word_label > 0] = 1
        """ Polygon generation """
        # find top/bottom contours
        cp = []
        max_len = -1
        for i in range(w):
            region = np.where(word_label[:, i] != 0)[0]
            if len(region) < 2: continue
            cp.append((i, region[0], region[-1]))
            length = region[-1] - region[0] + 1
            if length > max_len: max_len = length

        # pass if max_len is similar to h
        if h * max_len_ratio < max_len:
            polys.append(None)
            continue

        # get pivot points with fixed length
        tot_seg = num_cp * 2 + 1
        seg_w = w / tot_seg  # segment width
        pp = [None] * num_cp  # init pivot points
        cp_section = [[0, 0]] * tot_seg
        seg_height = [0] * num_cp
        seg_num = 0
        num_sec = 0
        prev_h = -1
        for i in range(0, len(cp)):
            (x, sy, ey) = cp[i]
            if (seg_num + 1) * seg_w <= x and seg_num <= tot_seg:
                # average previous segment
                if num_sec == 0: break
                cp_section[seg_num] = [
                    cp_section[seg_num][0] / num_sec,
                    cp_section[seg_num][1] / num_sec
                ]
                num_sec = 0

                # reset variables
                seg_num += 1
                prev_h = -1

            # accumulate center points
            cy = (sy + ey) * 0.5
            cur_h = ey - sy + 1
            cp_section[seg_num] = [
                cp_section[seg_num][0] + x, cp_section[seg_num][1] + cy
            ]
            num_sec += 1

            if seg_num % 2 == 0: continue  # No polygon area

            if prev_h < cur_h:
                pp[int((seg_num - 1) / 2)] = (x, cy)
                seg_height[int((seg_num - 1) / 2)] = cur_h
                prev_h = cur_h

        # processing last segment
        if num_sec != 0:
            cp_section[-1] = [
                cp_section[-1][0] / num_sec, cp_section[-1][1] / num_sec
            ]

        # pass if the number of pivots is insufficient or the segment width is smaller than the character height
        if None in pp or seg_w < np.max(seg_height) * 0.25:
            polys.append(None)
            continue

        # calc median maximum of pivot points
        half_char_h = np.median(seg_height) * expand_ratio / 2

        # calc gradient and apply it to make horizontal pivots
        new_pp = []
        for i, (x, cy) in enumerate(pp):
            dx = cp_section[i * 2 + 2][0] - cp_section[i * 2][0]
            dy = cp_section[i * 2 + 2][1] - cp_section[i * 2][1]
            if dx == 0:  # gradient is zero
                new_pp.append([x, cy - half_char_h, x, cy + half_char_h])
                continue
            rad = -math.atan2(dy, dx)
            c, s = half_char_h * math.cos(rad), half_char_h * math.sin(rad)
            new_pp.append([x - s, cy - c, x + s, cy + c])

        # get edge points to cover character heatmaps
        isSppFound, isEppFound = False, False
        grad_s = (pp[1][1] - pp[0][1]) / (pp[1][0] - pp[0][0]) + (
            pp[2][1] - pp[1][1]) / (pp[2][0] - pp[1][0])
        grad_e = (pp[-2][1] - pp[-1][1]) / (pp[-2][0] - pp[-1][0]) + (
            pp[-3][1] - pp[-2][1]) / (pp[-3][0] - pp[-2][0])
        for r in np.arange(0.5, max_r, step_r):
            dx = 2 * half_char_h * r
            if not isSppFound:
                line_img = np.zeros(word_label.shape, dtype=np.uint8)
                dy = grad_s * dx
                p = np.array(new_pp[0]) - np.array([dx, dy, dx, dy])
                cv2.line(line_img, (int(p[0]), int(p[1])),
                         (int(p[2]), int(p[3])),
                         1,
                         thickness=1)
                if np.sum(np.logical_and(
                        word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
                    spp = p
                    isSppFound = True
            if not isEppFound:
                line_img = np.zeros(word_label.shape, dtype=np.uint8)
                dy = grad_e * dx
                p = np.array(new_pp[-1]) + np.array([dx, dy, dx, dy])
                cv2.line(line_img, (int(p[0]), int(p[1])),
                         (int(p[2]), int(p[3])),
                         1,
                         thickness=1)
                if np.sum(np.logical_and(
                        word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
                    epp = p
                    isEppFound = True
            if isSppFound and isEppFound:
                break

        # pass if boundary of polygon is not found
        if not (isSppFound and isEppFound):
            polys.append(None)
            continue

        # make final polygon
        poly = []
        poly.append(warpCoord(Minv, (spp[0], spp[1])))
        for p in new_pp:
            poly.append(warpCoord(Minv, (p[0], p[1])))
        poly.append(warpCoord(Minv, (epp[0], epp[1])))
        poly.append(warpCoord(Minv, (epp[2], epp[3])))
        for p in reversed(new_pp):
            poly.append(warpCoord(Minv, (p[2], p[3])))
        poly.append(warpCoord(Minv, (spp[2], spp[3])))

        # add to final result
        polys.append(np.array(poly))

    return polys
コード例 #38
import cv2
import numpy as np
import matplotlib.pyplot as plt


image = cv2.imread("img2.jpg")
cv2.circle(image, (1100,700), 5, (0,0,255), -1)
cv2.circle(image, (1600, 700), 5, (0,0,255), -1)
cv2.circle(image, (400, 1450), 5, (0,0,255), -1)
cv2.circle(image, (2340, 1450), 5, (0,0,255), -1)
pts1 = np.float32([[1100,700],[1600,700],[400,1450],[2340,1450]])
pts2 = np.float32([[0,0],[1500,0],[0,2400],[1500,2400]])

matrix = cv2.getPerspectiveTransform(pts1, pts2)

result = cv2.warpPerspective(image, matrix, (1500,2400))
cv2.imshow("result", result)

height, width = image.shape[:2]
print(height)
print(width)

cv2.waitKey(5000)
コード例 #39
# convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow("Title-gray",gray)

# smooth the image with a Gaussian blur
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#cv2.imshow("GaussianBlur",blurred)

# detect edges with Canny
edged = cv2.Canny(blurred, 30, 50)
#cv2.imshow("Canny",edged)

image, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)

for c in contours:
    p = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * p, True)

    if len(approx) == 4:
        target = approx
        break
approx = mapper.mapp(target)

pts = np.float32([[0, 0], [800, 0], [800, 800], [0, 800]])
op = cv2.getPerspectiveTransform(approx, pts)
dst = cv2.warpPerspective(orig, op, (800, 800))
cv2.imshow("Scanned final", dst)
コード例 #40
File: transformtesting.py Project: ankersh/Projects
Xtrain, Xtest, Ytrain, Ytest = train_test_split(
    images, labels, test_size=0.6,
    random_state=10)  #Splits training data randomly

a, RealTest, b, RealLabels = train_test_split(
    Xtest, Ytest, test_size=0.5,
    random_state=10)  #used to randomly select a portion of the training data.

htest = []
for image in RealTest:
    himage = hog(image, pixels_per_cell=(8, 8), cells_per_block=(3, 3))
    htest.append(himage)

inputc1 = np.float32([[0, 72], [128, 72], [128, 0], [0, 0]])
outputc1 = np.float32([[3, 72], [128, 68], [128, 4], [1, 4]])
t1 = cv2.getPerspectiveTransform(inputc1, outputc1)

inputc2 = np.float32([[0, 72], [128, 72], [128, 0], [0, 0]])
outputc2 = np.float32([[0, 0], [128, 1], [128, 72], [1, 72]])
t2 = cv2.getPerspectiveTransform(inputc2, outputc2)

transformedimages = []
transformedlabels = []

for i in range(0, len(Xtrain) // 2):
    image = Xtrain[i]
    transformedlabels.append(Ytrain[i])
    timage = cv2.warpPerspective(image, t1, (128, 72))
    h = hog(timage, pixels_per_cell=(8, 8), cells_per_block=(3, 3))
    transformedimages.append(h)
コード例 #41
File: solve.py Project: psycane/Sudoku-Solver
    return (ar[3], ar[0], ar[1], ar[2])


# points to remap: tr, tl, bl, br of a 144x144 square
points1 = np.array([[144, 0], [0, 0], [0, 144], [144, 144]], np.float32)
outerPoints = getOuterPoints(approximation)
points2 = np.array(outerPoints, np.float32)

# Transformation matrix
pers = cv2.getPerspectiveTransform(points2, points1)

# remap the image
warp = cv2.warpPerspective(
    img, pers, (SUDOKU_SIZE * IMAGE_HEIGHT, SUDOKU_SIZE * IMAGE_WIDHT))
warp_gray = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
show(warp_gray)
# IMAGE_WIDHT, IMAGE_HEIGHT, SUDOKU_SIZE, N_MIN_ACTVE_PIXELS = 450, 450, 9, 10


def extract_number(x, y):
    # square -> position x-y
    im_number = warp_gray[x * IMAGE_HEIGHT:(x + 1) *
                          IMAGE_HEIGHT][:,
                                        y * IMAGE_WIDHT:(y + 1) * IMAGE_WIDHT]
コード例 #42
import time

cv.startWindowThread()
cap = cv.VideoCapture('video/Sentry_2.mkv')
img= cv.imread("Sample2.png")
fourcc = cv.VideoWriter_fourcc(*'mp4v')
out = cv.VideoWriter('output2.mp4', fourcc, 15.0, (449,809),True)

scale_percent = 40 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
frame = cv.resize(img, dim, interpolation = cv.INTER_AREA)
pts1 = np.float32([[178,141],[211,79],[390,91],[468,177]])
pts2 = np.float32([[105,404],[225,190],[347,403],[226,618]])  # the 4 corresponding points on the map
M = cv.getPerspectiveTransform(pts1,pts2)

frames=1
while(frames<299):
    map_img= cv.imread("arena4.png")
    frames=frames+1
    ret, img = cap.read()
    scale_percent = 40 # percent of original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    frame = cv.resize(img, dim, interpolation = cv.INTER_AREA)
    frame2=frame.copy()
    frame3=frame.copy()
    frame4=np.zeros(frame.shape)
    # frame: hb detection
コード例 #43
    def method(self, img):
        imgtype = img.dtype
        h, w, _ = img.shape
        centery = h * 0.5
        centerx = w * 0.5

        alpha = math.radians(self.shear)
        beta = math.radians(self.anglez)

        lambda1 = self.scale[0]
        lambda2 = self.scale[1]

        tx = self.translate[0]
        ty = self.translate[1]

        sina = math.sin(alpha)
        cosa = math.cos(alpha)
        sinb = math.sin(beta)
        cosb = math.cos(beta)

        M00 = cosb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) - sinb * (lambda2 - lambda1) * sina * cosa
        M01 = - sinb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + cosb * (lambda2 - lambda1) * sina * cosa

        M10 = sinb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
        M11 = + cosb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + sinb * (lambda2 - lambda1) * sina * cosa
        M02 = centerx - M00 * centerx - M01 * centery + tx
        M12 = centery - M10 * centerx - M11 * centery + ty
        affine_matrix = np.array([[M00, M01, M02], [M10, M11, M12], [0, 0, 1]], dtype=np.float32)
        # -------------------------------------------------------------------------------
        z = np.sqrt(w ** 2 + h ** 2) / 2 / np.tan(math.radians(self.fov / 2))

        radx = math.radians(self.anglex)
        rady = math.radians(self.angley)

        sinx = math.sin(radx)
        cosx = math.cos(radx)
        siny = math.sin(rady)
        cosy = math.cos(rady)

        r = np.array([[cosy, 0, -siny, 0],
                      [-siny * sinx, cosx, -sinx * cosy, 0],
                      [cosx * siny, sinx, cosx * cosy, 0],
                      [0, 0, 0, 1]])

        pcenter = np.array([centerx, centery, 0, 0], np.float32)

        p1 = np.array([0, 0, 0, 0], np.float32) - pcenter
        p2 = np.array([w, 0, 0, 0], np.float32) - pcenter
        p3 = np.array([0, h, 0, 0], np.float32) - pcenter
        p4 = np.array([w, h, 0, 0], np.float32) - pcenter

        dst1 = r.dot(p1)
        dst2 = r.dot(p2)
        dst3 = r.dot(p3)
        dst4 = r.dot(p4)

        list_dst = [dst1, dst2, dst3, dst4]

        org = np.array([[0, 0],
                        [w, 0],
                        [0, h],
                        [w, h]], np.float32)

        dst = np.zeros((4, 2), np.float32)

        for i in range(4):
            dst[i, 0] = list_dst[i][0] * z / (z - list_dst[i][2]) + pcenter[0]
            dst[i, 1] = list_dst[i][1] * z / (z - list_dst[i][2]) + pcenter[1]

        perspective_matrix = cv2.getPerspectiveTransform(org, dst)
        total_matrix = perspective_matrix @ affine_matrix

        result_img = cv2.warpPerspective(img, total_matrix, (w, h), flags=INTER_MODE[self.resample],
                                         borderMode=cv2.BORDER_CONSTANT, borderValue=self.fillcolor)
        return result_img.astype(imgtype)
コード例 #44
                [sin, cos, (1 - cos) * cy - sin * cx], [0, 0, 1]])
M6_shift = np.array([[1, 0, x], [0, 1, y], [0, 0, 1]])
xs = Acorners[:, 0] - x
ys = Acorners[:, 1] - y
zs = np.ones_like(xs)
homoc = np.array([xs, ys, zs])
#homoc = np.matmul(M6_, homoc)
#homoc = np.matmul(np.matmul(M6_shift,M6), homoc)
homoc = np.matmul(M6, homoc)
resx = (homoc[0, :] / homoc[2, :]) + x
resy = (homoc[1, :] / homoc[2, :]) + y
Bcorners = np.array([resx, resy]).transpose()
print(Acorners, Bcorners)
#print(M6_,np.matmul(M6_shift,M6))

shift = np.array([-53, -53])
Bcorners_shift = Bcorners - shift
M7 = cv2.getPerspectiveTransform(np.float32(Acorners), np.float32(Bcorners))
M7_shift = cv2.getPerspectiveTransform(np.float32(Bcorners),
                                       np.float32(Bcorners_shift))
print(M7, M6)

height, width, channels = img.shape
#pts1 = np.float32( [ [0,0],[256,0],[0,256],[256,256] ] )
#pts2 = np.float32( [ [0,0],[800,0],[0,800],[800,800] ] )+10
height, width = np.int32((height * np.sqrt(2), width * np.sqrt(2)))
dst = cv2.warpPerspective(img, np.matmul(M7_shift, M7), (width, height))
#dst = cv2.warpPerspective( img , M7 , (width, height))
im = Image.fromarray(dst)
im.show()
コード例 #45
def main(plate_num, plate_type, fontnum):
    global counter, quantity, type_plate_list, row_img, num_circle
    init()
    # time.sleep(2.5)
    while camera.isOpened():
        # img = cv2.imread('plate04.png')
        ret, img = camera.read()
        img_show = img.copy()
        edges = cv2.Canny(img, 100, 200)
        im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP,
                                                    cv2.CHAIN_APPROX_SIMPLE)

        for i in contours:
            M = cv2.moments(i)
            # if 60000 <= M['m00'] < 120000:
            if 1 <= M['m00'] < 40000:
                new_contour.append(i)
                contour_size.append(M['m00'])
        biggest = contour_size.index(max(contour_size))
        print(contour_size[biggest])
        print('Index of Biggest contour:', biggest)

        print("Number of New contours:", len(new_contour))

        cv2.drawContours(img, [new_contour[biggest]], 0, (0, 255, 0), 5)

        approx = cv2.approxPolyDP(
            new_contour[biggest],
            0.01 * cv2.arcLength(new_contour[biggest], True), True)
        for i in approx:
            new_approx.append(i[0])
        old_coor = sorted(new_approx, key=lambda k: [k[0], k[1]])
        left_side.append(old_coor[0])
        left_side.append(old_coor[1])
        right_side.append(old_coor[2])
        right_side.append(old_coor[3])
        new_left_side = sorted(left_side, key=lambda k: [k[1], k[0]])
        new_right_side = sorted(right_side, key=lambda k: [k[1], k[0]])

        pts1 = np.float32([
            new_left_side[0], new_left_side[1], new_right_side[0],
            new_right_side[1]
        ])
        pts2 = np.float32([[0, 0], [0, 300], [300, 0], [300, 300]])

        L = cv2.getPerspectiveTransform(pts1, pts2)

        crop = cv2.warpPerspective(img_show, L, (300, 300))
        copy_crop = crop.copy()

        gray_crop = crop.copy()
        gray_crop = cv2.cvtColor(gray_crop, cv2.COLOR_BGR2GRAY)
        circle = cv2.HoughCircles(gray_crop,
                                  cv2.HOUGH_GRADIENT,
                                  2,
                                  10,
                                  param1=30,
                                  param2=50,
                                  minRadius=0,
                                  maxRadius=50)

        if circle is not None:
            circle = np.uint16(np.around(circle))

            for i in circle[0, :]:
                cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
                cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)

        resize_image = cv2.resize(copy_crop, (100, 100))
        resize_image = cv2.cvtColor(resize_image, cv2.COLOR_BGR2GRAY)
        ret, bw = cv2.threshold(resize_image, 127, 255, cv2.THRESH_BINARY)

        if counter in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
            if circle is not None:
                print('Number of Circle:', len(circle[0]))
                num_circle = len(circle[0])
            else:
                num_circle = 0

            if counter == 9:
                list_circle.append(num_circle)

            row_img = resize_image
        else:
            if circle is not None:
                print('Number of Circle:', len(circle[0]))
                num_circle = len(circle[0])
            else:
                num_circle = 0
            row_img = np.hstack([row_img, resize_image])
            list_circle.append(num_circle)

        init()
        counter += 1
        cv2.imshow("camera", img)
        cv2.waitKey(1)
        if counter == quantity:
            break

    cv2.imshow('Row', row_img)
    cv2.imwrite(
        str(plate_num) + type_plate_list[plate_type] + str(fontnum) + '.png',
        row_img)
    with open(
            'circle' + str(plate_num) + type_plate_list[plate_type] +
            str(fontnum) + '.csv', 'w') as csvfile:
        file = csv.writer(csvfile,
                          delimiter=',',
                          quotechar='|',
                          quoting=csv.QUOTE_MINIMAL)
        for i, j in enumerate(list_circle):
            file.writerow([j])

    # cv2.waitKey(0)
    cv2.destroyAllWindows()
コード例 #46
def main():
    device = RealSense('../material/new_cali.bag')
    try:
        while True:
            ########################
            # Startup              #
            ########################
            # read frames
            colorframe = device.getcolorstream()
            # if fileFlag:
            #    colorframe = cv2.cvtColor(colorframe, cv2.COLOR_RGB2BGR) #Reading from BAG alters the color space and needs to be fixed

            # check if frame empty
            if colorframe is None:
                break
            # process frame
            ########################
            # Calibration           #
            ########################
            global screen_corners, target_corners
            if len(target_corners) != 4:
                frame, screen_corners, target_corners = cal.calibrateViaARUco(
                    colorframe, screen_corners, target_corners)
            else:
                # print(depthframe[int(calibrationMatrix[0][1])][int(calibrationMatrix[0][0])])
                # print("newtabledistance = ", depthframe[calibrationMatrix[0][1]][calibrationMatrix[0][0]])

                M = cv2.getPerspectiveTransform(target_corners, screen_corners)
                # TODO: derive resolution from width and height of original frame?
                caliColorframe = cv2.warpPerspective(colorframe, M,
                                                     (1280, 720))

                ########################
                # Hand Detection       #
                ########################
                frame = np.zeros(colorframe.shape, dtype='uint8')
                colorspace = cv2.COLOR_BGR2LAB
                hands = getHand(caliColorframe, colorframe, colorspace, True)

                # if hands were detected visualize them
                if len(hands) > 0:
                    # Print and log the fingertips
                    for i, hand in enumerate(hands):
                        hand_image = hand["hand_image"]
                        # Altering hand colors (to avoid a feedback loop)
                        # Option 1: Inverting the picture
                        hand_image = cv2.bitwise_not(hand_image)
                        hand_image[np.where(
                            (hand_image == [255, 255,
                                            255]).all(axis=2))] = [0, 0, 0]

                        # add the hand to the frame
                        frame = cv2.bitwise_or(frame, hand_image)

            frame = frame.astype(np.uint8)
            # frame = reducer(frame, percentage=40)  # reduce frame by 40%
            cv2.namedWindow("Output Frame", cv2.WND_PROP_FULLSCREEN)
            cv2.setWindowProperty("Output Frame", cv2.WND_PROP_FULLSCREEN,
                                  cv2.WINDOW_NORMAL)
            cv2.imshow('Output Frame', frame)
            cv2.waitKey(1)

    finally:
        # Stop streaming
        device.stop()
        pass
コード例 #47
File: main.py Project: psurendrar/suduko-solver
def solve_suduko(img):
    heightImg = 450
    widthImg = 450
    model = intializePredectionModel()  # LOAD THE CNN MODEL

    # 1. PREPARE THE IMAGE
    # img=np.array(img.convert('RGB'))
    img = cv2.resize(img, (widthImg, heightImg))  # RESIZE IMAGE TO MAKE IT A SQUARE IMAGE
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # CREATE A BLANK IMAGE FOR TESTING/DEBUGGING IF REQUIRED
    imgThreshold = preProcess(img)

    # 2. FIND ALL CONTOURS
    imgContours = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
    imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
    contours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)  # FIND ALL CONTOURS
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 3)  # DRAW ALL DETECTED CONTOURS

    # 3. FIND THE BIGGEST CONTOUR AND USE IT AS SUDOKU
    biggest, maxArea = biggestContour(contours)  # FIND THE BIGGEST CONTOUR
    # print(biggest)
    if biggest.size != 0:
        biggest = reorder(biggest)
        # print(biggest)
        cv2.drawContours(imgBigContour, biggest, -1, (0, 0, 255), 25)  # DRAW THE BIGGEST CONTOUR
        pts1 = np.float32(biggest)  # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2)  # GET THE PERSPECTIVE TRANSFORM MATRIX
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
        imgDetectedDigits = imgBlank.copy()
        imgWarpColored = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)

        # 4. SPLIT THE IMAGE AND FIND EACH DIGIT AVAILABLE
        imgSolvedDigits = imgBlank.copy()
        boxes = splitBoxes(imgWarpColored)
        # print(len(boxes))
        # cv2.imshow("Sample",boxes[65])
        numbers = getPredection(boxes, model)
        # print(numbers)
        imgDetectedDigits = displayNumbers(imgDetectedDigits, numbers, color=(255, 0, 255))
        numbers = np.asarray(numbers)
        posArray = np.where(numbers > 0, 0, 1)
        # print(posArray)

        # 5. FIND SOLUTION OF THE BOARD
        board = np.array_split(numbers, 9)
        # print(board)
        try:
            suduko_solver.solve(board)
        except Exception:
            pass

        flatList = []
        for sublist in board:
            for item in sublist:
                flatList.append(item)
        solvedNumbers = flatList * posArray
        imgSolvedDigits = displayNumbers(imgSolvedDigits, solvedNumbers)

        # #### 6. OVERLAY SOLUTION
        pts2 = np.float32(biggest)  # PREPARE POINTS FOR WARP
        pts1 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2)  # GET THE PERSPECTIVE TRANSFORM MATRIX
        imgInvWarpColored = cv2.warpPerspective(imgSolvedDigits, matrix, (widthImg, heightImg))
        img = cv2.addWeighted(imgInvWarpColored, 1, img, 0.5, 1)
        return img
コード例 #48
import cv2
import numpy as np
pts3 = np.float32([[300, 238], [617, 238], [184, 482], [724, 482]])
pts4 = np.float32([[0, 0], [550, 0], [0, 550], [550, 550]])
M_perspective = cv2.getPerspectiveTransform(pts3, pts4)
image = cv2.imread('img.jpg')
img_perspective = cv2.warpPerspective(image, M_perspective, (550, 550))
cv2.imshow('name', img_perspective)
cv2.waitKey()
コード例 #49
# find the coordinates of the cutting board corners
# save these as the origin points for the following perspective transform
rectOriginPts = findRectangle(rectImg)

######################## 2. SQUARE IMAGE OF CUTTING SURFACE USING PERSPECTIVE TRANSFORM ########################
# perform the perspective transform
# use the original range of rows and columns
(maxCols, maxRows) = rectOriginPts.max(0)
newCols = 800
newRows = 600
# use a reasonable on-screen size for now
rectTargetPts = np.float32([[0, maxRows], [0, 0], [maxCols, 0],
                            [maxCols, maxRows]])

# get the perspective transform
M = cv2.getPerspectiveTransform(rectOriginPts, rectTargetPts)
# warp the image using the transform matrix
dst = cv2.warpPerspective(src=rectImg, M=M, dsize=(newCols, newRows))
# visualize the new image
cv2.imshow('Cutting Surface Square Output', dst)

######################## 3. IMPORT THE PDF OF A PATTERN ########################
# https://github.com/Belval/pdf2image/blob/master/docs/reference.md
# https://www.geeksforgeeks.org/convert-pdf-to-image-using-python/

## In future versions, will likely have the pdf location as an input from the function

# read in the pdf as a jpg
pages = convert_from_path(fileLoc,
                          dpi=300,
                          poppler_path=sidneyPopplerPath,
コード例 #50
    def pipeline(self, img):

        undist = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)

        line_bin_image = np.zeros_like(img[:, :, 0])
        ksize = 3
        gradx = self.combinethreshold.abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(12, 255))
        grady = self.combinethreshold.abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(25, 255))
        c_binary = self.combinethreshold.color_thresh(img, s_thresh=(100, 255), v_thresh=(50, 255))

        line_bin_image[((gradx == 1) & (grady == 1)) | (c_binary == 1)] = 255

        img_height, img_width, channels = img.shape
        img_size = (img.shape[1], img.shape[0])
        #print(img_size)

        src = np.float32(
            [[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
             [((img_size[0] / 6) - 10), img_size[1]],
             [(img_size[0] * 5 / 6) + 60, img_size[1]],
             [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])

        dst = np.float32(
            [[(img_size[0] / 4), 0],
             [(img_size[0] / 4), img_size[1]],
             [(img_size[0] * 3 / 4), img_size[1]],
             [(img_size[0] * 3 / 4), 0]])

        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)

        Minv = cv2.getPerspectiveTransform(dst, src)

        # Warp the image using OpenCV warpPerspective()
        binary_warped = cv2.warpPerspective(line_bin_image, M, (img_width, img_height), flags=cv2.INTER_LINEAR)

        lane_on_warped_blank_img, left_fitx, right_fitx, ploty = self.tracker.search_around_poly(binary_warped)

        # Create an image to draw the lines on
        warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
        # Combine the result with the original image
        result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)

        left_curverad, right_curverad = self.measure_curvature_real(left_fitx, right_fitx, ploty)

        radius_of_curvature = (left_curverad + right_curverad)/2
        camera_center = (left_fitx[-1] + right_fitx[-1])/2
        center_diff = (camera_center - newwarp.shape[1]/2) * self.xm_per_pix

        side_pos = 'left'
        if center_diff <=0:
            side_pos = 'right'

        # add radius and offset to the result
        # For a straight road a very large radius is calculated, so a threshold
        # was added to simply output "Straight Road" in those cases
        # if radius_of_curvature > 1500:
        #     cv2.putText(result, 'Straight Road ahead', (50, 50),
        #                 cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        # else:
        #     cv2.putText(result, 'Radius of curvature = ' + str(round(radius_of_curvature, 3)) + ' (m)', (50, 50),
        #             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(result, 'Radius of curvature = ' + str(round(radius_of_curvature, 3)) + ' (m)', (50, 50),
                                 cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' + side_pos + ' of center',
                    (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

        return result
コード例 #51
    def _get_params(self, params):
        img = params["image"]
        scale = (self.base_scale * np.exp(
            random.uniform(np.log(self.scale_range[0]),
                           np.log(self.scale_range[1])))
                 if random.random() <= self.scale_prob else self.base_scale)
        ar = (np.exp(
            random.uniform(np.log(self.aspect_range[0]),
                           np.log(self.aspect_range[1])))
              if random.random() <= self.aspect_prob else 1.0)

        flip_v = self.flip[0] and random.random() <= 0.5
        flip_h = self.flip[1] and random.random() <= 0.5
        scale = np.array([scale / np.sqrt(ar), scale * np.sqrt(ar)])
        degrees = (random.uniform(self.rotate_range[0], self.rotate_range[1])
                   if random.random() <= self.rotate_prob else 0)
        pos = np.array(
            [random.uniform(-0.5, +0.5),
             random.uniform(-0.5, +0.5)])
        translate = np.array([
            random.uniform(-self.translate[0], self.translate[0]),
            random.uniform(-self.translate[1], self.translate[1]),
        ])
        # prepare coordinates clockwise from the top-left
        src_points = np.array([[0, 0], [1, 0], [1, 1], [0, 1]],
                              dtype=np.float32)
        if self.mode == "normal":
            # ignore the aspect ratio and fit to the output size
            dst_points = np.array([[0, 0], [1, 0], [1, 1], [0, 1]],
                                  dtype=np.float32)
        elif self.mode == "preserve_aspect":
            # shrink while preserving the aspect ratio
            if img.shape[0] < img.shape[1]:
                # landscape
                hr = img.shape[0] / img.shape[1]
                yr = (1 - hr) / 2
                dst_points = np.array(
                    [[0, yr], [1, yr], [1, yr + hr], [0, yr + hr]],
                    dtype=np.float32)
            else:
                # portrait
                wr = img.shape[1] / img.shape[0]
                xr = (1 - wr) / 2
                dst_points = np.array(
                    [[xr, 0], [xr + wr, 0], [xr + wr, 1], [xr, 1]],
                    dtype=np.float32)
        elif self.mode == "crop":
            # crop at a fixed size regardless of the input size
            hr = self.size[0] / img.shape[0]
            wr = self.size[1] / img.shape[1]
            yr = random.uniform(0, 1 - hr)
            xr = random.uniform(0, 1 - wr)
            dst_points = np.array(
                [[xr, yr], [xr + wr, yr], [xr + wr, yr + hr], [xr, yr + hr]],
                dtype=np.float32,
            )
        else:
            raise ValueError(f"Invalid mode: {self.mode}")
        # flip
        if flip_h:
            dst_points = dst_points[[1, 0, 3, 2]]
        if flip_v:
            dst_points = dst_points[[3, 2, 1, 0]]
        # shift so the origin is at the center
        src_points -= 0.5
        # rotation
        theta = degrees * np.pi * 2 / 360
        c, s = np.cos(theta), np.sin(theta)
        r = np.array([[c, -s], [s, c]], dtype=np.float32)
        src_points = np.dot(r, src_points.T).T
        # scaling
        src_points /= scale
        # translation:
        # the slack left by scaling + the initial 0.5 shift + the translate amount
        src_points += (1 - 1 / scale) * pos + 0.5 + translate / scale
        # build the transform matrix
        src_points *= [img.shape[1], img.shape[0]]
        dst_points *= [self.size[1], self.size[0]]
        m = cv2.getPerspectiveTransform(src_points, dst_points)
        return {"m": m, "image_size": img.shape[:2]}
コード例 #52
def bird_view(source_img, isBridge=False):
    pts1 = np.float32([[0, 85], [300, 85], [0, 200], [300, 200]])
    pts2 = np.float32([[0, 0], [200, 0], [200 - 130, 300], [150, 300]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    warped = cv2.warpPerspective(source_img, matrix, (240, 350))
    return warped
コード例 #53
def detect_grid(im_path='../images/sudoku_6.jpg'):

	########################################################################
	#TO DO: Code for passing in file arguments (ideally many many files at once)#
	########################################################################

	#binarize image
	sudoku = cv2.imread(im_path, 0)
	# clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	# sudoku = clahe.apply(sudoku)

	image = binarize(sudoku)
	image_cp = image.copy()

	print "Started grid detection..."
	#effectively find the bounding box
	bounding_box = findLongestContour(image_cp)

	# find corners
	br, bl, tl, tr = findCorners(bounding_box)

	cv2.circle(image_cp, (br[0],br[1]), 5, (255,255,0), -1)
	cv2.circle(image_cp, (bl[0],bl[1]), 5, (255,255,0), -1)
	cv2.circle(image_cp, (tr[0],tr[1]), 5, (255,255,0), -1)
	cv2.circle(image_cp, (tl[0],tl[1]), 5, (255,255,0), -1)

	black = [0,0,0]
	outline_plot = cv2.copyMakeBorder(image, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=black)
	cont_plot = cv2.copyMakeBorder(image_cp, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=black)

	maxLength = longestEdge(br, bl, tr, tl)

	src = np.matrix([tl, tr, br, bl], dtype = np.float32)
	dst = np.matrix([[0,0],[maxLength, 0],[maxLength, maxLength], [0, maxLength]], dtype = np.float32)

	fixed = cv2.warpPerspective(sudoku, cv2.getPerspectiveTransform(src, dst), (int(maxLength), int(maxLength)))

	fixed = cv2.resize(fixed, (360,360))
	maxLength = 360

	fixed_threshold = cv2.adaptiveThreshold(fixed, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 101, 1)

	dist = np.floor(maxLength/9).astype(np.int16)

	currentCell = np.zeros((dist, dist))

	final_img = cv2.hconcat((outline_plot, cont_plot))
	fixed_binarized = binarize(fixed, None)

	edges = cv2.Canny(fixed,50,50, 3)
	lines = cv2.HoughLines(edges,1,np.pi/180,150)
#	print lines
	if lines is not None:
		for rho,theta in lines[0]:
			a = np.cos(theta)
			b = np.sin(theta)
			x0 = a*rho
			y0 = b*rho
			x1 = int(x0 + 1000*(-b))
			y1 = int(y0 + 1000*(a))
			x2 = int(x0 - 1000*(-b))
			y2 = int(y0 - 1000*(a))
	#		print x1, y1, x2, y2
			cv2.line(fixed_threshold, (x1, y1), (x2, y2), (0,0,0), 5)


	cells = []
	locations = []
	for i in range(9):
		for j in range(9):
			currentCell = fixed_threshold[i*dist:(i+1)*dist, j*dist:(j+1)*dist]

			# Elliott temp code: attempt to dilate so that 1's are better seen
		#	kernel = np.matrix([[1,1,1],[1,1,1],[1,1,1]], dtype = np.uint8)
		#	currentCell = cv2.dilate(currentCell, kernel)

##########ders analysis
			corners = cv2.cornerHarris(currentCell[10:30, 10:30], 2, 3, 0.04)
			# # cv2.circle(image_cp, (br[0],br[1]), 5, (255,255,0), -1)
			# plt.imshow(corners ,cmap = 'jet')
			# print(corners.reshape((40*40, 1)))
			# plt.show()
		#	plt.imshow(currentCell)
		#	plt.show()
		#	print(corners[corners != 0].shape)
		#	print(corners[corners != 0].shape[0])
			if (corners[corners != 0].shape[0] > 100):
		#		print(corners.shape)
				locations.append((i,j))
				cells.append(currentCell)
		#		plt.imshow(corners, cmap='jet')
		#		plt.show()
		#		plt.imshow(currentCell)
		#		plt.show()

###########

		# 	w, h = currentCell.shape
		# 	analysis = fixed_threshold[i*dist+(w/5):(i+1)*dist-(w/5), j*dist+(h/5):(j+1)*dist-(h/5)]

		# 	moments = cv2.moments(analysis, True)
		# 	m = moments['m00']
		# #	print m
		# #	print analysis.shape[0]*analysis.shape[1]/6



		# 	if m >= analysis.shape[0]*analysis.shape[1]/6:
		# 		locations.append((i,j))
		# 		cells.append(currentCell)
				#print "yes"
			# else:
			# 	print "no"
			# print "\n"
			# cv2.imshow('Test analysis', analysis)
			# cv2.imshow('Test cell', currentCell)
			# cv2.waitKey(0)
	# cv2.imshow('Final', final_img)
	# cv2.imshow('Rectified', fixed)
	# cv2.imshow('Threshold', fixed_threshold)
	# cv2.imshow('Binarized Rectified Image', fixed_binarized)
	# cv2.imshow('Binarized Threshold Image', fixed_threshold)

	return cells, locations
コード例 #54
#image dimensions
h, w, _ = img.shape
#store the visualization
cv2.imwrite('points.jpg', img)

#region of interest in the image
source_points = np.float32([[bl_x, bl_y], [tl_x, tl_y], [tr_x, tr_y],
                            [br_x, br_y]])
#destination points
destination_points = np.float32([[0, 1700], [0, 0], [300, 0], [300, 1700]])

#read back the visualized image
image = cv2.imread('points.jpg')
#get homography matrix
matrix = cv2.getPerspectiveTransform(source_points, destination_points)
# get the new warped image
result = cv2.warpPerspective(image, matrix, (300, 1700))

# show the image on an axis
plt.figure()
plt.imshow(result)
plt.show()
cv2.imwrite("birdie.jpg", result)

# camera matrix
mtx = [[3095.73828160516, 0, 2016.7322805317165],
       [0, 3102.1298848836359, 1510.5872677598889], [0, 0, 1]]
#distortion matrix
dist = [
    0.21522014938761402, -0.29536385974743162, -0.0062158816142389856,
コード例 #55
    def PerspectiveTransform(self):
        M = cv2.getPerspectiveTransform(self.src, self.dst)
        Minv = cv2.getPerspectiveTransform(self.dst, self.src)
        return M, Minv
コード例 #56
    dtype=np.float32)

# mapped image dimensions
# simplified
# new_height = 164
# new_width = 530
new_height = 194
new_width = 552

# the target area of roi mapping
target_rect = np.array([[0, 0], [new_width - 1, 0], [0, new_height - 1],
                        [new_width - 1, new_height - 1]],
                       dtype=np.float32)

# compute the perspective transform matrix and apply it
M = cv2.getPerspectiveTransform(roi, target_rect)

# map all frames from video
split = 'test'  # renamed from `set` to avoid shadowing the built-in
count = 0
vc = cv2.VideoCapture('../data/' + split + '.mp4')
isread, frame = vc.read()
while isread:
    warped = cv2.warpPerspective(frame, M, (new_width, new_height))
    # warped = cv2.resize(warped,(354,110))
    cv2.imwrite('../data/image_' + split + '/' + str(count) + '.jpg', warped)

    print('written flow for frame ' + str(count), end='\r')
    count += 1
    isread, frame = vc.read()
コード例 #57
File: interDetect.py Project: AlyneG/comp3431_ass2
    def intersection_detect(self, image):
        image = self.bridge.imgmsg_to_cv2(image,desired_encoding='bgr8')
        #change perspective
        rows, cols = image.shape[:2]
        if(rows != 240 or cols != 320):
            image = cv2.resize(image,(320,240))
            rows = 240
            cols = 320
        rows-=1
        cols-=1
        src_points = numpy.float32([[0,0],[cols, 0],[0,rows], [cols,rows]])
        dst_points = numpy.float32([[0,10],[cols,10], [int(cols*1/7),rows], [int((cols)*6/7),rows]])
        perspective_matrix = cv2.getPerspectiveTransform(src_points, dst_points)
        image = cv2.warpPerspective(image, perspective_matrix, (cols+1,rows+1))
        image[image==0] = 255
        intersection = False
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _,mask = cv2.threshold(gray,150,255,cv2.THRESH_BINARY)
        h, w, d = image.shape
        search_top = int(8.5*h/10)
        search_bot = int(h) 
        it = 20
        dilate = cv2.dilate(mask,None, iterations=it)
        mask = cv2.erode(dilate,None, iterations=it)
        mask = 255 - mask
        mask[search_bot:h, 0:w] = 0
        mask[0:search_top, 0:w] = 0
        number = numpy.count_nonzero(mask == 255)*1.0
        total = h*w*1.0
        prop = number/total
        #print(number,total,prop)
        font                   = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (100,25)
        fontScale              = 0.5
        fontColor              = (255,255,255)
        lineType               = 2
        cv2.putText(mask, '{0:.2f}'.format(prop),
                    bottomLeftCornerOfText,
                    font,
                    fontScale,
                    fontColor,
                    lineType)
        if(self.stop != None and datetime.now() < self.stop-timedelta(seconds=10)):
            return
        elif(self.stop != None and datetime.now() >= self.stop-timedelta(seconds=10) and datetime.now()<self.stop):
            #print("start moving")
            self.pub.publish("no")
            return
        else:
            self.stop = None
            #print("end turn")
            self.pub.publish("no")
        if(prop >= 0.01 and prop <= 0.045):
            print("intersection detect")
            self.stop = datetime.now()+timedelta(seconds=13)
            self.pub.publish("yes")
        else:
            self.pub.publish("no")

        #cv2.imshow("inter", mask)
        cv2.waitKey(3)
コード例 #58
File: views_shelf2.py Project: maxenergy/goodsdl2
    def get(self, request):

        picurl = request.query_params['picurl']
        x1 = int(request.query_params['x1'])
        y1 = int(request.query_params['y1'])
        x2 = int(request.query_params['x2'])
        y2 = int(request.query_params['y2'])
        if x1 > x2:
            x1, y1, x2, y2 = x2, y2, x1, y1

        x3 = int(request.query_params['x3'])
        y3 = int(request.query_params['y3'])
        x4 = int(request.query_params['x4'])
        y4 = int(request.query_params['y4'])
        if x3 > x4:
            x3, y3, x4, y4 = x4, y4, x3, y3

        # width = int(request.query_params['width'])
        # height = int(width * (math.sqrt((x1-x3)*(x1-x3)+(y1-y3)*(y1-y3))) / math.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)))
        # TODO test for big pic
        height = abs(y1 - y3)  # 800
        width = int(height * math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) *
                                       (y1 - y2)) /
                    math.sqrt((x1 - x3) * (x1 - x3) + (y1 - y3) * (y1 - y3)))

        now = datetime.datetime.now()
        source_image_name = '{}.jpg'.format(now.strftime('%Y%m%d_%H%M%S'))
        media_dir = settings.MEDIA_ROOT
        # fetch the image from picurl
        image_dir = os.path.join(settings.MEDIA_ROOT, settings.DETECT_DIR_NAME,
                                 'shelf', 'rectify')
        if not tf.gfile.Exists(image_dir):
            tf.gfile.MakeDirs(image_dir)
        source_image_path = os.path.join(image_dir, source_image_name)
        urllib.request.urlretrieve(picurl, source_image_path)

        dest_image_name = 'rectify_{}.jpg'.format(
            now.strftime('%Y%m%d_%H%M%S'))
        dest_image_path = os.path.join(image_dir, dest_image_name)
        img = cv2.imread(source_image_path)
        rows, cols = img.shape[:2]
        # the four corner points of the book in the original image
        pts1 = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
        # after the transform: top-left, top-right, bottom-left, bottom-right
        pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
        # build the perspective transform matrix
        M = cv2.getPerspectiveTransform(pts1, pts2)
        # apply the perspective transform
        dst = cv2.warpPerspective(img, M, (width, height))
        cv2.imwrite(dest_image_path, dst)
        ret = {
            'returl':
            os.path.join(settings.MEDIA_URL, settings.DETECT_DIR_NAME, 'shelf',
                         'rectify', dest_image_name)
        }

        return Response(goods.util.wrap_ret(ret), status=status.HTTP_200_OK)
コード例 #59
    ts_img = {}  # initialization of the time-spatial image (TSI)
    tsi_object = {}
    fdiff_tsi = {}  # frame-difference TSI image
    fdiff_view = {}
    masks = {}

    for view in VIEW:
        img = cv2.imread(img_path.format(ses_id, view), 1)

        points = GT['session{}'.format(ses_id)][view]
        corner = get_corner_ground(vp[view]['vp1'], vp[view]['vp2'], points)

        # get rectangular homography mapping
        corner_gt = np.float32(corner)
        corner_wrap = np.float32([[0, 300], [0, 0], [1000, 0], [1000, 300]])
        M[view] = cv2.getPerspectiveTransform(corner_gt, corner_wrap)

        # for initialization
        ts_img[view] = None  # np.zeros ((300, 1000, 3))
        tsi_object[view] = TSIUtil.TSI(M[view], VDL_IDX=0)

        # for 3 frame difference
        prev_img = [None, None]
        prev_tsi = [None, None]

        for i in range(2):
            img = next(fi[view])
            img_color = img.copy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            dst = cv2.warpPerspective(img, M[view], (1000, 300))
コード例 #60
File: Demo_ok.py Project: hemnath-indrasol/Cam_scan
# In[12]:


#Reshape the input corners into an (x, y) list
icorners = []
for corner in corners:
    pt = [corner[0][0], corner[0][1]]
    icorners.append(pt)
icorners = np.float32(icorners)

#Get corresponding output corners from width and height
ocorners = [[width,0], [0,0], [0,height], [width,height]]
ocorners = np.float32(ocorners)

#Get perspective transform matrix
M = cv2.getPerspectiveTransform(icorners, ocorners)

warped = cv2.warpPerspective(img, M, (width, height))


# In[13]:


#Write Results
cv2.imwrite("efile_thresh.jpg", thresh)
cv2.imwrite("efile_morph.jpg", morph)
cv2.imwrite("efile_polygon.jpg", polygon)
cv2.imwrite("efile_warped.jpg", warped)


# In[ ]: