Example #1
def transform_image(image,ang_range,shear_range,trans_range):

    # Rotation
    ang_rot = np.random.uniform(ang_range)-ang_range/2
    rows,cols,ch = image.shape    
    Rot_M = cv2.getRotationMatrix2D((cols/2,rows/2),ang_rot,1)

    # Translation
    tr_x = trans_range*np.random.uniform()-trans_range/2
    tr_y = trans_range*np.random.uniform()-trans_range/2
    Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])

    # Shear
    pts1 = np.float32([[5,5],[20,5],[5,20]])

    pt1 = 5+shear_range*np.random.uniform()-shear_range/2
    pt2 = 20+shear_range*np.random.uniform()-shear_range/2

    pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])

    shear_M = cv2.getAffineTransform(pts1,pts2)
        
    image = cv2.warpAffine(image,Rot_M,(cols,rows))
    image = cv2.warpAffine(image,Trans_M,(cols,rows))
    image = cv2.warpAffine(image,shear_M,(cols,rows))
    
    return image
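A minimal usage sketch for the helper above (the file name is hypothetical):

import cv2
import numpy as np

img = cv2.imread('sign.png')  # any H x W x 3 image
augmented = transform_image(img, ang_range=20, shear_range=10, trans_range=5)
cv2.imwrite('sign_augmented.png', augmented)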
Example #2
def affine_skew(tilt, phi, img, mask=None):
    '''
    affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai

    Ai - is an affine transform matrix from skew_img to img
    '''
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c,-s], [ s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32( np.dot(corners, A.T) )
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8*np.sqrt(tilt*tilt-1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai
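Since Ai maps points from skew_img back to img, coordinates found on the skewed image can be mapped back with cv2.transform; a sketch, assuming img is any image array already loaded:

skew_img, skew_mask, Ai = affine_skew(2.0, 30.0, img)
pts = np.float32([[10, 10], [50, 20]]).reshape(-1, 1, 2)  # points found on skew_img
pts_in_img = cv2.transform(pts, Ai)                       # same points in img coordinates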
Example #3
def getPano(M, img1, img2):
    rows, cols = img2.shape[:2]
    # This is the tricky part. For transform, each point is given as a
    # (col, row) pair instead of (row, col); see the check after this example
    box = numpy.array([[0, 0], [cols - 1, 0], [cols - 1, rows - 1],
                       [0, rows - 1]], dtype=numpy.float32).reshape(-1, 1, 2)
    transformed_box = cv2.transform(box, M)
    min_col = min(transformed_box[:, :, 0])[0]
    min_row = min(transformed_box[:, :, 1])[0]

    if min_col < 0:
        transformed_box[:, :, 0] -= min_col
        M[0, 2] -= min_col

    if min_row < 0:
        transformed_box[:, :, 1] -= min_row
        M[1, 2] -= min_row

    max_col = max(transformed_box[:, :, 0])[0]
    max_row = max(transformed_box[:, :, 1])[0]

    I = numpy.array([[1, 0, 0], [0, 1, 0]], dtype=numpy.float64)
    # dsize must be an integer (width, height) pair
    transformed_img1 = cv2.warpAffine(img1, I, (int(max_col), int(max_row)))
    transformed_img2 = cv2.warpAffine(img2, M, (int(max_col), int(max_row)))
    numpy.copyto(
        transformed_img1, transformed_img2, where=transformed_img1 == 0)

    return transformed_img1
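The (col, row) ordering noted in the comment is easy to verify: cv2.transform treats each point as (x, y), i.e. (col, row). A standalone check:

import cv2
import numpy as np

M = np.float32([[1, 0, 10], [0, 1, 5]])  # shift x (cols) by 10, y (rows) by 5
pt = np.float32([[[3, 4]]])              # one point, given as (col, row)
print(cv2.transform(pt, M))              # [[[13.  9.]]]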
Example #4
def fix_rotation(qr_rect, image, aux_image):
    """Fixes the rotation of the image using the qrcode rectangle. -> cv2.image"""
    actual_down = np.array( [   float(qr_rect[1][0]-qr_rect[0][0]) ,
                                float(qr_rect[1][1]-qr_rect[0][1]) ] )
    actual_down = actual_down/np.linalg.norm(actual_down)
    real_down = np.array([0,1])

    angle = np.arccos(np.dot(actual_down, real_down))

    if np.isnan(angle):
        if (actual_down == real_down).all(): angle = 0.0
        else: angle = np.pi

    if actual_down[0]>0: angle = 2*np.pi-angle
    #calculate the size of the borders to make it square
    w, h = image.shape[::-1]
    if w>h:
        top, bott, left, right = (w-h)//2, (w-h)//2, 0, 0
    else:
        top, bott, left, right = (h-w)//2, (h-w)//2, 0, 0
    #add the borders
    bigger_img = cv2.copyMakeBorder(image,top,bott,left,right,cv2.BORDER_CONSTANT,value=[0,0,0])
    bigger_aux = cv2.copyMakeBorder(aux_image,top,bott,left,right,cv2.BORDER_CONSTANT,value=[0,0,0])
    #calculate the transformation
    w, h = bigger_img.shape[::-1]
    M = cv2.getRotationMatrix2D((w/2,h/2),180*angle/np.pi,1.0)
    #TODO do not use the variable margin here, try to find the real w, h that accounts for the new transformation
    #margin = doc_parameters["margin"]
    return ( cv2.warpAffine(bigger_img,M,(w,h)), cv2.warpAffine(bigger_aux,M,(w,h)) )
Example #5
    def _apply_(self, *image):

        deg = self.params['deg']
        enlarge = self.params['enlarge']
        res = ()
        n_img = 0
        for img in image:
            rows, cols = img.shape[0], img.shape[1]
            flags = self.SetFlag(n_img)
            if enlarge:
                # this part could be better adjusted
                x = int(rows * (2 - 1.414213) / 1.414213)
                y = int(cols * (2 - 1.414213) / 1.414213)

                z = max(x, y)
                big_image = self.enlarge(img, z, z)

                b_rows, b_cols = big_image.shape[0], big_image.shape[1]
                M = cv2.getRotationMatrix2D((b_cols / 2, b_rows / 2), deg, 1)
                dst = cv2.warpAffine(big_image, M, (b_cols, b_rows), flags=flags)

                res += (self.OutputType(dst[z:(z + rows), z:(z + cols)]), )
            else:
                M = cv2.getRotationMatrix2D((cols / 2, rows / 2), deg, 1)
                sub_res = cv2.warpAffine(img, M, (cols, rows), flags=flags)
                res += (self.OutputType(sub_res),)
            n_img += 1

        return res
Example #6
def phasecorr(imlist,imlist2=None,clip=0): #cv  [rowini,rowend,colini,colend]
    import cv2
    cx = 0.0
    cy = 0.0            
    imlist_stb=[]
    if imlist2 is not None:
        imlist2_stb=[]

    imi=0
    im_prev = imlist[0]
    im_denoised_prev = np.float32(restoration.denoise_tv_chambolle(im_prev.astype('uint16'), weight=0.1, multichannel=True)) #ref
    for im in imlist:           
        im_denoised = np.float32(restoration.denoise_tv_chambolle(im.astype('uint16'), weight=0.1, multichannel=True))
        # TODO: set window around phase correlation
        # cv2.phaseCorrelate returns ((shift_x, shift_y), response)
        (dpx, dpy), _ = cv2.phaseCorrelate(im_denoised_prev, im_denoised)
        cx = cx - dpx
        cy = cy - dpy
        xform = np.float32([[1, 0, cx], [0, 1, cy]])
        im_stb = cv2.warpAffine(im.astype('float32'), xform, dsize=(im_denoised.shape[1], im_denoised.shape[0]))
        imlist_stb.append(imclipper(im_stb,clip))

        if imlist2 is not None:
            im2=imlist2[imi]
            im2_stb=cv2.warpAffine(im2.astype('float32'), xform, dsize=(im_denoised.shape[1], im_denoised.shape[0]))
            imlist2_stb.append(imclipper(im2_stb,clip))

        im_denoised_prev = im_denoised
        imi+=1
    if imlist2 is not None:
        return imlist_stb,imlist2_stb
    else:
        return imlist_stb
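A minimal sketch of the underlying call: cv2.phaseCorrelate takes two same-sized float32 arrays and returns the estimated sub-pixel shift plus a response value (sign convention aside, this is what the running cx/cy above accumulates):

a = np.zeros((64, 64), np.float32)
a[20:30, 20:30] = 1.0
b = np.roll(a, (3, 5), axis=(0, 1))            # shift a copy down 3, right 5
(dx, dy), response = cv2.phaseCorrelate(a, b)  # |dx| ~ 5, |dy| ~ 3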
Example #7
    def apply_new_face(self, image, new_face, image_mask, mat, image_size, size):
        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        cv2.warpAffine( new_face, mat, image_size, new_image, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

        outImage = None
        if self.seamless_clone:
            unitMask = numpy.clip( image_mask * 365, 0, 255 ).astype(numpy.uint8)
      
            maxregion = numpy.argwhere(unitMask==255)
      
            if maxregion.size > 0:
              miny,minx = maxregion.min(axis=0)[:2]
              maxy,maxx = maxregion.max(axis=0)[:2]
              lenx = maxx - minx
              leny = maxy - miny
              masky = int(minx+(lenx//2))
              maskx = int(miny+(leny//2))
              outimage = cv2.seamlessClone(new_image.astype(numpy.uint8),base_image.astype(numpy.uint8),unitMask,(masky,maskx) , cv2.NORMAL_CLONE )
              
              return outimage
              
        foreground = cv2.multiply(image_mask, new_image.astype(float))
        background = cv2.multiply(1.0 - image_mask, base_image.astype(float))
        outimage = cv2.add(foreground, background)

        return outimage
Example #8
    def get_image_mask(self, image, new_face, landmarks, mat, image_size):

        face_mask = numpy.zeros(image.shape,dtype=float)
        if 'rect' in self.mask_type:
            face_src = numpy.ones(new_face.shape,dtype=float)
            cv2.warpAffine( face_src, mat, image_size, face_mask, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

        hull_mask = numpy.zeros(image.shape,dtype=float)
        if 'hull' in self.mask_type:
            hull = cv2.convexHull( numpy.array( landmarks ).reshape((-1,2)).astype(int) ).flatten().reshape( (-1,2) )
            cv2.fillConvexPoly( hull_mask,hull,(1,1,1) )

        if self.mask_type == 'rect':
            image_mask = face_mask
        elif self.mask_type == 'facehull':
            image_mask = hull_mask
        else:
            image_mask = face_mask * hull_mask

        if self.erosion_kernel is not None:
            if self.erosion_kernel_size > 0:
                image_mask = cv2.erode(image_mask,self.erosion_kernel,iterations = 1)
            elif self.erosion_kernel_size < 0:
                dilation_kernel = abs(self.erosion_kernel)
                image_mask = cv2.dilate(image_mask,dilation_kernel,iterations = 1)

        if self.blur_size!=0:
            image_mask = cv2.blur(image_mask,(self.blur_size,self.blur_size))

        return image_mask
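The erosion/dilation switch above shrinks or grows the mask by the kernel size; a standalone sketch of the two calls:

kernel = np.ones((3, 3), np.uint8)
mask = np.zeros((9, 9), np.float64)
mask[3:6, 3:6] = 1.0
print(cv2.erode(mask, kernel).sum())   # 1.0  (3x3 block shrinks to its center)
print(cv2.dilate(mask, kernel).sum())  # 25.0 (3x3 block grows to 5x5)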
Example #9
    def _transform(self, imgs):
        bs = imgs.shape[0]
        imgs = imgs.reshape(bs, 3, 32, 32)
        imgs_ = np.zeros_like(imgs)
        for i, img in enumerate(imgs):
            # random flip
            if np.random.randint(2):
                img_ = np.copy(img[:, :, ::-1])
            else:
                img_ = np.copy(img)

            # rotation
            n = np.random.choice(np.arange(-15, 15))
            M = cv2.getRotationMatrix2D((32/2, 32/2), n, 1)
            dst = cv2.warpAffine(img_.transpose(1, 2, 0), M, (32, 32))

            # translation
            M = np.float32([[1,0,np.random.randint(-2, 2)],
                            [0,1,np.random.randint(-2, 2)]])
            dst = cv2.warpAffine(dst, M, (32, 32))

            imgs_[i] = dst.transpose(2, 0, 1)

        imgs_ = imgs_.reshape(bs, 3072)
        return imgs_
Example #10
def generate_image(char_images, num_bg_images):
    """
    Generate image
    :param char_images: Dictionary of Images with all characters (char -> Image)
    :param num_bg_images: Number of background image
    :return: (image, number_plate_text, bounding_box)
    """
    bg = generate_background(num_bg_images)

    plate, plate_mask, code = generate_plate(FONT_HEIGHT, char_images)

    while True:
        M, out_of_bounds = make_affine_transform(
                                from_shape=plate.shape,
                                to_shape=bg.shape,
                                min_scale=0.6/2,
                                max_scale=0.875/2,
                                rotation_variation=0.2,
                                scale_variation=1.5,
                                translation_variation=2.0)
        if not out_of_bounds:
            break
    plate = cv2.warpAffine(plate, M, (bg.shape[1], bg.shape[0]))
    plate_mask = cv2.warpAffine(plate_mask, M, (bg.shape[1], bg.shape[0]))
    bounding_box = bounding_box_from_mask(plate_mask)
    out = plate * plate_mask + bg * (1.0 - plate_mask)
    out = cv2.resize(out, (OUTPUT_SHAPE[1], OUTPUT_SHAPE[0]))
    out += numpy.random.normal(scale=0.05, size=out.shape)
    out = numpy.clip(out, 0., 1.)
    return out, code, bounding_box
Example #11
def faceclone(src_name, dst_name):
    src_img = cv2.imread(src_name)
    dst_img = cv2.imread(dst_name)

    src_rst = api.detection.detect(img = File(src_name), attribute='pose')
    src_img_width   = src_rst['img_width']
    src_img_height  = src_rst['img_height']
    src_face        = src_rst['face'][0]

    dst_rst = api.detection.detect(img = File(dst_name), attribute='pose')
    dst_img_width   = dst_rst['img_width']
    dst_img_height  = dst_rst['img_height']
    dst_face        = dst_rst['face'][0]

    ss = np.array(get_feature_points(src_face, src_img_width, src_img_height), dtype=np.float32)
    ps = np.array(get_feature_points(dst_face, dst_img_width, dst_img_height), dtype=np.float32)
    map_matrix = cv2.getAffineTransform(ps, ss)

    #dsize = (300,300)
    map_result = cv2.warpAffine(dst_img, map_matrix, dsize=(src_img_width,src_img_height))
    
    extract_mask, center = contour.extract_face_mask(src_face['face_id'], src_img_width, src_img_height, src_name)
    # merge 
    ## first blending the border
    extract_alpha = contour.extract_face_alpha(src_face['face_id'], src_img_width, src_img_height, src_name)
    center = (map_result.shape[1]//2, map_result.shape[0]//2)  # (x, y) for seamlessClone
    map_result = cv2.seamlessClone(src_img, map_result, extract_mask, center, flags=cv2.NORMAL_CLONE)

    imap_matrix = cv2.invertAffineTransform(map_matrix)
    final = cv2.warpAffine(map_result, imap_matrix, dsize=(dst_img.shape[1], dst_img.shape[0]))
    return final
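cv2.invertAffineTransform simply inverts the 2x3 matrix, so a point warped by map_matrix and then by imap_matrix lands back where it started; a quick standalone check:

M = cv2.getRotationMatrix2D((50, 50), 30, 1.0)
Mi = cv2.invertAffineTransform(M)
pt = np.float32([[[10, 20]]])
print(cv2.transform(cv2.transform(pt, M), Mi))  # ~[[[10. 20.]]]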
Example #12
def accumulate_transforms(d, A, reverse=False):
    H = [None] * len(A)
    ims = [None] * len(A)
    a_accum = eye(3)
    if reverse:
        A = [inv(a) if a is not None else None for a in A]
        for i, a in reversed(list(enumerate(A))):
            if a is not None:
                a_accum = dot(a_accum,a)
                H[i] = a_accum
                ims[i] = cv2.imread(d[i][2], 0)
    else:
        for i, a in enumerate(A):
            if a is not None:
                a_accum = dot(a_accum,a)
                H[i] = a_accum
                ims[i] = cv2.imread(d[i][2], 0) 
    H_valid = [h for h in H if h is not None]
    
    if None not in H:
        if reverse:
            ims_reg = [cv2.warpAffine(ims[k], H[k][:2, :], ims[-1].shape[::-1]) for k in reversed(range(len(H)))]
        else:
            ims_reg = [cv2.warpAffine(ims[k], H[k][:2, :], ims[0].shape[::-1]) for k in range(len(H))]
    else:
        raise ValueError('None in H')
        
    return H_valid, ims_reg
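Composing warps the way accumulate_transforms expects requires 3x3 homogeneous matrices; a 2x3 OpenCV affine can be lifted by appending the constant bottom row (standalone sketch):

A23 = cv2.getRotationMatrix2D((0, 0), 10, 1.0)  # 2x3 matrix from OpenCV
A33 = np.vstack([A23, [0, 0, 1]])               # 3x3, composable with dot()
combined = A33 @ A33                            # two successive warps
M = combined[:2, :]                             # 2x3 again, usable by cv2.warpAffine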
Example #13
def imagedistort(img):
    '''
    distort an image
        shift horizontally and vertically
        rotated clockwise and anticlockwise
    '''
    shift_range = 0.05
    rotate_range = 0.02

    h, w = img.shape
    size = max(w, h) * 2
    normal = 255 * np.ones((size, size), np.uint8)
    normal[(size - h) // 2: (size + h) // 2, (size - w) // 2: (size + w) // 2] = img

    # rotate
    degree = 90 * random.uniform(-rotate_range, rotate_range)
    M = cv2.getRotationMatrix2D((size/2, size/2), degree, 1)
    rotated = cv2.warpAffine(normal, M, (size, size))

    # shift
    shift_value_x = size / 2 * random.uniform(-shift_range, shift_range)
    shift_value_y = size / 2 * random.uniform(-shift_range, shift_range)
    M = np.float32([[1, 0, shift_value_x], [0, 1, shift_value_y]])
    shift = cv2.warpAffine(rotated, M, (size, size))
    # crop
    center = (size / 2, size / 2)
    crop = cv2.getRectSubPix(shift, (w, h), center) 

    return crop
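cv2.getRectSubPix crops a fixed-size patch around a possibly sub-pixel center, which is what keeps the final crop above simple; a standalone sketch:

img = np.arange(25, dtype=np.float32).reshape(5, 5)
patch = cv2.getRectSubPix(img, (3, 3), (2.0, 2.0))  # 3x3 patch centered at (x=2, y=2)
print(patch)                                        # the middle 3x3 block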
Example #14
def show_image(idx):
    global cur_img, cur_img_path, if_path, files
    if idx < 0 or idx >= len(files):
        return

    fpath = files[idx]
    new_img_path = "%s/%s" % (if_path, fpath)
    if new_img_path == cur_img_path:
        return

    cur_img_path = new_img_path
    cur_img = cv.imread(cur_img_path)

    # Try to get rotation data and if set, rotate image correctly
    try:
        pil_img = PIL.Image.open(cur_img_path)
        rot_code = pil_img._getexif()[274]  # EXIF tag 274 is the Orientation code
        (h, w, _) = cur_img.shape
        if rot_code == 3:
            cur_img = cv.warpAffine(cur_img, cv.getRotationMatrix2D((w / 2.0, h / 2.0), 180, 1.0), (w, h))
        elif rot_code == 6:
            m = min(h, w)
            cur_img = cv.warpAffine(cur_img, cv.getRotationMatrix2D((m / 2.0, m / 2.0), -90, 1.0), (h, w))
        elif rot_code == 8:
            cur_img = cv.warpAffine(cur_img, cv.getRotationMatrix2D((w / 2.0, w / 2.0), 90, 1.0), (h, w))
        pil_img.close()
    except Exception:
        pass

    imshow(fpath, cur_img)

    if fpath in data:
        print("[%03d/%03d] Showing %s (%s)" % (idx, len(files), fpath, "GOOD" if data[fpath] else "BAD"))
    else:
        print("[%03d/%03d] Showing %s" % (idx, len(files), fpath))
    def render(self):
        """Returns the rendered image (after transformation), and the start point of the image in global coordinates"""
        if self.already_rendered:
            # start point is (x, y) = (bbox[0], bbox[2]), matching the final return
            return self.img, np.array([self.bbox[0], self.bbox[2]])

        img = cv2.imread(self.img_path, cv2.IMREAD_ANYDEPTH)
        adjusted_transform = self.transform_matrix[:2].copy()
        adjusted_transform[0][2] -= self.bbox[0]
        adjusted_transform[1][2] -= self.bbox[2]
        
        self.img = cv2.warpAffine(img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
        self.already_rendered = True
        if self.compute_mask:
            mask_img = np.ones(img.shape)
            self.mask = cv2.warpAffine(mask_img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
            self.mask[self.mask > 0] = 1
            self.mask = self.mask.astype(np.uint8)
        if self.compute_distances:
            # The initial weights for each pixel is the minimum from the image boundary
            grid = np.mgrid[0:self.height, 0:self.width]
            weights_img = np.minimum(
                                np.minimum(grid[0], self.height - 1 - grid[0]),
                                np.minimum(grid[1], self.width - 1 - grid[1])
                            ).astype(np.float32)
            self.weights = cv2.warpAffine(weights_img, adjusted_transform, self.shape, flags=cv2.INTER_AREA)
        # Returns the transformed image and the start point
        return self.img, (self.bbox[0], self.bbox[2])
Example #16
def align(im, warp_matrix):

    dic = oib.image_info(im)

    x = int(dic['frame_size_x'])
    y = int(dic['frame_size_y'])
    c = int(dic['channels'])
    z = int(dic['z_steps'])
    t = int(dic['time_frames'])

    with bioformats.ImageReader(im, perform_init=True) as rdr:
        image = np.empty([t, z, y, x, c], np.uint16)
        # read each (time, z, channel) frame once and warp it into place;
        # dsize is (width, height) = (x, y)
        for ci in range(c):
            for ti in range(t):
                for zi in range(z):
                    frame = rdr.read(t=ti, z=zi, c=ci, rescale=False)
                    image[ti, zi, :, :, ci] = cv2.warpAffine(
                        frame, warp_matrix, (x, y),
                        flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    return image
Example #17
def transform_image(img, ang_range, shear_range, trans_range):
    '''
    NOTE: Some parts of this method were borrowed from:
    https://nbviewer.jupyter.org/github/vxy10/SCND_notebooks/blob/master/preprocessing_stuff/img_transform_NB.ipynb
    credit goes to the original author
    '''
    # Rotation
    ang_rot = np.random.uniform(ang_range) - ang_range / 2
    rows, cols, ch = img.shape
    Rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), ang_rot, 1)

    # Translation
    tr_x = trans_range * np.random.uniform() - trans_range / 2
    tr_y = trans_range * np.random.uniform() - trans_range / 2
    Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])

    # Shear
    pts1 = np.float32([[5, 5], [20, 5], [5, 20]])

    pt1 = 5 + shear_range * np.random.uniform() - shear_range / 2
    pt2 = 20 + shear_range * np.random.uniform() - shear_range / 2

    pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])

    shear_M = cv2.getAffineTransform(pts1, pts2)

    img = cv2.warpAffine(img, Rot_M, (cols, rows))
    img = cv2.warpAffine(img, Trans_M, (cols, rows))
    img = cv2.warpAffine(img, shear_M, (cols, rows))

    return img
Example #18
def mirror4(img):
    """
    Create 4 mirrored images and return a merged one.

    img: a gray-scaled image
    """
    height, width = img.shape

    # the upper left
    affine = np.array([[0.5, 0.0, 0.0],
                       [0.0, 0.5, 0.0]])
    img_tmp1 = cv2.warpAffine(img, affine, (width, height))

    # the upper right
    affine = np.array([[-0.5, 0.0, width-1],
                       [ 0.0, 0.5,     0.0]])
    img_tmp2 = cv2.warpAffine(img, affine, (width, height))

    # the lower right
    affine = np.array([[-0.5,  0.0,  width-1],
                       [ 0.0, -0.5, height-1]])
    img_tmp3 = cv2.warpAffine(img, affine, (width, height))

    # the lower left
    affine = np.array([[0.5,  0.0,      0.0],
                       [0.0, -0.5, height-1]])
    img_tmp4 = cv2.warpAffine(img, affine, (width, height))

    return cv2.add(cv2.add(img_tmp1, img_tmp2), cv2.add(img_tmp3, img_tmp4))
Example #19
    def coarse_alignment(t_inv):
        t_ide = np.identity(3, 'float32')[: 2]

        coarse_src_coords = cv2.transform( \
                coarse_trg_coords[:, None], t_inv)[:, 0]

        # Transform target for coarse grid search
        shape = tuple(int(s / d_shift) for s in src_mf.shape)
        trg_mf_t = cv2.warpAffine(trg_mf, t_inv / d_shift, shape[::-1])
        src_mf_t = cv2.warpAffine(src_mf, t_ide / d_shift, shape[::-1])

        # Coarse grid search
        t_corr_list = [libreg.affine_registration.match_template_brute( \
                 get_patch_at(trg_mf_t, \
                     src_coord / d_shift, patch_size / d_shift), \
                 scipy.fftpack.fft2(get_patch_at(src_mf_t, \
                     src_coord / d_shift, shift_space / d_shift)), \
                 rotation=slice(0, 1, 1) if angle_space is None \
                     else slice(-angle_space, +angle_space, d_angle), \
                 logscale_x=slice(0, 1, 1) if scale_space is None \
                     else slice(-scale_space, +scale_space, d_scale), \
                 logscale_y=slice(0, 1, 1) if scale_space is None \
                     else slice(-scale_space, +scale_space, d_scale), \
                 find_translation=libreg.affine_registration \
                     .cross_correlation_fft) \
                for src_coord in coarse_src_coords]
        dx = np.array([np.dot(t[:, :2], \
                (patch_size / d_shift, patch_size / d_shift)) \
              + t[:, 2] - (shift_space / d_shift, shift_space / d_shift) \
              for t, _ in t_corr_list])
        coarse_src_coords += dx * d_shift

        corr = np.array([corr for _, corr in t_corr_list], 'float32')

        return coarse_src_coords, corr
Example #20
    def rotate(self):
        for index, (x, y, w, h) in enumerate(self.boundary):
            roi = self.img[y: y + h, x: x + w]
            thresh = roi.copy()

            angle = 0
            smallest = 999
            row, col = thresh.shape

            for ang in range(-60, 61):
                M = cv2.getRotationMatrix2D((col / 2, row / 2), ang, 1)
                t = cv2.warpAffine(thresh.copy(), M, (col, row))

                r, c = t.shape
                right = 0
                left = 999

                for i in range(r):
                    for j in range(c):
                        if t[i][j] == 255 and left > j:
                            left = j
                        if t[i][j] == 255 and right < j:
                            right = j

                if abs(right - left) <= smallest:
                    smallest = abs(right - left)
                    angle = ang

            M = cv2.getRotationMatrix2D((col / 2, row / 2), angle, 1)
            thresh = cv2.warpAffine(thresh, M, (col, row))
            thresh = cv2.resize(thresh, (20, 20))

            cv2.imwrite('tmp/' + str(index) + '.png', thresh)
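The per-pixel scan above can be replaced by a vectorized width measurement; an equivalent NumPy sketch (same result, far fewer Python-level loops):

ink_cols = np.where((t == 255).any(axis=0))[0]  # columns containing white pixels
width = ink_cols[-1] - ink_cols[0] if ink_cols.size else 0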
Example #21
    def RegisterImageRoi(self, npRoi, moments):
        npRoiReg = npRoi

        # Center of mass.
        if (moments['m00'] != 0.0):
            xCOM  = moments['m10']/moments['m00']
            yCOM  = moments['m01']/moments['m00']
            
            # Rotate the fly image to 0-degrees.
            angleR = self.GetResolvedAngleFiltered()
            T = cv2.getRotationMatrix2D((xCOM,yCOM), -angleR*180.0/np.pi, 1.0)
            npRoiReg = cv2.warpAffine(npRoi, T, (0,0))
        
        
        # Correct for centering error after the transformation.
        momentsRoiReg = cv2.moments(npRoiReg)
        if (momentsRoiReg['m00'] != 0.0):
            xCOM  = momentsRoiReg['m10']/momentsRoiReg['m00']
            yCOM  = momentsRoiReg['m01']/momentsRoiReg['m00']
            
            # Rotate the fly image to 0-degrees.
            T = np.array([[1, 0, float(self.params['tracking']['roi']['width'])/2.0-xCOM],
                         [0, 1, float(self.params['tracking']['roi']['height'])/2.0-yCOM]], dtype=float)
            npRoiReg = cv2.warpAffine(npRoiReg, T, (0,0))

            
        return npRoiReg
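The centroid formula used twice above (m10/m00, m01/m00), applied to a toy array as a standalone sketch:

m = cv2.moments(np.float32([[0, 0, 0], [0, 1, 0], [0, 0, 0]]))
print(m['m10'] / m['m00'], m['m01'] / m['m00'])  # 1.0 1.0 (the center pixel)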
Example #22
    def onestep(self):
        logger = logging.getLogger()
        frame0 = self.rawframe = cv2.cvtColor(np.array(self.frames[self.params.skip]), cv2.COLOR_RGB2BGR)
        h,w = frame0.shape[0:2]
        crop = [self.params.crop[0]*w//1000,
                self.params.crop[1]*w//1000,
                self.params.crop[2]*h//1000,
                self.params.crop[3]*h//1000]
        wc = crop[1] - crop[0]
        hc = crop[3] - crop[2]
        out   =  cv2.VideoWriter(self.params.filename+".sr.m4v",cv2.VideoWriter_fourcc('m','p','4','v'), 30, (wc,hc))
        cropped = frame0[crop[2]:crop[3],
                         crop[0]:crop[1], :]
        out.write(cropped)

        dx = 0
        dy = 0
        f = self.params.skip
        while True:
            newframe = cv2.cvtColor(np.array(self.frames[f]), cv2.COLOR_RGB2BGR)
            f += 1
            if 0 < self.params.last <= f:
                return
            d = pass1.motion(newframe, frame0, focus=self.params.focus, maxaccel=self.params.maxaccel, delta=(dx,dy))
            if d is not None:
                dx, dy = d
            logger.info("{2} Delta: {0} {1}".format(dx,dy, f))
            affine = np.float32([[1.0, 0.0, -dx], [0.0, 1.0, -dy]])
            h,w = frame0.shape[0:2]
            cv2.warpAffine(newframe, affine, (w,h), newframe)
            cropped = newframe[crop[2]:crop[3],
                               crop[0]:crop[1], :]
            out.write(cropped)
            yield newframe
Example #23
def rotate(img, angle, crop):
  h,w = img.shape[:2]

  if crop:
    # Calculate the rotation matrix then apply it
    M = cv2.getRotationMatrix2D((w/2,h/2), angle, 1) # (center(x,y),angle,scale)
    rtImg = cv2.warpAffine(img,M,(w,h))    
    return rtImg
  
  else:
    # Calculate the size of the canvas
    r = int(math.sqrt(h*h + w*w)) + 1
    imgC = addCanvas(img,r*2)
    hC,wC = imgC.shape[:2]
    
    # Calculate the rotation matrix then apply it
    M = cv2.getRotationMatrix2D((wC/2,hC/2), angle, 1) # (center(x,y),angle,scale)
    rtImg = cv2.warpAffine(imgC,M,(wC,hC))
    
    relativeCorners = getVertices(h,w, math.radians(angle))
    center = (wC/2,hC/2)
    realCorners = [(corner[0]+center[0] , corner[1]+center[1]) for corner in relativeCorners]
    print(realCorners)
    print(relativeCorners)
    
    box = surroundingBox(realCorners[0], realCorners[1], realCorners[2], realCorners[3])
    
#     cv2.rectangle(rtImg,(box[0],box[2]), (box[1],box[3]), (0,255,0),3) 
#     for vertex in realCorners:
#         cv2.circle(rtImg, vertex, 20, (0, 255, 0),5)   
    
    # crop the redundant canvas
    rtImg = rtImg[box[2]:box[3],box[0]:box[1]]
    return rtImg
Example #24
def GetNormalPic(imgm):
    global HEIGHT
    global WIDTH
    img = ReadImg(imgm)
    #img1 = cv2.imread(filename)  # re-read the image, otherwise the shifts below would go wrong
    img1 = imgm.copy()
    A = img.shape  # e.g. a 320x240 image: rows=240, cols=320
    rows = A[0]
    cols = A[1]
    Cx,Cy,cnt = GetHeadMeans(img)  #GetMeans(img)  # centroid, e.g. cx=278 (cols), cy=127 (rows)
    # find the topmost and bottommost points
    topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
    bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])
    # the person's height in rows
    Height = bottommost[1] - topmost[1]
    New_X = int(Cx - Height / 3.0)      # x coordinate where the crop starts
    NewWidth = int(Height * 2.0 / 3.0)  # width of the cropped person
    M = np.float32([[1,0,0],[0,1,0]])   # initialize the translation matrix
    if New_X + NewWidth > cols:         # need to shift left
        delt_X = cols - (New_X + NewWidth)
        M[0][2] = float(delt_X)
        dst = cv2.warpAffine(img1, M, (cols,rows))
        Person = dst[topmost[1]:bottommost[1], (New_X+delt_X):cols]  # y, x range of the person to crop
    elif New_X < 0:                     # need to shift right
        M[0][2] = float(abs(New_X))
        dst = cv2.warpAffine(img1, M, (cols,rows))
        Person = dst[topmost[1]:bottommost[1], 0:NewWidth]
    else:                               # no shift needed
        dst = img1
        Person = dst[topmost[1]:bottommost[1], New_X:(New_X+NewWidth)]
    res = cv2.resize(Person, (WIDTH,HEIGHT))

    #CxNew,CyNew,cntNew = GetHeadMeans(res)

    return res
Example #25
    def translationOp(self):
        # read image
        im = cv2.imread(self.Image)

        # NOTE: Translating (shifting) an image is given by a NumPy matrix in
        # the form:
        # [[1, 0, shiftX], [0, 1, shiftY]]
        # You simply need to specify how many pixels you want to shift the image
        # in the X and Y direction -- let's translate the image 25 pixels to the
        # right and 50 pixels down
        M = np.float32([[1, 0, 25], [0, 1, 50]])
        shifted = cv2.warpAffine(im, M, (im.shape[1], im.shape[0]))

        # show shifted image and wait for key press
        cv2.imshow("Image", shifted)
        cv2.waitKey(0)

        # now, let's shift the image 50 pixels to the left and 90 pixels up, we
        # accomplish this using negative values
        M = np.float32([[1, 0, -50], [0, 1, -90]])
        shifted = cv2.warpAffine(im, M, (im.shape[1], im.shape[0]))
        cv2.imshow("Shifted Up and Left", shifted)
        cv2.waitKey(0)

        # shift down
        shifted = imutils.translate(im, 0, 100)
        cv2.imshow("shifted down", shifted)
        cv2.waitKey(0)

        return
Example #26
def warp(frame,pitch=0):
    if pitch == 0: 
        M = cv2.getRotationMatrix2D((COLS/2, ROWS/2), 1, 1)
        return cv2.warpAffine(frame, M, (COLS, ROWS))
    else:
        M = cv2.getRotationMatrix2D((COLS/2, ROWS/2), 0, 1)
        return cv2.warpAffine(frame, M, (COLS, ROWS))
Example #27
def task2(img_name, sharpen_factor, h_factor, w_factor, window, threshold, step):
    
    start_time = time.perf_counter()
    
    # read and split the image
    img = cv2.imread(img_name)
    sub_height = int(img.shape[0] / 3)
    blue = img[0:sub_height,:,0]
    green = img[sub_height:sub_height*2,:,1]
    red = img[sub_height*2:sub_height*3,:,2]
    
    # normalization
    normalized_blue = cv2.normalize(blue, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    normalized_green = cv2.normalize(green, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    normalized_red = cv2.normalize(red, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    
    # blue and green
    rows, cols = blue.shape
    b_h, b_w, g_h, g_w, best_match = get_best_match_pyramid(blue, green, sharpen_factor, h_factor, w_factor, window, threshold, step)    
    print "Best match for blue and green : " + str([b_h, b_w, g_h, g_w])
    changed_green = cv2.warpAffine(green, np.float32([[1,0,b_w-g_w],[0,1,b_h-g_h]]), (cols, rows))
    
    # blue and red
    b_h, b_w, r_h, r_w, best_match = get_best_match_pyramid(blue, red, sharpen_factor, h_factor, w_factor, window, threshold, step)
    print "Best match for blue and red : " + str([b_h, b_w, r_h, r_w])
    changed_red = cv2.warpAffine(red, np.float32([[1,0,b_w-r_w],[0,1,b_h-r_h]]), (cols, rows))
    
    # merge the images
    merged_img = cv2.merge([blue, changed_green, changed_red])
    
    end_time = time.perf_counter()
    print("Running time : " + str(end_time-start_time))
    
    return merged_img
Example #28
def stitch_horizontal(img1,img2):
	""" Calls functions for stitching horizontally. """

	#img1 = warp.spherical_warp(img1, f)
	#img2 = warp.spherical_warp(img2, f)
	# Call matches for extracting inliers and homography
	H = matching.matches(img1,img2)
	print('----------')
	# Try to calculate the stitched image size
	h2,w2 = img2.shape[:2]
	tx = H[0,2]
	ty = H[1,2]
	#h = int(round(h2 + ty)*1.05)+500
	#w = int(round(w2 + tx)*1.09)+500
	h = int(round(h2 + 100))
	w = int(round(w2*1.6))	
	H = np.array([[1,0,tx],[0,1,0],[0,0,1]], dtype=float)
	img2_warped = cv2.warpPerspective(img2,H,(w,h))
	#affine transformation matrix, the picture should align if H is ok
	mat = np.array([[1, 0, 0], [0, 1, 0]], dtype=float)
	im1w = cv2.warpAffine(img1,mat,(w,h))
	im2w = cv2.warpAffine(img2_warped,mat,(w,h))
	# Find a seam between the two images
	A = stitcher.stitch_horizontal(im1w,im2w,w2)

	return A
Example #29
def rotate(image, image_thresh):
    rows,cols = image.shape
    angle = random.randint(-rotate_range, rotate_range)
    M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
    image = cv2.warpAffine(image,M,(cols,rows), borderValue=random.randint(0,255))
    image_thresh = cv2.warpAffine(image_thresh,M,(cols,rows), borderValue=255)
    return image, image_thresh
Example #30
    def create_mask(self, shape, type=MASK_SHAPE_T, overlay_points=None,
                    feather_amount=11, color=1.0, scale=1.0, transform_matrix = None):

        mask = numpy.zeros(self.steps["original"].shape[:2], dtype=numpy.float64)

        for group in overlay_points:
            self.draw_convex_hull(mask,
                             self.landmarks[group],
                             color=color)

        mask = numpy.array([mask, mask, mask]).transpose((1, 2, 0))

        mask = (cv2.GaussianBlur(mask, (feather_amount, feather_amount), 0) > 0) * 1.0
        mask = cv2.GaussianBlur(mask, (feather_amount, feather_amount), 0)

        self.steps["mask"] = mask
        # self.steps["mask_rgb"] = cv2.cvtColor(mask, cv.CV_GRAY2RGB)

        if transform_matrix is not None:
            transformed_mask = numpy.zeros(shape, dtype=mask.dtype)
            cv2.warpAffine(mask,
                           transform_matrix[:2],
                           (shape[1], shape[0]),
                           dst=transformed_mask,
                           borderMode=cv2.BORDER_TRANSPARENT,
                           flags=cv2.WARP_INVERSE_MAP
                           )

            self.steps["mask"] = transformed_mask
Example #31
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os

os.chdir('C:\\Users\\user\\Desktop\\opencv')

img = cv2.imread('go.jpg')
rows, cols = img.shape[:2]

M = np.float32([[1, 0, 150], [0, 1, 150]])
dst = cv2.warpAffine(img, M, (cols, rows))
# translate by (150, 150)
# the third argument is the output image size (width, height)

plt.subplot(121), plt.imshow(img), plt.title('Original')
plt.subplot(122), plt.imshow(dst), plt.title('warpAffine')
plt.show()
Example #32
    def extract_image_chips(self, img, points, desired_size=256, padding=0):
        """
            crop and align face
        Parameters:
        ----------
            img: numpy array, bgr order of shape (1, 3, n, m)
                input image
            points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ..y5)
            desired_size: default 256
            padding: default 0
        Returns:
        -------
            crop_imgs: list, n
                cropped and aligned faces
        """
        crop_imgs = []
        for p in points:
            shape = []
            for k in range(len(p) // 2):
                shape.append(p[k])
                shape.append(p[k + 5])

            padding = max(padding, 0)
            # average positions of face points
            mean_face_shape_x = [
                0.224152, 0.75610125, 0.490127, 0.254149, 0.726104
            ]
            mean_face_shape_y = [
                0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233
            ]

            from_points = []
            to_points = []

            for i in range(len(shape) // 2):
                x = (padding + mean_face_shape_x[i]) / (2 * padding +
                                                        1) * desired_size
                y = (padding + mean_face_shape_y[i]) / (2 * padding +
                                                        1) * desired_size
                to_points.append([x, y])
                from_points.append([shape[2 * i], shape[2 * i + 1]])

            # convert the points to Mat
            from_mat = self.list2colmatrix(from_points)
            to_mat = self.list2colmatrix(to_points)

            # compute the similarity transform
            tran_m, tran_b = self.find_tfrom_between_shapes(from_mat, to_mat)

            probe_vec = np.matrix([1.0, 0.0]).transpose()
            probe_vec = tran_m * probe_vec

            scale = np.linalg.norm(probe_vec)
            angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0,
                                                                            0])

            from_center = [(shape[0] + shape[2]) / 2.0,
                           (shape[1] + shape[3]) / 2.0]
            to_center = [0, 0]
            to_center[1] = desired_size * 0.4
            to_center[0] = desired_size * 0.5

            ex = to_center[0] - from_center[0]
            ey = to_center[1] - from_center[1]

            rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]),
                                              -1 * angle, scale)
            rot_mat[0][2] += ex
            rot_mat[1][2] += ey

            chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))
            crop_imgs.append(chips)

        return crop_imgs
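The probe-vector trick above recovers scale and angle from the 2x2 part of a similarity transform; a quick check with a known rotation and scale (standalone sketch):

theta, s = 30.0, 1.5
R = s * np.array([[np.cos(np.radians(theta)), -np.sin(np.radians(theta))],
                  [np.sin(np.radians(theta)),  np.cos(np.radians(theta))]])
v = R @ np.array([1.0, 0.0])               # probe vector (1, 0)
print(np.linalg.norm(v),                   # 1.5  (scale)
      np.degrees(np.arctan2(v[1], v[0])))  # 30.0 (angle)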
Example #33
    def predict(self, car_pic):
        if type(car_pic) == type(""):
            img = imreadex(car_pic)
        else:
            img = car_pic
        pic_hight, pic_width = img.shape[:2]

        if pic_width > MAX_WIDTH:
            resize_rate = MAX_WIDTH / pic_width
            img = cv2.resize(img, (MAX_WIDTH, int(pic_hight * resize_rate)),
                             interpolation=cv2.INTER_AREA)

        blur = self.cfg["blur"]
        # Gaussian denoising
        if blur > 0:
            img = cv2.GaussianBlur(img, (blur, blur), 0)  # adjusted to the image resolution
        oldimg = img
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # remove regions that cannot be a license plate
        kernel = np.ones((20, 20), np.uint8)
        img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img_opening = cv2.addWeighted(img, 1, img_opening, -1, 0)

        # find the edges in the image
        ret, img_thresh = cv2.threshold(img_opening, 0, 255,
                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        img_edge = cv2.Canny(img_thresh, 100, 200)
        # use opening and closing so the edges merge into solid regions
        kernel = np.ones((self.cfg["morphologyr"], self.cfg["morphologyc"]),
                         np.uint8)
        img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)
        img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)

        # find the rectangular regions formed by the edges; there may be many, and the plate is inside one of them
        try:
            contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        except ValueError:
            image, contours, hierarchy = cv2.findContours(
                img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = [cnt for cnt in contours if cv2.contourArea(cnt) > Min_Area]
        #print('len(contours)', len(contours))
        # rule out rectangles that are not license plates, one by one
        car_contours = []
        for cnt in contours:
            rect = cv2.minAreaRect(cnt)
            area_width, area_height = rect[1]
            if area_width < area_height:
                area_width, area_height = area_height, area_width
            wh_ratio = area_width / area_height
            # print(wh_ratio)
            # require an aspect ratio between 2 and 5.5 (the ratio of a plate); exclude other rectangles
            if wh_ratio > 2 and wh_ratio < 5.5:
                car_contours.append(rect)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
            # oldimg = cv2.drawContours(oldimg, [box], 0, (0, 0, 255), 2)
            # cv2.imshow("edge4", oldimg)
            # print(rect)

        #print(len(car_contours))

        #print("精确定位")
        card_imgs = []
        # the rectangles may be tilted and must be rectified before color-based localization
        for rect in car_contours:
            if rect[2] > -1 and rect[2] < 1:  # force an angle so left/top/right/bottom get the right values
                angle = 1
            else:
                angle = rect[2]
            rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle
                    )  # enlarge the range so the plate border is not cut off

            box = cv2.boxPoints(rect)
            heigth_point = right_point = [0, 0]
            left_point = low_point = [pic_width, pic_hight]
            for point in box:
                if left_point[0] > point[0]:
                    left_point = point
                if low_point[1] > point[1]:
                    low_point = point
                if heigth_point[1] < point[1]:
                    heigth_point = point
                if right_point[0] < point[0]:
                    right_point = point

            if left_point[1] <= right_point[1]:  # positive angle
                new_right_point = [right_point[0], heigth_point[1]]
                pts2 = np.float32([left_point, heigth_point,
                                   new_right_point])  # only the character height needs to change
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                point_limit(new_right_point)
                point_limit(heigth_point)
                point_limit(left_point)
                card_img = dst[int(left_point[1]):int(heigth_point[1]),
                               int(left_point[0]):int(new_right_point[0])]
                card_imgs.append(card_img)
            # cv2.imshow("card", card_img)
            # cv2.waitKey(0)
            elif left_point[1] > right_point[1]:  # negative angle

                new_left_point = [left_point[0], heigth_point[1]]
                pts2 = np.float32([new_left_point, heigth_point,
                                   right_point])  # only the character height needs to change
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                point_limit(right_point)
                point_limit(heigth_point)
                point_limit(new_left_point)
                card_img = dst[int(right_point[1]):int(heigth_point[1]),
                               int(new_left_point[0]):int(right_point[0])]
                card_imgs.append(card_img)
            # cv2.imshow("card", card_img)
            # cv2.waitKey(0)
        # start color-based localization to rule out non-plate rectangles;
        # currently only blue, green and yellow plates are recognized

        # the code below recognizes the characters on the plate
        predict_result = []
        roi = None
        card_color = None
        color = "blue"
        for card_img in card_imgs:
            gray_img = cv2.cvtColor(card_img, cv2.COLOR_BGR2GRAY)
            # on yellow/green plates the characters are darker than the background (the opposite of blue plates), so invert them
            if color == "green" or color == "yello":
                gray_img = cv2.bitwise_not(gray_img)
            ret, gray_img = cv2.threshold(gray_img, 0, 255,
                                          cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            # find the peaks of the horizontal histogram
            x_histogram = np.sum(gray_img, axis=1)
            x_min = np.min(x_histogram)
            x_average = np.sum(x_histogram) / x_histogram.shape[0]
            x_threshold = (x_min + x_average) / 2
            wave_peaks = find_waves(x_threshold, x_histogram)
            if len(wave_peaks) == 0:
                # print("peak less 0:")
                continue
            # assume the widest horizontal peak is the plate region
            wave = max(wave_peaks, key=lambda x: x[1] - x[0])
            gray_img = gray_img[wave[0]:wave[1]]
            # find the peaks of the vertical histogram
            row_num, col_num = gray_img.shape[:2]
            # drop one pixel from the top and bottom edges so white borders do not skew the threshold
            gray_img = gray_img[1:row_num - 1]
            y_histogram = np.sum(gray_img, axis=0)
            y_min = np.min(y_histogram)
            y_average = np.sum(y_histogram) / y_histogram.shape[0]
            y_threshold = (y_min + y_average) / 5  # U and 0 need a smaller threshold, otherwise they get split in half

            wave_peaks = find_waves(y_threshold, y_histogram)

            # for wave in wave_peaks:
            #	cv2.line(card_img, pt1=(wave[0], 5), pt2=(wave[1], 5), color=(0, 0, 255), thickness=2)
            # a plate should have more than 6 character peaks
            if len(wave_peaks) <= 6:
                # print("peak less 1:", len(wave_peaks))
                continue

            wave = max(wave_peaks, key=lambda x: x[1] - x[0])
            max_wave_dis = wave[1] - wave[0]
            # check whether this is the left edge of the plate
            if wave_peaks[0][1] - wave_peaks[0][
                    0] < max_wave_dis / 3 and wave_peaks[0][0] == 0:
                wave_peaks.pop(0)

            # merge the separated strokes of the Chinese character
            cur_dis = 0
            for i, wave in enumerate(wave_peaks):
                if wave[1] - wave[0] + cur_dis > max_wave_dis * 0.6:
                    break
                else:
                    cur_dis += wave[1] - wave[0]
            if i > 0:
                wave = (wave_peaks[0][0], wave_peaks[i][1])
                wave_peaks = wave_peaks[i + 1:]
                wave_peaks.insert(0, wave)

            # remove the separator dot on the plate
            point = wave_peaks[2]
            if point[1] - point[0] < max_wave_dis / 3:
                point_img = gray_img[:, point[0]:point[1]]
                if np.mean(point_img) < 255 / 5:
                    wave_peaks.pop(2)

            if len(wave_peaks) <= 6:
                # print("peak less 2:", len(wave_peaks))
                continue
            part_cards = seperate_card(gray_img, wave_peaks)
            for i, part_card in enumerate(part_cards):
                # might be a rivet holding the plate
                if np.mean(part_card) < 255 / 5:
                    # print("a point")
                    continue
                part_card_old = part_card
                w = abs(part_card.shape[1] - SZ) // 2

                part_card = cv2.copyMakeBorder(part_card,
                                               0,
                                               0,
                                               w,
                                               w,
                                               cv2.BORDER_CONSTANT,
                                               value=[0, 0, 0])
                part_card = cv2.resize(part_card, (SZ, SZ),
                                       interpolation=cv2.INTER_AREA)

                # part_card = deskew(part_card)
                part_card = preprocess_hog([part_card])
                if i == 0:
                    resp = self.modelchinese.predict(part_card)
                    charactor = provinces[int(resp[0]) - PROVINCE_START]
                else:
                    resp = self.model.predict(part_card)
                    charactor = chr(int(resp[0]))
                # check whether the last character is really the plate edge, which tends to be misread as "1"
                if charactor == "1" and i == len(part_cards) - 1:
                    if part_card_old.shape[0] / part_card_old.shape[
                            1] >= 7:  # too narrow for a "1"; treat it as the edge
                        continue
                predict_result.append(charactor)
            roi = card_img
            card_color = color
            break
        return predict_result, roi, card_color  # recognized characters, located plate image, plate color
Example #34
def warp_and_crop_face(src_img,
                       facial_pts,
                       reference_pts=None,
                       crop_size=(96, 112),
                       align_type='similarity'):
    """
    Function:
    ----------
        apply affine transform 'trans' to uv
    Parameters:
    ----------
        @src_img: np.array
            input image
        @facial_pts: could be
            1)a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x,y)
        or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
                    by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform
    Returns:
    ----------
        @face_img: output face image with size (w, h) = @crop_size
    """

    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(
                output_size, inner_padding_factor, outer_padding,
                default_square)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException(
            'reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException(
            'facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    #    #print('--->src_pts:\n', src_pts
    #    #print('--->ref_pts\n', ref_pts

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException(
            'facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    #        #print(('cv2.getAffineTransform() returns tfm=\n' + str(tfm))
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    #        #print(('get_affine_transform_matrix() returns tfm=\n' + str(tfm))
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)
    #        #print(('get_similarity_transform_for_cv2() returns tfm=\n' + str(tfm))

    #    #print('--->Transform matrix: '
    #    #print(('type(tfm):' + str(type(tfm)))
    #    #print(('tfm.dtype:' + str(tfm.dtype))
    #    #print( tfm

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img
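A typical call, assuming five facial landmarks in (x, y) order (the landmark values and src_img here are hypothetical):

facial5 = [[30.3, 51.7], [65.5, 51.5], [48.0, 71.7], [33.5, 92.4], [62.7, 92.2]]
aligned = warp_and_crop_face(src_img, facial5, crop_size=(96, 112),
                             align_type='cv2_affine')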
Example #35
def translate(image, x, y):
    M = np.float32([[1, 0, x], [0, 1, y]])
    shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
    return shifted
Example #36
img_move[0:height, 0:n] = im[0:height, -n:]

# mirror the image
img_mirr = np.zeros([height * 2, width, deep], np.uint8)
for i in range(height):
    for j in range(width):
        img_mirr[i, j] = im[i, j]
        img_mirr[height * 2 - i - 1, j] = im[i, j]
for i in range(width):
    img_mirr[height, i] = (0, 0, 255)

# resize the image
reheight = int(height * 1.2)
rewidth = int(width * 1.1)
img_resize = cv2.resize(im, (rewidth, reheight))  # dsize is (width, height)

# rotate the image

matRotate = cv2.getRotationMatrix2D((height * 0.5, width * 0.6), 45,
                                    1)  # rotation matrix: 1 center, 2 angle, 3 scale factor
img_cir = cv2.warpAffine(im, matRotate, (width, height))  # dsize is (width, height)

# 展示不同的图片
titles = ['img', 'move', 'mirror', 'resize', 'circle']
imgs = [im, img_move, img_mirr, img_resize, img_cir]
for i in range(5):
    plt.subplot(2, 3, i + 1)  # note: as in MATLAB, subplot indices start at 1, not 0
    plt.imshow(imgs[i])
    plt.title(titles[i])
plt.show()
Example #37
    def load_data(self, is_train, repeat, mirror=None):
        # decide whether mirror augmentation is needed
        if (mirror is not None):
            # read the file with the mirrored landmark ordering; it has a single line
            with open(mirror, 'r') as f:
                lines = f.readlines()
                assert len(lines) == 1
                mirror_idx = lines[0].strip().split(',')
                # store the landmark index order after mirroring
                mirror_idx = list(map(int, mirror_idx))
        # minimum of the original landmark coordinates (one corner of the extent)
        xy = np.min(self.landmark, axis=0).astype(np.int32)
        # maximum of the original landmark coordinates (the opposite corner)
        zz = np.max(self.landmark, axis=0).astype(np.int32)
        # width and height of the landmark extent
        wh = zz - xy + 1
        # center of the landmarks (center_x, center_y)
        center = (xy + wh / 2).astype(np.int32)
        img = cv2.imread(self.path)
        # enlarge the ROI by 0.2 (boxsize = longest side * 1.2)
        boxsize = int(np.max(wh) * 1.2)
        # top-left point of the expanded box
        xy = center - boxsize // 2
        # expanded top-left point
        x1, y1 = xy
        # expanded bottom-right point
        x2, y2 = xy + boxsize
        # image height and width
        height, width, _ = img.shape
        # check whether the box extends past the original image
        dx = max(0, -x1)
        dy = max(0, -y1)
        # avoid negative coordinates
        x1 = max(0, x1)
        y1 = max(0, y1)
        # check for overflow on the right and bottom
        edx = max(0, x2 - width)
        edy = max(0, y2 - height)
        # clamp the bbox to the original image
        x2 = min(width, x2)
        y2 = min(height, y2)
        # crop the bbox region (now guaranteed to lie inside the image)
        imgT = img[y1:y2, x1:x2]
        # if the box overflowed, pad the crop with zeros (cv2.BORDER_CONSTANT)
        if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
            imgT = cv2.copyMakeBorder(imgT, dy, edy, dx, edx,
                                      cv2.BORDER_CONSTANT, 0)
        # special case: the landmark extent has zero width or height (for inspection):
        if imgT.shape[0] == 0 or imgT.shape[1] == 0:
            imgTT = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            for x, y in (self.landmark + 0.5).astype(np.int32):
                cv2.circle(imgTT, (x, y), 1, (0, 0, 255))
            cv2.imshow('0', imgTT)
            if cv2.waitKey(0) == 27:
                exit()
        # resize to image_size (default 112); it must be square, which matters for the landmarks
        imgT = cv2.resize(imgT, (self.image_size, self.image_size))
        # normalize the landmark coordinates; the bbox is square
        landmark = (self.landmark - xy) / boxsize
        # check that the coordinates satisfy 0 <= x, y <= 1
        assert (landmark >= 0).all(), str(landmark) + str([dx, dy])
        assert (landmark <= 1).all(), str(landmark) + str([dx, dy])
        # append to the image set
        self.imgs.append(imgT)
        # append to the landmark set
        self.landmarks.append(landmark)

        if is_train:
            while len(self.imgs) < repeat:
                # random rotation in (-30, 30) degrees around a random center
                angle = np.random.randint(-30, 30)
                # the face-box center was computed above
                cx, cy = center
                # randomly offset the center point and use it as the rotation center
                cx = cx + int(np.random.randint(-boxsize * 0.1, boxsize * 0.1))
                cy = cy + int(np.random.randint(-boxsize * 0.1, boxsize * 0.1))
                # compute the transform matrix and the transformed landmarks
                M, landmark = rotate(angle, (cx, cy), self.landmark)
                # warp the image with the matrix: input image, M: transform matrix, dsize: output size
                imgT = cv2.warpAffine(
                    img, M, (int(img.shape[1] * 1.1), int(img.shape[0] * 1.1)))

                # np.ptp(axis=0) is the per-column max-min range, giving the new width and height
                wh = np.ptp(landmark, axis=0).astype(np.int32) + 1
                # np.ceil rounds up; expand by 1.25x, then pick a random size
                size = np.random.randint(int(np.min(wh)),
                                         np.ceil(np.max(wh) * 1.25))
                # compute the new top-left corner
                xy = np.asarray((cx - size // 2, cy - size // 2),
                                dtype=np.int32)
                # normalize the landmark coordinates
                landmark = (landmark - xy) / size
                # because of the randomness, recompute if the coordinates fall outside [0, 1]
                if (landmark < 0).any() or (landmark > 1).any():
                    continue

                x1, y1 = xy
                x2, y2 = xy + size
                height, width, _ = imgT.shape
                dx = max(0, -x1)
                dy = max(0, -y1)
                x1 = max(0, x1)
                y1 = max(0, y1)

                edx = max(0, x2 - width)
                edy = max(0, y2 - height)
                x2 = min(width, x2)
                y2 = min(height, y2)

                imgT = imgT[y1:y2, x1:x2]
                if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
                    imgT = cv2.copyMakeBorder(imgT, dy, edy, dx, edx,
                                              cv2.BORDER_CONSTANT, 0)
                # resize 112 X 112
                imgT = cv2.resize(imgT, (self.image_size, self.image_size))
                # random horizontal flip
                if mirror is not None and np.random.choice((True, False)):
                    # flip the landmark x coordinates
                    landmark[:, 0] = 1 - landmark[:, 0]
                    # mirror_idx is the landmark ordering after mirroring
                    landmark = landmark[mirror_idx]
                    # flip the image horizontally
                    imgT = cv2.flip(imgT, 1)
                # append to the image set
                self.imgs.append(imgT)
                # append to the landmark set
                self.landmarks.append(landmark)
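The rotate() helper called in the training loop above is not included in this snippet. A minimal sketch consistent with its call site (it must return the 2x3 affine matrix plus the landmarks mapped through it; the original implementation may differ):

import cv2
import numpy as np

def rotate(angle, center, landmarks):
    # 2x3 rotation matrix about `center`, angle in degrees, no scaling
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    # map each (x, y) landmark through the affine transform
    ones = np.ones((landmarks.shape[0], 1))
    pts = np.hstack([landmarks, ones])  # (N, 3) homogeneous coordinates
    return M, pts @ M.T                 # matrix and transformed (N, 2) points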
def preprocess_tsa_data():

    # OPTION 1: get a list of all subjects for which there are labels
    df = pd.read_csv(STAGE1_LABELS)
    df[['Subject', 'Zone']] = df['Id'].str.split('_', n=1, expand=True)
    SUBJECT_LIST = df['Subject'].unique()

    # OPTION 2: get a list of all subjects for whom there is data
    # SUBJECT_LIST = [os.path.splitext(subject)[0] for subject in os.listdir(INPUT_FOLDER)]
    #print(len(SUBJECT_LIST))
    #print(SUBJECT_LIST)

    # OPTION 3: get a list of subjects for small bore test purposes
    #SUBJECT_LIST = ['00360f79fd6e02781457eda48f85da90','0043db5e8c819bffc15261b1f1ac5e42',
    #                '0050492f92e22eed3474ae3a6fc907fa','006ec59fa59dd80a64c85347eef810c7',
    #                '0097503ee9fa0606559c56458b281a08','011516ab0eca7cad7f5257672ddde70e',
    #                '47e2a4a8e13ec7100f6af8cd839d1bb3','e087226320cc189142228b5fb93ed58f']

    # initialize tracking and saving items
    batch_num = 1
    count = 0
    threat_zone_examples = []
    start_time = timer()
    print(len(SUBJECT_LIST))
    for subject in SUBJECT_LIST:
        count += 1
        # read in the images
        print('--------------------------------------------------------------')
        print('t+> {:5.3f} |Reading images for subject #: {}'.format(timer()-start_time,
                                                                     subject))
        print('--------------------------------------------------------------')
        images = tsa.read_data(INPUT_FOLDER + '/' + subject + '.aps')

        # transpose so that the slice is the first dimension shape(16, 620, 512)
        images = images.transpose()

        # for each threat zone, loop through each image, mask off the zone and then crop it
        for tz_num, threat_zone_x_crop_dims in enumerate(zip(tsa.zone_slice_list,
                                                             tsa.zone_crop_list)):

            threat_zone = threat_zone_x_crop_dims[0]
            crop_dims = threat_zone_x_crop_dims[1]

            # get label
            label = np.array(tsa.get_subject_zone_label(tz_num,
                             tsa.get_subject_labels(STAGE1_LABELS, subject)))
           # print(STAGE1_LABELS, subject)
            for img_num, img in enumerate(images):

                print('Threat Zone:Image -> {}:{}'.format(tz_num, img_num))
                print('Threat Zone Label -> {}'.format(label))
                if label[0] == 0:
                    print('threat is present')
                    if threat_zone[img_num] is not None:

                        # correct the orientation of the image
                        print('-> reorienting base image')
                        base_img = np.flipud(img)
                        print('-> shape {}|mean={}'.format(base_img.shape,
                                                           base_img.mean()))

                        # convert to grayscale
                        print('-> converting to grayscale')
                        rescaled_img = tsa.convert_to_grayscale(base_img)
                        print('-> shape {}|mean={}'.format(rescaled_img.shape,
                                                           rescaled_img.mean()))

                        # spread the spectrum to improve contrast
                        print('-> spreading spectrum')
                        high_contrast_img = tsa.spread_spectrum(rescaled_img)
                        print('-> shape {}|mean={}'.format(high_contrast_img.shape,
                                                           high_contrast_img.mean()))

                        # get the masked image
                        print('-> masking image')
                        masked_img = tsa.roi(high_contrast_img, threat_zone[img_num])
                        print('-> shape {}|mean={}'.format(masked_img.shape,
                                                           masked_img.mean()))

                        # crop the image
                        print('-> cropping image')
                        cropped_img = tsa.crop(masked_img, crop_dims[img_num])
                        print('-> shape {}|mean={}'.format(cropped_img.shape,
                                                           cropped_img.mean()))

                        # normalize the image
                        print('-> normalizing image')
                        normalized_img = tsa.normalize(cropped_img)
                        print('-> shape {}|mean={}'.format(normalized_img.shape,
                                                           normalized_img.mean()))

                        # zero center the image
                        print('-> zero centering')
                        zero_centered_img = tsa.zero_center(normalized_img)
                        print('-> shape {}|mean={}'.format(zero_centered_img.shape,
                                                           zero_centered_img.mean()))

                        # append the features and labels to this threat zone's example array
                        print ('-> appending example to threat zone {}'.format(tz_num))
                        threat_zone_examples.append([[tz_num], zero_centered_img, label])
                        center = (125, 125)
                        M = cv2.getRotationMatrix2D(center, 5, 1.0)
                        rotated = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated, label])
                        M = cv2.getRotationMatrix2D(center, 10, 1.0)
                        rotated1 = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated1, label])
                        M = cv2.getRotationMatrix2D(center, 15, 1.0)
                        rotated2 = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated2, label])
                        # M must be recomputed for 20 degrees; otherwise
                        # rotated3 would just duplicate rotated2
                        M = cv2.getRotationMatrix2D(center, 20, 1.0)
                        rotated3 = cv2.warpAffine(zero_centered_img, M, (250, 250))
                        threat_zone_examples.append([[tz_num], rotated3, label])
                        print ('-> shape {:d}:{:d}:{:d}:{:d}:{:d}:{:d}'.format(
                                                             len(threat_zone_examples),
                                                             len(threat_zone_examples[0]),
                                                             len(threat_zone_examples[0][0]),
                                                             len(threat_zone_examples[0][1][0]),
                                                             len(threat_zone_examples[0][1][1]),
                                                             len(threat_zone_examples[0][2])))
                    else:
                        print('-> No view of tz:{} in img:{}. Skipping to next...'.format(
                                    tz_num, img_num))
                    print('------------------------------------------------')
                else:
                    print('threat not present and label is', label[0])
                    if count >= 0:
                        # count = 0
                        print('IN LOOP')
                        if threat_zone[img_num] is not None:
                            # correct the orientation of the image
                            print('-> reorienting base image')
                            base_img = np.flipud(img)
                            print('-> shape {}|mean={}'.format(base_img.shape,
                                                               base_img.mean()))

                            # convert to grayscale
                            print('-> converting to grayscale')
                            rescaled_img = tsa.convert_to_grayscale(base_img)
                            print('-> shape {}|mean={}'.format(rescaled_img.shape,
                                                               rescaled_img.mean()))

                            # spread the spectrum to improve contrast
                            print('-> spreading spectrum')
                            high_contrast_img = tsa.spread_spectrum(rescaled_img)
                            print('-> shape {}|mean={}'.format(high_contrast_img.shape,
                                                               high_contrast_img.mean()))

                            # get the masked image
                            print('-> masking image')
                            masked_img = tsa.roi(high_contrast_img, threat_zone[img_num])
                            print('-> shape {}|mean={}'.format(masked_img.shape,
                                                               masked_img.mean()))

                            # crop the image
                            print('-> cropping image')
                            cropped_img = tsa.crop(masked_img, crop_dims[img_num])
                            print('-> shape {}|mean={}'.format(cropped_img.shape,
                                                               cropped_img.mean()))

                            # normalize the image
                            print('-> normalizing image')
                            normalized_img = tsa.normalize(cropped_img)
                            print('-> shape {}|mean={}'.format(normalized_img.shape,
                                                               normalized_img.mean()))

                            # zero center the image
                            print('-> zero centering')
                            zero_centered_img = tsa.zero_center(normalized_img)
                            print('-> shape {}|mean={}'.format(zero_centered_img.shape,
                                                               zero_centered_img.mean()))

                            # append the features and labels to this threat zone's example array
                            print ('-> appending example to threat zone {}'.format(tz_num))
                            threat_zone_examples.append([[tz_num], zero_centered_img, label])
                            print ('-> shape {:d}:{:d}:{:d}:{:d}:{:d}:{:d}'.format(
                                                                 len(threat_zone_examples),
                                                                 len(threat_zone_examples[0]),
                                                                 len(threat_zone_examples[0][0]),
                                                                 len(threat_zone_examples[0][1][0]),
                                                                 len(threat_zone_examples[0][1][1]),
                                                                 len(threat_zone_examples[0][2])))
                        # count = 0

        # each subject yields EXAMPLES_PER_SUBJECT examples (182, to be exact),
        # so this section writes out the data once a full minibatch is
        # complete.
        if ((len(threat_zone_examples) % (BATCH_SIZE * EXAMPLES_PER_SUBJECT)) == 0):
            for tz_num, tz in enumerate(tsa.zone_slice_list):

                tz_examples_to_save = []

                # write out the batch and reset
                print(' -> writing: ' + PREPROCESSED_DATA_FOLDER +
                                        'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(
                                        tz_num+1,
                                        len(threat_zone_examples[0][1][0]),
                                        len(threat_zone_examples[0][1][1]),
                                        batch_num))

                # get this tz's examples
                tz_examples = [example for example in threat_zone_examples if example[0] ==
                               [tz_num]]

                # drop unused columns
                tz_examples_to_save.append([[features_label[1], features_label[2]]
                                            for features_label in tz_examples])

                # save batch. Note that the trainer looks for tz{} in the
                # minibatch filename, where {} is a 1-based tz_num, to select
                # which batches to use for training a given threat zone
                np.save(PREPROCESSED_DATA_FOLDER +
                        'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(tz_num+1,
                                                         len(threat_zone_examples[0][1][0]),
                                                         len(threat_zone_examples[0][1][1]),
                                                         batch_num),
                                                         tz_examples_to_save)
                del tz_examples_to_save

            #reset for next batch
            del threat_zone_examples
            threat_zone_examples = []
            batch_num += 1

    # we may run out of subjects before we finish a batch, so we write out
    # the last batch stub

    if (len(threat_zone_examples) > 0):
        for tz_num, tz in enumerate(tsa.zone_slice_list):

            tz_examples_to_save = []

            # write out the batch and reset
            print(' -> writing: ' + PREPROCESSED_DATA_FOLDER
                    + 'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(tz_num+1,
                      len(threat_zone_examples[0][1][0]),
                      len(threat_zone_examples[0][1][1]),
                      batch_num))

            # get this tz's examples
            tz_examples = [example for example in threat_zone_examples if example[0] ==
                           [tz_num]]

            # drop unused columns
            tz_examples_to_save.append([[features_label[1], features_label[2]]
                                        for features_label in tz_examples])

            #save batch
            np.save(PREPROCESSED_DATA_FOLDER +
                    'preprocessed_TSA_scans-tz{}-{}-{}-b{}.npy'.format(tz_num+1,
                                                     len(threat_zone_examples[0][1][0]),
                                                     len(threat_zone_examples[0][1][1]),
                                                     batch_num),
                                                     tz_examples_to_save)
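The four rotation blocks in preprocess_tsa_data repeat the same matrix-warp-append sequence. A compact equivalent is sketched below; the helper name is an assumption, while the (125, 125) center and 250x250 canvas come from the code above:

import cv2

def rotation_examples(img, label, tz_num, angles=(5, 10, 15, 20)):
    # one rotated copy of `img` per angle, packaged like the other examples
    out = []
    for angle in angles:
        M = cv2.getRotationMatrix2D((125, 125), angle, 1.0)
        out.append([[tz_num], cv2.warpAffine(img, M, (250, 250)), label])
    return out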
Example #39
import cv2
import numpy as np
import os

for i in os.listdir("img1/"):
    mask = cv2.imread('mask.jpg', 0)
    # add a channel axis so the mask is (H, W, 1)
    mask = np.expand_dims(mask, axis=2)

    print(i)
    img = cv2.imread("img1/" + i)
    img = cv2.resize(img, dsize=(320, 240), interpolation=cv2.INTER_AREA)
    dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)

    src = dst

    height, width, channel = src.shape
    matrix = cv2.getRotationMatrix2D((width / 2, height / 2), 180, 1)
    ak = cv2.warpAffine(src, matrix, (width, height))


    cv2.imwrite('asdf/' + i, ak)
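cv2.inpaint requires an 8-bit, single-channel mask of the same size as the input, with nonzero pixels marking the region to fill. A sketch of preparing such a mask by thresholding (the 127 cutoff, and the assumption that mask.jpg marks defects with bright pixels, are illustrative):

import cv2

mask = cv2.imread('mask.jpg', cv2.IMREAD_GRAYSCALE)
# match the 320x240 frames used above; dsize is (width, height)
mask = cv2.resize(mask, (320, 240), interpolation=cv2.INTER_NEAREST)
# binarize: nonzero pixels are the ones cv2.inpaint will fill
_, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)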
Example #40
for img_i in os.listdir('./source/'):
    if os.path.splitext(img_i)[-1] == '.jpg':  # skip processing when the extension is not 'jpg'.
        facenum = 0

        src = cv2.imread('./source/' + img_i)

        srcheight, srcwidth = src.shape[:2]  # shape[:2] is (height, width)

        imwidth  = int(math.hypot(srcwidth, srcheight)) + 2
        imheight = imwidth

        # rotate the image in 5-degree steps from -20 to +20 and run detection at each angle
        for angle_i in range(-20, 25, 5):
            tM = np.float32([[1, 0, (imwidth - srcwidth) / 2], [0, 1, (imheight - srcheight) / 2]])
            img_moved = cv2.warpAffine(src, tM, (imwidth, imheight))  # move image to center

            tM = cv2.getRotationMatrix2D((imwidth * 0.5, imheight * 0.5), angle_i, 1.0)
            img_rotated = cv2.warpAffine(img_moved, tM, (imwidth, imheight))  # rotate image

            src_gray = cv2.cvtColor(img_rotated, cv2.COLOR_BGR2GRAY)

            faces = face_cascade.detectMultiScale(src_gray, scaleFactor=factor)

            if len(faces) >= 1:
                for x, y, w, h in faces:
                    face = img_rotated[y: y + h, x: x + w]
                    cv2.imwrite('./faces/' + os.path.splitext(os.path.basename(img_i))[0] + '_' + str(angle_i).zfill(3) + '_' + str(facenum) + '.jpg', face)  # save face image
                    cv2.rectangle(img_rotated, (x, y), (x + w, y + h), (255, 0, 0), 2)  # draw frame at detected area of source image
                    facenum += 1
Example #41
def createSamplesAndSave(imagePath, dirPath, basename, infoDat, increaseFlag = False):
    # read the image file
    img = cv2.imread(imagePath)
    # make sure the image file could be read
    if img is None:
        print(imagePath + " can't be read")
        return

    # detect the face in img and keep only the lower half
    face = cutUnderFaceFromImage(img)
    # crop the mouth out of the face
    mouth = cutMouthFromImage(face)
    # trim away beard and wrinkles around the mouth
    cleanMouth = cutMouthExtraPart(mouth)

    # create the output directory under dirPath
    os.makedirs(dirPath, exist_ok=True)
    # join dirPath and basename
    basePath = os.path.join(dirPath, basename)

    # without augmentation
    if not increaseFlag:
        height, width = cleanMouth.shape[:2]
        cv2.imwrite(basePath + ".jpg", cleanMouth)
        infoDat.write(basename + ".jpg 1 0 0 " + str(width) + " " + str(height)+"\n")
    # with augmentation
    else:
        # flip the image horizontally
        flipCleanMouth = cv2.flip(cleanMouth, 1)

        # collect the base images used for augmentation
        baseImgs = []
        baseImgs.append(cleanMouth)
        baseImgs.append(flipCleanMouth)

        # resize factors
        resizeVector = (0.9, 1.0, 1.2)
        # rotation angles (degrees)
        angles = (350, 355, 0, 5, 10)
        # contrast factors
        contrast = (1.2, 1.5, 1.7)

        # i counts the images generated by augmentation
        i = 0
        for baseImg in baseImgs:
            height, width = baseImg.shape[:2]

            # resize the base image
            for vx, vy in list(itertools.product(resizeVector, repeat=2)):
                newImg = cv2.resize(baseImg, (int(width*vx), int(height*vy)))

                newHeight, newWidth = newImg.shape[:2]

                # rotation center
                center = (newWidth//2, newHeight//2)
                # rotate the image
                for angle in angles:
                    trans = cv2.getRotationMatrix2D(center, angle, 1.0)

                    newImgRotated = cv2.warpAffine(newImg, trans, (newWidth, newHeight))
                    newRotatedHeight, newRotatedWidth = newImgRotated.shape[:2]

                    # save the rotated image
                    cv2.imwrite('{}{}.{}'.format(basePath, str(i), 'jpg'), newImgRotated)
                    # write the data needed to build the vec file into info.dat
                    infoDat.write(basename + str(i)+ ".jpg 1 0 0 " + str(newRotatedWidth) + " " + str(newRotatedHeight)+"\n")

                    i += 1

                # adjust the contrast
                for k in contrast:
                    newImgContrast = adjust(newImg, contrast=k)
                    newContrastHeight, newContrastWidth = newImgContrast.shape[:2]

                    # save the contrast-adjusted image
                    cv2.imwrite('{}{}.{}'.format(basePath, str(i), 'jpg'), newImgContrast)
                    # write the data needed to build the dat file into info.txt
                    infoDat.write(basename + str(i)+ ".jpg 1 0 0 " + str(newContrastWidth) + " " + str(newContrastHeight)+"\n")

                    i += 1
Example #42
    filename = p.split("\\")[-2] + p.split("\\")[-1]
    all_bb=np.load("./align_data/bb/"+filename+".npy")
    all_landmark = np.load("./align_data/landmark/" + filename + ".npy")

    new_path=p.split("\\")[-2] +"/"+ p.split("\\")[-1]+"/"

    i=0
    for path in all_image:
        bgr = cv2.imread(path)
        bb=all_bb[i,:]
        npLandmarks = np.float32(all_landmark[i,:,:])

        face=bgr[int(bb[1]):int(bb[3]),int(bb[0]):int(bb[2])]

        H = cv2.estimateRigidTransform(npLandmarks[ALIGN_POINTS], dstLandmarks[ALIGN_POINTS],0)

        align_face=cv2.warpAffine(bgr, H, (256, 256))

        if not os.path.exists("./align_face/"+new_path):
            os.makedirs("./align_face/"+new_path)

        if not os.path.exists("./face/"+new_path):
            os.makedirs("./face/"+new_path)

        cv2.imwrite("./face/"+new_path+path.split("\\")[-1],face)
        cv2.imwrite("./align_face/" + new_path + path.split("\\")[-1], align_face)

        i+=1

    print(p+' done')
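cv2.estimateRigidTransform, used above, was removed in OpenCV 4. On current releases the equivalent partial-affine fit (rotation, uniform scale, translation) is cv2.estimateAffinePartial2D; a sketch using the same arrays as above:

# OpenCV 4.x replacement for cv2.estimateRigidTransform(src, dst, 0)
H, inliers = cv2.estimateAffinePartial2D(
    npLandmarks[ALIGN_POINTS], dstLandmarks[ALIGN_POINTS], method=cv2.RANSAC)
if H is not None:
    align_face = cv2.warpAffine(bgr, H, (256, 256))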
Example #43
def random_perspective(img,
                       targets=(),
                       degrees=10,
                       translate=.1,
                       scale=.1,
                       shear=10,
                       perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective,
                             perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective,
                             perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi /
                       180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi /
                       180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate,
                             0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(
        0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] !=
                            0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img,
                                      M,
                                      dsize=(width, height),
                                      borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img,
                                 M[:2],
                                 dsize=(width, height),
                                 borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(
            n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate(
            (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
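box_candidates() is referenced above but not defined in this excerpt. In the YOLOv5 code this snippet derives from, it keeps only boxes that survive the warp with sensible size, area, and aspect ratio; a sketch matching that behaviour (the thresholds are the upstream defaults):

import numpy as np

def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):
    # box1 (4, n) before augmentation, box2 (4, n) after
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio
    return ((w2 > wh_thr) & (h2 > wh_thr)
            & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr))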
Example #44
def rotate(image, angle):
    height, width = image.shape[:2]
    rot_mat = cv.getRotationMatrix2D((width / 2, height / 2), angle, 1)
    rotated_img = cv.warpAffine(image, rot_mat, (width, height))
    return rotated_img
Example #45
def detection():
    print('Starting detection')

    # Initialising variable
    counter = 0
    marker = 1
    positions = []
    headings = []
    centres = []
    height_of_target = []
    square = 2

    # if Static_Test:
    # cap = cv2.VideoCapture("TestData2.mp4")  # video use

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 800)
    cap.set(cv2.CAP_PROP_FPS, 60)

    time.sleep(2)  # allow the camera to start up
    print('Camera on')
    # Run detection once the camera is on
    while (cap.isOpened()):  # for video use
        # while True:
        # the camera keeps running after the if statement so it can detect multiple ground markers
        if counter == 0 or start - end < 5:
            if Static_Test:
                distance = input("Distance at which it was taken: ")
            #  start - end < 5
            if not Static_Test:
                distance = 1
            ret, frame = cap.read()

            # Gathering data from Pixhawk
            if GPS:
                position = vehicle.location.global_relative_frame
                heading = vehicle.heading
            # end if

            # starting the timer for the length of time it hasn't found a target
            start = time.time()

            # applying image processing
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # converts to gray
            blurred = cv2.GaussianBlur(
                gray, (5, 5),
                0)  # blur the gray image for better edge detection
            edged = cv2.Canny(
                blurred, 14,
                10)  # the lower the value the more detailed it would be

            # find contours in the edge image
            (contours,
             _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # grabs contours

            # outer square
            for c in contours:
                peri = cv2.arcLength(
                    c, True
                )  # perimeter of the closed contour
                # approximate the contour down to its corner points
                approx = cv2.approxPolyDP(c, 0.01 * peri, True)
                if 4 <= len(approx) <= 6:
                    (x, y, w, h) = cv2.boundingRect(
                        approx
                    )  # gets the (x,y) of the top left of the square and the (w,h)
                    aspectRatio = w / float(
                        h)  # gets the aspect ratio of the width to height
                    area = cv2.contourArea(
                        c)  # grabs the area of the completed square
                    hullArea = cv2.contourArea(cv2.convexHull(c))
                    solidity = area / float(hullArea)
                    keepDims = w > 25 and h > 25
                    keepSolidity = solidity > 0.9  # the contour should be nearly solid, like a filled square
                    keepAspectRatio = 0.6 <= aspectRatio <= 1.4
                    if keepDims and keepSolidity and keepAspectRatio:  # checks if the values are true
                        # crop the candidate square's region of interest
                        roi = frame[y:y + h, x:x + w]

                        height, width, numchannels = frame.shape

                        centre_region = (x + w / 2, y + h / 2)
                        if GPS:
                            centre_target = (y + h / 2, x + w / 2)

                        # angle of the minimum-area rectangle around the square
                        angle = cv2.minAreaRect(approx)[
                            -1]  # index -1 is the rectangle's angle
                        # normalise the minAreaRect angle (in (-90, 0]) so the
                        # square can be rotated level
                        if angle < -45:
                            angle = 90 + angle

                        rotated = cv2.getRotationMatrix2D(
                            tuple(centre_region), angle, 1.0)

                        imgRotated = cv2.warpAffine(
                            frame, rotated,
                            (width, height))  # dsize is (width, height)

                        imgCropped = cv2.getRectSubPix(imgRotated, (w, h),
                                                       tuple(centre_region))

                        HSVCropp = cv2.cvtColor(imgCropped, cv2.COLOR_BGR2HSV)

                        if square == 2:
                            color = imgCropped[int((h / 2) -
                                                   (h / 4)):int((h / 2) +
                                                                (h / 4)),
                                               int((w / 2) -
                                                   (w / 4)):int((w / 2) +
                                                                (w / 4))]
                        else:
                            color = imgCropped

                        if Step_detection:
                            cv2.imshow("crop", imgCropped)
                            cv2.imshow("okay", color)
                            print(HSVCropp[int((h / 2) - (h * (6 / 10))),
                                           int((w / 2) - (w * (6 / 10)))])

                        # # Convert the image to grayscale and turn to outline of  the letter
                        # g_rotated = cv2.cvtColor(imgCropped, cv2.COLOR_BGR2GRAY)
                        # b_rotated = cv2.GaussianBlur(g_rotated, (5, 5), 0)
                        # e_rotated = cv2.Canny(b_rotated, 70, 20)
                        #
                        # # uses the outline to detect the corners for the cropping of the image
                        # (contours, _) = cv2.findContours(e_rotated.copy(), cv2.RETR_LIST,
                        #                                  cv2.CHAIN_APPROX_SIMPLE)
                        #
                        # # inner square detection
                        # for cny in contours:
                        #   perin = cv2.arcLength(cny, True)
                        #   approxny = cv2.approxPolyDP(cny, 0.01 * perin, True)
                        #   if 4 <= len(approxny) <= 6:
                        #     (xx, yy), (ww, hh), angle = cv2.minAreaRect(approxny)
                        #     aspectRatio = ww / float(hh)
                        #     keepAspectRatio = 0.7 <= aspectRatio <= 1.3
                        #     angle = cv2.minAreaRect(approxny)[-1]
                        #     keep_angle = angle == 0, 90, 180, 270, 360
                        #     if keepAspectRatio and keep_angle:
                        #       (xxx, yyy, www, hhh) = cv2.boundingRect(approxny)
                        #       color = imgCropped[yyy:yyy + hhh, xxx:xxx + www]

                        # appends the data of the image to the list
                        if GPS:
                            positions.append(
                                [position.lat, position.lon, position.alt])
                            headings.append(heading)
                            centres.append(centre_target)
                            height_of_target.append(h)

                        # time the target was last seen
                        end = time.time()
                        time.sleep(0.5)

                        # keep count of number of saved images
                        counter = counter + 1
                        cv2.imwrite("colour%d.png" % counter, color)
                        cv2.imwrite(
                            'C:/Users/kevin/Desktop/2018-2019/method A/results/{0}_{1}.png'
                            .format(marker, counter), color)
                        print("Detected and saved a target")

                        if Static_Test:
                            # testing purposes
                            if not os.path.exists(distance):
                                os.makedirs(distance)
                            cv2.imwrite(
                                'C:/Users/kevin/Desktop/2018-2019/method A/{0}/results{1}_{2}.png'
                                .format(distance, marker, counter), color)
                            cv2.imwrite(
                                'C:/Users/kevin/Desktop/2018-2019/method A/{0}/captured{1}_{2}.png'
                                .format(distance, marker, counter), roi)
                            cv2.imwrite(
                                'C:/Users/kevin/Desktop/2018-2019/method A/{0}/orginal{1}_{2}.png'
                                .format(distance, marker, counter), frame)
                        else:
                            distance = 0

                        if Step_detection:
                            cv2.imshow("roi", roi)
                            cv2.imshow("cropped", imgCropped)
                            cv2.waitKey(0)
                        # end if
                        if counter == 7:
                            counter, marker = solution(counter, marker,
                                                       distance)
        else:
            counter, marker = solution(counter, marker, distance)

        if Step_camera:
            cv2.imshow('frame', frame)
            cv2.imshow('edge', edged)
            k = cv2.waitKey(5) & 0xFF
            if k == 27:
                break
        # end if

    cap.release()
    cv2.destroyAllWindows()
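The angle juggling above exists because of cv2.minAreaRect's angle convention: before OpenCV 4.5 the returned angle lies in (-90, 0], measured to the rectangle's first edge, so deskewing code wraps values below -45. A standalone check (the printed range depends on your OpenCV version; 4.5+ switched to (0, 90]):

import cv2
import numpy as np

pts = np.array([[10, 10], [110, 30], [90, 130], [-10, 110]], dtype=np.float32)
(cx, cy), (w, h), angle = cv2.minAreaRect(pts)
print(angle)            # (-90, 0] on OpenCV < 4.5
if angle < -45:         # usual normalisation before rotating level
    angle = 90 + angle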
Example #46
print('INTER_CUBIC zoom cost {}'.format(time.time() - start_time))

# stack the images side by side and display them
img_zoom = np.hstack((img_area_scale, img_cubic_scale))
while True:
    cv2.imshow('zoom image', img_zoom)
    k = cv2.waitKey(0)
    if k == 27:
        cv2.destroyAllWindows()
        break

# %%
'''
## Translation transform
'''

# %%
# set up the translation transformation matrix:
# shift x by 50 pixels and y by 100 pixels
M = np.array([[1, 0, 50], [0, 1, 100]], dtype=np.float32)
shift_img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))

# stack the images side by side and display them
img_shift = np.hstack((img, shift_img))
while True:
    cv2.imshow('shift image', img_shift)
    k = cv2.waitKey(0)
    if k == 27:
        cv2.destroyAllWindows()
        break
Example #47
def calculate_slice_stats(s):
    # Read in image and skeleton
    image = cv.imread(s.image, -1)
    label = cv.imread(s.skel, -1)

    # Use the scipy curve_fit() method to fit the quadratic function to the data.
    params, params_covariance = optimize.curve_fit(quad, s.greys, s.densities)

    # Find the center point
    a, b = s.points
    c = center_point(a, b)

    # Find the vector from the first point to the second.
    vector = [b[1] - a[1], b[0] - a[0]]

    # Find angle between the line drawn by the two points and the x axis
    angle_r = np.arctan2(*vector)
    angle_d = np.degrees(angle_r)

    # Rotate the image around the central point
    matrix = cv.getRotationMatrix2D(center=tuple(c), angle=angle_d, scale=1)
    rotated_image = cv.warpAffine(src=image, M=matrix, dsize=image.shape[::-1])
    rotated_label = cv.warpAffine(src=label, M=matrix, dsize=label.shape[::-1])

    # Rotate the two points too.
    a_r, b_r = rotate([a, b], origin=c, angle=-angle_r)

    # Define how wide the rectangular area should be.
    box_width = s.box_width // 2

    # Crop the image and the label to the rectangular area.
    cropped_image = rotated_image[int(c[1] - box_width):int(c[1] + box_width):,
                                  int(a_r[0]):int(b_r[0])]
    cropped_label = rotated_label[int(c[1] - box_width):int(c[1] + box_width):,
                                  int(a_r[0]):int(b_r[0])]
    _, cropped_label = cv.threshold(cropped_label, 50, 255, cv.THRESH_BINARY)

    # Find any very small boundaries that should be removed.
    processed = morphology.remove_small_objects(cropped_label.astype(bool),
                                                min_size=6,
                                                connectivity=2).astype(int)
    # black out pixels
    cropped_label[np.where(processed == 0)] = 0

    c_image_densities = np.zeros(cropped_image.shape)

    for y in range(cropped_image.shape[0]):
        for x in range(cropped_image.shape[1]):
            c_image_densities[y, x] = quad(cropped_image[y, x], *params)

    # Calculate the density error. Do so by finding a mean density for each horizontal
    # line of pixels and then find the standard deviation of these means.
    density_means = np.zeros(box_width * 2)

    for y in range(box_width * 2):
        split = c_image_densities[y, :]
        density_means[y] = np.mean(split)

    density_error = np.std(density_means)
    mean_density = np.mean(density_means)

    # Use the OpenCV connectedComponents() method to label the individual boundaries.
    num_labels, labels_image = cv.connectedComponents(cropped_label)

    for i in range(num_labels):
        if np.sum(labels_image == i) < 15:
            cropped_label[labels_image == i] = 0
            labels_image[labels_image == i] = 0

    labels = []
    yearly_labels = []

    # Sort the boundaries with the growth surface first.
    for x in range(labels_image.shape[1] - 1, -1, -1):
        for y in range(labels_image.shape[0]):
            label = labels_image[y, x]
            if cropped_label[y, x] != 0 and label not in labels:
                labels.append(label)

    for i in range(len(labels)):
        if i % 2 == 0:
            yearly_labels.append(labels[i])

    boundaries = {}

    for i in range(len(yearly_labels)):
        boundaries[i] = []

    # Save the boundary pixels into a boundaries dictionary.
    for y in range(labels_image.shape[0]):
        for x in range(labels_image.shape[1]):
            label = labels_image[y, x]
            if label in yearly_labels:
                index = yearly_labels.index(label)
                boundaries[index].append((y, x))

    euclidean_image = euclidean(cropped_label.shape, boundaries)

    raw_distances = []
    # Find the average distance in pixels between each of the boundaries.
    averages = np.zeros(len(yearly_labels) - 1)
    for i in range(len(yearly_labels) - 1):
        for j in boundaries[i]:
            averages[i] += euclidean_image[j]
            raw_distances.append(euclidean_image[j])
        averages[i] /= len(boundaries[i])

    # Calculate the extension rate standard error
    extension_error = (np.std(raw_distances) /
                       np.sqrt(len(raw_distances))) * s.voxel_size
    # print(raw_distances)

    # Calculate the linear extension rate and the calcification rate.
    linear_extension_mm = np.mean(averages) * s.voxel_size
    calcification = (linear_extension_mm / 10) * mean_density

    # Calculate the calcification rate standard error
    calcification_error = np.sqrt((density_error / mean_density)**2 +
                                  (extension_error / linear_extension_mm)**2)

    return (mean_density, density_error, linear_extension_mm, extension_error,
            calcification, calcification_error, density_means, raw_distances)
Example #48
print('dx2', dx2)
print('dy2', dy2)

# by 김주희: rotate by the average of the two slopes (2020-10-19)
ave = math.atan(dy2/dx2)
# ave = (math.atan(dy/dx) + math.atan(dy2/dx2)) / 2
# math.atan(dy2/dx2)
print('atan', ave)

# by 김주희: convert the angle to degrees (2020-10-12)
angle = ave * (180.0 / math.pi)

h, w = closing.shape[:2]
# M1 = cv.getRotationMatrix2D((w/2, h/2), 10, 1)
M = cv.getRotationMatrix2D((w/2, h/2), angle, 1)
rotation = cv.warpAffine(closing, M,(w, h))

# by 김주희: contours were already applied to closing, so rectangles are drawn on it (2020-10-12)
# by 김주희: compute distances and remove points together when many cluster nearby
# take the mode of the contour areas
cnt = Counter(compare_area)
# print(cnt.most_common(3))
# print(cnt)
#
# print(closing.shape)
ro_center = []
center_x = []
center_y = []
contours, _ = cv.findContours(rotation[:, :, 0], cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
for i in range(len(contours)):
    cnt = contours[i]
Example #49
    # ratio of the target (aligned) eye distance to the current slanted
    # distance; used later as the warpAffine scale factor
    scale = aligned_eye_distance / eye_distance

    # midpoint between the eyes, used as the rotation pivot
    eyes_center = ((left_eye_center[0, 0] + right_eye_center[0, 0]) // 2,
                   (left_eye_center[0, 1] + right_eye_center[0, 1]) // 2)
    cv2.circle(image, eyes_center, 5, (255, 0, 0), -1)

    # rotation matrix about eyes_center by the computed degree, with scaling
    matrix = cv2.getRotationMatrix2D(eyes_center, degree, scale)
    cv2.putText(image, "{:.5f}".format(degree),
                (right_eye_center[0, 0], right_eye_center[0, 1] + 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    warped = cv2.warpAffine(image_origin,
                            matrix, (image_width, image_height),
                            flags=cv2.INTER_CUBIC)

    cv2.imshow("warpAffine", warped)
    (startX, endX, startY, endY) = getCropDimension(rect, eyes_center)
    croped = warped[startY:endY, startX:endX]
    output = cv2.resize(croped, OUTPUT_SIZE)
    cv2.imshow("output", output)

    for (i, point) in enumerate(show_parts):
        x = point[0, 0]
        y = point[0, 1]
        cv2.circle(image, (x, y), 1, (0, 255, 255), -1)

cv2.imshow("Face Alignment", image)
cv2.waitKey(0)
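The degree, eye_distance, and aligned_eye_distance values used above are computed earlier in the original script. The standard derivation from the two eye centres (a sketch, reusing the same [0, k] indexing as the snippet; aligned_eye_distance is the desired inter-eye distance after alignment):

import numpy as np

dx = right_eye_center[0, 0] - left_eye_center[0, 0]
dy = right_eye_center[0, 1] - left_eye_center[0, 1]
degree = np.degrees(np.arctan2(dy, dx))      # rotation that levels the eyes
eye_distance = np.sqrt(dx ** 2 + dy ** 2)    # current inter-eye distance
scale = aligned_eye_distance / eye_distance  # scale for getRotationMatrix2D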
Example #50
    for k in range(0, len(intrseccon)):
        p.append([intrseccon[k][0][0][1], intrseccon[k][0][0][0]])

#### rotating the intersection points and scaling them by the factor a/b

    im_test = np.zeros(ROI.shape)
    rows, cols = ROI.shape

    for k in range(0, len(p)):
        im_test[p[k][0], p[k][1]] = 255

    alpha = -THETA[m_er] * 180 / (np.pi)

    M = cv2.getRotationMatrix2D((int(CENTER[m_er][1]), int(CENTER[m_er][0])),
                                alpha, 1)
    im_test = cv2.warpAffine(im_test, M, (cols, rows))

    for i in range(0, im_test.shape[0]):
        for j in range(0, im_test.shape[1]):
            if im_test[i, j] != 0:
                im_test[i, j] = 255

    im_res = cv2.resize(im_test, (0, 0), fx=1, fy=float(a) / float(b))

    im_res = np.array(im_res, dtype=np.uint8)

    p_prime = np.zeros((len(p), 2))
    _, testcon, _ = cv2.findContours(im_res.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

    for k in range(0, len(testcon)):
Example #51
    def __call__(self, img, bbox, name):

        output_w = self._width // self._scale_factor
        output_h = self._height // self._scale_factor

        if self._augmentation:

            # random color jittering
            distortion = np.random.choice([False, True], p=[0.7, 0.3])
            if distortion:
                img = image_random_color_distort(img)

            # random expansion with prob 0.5
            expansion = np.random.choice([False, True], p=[0.7, 0.3])
            if expansion:
                # Random expand original image with borders, this is identical to placing the original image on a larger canvas.
                img, expand = random_expand(img, max_ratio=4, fill=[m * 255 for m in [0.485, 0.456, 0.406]],
                                            keep_ratio=True)
                bbox = box_translate(bbox, x_offset=expand[0], y_offset=expand[1], shape=img.shape[:-1])

            # random cropping
            h, w, _ = img.shape
            bbox, crop = box_random_crop_with_constraints(bbox, (w, h),
                                                          min_scale=0.1,
                                                          max_scale=1,
                                                          max_aspect_ratio=2,
                                                          constraints=None,
                                                          max_trial=30)

            x0, y0, w, h = crop
            img = mx.image.fixed_crop(img, x0, y0, w, h)

            # random horizontal flip with probability of 0.5
            h, w, _ = img.shape
            img, flips = random_flip(img, px=0.5)
            bbox = box_flip(bbox, (w, h), flip_x=flips[0])

            # random vertical flip with probability of 0.5
            img, flips = random_flip(img, py=0.5)
            bbox = box_flip(bbox, (w, h), flip_y=flips[1])

            # random translation
            translation = np.random.choice([False, True], p=[0.5, 0.5])
            if translation:
                img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
                img = img.asnumpy()
                x_offset = np.random.randint(-7, high=7)
                y_offset = np.random.randint(-7, high=7)
                M = np.float32([[1, 0, x_offset], [0, 1, y_offset]])  # positive offsets shift right and down
                img = cv2.warpAffine(img, M, (w, h), borderValue=[m * 255 for m in [0.406, 0.456, 0.485]])
                bbox = box_translate(bbox, x_offset=x_offset, y_offset=y_offset, shape=(h, w))
                img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)]
                img = mx.nd.array(img)

            # resize with random interpolation
            h, w, _ = img.shape
            interp = np.random.randint(0, 5)
            img = mx.image.imresize(img, self._width, self._height, interp=interp)
            bbox = box_resize(bbox, (w, h), (output_w, output_h))

        else:
            h, w, _ = img.shape
            img = mx.image.imresize(img, self._width, self._height, interp=1)
            bbox = box_resize(bbox, (w, h), (output_w, output_h))

        # the targets are heatmap-based, so clip them to the output resolution
        bbox[:, 0] = np.clip(bbox[:, 0], 0, output_w)
        bbox[:, 1] = np.clip(bbox[:, 1], 0, output_h)
        bbox[:, 2] = np.clip(bbox[:, 2], 0, output_w)
        bbox[:, 3] = np.clip(bbox[:, 3], 0, output_h)

        img = mx.nd.image.to_tensor(img)  # scale pixel values to 0 ~ 1
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)

        if self._make_target:
            bbox = bbox[np.newaxis, :, :]
            bbox = mx.nd.array(bbox)
            heatmap, offset_target, wh_target, mask_target = self._target_generator(bbox[:, :, :4], bbox[:, :, 4:5],
                                                                                    output_w, output_h, img.context)
            return img, bbox[0], heatmap[0], offset_target[0], wh_target[0], mask_target[0], name
        else:
            return img, bbox, name
Example #52
import cv2
import numpy as np
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True)
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
cv2.imshow("original", image)
cv2.waitKey(0)

M = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Down and right", shifted)
cv2.waitKey(0)

M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted up and left", shifted)
cv2.waitKey(0)
Example #53
    def Rotate(path, pickle_name, factor=1):
        """Augment dataset

        Parameters
        ----------
        path : str
            The file location of the .pickle
        pickle_name : str
            The file location to write the augmented .pickle to
        factor : int
            How many augmented variations will be created for each frame
        """
        dataset = pd.read_pickle(path)
        logging.info('[DataManipulator] dataset shape: ' + str(dataset.shape))

        h, w, c = DataManipulator.GetSizeDataFromDataFrame(dataset)
        sizes = DataManipulator.CreateSizeDataFrame(h, w, c)

        x_set = dataset['x'].values
        y_set = dataset['y'].values
        z_set = dataset['z'].values
        t_set = dataset['t'].values

        x_set = np.vstack(x_set[:])
        x_set = np.reshape(x_set, (-1, h, w, c))
        np.random.seed()

        x_augset = []
        y_augset = []
        z_augset = []
        t_augset = []
        r_augset = []

        max_angle = int(factor / 2)
        center = (w / 2, h / 2)
        scale = 1.0

        for i in range(len(x_set)):

            y = y_set[i]
            z = z_set[i]
            t = t_set[i]
            x = x_set[i]
            x = np.reshape(x, (h, w)).astype("uint8")

            for r in range(factor):

                #rot_angle = np.random.randint(-max_angle, max_angle)
                rot_angle = r - max_angle

                M = cv2.getRotationMatrix2D(center, rot_angle, scale)
                img = cv2.warpAffine(x, M, (w, h))  # dsize is (width, height)

                x_augset.append(img)
                y_augset.append(y)
                z_augset.append(z)
                t_augset.append(t)
                r_augset.append(rot_angle)

        data = pd.DataFrame(
            data={
                'x': x_augset,
                'y': y_augset,
                'z': z_augset,
                't': t_augset,
                'r': r_augset
            })
        df2 = pd.concat([data, sizes], axis=1)
        df2.to_pickle(pickle_name)
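A usage sketch for this augmenter (the file names are placeholders); with factor=5 each frame yields five variants rotated between -2 and +2 degrees:

DataManipulator.Rotate('dataset.pickle', 'dataset_rotated.pickle', factor=5)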
Example #54
def funcRotate(degree=0):
    degree = cv2.getTrackbarPos('degree', 'Frame')
    rotation_matrix = cv2.getRotationMatrix2D((width / 2, height / 2), degree,
                                              1)
    rotated_image = cv2.warpAffine(original, rotation_matrix, (width, height))
    cv2.imshow('Rotate', rotated_image)
Example #55
def extractPlate(imgOriginal, listOfMatchingChars):
    possiblePlate = PossiblePlate.PossiblePlate(
    )  # this will be the return value

    listOfMatchingChars.sort(
        key=lambda matchingChar: matchingChar.intCenterX
    )  # sort chars from left to right based on x position

    # calculate the center point of the plate
    fltPlateCenterX = (
        listOfMatchingChars[0].intCenterX +
        listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterX) / 2.0
    fltPlateCenterY = (
        listOfMatchingChars[0].intCenterY +
        listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY) / 2.0

    ptPlateCenter = fltPlateCenterX, fltPlateCenterY

    # calculate plate width and height
    intPlateWidth = int(
        (listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectX +
         listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectWidth
         - listOfMatchingChars[0].intBoundingRectX) *
        PLATE_WIDTH_PADDING_FACTOR)

    intTotalOfCharHeights = 0

    for matchingChar in listOfMatchingChars:
        intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight
    # end for

    fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)

    intPlateHeight = int(fltAverageCharHeight * PLATE_HEIGHT_PADDING_FACTOR)

    # calculate correction angle of plate region
    fltOpposite = listOfMatchingChars[
        len(listOfMatchingChars) -
        1].intCenterY - listOfMatchingChars[0].intCenterY
    fltHypotenuse = DetectChars.distanceBetweenChars(
        listOfMatchingChars[0],
        listOfMatchingChars[len(listOfMatchingChars) - 1])
    fltCorrectionAngleInRad = math.asin(fltOpposite / fltHypotenuse)
    fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / math.pi)

    # pack plate region center point, width and height, and correction angle into rotated rect member variable of plate
    possiblePlate.rrLocationOfPlateInScene = (tuple(ptPlateCenter),
                                              (intPlateWidth, intPlateHeight),
                                              fltCorrectionAngleInDeg)

    # final steps are to perform the actual rotation

    # get the rotation matrix for our calculated correction angle
    rotationMatrix = cv2.getRotationMatrix2D(tuple(ptPlateCenter),
                                             fltCorrectionAngleInDeg, 1.0)

    height, width, numChannels = imgOriginal.shape  # unpack original image width and height

    imgRotated = cv2.warpAffine(imgOriginal, rotationMatrix,
                                (width, height))  # rotate the entire image

    imgCropped = cv2.getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight),
                                   tuple(ptPlateCenter))

    possiblePlate.imgPlate = imgCropped  # copy the cropped plate image into the applicable member variable of the possible plate

    return possiblePlate
grey_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)

print("Applying Adaptive Threshold with kernel :- 21 X 21")
bin_img = cv2.adaptiveThreshold(grey_img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,21,20)
coords = np.column_stack(np.where(bin_img > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
    angle = -(90 + angle)
else:
    angle = -angle
h = bin_img.shape[0]
w = bin_img.shape[1]
center = (w//2,h//2)
M = cv2.getRotationMatrix2D(center,angle,1.0)
bin_img = cv2.warpAffine(bin_img,M,(w,h),
                         flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

bin_img1 = bin_img.copy()
bin_img2 = bin_img.copy()

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
kernel1 = np.array([[1,0,1],[0,1,0],[1,0,1]], dtype = np.uint8)
# final_thr = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
# final_thr = cv2.dilate(bin_img,kernel1,iterations = 1)
print("Noise Removal From Image.........")
final_thr = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
contr_retrival = final_thr.copy()


print("Beginning Character Segmentation..............")
count_x = np.zeros(shape=(h,))
def compute_features(img):
    scalenum = 2
    feat = []
    # make a copy of the image
    im_original = img.copy()

    # scale the images twice
    for itr_scale in range(scalenum):
        im = im_original.copy()
        # normalize the image
        im = im / 255.0

        # calculating MSCN coefficients
        mu = cv2.GaussianBlur(im, (7, 7), 1.166)
        mu_sq = mu * mu

        sigma = cv2.GaussianBlur(im * im, (7, 7), 1.166)
        sigma = (sigma - mu_sq) ** 0.5

        # structdis is the MSCN image
        structdis = im - mu
        structdis /= (sigma + 1.0 / 255)

        # calculate best fitted parameters from MSCN image
        best_fit_params = AGGDfit(structdis)
        # unwrap the best fit parameters
        lsigma_best = best_fit_params[0]
        rsigma_best = best_fit_params[1]
        gamma_best = best_fit_params[2]

        # append the best fit parameters for MSCN image
        feat.append(gamma_best)
        feat.append((lsigma_best * lsigma_best + rsigma_best * rsigma_best) / 2)

        # shifting indices for creating pair-wise products
        shifts = [[0, 1], [1, 0], [1, 1], [-1, 1]]  # H V D1 D2

        for itr_shift in range(1, len(shifts) + 1):
            OrigArr = structdis
            reqshift = shifts[itr_shift - 1]  # shifting index

            # create transformation matrix for warpAffine function
            M = np.float32([[1, 0, reqshift[1]], [0, 1, reqshift[0]]])
            ShiftArr = cv2.warpAffine(OrigArr, M, (structdis.shape[1], structdis.shape[0]))

            Shifted_new_structdis = ShiftArr
            Shifted_new_structdis = Shifted_new_structdis * structdis
            # shifted_new_structdis is the pairwise product
            # best fit the pairwise product
            best_fit_params = AGGDfit(Shifted_new_structdis)
            lsigma_best = best_fit_params[0]
            rsigma_best = best_fit_params[1]
            gamma_best = best_fit_params[2]

            constant = m.pow(tgamma(1 / gamma_best), 0.5) / m.pow(tgamma(3 / gamma_best), 0.5)
            meanparam = (rsigma_best - lsigma_best) * (tgamma(2 / gamma_best) / tgamma(1 / gamma_best)) * constant

            # append the best fit calculated parameters
            feat.append(gamma_best)  # gamma best
            feat.append(meanparam)  # mean shape
            feat.append(m.pow(lsigma_best, 2))  # left variance square
            feat.append(m.pow(rsigma_best, 2))  # right variance square

        # resize the image on next iteration
        im_original = cv2.resize(im_original, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
    return feat
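
# compute_features above assumes an AGGDfit helper that fits an asymmetric
# generalized Gaussian to the MSCN coefficients and returns
# (lsigma_best, rsigma_best, gamma_best). A minimal moment-matching sketch,
# not the original author's code; scipy is an assumed dependency:
from scipy.special import gamma as vgamma  # vectorized gamma function


def AGGDfit(structdis):
    x = structdis.flatten()
    neg, pos = x[x < 0], x[x > 0]
    # standard deviations of the left and right half-distributions
    lsigma = np.sqrt(np.mean(neg * neg)) if neg.size else 1e-6
    rsigma = np.sqrt(np.mean(pos * pos)) if pos.size else 1e-6
    gammahat = lsigma / rsigma
    rhat = np.mean(np.abs(x)) ** 2 / np.mean(x * x)
    rhatnorm = rhat * (gammahat ** 3 + 1) * (gammahat + 1) / (gammahat ** 2 + 1) ** 2
    # brute-force search for the shape parameter that matches rhatnorm
    gam = np.arange(0.2, 10.0, 0.001)
    r_gam = vgamma(2.0 / gam) ** 2 / (vgamma(1.0 / gam) * vgamma(3.0 / gam))
    gamma_best = gam[np.argmin((r_gam - rhatnorm) ** 2)]
    return (lsigma, rsigma, gamma_best)
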
def rotate_image(image, angle):
    (h, w) = image.shape[:2]
    center = (w / 2, h / 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(image, rotation_matrix, (w, h))


def find_wide_contour(contours):
    # Helper name is assumed; returns the first contour wider than 500 px, if any.
    for (i, c) in enumerate(contours):
        (x, y, w, h) = cv2.boundingRect(c)
        if w > 500:
            return (True, contours[i])
    return (False, 0)


# Get edges with a Sobel kernel
frame = getPict(VIDEO_PATH, FRAME_INDEX)
img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img_line = cv2.filter2D(img_gray, -1, Sobel_kernel)
img_cont = cv2.filter2D(img_gray, -1, my_kernel)

rows, cols = img_gray.shape
M = cv2.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), 90, 1)
img_line = cv2.warpAffine(img_line, M, (cols, rows))
img_cont = cv2.warpAffine(img_cont, M, (cols, rows))
frame = cv2.warpAffine(frame, M, (cols, rows))

line_flag, line = lineDetect(img_line)
if (line_flag):
    x1 = line[0]
    y1 = line[1]
    x2 = line[2]
    y2 = line[3]
    print('Line: ', np.sqrt(np.power(x1 - x2, 2) + np.power(y1 - y2, 2)),
          ', k = ' + str((y2 - y1) / (x2 - x1)))
    cv2.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
else:
    print('No Line')
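
# lineDetect is not shown in this snippet. A hedged sketch of a
# HoughLinesP-based detector with the (flag, [x1, y1, x2, y2]) return shape
# the caller expects; all thresholds are assumptions, and a binarized edge
# image (e.g. after cv2.Canny) will behave better than raw gradients:
def lineDetect(edge_img, min_len=100):
    lines = cv2.HoughLinesP(edge_img, 1, np.pi / 180, threshold=80,
                            minLineLength=min_len, maxLineGap=10)
    if lines is None:
        return False, None
    # keep the longest detected segment
    longest = max(lines[:, 0, :],
                  key=lambda l: (l[0] - l[2]) ** 2 + (l[1] - l[3]) ** 2)
    return True, longest
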
Example #60
0
def rotate_image(image, angle):
    """
    Rotates an OpenCV 2 / NumPy image about its centre by the given angle
    (in degrees). The returned image will be large enough to hold the entire
    new image, with a black background
    """

    # Get the image size
    # No, that's not an error - NumPy stores image matrices backwards
    image_size = (image.shape[1], image.shape[0])
    image_center = tuple(np.array(image_size) / 2)

    # Convert the OpenCV 3x2 rotation matrix to 3x3
    rot_mat = np.vstack(
        [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]
    )

    rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])

    # Shorthand for below calcs
    image_w2 = image_size[0] * 0.5
    image_h2 = image_size[1] * 0.5

    # Obtain the rotated coordinates of the image corners
    rotated_coords = [
        (np.array([-image_w2,  image_h2]) * rot_mat_notranslate).A[0],
        (np.array([ image_w2,  image_h2]) * rot_mat_notranslate).A[0],
        (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
        (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]
    ]

    # Find the size of the new image
    x_coords = [pt[0] for pt in rotated_coords]
    x_pos = [x for x in x_coords if x > 0]
    x_neg = [x for x in x_coords if x < 0]

    y_coords = [pt[1] for pt in rotated_coords]
    y_pos = [y for y in y_coords if y > 0]
    y_neg = [y for y in y_coords if y < 0]

    right_bound = max(x_pos)
    left_bound = min(x_neg)
    top_bound = max(y_pos)
    bot_bound = min(y_neg)

    new_w = int(abs(right_bound - left_bound))
    new_h = int(abs(top_bound - bot_bound))

    # We require a translation matrix to keep the image centred
    trans_mat = np.matrix([
        [1, 0, int(new_w * 0.5 - image_w2)],
        [0, 1, int(new_h * 0.5 - image_h2)],
        [0, 0, 1]
    ])

    # Compute the transform for the combined rotation and translation
    affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]

    # Apply the transform
    result = cv2.warpAffine(
        image,
        affine_mat,
        (new_w, new_h),
        flags=cv2.INTER_LINEAR
    )

    return result
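
# A short usage sketch contrasting this bound-expanding rotation with the
# earlier, cropping rotate_image ("input.png" is a placeholder path):
img = cv2.imread("input.png")
rotated = rotate_image(img, 30)
print(img.shape[:2], "->", rotated.shape[:2])  # canvas grows, so corners survive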