Code Example #1
File: preproc_old.py Project: lpigou/chalearn2014
def inpaint(vid, threshold):
    for i,img in enumerate(vid):
        mask = img.copy()
        mask[mask<threshold] = 0
        cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA, img) 
        img[img>threshold] = 0
        vid[i] = img
    return vid
Code Example #2
def inpaint_vessels(img, mask, radius, method):
    img = cv2.imread(img)
    mask = cv2.imread(mask, 0)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.dilate(mask, kernel, iterations=1)
    if method == 1:
        dst = cv2.inpaint(img, mask, radius, cv2.INPAINT_TELEA)
    else:
        dst = cv2.inpaint(img, mask, radius, cv2.INPAINT_NS)
    cv2.imshow('ori', img)
    cv2.imshow('dst', dst)
    cv2.imwrite('output_inpaint.jpg', dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code Example #3
	def inpaint(self, image, topLeft=None, bottomRight=None,mask=None):
		if(mask is None):
			x1 = topLeft[0]
			x2 = bottomRight[0]

			y1 = topLeft[1]
			y2 = bottomRight[1]

			mask = numpy.zeros((image.shape[0], image.shape[1]), numpy.uint8)
			mask[y1:y2, x1:x2] = 1
			
			image = cv2.inpaint(image, mask, 3, cv2.INPAINT_TELEA)
		else:
			image = cv2.inpaint(image, mask, 3, cv2.INPAINT_NS)
		return image
Code Example #4
File: ImageRestoration.py Project: zoyron/opencv
def main():
    image = cv2.imread("../data/Damaged Image.tiff", 1)
    mask_image = cv2.imread("../data/Mask.tiff", 0)

    telea_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_TELEA)
    ns_image = cv2.inpaint(image, mask_image, 5, cv2.INPAINT_NS)

    cv2.imshow("Original Image", image)
    cv2.imshow("Mask Image", mask_image)

    cv2.imshow("TELEA Restored Image", telea_image)
    cv2.imshow("NS Restored Image", ns_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code Example #5
File: destripe.py Project: SeanTater/tscan
    def run(self, meta):
        mask = self.noise_mask(meta.data)
        # Inpaint radius = 5, acceptable?
        meta.data = cv2.inpaint(meta.data, numpy.array(mask, dtype=numpy.uint8), 5,
                                cv2.INPAINT_NS)
        code.interact(local=vars())
        return meta
Code Example #6
def segment_image_inpaint(input_image, segdir, frpath):
    path = os.path.join(segdir, frpath)
    S = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    input_image = scipy.misc.imresize(input_image, np.shape(S))
    input_image = cv2.inpaint(input_image, S, 5, cv2.INPAINT_NS)
    input_image = input_image.astype(float) / np.max(input_image)
    return input_image
Code Example #7
File: preprocess.py Project: moritayasuaki/pycv
def reflectionMask(img):
  # lab = cv2.cvtColor(img,cv2.COLOR_BGR2LAB)
  # l = lab[:,:,0]
  # a = lab[:,:,1]
  # b = lab[:,:,2]
  # ul = (l*2.56).astype(np.uint8)
  # ua = ((100+a)*1.28).astype(np.uint8)
  # ub = ((100+b)*1.28).astype(np.uint8)
  # ule = cv2.equalizeHist(ul)
  # ret, mask = cv2.threshold(ule, 242, 255, cv2.THRESH_BINARY)
  # mask = mask.astype(np.uint8)
  # iul = cv2.inpaint(ul, mask, 20, cv2.INPAINT_TELEA)
  # iua = cv2.inpaint(ua, mask, 20, cv2.INPAINT_TELEA)
  # iub = cv2.inpaint(ub, mask, 20, cv2.INPAINT_TELEA)

  # ifl = (iul.astype(np.float32))/2.56
  # ifa = (iua.astype(np.float32)-128)/1.28
  # ifb = (iub.astype(np.float32)-128)/1.28

  # lab = np.array([ifl,ifa,ifb]).swapaxes(0,1).swapaxes(1,2)
  b = img[:,:,0]
  g = img[:,:,1]
  r = img[:,:,2]
  ret, mask = cv2.threshold(2*b+g+r,2.8,255, cv2.THRESH_BINARY)
  mask = mask.astype(np.uint8)
  mimg = cv2.inpaint((img*256).astype(np.uint8),mask, 10, cv2.INPAINT_TELEA)
  mimg = mimg.astype(np.float32)/256
  return (mimg,mask)
Code Example #8
File: inpaint.py Project: adamrankin/opencv
def main():
    import sys
    try:
        fn = sys.argv[1]
    except:
        fn = 'fruits.jpg'

    img = cv.imread(cv.samples.findFile(fn))
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    img_mark = img.copy()
    mark = np.zeros(img.shape[:2], np.uint8)
    sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255))

    while True:
        ch = cv.waitKey()
        if ch == 27:
            break
        if ch == ord(' '):
            res = cv.inpaint(img_mark, mark, 3, cv.INPAINT_TELEA)
            cv.imshow('inpaint', res)
        if ch == ord('r'):
            img_mark[:] = img
            mark[:] = 0
            sketch.show()

    print('Done')
Code Example #9
def preprocess_image(image):
    # Copy the depth part of the image
    depth_pixels = image.pixels[..., 2].copy()
    depth_pixels = rescale_to_opencv_image(depth_pixels)
    filtered_depth_pixels = median_filter(depth_pixels, 5)

    # Build mask for floodfilling, this lets me ignore all the pixels
    # from the background and around the ears
    mask = np.zeros((depth_pixels.shape[0] + 2, depth_pixels.shape[1] + 2),
                    dtype=np.uint8)
    # Flood fill from top left
    cv2.floodFill(filtered_depth_pixels, mask, (0, 0),
                  (255, 255, 255), flags=cv2.FLOODFILL_MASK_ONLY)
    # Flood fill from top right
    cv2.floodFill(filtered_depth_pixels, mask, (depth_pixels.shape[1] - 1, 0),
                  (255, 255, 255), flags=cv2.FLOODFILL_MASK_ONLY)
    # Truncate and negate the flood filled areas to find the facial region
    floodfill_mask = (~mask.astype(bool))[1:-1, 1:-1]

    # Build a mask of the areas inside the face that need inpainting
    inpaint_mask = ~image.mask.mask & floodfill_mask
    # Inpaint the image and filter to smooth
    inpainted_pixels = cv2.inpaint(depth_pixels,
                                   inpaint_mask.astype(np.uint8),
                                   5, cv2.INPAINT_NS)
    inpainted_pixels = median_filter(inpainted_pixels, 5)

    # Back to depth pixels
    image.pixels[..., 2] = rescale_to_depth_image(image, inpainted_pixels)
    # Reset the mask!
    image.mask.pixels[..., 0] = ~np.isnan(image.pixels[..., 2])
Code Example #10
File: SkinSegTest.py Project: pazagra/catkin_ws
def Inpaintv1(Depth):
    # cv2.resize expects dsize as (width, height)
    x1 = int(Depth.shape[1] * 0.2)
    x2 = int(Depth.shape[0] * 0.2)
    Depth_Small = cv2.resize(Depth, (x1, x2))
    mask = (Depth_Small == 0)
    zeros = np.zeros(Depth_Small.shape, Depth_Small.dtype)
    ones = np.ones(Depth_Small.shape, Depth_Small.dtype)
    ones *= 255
    maskk = np.where(mask == True, ones, zeros)
    maskk = maskk[:, :, 0]
    Temp = cv2.inpaint(Depth_Small, maskk, 10.0, cv2.INPAINT_TELEA)
    Temp2 = cv2.resize(Temp, (Depth.shape[1], Depth.shape[0]))
    return Temp2
Code Example #11
File: overfeat_node.py Project: CURG/overfeat_svm
def depth_features(depth_img):
    '''
    fill in np.nan holes using cv2.inpaint,
    then return histogram of oriented gradients
    '''
    mask   = np.isnan(depth_img).astype(np.uint8)
    normed = (255.0 * depth_img / np.nanmax(depth_img)).astype(np.uint8) 
    filled = cv2.inpaint(normed, mask, 8, cv2.INPAINT_NS)
    hogged = hog(filled, pixels_per_cell=(40,40), cells_per_block=(2,2))
    return hogged
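A minimal usage sketch for the function above; the synthetic depth frame and its NaN holes are illustrative assumptions, and cv2, numpy (as np) and skimage's hog are assumed imported as in the excerpt:

depth = np.random.rand(480, 640).astype(np.float32) * 3.0
depth[50:60, 80:90] = np.nan  # simulate missing sensor readings
features = depth_features(depth)  # holes inpainted, then HOG-encoded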
Code Example #12
def inpaint(p_image, p_mask):
    # if len(sys.argv) < 3:
    #     print "Not enough params"
    #     sys.exit()
    img = cv2.imread(p_image)
    mask = cv2.imread(p_mask, 0)
    res = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
    cv2.imshow('res', res)
    cv2.waitKey(10000)
    cv2.destroyAllWindows()
    sys.exit()
Code Example #13
File: remove_glare.py Project: derbedhruv/openDR
def remove_glare(im):
    # generate binary image mask - dilated circles around the saturated bright spots at the center
    temp = im[y-w:y+w, x-w:x+w,1]  # single channel
    ret, temp_mask = cv2.threshold(temp, thresh*256, 255, cv2.THRESH_BINARY)
    kernel = numpy.ones((25,25), 'uint8')
    temp_mask = cv2.dilate(temp_mask, kernel)

    # perform the inpainting...
    im[y-w:y+w, x-w:x+w,:] = cv2.inpaint(im[y-w:y+w, x-w:x+w,:], temp_mask, 1, cv2.INPAINT_TELEA)

    # return file
    return im
Code Example #14
	def replace(self, marker):
		firstPoint = self.refinePoint(marker.getFirstPoint())
		secondPoint = self.refinePoint(marker.getSecondPoint())
		x1 = firstPoint[0]
		y1 = firstPoint[1]
		x2 = secondPoint[0]
		y2 = secondPoint[1]
		
		mask = numpy.zeros((self.image.shape[0], self.image.shape[1]), numpy.uint8)
		mask[y1:y2, x1:x2] = 1
		
		self.image = cv2.inpaint(self.image, mask, 3, cv2.INPAINT_TELEA)
Code Example #15
File: methods.py Project: pupil-labs/pupil
def erase_specular(image, lower_threshold=0.0, upper_threshold=150.0):
    """erase_specular: removes specular reflections
            within given threshold using a binary mask (hi_mask)
    """
    thresh = cv2.inRange(image, np.asarray(float(lower_threshold)), np.asarray(256.0))

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    hi_mask = cv2.dilate(thresh, kernel, iterations=2)

    specular = cv2.inpaint(image, hi_mask, 2, flags=cv2.INPAINT_TELEA)
    # return cv2.max(hi_mask,image)
    return specular
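A minimal usage sketch, assuming an 8-bit single-channel eye image; the file names are placeholders, not from pupil-labs/pupil:

eye_gray = cv2.imread("eye.png", cv2.IMREAD_GRAYSCALE)
despeckled = erase_specular(eye_gray)  # reflections replaced by inpainting
cv2.imwrite("eye_despeckled.png", despeckled)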
Code Example #16
    def _transform_msg_into_cv2images(self, msg_color, msg_mapping, msg_mask, inpaint):

        rgb_frame_numpy = numpy.fromstring(msg_color, numpy.uint8).reshape(1080, 1920,2)
        frame_rgb = cv2.cvtColor(rgb_frame_numpy, cv2.COLOR_YUV2BGR_YUY2)  # YUY2 to BGR
        mapped_frame_numpy = numpy.fromstring(msg_mapping, numpy.uint8).reshape(1080*self._factor, 1920*self._factor)
        mapped_image = numpy.uint8(cv2.normalize(mapped_frame_numpy, None, 0, 255, cv2.NORM_MINMAX))
        mask_numpy = numpy.fromstring(msg_mask, numpy.uint8).reshape(1080*self._factor, 1920*self._factor)
        mask = numpy.uint8(cv2.normalize(mask_numpy, None, 0, 255, cv2.NORM_MINMAX))
        if inpaint is True:
            # keep the inpainted map instead of discarding the result
            mapped_image = cv2.inpaint(mapped_image, mask, 3, cv2.INPAINT_TELEA)

        return frame_rgb, mapped_image, mask
Code Example #17
File: inpaint_demo.py Project: Andor-Z/scpy2
    def draw(self):
        if self.mask_artist is None:
            self.draw_image(self.img)
            return
        mask = self.mask_artist.get_mask_array()
        if self.img.shape[:2] == mask.shape:
            img2 = cv2.inpaint(self.img, mask, self.r, getattr(cv2, self.method))
            self.img2 = img2
            self.show_mask = False
            self.mask_artist.hide_mask()
            self.draw_image(img2)
        else:
            self.draw_image(self.img)
Code Example #18
File: server.py Project: JNazare/story-api
def inpaint_image(img_file):
	img = cv2.imread(img_file)
	neg_mask = np.zeros(img.shape, dtype=np.uint8)
	text_area = np.array([ [110,450], [440,450], [440,690], [110,690] ], np.int32)
	cv2.fillConvexPoly(neg_mask, text_area, (255,255,255))
	neg_mask = np.invert(neg_mask)
	mask = np.bitwise_or(img, neg_mask)
	gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
	ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
	blank = cv2.inpaint(img,thresh,3,cv2.INPAINT_TELEA)
	blank_file = 'photos/blank_'+img_file.split('/')[1]
	cv2.imwrite(blank_file, blank)
	return blank_file
Code Example #19
    def image_callback(self, image_in):
        """ Get image to which we're subscribed. """

        # Import and convert
        image_cv2 = self.rcv.toCv2(image_in)
        image_hsv = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2HSV)

        try:
            pink_lowerb = numpy.array((140, 100,100))
            pink_upperb = numpy.array((170,255, 255))
            pink_x, pink_y, pink_area = self.rcv.find_marker(image_cv2, pink_lowerb, pink_upperb)
            
            green_lowerb = numpy.array((50, 100,100))
            green_upperb = numpy.array((80,255, 255))
            green_x, green_y, green_area = self.rcv.find_marker(image_cv2, green_lowerb, green_upperb)

            special_area = image_hsv[pink_y:green_y, pink_x:green_x] 

            markings = image_cv2[pink_y:green_y, pink_x:green_x]
            markings = cv2.cvtColor(markings, cv2.COLOR_BGR2GRAY)
            edges = cv2.Canny(markings, 10, 20)
            img_height = len(image_cv2)
            img_width = len(image_cv2[0])
            mask = numpy.zeros((img_height, img_width), dtype=numpy.uint8)
            mask[pink_y:green_y, pink_x:green_x] = edges
            kernel = numpy.ones((5,5),'uint8')
            mask = cv2.dilate(mask, kernel)
            # mask = cv2.erode(mask, kernel)
            board_depth = self.depth_image[pink_y, pink_x]
            # print "board depth = {0}".format(board_depth)
            # print self.depth_image
            # print numpy.where(self.depth_image <= board_depth - 0.2)
            # http://stackoverflow.com/questions/432112/is-there-a-numpy-function-to-return-the-first-index-of-something-in-an-array
            # for i in range(img_height):
            #     for j in range(img_width):
            #         if self.depth_image[i][j] <= board_depth - 0.25:
            #             mask[i][j] = 0

            image_cv2 = cv2.inpaint(image_cv2, mask, 5, cv2.INPAINT_TELEA)            
            # cv2.rectangle(image_cv2, (green_x, green_y), (pink_x, pink_y), (0, 0, 0), 3)
        except(ZeroDivisionError):
            pass
        # except(ZeroDivisionError, TypeError, AttributeError):
        self.rcv.imshow(self.depth_image)
        # self.rcv.imshow(image_cv2)

        # Convert back to ROS Image msg
        image_out = self.rcv.toRos(image_cv2)
        self.pub.publish(image_out)
Code Example #20
def inpaint_retina(source_path, mask_path, dest_path, img_ext):
    print("Hello")
    sources = [f for f in listdir(source_path) if f.endswith(img_ext)]
    print("Number of files to process is", len(sources), ".")
    masks = [f for f in listdir(mask_path) if f.endswith(img_ext)]
    kernel = np.ones((7, 7), np.uint8)
    for (s, m) in zip(sources, masks):
        img = cv2.imread(join(source_path, s))
        mask = cv2.imread(join(mask_path, m), 0)
        mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]
        mask = cv2.dilate(mask, kernel, iterations=1)
        dst = cv2.inpaint(img, mask, 55, cv2.INPAINT_NS)
        cv2.imwrite(join(dest_path, s), dst)
        print("File", s, "processed.\n")
    return
Code Example #21
def LightInpaint(im, mask, rsizFac = 4):
  # use integer division so cv2.resize receives integer dimensions
  maskGlints = cv2.resize(mask, (im.shape[1] // rsizFac, im.shape[0] // rsizFac))
  imTmp = cv2.resize(im, (im.shape[1] // rsizFac, im.shape[0] // rsizFac))
  #imTmp = im.copy()

  krnl = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
  dilt = cv2.dilate(maskGlints, krnl)
  inpaintGlint = np.float64(cv2.inpaint(np.uint8(imTmp*255),np.uint8(dilt*255),10,cv2.INPAINT_NS))/255
  fullInpaint = cv2.resize(inpaintGlint, (im.shape[1], im.shape[0]), interpolation = cv2.INTER_CUBIC)
  rsizDilt = cv2.resize(dilt, (im.shape[1], im.shape[0]), interpolation = cv2.INTER_NEAREST)
  rsizDilt[rsizDilt!=0]=1
  fullInpaintMask = fullInpaint*rsizDilt
  tmpMask = np.zeros_like(im)
  tmpMask[np.where(rsizDilt==0)]=1
  imNew = im*tmpMask + fullInpaintMask
  return imNew
Code Example #22
File: inpaintU.py Project: zhangyeyong/homeWork
def inpaint(img, threshold=50, minLineLength=50, maxLineGap=5, thickness=2, inpaintRadius=3):
    edges = cv2.Canny(img, 50, 150, apertureSize=3)
    # empirically chosen parameters
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold, minLineLength=minLineLength, maxLineGap=maxLineGap)
    if lines is None:
        return img
    result = np.zeros(img.shape[:2], np.uint8)
    h, w = img.shape[:2]
    for x1, y1, x2, y2 in lines[0]:
        if y1 + 2 <= h:
            y1 = y1 + 2
        if y2 + 2 <= h:
            y2 = y2 + 2
        # draw the detected lines into the mask so inpaint() repairs them
        cv2.line(result, (x1, y1), (x2, y2), 255, thickness)
    return cv2.inpaint(img, result, inpaintRadius, cv2.INPAINT_TELEA)
Code Example #23
File: pre_processing.py Project: Cosijopiii/EyeTab
    def erase_specular(self, eye_img, debug=False):

        # Rather arbitrary decision on how large a specularity may be
        max_specular_contour_area = sum(eye_img.shape[:2]) / 2

        # Extract top 50% of intensities
        eye_img_grey = cv2.cvtColor(eye_img, cv2.COLOR_BGR2GRAY)
        eye_img_grey_blur = cv2.GaussianBlur(eye_img_grey, (5, 5), 0)

        # Close to suppress eyelashes
        morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        eye_img_grey_blur = cv2.morphologyEx(eye_img_grey_blur, cv2.MORPH_CLOSE, morph_kernel)

        if eye_img_grey_blur is None:
            raise eye_extractor.NoEyesFound()

        thresh_val = int(np.percentile(eye_img_grey_blur, 50))

        _, thresh_img = cv2.threshold(eye_img_grey_blur, thresh_val, 255, cv2.THRESH_BINARY)

        # Find all contours and throw away the big ones
        contours, _ = cv2.findContours(np.copy(thresh_img), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # materialise the filter so drawContours gets a list, not an iterator
        small_contours = [c for c in contours if cv2.contourArea(c) < max_specular_contour_area]
        small_contours_mask = np.zeros_like(eye_img_grey)
        cv2.drawContours(small_contours_mask, small_contours, -1, 255, -1)

        # Dilate the smallest contours found
        small_contours_mask_dilated = cv2.dilate(small_contours_mask, morph_kernel)

        removed_specular_img = cv2.inpaint(eye_img, small_contours_mask_dilated, 2, flags=cv2.INPAINT_TELEA)

        if debug:
            thresh_hierarchy = cv2.cvtColor(eye_img_grey, cv2.COLOR_GRAY2BGR)
            cv2.drawContours(thresh_hierarchy, contours, -1, (0, 0, 255), -1)
            thresh_hierarchy = cv2.add(thresh_hierarchy, cv2.cvtColor(small_contours_mask_dilated, cv2.COLOR_GRAY2BGR))
            cv2.drawContours(thresh_hierarchy, small_contours, -1, (255, 0, 0), -1)
            stacked_imgs = np.concatenate([eye_img, thresh_hierarchy, removed_specular_img], axis=1)

            if debug == 1:
                self.full_debug_img = stacked_imgs
            elif debug == 2:
                self.full_debug_img = image_utils.stack_imgs_vertical([self.full_debug_img, stacked_imgs])
                cv2.imshow(winname, self.full_debug_img)
            elif debug == 3:
                cv2.imshow(winname, stacked_imgs)

        return removed_specular_img
Code Example #24
File: threshold.py Project: alexMoreau/unshred
def _calc_block_mean_variance(image, mask, blocksize):
    """Adaptively determines image background.

    Args:
        image: converted 1-channel image.
        mask: 1-channel mask, same size as image.
        blocksize: adaptive algorithm parameter.

    Returns:
        image of same size as input with foreground inpainted with background.
    """
    I = image.copy()
    I_f = I.astype(np.float32) / 255.  # Used for mean and std.

    result = np.zeros(
        (image.shape[0] // blocksize, image.shape[1] // blocksize),
        dtype=np.float32)

    for i in range(0, image.shape[0] - blocksize, blocksize):
        for j in range(0, image.shape[1] - blocksize, blocksize):

            patch = I_f[i:i+blocksize+1, j:j+blocksize+1]
            mask_patch = mask[i:i+blocksize+1, j:j+blocksize+1]

            tmp1 = np.zeros((blocksize, blocksize))
            tmp2 = np.zeros((blocksize, blocksize))
            mean, std_dev = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)

            value = 0
            if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD:
                value = mean[0][0]

            result[i // blocksize, j // blocksize] = value

    small_image = cv2.resize(I, (image.shape[1] // blocksize,
                                 image.shape[0] // blocksize))

    res, inpaintmask = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)

    inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5,
                            cv2.INPAINT_TELEA)

    res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))

    return res
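A minimal usage sketch under stated assumptions: the file name and blocksize are placeholders, cv2 and numpy (as np) are imported as in the original module, and MEAN_VARIANCE_THRESHOLD is a module-level constant there (the value below is a guess):

MEAN_VARIANCE_THRESHOLD = 0.01  # assumed value; defined in the original module
page = cv2.imread("shred.png", cv2.IMREAD_GRAYSCALE)
mask = np.full(page.shape, 255, dtype=np.uint8)  # treat every pixel as known
background = _calc_block_mean_variance(page, mask, blocksize=21)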
Code Example #25
File: nodes.py Project: openalea/openalea-opencv
def inpaint(image, mask, radius=3, flags=cv2.INPAINT_NS):
    ### Added to Wralea.py
    """The function can be used to restore the image content in a provided
    region of interest using the neighborhood of this region.

    :Parameters:
    -`image` specifies the loaded image that should be processed.
    The image can be either an 8-bit 1- or 3-channel image.

    -`mask` specifies a mask by a provided 1-channel 8-bit image, in which
    non-zero pixels define the regions of interest, which should be restored.

    -`radius` specifies the neighborhood radius (in pixels) considered by
    the algorithm.

    -`flags`

    :Returns:
    -`restoredimage`, which is the image with the restored pixels.

    :Notes:
    """
    # cv2.inpaint requires an inpaint radius before the flags argument
    return cv2.inpaint(image, mask, radius, flags)
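A minimal usage sketch of the wrapper above (the file names are placeholders):

image = cv2.imread("damaged.png")        # 8-bit, 1- or 3-channel
mask = cv2.imread("defect_mask.png", 0)  # 8-bit, 1-channel; non-zero = restore
restored = inpaint(image, mask, flags=cv2.INPAINT_NS)
cv2.imwrite("restored.png", restored)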
Code Example #26
def pretty_depth(frame):
	'''Assumes a very raw 16 bit depth image from the kinect. Converts to an 8 bit image, keeping as much contrast as possible.'''
	frame = frame.copy()
	errs = np.where(frame==2047)
	# constrain the frame to 0-255
	frame = np.clip(frame, 768, 1023)
	frame = frame - 768
	frame = np.uint8(frame)
	# ok, now we have a uint8 object to deal with


	# now let's fix all the errors using inpainting
	mask = np.zeros(frame.shape, dtype=np.uint8)
	mask[errs] = 1
	inpainted = cv2.inpaint(frame,mask,2,cv2.INPAINT_TELEA)

	# there are also some errors around the edge of the image which dont get dealt with
	cropped = inpainted[1:-1, 1:-1]
	padded = cv2.copyMakeBorder(cropped,1,1,1,1,cv2.BORDER_REPLICATE)

	return padded
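A minimal usage sketch with a synthetic 16-bit frame; a real input would come from a Kinect driver, and the shape and values below are assumptions:

frame = np.random.randint(700, 1100, size=(480, 640), dtype=np.uint16)
frame[40:50, 60:70] = 2047  # the Kinect "no reading" sentinel handled above
img8 = pretty_depth(frame)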
Code Example #27
    def clean_depth_image(self, depth_image):
        """
        http://www.morethantechnical.com/2011/03/05/neat-opencv-smoothing-trick-when-kineacking-kinect-hacking-w-code/
        need to get a mask for inpaintMask
        """
        depth_image = depth_image.copy()

        depth_image[depth_image > MAX_DEPTH] = MAX_DEPTH + BUFFER_DEPTH
        depth_image = (depth_image / (MAX_DEPTH + BUFFER_DEPTH)) * 255

        depth_image = depth_image.astype(np.uint8)

        mask = np.zeros_like(depth_image)
        mask[depth_image == 0] = 255

        mask = cv2.resize(mask, (320, 240))
        depth_image = cv2.resize(depth_image, (320, 240))


        depth_image = cv2.inpaint(depth_image, mask, 5.0, cv2.INPAINT_TELEA)

        return depth_image
Code Example #28
    data = json.load(f)


db = soccer3d.YoutubeVideo(opt.path_to_data)
db.digest_metadata()


img = db.get_frame(0)
mask = db.get_mask_from_detectron(0)
cam_npy = db.calib[db.frame_basenames[0]]

cam = cam_utils.Camera('tmp', cam_npy['A'], cam_npy['R'], cam_npy['T'], db.shape[0], db.shape[1])

mask = cv2.dilate(mask, np.ones((9, 9), np.int8), iterations=1)

img = cv2.inpaint((img[:, :, (2, 1, 0)]*255).astype(np.uint8), (mask*255).astype(np.uint8), 3, cv2.INPAINT_TELEA)[:, :, (2, 1, 0)]/255.

W, H = 104.73, 67.74

#   a X-------------------------X b
#     |                         |
#     |                         |
#   d X-------------------------X c

# Whole field
p3 = np.array([[-W/2., 0, H/2], [W/2., 0, H/2], [W/2., 0, -H/2] , [-W/2., 0, -H/2]])
p2, _ = cam.project(p3)

pp2 = np.array([[0, 0], [db.shape[1]-1, 0], [db.shape[1], db.shape[0]], [0, db.shape[0]-1]])

filled = (img*255).astype(np.uint8)
Code Example #29
def LaplacianDeform(depth_in, handle_3d, intrinsic=None, vis=False):
    """depth_ctrl is a reference Nx3 depth map
    Args:
        depth_in: the depth predicted by network
        handle_3d: the control sparse points, in [x, y, z]
        intrinsic: the intrinsic matrix
        vis: whether visualize using igl

    Output:
        depth: the warped depth produced by ARAP
    """

    height, width = depth_in.shape[0], depth_in.shape[1]
    depth_in, y, x, handle_3d = ReScale(depth_in,
                                        handle_3d,
                                        intrinsic,
                                        get_image_coor=True)

    point_3d = uts_3d.depth2xyz(depth_in, intrinsic, False)
    select_id = y * width + x
    # test_id = range(10)
    # select_id = select_id[test_id]
    one_hot = np.zeros((point_3d.shape[0]), dtype=np.int32)
    one_hot[select_id] = 1
    mesh_idx = uts_3d.grid_mesh(height, width)

    V = igl.eigen.MatrixXd(np.float64(point_3d))
    U = V
    F = igl.eigen.MatrixXi(np.int32(mesh_idx))
    S = igl.eigen.MatrixXd(np.float64(one_hot))
    b = igl.eigen.MatrixXi(np.int32(select_id))

    P_origin = igl.eigen.MatrixXd(np.float64(point_3d[select_id, :]))
    P = igl.eigen.MatrixXd(np.float64(handle_3d))

    bc = igl.eigen.MatrixXd(np.float64(handle_3d))
    arap_data = igl.ARAPData()

    # Set color based on selection
    # C = igl.eigen.MatrixXd(F.rows(), 3)
    # purple = igl.eigen.MatrixXd([[80.0 / 255.0, 64.0 / 255.0, 255.0 / 255.0]])
    # gold = igl.eigen.MatrixXd([[255.0 / 255.0, 228.0 / 255.0, 58.0 / 255.0]])

    # pdb.set_trace()
    # for f in range(0, F.rows()):
    #     if S[F[f, 0]] > 0 or S[F[f, 1]] > 0 or S[F[f, 2]] > 0:
    #         C.setRow(f, purple)
    #     else:
    #         C.setRow(f, gold)

    # # Plot the mesh with pseudocolors
    # viewer = igl.viewer.Viewer()
    # viewer.data.set_mesh(V, F)
    # viewer.data.set_colors(C)
    # viewer.core.is_animating = False
    # viewer.launch()
    if vis:
        viewer = igl.viewer.Viewer()
        viewer.data.set_mesh(U, F)
        viewer.data.add_points(P, igl.eigen.MatrixXd([[1, 0, 0]]))
        viewer.core.is_animating = False
        viewer.launch()

    # start compute deform
    arap_data.max_iter = 30
    arap_data.ym = 450
    igl.arap_precomputation(V, F, V.cols(), b, arap_data)
    igl.arap_solve(bc, arap_data, U)

    if vis:
        viewer = igl.viewer.Viewer()
        viewer.data.set_mesh(V, F)
        # viewer.data.add_points(P_origin, igl.eigen.MatrixXd([[0, 0, 1]]))
        # viewer.data.add_points(P, igl.eigen.MatrixXd([[0, 1, 0]]))
        viewer.core.is_animating = False
        viewer.launch()

    point_3d_new = np.float32(np.array(U, dtype='float64', order='C'))
    depth = uts_3d.xyz2depth(point_3d_new, intrinsic, depth_in.shape)

    mask = depth <= 0
    max_depth = np.max(depth)
    depth_inpaint = cv2.inpaint(np.uint8(depth / max_depth * 255),
                                np.uint8(mask), 5, cv2.INPAINT_TELEA)
    depth[mask] = np.float32(depth_inpaint[mask]) * max_depth / 255

    return depth
Code Example #30
File: Inpaint.py Project: prajunnoy/Handwritting
# ret,thresh = cv2.threshold(img_gray,100,255,cv2.THRESH_BINARY_INV)
# Otsu did not work
# ret3, thresh_ = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

# contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(thresh,contours,-1,(255,255,255),5)

mask = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)
kernel = np.ones((5,5),np.uint8)
dilation = cv2.dilate(mask,kernel,iterations = 2)

cv2.imwrite('mask.jpg', dilation)


##########################  Inpaint Function  ##########################################
mask = cv2.imread('mask.jpg', 0)

dst_NS = cv2.inpaint(img, mask, 5, cv2.INPAINT_NS)

cv2.imwrite('inpaint.jpg', dst_NS)
# cv2.imwrite('in.jpg', dst_TELEA)

# plt.subplot(221),plt.imshow(img_gray)
# # plt.title('Original Image')
# plt.subplot(222),plt.imshow(thresh,'gray')
# # plt.title('Extracted extra region Image')
# plt.subplot(223),plt.imshow(dst_NS)
# # plt.title('Cropped Image')
# plt.show()
Code Example #31
            #########################
            # Prepare the stuff
            #########################

            # read images and create mask
            rgbImg = cv2.imread(rgbImgPath)
            depImg = cv2.imread(depImgPath, cv2.IMREAD_UNCHANGED)

            # inpainting
            scaleOri = np.amax(depImg)
            inPaiMa = np.where(depImg == 0.0, 255, 0)
            inPaiMa = inPaiMa.astype(np.uint8)
            inPaiDia = 5.0
            depth_refine = depImg.astype(np.float32)
            depPaint = cv2.inpaint(depth_refine, inPaiMa, inPaiDia, cv2.INPAINT_NS)

            depNorm = depPaint - np.amin(depPaint)
            rangeD = np.amax(depNorm)
            depNorm = np.divide(depNorm, rangeD)
            depth_refine = np.multiply(depNorm, scaleOri)


            # create image number and name
            template = '00000'
            s = int(s)
            ssm = int(ss) + 1
            pre = (s-1) * 1296
            img_id = pre + ssm
            tempSS = template[:-len(str(img_id))]
Code Example #32
File: inpaint.py Project: fantasy-mark/faceai
#coding=utf-8
# Image inpainting

import cv2
import numpy as np

path = "img/inpaint.png"

img = cv2.imread(path)
height, width, depth = img.shape[0:3]

# Binarize the image: colors outside [240, 240, 240]..[255, 255, 255] become 0
thresh = cv2.inRange(img, np.array([240, 240, 240]), np.array([255, 255, 255]))

# Create a structuring element of the desired shape and size
kernel = np.ones((3, 3), np.uint8)

# Dilate the region to be repaired
hi_mask = cv2.dilate(thresh, kernel, iterations=1)
specular = cv2.inpaint(img, hi_mask, 5, flags=cv2.INPAINT_TELEA)

cv2.namedWindow("Image", 0)
cv2.resizeWindow("Image", int(width / 2), int(height / 2))
cv2.imshow("Image", img)

cv2.namedWindow("newImage", 0)
cv2.resizeWindow("newImage", int(width / 2), int(height / 2))
cv2.imshow("newImage", specular)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #33
mask = cv.resize(mask, (513, 288), interpolation=cv.INTER_AREA)

video = cv.VideoWriter('lion_defenced.mp4', cv.VideoWriter_fourcc(*'MP4V'), 30,
                       (513, 288))

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame = cv.resize(frame, (513, 288), interpolation=cv.INTER_AREA)
        # dst = cv.inpaint(frame, mask, 3, cv.INPAINT_TELEA)
        dst = cv.inpaint(frame, mask, 3, cv.INPAINT_NS)
        video.write(dst)
        key = cv.waitKey(30)
        # compare against a key code, not a string
        if key == 27 or key == ord('q'):
            break
    else:
        break
Code Example #34
imgThresholdInv = cv2.bitwise_not(imgGreenThreshold)
imgThresholdColor = cv2.cvtColor(imgGreenThreshold, cv2.COLOR_GRAY2BGR)
imgDiff = cv2.add(imgInput, imgThresholdColor)
imgDiffHSV = cv2.cvtColor(imgDiff, cv2.COLOR_BGR2HSV)
imgGray = imgDiffHSV[:, :, 2]
cv2.imshow("imgGray", imgGray)
cv2.imshow("imgGreenThreshold", imgGreenThreshold)

r_img = m_img = np.array(imgGray)
rimg = spc.derive_m(imgInput, r_img)
s_img = spc.derive_saturation(imgInput, rimg)
spec_mask = spc.check_pixel_specularity(rimg, s_img)
cv2.imshow("spec_mask", spec_mask)
enlarged_spec = spc.enlarge_specularity(spec_mask)
radius = 12
telea = cv2.inpaint(imgInput, enlarged_spec, radius, cv2.INPAINT_TELEA)
ns = cv2.inpaint(imgInput, enlarged_spec, radius, cv2.INPAINT_NS)
cv2.imshow("telea", telea)
cv2.imshow("ns", ns)

#imgBlurred = cv2.GaussianBlur(imgGray, (3, 3), 0)
#ret, imgThresh = cv2.threshold(imgBlurred,  250, 255, cv2.THRESH_BINARY_INV)
#kernel = np.ones((3,3), np.uint8)

#dist = cv2.distanceTransform(imgGreenThreshold, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
#dist = cv2.normalize(dist, None, 255,0, cv2.NORM_MINMAX, cv2.CV_8UC1)
#cv2.imshow("dist", dist)

#kernel = np.ones((3,3), np.uint8)
#morph = cv2.morphologyEx(imgThresh, cv2.MORPH_CLOSE, kernel)
#morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
Code Example #35
#Resize
res = cv2.resize(img, None, fx=0.2, fy=0.2, interpolation=cv2.INTER_CUBIC)

#Erode
kernel = np.ones((5, 5), np.uint8)
imgEroded = cv2.erode(res, kernel, iterations=1)

#Convert to grayscale
gray = cv2.cvtColor(imgEroded, cv2.COLOR_BGR2GRAY)

#Binarize to get highlight
ret, mask = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY)

#Inpaint highlight
dst = cv2.inpaint(imgEroded, mask, 3, cv2.INPAINT_TELEA)

#Convert to HSV
HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)

while True:
    #    img = cv2.imread("lambo.png")
    h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
    h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
    s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
    s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
    v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
    v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
    print(h_min, h_max, s_min, s_max, v_min, v_max)
Code Example #36
def inpaint(img, mask):
    res = cv.inpaint(img, mask, 129, cv.INPAINT_NS)
    cv.imshow('result', res)
    cv.waitKey(800)
    return res
Code Example #37
import numpy as np
import cv2

img = cv2.imread('new2.jpg')
img_rgb=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_hsv=cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_hls=cv2.cvtColor(img, cv2.COLOR_BGR2HLS)

hsv_gray = cv2.cvtColor(img_hsv,cv2.COLOR_BGR2GRAY)
kernel_size = 5
hsv_blur = cv2.GaussianBlur(hsv_gray,(kernel_size, kernel_size),0)

ret, thresh = cv2.threshold(hsv_gray, 150, 255, cv2.THRESH_BINARY)
kernel = np.ones((2,2))
erosion = cv2.dilate(thresh,kernel,iterations = 1)

final = cv2.inpaint(img, thresh, 3, cv2.INPAINT_TELEA)

cv2.imshow('hsv_gray', hsv_gray)
cv2.imshow('img', img)
cv2.imshow('final', final)
cv2.imshow('erosion', erosion)
cv2.imshow('img_hsv', img_hsv)
#cv2.imshow('img_hls', img_hls)

cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #38
def inpaint(image, mask):
    k = numpy.ones((5, 5), numpy.uint8)
    m = cv2.dilate(mask, k, iterations=1)
    i = cv2.inpaint(image, m, 10, cv2.INPAINT_TELEA)
    return i
Code Example #39
import numpy as np
import cv2

img = cv2.imread("images/Messi.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

scratch = cv2.imread("images/Messi_scratches.jpg")
scratch_gray = cv2.cvtColor(scratch, cv2.COLOR_BGR2GRAY)
cv2.imshow("Scratch", scratch)

mask = scratch - img
abs_diff = cv2.absdiff(scratch_gray, img_gray)
# cv2.imshow("Abs Diff", abs_diff)

_, mask = cv2.threshold(abs_diff, 100, 255, cv2.THRESH_BINARY)
# cv2.imshow("Mask", mask)

dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
cv2.imshow('dst', dst)

cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #40
File: GMM_rodzial.py Project: checu/mgr
img_blur = cv2.GaussianBlur(img_RGB_after, (5, 5), 0)
mask = np.zeros(img_RGB_after.shape, np.uint8)

gray = cv2.cvtColor(img_RGB_after, cv2.COLOR_BGR2GRAY)

# laplacian = cv2.Laplacian(gray,cv2.CV_64F)
#
# cv2.imshow("laplacian", laplacian)

thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY_INV)[1]

# thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_OTSU)[1]

# cv2.imshow("tresholded image", thresh)

inpaint = cv2.inpaint(img_RGB_after, thresh, 3, cv2.INPAINT_TELEA)

# cv2.imshow("inpaint image", inpaint)

kernel = np.ones((6, 6), np.uint8)

closing = cv2.bitwise_not(thresh)

closing = cv2.morphologyEx(closing, cv2.MORPH_CLOSE, kernel)

closing = cv2.dilate(closing, None, iterations=2)
closing = cv2.erode(closing, None, iterations=2)

clop = np.where(closing[..., None] == 0, [255, 255, 255], [0, 0, 0])

cv2.imshow("clop img", closing)
Code Example #41
if __name__ == '__main__':
    #filename = os.path.join('prova.jpg')
    #image_orig = io.imread(filename)
    xmin = 351  #xmin
    ymin = 52  #ymin
    xmax = 598
    ymax = 140
    img = cv.imread('prova.jpg')
    h, w = img.shape[:-1]
    # Create mask with a bounding box over the defect region
    #mask=  np.zeros(img.shape[:-1])
    cvmask = np.zeros((h, w, 1), dtype=np.uint8)
    # xmax/ymax are corner coordinates, so slice directly to them
    cvmask[ymin:ymax, xmin:xmax] = 1

    #image_result = inpaint.inpaint_biharmonic(image_defect, mask, multichannel=True)
    image_result = cv.inpaint(img, cvmask, 3, cv.INPAINT_TELEA)
    #cv.imshow('dst', image_result)
    image_result2 = cv.inpaint(img, cvmask, 3, cv.INPAINT_NS)
    #cv.imshow('dst', image_result2)

    fig, axes = plt.subplots(ncols=2, nrows=2)
    ax = axes.ravel()

    ax[0].set_title('Original image')
    ax[0].imshow(img)

    ax[1].set_title('Mask')
    ax[1].imshow(cvmask, cmap=plt.cm.gray)

    #ax[2].set_title('Defected image')
    #ax[2].imshow(image_defect)
Code Example #42
def get_normal(depth_refine, fx=-1, fy=-1, cx=-1, cy=-1, for_vis=True):
    res_y = depth_refine.shape[0]
    res_x = depth_refine.shape[1]

    # inpainting
    scaleOri = np.amax(depth_refine)
    inPaiMa = np.where(depth_refine == 0.0, 255, 0)
    inPaiMa = inPaiMa.astype(np.uint8)
    inPaiDia = 5.0
    depth_refine = depth_refine.astype(np.float32)
    depPaint = cv2.inpaint(depth_refine, inPaiMa, inPaiDia, cv2.INPAINT_NS)

    depNorm = depPaint - np.amin(depPaint)
    rangeD = np.amax(depNorm)
    depNorm = np.divide(depNorm, rangeD)
    depth_refine = np.multiply(depNorm, scaleOri)

    centerX = cx
    centerY = cy

    constant = 1 / fx
    uv_table = np.zeros((res_y, res_x, 2), dtype=np.int16)
    column = np.arange(0, res_y)

    uv_table[:, :, 1] = np.arange(0, res_x) - centerX  # x-c_x (u)
    uv_table[:, :, 0] = column[:, np.newaxis] - centerY  # y-c_y (v)
    uv_table_sign = np.copy(uv_table)
    uv_table = np.abs(uv_table)

    # kernel = np.ones((5, 5), np.uint8)
    # depth_refine = cv2.dilate(depth_refine, kernel, iterations=1)
    # depth_refine = cv2.medianBlur(depth_refine, 5 )
    depth_refine = ndimage.gaussian_filter(depth_refine, 2)  # sigma=3)
    # depth_refine = ndimage.uniform_filter(depth_refine, size=11)

    # very_blurred = ndimage.gaussian_filter(face, sigma=5)
    v_x = np.zeros((res_y, res_x, 3))
    v_y = np.zeros((res_y, res_x, 3))
    normals = np.zeros((res_y, res_x, 3))

    dig = np.gradient(depth_refine, 2, edge_order=2)
    v_y[:, :, 0] = uv_table_sign[:, :, 1] * constant * dig[0]
    v_y[:, :, 1] = depth_refine * constant + (uv_table_sign[:, :, 0] * constant) * dig[0]
    v_y[:, :, 2] = dig[0]

    v_x[:, :, 0] = depth_refine * constant + uv_table_sign[:, :, 1] * constant * dig[1]
    v_x[:, :, 1] = uv_table_sign[:, :, 0] * constant * dig[1]
    v_x[:, :, 2] = dig[1]

    cross = np.cross(v_x.reshape(-1, 3), v_y.reshape(-1, 3))
    norm = np.expand_dims(np.linalg.norm(cross, axis=1), axis=1)
    # norm[norm == 0] = 1

    cross = cross / norm
    cross = cross.reshape(res_y, res_x, 3)
    cross = np.abs(cross)
    cross = np.nan_to_num(cross)

    # cross_ref = np.copy(cross)
    # cross[cross_ref==[0,0,0]]=0 #set zero for nan values

    # cam_angle = np.arccos(cross[:, :, 2])
    # cross[np.abs(cam_angle) > math.radians(75)] = 0  # high normal cut
    cross[depth_refine <= 400] = 0  # 0 and near range cut
    cross[depth_refine > depthCut] = 0  # far range cut
    if not for_vis:
        scaDep = 1.0 / np.nanmax(depth_refine)
        depth_refine = np.multiply(depth_refine, scaDep)
        cross[:, :, 0] = cross[:, :, 0] * (1 - (depth_refine - 0.5))  # nearer has higher intensity
        cross[:, :, 1] = cross[:, :, 1] * (1 - (depth_refine - 0.5))
        cross[:, :, 2] = cross[:, :, 2] * (1 - (depth_refine - 0.5))
        scaCro = 255.0 / np.nanmax(cross)
        cross = np.multiply(cross, scaCro)
        cross = cross.astype(np.uint8)

    return cross, depth_refine
Code Example #43
File: engine.py Project: zhang405744522/mrgaze
def FindRemoveGlint(roi, cfg):
    '''
    Locate small bright region roughly centered in ROI
    This function should be called before any major preprocessing of the frame.
    The ROI should be unscaled and unblurred, since we assume glints
    are saturated and have maximum intensity (255 in uint8)

    Arguments
    ----
    roi : 2D numpy uint8 array
        Pupil/iris ROI image
    cfg : configuration object
        Configuration parameters including fractional glint diameter estimate
    pupil_bw : 2D numpy unit8 array
        Black and white pupil segmentation

    Returns
    ----
    glint : float array
        N x 2 array of glint centroids
    glint_mask : 2D numpy uint8 array
        Bright region mask used by glint removal
    roi_noglint : 2D numpy uint8 array
        Pupil/iris ROI without small bright areas
    '''

    DEBUG = False

    # ROI dimensions and center
    ny, nx = roi.shape
    roi_cx, roi_cy = nx / 2.0, ny / 2.0

    if DEBUG:
        print("%s, %s" % (roi_cx, roi_cy))

    # Estimated glint diameter in pixels
    glint_d = int(cfg.getfloat('PUPILSEG', 'glintdiameterperc') * nx / 100.0)

    # Glint diameter should be >= 1 pixel
    if glint_d < 1:
        glint_d = 1

    # Reasonable upper and lower bounds on glint area (x3, /3)
    glint_A = np.pi * (glint_d / 2.0)**2
    A_min, A_max = glint_A / 3.0, glint_A * 9.0

    # print
    # print A_min
    # print A_max

    # Find bright pixels in full scale uint8 image (ie value > 254)
    bright = np.uint8(roi > 254)

    # Label connected regions (blobs)
    bright_labels = measure.label(bright, background=0) + 1

    # Get region properties for all bright blobs in mask
    bright_props = measure.regionprops(bright_labels)

    # Init glint parameters
    r_min = np.Inf
    glint_label = -1
    glint = (0, 0)
    glint_mask = np.zeros_like(roi, dtype="uint8")
    roi_noglint = roi.copy()

    # Find closest blob to ROI center within glint area range
    for props in bright_props:

        # Blob area in pixels^2
        A = props.area

        # Only accept blobs with area in glint range
        if A > A_min and A < A_max:
            # Check distance from ROI center

            cy, cx = props.centroid  # (row, col)
            r = np.sqrt((cx - roi_cx)**2 + (cy - roi_cy)**2)

            if r < r_min:
                r_min = r
                glint_label = props.label
                glint = (cx, cy)

    if glint_label > 0:

        # Construct glint mask
        glint_mask = np.uint8(bright_labels == glint_label)

        # Dilate glint mask
        k = glint_d
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
        glint_mask = cv2.morphologyEx(glint_mask, cv2.MORPH_DILATE, kernel)

        # Inpaint dilated glint in ROI
        roi_noglint = cv2.inpaint(roi, glint_mask, 3, cv2.INPAINT_TELEA)

    return glint, glint_mask, roi_noglint
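A minimal usage sketch, assuming a grayscale pupil/iris ROI and a configparser-style cfg; the section and key names follow the excerpt, while the percentage value and file name are assumptions:

import configparser
cfg = configparser.ConfigParser()
cfg["PUPILSEG"] = {"glintdiameterperc": "4.0"}
roi = cv2.imread("pupil_roi.png", cv2.IMREAD_GRAYSCALE)
glint, glint_mask, roi_noglint = FindRemoveGlint(roi, cfg)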
Code Example #44
# 2. Dilating the repair region:

# The repair region is detected and then dilated based on neighboring pixel values, so the missing pixels can be filled in. The cv2.inpaint() function offers two algorithms.

# One algorithm starts at the boundary of the region and works inward, gradually filling everything inside the boundary.
# It uses a small neighborhood around each pixel to repair it. The pixel is replaced by a normalized weighted sum of all known pixels in the neighborhood.
# Choosing the weights is an important matter: more weight is given to pixels lying near the point, near the boundary normal, and on the boundary contour.

# The other algorithm is based on fluid dynamics and uses partial differential equations. Its basic principle is heuristic.
# It first travels along the edges of the known region into the unknown region (because edges are continuous).
# It continues the isophotes (lines joining points of equal intensity, just as contour lines join points of equal height) while matching gradient vectors at the boundary of the repair region.
# Some methods from fluid dynamics are used for this. Once a color is obtained, it is filled in so as to minimize the variance in the region.
# Dilate the region to be repaired
hi_mask = cv2.dilate(thresh, kernel, iterations=1)
specular = cv2.inpaint(img, hi_mask, 5, flags=cv2.INPAINT_TELEA)
cv2.namedWindow("Image", 0)
cv2.resizeWindow("Image", int(width / 2), int(hight / 2))
cv2.imshow("Image", img)
cv2.namedWindow("newImage", 0)
cv2.resizeWindow("newImage", int(width / 2), int(hight / 2))
a=cv2.imshow("newImage", specular)
cv2.imwrite("43.jpg",specular)
cv2.waitKey(0)
cv2.destroyAllWindows()



# Building stage two of the repair program

# 1. Second step of the image processing:
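A minimal sketch comparing the two algorithms described above on the same mask (the image and mask file names are placeholder assumptions):

img = cv2.imread("damaged.jpg")
damage_mask = cv2.imread("damage_mask.jpg", 0)  # non-zero marks pixels to repair
telea = cv2.inpaint(img, damage_mask, 5, flags=cv2.INPAINT_TELEA)  # fast marching
ns = cv2.inpaint(img, damage_mask, 5, flags=cv2.INPAINT_NS)        # Navier-Stokes
cv2.imwrite("repaired_telea.jpg", telea)
cv2.imwrite("repaired_ns.jpg", ns)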
Code Example #45
File: haircut.py Project: ZhangYuewan/hair_detect
def haircut(img_thresh, img, coordidate, sideLength):
    coordidate_length = len(coordidate)
    x = []
    y = []
    for i in range(coordidate_length):
        coordidate_tuple = coordidate[i]
        x_mid = int(coordidate_tuple[0] + coordidate_tuple[2]) // 2
        y_mid = int(coordidate_tuple[1] + coordidate_tuple[3]) // 2
        x.append(y_mid)
        y.append(x_mid)

    cv2.imshow('src', img)
    cv2.waitKey(0)
    rows = img_thresh.shape[0]
    cols = img_thresh.shape[1]
    # print(rows)
    # print(cols)
    for i in range(rows):
        for j in range(cols):
            if img_thresh[i][j] < 180:
                img_thresh[i][j] = 0
            else:
                img_thresh[i][j] = 255
    length = len(x)
    # Shift points that would fall outside the image boundary
    for i in range(length):
        if x[i] - sideLength // 2 < 0:
            abs = sideLength // 2 - x[i]
            x[i] = x[i] + abs
        if x[i] + sideLength // 2 > rows:
            abs = x[i] + sideLength // 2 - rows
            x[i] = x[i] - abs
        if y[i] - sideLength // 2 < 0:
            abs = sideLength // 2 - y[i]
            y[i] = y[i] + abs
        if y[i] + sideLength // 2 > cols:
            abs = y[i] + sideLength // 2 - cols
            y[i] = y[i] - abs

    x1 = []
    y1 = []
    x2 = []
    y2 = []
    for i in range(length):
        a1 = int(x[i] - sideLength // 2)
        b1 = int(y[i] - sideLength // 2)
        a2 = int(x[i] + sideLength // 2)
        b2 = int(y[i] + sideLength // 2)
        x1.append(a1)
        y1.append(b1)
        x2.append(a2)
        y2.append(b2)

    # print(x1)
    # print(y1)
    # print(x2)
    # print(y2)
    # Code for the shaving step
    # Convert the original image to grayscale
    grayScale = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # cv2.imwrite('grayScale_sample1.jpg', grayScale, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

    # Kernel for the morphological filtering
    kernel = cv2.getStructuringElement(1, (60, 60))

    # Perform the blackHat filtering on the grayscale image to find the
    # hair countours
    blackhat = cv2.morphologyEx(grayScale, cv2.MORPH_BLACKHAT, kernel)
    # cv2.imwrite('blackhat_sample1.jpg', blackhat, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

    # intensify the hair countours in preparation for the inpainting
    # algorithm
    ret, thresh2 = cv2.threshold(blackhat, 10, 255, cv2.THRESH_BINARY)
    print(thresh2.shape)
    # cv2.imwrite('thresholded_sample1.jpg', thresh2, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

    # inpaint the original image depending on the mask
    dst = cv2.inpaint(img, thresh2, 1, cv2.INPAINT_TELEA)
    cv2.imwrite('./pic/temp/background.jpg', dst,
                [int(cv2.IMWRITE_JPEG_QUALITY), 90])
    background = cv2.imread("./pic/temp/background.jpg")

    # Render hair onto the bald image
    result = img.copy()
    for i in range(length):
        for m in range(x1[i], x2[i]):
            for n in range(y1[i], y2[i]):
                result[m][n] = [6, 6, 6]

    print('Done')

    for i in range(rows):
        for j in range(cols):
            if all(result[i][j] != [6, 6, 6]):
                result[i][j] = background[i][j]

    for i in range(length):
        for m in range(x1[i], x2[i]):
            for n in range(y1[i], y2[i]):
                result[m][n] = img[m][n]
    return result
Code Example #46
def colour(counter, marker, distance):
    if Timing:
        Start_of_code = time.time()

    results = []  # empty list
    for k in range(1, counter + 1):
        num_storage = []  # store tuple colour's value in RGB
        name_storage = []  # stores the colour's name
        img = cv2.imread("colour%d.png" % k)

        height, width, numchannels = img.shape

        roi = img[int((height / 2) -
                      (height / 2) * 0.85):int((height / 2) +
                                               (height / 2) * 0.85),
                  int((width / 2) -
                      (width / 2) * 0.85):int((width / 2) +
                                              (width / 2) * 0.85)]

        nroi = cv2.resize(roi, (100, 100))

        imgGray = cv2.cvtColor(nroi, cv2.COLOR_BGR2GRAY)

        # img = cv2.imread("colour%d.png" % k, cv2.COLOR_BGR2RGB)
        Gauss = cv2.GaussianBlur(imgGray, (5, 5), 0)
        # blur = cv2.medianBlur(Gauss, 7)
        # fliter = cv2.bilateralFilter(blur, 15, 75, 75)
        # kernel = np.ones((10, 10), np.uint8)
        # erode = cv2.erode(Gauss, kernel, iterations=10)
        # dilation = cv2.dilate(erode, kernel, iterations=20)
        # denoised = cv2.fastNlMeansDenoisingColored(dilation, None, 10, 10, 7, 21)

        ret, otsu = cv2.threshold(Gauss, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        inpaint = cv2.inpaint(nroi, otsu, 3, cv2.INPAINT_NS)

        h = 8
        w = 8
        # h, w = img[:2]

        current_mode = 0

        # # defined boundaries HSV
        # boundaries = [("black", [0, 0, 0], [179, 255, 50]), ("white", [0, 0, 185], [179, 30, 255]),
        #                 ("orange", [10, 30, 50], [25, 255, 255]), ("yellow", [25, 30, 50], [35, 255, 255]),
        #                 ("green", [35, 30, 50], [85, 255, 255]), ("blue", [85, 30, 50], [130, 255, 255]),
        #                 ("purple", [130, 30, 50], [145, 255, 255]), ("pink", [145, 30, 50], [165, 255, 255]),
        #                 ("red", [165, 30, 50], [179, 255, 255]), ("red", [0, 30, 50], [10, 255, 255]),
        #                 ("grey", [0, 0, 50], [179, 30, 185])]

        # # defined boundaries RGB
        # boundaries = [("black", [0, 0, 0]), ("white", [255, 255, 255]),
        #               ("orange", [255, 165, 0]), ("yellow", [255, 255, 0]),
        #               ("green", [0, 128, 0]), ("blue", [0, 0, 255]),
        #               ("purple", [128, 0, 128]), ("pink", [255, 192, 203]),
        #               ("red", [255, 0, 0]), ("grey", [128, 128, 128]),
        #               ("aqua", [0, 255, 255]), ("fuchsia", [255, 0, 255]),
        #               ("silver", [192, 192, 192]), ("maroon", [128, 0, 0]),
        #               ("olive", [128, 128, 0]), ("lime", [0, 255, 0]),
        #               ("teal", [0, 128, 128]), ("navy", [0, 0, 128]),]

        # # defined boundaries RGB
        # boundaries = [("black", [0, 0, 0]), ("white", [255, 255, 255]),
        #               ("yellow", [255, 255, 0]), ("purple", [128, 0, 128]),
        #               ("green", [0, 128, 0]), ("blue", [0, 0, 255]),
        #               ("red", [255, 0, 0]), ("grey", [128, 128, 128]),
        #               ("blue", [0, 255, 255]), ("pink", [255, 0, 255]),
        #               ("grey", [192, 192, 192]), ("red", [128, 0, 0]),
        #               ("yellow", [128, 128, 0]), ("green", [0, 255, 0]),
        #               ("blue", [0, 128, 128]), ("blue", [0, 0, 128])]

        # # defined boundaries HSV
        # boundaries = [("black", [0, 0, 0]), ("white", [0, 0, 255]),
        #               ("yellow", [30, 255, 255]), ("purple", [150, 255, 127]),
        #               ("green", [60, 255, 127]), ("blue", [120, 255, 255]),
        #               ("red", [0, 255, 255]), ("grey", [0, 0, 127]),
        #               ("blue", [90, 255, 255]), ("pink", [150, 255, 255]),
        #               ("grey", [0, 0, 191]), ("red", [0, 255, 127]),
        #               ("yellow", [30, 255, 127]), ("green", [60, 255, 255]),
        #               ("blue", [90, 255, 127]), ("blue", [120, 255, 127])]

        # # defined boundaries HSV
        # boundaries = [("black", [0, 0, 0], [179, 255, 50]), ("white", [0, 0, 179], [179, 38, 255]),
        #               ("orange", [15, 38, 50], [22, 255, 255]), ("yellow", [23, 38, 50], [44, 255, 255]),
        #               ("yellow green", [45, 38, 50], [52, 255, 255]), ("green", [53, 38, 50], [74, 255, 255]),
        #               ("green cyan", [75, 38, 50], [82, 255, 255]), ("cyan", [83, 38, 50], [104, 255, 255]),
        #               ("blue cyan", [105, 38, 50], [112, 255, 255]), ("blue", [113, 38, 50], [134, 255, 255]),
        #               ("violet", [135, 38, 50], [142, 255, 255]), ("magenta", [143, 38, 50], [164, 255, 255]),
        #               ("red magenta", [165, 38, 50], [172, 255, 255]), ("red", [0, 38, 50], [14, 255, 255]),
        #               ("red", [173, 38, 50], [180, 255, 255]), ("gray", [0, 0, 50], [179, 38, 179])]

        # Colour Boundaries red, blue, yellow and gray HSV
        # this requires dtype="uint16" for lower and upper & HSV = np.float32(HSV) before the conversion of HSV_FULL
        boundaries = [("black", [0, 0, 0], [360, 255, 50]),
                      ("white", [0, 0, 179], [360, 38, 255]),
                      ("orange", [15, 38, 50], [31, 255, 255]),
                      ("yellow", [31, 38, 50], [75, 255, 255]),
                      ("yellow green", [75, 38, 50], [91, 255, 255]),
                      ("green", [91, 38, 50], [135, 255, 255]),
                      ("green cyan", [135, 38, 50], [150, 255, 255]),
                      ("cyan", [150, 38, 50], [195, 255, 255]),
                      ("blue cyan", [195, 38, 50], [210, 255, 255]),
                      ("blue", [210, 38, 50], [255, 255, 255]),
                      ("violet", [255, 38, 50], [270, 255, 255]),
                      ("magenta", [270, 38, 50], [315, 255, 255]),
                      ("red magenta", [315, 38, 50], [330, 255, 255]),
                      ("red", [0, 38, 50], [15, 255, 255]),
                      ("red", [330, 38, 50], [360, 255, 255]),
                      ("gray", [0, 0, 50], [360, 38, 179])]

        resizeBGR = cv2.resize(
            nroi,
            (w,
             h))  # reduces the size of the image so the process would run fast

        if Save_Data:
            if not os.path.exists("color"):
                os.makedirs("color")
            color_result = "color/{0}_{1}.png".format(marker, k)
            color_result_des = os.path.join(script_dir, color_result)
            cv2.imwrite(color_result_des, nroi)

        if Static_Test:
            color_result = "{0}/{1}_{2}.png".format(distance, marker, k)
            color_result_des = os.path.join(script_dir, color_result)
            cv2.imwrite(color_result_des, nroi)

        if Rover_Marker:
            marker_name = "marker={0}".format(marker)
            color_result = "{0}/{1}_{2}.png".format(marker_name, marker, k)
            color_result_des = os.path.join(script_dir, color_result)
            cv2.imwrite(color_result_des, nroi)

        # print(resizeBGR[1,1])

        resizeBGR = np.float32(resizeBGR)
        resizeHSV = cv2.cvtColor(resizeBGR, cv2.COLOR_BGR2HSV_FULL)
        resizeHSV[:, :, 1] = np.dot(resizeHSV[:, :, 1], 255)
        # resizeRGB = cv2.cvtColor(resizeBGR, cv2.COLOR_BGR2RGB)

        # print(resizeHSV[1,1])

        if Step_color:
            # for x in range(0, w):
            #   for y in range(0, h):
            #     num_storage.append(resizeRGB[x, y])

            # print(num_storage)

            # 'dilation' and 'denoised' only exist in the commented-out
            # preprocessing variants above, so showing them would raise NameError
            # cv2.imshow("fliter", dilation)
            # cv2.imshow("denos", denoised)
            cv2.imshow("resize", resizeBGR)
            cv2.waitKey(0)
        # end if

        # for i in range(0, h):
        #   for j in range(0, w):
        #     RGB = resizeHSV[i, j]
        #     differences = []
        #     for (name, value) in boundaries:
        #       for component1, component2 in zip(RGB, value):
        #         difference = sum([abs(component1 - component2)])
        #         differences.append([difference, name])
        #     differences.sort()
        #     name_storage.append(differences[0][1])
        #
        # majority = Counter(name_storage)
        # results.append(majority.most_common(1)[0][0])

        # for i in range(0, h):
        #   for j in range(0, w):
        #     # RGB = []
        #     RGB = resizeRGB[i, j]
        #     # num_storage.append(RGB)
        #     # Finds the nearest colour name within the webcolors dataset by converting the classification to rgb then then find the closest the square is to remove the negative value.
        #     try:
        #       colorname = webcolors.rgb_to_name(RGB)
        #       name_storage.append(colorname)
        #
        #     except ValueError:
        #       min_colours = {}
        #       for key, name in webcolors.CSS3_HEX_TO_NAMES.items():
        #         r_c, g_c, b_c = webcolors.hex_to_rgb(key)
        #         rd = (r_c - RGB[0]) ** 2
        #         gd = (g_c - RGB[1]) ** 2
        #         bd = (b_c - RGB[2]) ** 2
        #         min_colours[(rd + gd + bd)] = name
        #       name_storage.append(min_colours[min(min_colours.keys())])
        #
        # majority = Counter(name_storage)
        #
        # results.append(majority.most_common(1)[0][0])

        # comparing each pixel of the picture and append the colour name in to a list (BGR to RGB to get the name)
        for (color, lower, upper) in boundaries:
            lower = np.array(lower, dtype="uint16")
            upper = np.array(upper, dtype="uint16")

            mask = cv2.inRange(resizeHSV, lower, upper)

            ratio = np.round(
                (cv2.countNonZero(mask) / (resizeHSV.size / 3)) * 100, 2)
            if ratio > current_mode:
                current_mode = ratio
                name_storage.append(color)
            else:
                pass
        results.append(name_storage[-1])

    mode = Counter(results)

    if Step_color:
        print(name_storage)
        # 'majority' is only defined in the commented-out variants above
        # print(majority)
        print(mode)

    if mode == Counter():
        colourname = "None"
    else:
        colourname = mode.most_common(1)[0][0]

    if Timing:
        Duration_of_color_recognition = time.time() - Start_of_code
        print("Duration of Colour Recognition = {0}".format(
            Duration_of_color_recognition))

        # with open('Character Color.csv', 'a') as csvfile:  # for testing purposes
        #   filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        #   filewriter.writerow(
        #     [str(marker), str(Duration_of_color_recognition)])

    return colourname
Code Example #47
    
print('maxW = %d minW = %d maxH = %d minH = %d'%(max(widthList),min(widthList),max(heightList),min(heightList)))
print('Resize operation finished!')

## Remove the red annotation boxes from the images
for imgName in tqdm(os.listdir(dsr2)):
    imgDir = dsr2 + imgName
    img = cv2.imread(imgDir)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    h, w, c = img.shape
    mask = np.zeros((h, w, c), np.uint8)
    for i in range(h):
        for j in range(w):
            data = img[i][j]
            # uint8 subtraction overflows, so cast to int before subtracting
            if int(data[0]) > 100 and abs(int(data[0]) - int(data[1])) > 50:
                # Build the inpainting mask (the red regions of the image);
                # the mask only needs a single channel
                mask[i][j] += 255
    # inpaint once per image, after the whole mask has been built
    dstimg = cv2.inpaint(img, mask[:, :, 0], 20, cv2.INPAINT_TELEA)
    cv2.imwrite(dsr2 + imgName, dstimg)
print('Get rid of the red box is finished!')
コード例 #48
0
    def warp_image(self, inputs):

        image = np.array(inputs['left_image'])
        if self.background_mode == 'background':
            background_image = np.array(inputs['background'])

        warped_image = np.zeros_like(image).astype(float)
        warped_image = np.stack([warped_image] * 2, 0)
        pix_locations = self.xs - inputs['disparity']

        mask = self.get_occlusion_mask(pix_locations)
        masked_pix_locations = pix_locations * mask - self.process_width * (
            1 - mask)

        # do projection - linear interpolate up to 1 pixel away
        weights = np.ones((2, self.crop_height, self.process_width)) * 10000

        for col in range(self.process_width - 1, -1, -1):
            loc = masked_pix_locations[:, col]
            loc_up = np.ceil(loc).astype(int)
            loc_down = np.floor(loc).astype(int)
            weight_up = loc_up - loc
            weight_down = 1 - weight_up

            mask = loc_up >= 0
            mask[mask] = \
                weights[0, np.arange(self.crop_height)[mask], loc_up[mask]] > weight_up[mask]
            weights[0, np.arange(self.crop_height)[mask], loc_up[mask]] = \
                weight_up[mask]
            warped_image[0, np.arange(self.crop_height)[mask], loc_up[mask]] = \
                image[:, col][mask] / 255.

            mask = loc_down >= 0
            mask[mask] = \
                weights[1, np.arange(self.crop_height)[mask], loc_down[mask]] > weight_down[mask]
            weights[1, np.arange(self.crop_height)[mask],
                    loc_down[mask]] = weight_down[mask]
            warped_image[1, np.arange(self.crop_height)[mask], loc_down[mask]] = \
                image[:, col][mask] / 255.

        weights /= weights.sum(0, keepdims=True) + 1e-7  # normalise
        weights = np.expand_dims(weights, -1)
        warped_image = warped_image[0] * weights[1] + warped_image[
            1] * weights[0]
        warped_image *= 255.

        # now fill occluded regions with random background

        if self.background_mode == 'background':
            warped_image[warped_image.max(-1) == 0] = background_image[
                warped_image.max(-1) == 0]
            warped_image = warped_image.astype(np.uint8)
        else:
            _h, _w = warped_image.shape[:2]
            inpaint_mask = np.zeros(shape=(_h, _w, 1), dtype=np.uint8)

            cmask = (warped_image.max(-1) == 0)
            inpaint_mask[cmask] = 1
            inputs['mask'] = inpaint_mask
            if self.background_mode == 'opencv':
                warped_image[cmask] = 0
                warped_image = warped_image.astype(np.uint8)
                warped_image = cv2.inpaint(warped_image, inpaint_mask, 3,
                                           cv2.INPAINT_TELEA)
            else:
                warped_image[cmask] = 128
                warped_image = warped_image.astype(np.uint8)

            inputs['mask'] = inputs['mask'][:, :self.crop_width]
            ## Do this outside
            # inference = self.Inpainter.inference(warped_image, inpaint_mask)
            # inference = (tensor2img(inference) * 255).astype(int)
            # warped_image = np.where(inpaint_mask == 0, warped_image, inference)
        # plt.imshow(warped_image)
        # plt.show()
        # exit()

        return warped_image
コード例 #49
0
    def get_negatives_stitching(self, path, mode=1):
        import cv2
        import os
        import scipy.ndimage.morphology as morph
        import random

        # decide if the negative is of color or shape (only one, so that they cannot mix)
        color_or_shape = np.random.randint(
            0, 3)  # 0 is color, 1 is shape, 2 is material

        path_image_original = self.path_dataset + '/images/' + self.split + '/positive/' + path + '.' + self.image_extension
        index_object_original = np.random.randint(0, 3)
        image_original = cv2.imread(path_image_original)
        seg_original = cv2.imread(
            path_image_original.replace('positive', 'segmentation'))
        with open(
                os.path.join(self.path_dataset, 'scenes', self.split,
                             f'{path}.json'), 'rb') as f:
            scene_original = json.load(f)
        color_original = scene_original['objects'][index_object_original][
            'color']
        shape_original = scene_original['objects'][index_object_original][
            'shape']
        material_original = scene_original['objects'][index_object_original][
            'material']
        position = scene_original['objects'][index_object_original][
            'pixel_coords'][:2]

        # inpaint hole of previous object
        mask_inpaint = (seg_original[..., 2] == index_object_original +
                        1).astype(np.uint8)
        mask_inpaint = morph.binary_dilation(mask_inpaint,
                                             structure=np.ones(
                                                 (5, 5))).astype(np.uint8)
        image_inpainted = cv2.inpaint(image_original, mask_inpaint, 3,
                                      cv2.INPAINT_TELEA)

        # open other image
        other_correct = False

        while not other_correct:
            path_other = random.choice(list(self.paths.values()))
            index_object_other = np.random.randint(0, 3)
            with open(
                    os.path.join(self.path_dataset, 'scenes', self.split,
                                 f'{path_other}.json'), 'rb') as f:
                scene_other = json.load(f)
            color_other = scene_other['objects'][index_object_other]['color']
            shape_other = scene_other['objects'][index_object_other]['shape']
            material_other = scene_other['objects'][index_object_other][
                'material']
            # brute force...
            if mode == 1:
                if color_or_shape == 1 and (color_other != color_original or
                                            material_other != material_original
                                            or shape_other == shape_original):
                    continue
                elif color_or_shape == 0 and (
                        color_other == color_original
                        or material_other != material_original
                        or shape_other != shape_original):
                    continue
                elif color_or_shape == 2 and (
                        color_other != color_original or material_other
                        == material_original or shape_other != shape_original):
                    continue
                else:
                    other_correct = True
            elif mode == 2:
                if color_or_shape == 1 and (shape_other == shape_original):
                    continue
                elif color_or_shape == 0 and (color_other == color_original):
                    continue
                elif color_or_shape == 2 and (material_other
                                              == material_original):
                    continue
                else:
                    other_correct = True

            path_image_other = self.path_dataset + '/images/' + self.split + '/positive/' + path_other + '.' + self.image_extension
            image_other = cv2.imread(path_image_other)
            seg_other = cv2.imread(
                path_image_other.replace('positive', 'segmentation'))

        # stitch object
        # %10 handles the segmentation ids in the SCLEVR3 case; otherwise it is a no-op
        mask_stitch = (seg_other[..., 2] % 10) == index_object_other + 1
        mask_stitch = mask_stitch.astype(np.uint8)
        # extend mask so that the gradients (borders) are visible
        mask_stitch = morph.binary_dilation(mask_stitch,
                                            structure=np.ones(
                                                (10, 10))).astype(np.uint8)
        mask_stitch = np.stack((mask_stitch, ) * 3, -1) * 255

        try:
            image_stitched = cv2.seamlessClone(image_other, image_inpainted,
                                               mask_stitch, tuple(position),
                                               cv2.NORMAL_CLONE)
        except cv2.error:
            print('retry')
            return self.get_negatives_stitching(path, mode)

        img = Image.fromarray(image_stitched).convert('RGB')
        img = self.transform(img)
        return img
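A side note on the cv2.seamlessClone call above: it takes the source patch, the destination image, an 8-bit mask over the source, the centre point in the destination, and a blend flag. A minimal standalone sketch (file names are placeholders):

import cv2
import numpy as np

src = cv2.imread('patch.png')   # object to paste (placeholder path)
dst = cv2.imread('scene.png')   # background image (placeholder path)
mask = 255 * np.ones(src.shape[:2], np.uint8)  # clone the whole patch
center = (dst.shape[1] // 2, dst.shape[0] // 2)  # (x, y) in the destination

# NORMAL_CLONE keeps the source texture; MIXED_CLONE mixes gradients with
# the destination, which can hide seams on textured backgrounds
out = cv2.seamlessClone(src, dst, mask, center, cv2.NORMAL_CLONE)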
コード例 #50
0
def extract(anns: list, img: np.ndarray, config: AugSegConfig):
    """
    Inpaint background, extract list of instances (in overlapping groups), get transformations and other useful
    information from input image and annotations.
    :param anns: list of input annotations
    :param img: input original image
    :param config: an AugSegConfig instance containing transform parameters
    :return: [background: inpainted background image
              instances_list: list of instance rgba images
              transforms_list: list of transformation dicts
              groupbnd_list: list of bounding boxes
              group: list of instance indices in groups]
    """
    width = img.shape[1]
    height = img.shape[0]

    mask, labels = __get_coco_masks(anns, height, width)

    # inpainting
    #inpaint_mask = np.uint8(mask > 0)
    #inpaint_mask = ndimage.binary_dilation(inpaint_mask, structure=ndimage.generate_binary_structure(2, 2), iterations=2)
    background = cv2.inpaint(img, np.uint8(mask), 5, cv2.INPAINT_NS)

    numinst = len(anns)
    bboxs = [ann['bbox'] for ann in anns]

    inst_group_belonging = [0] * numinst
    group = []
    group_idx = 1
    for i in range(numinst):
        if inst_group_belonging[i] == 0:
            group.append([i])
            inst_group_belonging[i] = group_idx
            dfs(bboxs, inst_group_belonging, group[len(group) - 1], group_idx, i)
            group_idx += 1
    realbboxs = []

    instances_list = []
    transforms_list = []
    groupbnd_list = []
    for i in range(len(group)):
        x, y, w, h = bboxs[group[i][0]]
        realbboxs.append([x, y, x + w, y + h])
        for j in range(len(group[i])):
            x, y, w, h = bboxs[group[i][j]]
            realbboxs[i][0] = min(realbboxs[i][0], x)
            realbboxs[i][1] = min(realbboxs[i][1], y)
            realbboxs[i][2] = max(realbboxs[i][2], x + w)
            realbboxs[i][3] = max(realbboxs[i][3], y + h)
            xmin, ymin, xmax, ymax = realbboxs[i]

        maskgroupi = get_masks(mask, group[i])
        trimapi = gettrimap(maskgroupi, 5)

        alphamapi = global_matting(img, trimapi)
        alphamapi = guided_filter(img, trimapi, alphamapi, 10, 1e-5)

        ymin, ymax, xmin, xmax = [int(round(x)) for x in (ymin, ymax, xmin, xmax)]
        resulti = np.dstack((img[ymin:ymax, xmin:xmax], alphamapi[ymin:ymax, xmin:xmax]))

        restricts = get_restriction([xmin, ymin, xmax, ymax], width, height)
        resulti, transformi = get_transform(resulti, restricts, config) # resulti may be flipped

        transformi['tx'] += (xmin + xmax) / 2
        transformi['ty'] += (ymin + ymax) / 2

        instances_list.append(resulti)
        transforms_list.append(transformi)
        groupbnd_list.append([xmin, ymin, xmax, ymax])

    return background, instances_list, transforms_list, groupbnd_list, group
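The `dfs` helper is not shown in this excerpt; a plausible implementation, assuming it groups instances whose COCO-style [x, y, w, h] boxes overlap (hypothetical helper, signature matching the call site above):

def _bbox_overlap(a, b):
    # a, b are COCO-style [x, y, w, h] boxes
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    return ax < bx + bw and bx < ax + aw and ay < by + bh and by < ay + ah

def dfs(bboxs, inst_group_belonging, members, group_idx, i):
    # depth-first search: pull every unassigned instance whose bbox
    # overlaps instance i into the current group
    for j in range(len(bboxs)):
        if inst_group_belonging[j] == 0 and _bbox_overlap(bboxs[i], bboxs[j]):
            inst_group_belonging[j] = group_idx
            members.append(j)
            dfs(bboxs, inst_group_belonging, members, group_idx, j)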
コード例 #51
0
def inpaint(image, mask):
    RADIUS = 3
    return cv2.inpaint(image, mask, RADIUS, cv2.INPAINT_TELEA)
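A minimal usage sketch for the wrapper above: the mask must be 8-bit and single-channel, with nonzero pixels marking the region to fill (the image path is a placeholder):

import cv2
import numpy as np

image = cv2.imread('photo.jpg')              # placeholder path
mask = np.zeros(image.shape[:2], np.uint8)
mask[100:140, 200:260] = 255                 # rectangle to reconstruct

restored = inpaint(image, mask)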
コード例 #52
0
    def __apply_inpaint(self, frame, mask):
        output = cv.inpaint(frame, mask, 3, cv.INPAINT_TELEA)
        return output
コード例 #53
0
# -*- coding: utf-8 -*-
import cv2

path = 'C:/Users/yyw/Downloads/taobao/a/TB24c7ls_lYBeNjSszcXXbwhFXa_!!2871484755.jpg'
img = cv2.imread(path)
# pixels brighter than 210 form the watermark mask; one channel is enough
mask = cv2.threshold(img, 210, 255, cv2.THRESH_BINARY)[1][:, :, 0]

dst = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)

cv2.imshow("inpainted image", dst)
cv2.waitKey(0)
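The snippet above uses INPAINT_NS; swapping the flag gives the Telea variant for comparison. A minimal sketch reusing `img` and `mask` from above:

# Navier-Stokes propagates isophotes into the hole; Telea marches inward
# from the boundary. Results differ mostly on larger regions.
dst_ns = cv2.inpaint(img, mask, 7, cv2.INPAINT_NS)
dst_telea = cv2.inpaint(img, mask, 7, cv2.INPAINT_TELEA)
cv2.imshow('NS', dst_ns)
cv2.imshow('TELEA', dst_telea)
cv2.waitKey(0)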
コード例 #54
0
upper2 = np.array([7, 255, 255], dtype=np.uint8)
mask2 = cv2.inRange(hsvImage, lower2, upper2)

mask = cv2.bitwise_or(mask1, mask2)

# Perform morphological close operation to connect the laser lines
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=15)

# Subtract red laser beams from the image
channels = cv2.split(image)
maskDilation = cv2.dilate(mask, kernel, iterations=5)
maskInverse = cv2.bitwise_not(maskDilation)
redChannel = cv2.bitwise_and(channels[2], maskInverse)
image = cv2.merge([channels[0], channels[1], redChannel])
image = cv2.inpaint(image, maskDilation, 3, cv2.INPAINT_TELEA)

# Find and draw contours
image2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    if (cv2.contourArea(cnt) >= 10000 and cv2.contourArea(cnt) <= 3000000):
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image, [box], 0, (255, 0, 0), 50)
# cv2.drawContours(image, contours, -1, (0,255,0), 5)

# Display the result
result = cv2.cvtColor(redChannel, cv2.COLOR_GRAY2BGR)
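Note that the cv2.findContours call above uses the OpenCV 3 signature (three return values); OpenCV 4 returns only two. A version-agnostic sketch:

# OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 drops the image
ret = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = ret[-2:]  # valid for both signatures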
コード例 #55
0
def main(argv):
    # [load_image]
    if len(argv) < 1:
        print('Not enough parameters')
        print('Usage:\nmorph_lines_detection.py < path_to_image >')
        return -1
    # print(argv[0])
    argv = base64.b64decode(argv).decode("utf-8")
    # Load the image (cv2.imread returns None on failure, matching the check below)
    src = cv2.imread(argv)
    # Check if image is loaded fine
    if src is None:
        print('Error opening image: ' + argv)
        return -1

    # Read the image
    img = cv2.imread(argv, 0)

    # Thresholding the image
    (thresh, img_bin) = cv2.threshold(img, 128, 255,
                                      cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Invert the image
    img_bin = 255 - img_bin
    # cv2.imwrite("C:/ICR/uploads/tempFileName_2019041809585560-01.jpg", img_bin)

    # Defining a kernel length
    kernel_length = np.array(img).shape[1] // 60

    # A vertical kernel of (1 x kernel_length), which will detect all the vertical lines in the image.
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                (1, kernel_length))
    # A horizontal kernel of (kernel_length x 1), which will detect all the horizontal lines in the image.
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    # A kernel of (3 X 3) ones.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    # Morphological operation to detect vertical lines from an image
    img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=2)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=2)
    #cv2.imwrite("C:/ICR/uploads/verticle_lines.jpg", verticle_lines_img)

    # Morphological operation to detect horizontal lines from an image
    img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=2)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=2)

    # Weighting parameters; these decide how much each image contributes to the combined result.
    alpha = 0.5
    beta = 1.0 - alpha
    # Add the two images with the given weights; their weighted sum combines the line masks.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha,
                                    horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (thresh,
     img_final_bin) = cv2.threshold(img_final_bin, 128, 255,
                                    cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # invert back so the detected borders become the inpainting mask
    img_final_bin = 255 - img_final_bin

    # remove the detected lines by inpainting over them
    dst = cv2.inpaint(img, img_final_bin, 5, cv2.INPAINT_TELEA)
    cv2.imwrite(argv, dst)
コード例 #56
0
        print(str(round(rate * 100)) + '%')
        rate = rate + 0.1
    kernel = np.ones((3, 3), np.uint8)
    mask_list[i] = cv2.dilate(mask_list[i], kernel, iterations=1)
    mask_list[i] = cv2.morphologyEx(mask_list[i], cv2.MORPH_OPEN, kernel)
del kernel
gc.collect()

# Remove the masked regions and fill them in from the surrounding pixels
print('Image Inpainting...')
rate = 0.1
for i in range(fnum):
    if ((i / fnum) >= rate):
        print(str(round(rate * 100)) + '%')
        rate = rate + 0.1
    frame_list[i] = cv2.inpaint(frame_list[i], mask_list[i], 3,
                                cv2.INPAINT_TELEA)
del mask_list
gc.collect()

# Initialize the TLD trackers
print('Making Tracker...')
# initialize the first player's tracker
player1, b1 = None, None
player1, b1 = InitializingTLD(player1, b1, frame_list[0])
# initialize the second player's tracker
player2, b2 = None, None
player2, b2 = InitializingTLD(player2, b2, frame_list[0])

# run Tracking-Learning-Detection
print('Tracking Learning Detection...')
rate = 0.1
コード例 #57
0
import cv2
import numpy as np

# load the damaged image
image = cv2.imread('abraham.jpg')
cv2.imshow('Original Damaged Photo', image)
cv2.waitKey(0)

# load the image where the damaged areas have been marked
marked_damages = cv2.imread('mask.jpg', 0)
cv2.imshow('Marked Damages', marked_damages)
cv2.waitKey(0)

# threshold so the damaged areas show up clearly
ret, thresh1 = cv2.threshold(marked_damages, 254, 255, cv2.THRESH_BINARY)
cv2.imshow('Threshold Binary', thresh1)
cv2.waitKey(0)

# dilate the mask so it covers the damaged areas more fully
kernel = np.ones((7, 7), np.uint8)
mask = cv2.dilate(thresh1, kernel, iterations=1)
cv2.imshow('Dilated Mask', mask)
cv2.imwrite('images/abraham_mask.png', mask)

cv2.waitKey(0)
# the main call that performs the photo restoration
restored = cv2.inpaint(image, mask, 3, cv2.INPAINT_TELEA)

cv2.imshow('Restored', restored)
cv2.waitKey(0)
cv2.destroyAllWindows()
コード例 #58
0
    def inpaint_depth(self, rad, depth=None):
        """inpaint depth"""
        if depth is None:
            depth = self.depth
        return cv2.inpaint(depth, self.invalid_depth_mask(depth=depth), rad,
                           cv2.INPAINT_TELEA)
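`invalid_depth_mask` is not shown here; a plausible sketch, assuming zero (or NaN, for float maps) marks missing depth, as is common for RGB-D sensors (hypothetical helper, `np` assumed imported as numpy):

    def invalid_depth_mask(self, depth=None):
        # nonzero marks pixels with missing depth; cv2.inpaint fills those
        if depth is None:
            depth = self.depth
        invalid = (depth == 0)
        if np.issubdtype(depth.dtype, np.floating):
            invalid |= np.isnan(depth)
        return invalid.astype(np.uint8)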
コード例 #59
0
ファイル: inpaint.py プロジェクト: 12rohanb/opencv
if __name__ == '__main__':
    import sys
    try:
        fn = sys.argv[1]
    except:
        fn = '../data/fruits.jpg'

    print(__doc__)

    img = cv2.imread(fn)
    if img is None:
        print('Failed to load image file:', fn)
        sys.exit(1)

    img_mark = img.copy()
    mark = np.zeros(img.shape[:2], np.uint8)
    sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255))

    while True:
        ch = 0xFF & cv2.waitKey()
        if ch == 27:
            break
        if ch == ord(' '):
            res = cv2.inpaint(img_mark, mark, 3, cv2.INPAINT_TELEA)
            cv2.imshow('inpaint', res)
        if ch == ord('r'):
            img_mark[:] = img
            mark[:] = 0
            sketch.show()
    cv2.destroyAllWindows()
コード例 #60
0
def convertToNumpy(inputPath, idx):
    # copied from datasetVideo.py
    inputExtension = ".exr"

    def get_image_name(i, j, mode):
        if mode == 'high':
            return os.path.join(inputPath,
                                "high_tmp_%05d%s" % (j, inputExtension))
        if mode == 'highdn':
            return os.path.join(inputPath,
                                "high_tmp_%05d_depth%s" % (j, inputExtension))
        if mode == 'highfx':
            return os.path.join(inputPath,
                                "high_tmp_%05d_fx%s" % (j, inputExtension))
        elif mode == 'low':
            return os.path.join(inputPath,
                                "low_tmp_%05d%s" % (j, inputExtension))
        elif mode == 'dn':
            return os.path.join(inputPath,
                                "low_tmp_%05d_depth%s" % (j, inputExtension))
        elif mode == 'flow':
            return os.path.join(inputPath,
                                "low_tmp_%05d_flow%s" % (j, inputExtension))

    high = [None] * numFrames
    low = [None] * numFrames
    flow = [None] * numFrames
    for j in range(numFrames):
        high_rgb = np.clip(
            np.asarray(imageio.imread(get_image_name(idx, j,
                                                     'high'))).transpose(
                                                         (2, 0, 1)), 0, 1)
        high_dn = np.asarray(imageio.imread(get_image_name(
            idx, j, 'highdn'))).transpose((2, 0, 1))
        high_fx = np.asarray(imageio.imread(get_image_name(
            idx, j, 'highfx'))).transpose((2, 0, 1))
        high[j] = np.concatenate(
            (high_rgb[3:4, :, :], high_dn, high_fx[0:1, :, :]), axis=0)
        high[j][0, :, :] = high[j][0, :, :] * 2 - 1
        assert high[j].shape[0] == 6

        low_rgb = np.clip(
            np.asarray(imageio.imread(get_image_name(idx, j,
                                                     'low'))).transpose(
                                                         (2, 0, 1)), 0, 1)
        low_dn = np.asarray(imageio.imread(get_image_name(idx, j,
                                                          'dn'))).transpose(
                                                              (2, 0, 1))
        low[j] = np.concatenate((low_rgb[3:4], low_dn), axis=0)
        low[j][0, :, :] = low[j][0, :, :] * 2 - 1  # transform mask to [-1,1]
        assert low[j].shape[0] == 5

        flow_xy = imageio.imread(get_image_name(idx, j, 'flow'))[:, :, 0:2]
        flow_inpaint = np.stack(
            (cv.inpaint(flow_xy[:, :, 0], np.uint8(low_rgb[3, :, :] == 0), 3,
                        cv.INPAINT_NS),
             cv.inpaint(flow_xy[:, :, 1], np.uint8(low_rgb[3, :, :] == 0), 3,
                        cv.INPAINT_NS)),
            axis=0)
        flow[j] = flow_inpaint
    images_high = np.stack(high, axis=0)
    images_low = np.stack(low, axis=0)
    flow_low = np.stack(flow, axis=0)
    # save as numpy array
    np.save(os.path.join(inputPath, "high_%05d.npy" % idx), images_high)
    np.save(os.path.join(inputPath, "low_%05d.npy" % idx), images_low)
    np.save(os.path.join(inputPath, "flow_%05d.npy" % idx), flow_low)
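The two per-channel cv.inpaint calls on the flow above generalize: inpaint accepts only 1-channel (8U/16U/32F) or 3-channel 8U input, so an N-channel float field has to be filled channel by channel. A small helper sketch under that assumption:

def inpaint_channels(field, invalid_mask, radius=3):
    # field: HxWxC float32 array; invalid_mask: uint8, nonzero = missing
    filled = [cv.inpaint(np.ascontiguousarray(field[:, :, c]),
                         invalid_mask, radius, cv.INPAINT_NS)
              for c in range(field.shape[2])]
    return np.stack(filled, axis=0)  # CxHxW, matching flow[j] above

# e.g. flow[j] = inpaint_channels(flow_xy, np.uint8(low_rgb[3, :, :] == 0))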