def process_image(self, im):
    """Segment *im* with GrabCut using the preset mask.

    :param im: input image in BGR color space
    :return: binary uint8 foreground mask
    :raise TypeError: if *im* is not a valid Image object
    """
    if not isinstance(im, Image):
        raise TypeError("Must be a valid Image (package.image) object")
    if im.colorspace != self._colorspace:
        raise AttributeError("Image must be in BGR color space")
    # if app.DB:
    #     try:
    #         mask = app.DB[self.dbname(im.imgname)]
    #         return mask
    #     except FileNotFoundError:
    #         pass  # not in the database, continue calculating
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    mask = self._mask.copy()
    cv2.grabCut(im, mask, None, bgdmodel, fgdmodel, self._iter_count,
                cv2.GC_INIT_WITH_MASK)
    # Collapse the four GrabCut labels into a binary mask:
    # GC_BGD (0) and GC_PR_BGD (2) become 0, everything else 1.
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    # if app.DB:
    #     app.DB[self.dbname(im.imgname)] = mask
    return mask
def findObject(img):
    """Find object within image.

    Input:
        img: input image object
    Output:
        mask: mask containing object
        contours: contours outlining mask
        hierarchy: hierarchy of contours
    """
    mask = np.zeros(img.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # Cut out the object (this is quite slow); a simple intensity
    # threshold would be a faster alternative.
    rect = (0, 0, 1023, 833)
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    # Mask out the background.
    img = img * mask2[:, :, np.newaxis]
    # Note: cv2.findContours returns three values in OpenCV 3.x, two in 4.x.
    im2, contours, hierarchy = cv2.findContours(mask2.copy(), cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
    # plt.imshow(img), plt.colorbar(), plt.show()
    return (mask2, contours, hierarchy)
def process_scan_leaf(scan_leaf_path, output_folder):
    leaf_picture_name = os.path.split(scan_leaf_path)[-1]
    image = cv2.imread(scan_leaf_path)
    grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grey_image = cv2.GaussianBlur(grey_image, (5, 5), 3)
    ret, threshold = cv2.threshold(grey_image, 0, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    contours, hierarchy = cv2.findContours(threshold, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_NONE)
    height, width, depth = image.shape
    if len(contours) <= 50:
        # Few contours: the Otsu threshold is clean enough to use directly.
        image = cv2.bitwise_and(image, image, mask=threshold)
        print("[leafscan] " + leaf_picture_name + " was processed by the simple leaf scan")
    else:
        # Noisy threshold: fall back to GrabCut inside a border rectangle.
        rect = (10, 10, width - 21, height - 21)
        mask = np.zeros(image.shape[:2], np.uint8)
        bgd_model = np.zeros((1, 65), np.float64)
        fgd_model = np.zeros((1, 65), np.float64)
        cv2.grabCut(image, mask, rect, bgd_model, fgd_model, 5,
                    cv2.GC_INIT_WITH_RECT)
        mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        image = image * mask2[:, :, np.newaxis]
        print("[leafscan] " + leaf_picture_name + " was processed by the complex leaf scan")
    image = crop_image(image, height, width)
    cv2.imwrite(os.path.join(output_folder, leaf_picture_name), image)
def execute(self, image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.equalizeHist(gray, gray)
    faces = self.face_cascade.detectMultiScale(
        gray, 1.1, 2, cv2.CASCADE_SCALE_IMAGE, (30, 30))
    for face in faces:
        faceimg = self.get_face(image, face)
        # GrabCut expects a single-channel 8-bit mask of shape (h, w).
        mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
        # The GrabCut rectangle is (x, y, width, height), which is exactly
        # the format detectMultiScale returns; do not add the offsets.
        rect = (face[0], face[1], face[2], face[3])
        # The models must be 1x65 float64 arrays (13 * 5 GMM parameters).
        bgd_model = np.zeros((1, 5 * 13), np.float64)
        fgd_model = np.zeros((1, 5 * 13), np.float64)
        cv2.grabCut(image, mask, rect, bgd_model, fgd_model, 10,
                    mode=cv2.GC_INIT_WITH_RECT)
        # Paint definite-background pixels white.
        b, g, r = cv2.split(image)
        b[mask == cv2.GC_BGD] = 255
        g[mask == cv2.GC_BGD] = 255
        r[mask == cv2.GC_BGD] = 255
def grabcuthm(im, hm):
    size = hm.shape
    bright = np.amax(hm)
    # Definite foreground: bright heatmap pixels restricted to the
    # lower-central region of the image (integer division for indices).
    ret, fgd = cv2.threshold(hm, FGD_BOUND * bright, 1 * bright,
                             cv2.THRESH_BINARY)
    fgd[1:size[0] // 2] = 0
    fgd[1:size[0], 1:size[1] // 4] = 0
    fgd[1:size[0], size[1] * 3 // 4:size[1]] = 0
    ret, pr_fgd = cv2.threshold(hm, FGD_BGD_SEP * bright, 1 * bright,
                                cv2.THRESH_BINARY)
    pr_fgd -= fgd
    ret, bgd = cv2.threshold(hm, BGD_BOUND * bright, 1 * bright,
                             cv2.THRESH_BINARY_INV)
    bgd[size[0] // 3:size[0]] = 0
    ret, pr_bgd = cv2.threshold(hm, FGD_BGD_SEP * bright, 1 * bright,
                                cv2.THRESH_BINARY_INV)
    pr_bgd -= bgd
    # Combine the four regions into a GrabCut label mask.
    mask = (cv2.GC_BGD * bgd + cv2.GC_FGD * fgd +
            cv2.GC_PR_BGD * pr_bgd + cv2.GC_PR_FGD * pr_fgd)
    mask = mask.astype(np.uint8, copy=False)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (0, im.shape[:2][0] // 2, im.shape[:2][1], im.shape[:2][0])
    cv2.grabCut(im, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    return mask2
def grabcut(rgb_chip):
    (h, w) = rgb_chip.shape[0:2]
    _mask = np.zeros((h, w), dtype=np.uint8)  # Initialize: mask
    # Set inside to cv2.GC_PR_FGD (probably foreground)
    _mask[:, :] = cv2.GC_PR_FGD
    # Set border to cv2.GC_BGD (definitely background)
    _mask[0, :] = cv2.GC_BGD
    _mask[-1, :] = cv2.GC_BGD
    _mask[:, 0] = cv2.GC_BGD
    _mask[:, -1] = cv2.GC_BGD
    # GrabCut parameters
    rect = (0, 0, w, h)
    num_iters = 5
    mode = cv2.GC_INIT_WITH_MASK
    bgd_model = np.zeros((1, 13 * 5), np.float64)
    fgd_model = np.zeros((1, 13 * 5), np.float64)
    # GrabCut execution
    cv2.grabCut(rgb_chip, _mask, rect, bgd_model, fgd_model, num_iters,
                mode=mode)
    is_foreground = (_mask == cv2.GC_FGD) + (_mask == cv2.GC_PR_FGD)
    chip_mask = np.where(is_foreground, 255, 0).astype('uint8')
    # Crop
    chip_mask = clean_mask(chip_mask)
    chip_mask = np.array(chip_mask, np.float64) / 255.0
    # Mask the value component of HSV space
    chip_hsv = cv2.cvtColor(rgb_chip, cv2.COLOR_RGB2HSV)
    chip_hsv = np.array(chip_hsv, dtype=np.float64) / 255.0
    chip_hsv[:, :, 2] *= chip_mask
    chip_hsv = np.array(np.round(chip_hsv * 255.0), dtype=np.uint8)
    seg_chip = cv2.cvtColor(chip_hsv, cv2.COLOR_HSV2RGB)
    return seg_chip
def get_foreground(img):
    """Divide the foreground and background of the given image."""
    h, w = img.shape[:2]
    rectangle = (1, 1, w, h)
    result = np.zeros(shape=(h, w), dtype=np.uint8)
    bgModel = np.zeros((1, 13 * 5), np.float64)
    fgModel = np.zeros((1, 13 * 5), np.float64)
    cv.grabCut(img, result, rectangle, bgModel, fgModel, 5,
               cv.GC_INIT_WITH_RECT)
    # cv.imshow("test", result)
    # cv.waitKey(100)
    # Pixels labeled "probable foreground" form the object mask.
    temp1 = np.array(cv.GC_PR_FGD, dtype=np.uint8)
    result2 = cv.compare(result, temp1, cv.CMP_EQ)
    cv.imshow("test", result2)
    cv.waitKey(2000)
    x, y = img.shape[:2]
    background = np.ones(shape=(x, y, 3), dtype=np.uint8)
    foreground = np.ones(shape=(x, y, 3), dtype=np.uint8)
    # Split the pixels between the two canvases (slow per-pixel loop).
    for i in range(0, x):
        for j in range(0, y):
            if result2[i, j].all() == 0:
                background[i, j] = img[i, j]
            else:
                foreground[i, j] = img[i, j]
    cv.imshow("test", foreground)
    cv.waitKey(2000)
    cv.imshow("test1", background)
    cv.waitKey(2000)
    cv.imwrite("prueba.png", foreground)
    return foreground
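# A vectorized sketch of the same foreground/background split as above,
# assuming `img` and `result2` as computed inside get_foreground (and the
# numpy import already present in this file); boolean indexing replaces the
# per-pixel loop and should behave identically, only much faster.
def split_foreground_background(img, result2):
    fg_sel = result2.astype(bool)      # True where GrabCut said PR_FGD
    foreground = np.ones_like(img)
    background = np.ones_like(img)
    foreground[fg_sel] = img[fg_sel]   # copy object pixels
    background[~fg_sel] = img[~fg_sel] # copy everything else
    return foreground, background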
def test_3():
    import io
    import urllib.request

    url = "http://i.gzdmc.net/images/bd2888edd41a951de1ab8cbfb33c82e2.jpg@1e_400w_400h_1c_0i_1o_90Q_1x.png"
    res = urllib.request.urlopen(url)
    # Save the response bytes and decode them into an OpenCV image.
    imimage = io.BytesIO(res.read())
    nparr = np.frombuffer(imimage.read(), np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    print(dir(img))
    height, width, channels = img.shape
    print(height)
    print(width)
    print(channels)
    mask = np.zeros(img.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # The rectangle is (x, y, width, height): width comes first.
    rect = (0, 0, width - 1, height - 1)
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype("uint8")
    img = img * mask2[:, :, np.newaxis]
    plt.imshow(img), plt.colorbar(), plt.show()
def grabcut(img, targetness):
    """Segment the best target-like region from a targetness map."""
    mask = np.ones(img.shape[:2], np.uint8) * cv2.GC_BGD
    # Top 5% of targetness scores become probable foreground,
    # top 1% definite foreground.
    score_th = scoreatpercentile(targetness, 95)
    mask[targetness >= score_th] = cv2.GC_PR_FGD
    score_th = scoreatpercentile(targetness, 99)
    mask[targetness >= score_th] = cv2.GC_FGD
    mask = cv2.medianBlur(mask, 15)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    cv2.grabCut(img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    # Keep only the connected component with the highest targetness.
    lab_mask2 = bwlabel(mask2)
    lab_list = np.unique(lab_mask2.flatten())[1:]
    lab_argmax = np.argmax(
        [np.max(targetness[lab_mask2 == i]) for i in lab_list])
    mask2[lab_mask2 != lab_list[lab_argmax]] = 0
    img2 = img.copy()
    img2[mask2 < 1, :] = [0, 43, 54]
    img2 = mark_boundaries(img2, mask2)
    return img2, mask2
def grabcut(bgr_img, prior_mask, binary=True):
    """
    References:
        http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html
    """
    # GrabCut parameters
    (h, w) = bgr_img.shape[0:2]
    rect = (0, 0, w, h)
    num_iters = 5
    mode = cv2.GC_INIT_WITH_MASK
    bgd_model = np.zeros((1, 13 * 5), np.float64)
    fgd_model = np.zeros((1, 13 * 5), np.float64)
    # GrabCut execution
    post_mask = prior_mask.copy()
    cv2.grabCut(bgr_img, post_mask, rect, bgd_model, fgd_model, num_iters,
                mode=mode)
    if binary:
        is_foreground = (post_mask == cv2.GC_FGD) + (post_mask == cv2.GC_PR_FGD)
        post_mask = np.where(is_foreground, 255, 0).astype('uint8')
    else:
        # Map each of the four GrabCut labels to a distinct gray level.
        label_colors = [255, 170, 50, 0]
        label_values = [cv2.GC_FGD, cv2.GC_PR_FGD, cv2.GC_PR_BGD, cv2.GC_BGD]
        pos_list = [post_mask == value for value in label_values]
        for pos, color in zip(pos_list, label_colors):
            post_mask[pos] = color
    return post_mask
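# A minimal usage sketch for the wrapper above (the cv2/numpy imports from
# this file are assumed, and 'example.jpg' is a hypothetical input): the
# prior mask marks the image border as definite background and everything
# else as probable foreground, then asks for a binary result.
bgr_img = cv2.imread('example.jpg')
prior_mask = np.full(bgr_img.shape[:2], cv2.GC_PR_FGD, np.uint8)
prior_mask[0, :] = prior_mask[-1, :] = cv2.GC_BGD
prior_mask[:, 0] = prior_mask[:, -1] = cv2.GC_BGD
binary_mask = grabcut(bgr_img, prior_mask, binary=True)
cv2.imwrite('mask.png', binary_mask)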
def grabcut_components(image, mask, num_components=1):
    h, w, _ = image.shape
    # Integer kernel size; plain h / 100 would be a float in Python 3.
    kernel_size = max(1, h // 100)
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    # Erode to get confident foreground, dilate to get the background margin.
    foreground = cv2.erode(mask, kernel, iterations=1)
    foreground = largest_components(foreground, num_components)
    background = cv2.dilate(mask, kernel, iterations=1)
    mask = cv2.GC_PR_FGD * np.ones((h, w), dtype='uint8')
    mask[np.where(foreground > 0)] = cv2.GC_FGD
    mask[np.where(background < 255)] = cv2.GC_BGD
    backgroundModel = np.zeros((1, 65), np.float64)
    foregroundModel = np.zeros((1, 65), np.float64)
    cv2.grabCut(image, mask, rect=None, bgdModel=backgroundModel,
                fgdModel=foregroundModel, iterCount=10,
                mode=cv2.GC_INIT_WITH_MASK)
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    mask_holes_removed = largest_components(mask, num_components=1,
                                            output_bounding_box=False)
    segmented_image = image * (mask_holes_removed[:, :, np.newaxis] / 255)
    return segmented_image, mask_holes_removed * 255
def remove_background(imgo):
    height, width = imgo.shape[:2]
    # Create a mask holder
    mask = np.zeros(imgo.shape[:2], np.uint8)
    # GrabCut model arrays
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # Hard-coded rectangle; the object must lie within it.
    rect = (10, 10, width - 30, height - 30)
    cv2.grabCut(imgo, mask, rect, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_RECT)
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img1 = imgo * mask[:, :, np.newaxis]
    # Get the background
    background = imgo - img1
    # Change all background pixels that are not black to white
    background[np.where((background > [0, 0, 0]).all(axis=2))] = [255, 255, 255]
    # Add the background and the image
    final = background + img1
    gray_pic = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)
    imghls = cv2.cvtColor(final, cv2.COLOR_BGR2HLS)
    return final, gray_pic, imghls
def grabcut(img, iters=5, roi=None, margin=5):
    """Wrapper for OpenCV's grabCut function.

    Runs the GrabCut algorithm for segmentation. Returns an 8-bit
    single-channel mask. Its elements may have the following values:

    * ``cv2.GC_BGD`` defines an obvious background pixel
    * ``cv2.GC_FGD`` defines an obvious foreground pixel
    * ``cv2.GC_PR_BGD`` defines a possible background pixel
    * ``cv2.GC_PR_FGD`` defines a possible foreground pixel

    The GrabCut algorithm is executed with `iters` iterations. The region
    of interest `roi` can be a 4-tuple ``(x,y,width,height)``. If the ROI
    is not set, the ROI is set to the entire image, with a margin of
    `margin` pixels from the borders.

    This method is indirectly executed by :meth:`make`.
    """
    mask = np.zeros(img.shape[:2], np.uint8)
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    # Use the margin to set the ROI if the ROI was not provided.
    if not roi:
        roi = (margin, margin, img.shape[1] - margin * 2,
               img.shape[0] - margin * 2)
    cv2.grabCut(img, mask, roi, bgdmodel, fgdmodel, iters,
                cv2.GC_INIT_WITH_RECT)
    return mask
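# A short usage sketch for the wrapper above (assuming `img` is any BGR
# image loaded with cv2.imread): collapse the four returned labels into a
# binary mask and apply it, mirroring the np.where idiom used throughout
# this file.
mask = grabcut(img, iters=5, margin=5)
binary = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD),
                  1, 0).astype('uint8')
segmented = img * binary[:, :, np.newaxis]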
def fave_avg_glv(path):
    global face_cascade
    img = cv2.imread(path)
    height, width = img.shape[:2]
    mask = np.zeros(img.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    faces = face_cascade.detectMultiScale(img, 1.3, 5)
    if len(faces) > 1:
        # Keep only faces that are large relative to the image
        # (the original assigned this filter to an unused variable).
        faces = [f for f in faces if f[2] > width / 4 and f[3] > height / 3]
    if len(faces) == 1:
        for (x, y, w, h) in faces:
            rect = (x, y, w, h)
    else:
        rect = (int(width * 0.3), int(height * 0.2),
                int(width * 0.7), int(height * 0.8))
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img = img * mask2[:, :, np.newaxis]
    (_, img) = cv2.threshold(img, 50, 255, cv2.THRESH_TOZERO)
    # Ignore masked-out (zero) pixels when computing the mean gray level.
    img = np.ma.masked_equal(img, 0)
    return img.mean()
def segment(self):
    print('Keys:')
    print('\tPress "space" to skip the current image')
    print('\tPress "q" to quit and save your previous segmentations')
    print('\tPress "w" to run segmentation on the image')
    key = self.wait_for_key(' qnw')
    if key in [' ', 'q']:
        self.out_mask = None
        self.mask = None
        self.brush_size_change = None
        return None, key
    self.done_drawing = True
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    out_mask = np.copy(self.mask)
    if out_mask[out_mask == 1].size == 0:
        # No foreground strokes were drawn; return an empty mask.
        out_mask = np.zeros(shape=self.image.shape[:2], dtype=np.uint8)
    else:
        cv2.grabCut(self.image, out_mask, None, bgdModel, fgdModel, 1,
                    cv2.GC_INIT_WITH_MASK)
    return_mask = np.zeros(out_mask.shape).astype(np.float64)
    display_mask = np.ones(out_mask.shape).astype(np.float64) * 0.1
    bgnd = (out_mask == cv2.GC_PR_BGD) | (out_mask == cv2.GC_BGD)
    # Fill only the biggest foreground contour in both masks.
    ctr = self.get_biggest_ctr(np.logical_not(bgnd).astype(np.uint8))
    if ctr is not None:
        cv2.drawContours(display_mask, [ctr], -1, 1, -1)
        cv2.drawContours(return_mask, [ctr], -1, 1, -1)
    self.out_mask = out_mask
    display = display_mask[:, :, np.newaxis] * self.image.astype(np.float64)
    cv2.imshow('segment', display / np.max(display))
    cv2.imshow('all', np.logical_not(bgnd).astype(np.uint8) * 255)
    print('Keys:')
    print('\tPress "space" to skip the current segmentation')
    print('\tPress "q" to quit and save all segmentations including this')
    print('\tPress "w" to record this segmentation and move on to the next image')
    key = self.wait_for_key('qnzw ')
    if key in [' ', 'q']:
        self.out_mask = None
        self.mask = None
        self.brush_size_change = None
        return None, key
    return return_mask, key
def refine(self):
    fg_mask = np.where((self.mask == self.FG) + (self.mask == self.PR_FG),
                       255, 0).astype('uint8')
    # Get rid of noise using morphological opening
    _, fg_mask = self.morph_open(fg_mask, 3, 3, 3)
    # Refine foreground/background mask
    eroded, opened = self.morph_open(fg_mask, 3, 8, 15)
    if DEBUG == 1:
        cv2.imshow('Eroded', eroded)
        cv2.imshow('Opened', opened)
        cv2.waitKey()
    # Eroded pixels become sure foreground, opened-only pixels probable
    # background, everything else sure background.
    has_fg = False
    for i in range(self.out_img.shape[0]):
        for j in range(self.out_img.shape[1]):
            if eroded[i, j] == 255:
                self.mask[i, j] = self.FG
                has_fg = True
            elif opened[i, j] == 255:
                self.mask[i, j] = self.PR_BG
            else:
                self.mask[i, j] = self.BG
    # Terminate if we haven't detected any big enough object.
    if has_fg:
        cv2.grabCut(self.out_img, self.mask, self.rect_init, self.bgdmodel,
                    self.fgdmodel, 1, cv2.GC_INIT_WITH_MASK)
    else:
        logging.error("No foreground object.")
    if self.resize:
        factor = float(self.in_img.shape[1]) / float(self.mask.shape[1])
        self.mask = cv2.resize(self.mask,
                               (self.in_img.shape[1], self.in_img.shape[0]),
                               interpolation=cv2.INTER_NEAREST)
def apply_segmentation(self, coords=None, pix_locs=None):
    # Take the coords and call grabCut; cur_mask_ind is assumed to point
    # to the previous mask.
    if len(self.mask_dict[self.cur_indv][self.cur_indv_ind]) != 0:
        # Load the previous mask and bgd/fgd models (if they exist).
        bgd_mod = self.open_seg('bgd')
        fgd_mod = self.open_seg('fgd')
        mask = self.open_seg('mask')
        # Mark the region outlined by the coordinates, in the form
        # lower_y:upper_y, lower_x:upper_x, or by explicit pixel locations.
        if pix_locs is None and coords is not None:
            mask[coords[1]:coords[3], coords[0]:coords[2]] = self.region_marker
        elif coords is None and pix_locs is not None:
            print(pix_locs)
            mask[list(zip(*pix_locs))] = self.region_marker
        # Either way we continue from the existing mask
        # (the original set this flag only on the pix_locs branch).
        init = cv2.GC_INIT_WITH_MASK
    else:
        # No previous mask: initialize with the rectangle instead.
        bgd_mod = np.zeros((1, 65))
        fgd_mod = np.zeros((1, 65))
        mask = np.zeros(self.cur_img_shape[:2], np.uint8)
        init = cv2.GC_INIT_WITH_RECT
    iterations = 2  # TODO: Make this a selector
    print("SEGMENTING PLEASE WAIT")
    cv2.grabCut(self.cur_img, mask, coords, bgd_mod, fgd_mod, iterations,
                init)
    # Store the mask, fgd_mod and bgd_mod.
    self.cur_mask_ind += 1
    seg_dict = self.save_segmentation(mask, fgd_mod, bgd_mod)
    # Chop off the old list of segmentations.
    old_seg_dicts = self.mask_dict[self.cur_indv][self.cur_indv_ind]
    new_seg_dicts = self.mask_dict[self.cur_indv][self.cur_indv_ind][:self.cur_mask_ind]
    # Major TODO: clean up segmentations currently in that directory ahead of this one.
    new_seg_dicts.append(seg_dict)
    self.mask_dict[self.cur_indv][self.cur_indv_ind] = new_seg_dicts
    self.cleanup_previous_files(old_seg_dicts)
def openCVGrabCut(img, bbox):
    mask = np.zeros(img.shape[:2], dtype='uint8')
    # Temporary model arrays used internally by grabCut (1 x 65 float64).
    tmp1 = np.zeros((1, 13 * 5), np.float64)
    tmp2 = np.zeros((1, 13 * 5), np.float64)
    cv2.grabCut(img, mask, bbox, tmp1, tmp2, iterCount=1,
                mode=cv2.GC_INIT_WITH_RECT)
    return mask
def grabCut(img, rect=None, mask=None, ite=5):
    height, width, channels = img.shape
    # If no arguments, try to segment using a large rectangle.
    if rect is None and mask is None:
        rect = (int(width * 0.15), 15, int(width * 0.85), height - 15)
        initOpt = cv2.GC_INIT_WITH_RECT
    # If rectangle argument but no mask, init mask with rectangle.
    elif mask is None:
        mask = np.zeros((height, width), np.uint8)
        initOpt = cv2.GC_INIT_WITH_RECT
    # If mask argument but no rectangle, use the mask with a full-frame rect.
    elif rect is None:
        initOpt = cv2.GC_INIT_WITH_MASK
        rect = (0, 0, width, height)
        mask = np.uint8(mask)
    # If mask argument and rectangle, set pixels outside the rect as background.
    else:
        mask = np.uint8(mask)
        rect = rectangleutil.checkRectangleBounds(rect, mask.shape)
        maskRect = rectangleutil.rectangle2mask(rect, mask.shape)
        mask[maskRect == 0] = cv2.GC_BGD
        initOpt = cv2.GC_INIT_WITH_MASK
    # imageblured = np.zeros(img.shape, img.dtype)
    # cv2.smooth(img, imageblured, cv.CV_GAUSSIAN, 5)
    tmp1 = np.zeros((1, 13 * 5), np.float64)
    tmp2 = np.zeros((1, 13 * 5), np.float64)
    cv2.grabCut(img, mask, rect, tmp1, tmp2, ite, initOpt)
    # Collapse the four labels into a binary 0/255 mask.
    mask[mask == cv2.GC_BGD] = 0
    mask[mask == cv2.GC_PR_BGD] = 0
    mask[mask == cv2.GC_FGD] = 255
    mask[mask == cv2.GC_PR_FGD] = 255
    return mask
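# A brief usage sketch exercising the four initialization branches of the
# wrapper above; `img` is assumed to be a BGR image and `scribbles` a
# hypothetical uint8 array pre-filled with GrabCut labels (0-3).
full_auto = grabCut(img)                                      # big default rect
boxed = grabCut(img, rect=(20, 20, 200, 150))                 # rect only
scribbled = grabCut(img, mask=scribbles)                      # mask only
both = grabCut(img, rect=(20, 20, 200, 150), mask=scribbles)  # rect + mask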
def propagate_label_gmm(previmage, objmask, nextimage):
    rows = previmage.shape[0]
    cols = previmage.shape[1]
    # Start with everything as sure background, mark the object region as
    # probable foreground (3) and a random sample of 100 object pixels as
    # sure foreground (1).
    gmm_mask = np.zeros((rows, cols), np.uint8)
    gmm_mask[objmask[0], objmask[1]] = 3
    sure_fg = np.random.randint(objmask[0].shape[0], size=100)
    for x in np.nditer(sure_fg):
        gmm_mask[objmask[0][x], objmask[1][x]] = 1
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    img = nextimage
    print(type(img[0][0]))
    tmp_mask = np.zeros((rows, cols), np.uint8)
    m1, m2 = np.where(gmm_mask == 1)
    r_mask, bgdModel, fgdModel = cv2.grabCut(
        img, gmm_mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
    r_mask, bgdModel, fgdModel = cv2.grabCut(
        img, tmp_mask, (0, 199, 120, 120), bgdModel, fgdModel, 5,
        cv2.GC_INIT_WITH_RECT | cv2.GC_INIT_WITH_MASK)
    print(sure_fg)
    print(np.sum(gmm_mask))
    print(r_mask.shape)
    print(objmask[1].shape)
    print(img.shape, r_mask.shape)
    l_img = img * r_mask[:, :, np.newaxis]
    cv2.imwrite('gmmm.png', l_img)
    # Render the propagated mask in green. (The original sized this array
    # from a self.gtframe attribute, which does not exist in a plain
    # function; the image size is used here instead.)
    rgb_np = np.zeros((rows, cols, 3))
    rgb_np[np.where(r_mask)] = [0, 255, 0]
    data = rgb_np
    rescaled = (255.0 / data.max() * (data - data.min())).astype(np.uint8)
    rgb_image = Image.fromarray(rescaled)
    return rgb_image
def main():
    """main method for grabCut."""
    image = cv2.imread('rawImage.png')
    mask = np.zeros(image.shape[:2], np.uint8)
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    # Pass interpolation as a keyword: positionally it would be taken as fy.
    manual_mask = cv2.resize(cv2.imread('result.png', 0),
                             image.shape[:2][::-1],
                             interpolation=cv2.INTER_NEAREST)
    # Map mask to {0, 1}
    mask[manual_mask < 127] = 0
    mask[manual_mask >= 127] = 1
    mask, bgd_model, fgd_model = cv2.grabCut(image, mask, None, bgd_model,
                                             fgd_model, 5,
                                             cv2.GC_INIT_WITH_MASK)
    # Run five more iterations on the already-learned models.
    mask, bgd_model, fgd_model = cv2.grabCut(image, mask, None, bgd_model,
                                             fgd_model, 5, cv2.GC_EVAL)
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    image = image * mask[:, :, np.newaxis]
    cv2.imwrite("masked.png", image)
def do_grabcut(f):
    img = cv2.imread(f)
    mask = np.zeros(img.shape[:2], np.uint8)
    (h, w, d) = img.shape
    upper_corner = (h // 10, w // 10)
    lower_corner = (h - (h // 10), w - (w // 10))
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # rect = (upper_corner[0], upper_corner[1], lower_corner[0], lower_corner[1])
    rect = (50, 50, w - 100, h - 100)
    iterCount = 1
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount,
                cv2.GC_INIT_WITH_RECT)
    # Keep definite (1) and probable (3) foreground.
    mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
    output = cv2.bitwise_and(img, img, mask=mask2)
    # mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    # img = img * mask2[:, :, np.newaxis]
    # plt.imshow(img), plt.show()
    cv2.imwrite('gcut.jpg', output)
    return output
def _refine_predug(self, candidate):
    """ uses the color information directly to specify the predug """
    # determine a bounding rectangle
    region = candidate.bounds
    size = max(region.width, region.height)
    region.buffer(0.5 * size)  # < increase by 50% in each direction

    # extract the region from the image
    slice_x, slice_y = region.slices
    img = self.image[slice_y, slice_x].astype(np.uint8, copy=True)

    # build the estimate polygon
    poly_p = affinity.translate(candidate.polygon, -region.x, -region.y)
    poly_s = affinity.translate(self.ground.get_sky_polygon(),
                                -region.x, -region.y)

    def fill_mask(color, margin=0):
        """ fills the mask with the buffered regions """
        for poly in (poly_p, poly_s):
            pts = np.array(poly.buffer(margin).boundary.coords, np.int32)
            cv2.fillPoly(mask, [pts], color)

    # prepare the mask for the grabCut algorithm
    burrow_width = self.params["burrows/width"]
    mask = np.full_like(img, cv2.GC_BGD, dtype=np.uint8)  # < sure background
    fill_mask(cv2.GC_PR_BGD, 0.25 * burrow_width)  # < possible background
    fill_mask(cv2.GC_PR_FGD, 0)  # < possible foreground
    fill_mask(cv2.GC_FGD, -0.25 * burrow_width)  # < sure foreground

    # run GrabCut algorithm
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    try:
        cv2.grabCut(img, mask, (0, 0, 1, 1), bgdmodel, fgdmodel, 2,
                    cv2.GC_INIT_WITH_MASK)
    except Exception:
        # any error in the GrabCut algorithm makes the whole function useless
        logging.warn("GrabCut algorithm failed for predug")
        return candidate

    # turn the sky into background
    pts = np.array(poly_s.boundary.coords, np.int32)
    cv2.fillPoly(mask, [pts], cv2.GC_BGD)

    # extract a binary mask determining the predug
    predug_mask = (mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD)
    predug_mask = predug_mask.astype(np.uint8)

    # simplify the mask using binary operations
    w = int(0.5 * burrow_width)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (w, w))
    predug_mask = cv2.morphologyEx(predug_mask, cv2.MORPH_OPEN, kernel)

    # extract the outline of the predug
    contour = regions.get_contour_from_largest_region(predug_mask)

    # translate curves back into the global coordinate system
    contour = curves.translate_points(contour, region.x, region.y)
    return shapes.Polygon(contour)
def runGrabCutOnTheImage(image, seeds):
    import copy

    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # Work on a copy so the caller's seed mask is not modified in place.
    mask = copy.deepcopy(seeds)
    cv2.grabCut(image, mask, None, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_MASK)
    result = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0)
    return result
def grabCut(src, box):
    mask = np.zeros(src.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    cv2.grabCut(src, mask, box, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    dst = src * mask2[:, :, np.newaxis]
    return dst
def refine_saliency_with_grabcut(img, saliency):
    rect = largest_contours_rect(saliency)
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    saliency[np.where(saliency > 0)] = cv2.GC_FGD
    mask = saliency
    # Note: with GC_INIT_WITH_RECT the foreground marks written into the
    # mask above are overwritten by the rectangle initialization.
    cv2.grabCut(img, mask, rect, bgdmodel, fgdmodel, 1,
                cv2.GC_INIT_WITH_RECT)
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    return mask
def refine_background_via_grabcut(img, is_background, dilate=False):
    # Use grabcut to cut out other background pixels; see
    # http://docs.opencv.org/trunk/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (0, 0, img.shape[1], img.shape[0])
    # Background should be 0 (GC_BGD), probable foreground 3 (GC_PR_FGD).
    grabcut_mask = np.where(is_background != 0, cv2.GC_BGD,
                            cv2.GC_PR_FGD).astype(np.uint8)
    cv2.grabCut(img, grabcut_mask, rect, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_MASK)
    return np.where((grabcut_mask == 2) | (grabcut_mask == 0), 0,
                    1).astype(np.uint8)
def grabcut(img, iters=5, roi=None, margin=5):
    """Run the GrabCut algorithm for segmentation and return the mask."""
    mask = np.zeros(img.shape[:2], np.uint8)
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    if not roi:
        roi = (margin, margin, img.shape[1] - margin * 2,
               img.shape[0] - margin * 2)
    cv2.grabCut(img, mask, roi, bgdmodel, fgdmodel, iters,
                cv2.GC_INIT_WITH_RECT)
    return mask
def callback(rect, dumb):
    mask = np.zeros(im.shape[:2], np.uint8)
    t_0 = time.time()
    cv2.grabCut(im, mask, rect, None, None, 6, cv2.GC_INIT_WITH_RECT)
    deltaT = time.time() - t_0
    print(f'deltaT = {deltaT}')
    mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img = im * mask[:, :, np.newaxis]
    pylab.imshow(img)
    pylab.show()
def foregroundExtract(input):
    x1, y1, x2, y2 = 0, 0, 500, 350
    mask = np.zeros(input.shape[:2], np.uint8)
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    rect = (x1, y1, x2, y2)
    cv2.grabCut(input, mask, rect, bgd_model, fgd_model, 5,
                cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype("uint8")
    output = input * mask2[:, :, np.newaxis]
    return output
print('run grabcut - rect')
print('crop_img.shape')
print(crop_img.shape)
rect = (gc_bg_rect, gc_bg_rect, cimg_w - gc_bg_rect, cimg_h - gc_bg_rect)
if DEBUG:
    print(rect)
    # plt.imshow(crop_img), plt.colorbar(), plt.show()
    # raise SystemExit(0)
    # cv2.imshow("cropped", crop_img)
    # cv2.waitKey(0)
cv2.grabCut(crop_img, crop_mask, rect, bgdModel, fgdModel, 5,
            cv2.GC_INIT_WITH_RECT)
mask2 = np.where((crop_mask == 2) | (crop_mask == 0), 0, 1).astype('uint8')
img = crop_img * mask2[:, :, np.newaxis]
if DEBUG:
    outimgname = img_name[0:-4] + '_img_grabcut.png'
    cv2.imwrite(os.path.join(debug_path, plate_num, outimgname), img)
    outimgname = img_name[0:-4] + '_mask_grabcut.png'
    cv2.imwrite(os.path.join(debug_path, plate_num, outimgname), mask2 * 255)
# apply CC mask
if DEBUG:
    print(img.shape)
    print(ccmask_croped.shape)
rects = faces[0]
neighbours = faces[1]
for (x, y, w, h) in faces[0]:
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
cv2.imshow('img', img)
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (399, 277, 102, 102)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where(((mask == 0) | (mask == 2)), 0, 1).astype('uint8')
img = img * mask2[:, :, np.newaxis]
body = bodydetection.detectMultiScale(gray, 1.1, 5, 60)
grown = 0
prev = 0
curr = 0
grow = 0
grow1 = grow
flag = 1
for y in faces[0]:
    for x in faces[2]:
        temp = y
        prev = x
import cv2
import numpy as np
import matplotlib.pyplot as pt

frame = cv2.imread('3d.jpg')
mask = np.zeros(frame.shape[:2], np.uint8)
bgM = np.zeros((1, 65), np.float64)
fgM = np.zeros((1, 65), np.float64)
rect = (40, 40, 170, 140)
cv2.grabCut(frame, mask, rect, bgM, fgM, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img = frame * mask2[:, :, np.newaxis]
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def seg_butterfly(image, method="otsu", alpha=1.0, gmmborder=0.1,
                  use_otsu=True):
    """Segment a given image and return statistics of the largest object."""
    image_hsv = rgb2hsv(image)
    saturation = image_hsv[:, :, 1]
    if method == "otsu":
        t = alpha * threshold_otsu(saturation)
        print("Threshold: {}".format(t))
        contours = find_contours(saturation, t)
        binaryimage = saturation > t
    elif not mixture_disabled and method == "gmm":
        # Fit a GMM to the border pixels and threshold the per-pixel score.
        g = GMM(3)
        bx = int(gmmborder * image.shape[1]) - 1
        by = int(gmmborder * image.shape[0]) - 1
        X = np.concatenate([
            np.reshape(image_hsv[:, :bx, :], [-1, 3]),
            np.reshape(image_hsv[:, -bx:, :], [-1, 3]),
            np.reshape(image_hsv[:by, :, :], [-1, 3]),
            np.reshape(image_hsv[-by:, :, :], [-1, 3])
        ])
        print("GMM learning ...")
        g.fit(X)
        Xt = np.array(np.reshape(image_hsv, [-1, 3]), dtype=float)
        s = np.reshape(g.score(Xt), image_hsv.shape[:2])
        if use_otsu:
            t = alpha * threshold_otsu(s)
        else:
            t = alpha
        print("Threshold: {}".format(t))
        contours = find_contours(s, t)
        binaryimage = s > t
    elif not grabcut_disabled and method == "grabcut":
        grabcut_mask = np.zeros(image.shape[:2], np.uint8)
        grabcut_rect = (int(image.shape[1] * 0.05),
                        int(image.shape[0] * 0.1),
                        int(image.shape[1] * 0.9),
                        int(image.shape[0] * 0.8))
        bgd_model = np.zeros((1, 65), np.float64)
        fgd_model = np.zeros((1, 65), np.float64)
        grabCut(image.astype(np.uint8), grabcut_mask, grabcut_rect,
                bgd_model, fgd_model, 10, cv2.GC_INIT_WITH_RECT)
        # Map probable background (2) to 0 and probable foreground (3) to 1.
        grabcut_mask[grabcut_mask == 2] = 0
        grabcut_mask[grabcut_mask == 3] = 1
        binaryimage = grabcut_mask
        contours = find_contours(binaryimage, 0.5)
    else:
        raise Exception("Method {} not supported!".format(method))

    # Per-channel statistics of the segmented pixels.
    value_channel = image_hsv[binaryimage, 2]   # the intensity channel
    saturation = image_hsv[binaryimage, 1]      # the saturation channel
    hue = image_hsv[binaryimage, 0]             # the hue channel
    stats = {}
    stats['median-intensity'] = np.median(value_channel)
    stats['mean-intensity'] = np.mean(value_channel)
    stats['stddev-intensity'] = np.std(value_channel)
    stats['median-saturation'] = np.median(saturation)
    stats['mean-saturation'] = np.mean(saturation)
    stats['stddev-saturation'] = np.std(saturation)
    stats['median-hue'] = np.median(hue)
    stats['mean-hue'] = np.mean(hue)
    stats['stddev-hue'] = np.std(hue)
    stats['seg-absolute-size'] = len(value_channel)
    stats['seg-relative-size'] = len(value_channel) / float(
        image_hsv.shape[0] * image_hsv.shape[1])

    # Pick the longest contour and compute its bounding box.
    maxc = 0
    for n, contour in enumerate(contours):
        if len(contours[n]) > len(contours[maxc]):
            maxc = n
    stats['c-length'] = len(contours[maxc])
    stats['c-area'] = contour_area(contours[maxc])
    stats['c-xmin'] = np.amin(contours[maxc][:, 1])
    stats['c-xmax'] = np.amax(contours[maxc][:, 1])
    stats['c-ymin'] = np.amin(contours[maxc][:, 0])
    stats['c-ymax'] = np.amax(contours[maxc][:, 0])
    return stats, contours[maxc], binaryimage
# Show the rough, approximated output
cv2.imshow("Rough Output", roughOutput)
cv2.waitKey(0)

# Any mask values greater than zero should be set to probable foreground
mask[mask > 0] = cv2.GC_PR_FGD
mask[mask == 0] = cv2.GC_BGD

# Allocate memory for two arrays that the GrabCut algorithm internally uses
# when segmenting the foreground from the background
fgModel = np.zeros((1, 65), dtype="float")
bgModel = np.zeros((1, 65), dtype="float")

# Apply GrabCut using the mask segmentation method
start = time.time()
(mask, bgModel, fgModel) = cv2.grabCut(image, mask, None, bgModel, fgModel,
                                       iterCount=args["iter"],
                                       mode=cv2.GC_INIT_WITH_MASK)
end = time.time()
print("[INFO] Applying GrabCut took {:.2f} seconds".format(end - start))

# The output mask has 4 possible output values, marking each pixel in the
# mask as:
# (1) Definite Background
# (2) Definite Foreground
# (3) Probable Background
# (4) Probable Foreground
values = (("Definite Background", cv2.GC_BGD),
          ("Probable Background", cv2.GC_PR_BGD),
          ("Definite Foreground", cv2.GC_FGD),
          ("Probable Foreground", cv2.GC_PR_FGD))

# Loop over the possible GrabCut mask values
for (name, value) in values:
    # Construct a mask for each one of the current values
    print("[INFO] Showing mask for '{}'".format(name))
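    # The excerpt ends mid-loop; a plausible completion of the loop body,
    # under the `mask`/`cv2.imshow` conventions used above, builds and
    # displays a per-label visualization like this:
    valueMask = (mask == value).astype("uint8") * 255
    cv2.imshow(name, valueMask)
    cv2.waitKey(0)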
'''
Interactive foreground extraction using the GrabCut algorithm
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt
'''
Principle

How does it work from the user's point of view? At the start, the user draws
a rectangle around the foreground region (the foreground must lie entirely
inside the rectangle). The algorithm then segments iteratively until it
reaches the best result. Sometimes the segmentation is not ideal, e.g. some
foreground is labeled background or vice versa. In that case the user makes
corrections: a single stroke (a mouse click) on the wrong region tells the
computer "hey, you got this part wrong, fix it in the next iteration", and
the next round of iterations produces a better result.
'''
'''
cv2.grabCut()
* img - input image
* mask - mask image marking which regions are background, foreground,
  probable foreground/background, etc. Use cv2.GC_BGD, cv2.GC_FGD,
  cv2.GC_PR_BGD, cv2.GC_PR_FGD, or simply pass 0, 1, 2, 3.
* rect - rectangle containing the foreground, in the format (x, y, w, h)
* bgdModel, fgdModel - arrays used internally by the algorithm. Just create
  two arrays of size (1, 65) with dtype np.float64.
* iterCount - number of iterations of the algorithm
* mode - cv2.GC_INIT_WITH_RECT or cv2.GC_INIT_WITH_MASK, or a combination
  of both; it selects whether we edit in rectangle mode or mask mode.
'''
image = cv2.imread('../data/plane.jpg')
mask = np.zeros(image.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (50, 50, 450, 290)
# The function returns the updated mask, bgdModel and fgdModel.
cv2.grabCut(image, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
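# A sketch of the touch-up step the text above describes, assuming a
# hypothetical hand-painted stroke image '../data/strokes.png' (white =
# foreground, black = background): the marked pixels override the labels
# and GrabCut is re-run in mask mode on the models learned so far.
newmask = cv2.imread('../data/strokes.png', 0)
mask[newmask == 0] = cv2.GC_BGD     # strokes marked as sure background
mask[newmask == 255] = cv2.GC_FGD   # strokes marked as sure foreground
mask, bgdModel, fgdModel = cv2.grabCut(image, mask, None, bgdModel,
                                       fgdModel, 5, cv2.GC_INIT_WITH_MASK)
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
plt.imshow(cv2.cvtColor(image * mask2[:, :, np.newaxis], cv2.COLOR_BGR2RGB))
plt.show()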
import cv2
import numpy as np

img = cv2.imread('../img/shyaro.jpg')
mask = np.zeros(img.shape[:2], np.uint8)
src = np.zeros((1, 65), np.float64)
sub = np.zeros((1, 65), np.float64)
rect = (161, 79, 150, 150)
cv2.grabCut(img, mask, rect, src, sub, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img = img * mask2[:, :, np.newaxis]
cv2.imshow('cut', img)
cv2.waitKey()
cv2.destroyAllWindows()
def fgBgCombiner(path, bg_frame, save_dir):
    '''
    Change the background in videos using the GrabCut algorithm
    ------------------------
    Parameters
        path (string): path of the video whose background is replaced
        bg_frame (array like): background image (or nd.array)
        save_dir (string): path where to save the obtained video
    Returns
        None
    ------------------------
    '''
    # Get the video name from the full path
    file_name = path.split('/')[-1].split('.')[0]
    # Open the video and check that it opened successfully
    cap = cv2.VideoCapture(cv2.samples.findFileOrKeep(path))
    if not cap.isOpened():
        print('Unable to open: ' + path)
        exit(0)
    # Get fps and shape of the video
    if int(MAJOR_VER) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(3)), int(cap.get(4)))
    # Create the video writer
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(save_dir + file_name + '.avi', fourcc, fps, size)
    # Resize the background frame (image) to the video shape
    background = cv2.resize(bg_frame, size, interpolation=cv2.INTER_AREA)
    # Loop over the frames of the video
    while True:
        # Grab the current frame
        _, frame = cap.read()
        # If the frame could not be grabbed, we reached the end of the video
        if frame is None:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Initial mask
        mask = np.zeros(frame.shape[:2], np.uint8)
        # These arrays are used by the algorithm internally
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        # Specify a region of interest (RoI) and apply the grabCut
        # algorithm; one iteration is fast but not good for correct
        # segmentation.
        rect = (200, 50, 300, 400)
        cv2.grabCut(frame, mask, rect, bgdModel, fgdModel, 1,
                    cv2.GC_INIT_WITH_RECT)
        # New mask for the moving object
        mask2 = np.where((mask == 2) | (mask == 0), (0,), (1,)).astype('uint8')
        frame = frame * mask2[:, :, np.newaxis]
        mask_1 = frame > 0
        mask_2 = frame <= 0
        # Linear combination of bgd and fgd frames with mask_1 and mask_2 scalars
        combination = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) * mask_1 + \
            background * mask_2
        combination = combination.astype(dtype=np.uint8)
        # Write the combined frame
        out.write(combination)
    # When everything is done, release the camera and close any open windows
    print('Background change is finished')
    cap.release()
    cv2.destroyAllWindows()
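# A minimal usage sketch for fgBgCombiner, assuming a hypothetical video
# 'input.mp4' and background image 'beach.jpg' exist next to the script,
# and that MAJOR_VER (the OpenCV major version string the function checks)
# is defined at module level.
bg = cv2.imread('beach.jpg')
fgBgCombiner('input.mp4', bg, './output/')  # writes ./output/input.avi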
import cv2
import numpy as np
from matplotlib import pyplot as plt

image_bgr = cv2.imread('images/input_2.jpg')
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
rectangle = (0, 0, 300, 380)  # alternative: (600, 550, 1150, 2000)
mask = np.zeros(image_rgb.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
cv2.grabCut(image_rgb, mask, rectangle, bgdModel, fgdModel, 5,
            cv2.GC_INIT_WITH_RECT)
mask_2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
image_rgb_nobg = image_rgb * mask_2[:, :, np.newaxis]
plt.imshow(image_rgb_nobg), plt.axis('off')
plt.show()
# Mask, the same size as the source image
mask = np.zeros(src.shape[:2], dtype=np.uint8)
# Rectangular ROI containing the foreground, in the format (x, y, w, h)
rect = (int(r[0]), int(r[1]), int(r[2]), int(r[3]))
# Temporary array for the background model
bgdmodel = np.zeros((1, 65), np.float64)
# Temporary array for the foreground model
fgdmodel = np.zeros((1, 65), np.float64)

cv2.grabCut(src, mask, rect, bgdmodel, fgdmodel, 11,
            mode=cv2.GC_INIT_WITH_RECT)
print(np.unique(mask))

# Extract the foreground (1) and probable foreground (3) regions
mask2 = np.where((mask == 1) | (mask == 3), 255, 0).astype('uint8')
print(mask2.shape)
# Bitwise AND of src with itself, restricted to the binary mask
result = cv2.bitwise_and(src, src, mask=mask2)
# cv2.imwrite('result.jpg', result)
# cv2.imwrite('roi.jpg', roi)
erosion = cv.erode(mask_down, kernel, iterations=25)
# Blend the eroded and original masks: pixels that stay at 255 are sure
# foreground, the 128 band is probable foreground, the rest background.
gcmask = cv.addWeighted(erosion, 0.5, mask_down, 0.5, 0)
gcmask = np.where((gcmask != 128) & (gcmask != 255), cv.GC_BGD, gcmask)
gcmask[gcmask == 128] = cv.GC_PR_FGD
gcmask[gcmask == 255] = cv.GC_FGD
backgroundModel = np.zeros((1, 65), np.float64)
foregroundModel = np.zeros((1, 65), np.float64)
rectangle = (150, 150, 100, 100)
start = time.time()
cv.grabCut(road_down, gcmask, rectangle, backgroundModel, foregroundModel,
           1, cv.GC_INIT_WITH_MASK)
print(time.time() - start)
# grabCut updates gcmask in place; the original read mask_down here,
# which grabCut never modifies.
mask2 = np.where((gcmask == 2) | (gcmask == 0), 0, 1).astype('uint8')
mask2 = cv.pyrUp(mask2)
gcroad = road * mask2[:, :, np.newaxis]
# video.write(road)
# output segmented image with colorbar
cv.imshow('road', gcroad)
cv.waitKey(10)
# cv.destroyAllWindows()
def fname(name, url):
    # An earlier draft resized and saved the upload via PIL:
    # path = 'media/' + name[7:]
    # if path != "media/":
    #     with Image.open(path) as image:
    #         width, height = image.size
    #         image.save("media/" + name + "converted.jpeg")
    #     return image
    # else:
    #     return ''
    # global generator
    # generator = keras.models.load_model('home/pikay2.h5')
    path = 'media/' + name
    if path != "media/":
        img = cv2.imread(path)
        img2 = cv2.resize(img, (256, 256))
        x_test = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
        # Scale to [-1, 1] for the generator.
        x_test = np.array(x_test) / 127.5 - 1.
        # Keras expects the first element of the shape to be the batch size.
        x_test = x_test.reshape((1, 256, 256, 3))
        gen = generator.predict(x_test)
        scipy.misc.imsave("home/static/home/downloaded_images/" + name, gen[0])
        image = img2  # the grayscale GAN input image
        # template is the result after predict
        template = cv2.imread("home/static/home/downloaded_images/" + name)
        # Erode the generated template and white out the background.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
        template = cv2.morphologyEx(template, cv2.MORPH_ERODE, kernel,
                                    iterations=1)
        image[template == 0] = 255
        # cv2.imwrite('resl.jpg', template)
        # GrabCut the original image to isolate the subject.
        image = cv2.imread(path)
        image = cv2.resize(image, (256, 256))
        mask = np.zeros(image.shape[:2], np.uint8)
        backgroundModel = np.zeros((1, 65), np.float64)
        foregroundModel = np.zeros((1, 65), np.float64)
        rectangle = (0, 0, 255, 255)
        cv2.grabCut(image, mask, rectangle, backgroundModel,
                    foregroundModel, 3, cv2.GC_INIT_WITH_RECT)
        mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        image = image * mask2[:, :, np.newaxis]
        # cv2.imwrite('temp2.jpg', image)
        result = template
        gray = image
        result[gray <= 2] = 255
        # cv2.imshow('res', result)
        # cv2.imshow('gray', gray)
        # plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
        scipy.misc.imsave('home/static/home/downloaded_images/' + name,
                          cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
        # Boost the color saturation of the saved result.
        img = PIL.Image.open('home/static/home/downloaded_images/' + name)
        converter = PIL.ImageEnhance.Color(img)
        img2 = converter.enhance(1.5)
        img2.save('home/static/home/downloaded_images/' + name)
        # fs = FileSystemStorage()
        # fs.save("saved_img.png", "my.png")
        return ''
    else:
        return ''
        rect = (0, 0, 1, 1)
        drawing = False
        rectangle = False
        rect_or_mask = 100
        rect_over = False
        value = DRAW_FG
        img = img2.copy()
        mask = np.zeros(img.shape[:2], dtype=np.uint8)  # mask initialized to PR_BG
        output = np.zeros(img.shape, np.uint8)  # output image to be shown
    elif k == ord('n'):  # segment the image
        print(""" For finer touchups, mark foreground and background after
              pressing keys 0-3 and again press 'n' \n""")
        if rect_or_mask == 0:  # grabcut with rect
            bgdmodel = np.zeros((1, 65), np.float64)
            fgdmodel = np.zeros((1, 65), np.float64)
            cv.grabCut(img2, mask, rect, bgdmodel, fgdmodel, 1,
                       cv.GC_INIT_WITH_RECT)
            rect_or_mask = 1
        elif rect_or_mask == 1:  # grabcut with mask
            bgdmodel = np.zeros((1, 65), np.float64)
            fgdmodel = np.zeros((1, 65), np.float64)
            cv.grabCut(img2, mask, rect, bgdmodel, fgdmodel, 1,
                       cv.GC_INIT_WITH_MASK)
    mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
    output = cv.bitwise_and(img2, img2, mask=mask2)

cv.destroyAllWindows()
import cv2
from watershed1 import *
from matplotlib import pyplot as plt

fig = plt.figure(figsize=(24, 24))
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (50, 50, 450, 290)
ax = []
imgs = ['trial6.jpg', 'index.jpeg', 'trial.jpg']
cvtimg = []
oimg = []
for i, img in enumerate(imgs):
    oimg.append(cv2.imread(img))
    mask = np.zeros(oimg[i].shape[:2], np.uint8)
    cv2.grabCut(oimg[i], mask, rect, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    maskimg = oimg[i] * mask2[:, :, np.newaxis]
    cvtimg.append(cv2.cvtColor(maskimg, cv2.COLOR_BGR2GRAY))
    watershed_algo(cvtimg[i])
for i, img in enumerate(oimg):
    ax.append(fig.add_subplot(len(imgs), 2, i * 2 + 1))
    ax[-1].set_title("original image")
    plt.imshow(img)
    ax.append(fig.add_subplot(len(imgs), 2, i * 2 + 2))
    ax[-1].set_title("segmented image")
    plt.imshow(cvtimg[i])
plt.show()
'''
img3 = cv2.imread('trial6.jpg')
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time

start = time.time()
img = cv2.imread('rose.jpg')
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (45, 79, 150, 150)
rect2 = (150, 75, 550, 450)
cv2.grabCut(img, mask, rect2, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img = img * mask2[:, :, np.newaxis]
plt.imshow(img)
plt.colorbar()
plt.show()
end = time.time()
print(end - start)
cv2.namedWindow('image')
cv2.setMouseCallback('image', mouse_callback)

while True:
    cv2.imshow('image', show_img)
    k = cv2.waitKey(1)
    if k == ord('a') and not mouse_pressed:
        if w * h > 0:
            break

cv2.destroyAllWindows()

labels = np.zeros(img.shape[:2], np.uint8)
labels, bgdModel, fgdModel = cv2.grabCut(img, labels, (x, y, w, h), None,
                                         None, 5, cv2.GC_INIT_WITH_RECT)

show_img = np.copy(img)
# Darken everything GrabCut labeled as (probable) background.
show_img[(labels == cv2.GC_PR_BGD) | (labels == cv2.GC_BGD)] //= 3

cv2.imshow('image', show_img)
cv2.waitKey()
cv2.destroyAllWindows()

label = cv2.GC_BGD
lbl_clrs = {cv2.GC_BGD: (0, 0, 0), cv2.GC_FGD: (255, 255, 255)}


def mouse_callback(event, x, y, flags, param):
    global mouse_pressed
                  color=1, thickness=-1)
    cv2.rectangle(m,
                  (bbox[0] + (bbox[2] - bbox[0]) // 4,
                   bbox[1] + (bbox[3] - bbox[1]) // 4),
                  (bbox[0] + (bbox[2] - bbox[0]) // 4 * 3,
                   bbox[1] + (bbox[3] - bbox[1]) // 4 * 3),
                  color=2, thickness=-1)
    m_ = np.where(m > 0, m + 1, 0)
    # print(m.shape, m.dtype)
    # print(cv_img.shape, cv_img.dtype)
    # cv2.grabCut(cv_img, m_, (x1, y1, w, h), bgModel, fgModel, iterCount=3,
    #             mode=cv2.GC_INIT_WITH_MASK)
    # For visualization purposes
    m_cut = np.where(((m_ == 2) | (m_ == 0)), 0, 1).astype(np.uint8)
    # cat_image = np.concatenate([m // 2 * 255, m_cut * 255], axis=1)
    # cv2.imshow('mask vs. cut', cat_image)
    # cv_img_mask = cv_img * m_cut[:, :, np.newaxis]
    # cv_img_mask = cv_img * m[:, :, np.newaxis]
    # cv2.imshow('image', cv_img_mask)
    # if cv2.waitKey() & 0xFF == ord('q'):
    #     exit()
    result['segmentation'] = encode_mask(m_cut)
    elif k == ord('r') or k == ord('2'):
        print('Restart')
        mask_fore = np.zeros_like(img)
        mask_back = np.zeros_like(img)
    # clear foreground mask
    elif k == ord('4'):
        print('Reselect foreground')
        mask_fore = np.zeros_like(img)
    # clear background mask
    elif k == ord('5'):
        print('Reselect background')
        mask_back = np.zeros_like(img)
    # run grabcut algorithm
    elif k == ord('c') or k == ord('3') or k == 13:
        print('Cutting foreground from the picture')
        # Everything starts as probable background (2); painted strokes
        # override it with sure foreground (1) or sure background (0).
        mask_global = np.zeros(img.shape[:2], np.uint8) + 2
        mask_global[mask_fore[:, :, 1] == 255] = 1
        mask_global[mask_back[:, :, 2] == 255] = 0
        mask_global, bgdModel, fgdModel = cv2.grabCut(
            img, mask_global, None, bgdModel, fgdModel, iteration,
            cv2.GC_INIT_WITH_MASK)
        mask_global = np.where((mask_global == 2) | (mask_global == 0), 0,
                               1).astype('uint8')
        target = img * mask_global[:, :, np.newaxis]
        cv2.imshow('target', target)
        cv2.imwrite('result.jpg', target)
    elif k == 27:
        break

cv2.destroyAllWindows()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    # img = cv2.medianBlur(gray, 5)
    # circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 50, param1=50,
    #                            param2=30, minRadius=30, maxRadius=40)
    mask = np.zeros(frame.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (50, 50, 450, 290)
    cv2.grabCut(frame, mask, rect, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    frame = frame * mask2[:, :, np.newaxis]
    # if circles is None:
    #     continue
    # circles = np.uint16(np.around(circles))
    # for i in circles[0, :]:
    #     # draw the outer circle
    #     cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 2)
    #     # draw the center of the circle
    #     cv2.circle(frame, (i[0], i[1]), 2, (0, 0, 255), 3)
    # minLineLength = 600
    # maxLineGap = 10
def main(argv):
    kNumGrabCutIters = 5
    kStartImage = 100
    base_dir = sys.argv[1]

    # Setup.
    image_dir = os.path.join(base_dir, 'dump', 'feature_tracking_primary')
    bounding_box_file = os.path.join(base_dir, 'dump',
                                     'finger_bounding_box.txt')
    marker_pixels_file = os.path.join(base_dir, 'dump', 'marker_pixels.txt')
    bounding_info = np.genfromtxt(bounding_box_file, delimiter=",")
    marker_info = np.genfromtxt(marker_pixels_file, delimiter=",")
    assert len(marker_info) == len(bounding_info)
    image_format = 'image_%05d.ppm'
    output_dir = os.path.join(base_dir, 'dump', 'grabcut_refined1')
    grabcut_format = 'grabcut_%05d.jpg'

    # Create the output dir if needed.
    call(['mkdir', '-p', output_dir])
    timer_list = list()
    plt.hold(False)
    total_iters = len(bounding_info)
    counter = 0
    print(('%.2f' % (float(counter) / total_iters * 100)) + ' %', end='\r')
    sys.stdout.flush()

    # Traverse by row.
    for row_i in range(len(bounding_info)):
        if row_i < kStartImage:
            counter = counter + 1
            continue
        crop_info_i = bounding_info[row_i]
        marker_info_i = marker_info[row_i]
        assert marker_info_i[0] == crop_info_i[0]

        # Extract box data.
        image_num = crop_info_i[0].astype('uint32')
        # Fix the 1-indexing used by matlab.
        top_left_x = crop_info_i[1].astype('uint32') - 1
        top_left_y = crop_info_i[2].astype('uint32') - 1
        height = crop_info_i[3].astype('uint32')
        width = crop_info_i[4].astype('uint32')
        rect = (top_left_x, top_left_y, width, height)
        # Fix the 1-indexing used by matlab!
        marker_info_i = marker_info_i[1:] - 1

        # Extract marker masks (cropped).
        marker_masks = ExtractMarkerMasksFromCoords(rect, marker_info_i)

        # Skip if not in bounds.
        if top_left_x < 0 or top_left_y < 0 or height < 1 or width < 1:
            # print('Could not extract bounding box ' + str(image_num))
            counter = counter + 1
            continue
        if len(marker_masks) == 0:
            # print('Could not extract marker box ' + str(image_num))
            counter = counter + 1
            continue

        # Load the image.
        image_name = image_format % image_num
        image_fullname = os.path.join(image_dir, image_name)
        cv_img = cv2.imread(image_fullname)
        full_mask = np.zeros(cv_img.shape[:2], np.uint8)

        # Histograms for GrabCut.
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)

        # Run GrabCut using the rect on the original image.
        start = time.time()
        cv2.grabCut(cv_img, full_mask, rect, bgdModel, fgdModel,
                    kNumGrabCutIters, cv2.GC_INIT_WITH_RECT)
        timer_list.append(time.time() - start)

        ## DEBUG: show the rect-initialized mask next to the image.
        full_mask = np.where((full_mask == 2) | (full_mask == 0),
                             0, 1).astype('uint8') * 255
        full_mask = full_mask[:, :, np.newaxis]
        full_mask = np.concatenate((full_mask, full_mask, full_mask), axis=2)
        b, g, r = cv2.split(cv_img)
        rgb_img = cv2.merge([r, g, b])
        stitched_img_show = np.hstack((rgb_img, full_mask))
        plt.imshow(stitched_img_show)
        plt.show(block=False)
        wait()
        ## End DEBUG

        # Crop the image.
        crop_img = cv_img[top_left_y:top_left_y + height,
                          top_left_x:top_left_x + width]
        # Mask for combining the masks returned from OpenCV.
        final_mask = np.zeros(crop_img.shape[:2], np.uint8)

        # Run OpenCV on each marker mask.
        for marker_mask in marker_masks:
            print("size crop_img: " + str(crop_img.shape))
            print("mask shape: " + str(marker_mask.shape))
            start = time.time()
            tmp_mask, tmp_bgd, tmp_fgd = cv2.grabCut(
                crop_img, marker_mask, None, bgdModel, fgdModel,
                kNumGrabCutIters, cv2.GC_INIT_WITH_MASK)
            timer_list.append(time.time() - start)
            # Map background labels to 0, foreground to 1 (the original
            # referenced an undefined name `mask` here).
            tmp_mask = np.where((tmp_mask == 2) | (tmp_mask == 0),
                                0, 1).astype('uint8')
            # Add to the final mask.
            final_mask = final_mask + tmp_mask
            final_mask = np.where((final_mask > 0), 1, 0).astype('uint8')

        final_mask = final_mask * 255
        # Reshape the returned mask for concatenation.
        final_mask = final_mask[:, :, np.newaxis]
        final_mask = np.concatenate((final_mask, final_mask, final_mask),
                                    axis=2)
        # Concat the two images.
        b, g, r = cv2.split(cv_img)
        rgb_img = cv2.merge([r, g, b])
        stitched_img_show = np.hstack((rgb_img, final_mask))
        stitched_img_save = np.hstack((cv_img, final_mask))
        ## Show the image.
        # plt.imshow(stitched_img_show)
        # plt.show(block=False)
        # Save the image.
        outfile = grabcut_format % image_num
        outfullfile = os.path.join(output_dir, outfile)
        cv2.imwrite(outfullfile, stitched_img_save)
        wait()
        counter = counter + 1
        print(('%.2f' % (float(counter) / total_iters * 100)) + ' %',
              end='\r')
        sys.stdout.flush()

    print('Timing Statistics: ')
    print('mean: ', np.average(timer_list))
    print('std-dev: ', np.std(timer_list))
    print('min: ', np.min(timer_list))
    print('max: ', np.max(timer_list))
pt4 = (200, 300)
rec2 = pt3 + pt4
# img4 = cv.line(img1, pt1, pt2, (255, 0, 0), 3)
# img5 = cv.line(img2, pt3, pt4, (255, 0, 0), 3)
# cv.imshow("1", img4)
# cv.imshow("2", img5)
mask1 = np.zeros(img1.shape[:2], np.uint8)
mask2 = np.zeros(img2.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
cv.grabCut(img1, mask1, rec1, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)
mask1_2 = np.where((mask1 == 2) | (mask1 == 0), 0, 1).astype('uint8')
img1 = img1 * mask1_2[:, :, np.newaxis]
# cv.imshow("4", img1)
cv.grabCut(img2, mask2, rec2, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)
mask2_2 = np.where((mask2 == 2) | (mask2 == 0), 0, 1).astype('uint8')
img2 = img2 * mask2_2[:, :, np.newaxis]
# cv.imshow("5", img2)
img1 = img1.astype(float)
img2 = img2.astype(float)
person = cv.add(img1, img2)
# cv.imwrite("images/result99.jpg", person)
alpha = person
bbox[0] = int(float(bbox[0]) * im.shape[0])
bbox[2] = int(float(bbox[2]) * im.shape[0])
bbox[1] = int(float(bbox[1]) * im.shape[1])
bbox[3] = int(float(bbox[3]) * im.shape[1])
t_mask = np.zeros((512, 512, 1), np.uint8)
# print(bbox)
cv2.rectangle(mask, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255), -1)
cv2.rectangle(t_mask, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255), -1)
# Seed GrabCut: pixels inside the box become sure foreground (1).
grab_mask = np.zeros((512, 512, 1), np.uint8)
grab_mask[t_mask == 255] = 1
preproc_mask, bgdModel, fgdModel = cv2.grabCut(
    im, grab_mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
preproc_mask = np.where((preproc_mask == 2) | (preproc_mask == 0), 0,
                        1).astype('uint8')
temp = np.ones((im.shape[0], im.shape[1], 1), np.uint8)
temp = temp * preproc_mask[:, :]  # , np.newaxis]
# cv2.imshow('scrab', temp * 255)
mask_nonzero = np.where(target_mask == 0, 1, 0)
# target_mask = target_mask + temp * mask_nonzero[:, :]
target_mask[:, :, target_class] = target_mask[:, :, target_class] + temp[:, :, 0]
# cv2.imshow('scrab_temp', target_mask[:, :, target_class] * 255)
""" import cv2 import numpy as np resim = cv2.imread("resim.jpg") mask = np.zeros(resim.shape[:2], np.uint8) bgdModel = np.zeros((1, 65), dtype=np.float64) fgdModel = np.zeros((1, 65), dtype=np.float64) 100, 0, 300, 300 rect = (100, 0, 600, 5550) 80, 0, 300, 550 #agac cv2.grabCut(resim, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT) mask2 = np.where((mask == 0) | (mask == 2), 0, 1).astype(np.uint8) resim = resim * mask2[:, :, np.newaxis] cv2.imshow("dizi resmi", resim) cv2.waitKey(0) cv2.destroyAllWindows() # """ ders 18 keskinleştirme laplacian resim
def main(input_dir, output_dir):
    input_path = Path(input_dir)
    output_path = Path(output_dir)
    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')
    if not output_path.exists():
        output_path.mkdir(parents=True)

    wnd_name = "Labeling tool"
    io.named_window(wnd_name)
    io.capture_mouse(wnd_name)
    io.capture_keys(wnd_name)

    # for filename in io.progress_bar_generator(Path_utils.get_image_paths(input_path), desc="Labeling"):
    for filename in Path_utils.get_image_paths(input_path):
        filepath = Path(filename)
        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None
        if dflimg is None:
            io.log_err("%s is not a dfl image file" % (filepath.name))
            continue

        lmrks = dflimg.get_landmarks()
        lmrks_list = lmrks.tolist()
        orig_img = cv2_imread(str(filepath))
        h, w, c = orig_img.shape

        # Hull mask from the landmarks, plus eroded/dilated variants.
        mask_orig = LandmarksProcessor.get_image_hull_mask(
            orig_img.shape, lmrks).astype(np.uint8)[:, :, 0]
        ero_dil_rate = w // 8
        mask_ero = cv2.erode(
            mask_orig,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                      (ero_dil_rate, ero_dil_rate)),
            iterations=1)
        mask_dil = cv2.dilate(
            mask_orig,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                      (ero_dil_rate, ero_dil_rate)),
            iterations=1)

        # mask_bg = np.zeros(orig_img.shape[:2], np.uint8)
        mask_bg = 1 - mask_dil
        mask_bgp = np.ones(orig_img.shape[:2], np.uint8)  # default - all background possible
        mask_fg = np.zeros(orig_img.shape[:2], np.uint8)
        mask_fgp = np.zeros(orig_img.shape[:2], np.uint8)
        img = orig_img.copy()
        l_thick = 2

        def draw_4_lines(masks_out, pts, thickness=1):
            fgp, fg, bg, bgp = masks_out
            h, w = fg.shape
            fgp_pts = []
            fg_pts = np.array([pts[i:i + 2] for i in range(len(pts) - 1)])
            bg_pts = []
            bgp_pts = []
            for i in range(len(fg_pts)):
                a, b = line = fg_pts[i]
                ba = b - a
                v = ba / npl.norm(ba)
                ccpv = np.array([v[1], -v[0]])
                cpv = np.array([-v[1], v[0]])
                step = 1 / max(np.abs(cpv))
                fgp_pts.append(
                    np.clip(line + ccpv * step * thickness, 0,
                            w - 1).astype(int))
                bg_pts.append(
                    np.clip(line + cpv * step * thickness, 0,
                            w - 1).astype(int))
                bgp_pts.append(
                    np.clip(line + cpv * step * thickness * 2, 0,
                            w - 1).astype(int))
            fgp_pts = np.array(fgp_pts)
            bg_pts = np.array(bg_pts)
            bgp_pts = np.array(bgp_pts)
            cv2.polylines(fgp, fgp_pts, False, (1,), thickness=thickness)
            cv2.polylines(fg, fg_pts, False, (1,), thickness=thickness)
            cv2.polylines(bg, bg_pts, False, (1,), thickness=thickness)
            cv2.polylines(bgp, bgp_pts, False, (1,), thickness=thickness)

        def draw_lines(masks_steps, pts, thickness=1):
            lines = np.array([pts[i:i + 2] for i in range(len(pts) - 1)])
            for mask, step in masks_steps:
                h, w = mask.shape
                mask_lines = []
                for i in range(len(lines)):
                    a, b = line = lines[i]
                    ba = b - a
                    ba_len = npl.norm(ba)
                    if ba_len != 0:
                        v = ba / ba_len
                        pv = np.array([-v[1], v[0]])
                        pv_inv_max = 1 / max(np.abs(pv))
                        mask_lines.append(
                            np.clip(line + pv * pv_inv_max * thickness * step,
                                    0, w - 1).astype(int))
                    else:
                        mask_lines.append(np.array(line, dtype=int))
                cv2.polylines(mask, mask_lines, False, (1,),
                              thickness=thickness)

        def draw_fill_convex(mask_out, pts, scale=1.0):
            hull = cv2.convexHull(np.array(pts))
            if scale != 1.0:
                pts_count = hull.shape[0]
                sum_x = np.sum(hull[:, 0, 0])
                sum_y = np.sum(hull[:, 0, 1])
                hull_center = np.array([sum_x / pts_count, sum_y / pts_count])
                hull = hull_center + (hull - hull_center) * scale
                hull = hull.astype(pts.dtype)
            cv2.fillConvexPoly(mask_out, hull, (1,))

        def get_gc_mask_bgr(gc_mask):
            h, w = gc_mask.shape
            bgr = np.zeros((h, w, 3), dtype=np.uint8)
            bgr[gc_mask == 0] = (0, 0, 0)
            bgr[gc_mask == 1] = (255, 255, 255)
            bgr[gc_mask == 2] = (0, 0, 255)  # RED
            bgr[gc_mask == 3] = (0, 255, 0)  # GREEN
            return bgr

        def get_gc_mask_result(gc_mask):
            return np.where((gc_mask == 1) + (gc_mask == 3), 1,
                            0).astype(int)

        # convex inner of right chin to end of right eyebrow
        # draw_fill_convex(mask_fgp, lmrks_list[8:17] + lmrks_list[26:27])
        # convex inner of start right chin to right eyebrow
        # draw_fill_convex(mask_fgp, lmrks_list[8:9] + lmrks_list[22:27])
        # convex inner of nose
        draw_fill_convex(mask_fgp, lmrks[27:36])
        # convex inner of nose half
        draw_fill_convex(mask_fg, lmrks[27:36], scale=0.5)
        # left corner of mouth to left corner of nose
        # draw_lines([(mask_fg, 0)], lmrks_list[49:50] + lmrks_list[32:33], l_thick)
        # convex inner: right corner of nose to centers of eyebrows
        # draw_fill_convex(mask_fgp, lmrks_list[35:36] + lmrks_list[19:20] + lmrks_list[24:25])
        # right corner of mouth to right corner of nose
        # draw_lines([(mask_fg, 0)], lmrks_list[54:55] + lmrks_list[35:36], l_thick)
        # left eye
        # draw_fill_convex(mask_fg, lmrks_list[36:40])
        # right eye
        # draw_fill_convex(mask_fg, lmrks_list[42:48])
        # right chin
        draw_lines([
            (mask_bg, 0),
            (mask_fg, -1),
        ], lmrks[8:17], l_thick)
        # left eyebrow center to right eyebrow center
        draw_lines([
            (mask_bg, -1),
            (mask_fg, 0),
        ], lmrks_list[19:20] + lmrks_list[24:25], l_thick)
        # draw_lines([(mask_bg, -1), (mask_fg, 0)], lmrks_list[24:25] + lmrks_list[19:17:-1], l_thick)
        # half right eyebrow to end of right chin
        draw_lines([
            (mask_bg, -1),
            (mask_fg, 0),
        ], lmrks_list[24:27] + lmrks_list[16:17], l_thick)

        # Compose the mask layers into a single GrabCut label image.
        gc_mask = np.zeros(orig_img.shape[:2], np.uint8)
        gc_mask[mask_bgp == 1] = 2
        gc_mask[mask_fgp == 1] = 3
        gc_mask[mask_bg == 1] = 0
        gc_mask[mask_fg == 1] = 1
        gc_bgr_before = get_gc_mask_bgr(gc_mask)

        # Earlier experiments, kept for reference:
        # points, hierarcy = cv2.findContours(original_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # gc_mask = ((1 - erode_mask) * 2 + erode_mask)  # * dilate_mask
        # gc_mask = mask_0_27 + (1 - mask_0_27) * 2
        # rect = (1, 1, img.shape[1] - 2, img.shape[0] - 2)

        cv2.grabCut(img, gc_mask, None, np.zeros((1, 65), np.float64),
                    np.zeros((1, 65), np.float64), 5, cv2.GC_INIT_WITH_MASK)

        gc_bgr = get_gc_mask_bgr(gc_mask)
        gc_mask_result = get_gc_mask_result(gc_mask)
        gc_mask_result_1 = gc_mask_result[:, :, np.newaxis]

        orig_img_gc_layers_masked = (0.5 * orig_img + 0.5 * gc_bgr).astype(np.uint8)
        orig_img_gc_before_layers_masked = (
            0.5 * orig_img + 0.5 * gc_bgr_before).astype(np.uint8)
        pink_bg = np.full(orig_img.shape, (255, 0, 255), dtype=np.uint8)
        orig_img_result = orig_img * gc_mask_result_1
        orig_img_result_pinked = orig_img_result + pink_bg * (1 - gc_mask_result_1)

        # mask2 = np.where((gc_mask == 1) + (gc_mask == 3), 255, 0).astype('uint8')[:, :, np.newaxis]
        # mask2 = np.repeat(mask2, (3,), -1)
        # blended_img = np.clip(blended_img, 0, 255).astype(np.uint8)
##code.interact(local=dict(globals(), **locals())) orig_img_lmrked = orig_img.copy() LandmarksProcessor.draw_landmarks(orig_img_lmrked, lmrks, transparent_mask=True) screen = np.concatenate([ orig_img_gc_before_layers_masked, orig_img_gc_layers_masked, orig_img, orig_img_lmrked, orig_img_result_pinked, orig_img_result, ], axis=1) io.show_image(wnd_name, screen.astype(np.uint8)) while True: io.process_messages() for (x, y, ev, flags) in io.get_mouse_events(wnd_name): pass #print (x,y,ev,flags) key_events = [ev for ev, in io.get_key_events(wnd_name)] for key in key_events: if key == ord('1'): pass if key == ord('2'): pass if key == ord('3'): pass if ord(' ') in key_events: break import code code.interact(local=dict(globals(), **locals()))
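# The tool above leans on DeepFaceLab-style helpers (io, Path_utils, DFLPNG,
# DFLJPG, cv2_imread, LandmarksProcessor). A sketch of the imports it assumes;
# the module paths below follow that project's layout and are assumptions that
# may differ between releases, not verified against any particular version:
#
#   from pathlib import Path
#   import numpy as np
#   import numpy.linalg as npl
#   import cv2
#   from interact import interact as io
#   from utils import Path_utils
#   from utils.DFLPNG import DFLPNG
#   from utils.DFLJPG import DFLJPG
#   from utils.cv2_utils import cv2_imread
#   from facelib import LandmarksProcessor
#
#   main('workspace/aligned', 'workspace/labeled')   # hypothetical paths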
import sys

import cv2
import numpy as np
from matplotlib import pyplot as ppl

# Lots of help from...
# http://docs.opencv.org/master/d8/d83/tutorial_py_grabcut.html#gsc.tab=0

input_image, fg_mat, bg_mat, output_image = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]

img_input = cv2.imread(input_image)
img_fg = cv2.imread(fg_mat, 0)   # foreground scribbles, read as grayscale
img_bg = cv2.imread(bg_mat, 0)   # background scribbles, read as grayscale

mask = np.zeros(img_input.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

mask[:] = 3                # cv2.GC_PR_FGD everywhere by default
mask[img_bg != 0] = 0      # cv2.GC_BGD where the background scribbles are
mask[img_fg != 0] = 1      # cv2.GC_FGD where the foreground scribbles are

rect = None                # ignored when initializing from a mask
cv2.grabCut(img_input, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)

mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img_out = img_input * mask2[:, :, np.newaxis]

# ppl.imshow(img_out)
# ppl.show()

cv2.imwrite(output_image, img_out)
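# A hypothetical invocation (script and file names are placeholders, not from
# the original source):
#
#     python grabcut_scribbles.py photo.jpg fg_scribbles.png bg_scribbles.png cutout.png
#
# fg_scribbles.png and bg_scribbles.png are expected to be black images with
# nonzero strokes over the object and over the background, respectively.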
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 18:13:36 2020

@author: Avinash
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt

# First pass: rectangle initialization, as in the OpenCV GrabCut tutorial this
# snippet follows. The input file name and ROI below are hypothetical; the
# original fragment left img, mask and the models undefined.
img = cv2.imread("image1.jpg")                           # hypothetical input
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (50, 50, img.shape[1] - 100, img.shape[0] - 100)  # hypothetical ROI
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# Second pass: refine with a hand-edited mask. Following the tutorial's
# convention, white (255) strokes mark sure foreground, black (0) strokes
# mark sure background.
newmask = cv2.imread("masked1.jpg", 0)
mask[newmask == 0] = 0
mask[newmask == 255] = 1
mask, bgdModel, fgdModel = cv2.grabCut(img, mask, None, bgdModel, fgdModel,
                                       5, cv2.GC_INIT_WITH_MASK)
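# The tutorial this snippet copies ends by applying the refined mask and
# plotting the cutout; reproducing that step here for completeness (the
# variable name img_cut is an addition):
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img_cut = img * mask2[:, :, np.newaxis]
plt.imshow(cv2.cvtColor(img_cut, cv2.COLOR_BGR2RGB))
plt.show()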
# cv2.imshow('img', img)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

# start with everything marked "probable background"; the mouse callback is
# expected to paint sure foreground/background strokes into this mask
mask = np.zeros(img.shape[:2], np.uint8) + cv2.GC_PR_BGD

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   # grayscale version for the dimmed background
imga = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)   # what is shown before the first GrabCut pass
mask2 = np.zeros(img.shape[:2], np.uint8)

point = None
# start False: GrabCut asserts if the mask contains no (probable) foreground,
# so the callback (assumed, defined elsewhere) should set flag once strokes exist
flag = False
cv2.namedWindow('image')
cv2.setMouseCallback('image', grabcut, [False, None])

while True:
    cv2.imshow('image', imga)
    cv2.imshow('mask', cv2.multiply(mask2, 255))
    if flag:
        # mask-initialized GrabCut; GC_INIT_WITH_MASK must be passed explicitly,
        # otherwise the default GC_EVAL mode runs with uninitialized models
        cv2.grabCut(img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
        mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype(np.uint8)
        mask2_not = cv2.subtract(1, mask2)
        # foreground keeps its colors...
        img_m = cv2.bitwise_and(img, img, mask=mask2)
        img_m = cv2.cvtColor(img_m, cv2.COLOR_BGR2BGRA)
        # ...while the background is rendered as grayscale
        img_n = cv2.bitwise_and(gray, gray, mask=mask2_not)
        img_n = cv2.merge([
            img_n, img_n, img_n,
            cv2.add(np.zeros(gray.shape, np.uint8), 255)
        ])
        imga = cv2.add(img_m, img_n)
        flag = False
    if cv2.waitKey(20) == ord('q'):
        break

cv2.destroyAllWindows()
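# The 'grabcut' mouse callback registered above is not shown in this snippet.
# A minimal sketch of what it might look like, assuming left-drag paints sure
# foreground and right-drag paints sure background into the shared mask; the
# stroke radius and the flag-on-release behavior are assumptions:
def grabcut(event, x, y, flags, param):
    global flag
    if event == cv2.EVENT_MOUSEMOVE and flags & cv2.EVENT_FLAG_LBUTTON:
        cv2.circle(mask, (x, y), 5, cv2.GC_FGD, -1)   # paint sure foreground
    elif event == cv2.EVENT_MOUSEMOVE and flags & cv2.EVENT_FLAG_RBUTTON:
        cv2.circle(mask, (x, y), 5, cv2.GC_BGD, -1)   # paint sure background
    elif event in (cv2.EVENT_LBUTTONUP, cv2.EVENT_RBUTTONUP):
        flag = True                                    # re-run GrabCut on release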
def background_removal(image_bytes):
    # expects a BGR image as a numpy array; requires numpy as np, cv2 built
    # with the contrib ximgproc module, and PIL.Image imported as Image
    src = image_bytes

    hsv = cv2.bilateralFilter(src, 15, 50, 50)
    hsv = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (18, 70, 0), (86, 225, 255))

    ## slice the green
    imask = mask > 0
    green = np.zeros_like(src, np.uint8)
    green[imask] = src[imask]

    blurred = cv2.GaussianBlur(green, (3, 3), 0)
    blurred_float = blurred.astype(np.float32) / 255.0
    # needs the pretrained structured-forest edge model file
    edgeDetector = cv2.ximgproc.createStructuredEdgeDetection(r'model.yml/model.yml')
    edges = edgeDetector.detectEdges(blurred_float) * 255.0
    edges_8u = np.asarray(edges, np.uint8)
    edges_8u = cv2.medianBlur(edges_8u, 3)

    def findSignificantContour(edgeImg):
        contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv2.contourArea)  # ascending order
        largestContour = contours[-1]  # -1 => largest area
        return largestContour

    contour = findSignificantContour(edges_8u)

    # Draw the contour on the original image
    contourImg = np.copy(src)
    cv2.drawContours(contourImg, [contour], 0, (0, 255, 0), 2, cv2.LINE_AA, maxLevel=1)

    mask = np.zeros_like(edges_8u)
    cv2.fillPoly(mask, [contour], 255)

    # calculate the sure foreground area by eroding the mask
    mapFg = cv2.erode(mask, np.ones((5, 5), np.uint8), iterations=10)

    # mark the initial mask as "probable background"
    # and mapFg as sure foreground
    trimap = np.copy(mask)
    trimap[mask == 0] = cv2.GC_BGD
    trimap[mask == 255] = cv2.GC_PR_BGD
    trimap[mapFg == 255] = cv2.GC_FGD

    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    # rect is (x, y, w, h), so width comes from shape[1]; it is ignored
    # anyway because we initialize from the mask
    rect = (0, 0, mask.shape[1] - 1, mask.shape[0] - 1)
    cv2.grabCut(src, trimap, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)

    # create mask again
    mask2 = np.where((trimap == cv2.GC_FGD) | (trimap == cv2.GC_PR_FGD),
                     255, 0).astype('uint8')
    contour2 = findSignificantContour(mask2)
    mask3 = np.zeros_like(mask2)
    cv2.fillPoly(mask3, [contour2], 255)

    foreground = np.copy(src).astype(float)
    foreground[mask3 == 0] = 255

    pil_cutout = Image.fromarray(
        cv2.cvtColor(foreground.astype('uint8'), cv2.COLOR_BGR2RGB))
    return pil_cutout
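# A hedged usage sketch: despite the parameter name image_bytes, the function
# operates on a BGR numpy array. File names below are placeholders:
#
#   img = cv2.imread('plant.jpg')
#   cutout = background_removal(img)   # returns a PIL.Image in RGB
#   cutout.save('plant_cutout.png')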
def background_sub(image):
    """
    This function:
    1. Takes an image as input.
    2. Asks the user to select a ROI by dragging the mouse pointer.
    3. Performs background subtraction using the GrabCut algorithm.
    4. Displays the updated image.
    5. Saves the processed image in the current directory.

    Reference:
    https://www.geeksforgeeks.org/python-foreground-extraction-in-an-image-using-grabcut-algorithm/

    :param image: Image on which background subtraction is to be performed.
    :return:
    """
    # To check whether a ROI has been selected or not
    global drawn

    # This names the window so we can reference it
    cv2.namedWindow(winname='BG Subtractor')

    # Connects the mouse button to our callback function
    cv2.setMouseCallback('BG Subtractor', draw_rectangle)

    print("\nSelect ROI from mouse pointer.")

    # Creating the mask, background and foreground models for the GrabCut algorithm
    black_mask = np.zeros(image.shape[:2], np.uint8)
    background = np.zeros((1, 65), np.float64)
    foreground = np.zeros((1, 65), np.float64)

    while True:
        # Runs forever until we break with the Esc key on the keyboard
        # If a ROI has been selected
        if drawn:
            print("\nPerforming Background Subtraction")

            # Run the GrabCut algorithm only when the ROI has been drawn and
            # saved in the variable named rectangle
            cv2.grabCut(image, black_mask, rectangle, background, foreground,
                        5, cv2.GC_INIT_WITH_RECT)

            # Mask values 1 and 3 denote (probable) foreground; 0 and 2 denote
            # background, so convert the background pixels to black
            mask2 = np.where((black_mask == 2) | (black_mask == 0), 0, 1).astype('uint8')

            # Multiplying mask2 with the original image gives the result
            image = image * mask2[:, :, np.newaxis]

            # For saving the file
            cv2.imwrite('Bg_removed.jpg', image)
            print('\nBg_removed.jpg saved in your current directory!')
            print('Great Success!!!')

            # Once the processing has been done, set drawn back to False
            drawn = False

        # Shows the resultant image in the image window
        cv2.imshow('BG Subtractor', image)

        # Press ESC to exit
        if cv2.waitKey(1) & 0xFF == 27:
            break

    # Closes all windows (just in case multiple windows are open)
    cv2.destroyAllWindows()
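# The globals 'drawn' and 'rectangle' and the 'draw_rectangle' callback are
# referenced above but defined elsewhere. A minimal sketch consistent with
# that usage; the drag-to-select behavior is an assumption:
drawn = False
rectangle = (0, 0, 1, 1)
ix, iy = -1, -1

def draw_rectangle(event, x, y, flags, param):
    global drawn, rectangle, ix, iy
    if event == cv2.EVENT_LBUTTONDOWN:
        ix, iy = x, y                  # anchor corner of the ROI
    elif event == cv2.EVENT_LBUTTONUP:
        # (x, y, w, h), the format cv2.grabCut expects for its rect argument
        rectangle = (min(ix, x), min(iy, y), abs(x - ix), abs(y - iy))
        drawn = True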
!wget "https://raw.githubusercontent.com/PedroHaupenthal/Image-Processing/master/Atividade03/Exercicio01/bmw.jpg" -O "bmw.jpg"

import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

img1 = cv.imread("bmw.jpg")
img1 = cv.cvtColor(img1, cv.COLOR_BGR2RGB)

img2 = img1.copy()
p1 = (100, 194)
p2 = (597, 290)
img2 = cv.rectangle(img2, p1, p2, (255, 0, 0), 2)

mask = np.zeros(img1.shape[:2], np.uint8)
bgModel = np.zeros((1, 65), np.float64)
fgModel = np.zeros((1, 65), np.float64)

# grabCut expects (x, y, w, h), not a pair of corner points,
# so convert p1/p2 rather than concatenating them
rectangle = (p1[0], p1[1], p2[0] - p1[0], p2[1] - p1[1])
cv.grabCut(img1, mask, rectangle, bgModel, fgModel, 5, cv.GC_INIT_WITH_RECT)

mask2 = np.where((mask == 0) | (mask == 2), 0, 1).astype('uint8')

img3 = img1.copy()
img3 = img3 * mask2[:, :, np.newaxis]

plt.figure(figsize=(25, 25))
plt.subplot(131), plt.imshow(img1)
plt.subplot(132), plt.imshow(img2)
plt.subplot(133), plt.imshow(img3)