Example no. 1
import copy
import math

import cv2


def GetRadius(image):
    retina = copy.copy(image)
    retina_blue, retina_green, retina_red = cv2.split(retina)
    # scale the histogram linearly to normalize all the images
    retina_green = cv2.bitwise_not(retina_green)
    ret, retina_green = cv2.threshold(retina_green, retina_green.max() - 1, 255, cv2.THRESH_BINARY)
    retina_green = cv2.bitwise_not(retina_green)
    retina_green = cv2.medianBlur(retina_green, 55)
    # OpenCV 3 returns (image, contours, hierarchy); the flags 1 and 2 are
    # cv2.RETR_LIST and cv2.CHAIN_APPROX_SIMPLE
    img, contours, hierarchy = cv2.findContours(retina_green, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # keep the largest contour
    area = 0
    cnt = None
    for item in contours:
        if cv2.contourArea(item) > area:
            area = cv2.contourArea(item)
            cnt = item
    # extreme points of the largest contour
    leftmost = list(cnt[cnt[:, :, 0].argmin()][0])
    rightmost = list(cnt[cnt[:, :, 0].argmax()][0])
    topmost = list(cnt[cnt[:, :, 1].argmin()][0])
    bottommost = list(cnt[cnt[:, :, 1].argmax()][0])
    centre = [0, 0]
    centre[1] = (rightmost[0] + leftmost[0] + topmost[0] + bottommost[0]) / 4
    centre[0] = (rightmost[1] + leftmost[1] + topmost[1] + bottommost[1]) / 4
    # average the two half-diagonals to estimate the radius
    radius_x = math.sqrt(math.pow(rightmost[0] - leftmost[0], 2) + math.pow(rightmost[1] - leftmost[1], 2)) / 2
    radius_y = math.sqrt(math.pow(topmost[0] - bottommost[0], 2) + math.pow(topmost[1] - bottommost[1], 2)) / 2
    radius = (radius_x + radius_y) / 2
    return radius
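A hypothetical call of the helper above; the file name is an illustrative assumption:

# Hedged usage sketch: estimate the radius of the circular retina region.
image = cv2.imread("fundus.png")
print("estimated retina radius (px):", GetRadius(image))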
Example no. 2
    def getLabelMask(self):
        # returns a mask for coloured pixels
        imgHSV = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

        # white (note: with H and S pinned to 0, this range only matches pure greys)
        lowerWhite = np.array([0, 0, 0], dtype=np.uint8)
        upperWhite = np.array([0, 0, 255], dtype=np.uint8)
        maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)

        #black
        lowerBlack = np.array([0, 0, 0], dtype=np.uint8)
        upperBlack = np.array([180, 255, 30], dtype=np.uint8)
        maskBlack = cv2.inRange(imgHSV, lowerBlack, upperBlack)

        mask = cv2.bitwise_or(maskWhite, maskBlack)
        mask = cv2.bitwise_not(mask)

        imgGRAY = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        lowerWhite = np.array([250], dtype=np.uint8)
        upperWhite = np.array([255], dtype=np.uint8)
        maskWhite = cv2.inRange(imgGRAY, lowerWhite, upperWhite)

        lowerBlack = np.array([0], dtype=np.uint8)
        upperBlack = np.array([5], dtype=np.uint8)
        maskBlack = cv2.inRange(imgGRAY, lowerBlack, upperBlack)

        mask1 = cv2.bitwise_or(maskWhite, maskBlack)
        mask1 = cv2.bitwise_not(mask1)

        mask = cv2.bitwise_and(mask, mask1)

        return mask
Example no. 3
    def genImage(self, c_batch_num, id, tranformFactor=7, shadeSize=10, shadeFilter=[], smuFilter=[], blur=3, rotFilter=[], blurFilter_level1=[],
                 blurFilter_level2=[], size=20):

        if id > 30:
            img = self.GenEng(self.fontE, self.chars[id])
            img = cv2.bitwise_not(img)
        else:
            img = self.GenCh(self.fontC, self.chars[id].decode(encoding="utf-8"))
            img = cv2.bitwise_not(img)
            border = r(tranformFactor)
            side = border * 2
            img = cv2.resize(img, (40 - 14, 50))
            img = cv2.copyMakeBorder(img, 0, 0, 7, 7, cv2.BORDER_CONSTANT)
        if id not in rotFilter and (c_batch_num < 300 or (c_batch_num > 600 and c_batch_num > 750)):
            img = rotRandrom(img, tranformFactor, (img.shape[1], img.shape[0]))
        if id not in shadeFilter and c_batch_num < 600:
            img = self.randomWindows(img, 0.20)
        # add an occluding mask
        if id not in smuFilter and c_batch_num < 900:
            img = AddSmudginess(img, self.smu)
        # add smudges
        if id not in blurFilter_level2 and c_batch_num < 1200:
            if id in blurFilter_level1:
                img = AddGauss(img, r(blur - 1))
            else:
                img = AddGauss(img, r(blur) + 1)
        # add blur
        img = thes(img)
        img = setPadding(img, 0)
        # threshold
        img = cv2.resize(img, (size, size))
        # debugshow(img)
        # final 20x20 image
        return img
Example no. 4
    def detectEdgesInColorPlanes(self, imageToSplit, colorPlane="red"):

        morpher = ImageMorpher()
        channels = cv2.split(imageToSplit)

        colorEdges = []

        for i in range(len(channels)):
            blurred = cv2.GaussianBlur(channels[i], (7, 7), 3)
            colorEdges.append(morpher.dilateWithSquare(cv2.Canny(blurred, 50, 100), 3))

        if (colorPlane == "red"):
            print 'anding'
            result = cv2.bitwise_and(colorEdges[0], colorEdges[1])
            result = morpher.closeWithSquare(result, 2)
            #self.showImage(result)
            result = cv2.bitwise_and(result, cv2.bitwise_not(colorEdges[2]))
        elif (colorPlane == "green"):
            result = cv2.bitwise_and(colorEdges[0], colorEdges[2])
            result = morpher.closeWithSquare(result, 2)
            result = cv2.bitwise_and(result, cv2.bitwise_not(colorEdges[1]))
        elif (colorPlane == "blue"):
            result = cv2.bitwise_and(colorEdges[1], colorEdges[2])
            result = morpher.closeWithSquare(result, 2)
            result = cv2.bitwise_and(result, cv2.bitwise_not(colorEdges[0]))
        #self.showImage(result)
        result = morpher.openWithSquare(result, 2)
        #self.showImage(result)

        return result
Example no. 5
def clean_page(img, max_scale=defaults.CC_SCALE_MAX, min_scale=defaults.CC_SCALE_MIN):
    #img = cv2.imread(sys.argv[1])
    (h,w,d)=img.shape
    
    gray = grayscale(img)
    
    #create gaussian filtered and unfiltered binary images
    sigma = arg.float_value('sigma',default_value=defaults.GAUSSIAN_FILTER_SIGMA)
    if arg.boolean_value('verbose'):
        print 'Binarizing image with sigma value of ' + str(sigma)
    gaussian_filtered = scipy.ndimage.gaussian_filter(gray, sigma=sigma)
    binary_threshold = arg.integer_value('binary_threshold',default_value=defaults.BINARY_THRESHOLD)
    if arg.boolean_value('verbose'):
        print 'Binarizing image with threshold value of ' + str(binary_threshold)
    gaussian_binary = binarize(gaussian_filtered, threshold=binary_threshold)
    binary = binarize(gray, threshold=binary_threshold)
    
    #Draw out statistics on average connected component size in the rescaled, binary image
    average_size = cc.average_size(gaussian_binary)
    #print 'Initial mask average size is ' + str(average_size)
    max_size = average_size*max_scale
    min_size = average_size*min_scale
    
    #primary mask is connected components filtered by size
    mask = cc.form_mask(gaussian_binary, max_size, min_size)
    
    #secondary mask is formed from canny edges
    canny_mask = form_canny_mask(gaussian_filtered, mask=mask)
    
    #final mask is size filtered connected components on canny mask
    final_mask = cc.form_mask(canny_mask, max_size, min_size)
    
    #apply mask and return images
    cleaned = cv2.bitwise_not(final_mask * binary)
    return (cv2.bitwise_not(binary), final_mask, cleaned)
Example no. 6
    def process_depth(self, depth_img):
        ## Filter NaN
        idx = np.isnan(depth_img)
        depth_img[idx] = 0
        ## Convert to UINT8 image

        depth_img = self.filter_low_high(depth_img, MIN, MAX)
        depth_img = depth_img/(MAX) * 255
        depth_img = np.uint8(depth_img)
        depth_img = cv2.medianBlur(depth_img, 9)

        # frame_filter = cv2.Canny(depth_img,0,50)

        #cv2.imshow('canny', frame_filter)
        #cv.WaitKey(0)
        frame_filter = cv2.adaptiveThreshold(depth_img,
                                             255,
                                             # cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY,
                                             11,  # neighbourhood
                                             2)

        ## Invert Colors
        cv2.bitwise_not(frame_filter, frame_filter)
        kernel = np.ones((3, 3), 'uint8')
        frame_filter = cv2.erode(frame_filter, kernel)

        return frame_filter
Example no. 7
def apply_mask(img, mask, mask_color, device, debug=False):
  # Apply white image mask to image, with bitwise AND operator bitwise NOT operator and ADD operator
  # img = image object, color(RGB)
  # mask= image object, binary (black background with white object)
  # mask_color= white or black  
  # device = device number. Used to count steps in the pipeline
  # debug= True/False. If True, print image
  device += 1
  if mask_color=='white':
    # Mask image
    masked_img= cv2.bitwise_and(img,img, mask = mask)
    # Create inverted mask for background
    mask_inv=cv2.bitwise_not(mask)
    # Invert the background so that it is white, but apply mask_inv so you don't white out the plant
    white_mask= cv2.bitwise_not(masked_img,mask=mask_inv)
    # Add masked image to white background (can't just use mask_inv because that is a binary)
    white_masked= cv2.add(masked_img, white_mask)
    if debug:
      print_image(white_masked, (str(device) + '_wmasked.png'))
    return device, white_masked
  elif mask_color=='black':
    masked_img= cv2.bitwise_and(img,img, mask = mask)
    if debug:
      print_image(masked_img, (str(device) + '_bmasked.png'))
    return device, masked_img
  else:
    fatal_error('Mask Color ' + str(mask_color) + ' is not "white" or "black"!')
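A hypothetical call of apply_mask, assuming `img` is a BGR image and `mask` a binary mask from an earlier pipeline step (both names are illustrative):

# Hedged usage sketch; inputs are assumptions, not from the source.
device = 0
device, white_masked = apply_mask(img, mask, 'white', device, debug=True)
device, black_masked = apply_mask(img, mask, 'black', device)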
Example no. 8
def main(argv):
	if os.path.exists(OUTPUT_DIR):
		shutil.rmtree(OUTPUT_DIR)
	os.makedirs(OUTPUT_DIR)

	example_rock = 'calibration_images/example_rock2.jpg'
	rock_img = mpimg.imread(example_rock)

	terrain_thresh = color_thresh(rock_img, (160, 160, 160))
	obstacles_thresh = cv2.bitwise_not(terrain_thresh * 255)

	rock_samples_thresh = color_thresh(rock_img, (140, 115, 0))
	tf = tempfile.NamedTemporaryFile(
		dir='output',
		prefix="rocks-",
		suffix=".JPG",
		delete=False
	)

	rock_wthout_obstacles = cv2.bitwise_and(rock_samples_thresh, obstacles_thresh)
	rock_wthout_obstacles = cv2.bitwise_not(rock_wthout_obstacles * 255)

	(im2, contours, h) = cv2.findContours(rock_wthout_obstacles,1,2)

	for cnt in contours:
		area = cv2.contourArea(cnt)
		if area > 200 and area < 300:
			x,y,w,h = cv2.boundingRect(cnt)

			wh_ratio = int(float(w)/h)
			if wh_ratio == 1:
				cv2.rectangle(rock_img, (x,y),(x+w,y+h),(0,255,0),2)

	scipy.misc.imsave(tf.name, rock_img)
Example no. 9
def run(capture):
    """ Update display based on user input """
    current_frame = get_new_frame(capture)

    while capture.isOpened():
        if cv2.getTrackbarPos(tbar_play_video_name, win_default_name):
            try:
                current_frame = get_new_frame(capture)
            except StopIteration:
                print("End of clip")
                break

        #This IS NOT HSV. This is BGR
        (hue_frame, sat_frame, val_frame) = cv2.split(current_frame)

        if cv2.getTrackbarPos(tbar_invert_name, win_hue_name):
            hue_frame = cv2.bitwise_not(hue_frame)

        if cv2.getTrackbarPos(tbar_invert_name, win_sat_name):
            sat_frame = cv2.bitwise_not(sat_frame)

        if cv2.getTrackbarPos(tbar_invert_name, win_val_name):
            val_frame = cv2.bitwise_not(val_frame)

        cv2.imshow(win_default_name, current_frame)
        cv2.imshow(win_hue_name, hue_frame)
        cv2.imshow(win_sat_name, sat_frame)
        cv2.imshow(win_val_name, val_frame)

        cv2.waitKey(40)
Example no. 10
def findFish(frame, circle_mask):
  kernel = np.ones((3,3),np.uint8)
  hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
  # note: the circle_mask parameter is never used; this grayscale of the frame
  # stands in for it (cv2.cvtColor(circle_mask, ...) may have been intended)
  gray_circle_mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

  color_mask = maskBlue(hsv)
  color_mask = cv2.bitwise_and(color_mask, gray_circle_mask)
  color_mask = cv2.bitwise_or(color_mask, cv2.bitwise_not(gray_circle_mask))
  color_mask = cv2.bitwise_not(color_mask)
  color_mask = cv2.morphologyEx(color_mask, cv2.MORPH_OPEN, kernel)
  color_mask = cv2.GaussianBlur(color_mask, (3, 3), 0)

  canny_frame = autoCanny(color_mask, .2)
  canny_frame = cv2.bitwise_and(gray_circle_mask, canny_frame)
  contours, hierarchy = cv2.findContours(canny_frame,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

  fish_contours = []
  min_area = 250
  for cnt in contours:
    hull = cv2.convexHull(cnt)
    area = cv2.contourArea(hull)
    if area > min_area:
      fish_contours.append(hull)

  return fish_contours
Example no. 11
def Background_remove(img_trimmed,sample_path):
    roi = cv2.imread(sample_path)
    hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)   
 
    target = img_trimmed
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
 
    # calculating object histogram
    roihist = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
 
    # normalize histogram and apply backprojection
    cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
 
    # Now convolute with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    cv2.filter2D(dst,-1,disc,dst)
 
    # threshold and binary AND
    ret, thresh = cv2.threshold(dst, 5, 255, cv2.THRESH_BINARY)
    #invert to get the object of interest
    cv2.bitwise_not(thresh,thresh)
    thresh = cv2.merge((thresh,thresh,thresh))
    res = cv2.bitwise_and(target,thresh)
 
    #res = np.vstack((target,thresh,res))
    return res
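A hypothetical call, assuming `trimmed` is a BGR crop and `sample.jpg` a small sample image of the colours to backproject (both names are illustrative):

# Hedged usage sketch; file names are assumptions.
trimmed = cv2.imread("trimmed.jpg")
result = Background_remove(trimmed, "sample.jpg")
cv2.imwrite("result.jpg", result)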
Example no. 12
    def process_rgb(self, rgb_img):
        frame_gray = cv2.cvtColor(rgb_img, cv.CV_RGB2GRAY)
        # gray_blurred = cv2.GaussianBlur(frame_gray, (9, 9), 0)
        gray_blurred = cv2.medianBlur(frame_gray, 5)
        # gray_blurred = cv2.bilateralFilter(frame_gray, 8, 16, 4)
        # cv2.imshow("gray_blurred", gray_blurred)


        gray_filter = cv2.adaptiveThreshold(gray_blurred,
                                            255.0,
                                            # cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv.CV_ADAPTIVE_THRESH_MEAN_C,
                                            cv.CV_THRESH_BINARY,
                                            9,  # neighbourhood
                                            9)
        cv2.bitwise_not(gray_filter, gray_filter)
        kernel = np.ones((3, 3), 'uint8')
        # gray_erode = gray_filter
        gray_erode = cv2.erode(gray_filter, kernel)

        kernel2 = np.ones((5, 5), 'uint8')

        gray_erode = cv2.dilate(gray_erode, kernel2)
        gray_erode = cv2.erode(gray_erode, kernel)

        size = rgb_img.shape
        size = (size[1] - 1, size[0] - 1)

        cv2.rectangle(gray_erode, (0, 0), size,
                      0,  # color
                      20,  # thickness
                      8,   # lineType
                      0)   # shift

        return gray_erode
Example no. 13
def createOverlay(description):
    if description == "before":
        image = first_before
        projection = cv2.cvtColor(cv2.absdiff(before_projection, first_before), cv2.COLOR_BGR2GRAY)
        # invert (note: this overwrites the absdiff result computed above)
        projection = cv2.bitwise_not(before_projection)
        path = vidPath + "_2fps.AVI_" + str(before_start) + "_" + str(before_end) + "_before.jpg"

    elif description == "after":
        image = first_after
        projection = cv2.cvtColor(cv2.absdiff(after_projection, first_after), cv2.COLOR_BGR2GRAY)
        # invert (note: this overwrites the absdiff result computed above)
        projection = cv2.bitwise_not(after_projection)
        path = vidPath + "_2fps.AVI_" + str(after_start) + "_" + str(after_end) + "_after.jpg"
    

    colorTrackImg = np.zeros(image.shape, np.uint8)
    colorTrackImg.fill(255)
    
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    weight=0.5
    colorTrackImg[:,:,1] = projection[:]
    combinedTrackAndPhoto = cv2.addWeighted( colorTrackImg, weight, image, 1-weight, 0 )


    cv2.imwrite(path, projection)
    cv2.imwrite(vidPath + "_2fps.AVI_"+description+"_overlay.jpg", combinedTrackAndPhoto)
Example no. 14
def cleaned2segmented(cleaned, average_size):
    "cleaned是已经把图像中的字细化了"
    
    vertical_smoothing_threshold = defaults.VERTICAL_SMOOTHING_MULTIPLIER*average_size
    horizontal_smoothing_threshold = defaults.HORIZONTAL_SMOOTHING_MULTIPLIER*average_size
    
    (h,w) = cleaned.shape[:2]
    
    if arg.boolean_value('verbose'):
        print 'Applying run length smoothing with vertical threshold ' + str(vertical_smoothing_threshold) \
        +' and horizontal threshold ' + str(horizontal_smoothing_threshold)
    run_length_smoothed = rls.RLSO( cv2.bitwise_not(cleaned), vertical_smoothing_threshold, horizontal_smoothing_threshold)
    components = cc.get_connected_components(run_length_smoothed)
    text = np.zeros((h,w),np.uint8)
    #text_columns = np.zeros((h,w),np.uint8)
    #text_rows = np.zeros((h,w),np.uint8)
    for component in components:
        seg_thresh = arg.integer_value('segment_threshold',default_value=1)
        (aspect, v_lines, h_lines) = ocr.segment_into_lines(cv2.bitwise_not(cleaned), component,min_segment_threshold=seg_thresh)
        if len(v_lines) < 2 and len(h_lines) < 2: continue
        
        ocr.draw_2d_slices(text,[component],color=255,line_size=-1)
        #ocr.draw_2d_slices(text_columns,v_lines,color=255,line_size=-1)
        #ocr.draw_2d_slices(text_rows,h_lines,color=255,line_size=-1)
    return text
Example no. 15
    def otsu(self, img):
        # global thresholding
        ret1,th1 = cv2.threshold(img,50,255,cv2.THRESH_BINARY)
        per = np.percentile(img.ravel(), np.linspace(0,100,100))
        print("percentile = {}".format(per))
#         plt.hist(img.ravel(), 256)
#         plt.figure()

        
        # Otsu's thresholding
        ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        # Otsu's thresholding after Gaussian filtering
        blur = cv2.GaussianBlur(img,(5,5),0)
        ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        
        print("global = {}, ostu={}, gaussinaostu={}".format(ret1, ret2, ret3))
        # plot all the images and their histograms
        images = [img, 0, cv2.bitwise_not(th1),
                  img, 0, cv2.bitwise_not(th2),
                  blur, 0, cv2.bitwise_not(th3)]
        titles = ['Original Noisy Image','Histogram','Global Thresholding (v=50)',
                  'Original Noisy Image','Histogram',"Otsu's Thresholding",
                  'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
        for i in range(3):
            plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
            plt.title(titles[i*3])
            plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
            plt.title(titles[i*3+1])
            plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
            plt.title(titles[i*3+2])
        plt.show()
        return
Example no. 16
def draw(photo_file, cloud, alfa, betta, gamma):  # function that ties everything together
    corners = getCorners(photo_file)  # grab the corners from the photo
    cloud = rotateCloud(cloud, alfa, betta, gamma)  # rotate the cloud to the angle the photo was taken from
    pr = []

    for i in range(len(cloud)):  # take the projection
        p = Point2(cloud[i].x + 270, cloud[i].z + 300)  # offsets chosen so the projection lands roughly in the centre
        pr.append(p)

    conf_pr = getConformity(pr, corners)  # match the projection against the corners
    triangles = []
    meshes = getMeshes('cube.jpg', corners, 10, 10)  # remember the faces we need
    for i in range(len(meshes)):  # pair triangles from the cloud projection with triangles from the photo,
                                  # to make copying image patches easier
        trngl=[]
        
        for j in range(3):
            for k in range(len(corners)):
                if meshes[i][j].x == corners[k].x and meshes[i][j].y == corners[k].y:
                    trngl.append(conf_pr[k])
        triangles.append(trngl)
     
    # texture mapping
    image = cv2.imread(photo_file) 
    rows,cols,ch = image.shape
    new_image = numpy.zeros(image.shape, numpy.uint8)
    new_image = cv2.bitwise_not(new_image) 
    for i in range(len(meshes)):
        # triangle vertices from the photo
        x1 = meshes[i][0].x
        y1 = meshes[i][0].y
        x2 = meshes[i][1].x
        y2 = meshes[i][1].y
        x3 = meshes[i][2].x
        y3 = meshes[i][2].y
        pts1 = numpy.float32([[x1,y1],[x2,y2],[x3,y3]])
        roi_corners = numpy.array([[(x1,y1), (x2,y2), (x3,y3)]], dtype=numpy.int32)
        mask = numpy.zeros(image.shape, dtype=numpy.uint8)  # mask for the photo
        # triangle vertices of the cloud projection
        X1 = triangles[i][0].x
        Y1 = triangles[i][0].y
        X2 = triangles[i][1].x
        Y2 = triangles[i][1].y
        X3 = triangles[i][2].x
        Y3 = triangles[i][2].y       
        pts2 = numpy.float32([[X1,Y1],[X2,Y2],[X3,Y3]])
        roi2_corners = numpy.array([[(X1,Y1), (X2,Y2), (X3,Y3)]], dtype=numpy.int32)
        mask2 = numpy.zeros(new_image.shape, dtype=numpy.uint8)  # mask for the spot where the patch will be pasted
        
        cv2.fillPoly(mask, roi_corners, (255, 255, 255))  # build the mask
        masked_image = cv2.bitwise_and(image, mask)  # apply the mask to the photo
        M = cv2.getAffineTransform(pts1, pts2)  # set up the affine transform
        warp_affin_img = cv2.warpAffine(masked_image, M, (cols, rows))

        cv2.fillPoly(mask2, roi2_corners, (255, 255, 255))  # build the second mask
        mask2 = cv2.bitwise_not(mask2)  # invert it for the opposite effect (everything outside the triangle must be filled)
        new_image = cv2.bitwise_and(new_image, mask2)  # apply the mask to the projection
        new_image = cv2.bitwise_or(new_image, warp_affin_img)  # merge the images
    cv2.imshow('result',new_image)
Example no. 17
def obtain_cand(Initial,Initial2,Nsme,user,action,Total):
    TT = Initial[1].copy()
    Rg = Initial[0].copy()
    Output = []
    kernel = np.ones((7, 7), np.uint8)
    Mask2 = cv2.dilate(Initial2[1][:,:,0], kernel, 1)
    kernel = np.ones((4, 4), np.uint8)
    Mask2 = cv2.erode(Mask2, kernel, 1)
    Mask2 = cv2.bitwise_not(Mask2)
    kernel = np.ones((7, 7), np.uint8)
    Mask1 = cv2.dilate(Initial2[0][:,:,0], kernel, 1)
    kernel = np.ones((4, 4), np.uint8)
    Mask1 = cv2.erode(Mask1, kernel, 1)
    Mask1 = cv2.bitwise_not(Mask1)
    Rg1 = cv2.bitwise_and(Rg,Rg,mask=Mask1)
    Sup1 = cv2.bitwise_and(Initial[1],Initial[1],mask=Mask2)
    Sup = cv2.cvtColor(Sup1, cv2.COLOR_BGR2RGB)
    segments_fz = slic(Sup, n_segments=250, compactness=20, sigma=5)
    segments_fz[Mask2 < 1] = -1
    segments_fz += 2
    # Img_Slic = label2rgb(segments_fz, Sup, kind='avg')
    # Img_Slic_TT = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
    # Img_Slic = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
    for i in xrange(len(Total)):
        col = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
        T= Total[i][0][0]
        x,y,x2,y2 = T[0],T[1],T[2],T[3]
        cv2.rectangle(Rg1, (T[4], T[5]), (T[6],T[7]), col, 2)
        P1 = Obj_segment.Rect.Point(T[4], T[5])
        P2 = Obj_segment.Rect.Point(T[6],T[7])
        Rec_top = Obj_segment.Rect.Rect(P1,P2)
        sp = np.array(segments_fz[y:y2,x:x2])
        sp = np.unique(sp)
        if len(sp) == 0:
            # Output =Img_Slic[y:y2,x:x2]
            P1 = Obj_segment.Rect.Point(x,y)
            P2 = Obj_segment.Rect.Point(x2,y2)
            rec = Obj_segment.Rect.Rect(P1,P2)
        elif len(sp) == 1 and sp[0] == 1:
            # Output = Img_Slic[y:y2, x:x2]
            P1 = Obj_segment.Rect.Point(x, y)
            P2 = Obj_segment.Rect.Point(x2, y2)
            rec = Obj_segment.Rect.Rect(P1, P2)
        else:
            rmin, rmax,cmin, cmax = bbox2(segments_fz, sp,(x,y),(x2,y2))
            if rmin is None:
                continue
            # Output = TT[cmin:cmax,rmin:rmax]
            P1 = Obj_segment.Rect.Point(rmin, cmin)
            P2 = Obj_segment.Rect.Point(rmax, cmax)
            rec = Obj_segment.Rect.Rect(P1, P2)
        Output_Top = Rg[T[5]:T[7], T[4]:T[6]]
        Output.append((rec, Rec_top))
        # cv2.imwrite("Morphed/Patches_Front/"+user+"_"+action+"_"+Nsme[:-4]+"_"+i.__str__()+"_Front.jpg", Output)
        # cv2.imwrite("Morphed/Patches_Top/" + user + "_" + action + "_" + Nsme[:-4] + "_" + i.__str__() + "_Top.jpg", Output_Top)
        # cv2.rectangle(Img_Slic_TT,(x,y),(x2,y2),col,3)
    # cv2.imwrite("Morphed/Top/" + user + "_" + action + "_" + Nsme[:-4] + "_v2" + "_Top.jpg", Rg1)
    # cv2.imwrite("Morphed/Front/"+user+"_"+action+"_"+Nsme[:-4]+"_v2"+ "_Front.jpg",Img_Slic_TT)
    return Output
Example no. 18
    def test_mser(self):

        img = self.get_sample('cv/mser/puzzle.png', 0)
        smallImg = [
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
         [255, 255, 255, 255, 255,   0,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
         [255, 255, 255, 255, 255,   0,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
         [255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0,   0,   0, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255,   0,   0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,   0,   0, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
         [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]
        ]
        thresharr = [ 0, 70, 120, 180, 255 ]
        kDelta = 5
        mserExtractor = cv2.MSER_create()
        mserExtractor.setDelta(kDelta)
        np.random.seed(10)

        for i in range(100):

            use_big_image = int(np.random.rand(1,1)*7) != 0
            invert = int(np.random.rand(1,1)*2) != 0
            binarize = int(np.random.rand(1,1)*5) != 0 if use_big_image else False
            blur = int(np.random.rand(1,1)*2) != 0
            thresh = thresharr[int(np.random.rand(1,1)*5)]
            src0 = img if use_big_image else np.array(smallImg).astype('uint8')
            if src0 is None:
                print('Oops! src is None')
                continue
            src = src0.copy()

            kMinArea = 256 if use_big_image else 10
            kMaxArea = int(src.shape[0]*src.shape[1]/4)

            mserExtractor.setMinArea(kMinArea)
            mserExtractor.setMaxArea(kMaxArea)
            if invert:
                cv2.bitwise_not(src, src)
            if binarize:
                _, src = cv2.threshold(src, thresh, 255, cv2.THRESH_BINARY)
            if blur:
                src = cv2.GaussianBlur(src, (5, 5), 1.5, 1.5)
            minRegs = 7 if use_big_image else 2
            maxRegs = 1000 if use_big_image else 15
            if binarize and (thresh == 0 or thresh == 255):
                minRegs = maxRegs = 0
            msers, boxes = mserExtractor.detectRegions(src)
            nmsers = len(msers)
            self.assertEqual(nmsers, len(boxes))
            self.assertLessEqual(minRegs, nmsers)
            self.assertGreaterEqual(maxRegs, nmsers)
Example no. 19
    def get_circles(self):
        masked = self.get_masked_black()
        # masked = cv2.medianBlur(masked,3)
        # masked = cv2.blur(masked,(3,3))
        masked_inv = cv2.bitwise_not(masked)

        # note: with a mask and no explicit dst, pixels outside the mask are
        # left uninitialized by OpenCV
        img2 = cv2.bitwise_not(self.gray, mask=masked_inv)
        # plt.imshow(img2)
        # plt.show()
        return self._get_circles_from_img(img2)
Example no. 20
def fill_holes_with_contour_filling(gray_mask, inverse=False):
  filled = gray_mask.copy()
  if inverse:
    filled = cv2.bitwise_not(filled)
  contour, _ = cv2.findContours(filled, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
  for cnt in contour:
    cv2.drawContours(filled, [cnt], 0, 255, -1)
  if inverse:
    filled = cv2.bitwise_not(filled)
  return filled
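A hypothetical call, assuming `mask` is a binary image with white blobs on black; inverse=True handles the opposite polarity (the name is illustrative):

# Hedged usage sketch; `mask` is an assumed input.
solid = fill_holes_with_contour_filling(mask)
solid_inv = fill_holes_with_contour_filling(mask, inverse=True)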
Example no. 21
def deskew(image, angle):
    print angle
    image = cv2.bitwise_not(image)
    non_zero_pixels = cv2.findNonZero(image)
    center, wh, theta = cv2.minAreaRect(non_zero_pixels)

    root_mat = cv2.getRotationMatrix2D(center, angle, 1)
    rows,cols = image.shape[:2]
    rotated = cv2.warpAffine(image, root_mat, (cols, rows), flags=cv2.INTER_CUBIC)
    return cv2.bitwise_not(cv2.getRectSubPix(rotated, (cols, rows), center))
Example no. 22
def fill_holes(img):
    # filling holes (original comment: "I don't know what's going on here")
    des = cv2.bitwise_not(img)
    contour, hier = cv2.findContours(des, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contour:
        cv2.drawContours(des, [cnt], 0, 255, -1)
    # invert the colours back
    img = cv2.bitwise_not(des)
    return img
Example no. 23
    def track(self, frame):
        #cv2.imshow('tracker', frame)

        if calc_area(self.track_window) < self.minFaceArea or \
           calc_area(self.track_window) > self.maxFaceArea:
            self.init = False
            self.track_window = (0,0,0,0)

        vis = frame.copy()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

        if not self.init:
            (x0, y0, w, h) = self.faceDetector.findFace(frame)
            self.track_window = (int(x0+(1-self.faceScaleDown)/2*w), \
                                 int(y0+(1-self.faceScaleDown)/2*h), \
                                 int(self.faceScaleDown*w), \
                                 int(self.faceScaleDown*h))

            if not is_rect_nonzero(self.track_window):
                return self.track_window
                

            #when we got track_window, then pre-process
            (x0, y0, w, h) = self.track_window
            x1 = x0 + w
            y1 = y0 + h
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)
            if self.GUI:
                self.show_hist()

            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0

            self.init = True

        #start tracking
        prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
        prob &= mask
        term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
        track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)

        try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
        except: print track_box

        if self.GUI:
            cv2.imshow('camshift', vis)

        return self.track_window
Example no. 24
def horizontal_close(image_bin, length=None, verbose=False):
    height, width = image_bin.shape
    if verbose:
        print "Making close"
    if length is None:
        length = width / 30
    cv2.bitwise_not(image_bin, image_bin)
    kernel = np.ones((1, int(length)), np.uint8)
    image_bin = cv2.morphologyEx(image_bin, cv2.MORPH_CLOSE, kernel)
    cv2.bitwise_not(image_bin, image_bin)
    return image_bin
Example no. 25
def merge(dirn):
    samples = []
    print dirn.find("zh_")

    def findinside(dirname):
        print dirname
        for parent, dirnames, filenames in os.walk(dirname):
            for filename in filenames:
                path = parent + "/" + filename
                if path.endswith(".jpg") or path.endswith(".png"):
                    img = cv2.imread(path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
                    img = cv2.resize(img, (50, 50))
                    # img = img.astype(np.float32)/255
                    img = cutpadding(img, 5)
                    img = cv2.resize(img, (20, 36))

                    if dirname.find("zh_") > -1:
                        samples.append([img, 1])
                    else:
                        samples.append([img, 0])

    for parent, dirnames, filenames in os.walk(dirn):
        for dirname in dirnames:
            c_path = dirn + "/" + dirname
            findinside(c_path)
    count = 0

    while 1:
        count += 1

        L = samples[int(np.random.random() * len(samples))]
        R = samples[int(np.random.random() * len(samples))]
        print L[1], R[1]

        if L[1] == 1 and R[1] == 0:
            R[0] = cv2.bitwise_not(R[0])
            a = np.hstack([L[0], R[0]])
            cv2.imshow("a", a)
            F = getF(a)
            cv2.imshow("F", F)

        if L[1] == 0 and R[1] == 0:
            a = np.hstack([L[0], R[0]])
            a = cv2.bitwise_not(a)
            cv2.imshow("a", a)
            F = getF(a)
            cv2.imshow("F", F)

        # note: F is only assigned in the two branches above; other label
        # combinations reach this imwrite with F undefined
        cv2.imwrite("./Char_classify/F/" + str(count) + ".jpg", F)

        cv2.waitKey(0)
Example no. 26
def returnGray(image):
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	cl1 = clahe.apply(gray)
	
	thresh = cv2.Canny(cl1, 50, 100)
	thresh = cv2.dilate(thresh, None, iterations=2)
	thresh = cv2.erode(thresh, None, iterations=2)
	cv2.bitwise_not(thresh, thresh)

	return thresh
Example no. 27
    def run(self):
        while True:
            sleep(self.interfr_delay_ms)
            ret, self.frame = self.cam.read()
            if not ret:
                break
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1-x0, y1-y0)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.show_hist()
                
                vis_roi = vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0

            if self.tracking_state == 1:
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
                #print 'TRACK WINDOW BEFORE cv2.Camshift', self.track_window
                track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
                #print 'TRACK WINDOW AFTER cv2.Camshift', self.track_window
                
                if self.show_backproj:
                    vis[:] = prob[...,np.newaxis]
                try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                except: print track_box
                
            cv2.imshow('camshift', vis)

            ch = cv2.waitKey(5)
            if ch == KEYS['ESCAPE']:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
            if ch == KEYS['UP']:
                self.interfr_delay_ms = min(self.INTERFRAME_DELAY_MS_MAX, self.interfr_delay_ms + self.INTERFRAME_DELAY_MS_DELTA)
            if ch == KEYS['DOWN']:
                self.interfr_delay_ms = max(0.0, self.interfr_delay_ms - self.INTERFRAME_DELAY_MS_DELTA)
            if ch == KEYS['SPACE']:
                while cv2.waitKey(10) != KEYS['SPACE']:
                    pass
        cv2.destroyAllWindows() 			
Example no. 28
    def get_non_head_mask(self, img, rect):
        """
        Finds the mask for non-head pixels
        img - thresholded image (binary black and white)
        rect - head rect
        """
        mask = np.zeros((self.h, self.w), np.uint8)
        x, y, w, h = rect
        mask[y:y+h, x:x+w] = 255
        mask = cv2.bitwise_and(img, mask)
        self.mask_non = cv2.bitwise_not(mask)
        return cv2.bitwise_not(mask)
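A hypothetical call, assuming `binary` is the thresholded frame and `head_rect` an (x, y, w, h) box from a face detector (both names are illustrative):

# Hedged usage sketch; inputs are assumptions.
non_head = tracker.get_non_head_mask(binary, head_rect)
body_only = cv2.bitwise_and(binary, non_head)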
Example no. 29
def AddSmudginess(img, Smu):
    rows = r(Smu.shape[0] - 50)
    cols = r(Smu.shape[1] - 50)
    adder = Smu[rows:rows + 50, cols:cols + 50]
    adder = cv2.resize(adder, (50, 50))
    #   adder = cv2.bitwise_not(adder)
    img = cv2.resize(img, (50, 50))
    # invert, AND, invert back: equivalent to OR-ing the dark smudge strokes onto the glyph
    img = cv2.bitwise_not(img)
    img = cv2.bitwise_and(adder, img)
    img = cv2.bitwise_not(img)
    return img
Example no. 30
    def _subtract(self, img, BS):
        fgmask = BS.apply(img)
        contour, hier = cv2.findContours(fgmask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contour:
            cv2.drawContours(fgmask, [cnt], 0, 255, -1)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        fgmask = cv2.bitwise_not(fgmask)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30, 30))
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        fgmask = cv2.bitwise_not(fgmask)
        return fgmask
Example no. 31
                             color=(0, 255, 0),
                             thickness=-1)
            cv2.drawContours(image=prev_next_action_mask,
                             contours=[cntr],
                             contourIdx=-1,
                             color=255,
                             thickness=-1)

        cv2.imshow('cntr', visual)
        cv2.imshow('prev_action_mask', prev_action_mask)
        cv2.imshow('next_action_mask', next_action_mask)
        cv2.imshow('prev_next_action_mask', prev_next_action_mask)

        # keep only the parts of curr that do not overlap prev and next
        aa = cv2.bitwise_and(prev_action_mask,
                             cv2.bitwise_not(prev_next_action_mask))
        bb = cv2.bitwise_and(next_action_mask,
                             cv2.bitwise_not(prev_next_action_mask))
        cv2.imshow('aa', aa)
        cv2.imshow('bb', bb)
        cv2.imshow('cc', cv2.absdiff(aa, bb))
        cv2.imshow('dd', cv2.bitwise_and(aa, bb))

        #cv2.imshow('vvv', cv2.cvtColor(curr, cv2.COLOR_BGR2HSV))

        action_mask = bb  #cv2.bitwise_and(aa, bb)
        # motion accumulate - simple
        motion[:, :, 3] = cv2.bitwise_or(motion[:, :, 3], action_mask)
        motion[:, :, :3][action_mask != 0] = curr[action_mask != 0]

        prev = curr
Example no. 32
    def _invert_image(self, img):
        # TODO: Detect whether or not image should be inverted
        return cv2.bitwise_not(img)
Example no. 33
    def stream(self):
        """
        Initialize both subprocessses for both streams including error channels in threads.
        Streaming is performed synced, as performing ML in threads is hard
        :return: None
        """
        self.initialize_cv2()

        sdp_file_peripheral = "VideoSettings/video_00_00_00_peripheral.sdp"
        sdp_file_foveated = "VideoSettings/video_00_00_00_foveated.sdp"
        proc_peripheral = self.initialize_ffmpeg(sdp_file_peripheral)
        proc_foveated = self.initialize_ffmpeg(sdp_file_foveated)
        size_peripheral = self.size_stream_peripheral[0] * self.size_stream_peripheral[1] * 3
        size_foveated = self.size_stream_foveated[0] * self.size_stream_foveated[1] * 3

        while not self.end_of_stream:

            frame_peripheral = proc_peripheral.stdout.read(size_peripheral)
            frame_foveated = proc_foveated.stdout.read(size_foveated)

            if frame_foveated == 0 or frame_peripheral == 0 or len(
                    frame_foveated) == 0 or len(frame_peripheral) == 0:
                print('end of stream')
                print('frame_foveated: ', frame_foveated)
                print('frame_peripheral: ', frame_peripheral)
                self.end_of_stream = True
                break

            peripheral_area_raw = self.extract_area(
                frame_peripheral, self.size_stream_peripheral)

            peripheral_area_raw = tf.expand_dims(peripheral_area_raw, axis=0)
            tic = time.perf_counter()
            peripheral_area = self.Generator(peripheral_area_raw)[
                0]  # Perfom superresolution
            toc = time.perf_counter()
            peripheral_area = np.array(peripheral_area, dtype='uint8')

            time_elapsed = round(((toc - tic) * 1000), 4)
            print(f"performed calc in {time_elapsed:0.4f} miliseconds")
            self.csv_lines += f"\n{time_elapsed}"

            peripheral_area = cv2.UMat(peripheral_area)
            peripheral_area = cv2.resize(peripheral_area, self.size_total,
                                         cv2.INTER_CUBIC)
            foveated_area: cv2.UMat = self.extract_area(
                frame_foveated, self.size_stream_foveated)
            foveated_area = cv2.UMat(foveated_area)

            a, b = 960, 512
            img, mask = self.calculate_masked_circle(foveated_area, (a, b))
            result = self.stack_images(peripheral_area, img,
                                       cv2.bitwise_not(mask))

            cv2.imshow('image', result)
            print(f"performed calc in {time_elapsed:0.4f} miliseconds")
            self.csv_lines += f"\n{time_elapsed}"
            # self.video_writer.write(peripheral_area)
            self.last_frame_received = time.process_time()

            k = cv2.waitKey(20) & 0xFF
            if k == ord('q'):
                self.end_of_stream = True
                break

        for thread in self.threads:
            thread.join()

        cv2.destroyAllWindows()
        # self.video_writer.release()
        proc_peripheral.stdout.close()
        proc_peripheral.wait()
        proc_foveated.stdout.close()
        proc_foveated.wait()
        with open('Latency_4.csv', 'w', newline='') as file:
            writer = csv.writer(file)
            for line in self.csv_lines.splitlines():
                writer.writerow([line])
        sys.exit()
Example no. 34
The result is the edges of the objects in the image.
Example:
'''
image = cv2.imread(common_pics_path, 0)
# build a 3x3 structuring element
element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
dilate = cv2.dilate(image, element)
erode = cv2.erode(image, element)

# subtract the two images to get the edges; the first argument is the dilated
# image, the second the eroded one
result = cv2.absdiff(dilate, erode)

# the result above is grayscale; binarize it to see the edges more clearly
retval, result = cv2.threshold(result, 40, 255, cv2.THRESH_BINARY)
# invert, i.e. negate every pixel of the binary image
result = cv2.bitwise_not(result)
# show the image
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
2.) Corner detection
Step 1: Unlike edge detection, detecting corners is a little more involved, though the principle is the same. The difference is that the image is first dilated with a cross-shaped structuring element, which only "expands" it along the edges while the corner points stay unchanged. The original image is then eroded with a diamond-shaped element, so that only the corners "shrink back" while the straight edges stay in place.

Step 2: Dilate the original image with an X-shaped element; the corners dilate more than the edges. When this is then eroded with a square element, the corners recover their original shape while the edges are eroded further, so subtracting the two results leaves only the corners.
Example:
'''
image = cv2.imread(common_pics_path, 0)
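# The original snippet is cut off here. A hedged sketch of the corner-detection
# steps just described might continue as follows; the hand-built diamond and
# X-shaped kernels are illustrative assumptions (OpenCV has no presets for them).
import numpy as np

cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
square = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
x_shape = (np.eye(5, dtype=np.uint8) | np.eye(5, dtype=np.uint8)[::-1])
diamond = np.array([[0, 0, 1, 0, 0],
                    [0, 1, 1, 1, 0],
                    [1, 1, 1, 1, 1],
                    [0, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0]], dtype=np.uint8)

# step 1: dilate with the cross, then erode with the diamond
result1 = cv2.erode(cv2.dilate(image, cross), diamond)
# step 2: dilate with the X shape, then erode with the square
result2 = cv2.erode(cv2.dilate(image, x_shape), square)
# corners are where the two results differ
corners = cv2.absdiff(result2, result1)
retval, corners = cv2.threshold(corners, 40, 255, cv2.THRESH_BINARY)
cv2.imshow("corners", corners)
cv2.waitKey(0)
cv2.destroyAllWindows()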
Example no. 35
                        countt+=1
                    if img[x+1][y+1]==140:
                        countt+=1
                    if img[x-1][y-1]==140:
                        countt+=1
                    if img[x+1][y-1]==140:
                        countt+=1
                    if img[x-1][y+1]==140:
                        countt+=1
                    if countt>=5:
                        img1[x][y]=(140)
    
    a=np.array(140)
    
    mask=cv.inRange(img1,a,a)
    cv.bitwise_not(mask, mask)

    
    ret, binary = cv.threshold(mask, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (1, 2))
    binl = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel)
    
    
    
    cv.bitwise_not(binl, binl)

    ## crop
    xrec=0
    yrec=0
    for x in range(80):
        county = 0 
Example no. 36
from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2

data = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = data.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

imgTest = cv2.imread("Capture.png", 0)
resizedImg = cv2.resize(imgTest, (28, 28))
invertedImg = cv2.bitwise_not(resizedImg)

#cv2.imshow("imgTest", imgTest)

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5)

probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
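# The inverted capture above is never used. A hedged sketch of how it was
# presumably meant to be classified (shape and scaling are assumptions):
sample = invertedImg.astype("float32")[np.newaxis, ...] / 255.0
prediction = probability_model.predict(sample)
print("predicted digit:", np.argmax(prediction))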
Example no. 37
import cv2
import numpy as np
square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -1)
cv2.imshow('Square', square)
cv2.waitKey(0)
#making ellipse
ellips = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellips, (150, 150), (150, 150), 30, 0, 180, 255, -1)
cv2.imshow('Ellips', ellips)
cv2.waitKey(0)
And = cv2.bitwise_and(square, ellips)
cv2.imshow('AND', And)
cv2.waitKey(0)
Or = cv2.bitwise_or(square, ellips)
cv2.imshow('OR', Or)
cv2.waitKey(0)
Not = cv2.bitwise_not(square)  # note: passing ellips as a second argument would overwrite it (it is the dst parameter)
cv2.imshow('NOT', Not)
cv2.waitKey(0)
Xor = cv2.bitwise_xor(square, ellips)
cv2.imshow('XOR', Xor)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example no. 38
blurred = cv.GaussianBlur(gray, (5, 5), 0)
edged = cv.Canny(blurred, 30, 150)
(_, cnts, _) = cv.findContours(edged.copy(), cv.RETR_EXTERNAL,
                               cv.CHAIN_APPROX_SIMPLE)
cnts = sorted([(c, cv.boundingRect(c)[0]) for c in cnts], key=lambda x: x[1])

for (c, _) in cnts:
    (x, y, w, h) = cv.boundingRect(c)

    if w >= 7 and h >= 20:
        roi = gray[y:y + h, x:x + w]
        thresh = roi.copy()
        T = mahotas.thresholding.otsu(roi)
        thresh[thresh > T] = 255
        thresh = cv.bitwise_not(thresh)

        thresh = dataset.deskew(thresh, 20)
        thresh = dataset.center_extent(thresh, (20, 20))

        cv.imshow("thresh", thresh)
        hist = hog.describe(thresh)
        digit = model.predict([hist])[0]

        print("I think that number is: {}".format(digit))
        cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv.putText(image, str(digit), (x - 10, y - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
        cv.imshow("image", image)
        cv.waitKey(0)
Example no. 39
# load the images
background = cv2.imread('road.jpg', cv2.IMREAD_COLOR)
sign = cv2.imread('cropped_sign.jpg', cv2.IMREAD_COLOR)

# pick the region to composite into
bg_rows, bg_cols, _ = background.shape
rows, cols, channels = sign.shape
for i in range(1, 101):
    ran_rows = random.randrange(bg_rows - rows)
    ran_cols = random.randrange(bg_cols - cols)
    roi = background[ran_rows:ran_rows + rows,
                     ran_cols:ran_cols + cols]  # the coordinates vary each iteration

    # build the mask
    gray_sign = cv2.cvtColor(sign, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray_sign, 240, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    # composite the sign onto the roi
    img1 = cv2.bitwise_and(roi, roi, mask=mask)
    img2 = cv2.bitwise_and(sign, sign, mask=mask_inv)
    dst = cv2.add(img1, img2)

    # paste the result back into the original image
    copy = background.copy()
    copy[ran_rows:ran_rows + rows, ran_cols:ran_cols + cols] = dst
    # save the file
    name = 'data/road_%03d.jpg' % i
    print(name)
    cv2.imwrite(name, copy)
Example no. 40
    def process_image(self, frame):
        #rospy.loginfo("%s %s %s %s"%(str(self.is_positioning), str(self.is_grasping), str(self.is_done), str(self.is_failed)))

        #rospy.loginfo("processing image ...")
        #cv2.imshow("camera", frame)

        # gray
        frame_gray = cv2.cvtColor(frame, cv.CV_RGB2GRAY)
        frame_gray_copy = cv2.cvtColor(frame, cv.CV_RGB2GRAY)

        #frame_gray = cv2.blur(frame_gray, (3,3))
        frame_gray = cv2.GaussianBlur(frame_gray, (9, 9), 0)
        #cv2.imshow("camera_gray", frame_gray)

        # adaptive filter
        frame_filter = cv2.adaptiveThreshold(
            frame_gray,
            255.0,
            #cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,  # adaptiveMethod comes before thresholdType
            cv.CV_THRESH_BINARY,
            9,  # neighbourhood
            9)

        #cv2.imshow("camera_filter", frame_filter)
        size = frame.shape
        size = (size[1] - 1, size[0] - 1)

        # rectangle
        cv2.rectangle(
            frame_filter,
            (0, 0),
            (640, 180),
            255,  # color
            cv2.cv.CV_FILLED,  # thickness
            8,  # lineType
            0)  # shift

        cv2.bitwise_not(frame_filter, frame_filter)

        #kernel = np.ones((9,9),'uint8')
        #frame_dilate = cv2.dilate(frame_filter, kernel)

        # rectangle
        frame_dilate = frame_filter
        cv2.rectangle(
            frame_dilate,
            (0, 0),
            size,
            0,  # color
            20,  # thickness
            8,  # lineType
            0)  # shift

        #        cv2.imshow("camera_dilate", frame_dilate)

        # contours
        contours, hierarchy = cv2.findContours(frame_dilate, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

        cross_w = size[0] / 2 + CROSS_OFF_X
        cross_h = size[1] / 2 + CROSS_OFF_Y

        cross = np.int32([cross_w, cross_h])
        # crosshair
        cv2.line(frame, (cross_w, 0), (cross_w, size[1]), (255, 255, 0))

        cv2.line(frame, (0, cross_h), (size[0], cross_h), (255, 255, 0))

        largest = None
        #max_area = 0
        smallest_dist = max(size[0], size[1])

        #print contours

        contours = self.filter_contours(contours)

        cv2.drawContours(frame, contours, -1, (255, 0, 0), 1)

        cv2.circle(frame, tuple(cross), m_to_pixel(0.08), (0, 0, 255), 2)

        for c in contours:
            ellipse = cv2.fitEllipse(c)

            #cv2.ellipse(frame,ellipse,(0,255,0),2)

            #rect = cv2.minAreaRect(c)
            center = np.int32(ellipse[0])

            #xdist = abs(center[0] - cross_w)
            dist = np.linalg.norm(center - cross)

            if dist < smallest_dist:
                smallest_dist = dist
                largest = c  # HENNES: this is now closest?

                #if max_area < area:
                #    max_area = area
                #    largest = c

        self.angle = None
        object_label = "unknown"

        if largest is not None:  #HENNES: not needed? # and len(largest) >= 5:

            rect = cv2.minAreaRect(largest)
            #print rect
            box = cv2.cv.BoxPoints(rect)
            #print box
            box = np.int32(box)
            cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)

            #cv2.ellipse(rect,ellipse,(0,255,0),2)

            ellipse = cv2.fitEllipse(largest)

            center = tuple(np.int32(ellipse[0]))
            axis = tuple(np.int32(ellipse[1]))

            cv2.circle(frame, tuple(center), 3, (0, 0, 255), 2)
            vec1 = box[1] - box[2]
            vec2 = box[2] - box[3]

            # FEATURES
            # TODO: all features should be calculated on eroded image
            mask = np.zeros(frame_gray.shape, dtype=np.uint8)
            cv2.drawContours(mask, [largest], 0, 255, -1)

            kernel = np.ones((11, 11), 'uint8')
            mask_erode = cv2.erode(mask, kernel)

            #            # get object from gray image with smaller mask
            #            object_image = frame_gray_copy.copy()
            #            object_image[mask_erode == 0] = 0
            #            cv2.imshow("object", object_image)

            mean_val = cv2.mean(frame_gray_copy, mask=mask_erode)
            intensity = mean_val[0]

            axis2 = (np.linalg.norm(vec1), np.linalg.norm(vec2))
            feature_vector = [
                cv2.contourArea(largest),
                min(axis2),
                max(axis2), intensity
            ]

            object_label = "unknown"
            if self.object_type:
                print ",".join(
                    str(x) for x in feature_vector) + "," + self.object_type
            else:
                object_label = j48(feature_vector)
                print object_label
            # if object_label == "R20V20":
            #     if intensity > 38:
            #         print "V20"
            #     else:
            #         print "R20"

            if np.linalg.norm(vec1) < np.linalg.norm(vec2):
                self.angle = np.arctan2(vec1[1], vec1[0])
            else:
                self.angle = np.arctan2(vec2[1], vec2[0])

            r = 30.0
            cv2.line(
                frame,
                tuple(
                    np.int32(center) +
                    np.int32([r * cos(self.angle), r * sin(self.angle)])),
                tuple(
                    np.int32(center) -
                    np.int32([r * cos(self.angle), r * sin(self.angle)])),
                (0, 0, 255), 2)

            xdist = center[0] - cross_w
            ydist = center[1] - cross_h

            x_m = pixel_to_m(xdist)
            y_m = pixel_to_m(ydist)

            msg = PoseStampedLabeled()
            msg.pose.header.frame_id = '/arm_base_link'
            msg.pose.header.stamp = rospy.Time.now()

            #double check
            msg.pose.pose.position.x = y_m
            msg.pose.pose.position.y = -x_m

            quat = tf.transformations.quaternion_from_euler(0, 0, self.angle)

            msg.pose.pose.orientation.x = quat[0]
            msg.pose.pose.orientation.y = quat[1]
            msg.pose.pose.orientation.z = quat[2]
            msg.pose.pose.orientation.w = quat[3]
            #print xdist, ydist
            #print self.angle

            msg.label = object_label

            self.pose_pub.publish(msg)

        # show camera image with annotations
        cv2.putText(frame,
                    object_label, (20, 50),
                    cv2.FONT_HERSHEY_PLAIN,
                    3.0, (0, 255, 0),
                    thickness=3)

        cv2.imshow("camera contours", frame)
Example no. 41
def findArea(c1):
    return abs(c1[0][0] - c1[1][0]) * abs(c1[0][1] - c1[3][1])


if __name__ == "__main__":
    bndingBx = []  #holds bounding box of each countour
    corners = []

    img = cv2.imread('linear.png', 0)  #read image

    #perform gaussian blur (5*5)
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    #apply adaptive threshold to image
    th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)
    th3 = cv2.bitwise_not(th3)
    #Otsu method if preferred
    # ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    #reassign contours to the filled in image
    contours, hierarchy = cv2.findContours(th3, cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)
    #find the rectangle around each contour
    for num in range(0, len(contours)):
        #make sure contour is for letter and not cavity
        if (hierarchy[0][num][3] == -1):
            left = tuple(contours[num][contours[num][:, :, 0].argmin()][0])
            right = tuple(contours[num][contours[num][:, :, 0].argmax()][0])
            top = tuple(contours[num][contours[num][:, :, 1].argmin()][0])
            bottom = tuple(contours[num][contours[num][:, :, 1].argmax()][0])
            bndingBx.append([top, right, bottom, left])
Example no. 42
		frame3 = cv2.cvtColor(frame3,cv2.COLOR_RGB2GRAY)
		ret, frame4 = cv2.threshold(frame3,40,255,cv2.THRESH_BINARY)
		#	frame4 = cv2.dilate(frame4,kernel,iterations=3)
			#cv2.imshow("frame4",frame4)
		frame5 = frame4.copy()
		close = np.ones((4, 4), np.uint8)
		frame5 = cv2.morphologyEx(frame5,cv2.MORPH_CLOSE,close)			
		frame6 =frame5.copy()
		h,w = frame6.shape[:2]
		mask = np.zeros((h+2,w+2),np.uint8)
		frame6[0,:w] = 0
		frame6[h-1,:w] = 0
		frame6[:h,0] = 0
		frame6[:h,w-1] = 0
		cv2.floodFill(frame6,mask,(0,0),255)
		frame6_inv = cv2.bitwise_not(frame6)
		frame6 = frame5 | frame6_inv
		numOfLables, img_label, stats, centroids = cv2.connectedComponentsWithStats(frame4)
		for idx, centroid in enumerate(centroids):  
			if stats[idx][0] == 0 and stats[idx][1] == 0:  
				continue  
			if np.any(np.isnan(centroid)):  
				continue  
			x, y, width, height, area = stats[idx]  
			centerX, centerY = int(centroid[0]), int(centroid[1])  
			
			if area > 5000 and width>50 and height>150 :
				cv2.rectangle(frame, (x, y), (x+width, y+height), (0, 0, 255))  
				global crop
				crop = frame6[y:y+height,x:x+width]
				#crop = cv2.resize(crop,(129,129),cv2.INTER_CUBIC)
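# The border-zeroing, floodFill, invert and OR steps above are the standard
# "fill holes" idiom. A self-contained sketch of the same idea (an illustration,
# not the original author's code):
def fill_holes(binary):
    h, w = binary.shape[:2]
    flood = binary.copy()
    flood[0, :] = 0; flood[-1, :] = 0; flood[:, 0] = 0; flood[:, -1] = 0
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(flood, mask, (0, 0), 255)  # fill everything reachable from the border
    return binary | cv2.bitwise_not(flood)   # holes are the pixels the flood never reached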
Esempio n. 43
0
import cv2
import numpy

cap = cv2.VideoCapture(0)
back = cv2.imread('./image.jpg')
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        #cv2.imshow("hsv",hsv)
        #lower: hue-10,100,100 , upper: hue+10,255,255
        red = numpy.uint8([[[0, 0, 255]]])  # note: BGR (0, 0, 255) is red, not blue
        hsv_red = cv2.cvtColor(red, cv2.COLOR_BGR2HSV)
        #print(hsv_red)

        l_red = numpy.array([0, 100, 100])
        u_red = numpy.array([10, 255, 255])
        mask = cv2.inRange(hsv, l_red, u_red)
        #cv2.imshow("mask",mask)
        demo1 = cv2.bitwise_and(back, back, mask=mask)
        mask = cv2.bitwise_not(mask)
        demo2 = cv2.bitwise_and(frame, frame, mask=mask)
        cv2.imshow("cloak", demo1 + demo2)
        if cv2.waitKey(5) == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
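# A sketch of the hue-band rule from the comment above (illustrative only): convert a
# single BGR pixel to HSV, then take hue +/- 10 with a wide saturation/value band.
def hsv_range_for(bgr_color):
    hue = int(cv2.cvtColor(numpy.uint8([[bgr_color]]), cv2.COLOR_BGR2HSV)[0][0][0])
    lower = numpy.array([max(hue - 10, 0), 100, 100])
    upper = numpy.array([min(hue + 10, 179), 255, 255])
    return lower, upper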
Esempio n. 44
0
def logic(m1, m2):
    # dst = cv.bitwise_and(m1,m2)
    # dst = cv.bitwise_or(m1,m2)
    dst = cv.bitwise_not(m1)  # note: a second positional argument would be treated as the output array
    cv.imshow("dst", dst)
    cv.waitKey()
Esempio n. 45
0
# Optical Character Recognition software

import cv2
import pytesseract
import numpy as np

img = cv2.imread(
    "/Users/ammiellewambobecker/github/1_W3bDw8mNI-GI-F_mro3XMg.png")

gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, img_bin = cv2.threshold(gray, 128, 255,
                             cv2.THRESH_BINARY | cv2.THRESH_OTSU)
gray = cv2.bitwise_not(img_bin)

kernel = np.ones((2, 1), np.uint8)
img = cv2.erode(gray, kernel, iterations=1)
img = cv2.dilate(img, kernel, iterations=1)
out_below = pytesseract.image_to_string(img)

print("OUTPUT: ", out_below)
Esempio n. 46
0
    # create the mask
    fgmask = cv2.inRange(frame, lower_color, upper_color)
    cv2.imshow('Mask', fgmask)

    # erode, then dilate
    erode = cv2.erode(fgmask, None, iterations=1)
    # cv2.imshow('erode', erode)
    dilate = cv2.dilate(erode, None, iterations=1)
    # cv2.imshow('dilate', dilate)

    rows, cols = dilate.shape
    img_back = img_back[0:rows, 0:cols]
    # print(img_back)
    # cut out the region using the mask and the original image
    img2_fg = cv2.bitwise_and(img_back, img_back, mask=dilate)
    Mask_inv = cv2.bitwise_not(dilate)
    img3_fg = cv2.bitwise_and(frame, frame, mask=Mask_inv)
    finalImg = img2_fg + img3_fg
    cv2.imshow('res', finalImg)

    # save
    # out.write(finalImg)

    k = cv2.waitKey(10) & 0xFF
    if k == 27:
        break

out.release()
cap.release()
cv2.destroyAllWindows()
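# The mask / inverted-mask pair above is the usual binary compositing idiom. A compact
# sketch of the same operation (illustrative, not from the original):
def composite(fg, bg, mask):
    # keep fg where mask is 255, bg elsewhere; mask is a single-channel 0/255 image
    fg_part = cv2.bitwise_and(fg, fg, mask=mask)
    bg_part = cv2.bitwise_and(bg, bg, mask=cv2.bitwise_not(mask))
    return cv2.add(fg_part, bg_part)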
Esempio n. 47
0
    def detectCharacterCandidates(self,
                                  region):  # region is a rotated bounding box

        # apply a 4-point transform to extract the plate as if we had a 90-degree viewing angle
        plate = perspective.four_point_transform(self.image, region)
        cv2.imshow("Perspective Transform", imutils.resize(plate, width=400))

        # extract the Value component from the HSV color space and apply adaptive thresholding to reveal the characters on the plate
        V = cv2.split(cv2.cvtColor(plate, cv2.COLOR_BGR2HSV))[
            2]  # extract the Value channel from the HSV color space
        '''
        Why Value Channel instead of GrayScale?

        The grayscale version of an image is a weighted combination of the RGB channels. 
        The Value channel, however, is given a dedicated dimension in the HSV color space. When performing thresholding to extract dark regions 
        from a light background (or vice versa), better results can often be obtained by using the Value rather than grayscale.
        '''
        T = threshold_local(
            V, 29, offset=15, method="gaussian"
        )  # apply adaptive thresholding to reveal the characters on the plate
        thresh = (V > T).astype("uint8") * 255
        thresh = cv2.bitwise_not(thresh)
        '''
        Image Thresholding: Classify pixels as "dark" or "light"
        Adaptive Thresholding: Form of image thresholding that takes into account spatial variations in illumination
        '''

        # resize the plate region to a canonical size
        plate = imutils.resize(plate, width=400)
        thresh = imutils.resize(thresh, width=400)
        cv2.imshow("Thresh", thresh)

        labels = measure.label(
            thresh, neighbors=8,
            background=0)  # perform a connected components analysis
        charCandidates = np.zeros(
            thresh.shape, dtype="uint8"
        )  # mask to store the locations of the character candidates

        for label in np.unique(labels):  # loop over the unique components

            if label == 0:  # label corresponds to the background of the plate, so we can ignore it
                continue

            # otherwise, construct the label mask to display only connected components for the
            # current label, then find contours in the label mask.
            # By performing this masking, we are revealing only pixels that are part of the current connected component.
            labelMask = np.zeros(thresh.shape, dtype="uint8")
            labelMask[
                labels ==
                label] = 255  # draw all pixels with the current  label  value as white on a black background
            cnts = cv2.findContours(
                labelMask, cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)  # find contours in the label mask
            cnts = cnts[0] if imutils.is_cv2() else cnts[1]

            if len(
                    cnts
            ) > 0:  # check that at least one contour was found in the  labelMask

                c = max(
                    cnts, key=cv2.contourArea
                )  # grab the largest contour which corresponds to the component in the mask
                (boxX, boxY, boxW, boxH) = cv2.boundingRect(
                    c)  # compute the bounding box for the contour

                # compute the aspect ratio, solidity, and height ratio for the component
                aspectRatio = boxW / float(
                    boxH
                )  #  the ratio of the bounding box width to the bounding box height
                solidity = cv2.contourArea(c) / float(boxW * boxH)
                heightRatio = boxH / float(
                    plate.shape[0]
                )  # the ratio of the bounding box height to the license plate height
                # Large values of  heightRatio  indicate that the height of the (potential) character is similar to the license plate itself (and thus a likely character).

                # determine if the aspect ratio, solidity, and height of the contour pass the rules tests
                keepAspectRatio = aspectRatio < 1.0  # We want aspectRatio to be at most square, ideally taller rather than wide since most characters are taller than they are wide.
                keepSolidity = solidity > 0.15  # We want solidity to be reasonably large, otherwise we could be investigating “noise”, such as dirt, bolts, etc. on the license plate.
                keepHeight = heightRatio > 0.4 and heightRatio < 0.95  # We want our keepHeight  ratio to be just the right size.

                if keepAspectRatio and keepSolidity and keepHeight:  # check to see if the component passes all the tests

                    # We take the contour, compute the convex hull (to ensure the entire bounding region of the character is included in the contour),
                    # and draw the convex hull on our  charCandidates  mask.
                    hull = cv2.convexHull(
                        c)  # compute the convex hull of the contour
                    cv2.drawContours(
                        charCandidates, [hull], -1, 255,
                        -1)  # draw it on the character candidates mask

        charCandidates = segmentation.clear_border(
            charCandidates
        )  # clear pixels that touch the borders of the character candidates mask and detect contours in the candidates mask

        # TODO:
        # There will be times when we detect more than the desired number of characters; it would be wise to apply a method to 'prune' the unwanted characters.

        return LicensePlate(success=True,
                            plate=plate,
                            thresh=thresh,
                            candidates=charCandidates)
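# A minimal side-by-side sketch of the docstring's point (illustrative; plate_bgr is a
# placeholder image name, not from the original):
# gray  = cv2.cvtColor(plate_bgr, cv2.COLOR_BGR2GRAY)               # weighted RGB mix
# value = cv2.split(cv2.cvtColor(plate_bgr, cv2.COLOR_BGR2HSV))[2]  # dedicated V channel
# _, t_gray  = cv2.threshold(gray,  0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# _, t_value = cv2.threshold(value, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)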
Esempio n. 48
0
def process_static_layers(layers_static, field_folder, camera, infos, output_field, output_field_fix, idx = 0, fixBlack = True, blackBackground = False):

	layers_generated = []
	layer_static_count = 0
	first_layer = True
	# The actual process for static layers.
	for layer in layers_static:

		file_name = "Layer%i_%i.tiff" % (layer["camera_id"], layer["layer_number"])

		# combine static stuff
		if True:
			layer_file = os.path.join(field_folder, file_name)

			# If it's not the first layer ...
			if first_layer == False:
				# We read the layer...
				foreground = cv2.imread(filename = layer_file, flags = cv2.IMREAD_UNCHANGED )
				rows,cols, num_channels = foreground.shape

				has_overlap = False
				has_overlap_other = False
				
				
				# We check if this layer overlaps the previous layer (without compositing)
				
				comparaison_foreground = np.zeros((rows, cols , num_channels), np.uint8)
				comparaison_previous = np.zeros((rows, cols , num_channels), np.uint8)
				pixelCoverage = 0
				for i in range(rows):
					for j in range(cols):
						background_pixel_alpha = previous_layer_img[i,j][3]
						foreground_pixel_alpha = foreground[i,j][3]
						if foreground_pixel_alpha != 0 and background_pixel_alpha != 0:
							if ( previous_layer_img[i,j][0] != foreground[i,j][0] and
				 				 previous_layer_img[i,j][1] != foreground[i,j][1] and
				 				 previous_layer_img[i,j][2] != foreground[i,j][2] 
				 				):								
								comparaison_foreground[i,j] = foreground[i,j]
								comparaison_previous[i,j] = previous_layer_img[i,j]

								pixelCoverage += 1
				
				if pixelCoverage != 0:
					
					difference = (cv2.subtract(comparaison_foreground.astype(float), comparaison_previous.astype(float)))
					meanDiff = cv2.mean(difference)
					meanPx = ( abs(meanDiff[0]) + abs(meanDiff[1]) + abs(meanDiff[2]))
					
					if meanPx > 0.01:
						has_overlap = True

				# If we don't overlap at all, we can just composite them together
				if has_overlap == False:
					channels = cv2.split(foreground)
					if len(channels) < 3:
						print("error", layer_file)

					# We are using the alpha channel of the current layer (so the PSX alpha)
					# If you want to use the PC alpha channel, you can replace this line with something like:
					# Read the alpha :

					# alpha_image = cv2.imread(filename = alpha_layer, flags = cv2.IMREAD_UNCHANGED )
					
					# Resize it to the PSX resolution, you can change the filtering too.
					
					# alpha_image = cv2.resize(alpha_image, (0,0), fx=0.5, fy=0.5, interpolation = cv2.INTER_NEAREST) 
					# alpha = alpha_image[0]

					alpha = channels[3]

					background = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(alpha))
					background = cv2.add(background, foreground)
					previous_layer_without_overlap = layer
					

					previous_layer_img = cv2.bitwise_and(previous_layer_img, previous_layer_img, mask=cv2.bitwise_not(alpha))
					previous_layer_img = cv2.add(previous_layer_img, foreground)

					if layer_static_count == len(layers_static) - 1 :
						if len(layers_generated) > 0:
							layers_generated.append((layer["layer_number"], layer["layer_id"]))
							out_static_file = os.path.join(output_field, "static_layers_%s_%s.png" % (camera, layer["layer_number"] ))
							if os.path.exists(out_static_file) == False:
								imwrite(out_static_file, background)

				else:

					# the layer in front overlaps the one in the background:
					# we composite all the other layers without overlapping.
					altered_bg = background  # note: this aliases background; writes below modify both

					for i in range(rows):
						for j in range(cols):
							background_pixel = background[i,j]
							foreground_pixel = foreground[i,j]
							if foreground_pixel[3] != 0 and background_pixel[3] == 0:
								altered_bg[i,j] = foreground_pixel

					for other_layer in layers_static:
						if other_layer["layer_number"] > layer["layer_number"]:
							
							other_file_name = "Layer%i_%i.tiff" % (layer["camera_id"], other_layer["layer_number"])
							other_layer_file = os.path.join(field_folder, other_file_name)
							
							other_foreground = cv2.imread(filename = other_layer_file, flags = cv2.IMREAD_UNCHANGED )
							for i in range(rows):
								for j in range(cols):
									background_pixel = altered_bg[i,j]
									foreground_pixel = other_foreground[i,j]
									if foreground_pixel[3] != 0 and background_pixel[3] == 0:
										altered_bg[i,j] = foreground_pixel								


					layers_generated.append((previous_layer_without_overlap["layer_number"], previous_layer_without_overlap["layer_id"]))


					if os.path.exists(os.path.join(output_field, "static_layers_%s_%i.png" % (camera, previous_layer_without_overlap["layer_number"]))) == False:
						imwrite(os.path.join(output_field, "static_layers_%s_%i.png" % (camera, previous_layer_without_overlap["layer_number"])), altered_bg)

					
					channels = cv2.split(foreground)
					if len(channels) < 3:
						print("error", layer_file)
					alpha = channels[3]
					background = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(alpha))
					background = cv2.add(background, foreground)
					
					if layer["layer_number"] == layers_static[-1]["layer_number"]:
						layers_generated.append((layer["layer_number"], layer["layer_id"]))

						if os.path.exists(os.path.join(output_field, "static_layers_%s_%i.png" % (camera, layer["layer_number"]))) == False:
							imwrite(os.path.join(output_field, "static_layers_%s_%i.png" % (camera, layer["layer_number"])), background)
					
					previous_layer_without_overlap = layer
				
					previous_layer_img = foreground


			else:
				# It is the first layer ... 
				previous_layer_without_overlap = layer

				# Reading the background.
				background = cv2.imread(filename = layer_file, flags = cv2.IMREAD_UNCHANGED )	

				if blackBackground == True:

					rows, cols, _ = background.shape 
					backgroundBlack = np.zeros(background.shape, np.uint8)
					for i in range(rows):
						for j in range(cols):
							backgroundBlack[i,j] = background[i,j]
							backgroundBlack[i,j][3] = 255


					background = backgroundBlack

				previous_layer_img = background
				rows,cols, num_channels = background.shape
				# check large area of missing pixels.
				numBlack = 0
				pixelsBlack = []
				if fixBlack == True:
					for i in range(rows):
						for j in range(cols):
							background_pixel = background[i,j]
							if background_pixel[3] != 0:
								if background_pixel[0] == 0 and background_pixel[1] == 0 and background_pixel[2] == 0:
									pixelsBlack.append((i,j))

				if len(pixelsBlack) > 200:
					# We have a large number of missing pixels; try to fix them.

					previous_num_black = numBlack
					
					other_layers_img = []
					goodLayerFound = []
					
					otherLayers =infos[camera]["layers"]

					if infos["field_id"] == "1000":
						otherLayers = [infos[camera]["layers"][4], infos[camera]["layers"][8], infos[camera]["layers"][12]]



					for other_layer in otherLayers:
						if(other_layer["blend"]) == 1:
							continue

						if(other_layer["source"]) == 0:
							continue

						# We check if we have the info in another layer, static or animated.
						if layer["layer_number"] != other_layer["layer_number"]:
							other_file_name = "Layer%i_%i.tiff" % (other_layer["camera_id"], other_layer["layer_number"])
							other_layer_file = os.path.join(field_folder, other_file_name)
							other_img = cv2.imread(filename = other_layer_file, flags = cv2.IMREAD_UNCHANGED )
							
							numPixelsFixed = 0
							for pixel in pixelsBlack :
								other_pixel = other_img[pixel[0],pixel[1]]
								if other_pixel[3] != 0:
									if other_pixel[0] != 0 and other_pixel[1] != 0 and other_pixel[2] != 0:
										numPixelsFixed += 1

							if numPixelsFixed >= 200:
								
								goodLayerFound.append(other_img)

					# We found other layers to fix these black pixels...
					if len(goodLayerFound) != 0 :

						mask_image = np.zeros((rows, cols ,3), np.uint8)
						
						# We update the layer with the fixed pixels.
						for goodLayer in goodLayerFound:
							for pixel in pixelsBlack :
								if goodLayer[pixel][3] != 0 :
									mask_image[pixel] = (255,255,255)
									background[pixel] = goodLayer[pixel]
											

						# We output the mask of correction.
						file_name_mask = "Layer_MaskFix%i_%i.png" % (layer["camera_id"], layer["layer_number"])
						file_mask = os.path.join(output_field_fix, file_name_mask)


						imwrite(file_mask, mask_image)



			# Next layer won't be the first one, obviously...
			first_layer = False


		layer_static_count += 1
	
	if len(layers_generated) == 0 :

		out_static_file = os.path.join(output_field, "static_layers_%s.png" % camera )
		if idx != 0:
			out_static_file = os.path.join(output_field, "static_layers_%s_%i.png" % (camera, idx ))
		if os.path.exists(out_static_file) == False:
			imwrite(out_static_file, background)
		statics_have_overlapping = False
		infos[camera]["static_has_overlap"] = False
	elif len(layers_generated) == 1 :

		out_static_file = os.path.join(output_field, "static_layers_%s_%s.png" % (camera, layer["layer_number"] ))
		if os.path.exists(out_static_file) == False:
			imwrite(out_static_file, background)
		statics_have_overlapping = True
		layers_generated.append((layer["layer_number"], layer["layer_id"]))
		infos[camera]["static_has_overlap"] = True
	else:
		statics_have_overlapping = True
		infos[camera]["static_has_overlap"] = True

	return layers_generated, statics_have_overlapping, background
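# The per-pixel Python loops above are easy to follow but very slow. A vectorized
# sketch of the overlap test (illustrative; assumes two 4-channel uint8 images of
# equal size, and is not the original author's code):
def layers_overlap(prev, fore, mean_threshold=0.01):
    both_opaque = (prev[..., 3] != 0) & (fore[..., 3] != 0)
    differs = np.all(prev[..., :3] != fore[..., :3], axis=-1)  # all three channels differ
    covered = both_opaque & differs
    if not covered.any():
        return False
    diff = np.abs(fore[covered, :3].astype(float) - prev[covered, :3].astype(float))
    return diff.mean() > mean_threshold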
Esempio n. 49
0
            try:
                #   anything that evaluates as a function without numpy
                eval(fun_str[i])
                fun += fun_str[i]
            except:
                #   only with numpy
                fun += 'np.' + fun_str[i]
    return fun


f = np.vectorize(norm)

model = joblib.load("cls.pkl")
path = "C:\\Program Files\\Epic Games\\UE_4.18\\Engine\\Binaries\\Win64\\RenderTarget.png"

img = cv2.bitwise_not(cv2.imread(path))

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)

thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
thresh_color = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)

thresh = cv2.dilate(thresh, None, iterations=3)
thresh = cv2.erode(thresh, None, iterations=2)

_, contours, _ = cv2.findContours(thresh, cv2.RETR_LIST,
                                  cv2.CHAIN_APPROX_SIMPLE)

normal_contours = np.array([], dtype='int32')
Esempio n. 50
0
def combined_layer_for_upscale(field_folder):

	info_file = os.path.join(field_folder, "infos.json")
	if os.path.exists(info_file) == False:
		return

	with open(info_file) as f:
		infos = json.load(f)

	field_id = infos["field_id"]
	
	previous_layer_without_overlap = None
	
	output_field = os.path.join(OUT_FOLDER, "Field"+ infos["field_id"])
	output_field_fix = os.path.join(OUT_FOLDER_FIX, "Field"+ infos["field_id"])

	# create output dir
	if not os.path.exists(output_field):
		os.makedirs(output_field)
	if not os.path.exists(output_field_fix):
		os.makedirs(output_field_fix)
	
	# testing all cameras for the field.
	for camera in infos_pc[field_id]:

		fixBlack = True
		blackBackground = False

		num_layers = len(infos[camera]["layers"])
		
	
		layers_static = []
		layers_static_group = []
		if field_id == "766" or field_id == "1055" or field_id == "813":
			layers_static_group.append([])
			layers_static_group.append([])

		if field_id == "931":
			layers_static_group.append([])
			layers_static_group.append([])
			layers_static_group.append([])
			


		# Filtering images for all static layer in the camera.
		for layer in infos[camera]["layers"]:
			if layer["camera_id"] != int(camera):
				print("What ?", layer["camera_id"], camera)
				continue

			# skip additive/multiply effects layers.
			if(layer["blend"]) == 1:
				continue

			# If the source is the PC version, we don't need to upscale it (text, ..)
			if(layer["source"]) == 0:
				continue

			# If the layer has parallax, we print the information and continue.
			# The script doesn't do anything particular with parallax layers; only one field uses them (Field2916).
			# We still print it so we can check whether something special is needed!
			if(layer["has_parallax"]) == 1:
				print("parallax on", info_file)
					

			# We are isolating static layers for the moment.
			if(layer["is_static"] == 0):	
				continue
			
			layers_static.append(layer)
			
			# special case for field 766: there are actually two backgrounds, one with lighting, the other without.
			if field_id == "766":
				if (layer["layer_number"] % 2) == 0:
					layers_static_group[0].append(layer)
				else:
					layers_static_group[1].append(layer)
			
			if field_id == "1055":
				# to avoid ugly background compositing
				if layer["layer_number"] < 2:
					layers_static_group[0].append(layer)
				else:
					layers_static_group[1].append(layer)				

			if field_id == "813":
				# to avoid ugly background compositing
				if layer["layer_number"] < 4:
					layers_static_group[0].append(layer)
				else:
					layers_static_group[1].append(layer)


			if field_id == "931":
				# to avoid ugly background compositing
				if layer["layer_number"] == 0:
					
					layers_static_group[0].append(layer)
					#layers_static_group[1].append(layer)
					#layers_static_group[2].append(layer)
				if layer["layer_number"] <= 2:
					layers_static_group[1].append(layer)
				else:
					layers_static_group[2].append(layer)

		
		if field_id == "2908":
			# we need an animated layer ...
			layers_static.insert(0, infos[camera]["layers"][2])

		if field_id == "1000":
			blackBackground = True
			#layers_static.insert(1, infos[camera]["layers"][7])
			#layers_static.insert(2 ,infos[camera]["layers"][11])
			for l in layers_static:
				print(l)
	
		if field_id == "931":
			fixBlack = False
			blackBackground = True

		if len(layers_static_group) != 0:
			i = 0
			for group in layers_static_group:
				layers_generated, statics_have_overlapping, background = process_static_layers(group, field_folder, camera, infos, output_field, output_field_fix, i, fixBlack = fixBlack, blackBackground =blackBackground)
				i = i + 1
		else:

			layers_generated, statics_have_overlapping, background = process_static_layers(layers_static, field_folder, camera, infos, output_field, output_field_fix, fixBlack = fixBlack, blackBackground = blackBackground)

		# then animation layers
		
		for layer in infos[camera]["layers"]:

			if layer["camera_id"] != int(camera):
				print("What ?", layer["camera_id"], camera)
				continue
			# still skip lighting effects
			if(layer["blend"]) == 1:
				continue

			if(layer["is_static"] == 0):

				file_name = "Layer%i_%i.tiff" % (layer["camera_id"], layer["layer_number"])
				layer_file_anim = os.path.join(field_folder, file_name)
				frame = cv2.imread(filename = layer_file_anim, flags = cv2.IMREAD_UNCHANGED )
				
				channels = cv2.split(frame)
				alpha = channels[3]


				if statics_have_overlapping == False:

					specialCase = False

					# special case. Could be detected, but it's easier this way.
					if infos["field_id"] == "357":
						composited_frame = background

						if layer["layer_number"] >= 13 and layer["layer_number"] <= 28:			
								specialCase = True
								offsetAnimOmbre = 7 + ( layer["layer_number"] - 13 )
								if offsetAnimOmbre >= (7 + 8):
									offsetAnimOmbre -= 8									
								if offsetAnimOmbre >= (7 + 4):
									offsetAnimOmbre -= 4

							

								layer_file_to_load = os.path.join(field_folder, "Layer0_%i.tiff" % offsetAnimOmbre)
								background_add = cv2.imread(filename = layer_file_to_load, flags = cv2.IMREAD_UNCHANGED )	
								channels_add = cv2.split(background_add)
								composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(channels_add[3]))
								composited_frame = cv2.add(background_add, composited_frame)


								if  layer["layer_number"] <= 24:
									# 25 to 28
									offsetOtherRoue = layer["layer_number"] + 12
									if offsetOtherRoue > 28 + 4:
										offsetOtherRoue -= 8
									if offsetOtherRoue > 28:
										offsetOtherRoue -= 4

									layer_file_to_load = os.path.join(field_folder, "Layer0_%i.tiff" % offsetOtherRoue)
									background_add = cv2.imread(filename = layer_file_to_load, flags = cv2.IMREAD_UNCHANGED )	
									channels_add = cv2.split(background_add)
									composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(channels_add[3]))
									composited_frame = cv2.add(background_add, composited_frame)

								
								composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(alpha))
								composited_frame = cv2.add(frame, composited_frame)		


						if layer["layer_number"] >= 17 and layer["layer_number"] <= 28:	
								specialCase = True

								offsetAnimRoue = 13 + ( layer["layer_number"] - 13 )
								if offsetAnimRoue >= (13 + 8):
									offsetAnimRoue -= 8									
								if offsetAnimRoue >= (13 + 4):
									offsetAnimRoue -= 4


								layer_file_to_load = os.path.join(field_folder, "Layer0_%i.tiff" % offsetAnimRoue)
								background_add = cv2.imread(filename = layer_file_to_load, flags = cv2.IMREAD_UNCHANGED )	
								channels_add = cv2.split(background_add)
								composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(channels_add[3]))
								composited_frame = cv2.add(background_add, composited_frame)
								
								composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(alpha))
								composited_frame = cv2.add(frame, composited_frame)	

						if layer["layer_number"] >= 25 and layer["layer_number"] <= 28:		
							pass							
								#layer_file_to_load = os.path.join(field_folder, "Layer0_%i.tiff" % offsetAnimRoue)
								 #background_add = cv2.imread(filename = layer_file_to_load, flags = cv2.IMREAD_UNCHANGED )	
								#channels_add = cv2.split(background_add)
								#composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(channels_add[3]))
								#composited_frame = cv2.add(background_add, composited_frame)
								
								# composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(alpha))
								# composited_frame = cv2.add(frame, composited_frame)																

					if infos["field_id"] == "1000" :

						composited_frame = background
						if layer["layer_number"] >= 4 and layer["layer_number"] <= 7:
							specialCase = True
							offsetAnimTree = 8 + ( layer["layer_number"] - 4 )


						if layer["layer_number"] >= 8 and layer["layer_number"] <= 11:
							specialCase = True
							offsetAnimTree = 4 + ( layer["layer_number"] - 8 )

						
						if specialCase:
							layer_file_to_load = os.path.join(field_folder, "Layer0_%i.tiff" % offsetAnimTree)
							background_add = cv2.imread(filename = layer_file_to_load, flags = cv2.IMREAD_UNCHANGED )	
							channels_add = cv2.split(background_add)
							composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(channels_add[3]))
							composited_frame = cv2.add(background_add, composited_frame)

							composited_frame = cv2.bitwise_and(composited_frame, composited_frame, mask=cv2.bitwise_not(alpha))
							composited_frame = cv2.add(frame, composited_frame)		

					if not specialCase:
						composited_frame = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(alpha))
						composited_frame = cv2.add(frame, composited_frame)

				else:

					minLayerId = 9999

					for x in layers_generated:
						if x[1] < minLayerId:
							minLayerId = x[1]
					maxLayerId = 0
					for x in layers_generated:
						if x[1] > maxLayerId:
							maxLayerId = x[1]					
					
					if layer["layer_id"] < minLayerId:
						for x in layers_generated:
							if x[1] == minLayerId:
								layer_file_to_load = os.path.join(output_field, "static_layers_%s_%i.png" % (camera, x[0]))
								break
					elif layer["layer_id"] > maxLayerId:
						for x in layers_generated:
							if x[1] == maxLayerId:
								layer_file_to_load = os.path.join(output_field, "static_layers_%s_%i.png" % (camera, x[0]))	
								break
					else:
						maxLayerUsed = 0
						for x in layers_generated:
							if layer["layer_id"] < x[1]  and x[1] > maxLayerUsed:
								layer_file_to_load = os.path.join(output_field, "static_layers_%s_%i.png" % (camera, x[0]))
								maxLayerUsed = x[1]
							elif layer["layer_id"] < x[1]  and x[1] < maxLayerUsed:
								print("weird order ?", field_id)


					background = cv2.imread(filename = layer_file_to_load, flags = cv2.IMREAD_UNCHANGED )		

					composited_frame = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(alpha))
					composited_frame = cv2.add(frame, composited_frame)
				
				if os.path.exists(os.path.join(output_field, "anim_layer_%i_%i.png" % (int(camera), layer["layer_number"] ))) == False:

					cv2.imwrite(os.path.join(output_field, "anim_layer_%i_%i.png" % (int(camera), layer["layer_number"] )), composited_frame)

	info_file = os.path.join(output_field, "infos.json")
	with open(info_file, "w") as write_file:
		json.dump(infos, write_file, indent=4)
Esempio n. 51
0
    green_mask[:,:,1] = 100
    green_mask[:,:,2] = 1
    green = np.asarray(green_mask, dtype = 'uint8')
    white = threshold_white(ROI)
    result = cv2.bitwise_and(green,ROI, mask= white)
    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            if sum(result[i,j,:]) == 0:
                result[i,j,:] = [255,255,255]
    result2 = cv2.bitwise_and(result,ROI)
    #blue_thresh = threshold_blue(ROI)
    #preview(blue_thresh) 
    
    green_thresh = threshold_green(ROI)
    #preview(green_thresh)
    green_thresh_inverted = cv2.bitwise_not(green_thresh)
    green_thresh_open_close = open_close_image(green_thresh_inverted, 4)
    #green_thresh_inverted = morphOps(green_thresh_inverted, 5)
    
    res = cv2.bitwise_and(ROI,ROI, mask= green_thresh_open_close)
    #preview(res)
    
    
    im2, contours, hierarchy = cv2.findContours(green_thresh_open_close, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    num_contours = len(contours)
    print(num_contours)
    font = cv2.FONT_HERSHEY_SIMPLEX
#    cv2.putText(res,'Number of screws:' + str(num_contours),(50,50), font, 1,(255,255,255),2,cv2.LINE_AA)
    preview(res)
Esempio n. 52
0
def calibrateMask(img,
                  hhigh=15,
                  hlow=5,
                  shigh=255,
                  slow=40,
                  vhigh=255,
                  vlow=40):
    #HSV color is ideal for masking, as it is resistant to shadow and lighting
    #effects when filtering colors. As a result, the image is converted to HSV
    #color space to filter background colors from dice colors.
    imagehsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    #Creates a blank image (all black)
    #Size of this image determines the length of the trackbars in trackbar
    #window
    blankimg = np.array([0, 0, 0])
    row = 1
    col = 400
    blankimg = np.full((row, col, 3), blankimg)

    low_background = np.array([hlow, slow, vlow])
    high_background = np.array([hhigh, shigh, vhigh])
    mask = cv2.inRange(imagehsv, low_background, high_background)
    mask = cv2.bitwise_not(mask)

    res = cv2.bitwise_and(img, img, mask=mask)

    cv2.namedWindow('image')
    cv2.namedWindow('trackbars')
    cv2.createTrackbar('H-HIGH', 'trackbars', 0, 255, nothing)
    cv2.createTrackbar('H-LOW', 'trackbars', 0, 255, nothing)
    cv2.createTrackbar('S-HIGH', 'trackbars', 0, 255, nothing)
    cv2.createTrackbar('S-LOW', 'trackbars', 0, 255, nothing)
    cv2.createTrackbar('V-HIGH', 'trackbars', 0, 255, nothing)
    cv2.createTrackbar('V-LOW', 'trackbars', 0, 255, nothing)
    cv2.imshow('trackbars', blankimg)

    while (1):
        cv2.imshow('image', mask)

        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

        #get current position of trackbars
        h_high = cv2.getTrackbarPos('H-HIGH', 'trackbars')
        h_low = cv2.getTrackbarPos('H-LOW', 'trackbars')
        s_high = cv2.getTrackbarPos('S-HIGH', 'trackbars')
        s_low = cv2.getTrackbarPos('S-LOW', 'trackbars')
        v_high = cv2.getTrackbarPos('V-HIGH', 'trackbars')
        v_low = cv2.getTrackbarPos('V-LOW', 'trackbars')
        threshold = [(h_low, h_high), (s_low, s_high), (v_low, v_high)]

        #create mask from trackbar thresholds
        low_background = np.array([h_low, s_low, v_low])
        high_background = np.array([h_high, s_high, v_high])
        mask = cv2.inRange(imagehsv, low_background, high_background)
        mask = cv2.bitwise_not(mask)
        #Filtering for the mask.
        #Exactly the same filtering the algorithm applies to its mask,
        #so the preview is accurate to the algorithm.
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        kernel = np.ones((5, 5), np.float32) / 25
        mask = cv2.filter2D(mask, -1, kernel)

        _, mask = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)
        res = cv2.bitwise_and(img, img, mask=mask)

    cv2.destroyWindow('image')
    cv2.destroyWindow('trackbars')
    return threshold
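# A minimal usage sketch (illustrative; 'dice.png' is a placeholder file name): tune the
# trackbars until only the background is masked, then press Esc to get the thresholds
# back as ((h_low, h_high), (s_low, s_high), (v_low, v_high)).
# img = cv2.imread('dice.png')
# threshold = calibrateMask(img)
# print(threshold)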
Esempio n. 53
0
    HSVLOW2 = np.array([160, 110, 40])
    HSVHIGH2 = np.array([179, 255, 228])

    mask = cv2.inRange(hsv, HSVLOW1, HSVHIGH1)
    mask1 = mask.copy()
    mask2 = cv2.inRange(hsv, HSVLOW2, HSVHIGH2)
    mask1 = cv2.bitwise_or(mask1, mask2, mask=None)
    mask = mask1.copy()

    # mask = cv2.inRange(hsv, HSVLOW, HSVHIGH)
    # mask1 = mask.copy()

    # mask1 = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))
    # mask1 = cv2.morphologyEx(mask, cv2.MORPH_DILATE, np.ones((3,3),np.uint8))

    mask1 = cv2.bitwise_not(mask1)

    maskedFrame = cv2.bitwise_and(frame, frame, mask=mask1)

    maskedFrame = cv2.morphologyEx(maskedFrame, cv2.MORPH_OPEN,
                                   np.ones((3, 3), np.uint8))
    maskedFrame = cv2.morphologyEx(maskedFrame, cv2.MORPH_DILATE,
                                   np.ones((3, 3), np.uint8))

    # maskedFrame = cv2.bitwise_and(frame, frame, mask = mask1)

    contours, hierarchy = cv2.findContours(mask1, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) >= min_area:
            # x,y,w,h = cv2.boundingRect(cnt)
Esempio n. 54
0
def decaptcha(filenames):
    captcha_image_files = filenames
    numChars = 3 * np.ones((len(filenames), ))
    count = 0
    codes = []
    for image_file in captcha_image_files:
        image = cv2.imread(image_file)
        cv2.imshow("Output", image)
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        low = np.array([0, 100, 250])
        high = np.array([179, 255, 255])
        mask_fore = cv2.inRange(hsv, low, high)
        image = cv2.bitwise_not(mask_fore)

        image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)

        thresh = cv2.threshold(image, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)

        contours = contours[0] if imutils.is_cv2() else contours[1]

        letter_image_regions = []

        temp = 0
        for contour in contours:
            (x, y, w, h) = cv2.boundingRect(contour)
            if w / h > 1.25:

                half_width = int(w / 2)
                letter_image_regions.append((x, y, half_width, h))
                letter_image_regions.append((x + half_width, y, half_width, h))
            else:
                letter_image_regions.append((x, y, w, h))

        letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])

        output = cv2.merge([image] * 3)
        predictions = []

        for letter_bounding_box in letter_image_regions:
            x, y, w, h = letter_bounding_box
            letter_image = image[y - 2:y + h + 2, x - 2:x + w + 2]

            letter_image = resize_to_fit(letter_image, 20, 20)

            letter_image = np.expand_dims(letter_image, axis=2)
            letter_image = np.expand_dims(letter_image, axis=0)

            prediction = model.predict(letter_image)

            letter = lb.inverse_transform(prediction)[0]
            predictions.append(letter)
            temp = temp + 1

        numChars[count] = temp
        count = count + 1
        captcha_text = "".join(predictions)
        print("CAPTCHA text is: {}".format(captcha_text))
        codes.append(captcha_text)

    return (numChars, codes)
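# resize_to_fit is defined elsewhere; presumably it scales a letter crop to fit inside
# a width x height box and pads the remainder. A minimal sketch under that assumption
# (not the original helper):
def resize_to_fit_sketch(image, width, height):
    h, w = image.shape[:2]
    scale = min(width / float(w), height / float(h))
    resized = cv2.resize(image, (int(w * scale), int(h * scale)))
    pad_w, pad_h = width - resized.shape[1], height - resized.shape[0]
    return cv2.copyMakeBorder(resized, pad_h // 2, pad_h - pad_h // 2,
                              pad_w // 2, pad_w - pad_w // 2,
                              cv2.BORDER_CONSTANT, value=0)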
Esempio n. 55
0
    bluemask = cv2.dilate(bluemask, None, iterations=2)

    blackmask = cv2.inRange(hsv, blackLower, blackUpper)
    # blackmask = cv2.erode(blackmask, None, iterations=5)
    blackmask = cv2.dilate(blackmask, None, iterations=1)
    blackmask = cv2.GaussianBlur(blackmask, (5, 5), 0)
    blackmask = cv2.Canny(blackmask, 35, 125)

    # find target
    redmasked = cv2.bitwise_and(frame, frame, mask=redmask)
    yellowmasked = cv2.bitwise_and(frame, frame, mask=yellowmask)
    greenmasked = cv2.bitwise_and(frame, frame, mask=greenmask)
    bluemasked = cv2.bitwise_and(frame, frame, mask=bluemask)
    blackmasked = cv2.bitwise_and(frame, frame, mask=blackmask)
    # locates the area
    redmask = cv2.bitwise_not(redmask)
    yellowmask = cv2.bitwise_not(yellowmask)
    greenmask = cv2.bitwise_not(greenmask)
    bluemask = cv2.bitwise_not(bluemask)
    blackmask = cv2.bitwise_not(blackmask)
    # punch holes in the result frame where each mask matched
    rfakemask = cv2.bitwise_and(resultFrame, resultFrame, mask=redmask)
    resultFrame = np.minimum(resultFrame, rfakemask)
    yfakemask = cv2.bitwise_and(resultFrame, resultFrame, mask=yellowmask)
    resultFrame = np.minimum(resultFrame, yfakemask)
    gfakemask = cv2.bitwise_and(resultFrame, resultFrame, mask=greenmask)
    resultFrame = np.minimum(resultFrame, gfakemask)
    bfakemask = cv2.bitwise_and(resultFrame, resultFrame, mask=bluemask)
    resultFrame = np.minimum(resultFrame, bfakemask)
    # dfakemask = cv2.bitwise_and(resultFrame, resultFrame, mask=blackmask)
    # resultFrame = np.minimum(resultFrame, dfakemask)
Esempio n. 56
0
def not_demo(m1):  # bitwise NOT: invert every channel of every pixel
    dst = cv2.bitwise_not(m1)
    cv2.imshow("not_demo", dst)
cv2.imshow("finger_location",mask_selected)
cv2.waitKey(0)

img_parameters = mask_selected.shape
height , width = img_parameters
#Part 2
#Contouring

while 1:
    _, frame = cap.read()
    
    crop = frame[tf2-50:tf2,0:width]
    img_parameters = crop.shape
    print(img_parameters)
    gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)
    thresh = cv2.threshold(gray, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    kernel = np.ones((5,5), np.uint8)
    dilation = cv2.dilate(thresh, kernel, iterations=1)
    (_, contours, _) = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    no_of_cnt=len(contours)
    i=0
    dist=[]
    #discard contours with area below the threshold
    reduction=0
    threshold=400
    no_of_cnt_temp = no_of_cnt
    print(no_of_cnt)
    while i<(no_of_cnt_temp):
        print(i)
Esempio n. 58
0
    img = np.maximum(img, my_min + img_min)
    img = np.minimum(img, my_max + img_min)
    img = (img - np.min(img)) / (np.max(img) - np.min(img))

    # img=anisodiff(img)

    img = (img * 255).astype(np.uint8)
    new_img = img.copy()
    # remove_skull(new_img)

    #!!!

    new_img = cv2.morphologyEx(new_img, cv2.MORPH_CLOSE, kernel)
    new_img = cv2.medianBlur(new_img, 5)
    new_img = cv2.bitwise_not(new_img)

    ret, new_img = cv2.threshold(new_img, 127, 255, 0)
    cv2.imshow("thres_img", new_img)

    #!!!
    inverze_img = cv2.bitwise_not(new_img)

    keypoints = detector.detect(new_img)
    if len(keypoints) > 0:
        for elem in keypoints:
            detected_keypoints.append(elem)
    keypoint_lens.append(len(keypoints))

#///////////////////////////////////////////////////////
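# 'params' is not defined in this fragment. A typical setup sketch (assumed values,
# not the original configuration):
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 20  # ignore tiny specks
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False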
detector = cv2.SimpleBlobDetector_create(params)
Esempio n. 59
0
plt.plot(history.history['val_acc'])
plt.legend(['acc', 'val_acc'])
plt.title('Accuracy')
plt.xlabel('epoch')

score = model.evaluate(X_test, y_test, verbose=0)
print(type(score))
print('Test score:', score[0])
print('Test accuracy:', score[1])

import requests
from PIL import Image

url = 'https://www.researchgate.net/profile/Jose_Sempere/publication/221258631/figure/fig1/AS:305526891139075@1449854695342/Handwritten-digit-2.png'
response = requests.get(url, stream=True)
img = Image.open(response.raw)
plt.imshow(img, cmap=plt.get_cmap('gray'))

import cv2

img = np.asarray(img)
img = cv2.resize(img, (28, 28))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.bitwise_not(img)
plt.imshow(img, cmap=plt.get_cmap('gray'))

img = img/255
img = img.reshape(1, 784)

prediction = model.predict_classes(img)
Esempio n. 60
0
import cv2
import numpy as np
img1 = np.zeros((250, 500, 3), np.uint8)  #2^8-1=255
print(img1.shape)
cv2.rectangle(img1, (200, 0), (300, 100), (255, 255, 255), -1)
img2 = np.ones((250, 500, 3), np.uint8)
cv2.rectangle(img2, (0, 0), (250, 500), (255, 255, 255), -1)
bitwise_and = cv2.bitwise_and(img1, img2)
bitwise_or = cv2.bitwise_or(img1, img2)
bitwise_xor = cv2.bitwise_xor(img1, img2)
bitwise_not = cv2.bitwise_not(img1)
cv2.imshow("BIT_NOT", bitwise_not)
"""
cv2.imshow("BIT_XOR",bitwise_xor)
cv2.imshow("BIT_OR",bitwise_or)
cv2.imshow("BIT_AND",bitwise_and)
"""
cv2.imshow("IMAGE2", img2)
cv2.imshow("IMG1", img1)
cv2.waitKey(0)