def __sobel_image__(self,image,horizontal):
        """
        apply the sobel operator to a given image on either the vertical or horizontal axis
        basically copied from
        http://stackoverflow.com/questions/10196198/how-to-remove-convexity-defects-in-a-sudoku-square
        :param horizontal:
        :return:
        """
        if horizontal:
            dy = cv2.Sobel(image,cv2.CV_16S,0,2)
            dy = cv2.convertScaleAbs(dy)
            cv2.normalize(dy,dy,0,255,cv2.NORM_MINMAX)
            ret,close = cv2.threshold(dy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(10,2))
        else:
            dx = cv2.Sobel(image,cv2.CV_16S,2,0)
            dx = cv2.convertScaleAbs(dx)
            cv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)
            ret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,10))

        close = cv2.morphologyEx(close,cv2.MORPH_CLOSE,kernel)

        return close
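
Since the method above never reads `self`, the same pipeline can be exercised standalone. A minimal sketch, assuming a grayscale input image (the filename is a placeholder):

import cv2

def sobel_lines(image, horizontal):
    # second-order Sobel response across the chosen axis, then Otsu + closing
    d = cv2.Sobel(image, cv2.CV_16S, 0, 2) if horizontal else cv2.Sobel(image, cv2.CV_16S, 2, 0)
    d = cv2.convertScaleAbs(d)
    cv2.normalize(d, d, 0, 255, cv2.NORM_MINMAX)
    _, close = cv2.threshold(d, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 2) if horizontal else (2, 10))
    return cv2.morphologyEx(close, cv2.MORPH_CLOSE, kernel)

gray = cv2.imread("sudoku.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
horizontal_lines = sobel_lines(gray, True)
vertical_lines = sobel_lines(gray, False)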
def track2(bs,img_copy,img, avg):
	x = -1
	y = -1

	img_copy = cv2.GaussianBlur(img_copy,(5,5),0)
	cv2.accumulateWeighted(img_copy,avg,0.4)
	res = cv2.convertScaleAbs(avg)
	res = cv2.absdiff(img, res)
	_,processed_img = cv2.threshold( res, 7, 255, cv2.THRESH_BINARY )
	processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
	_,processed_img = cv2.threshold( processed_img, 240, 255, cv2.THRESH_BINARY )

	processed_img = bs.bg_subtractor.apply(processed_img, None, 0.05)
	
	# img_thresh = cv2.morphologyEx(img_thresh, cv2.MORPH_OPEN, kernel)
	
	if np.count_nonzero(processed_img) > 5:
		# Get the largest contour
		contours, hierarchy = cv2.findContours(processed_img, cv2.RETR_TREE, 
			cv2.CHAIN_APPROX_SIMPLE)
		areas = [cv2.contourArea(c) for c in contours]
		max_index = np.argmax(areas)

		# Make sure it's big enough
		if cv2.contourArea(contours[max_index]) >= MIN_BLOB_SIZE:
			cv2.drawContours(img, contours, max_index, (255, 255, 255), -1)
			x, y = getCentroid(contours[max_index])

	return x, y
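
track2 depends on a getCentroid helper (and a MIN_BLOB_SIZE constant) that are not shown. A plausible sketch of the helper, assuming it returns integer (x, y) coordinates computed from image moments:

def getCentroid(contour):
    # centroid from spatial moments, guarding against zero-area contours
    m = cv2.moments(contour)
    if m["m00"] == 0:
        return -1, -1
    return int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])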
def SEU_Decode(SEU_Code):
    # binarize at mid-gray, then reshape to the (1, height, width, 1)
    # tensor expected by the module-level model before decoding
    SEU_Grey = cv2.cvtColor(SEU_Code, cv2.COLOR_BGR2GRAY)
    cv2.threshold(SEU_Grey, 255 / 2, 255, cv2.THRESH_BINARY, SEU_Grey)
    SEU_Code_Reshape4D = np.array(SEU_Grey).reshape((1, height, width, 1))
    y_pred = model.predict(SEU_Code_Reshape4D)
    return decode(y_pred)
Example #4
    def otsu(self, img):
        # global thresholding
        ret1,th1 = cv2.threshold(img,50,255,cv2.THRESH_BINARY)
        per = np.percentile(img.ravel(), np.linspace(0,100,100))
        print("percentile = {}".format(per))
#         plt.hist(img.ravel(), 256)
#         plt.figure()

        
        # Otsu's thresholding
        ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        # Otsu's thresholding after Gaussian filtering
        blur = cv2.GaussianBlur(img,(5,5),0)
        ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        
        print("global = {}, ostu={}, gaussinaostu={}".format(ret1, ret2, ret3))
        # plot all the images and their histograms
        images = [img, 0, cv2.bitwise_not(th1),
                  img, 0, cv2.bitwise_not(th2),
                  blur, 0, cv2.bitwise_not(th3)]
        titles = ['Original Noisy Image','Histogram','Global Thresholding (v=50)',
                  'Original Noisy Image','Histogram',"Otsu's Thresholding",
                  'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
        for i in range(3):
            plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
            plt.title(titles[i*3])
            plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
            plt.title(titles[i*3+1])
            plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
            plt.title(titles[i*3+2])
        plt.show()
        return
Example #5
def border_mask(img, p1, p2, device, debug, color="black"):
  # rectangle_mask masks the edge of the plotting region, but misses the image border by 1 pixel;
  # this function fills in that border region
  # note that p1 = (0,0) is the top-left corner and p2 = (max-value(x), max-value(y)) is the bottom-right corner
  # device = device number, used to count steps in the pipeline
  # debug = True/False; if True, print the output image
  if color=="black":
    ix, iy = np.shape(img)
    size = ix,iy
    bnk = np.zeros(size, dtype=np.uint8)
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (255,255,255))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(bnk, contour, -1 ,(255,255,255), 5)
    device +=1
  if color=="gray":
    ix, iy = np.shape(img)
    size = ix,iy
    bnk = np.zeros(size, dtype=np.uint8)
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (192,192,192))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(bnk, contour, -1 ,(192,192,192), 5)
    device +=1
  if debug:
    print_image(bnk, (str(device) + '_brd_mskd_' + '.png'))
  return device, bnk, contour, hierarchy
Example #6
def rectangle_mask(img, p1, p2, device, debug, color="black"):
  # takes an input image and returns a binary image masked by the rectangular area denoted by p1 and p2
  # note that p1 = (0,0) is the top-left corner and p2 = (max-value(x), max-value(y)) is the bottom-right corner
  # device = device number, used to count steps in the pipeline
  # debug = True/False; if True, print the output image
  # get the dimensions of the input image
  ix, iy = np.shape(img)
  size = ix,iy
  # create a blank image of same size
  bnk = np.zeros(size, dtype=np.uint8)
  # draw a rectangle denoted by pt1 and pt2 on the blank image
  
  if color=="black":
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (255,255,255))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    # make sure the entire rectangle is visible within the plotting region or it will not fill with thickness = -1
    # note that you should only draw the first contour (contour[0]) if you want to fill with thickness = -1,
    # otherwise two rectangles will be drawn and the space between them will be filled
    cv2.drawContours(bnk, contour, 0 ,(255,255,255), -1)
    device +=1
  if color=="gray":
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (192,192,192))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    # make sure the entire rectangle is visible within the plotting region or it will not fill with thickness = -1
    # note that you should only draw the first contour (contour[0]) if you want to fill with thickness = -1,
    # otherwise two rectangles will be drawn and the space between them will be filled
    cv2.drawContours(bnk, contour, 0 ,(192,192,192), -1)
    device += 1
  if debug:
    print_image(bnk, (str(device) + '_roi.png'))
  return device, bnk, contour, hierarchy
def _generate_training_set(img, image_file):
    save_location = "images/training/"
    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
    _, regions = cv2.connectedComponents(img)

    if not os.path.exists("../images/cc"):
        os.makedirs("../images/cc")

    cv2.imwrite("../images/cc/cc.png", regions)
    cc = cv2.imread("../images/cc/cc.png", 0)
    _, cc_vis = cv2.threshold(cc, 1, 255, cv2.THRESH_BINARY)

    _, contours, hierarchy = cv2.findContours(cc_vis, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    idx = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 50 or area > 1000:
            continue
        if len(cnt) < 5:
            continue
        idx += 1
        x, y, w, h = cv2.boundingRect(cnt)
        roi = img[y: y + h, x: x + w]
        name = image_file.split('.')[0]
        inverted = (255 - roi)
        cv2.imwrite(save_location + name + str(idx) + '.jpg', inverted)
    cv2.waitKey(0)
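
Called on a binarizable page image, the function above crops each small connected component into images/training/. A typical invocation (the input path is illustrative):

img = cv2.imread("../images/pages/sample.png", 0)  # grayscale source page, placeholder path
_generate_training_set(img, "sample.png")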
Example #8
def get_vessels(img,side):
    # convert to grayscale
    # img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # equilized = cv2.equalizeHist(img_gray)
    green_channel = img[:,:,1]
    threshold = np.max(green_channel)*0.9
    
    #crop image
    gch_crop1=green_channel[:, (green_channel != 0).sum(axis=0) != 0]
    gch_crop2=gch_crop1[(gch_crop1 != 0).sum(axis=1) != 0,:]
    green_channel=gch_crop2
    
    #rotate by optical disc
    dummy,gch_bin = cv2.threshold(green_channel, threshold,255 ,cv2.THRESH_BINARY)
    i,j = np.unravel_index(gch_bin.argmax(), gch_bin.shape)
    if ((gch_bin.shape[1]/2 < j) and side=='left') or ((gch_bin.shape[1]/2 > j) and side=='right'):
        green_channel=np.rot90(green_channel,2)
    # 35 x 35 median filter to estimate the background
    gch_mf = cv2.medianBlur(green_channel,35)
    # gch_nl = cv2.fastNlMeansDenoising(green_channel,h=10)
    gch_norm = green_channel - gch_mf

    gch_norm_norm = cv2.medianBlur(gch_norm,35)
    # convert to binary image with a Gaussian adaptive threshold
    gch_norm_bin = cv2.adaptiveThreshold(gch_norm,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)


    gch_norm_bin_norm = cv2.medianBlur(gch_norm_bin,35)
    return gch_norm_bin_norm
def find_hottest_points(cv_image):
  
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3,3))
  #gray = clahe.apply(img)
  gray = clahe.apply(cv_image)
  gray = cv2.GaussianBlur (gray, (21,21), 0)

  min_thresh = cv2.threshold(gray, min_th, 255, cv2.THRESH_BINARY)[1]
  max_thresh = cv2.threshold(gray, max_th, 255, cv2.THRESH_BINARY_INV)[1]

  thresh = cv2.bitwise_and(min_thresh, max_thresh)

  thresh = cv2.dilate(thresh, None, iterations = 2)
  (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)

  for c in cnts:
    if cv2.contourArea(c) > min_area and cv2.contourArea(c) < max_area:
      
      (x,y,w,h) = cv2.boundingRect(c)
#      cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
      cv2.rectangle(cv_image, (x, y), (x+w, y+h), 0, 2)
      continue


  cv2.imshow("region_detector", cv_image)
  cv2.moveWindow("region_detector",900,0)
  cv2.imshow("band_threshold_image", thresh)
  cv2.moveWindow("band_threshold_image",900,400)
  cv2.waitKey(1)
Example #10
    def _divide(self):        
        block_size = self.spec.block_size # shortcut
        half_block = (block_size-1)/2
        
        rows, columns = self.dividing.nonzero()
        for i in range(len(rows)):
            row = rows[i]
            column = columns[i]

            write_block(self._cell_block, self.cells, row, column, block_size)
            cv2.filter2D(self._cell_block, cv2.CV_32F, self._tension_kernel,
                         self._probability, borderType=cv2.BORDER_CONSTANT)
            cv2.threshold(self._probability, self._tension_min, 0, 
                          cv2.THRESH_TOZERO, self._probability)
            self._probability[self._cell_block] = 0
            self._probability **= self.spec.tension_power
            self._probability *= self._distance_kernel
            
            # optimized version of np.random.choice
            np.cumsum(self._probability.flat, out=self._cumulative)
            total = self._cumulative[-1]
            if total < 1.0e-12:
                # no viable placements, we'll have precision problems anyways
                continue 
            self._cumulative /= total
            
            index = self._indices[np.searchsorted(self._cumulative, 
                                                  rdm.random())]
            local_row, local_column = np.unravel_index(index, 
                                                       self._probability.shape)
            self.set_alive(row+(local_row-half_block), 
                           column+(local_column-half_block))
Example #11
def perform_changes_task2(img):
    cv2.GaussianBlur(img, (3, 3), 0, img)    # denoise in place
    cv2.Laplacian(img, 0, img, 1)            # edge response, in place
    cv2.GaussianBlur(img, (19, 3), 7, img)   # wide horizontal blur to merge nearby edges
    cv2.threshold(img, 10, 255, cv2.THRESH_BINARY, img)
    #cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 5, img)
    return img
def otsuTwo(img, img_file, man_img, mask=None):

    # blur = cv2.GaussianBlur(img, (5,5),0)
    blur = cv2.bilateralFilter(img, 5, 100, 100)

    thresholds = multithresholdOtsu(blur,mask)
    th1 = thresholds[0]
    th2 = thresholds[1]


    if mask is None:
        ret, thresh1 = cv2.threshold(blur,th1,255,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(blur,th2,255,cv2.THRESH_BINARY_INV)
    else:
        combined_img = cv2.bitwise_and(blur, blur, mask=mask)
        ret, thresh1 = cv2.threshold(combined_img,th1,255,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(combined_img,th2,255,cv2.THRESH_BINARY_INV)

    out_img_o = cv2.bitwise_and(thresh1, thresh2, mask=None)
    out_info_o = "_otsu_%d-%d" % (th1, th2)
    out_str_o = out_info_o + '.png'
    out_file_o = re.sub(r'\.jpg', out_str_o, img_file)
    cv2.imwrite(out_file_o, out_img_o)
    t = evaluation.findTotals(out_img_o, man_img)
    with open('o2_all.txt', 'a') as f:
        f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
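
otsuTwo relies on an external multithresholdOtsu. One way such a helper can work is an exhaustive two-threshold Otsu: pick the pair (t1, t2) that maximizes the between-class variance of the three resulting classes. A slow but straightforward sketch under that assumption (the real helper may differ):

import numpy as np

def multithresholdOtsu(img, mask=None):
    # histogram of the pixels under the mask (or of all pixels)
    pixels = img[mask > 0] if mask is not None else img
    hist = np.bincount(pixels.ravel(), minlength=256).astype(np.float64)
    p = hist / hist.sum()
    bins = np.arange(256)
    mg = (bins * p).sum()  # global mean
    best_var, best = -1.0, (0, 0)
    for t1 in range(1, 255):
        for t2 in range(t1 + 1, 256):
            w0, w1, w2 = p[:t1].sum(), p[t1:t2].sum(), p[t2:].sum()
            if w0 == 0 or w1 == 0 or w2 == 0:
                continue
            m0 = (bins[:t1] * p[:t1]).sum() / w0
            m1 = (bins[t1:t2] * p[t1:t2]).sum() / w1
            m2 = (bins[t2:] * p[t2:]).sum() / w2
            var = w0*(m0 - mg)**2 + w1*(m1 - mg)**2 + w2*(m2 - mg)**2
            if var > best_var:
                best_var, best = var, (t1, t2)
    return best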
    def threshold_image(self, channel):
        if channel == "hue":
            minimum = self.hue_min
            maximum = self.hue_max
        elif channel == "saturation":
            minimum = self.sat_min
            maximum = self.sat_max
        elif channel == "value":
            minimum = self.val_min
            maximum = self.val_max

        (t, tmp) = cv2.threshold(
            self.channels[channel],  # src
            maximum,  # threshold value
            0,  # we don't care because of the selected type
            cv2.THRESH_TOZERO_INV  # t type
        )

        (t, self.channels[channel]) = cv2.threshold(
            tmp,  # src
            minimum,  # threshold value
            255,  # maxvalue
            cv2.THRESH_BINARY  # type
        )

        if channel == 'hue':
            # only works for filtering red color because the range for the hue
            # is split
            self.channels['hue'] = cv2.bitwise_not(self.channels['hue'])
def GenCh(f,val, data_shape1, data_shape2, bg_gray, text_gray, text_position):
    img = Image.new("L", (data_shape1,data_shape2), bg_gray)
    draw = ImageDraw.Draw(img)
    draw.text((0, 0), val, text_gray, font=f)
    #draw.text((0, text_position),val.decode('utf-8'),0,font=f)
    A = np.array(img)

    # binarize to find the exact left and right boundaries of the text
    if bg_gray > text_gray:
        ret,bin = cv2.threshold(A,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    else:
        ret,bin = cv2.threshold(A,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #cv2.imshow('A',A)
    #cv2.imshow('bin',bin)

    left = -1
    right = 10000
    for i in range(0,bin.shape[1]):
        if np.sum(bin[:,i]) > 0:
            left = i
            break
    for i in range(bin.shape[1]-1,0,-1):
        if np.sum(bin[:,i]) > 0:
            right = i
            break
    dst = A[:,left:right+1]
    #cv2.imshow('dst',dst)
    #cv2.waitKey()
    return dst
Example #15
def main(argv):
	# compress the image to < 1000 width first!
	im = cv2.imread(argv[0])
	hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)

	lower_yellow = np.array([20, 60, 60])
	upper_yellow = np.array([40, 255, 255])

	mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
	res = cv2.bitwise_and(im, im, mask=mask)
	ret, binary = cv2.threshold(res,127,255,cv2.THRESH_BINARY)

	gray = cv2.cvtColor(binary,cv2.COLOR_BGR2GRAY)
	ret,gray = cv2.threshold(gray,127,255,0)
	im2 = im.copy()

	contours, hier = cv2.findContours(gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
	ct = 0
	for cnt in contours:
		if 800 < cv2.contourArea(cnt) < 10000:
			(x,y,w,h) = cv2.boundingRect(cnt)
			cv2.rectangle(im2,(x,y),(x+w,y+h),0,-1)
			crop_im = im[y:y+h, x:x+w]
			crop_im = cv2.cvtColor(crop_im, cv2.COLOR_BGR2GRAY)
			cv2.imwrite("crop_"+str(ct)+".jpg", crop_im)
			ct += 1

	cv2.imshow("testing", im2)
	cv2.waitKey(0)
def get_library():
    # READ THE LIBRARY IMAGES
    img_cube = cv2.imread('./Library/Cube.jpg')
    img_hexagon = cv2.imread('./Library/Hexagon.jpg')
    img_star = cv2.imread('./Library/Star.jpg')

    # CANNY EDGE ALGORITHM
    img_cube = cv2.Canny(img_cube, 1200, 100)
    img_hexagon = cv2.Canny(img_hexagon, 1200, 100)
    img_star = cv2.Canny(img_star, 1200, 100)

    # MORPHOLOGICAL GRADIENT (DILATION MINUS EROSION)
    kernel = np.ones((5, 5), np.uint8)
    img_cube = cv2.morphologyEx(img_cube, cv2.MORPH_GRADIENT, kernel)
    img_hexagon = cv2.morphologyEx(img_hexagon, cv2.MORPH_GRADIENT, kernel)
    img_star = cv2.morphologyEx(img_star, cv2.MORPH_GRADIENT, kernel)

    # THRESHOLD (INVERSE BINARY)
    ret, img_cube = cv2.threshold(img_cube, 0, 255, cv2.THRESH_BINARY_INV)
    ret, img_hexagon = cv2.threshold(
        img_hexagon, 0, 255, cv2.THRESH_BINARY_INV)
    ret, img_star = cv2.threshold(img_star, 0, 255, cv2.THRESH_BINARY_INV)

    # SURF - FIND KEYPOINTS AND DESCRIPTORS
    # (OpenCV 2.x API; OpenCV 3+ moved SURF to cv2.xfeatures2d.SURF_create())
    surf = cv2.SURF()
    (cube_kpts, cube_dpts) = surf.detectAndCompute(img_cube, None)
    (hexagon_kpts, hexagon_dpts) = surf.detectAndCompute(img_hexagon, None)
    (star_kpts, star_dpts) = surf.detectAndCompute(img_star, None)

    # LIBRARY IMAGE DICTIONARY - IMAGE:DESCRIPTORS
    library = {CUBE: cube_dpts, HEXAGON: hexagon_dpts, STAR: star_dpts}
    return library
Example #17
def find_lines(image):
    """
    Compute the layout from the image
    """

    draw_image = np.array(image)
    tmp_im = np.array(image)
    ret, thres_im = cv2.threshold(tmp_im, 4, 255, cv2.THRESH_BINARY)

    thres_im = ndimage.uniform_filter(thres_im, (1, 50))

    ret, thres_im = cv2.threshold(thres_im, 50, 255, cv2.THRESH_BINARY)


    lines = cv2.HoughLinesP(thres_im, 1, math.pi / 4.0, 100, None, 100, 10)
    tmp_mask = np.zeros(thres_im.shape, np.uint8)
    for l in lines[0]:
        a = angle((l[0], l[1]), (l[2], l[3]))
        if abs(a) < 1.0: 
            cv2.line(tmp_mask, (l[0], l[1]), (l[2], l[3]), 255, 1)
        
    contours, hier = cv2.findContours(tmp_mask, cv2.RETR_EXTERNAL, 
                                      cv2.CHAIN_APPROX_TC89_L1)  

    boxes = []
    mask = np.zeros(image.shape, np.uint8)
    draw_image = cv2.cvtColor(draw_image, cv2.COLOR_GRAY2BGR)
    for cnt in contours:
        box  = cv2.boundingRect(cnt)
        x, y, width, height = box
        if width > 20 and height > 2 and height < 40:
            boxes.append((x, y, width, height))
    return boxes
 def mask_shiny(self):
     pygrip.desaturate(self.ir, dst=self.tmp16_1)
     into_uint8(self.tmp16_1, dst=self.tmp8_1)
     pygrip.blur(self.tmp8_1, pygrip.MEDIAN_BLUR, 1, dst=self.tmp8_2)
     cv2.threshold(self.tmp8_2, 80, 0xff, cv2.THRESH_BINARY, dst=self.mask8)
     # grr threshold operates on matrices of unsigned bytes
     into_uint16_mask(self.mask8, dst=self.mask16)
def imgToMove(msg):
    #Convert it to an OpenCV image
    bridge = cv_bridge.CvBridge()
    cvImg = np.array(bridge.imgmsg_to_cv(msg, "bgr8"), dtype=np.uint8)
    #Threshold it, parameters from ImageSlicer.cpp
    hsvImg = cv2.cvtColor(cvImg, cv2.cv.CV_BGR2HSV)
    H,S,V = cv2.split(hsvImg)
    H = cv2.threshold(H, 165, 65536, cv2.cv.CV_THRESH_BINARY)
    S = cv2.threshold(S, 45, 65536, cv2.cv.CV_THRESH_BINARY)
    out = cv2.bitwise_and(H[1], S[1])

    #Slice it and count white pixels
    slices = 5
    counts = []
    regionWidth = out.shape[1] // slices
    for ii in range(slices):
        roi = out[0:out.shape[0],ii*regionWidth:(ii*regionWidth)+regionWidth]
        counts.append(cv2.countNonZero(roi))
        
    #Decide on a move
    bestMove = "N"
    leftCount = rightCount = 0
    for ii in range(3):
        leftCount += counts[ii]
        rightCount += counts[4-ii]
    if abs(leftCount - rightCount) > 500:
        #Enough difference, we move
        if leftCount > rightCount:
            bestMove = "L"
        else:
            bestMove = "R"
    else:
        #Not different enough, no move
        bestMove = "N"
    return bestMove
Example #20
def findHP(img):
    statuses = {'none': -1, 'dead' : 0,  'lhalf' : 1, 'mhalf' : 2, 'full' : 3}
    # hpcolor = [231, 73, 132]
    hpcolor = [107, 101, 107]
    hp = grabHP()
    gray = cv2.cvtColor(hp, cv2.COLOR_BGR2GRAY)
    ret,th1 = cv2.threshold(gray,120,255,cv2.THRESH_TOZERO_INV)
    ret,th1 = cv2.threshold(th1,100,255,cv2.THRESH_TOZERO)
    # cv2.imwrite('th1' + str(int(time.time())) + '.png',th1)

    (cnts, hierarchy) = cv2.findContours(th1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if (len(cnts) == 0):
        return statuses['dead']
    left = list(cnts[0][cnts[0][:,:,0].argmin()][0])
    px = hp[left[1], left[0]]
    # print px
    if (hpcolor != px).any():
        return statuses['none']

    leftx = list(cnts[0][cnts[0][:,:,0].argmin()][0])[0]
    rightx = list(cnts[0][cnts[0][:,:,0].argmax()][0])[0]
    diff = rightx - leftx
    if diff > 140:
        return statuses['full']
    if diff >= 75:
        return statuses['mhalf']
    if diff < 75:
        return statuses['lhalf']
    return statuses['dead']
Example #21
def get_binary_from_hsv(card):
    # convert from BGR colorspace to HSV colorspace
    hsv = cv2.cvtColor(card, cv2.COLOR_BGR2HSV)

    # separate hue, saturation, and value into three images
    hue, sat, val = [np.array([[col[i] for col in row] for row in hsv]) for i in xrange(3)]

    # get binary representation of saturation image
    # higher threshold = less white
    _, bin_sat = cv2.threshold(np.array(sat), thresh=55, maxval=255, type=cv2.THRESH_BINARY)
    # bin_sat = cv2.GaussianBlur(bin_sat, ksize=(5, 5), sigmaX=0)

    # get binary representation of value image
    # higher threshold = more white
    _, bin_val = cv2.threshold(np.array(val), thresh=140, maxval=255, type=cv2.THRESH_BINARY_INV)

    bin_sat_val = cv2.bitwise_or(bin_sat, bin_val)

    # erosion followed by morphological closing to erase noise and fill gaps
    # in shapes
    kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
    bin_sat_val = cv2.erode(bin_sat_val, kernel_e)
    bin_sat_val = cv2.morphologyEx(bin_sat_val, cv2.MORPH_CLOSE, kernel_d)

    return bin_sat_val, hue, sat, val
def create_merged_map():
    # copy sat images
    for data_type in ["train", "test", "valid"]:
        out_dir = "data/mass_merged/%s/sat" % data_type
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        for fn in glob.glob("data/mass_buildings/%s/sat/*.tiff" % data_type):
            shutil.copy(fn, "%s/%s" % (out_dir, os.path.basename(fn)))

    road_maps = dict([(os.path.basename(fn).split(".")[0], fn) for fn in glob.glob("data/mass_roads/*/map/*.tif")])

    # combine map images
    for data_type in ["train", "test", "valid"]:
        out_dir = "data/mass_merged/%s/map" % data_type
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        for fn in glob.glob("data/mass_buildings/%s/map/*.tif" % data_type):
            base = os.path.basename(fn).split(".")[0]
            building_map = cv.imread(fn, cv.IMREAD_GRAYSCALE)
            road_map = cv.imread(road_maps[base], cv.IMREAD_GRAYSCALE)
            _, building_map = cv.threshold(building_map, 0, 1, cv.THRESH_BINARY)
            _, road_map = cv.threshold(road_map, 0, 1, cv.THRESH_BINARY)
            h, w = road_map.shape
            merged_map = np.zeros((h, w))
            merged_map += building_map
            merged_map += road_map * 2
            merged_map = np.where(merged_map > 2, 2, merged_map)
            cv.imwrite("data/mass_merged/%s/map/%s.tif" % (data_type, base), merged_map)
            print(merged_map.shape, fn)
            merged_map = np.array(
                [np.where(merged_map == 0, 1, 0), np.where(merged_map == 1, 1, 0), np.where(merged_map == 2, 1, 0)]
            )
            merged_map = merged_map.swapaxes(0, 2).swapaxes(0, 1)
            cv.imwrite("data/mass_merged/%s/map/%s.png" % (data_type, base), merged_map * 255)
Example #23
	def _draw_board(self, dst):
		hsvdst = cv2.cvtColor(dst,cv2.COLOR_BGR2HSV)
		step = CHESS_SIZE/8
		black_val = cv2.getTrackbarPos('black', 'warp')
		white_val = cv2.getTrackbarPos('white', 'warp')
		yc = cv2.getTrackbarPos('ycutoff', 'warp')
		font = cv2.FONT_HERSHEY_SIMPLEX
		#dst = hsvdst[...,2]
		col = dst
		dst = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
		rv, bl_dst = cv2.threshold(dst, black_val, 255, cv2.THRESH_BINARY)
		rv, wt_dst = cv2.threshold(dst, white_val, 255, cv2.THRESH_BINARY)
		for i in range(8):
			for j in range(8):
				y = self.BORDER + step*i
				x = self.BORDER + step*j
				bl_cnt = np.sum(bl_dst[y-yc:y-yc + step,x:x+step] == 0)
				wt_cnt = np.sum(wt_dst[y-yc:y+step-yc,x:x+step] == 255)
				col[y-yc:y-yc+step,x:x+step,2] = np.zeros((step, step))
				s = str(bl_cnt) + "/" + str(wt_cnt)
				if bl_cnt > 500:
					cv2.circle(col, (x+step/2, y+step/2), 25, BLACK, -1)
				elif wt_cnt >= 75:
					cv2.circle(col, (x+step/2, y+step/2), 25, WHITE, -1)
				cv2.putText(col, s ,(x, y+step/2), font, 0.5,BLUEGREEN,2,cv2.LINE_AA)
Example #24
def investigate_threshold(frame):
    """Try different threshold types a visualize the results.

    otsu:

    Args:
        frame (np.array): Input frame.

    """

    ret1, th1 = cv2.threshold(frame, 127, 255, cv2.THRESH_BINARY)

    ret2, th2 = cv2.threshold(frame, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Otsu's thresholding after Gaussian filtering
    blur = cv2.GaussianBlur(frame, (3, 3), 0)
    ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #print th3

    # plot all the images and their histograms
    images = [frame, 0, th1,
              frame, 0, th2,
              blur, 0, th3]
    titles = ['Original Noisy Image', 'Histogram', 'Global Thresholding (v=127)',
              'Original Noisy Image', 'Histogram', "Otsu's Thresholding",
              'Gaussian filtered Image', 'Histogram', "Otsu's Thresholding"]

    for i in xrange(3):
        plt.subplot(3, 3, i * 3 + 1), plt.imshow(images[i * 3], 'gray')
        plt.title(titles[i * 3]), plt.xticks([]), plt.yticks([])
        plt.subplot(3, 3, i * 3 + 2), plt.hist(images[i * 3].ravel(), 256)
        plt.title(titles[i * 3 + 1]), plt.xticks([]), plt.yticks([])
        plt.subplot(3, 3, i * 3 + 3), plt.imshow(images[i * 3 + 2], 'gray')
        plt.title(titles[i * 3 + 2]), plt.xticks([]), plt.yticks([])
    plt.show()
def detect(capture, prev_images):
    # Capture a new frame
    new_frame = capture.grab_frame()

    # Not enough frames: no detection, just store this one
    if len(prev_images) < 2:
        return None, None, new_frame, None

    # Everything to grayscale
    prev_images = [prev_images[1], prev_images[0], new_frame]
    prev_images = [cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
                   for prev_frame in prev_images]
    prev_frame, current_frame, next_frame = prev_images

    # Diff
    d1 = cv2.absdiff(prev_frame, next_frame)
    d2 = cv2.absdiff(next_frame, current_frame)
    motion = cv2.bitwise_and(d1, d2)

    # Threshold & erode
    cv2.threshold(motion, config.DIFF_THRESHOLD, 255, cv2.THRESH_BINARY,
                  dst=motion)
    cv2.erode(motion, kernel_ero, dst=motion)

    # Find and count changes
    number_of_changes, location, std_dev = detect_motion(motion)

    return number_of_changes, std_dev, new_frame, location
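
detect_motion is defined elsewhere; judging from how its return values are used, a minimal sketch might count the changed pixels and report their mean position and coordinate spread (the exact original logic is an assumption):

import numpy as np

def detect_motion(motion):
    # summarize a binary motion mask
    ys, xs = np.nonzero(motion)
    if len(xs) == 0:
        return 0, None, 0.0
    location = (int(xs.mean()), int(ys.mean()))         # mean position of changed pixels
    std_dev = float(np.std(np.column_stack((xs, ys))))  # spread of the changed pixels
    return len(xs), location, std_dev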
 def ComputeDescriptors(self,RGB,Depth,dep_mask,h):
     dep = np.float32(Depth)
     dep_mask =cv2.bitwise_not(dep_mask)
     ret, mask = cv2.threshold(dep, 1.7, 1, cv2.THRESH_BINARY_INV)
     mask = np.uint8(mask)
     ret, mask2 = cv2.threshold(dep, 0.01, 1, cv2.THRESH_BINARY)
     mask2 = np.uint8(mask2)
     mask = cv2.bitwise_and(mask,mask2)
     mask = cv2.bitwise_and(mask,dep_mask)
     if h:
         masked_data = cv2.bitwise_and(RGB, RGB, mask=mask)
         masked_data = cv2.bitwise_and(masked_data, masked_data, mask=mask2)
         sp = cv2.cvtColor(masked_data, cv2.COLOR_RGB2GRAY)
         sp = cv2.GaussianBlur(sp, (5, 5),10)
         fd, imn = hog(dep, self.orientations, self.pixels_per_cell, self.cells_per_block,
                       self.visualize, self.normalize)
         if self.HogDepth:
             fdn,im = hog(sp, self.orientations, self.pixels_per_cell, self.cells_per_block,
                   self.visualize, self.normalize)
             fd = np.concatenate((fd, fdn))
     else:
         fd = []
         masked_data = None  # keep the return value defined when h is False
     fgrid = np.array([])
     for i in xrange(4):
         for j in xrange(4):
             sub = RGB[25*i:25*(i+1),25*j:25*(j+1)]
             sub_mask = mask[25*i:25*(i+1),25*j:25*(j+1)]
             fsub = self.ComputeHC(sub,sub_mask)
             fgrid = np.concatenate((fgrid,fsub))
     fd2 = fgrid.copy()
     return fd,fd2,masked_data
    def ReadImage(self, fileIndex, show):
        # read in image
        # print "Read in image: ", fileIndex
        colorFileName = 'trainImageDistorted\\color_'+str(fileIndex)+'.jpg'
        depthFileName = 'trainImageDistorted\\depth_'+str(fileIndex)+'.jpg'
        maskFileName = 'trainImageDistorted\\mask_'+str(fileIndex)+'.jpg'
        labelFileName = 'trainImageDistorted\\label_'+str(fileIndex)+'.jpg'
        imgBGR = cv2.imread(colorFileName, 1)
        imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
        imgDepth = cv2.imread(depthFileName, 0)
        imgMaskGray = cv2.imread(maskFileName, 0)
        # erod the mask so that the boundary won't bother us
        # imgMaskGray = cv2.erode(imgMaskGray, np.ones((5,5),np.uint8))
        _, imgMask = cv2.threshold(imgMaskGray,127,255,cv2.THRESH_BINARY)

        # the label
        imgLabel = cv2.imread(labelFileName, 1)
        imgLabelGray = cv2.cvtColor(imgLabel, cv2.COLOR_BGR2GRAY)
        _, imgLabelBW = cv2.threshold(imgLabelGray,10,255,cv2.THRESH_BINARY)

        # rescale depth and mask
        imgDepth, imgMask = self.RescaleDepth(imgBGR, imgDepth, imgMask)

        # histogram equalization on the gray image
        imgGrayHistEqu = self.IlluminationNorm(imgGray, show = False)

        if show == True:
            cv2.imshow("imgBGR", imgBGR)
            cv2.imshow("imgGray", imgGray)
            cv2.imshow('imgDepth', imgDepth)
            cv2.imshow('imgMask', imgMask)
            cv2.imshow('imgLabelBW', imgLabelBW)
            WaitKey(30)

        return imgBGR, imgGray, imgGrayHistEqu, imgDepth, imgMask, imgLabelBW
def draw_circle(event,x,y,flags,param):
    global i
    global punkte
    if event == cv2.EVENT_LBUTTONDBLCLK:
        draw_overlay[:] = 0
        cv2.circle(draw_overlay,(x,y),1,(0,0,255),2)
        img2gray = cv2.cvtColor(draw_overlay,cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        rows,cols,channels = img.shape
        roi = img[0:rows, 0:cols ]
        img_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
        dst = cv2.add(img_bg,draw_overlay)
        font = cv2.FONT_HERSHEY_SIMPLEX
        text = 'X: ' + str(x) + ' Y: ' + str(y) + '   Press Escape to close the Window!'
        cv2.putText(dst, text, (10, 20), font, 0.5,
                    (0,0,0), 1, cv2.LINE_AA)
        cv2.imshow('image',dst)
        
        punkte[i,0] = x
        punkte[i,1] = y
        
        i = i + 1
        print
        print punkte
        
        if i>3:
            i = 0
            deform()
Example #29
def scan2hdf5 (out_path, image_dir, label_dir, folds=0, resize=255, gray=False, useall=False):
    images = find_images(image_dir)
    labels = find_images(label_dir)
    all = []
    chs = 1 if gray else 3
    for key, ipath in images.iteritems():
        image = cv2.imread(ipath, cv2.IMREAD_GRAYSCALE if gray else cv2.IMREAD_COLOR)
        image = cv2.resize(image, (resize, resize)).reshape(1, chs, resize, resize)
        lpath = labels.get(key, None)
        if lpath:
            label = cv2.imread(lpath, cv2.IMREAD_GRAYSCALE)
            #_,label = cv2.threshold(label, 1,255,cv2.THRESH_BINARY_INV)
            contour_img = lpath
            if 'c_' in lpath:
                _,label = cv2.threshold(label, 1,255,cv2.THRESH_BINARY_INV)
            else:
                _,label = cv2.threshold(label, 127,255,cv2.THRESH_BINARY_INV)
            label = cv2.resize(label, (resize, resize)).reshape(1, 1, resize, resize)
        elif useall:    # no label, set all negative
            label = np.zeros((1, 1, resize, resize), dtype=np.uint8)
        else:
            logging.warning('no label found for {}'.format(ipath))
            continue
        all.append((image, label))
        pass
    logging.info("found {} images".format(len(all)))
    save_hd5py(out_path, all, folds)
    pass
Example #30
def findTarget(img):    
    template_tg = cv2.imread('template_target2.png', 0)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret,th1 = cv2.threshold(gray,253,255,cv2.THRESH_TOZERO_INV)
    ret,th3 = cv2.threshold(th1,251,255,cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 15))
    closed = cv2.morphologyEx(th3, cv2.MORPH_CLOSE, kernel)
    closed = cv2.erode(closed, None, iterations = 3)
    closed = cv2.dilate(closed, None, iterations = 2)
    (cnts, hierarchy) = cv2.findContours(closed,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

    approxes = []
    hulls = []
    for cnt in cnts:
        approxes.append(cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True))
        hulls.append(cv2.convexHull(cnt))
        left = list(cnt[cnt[:,:,0].argmin()][0])        
        right = list(cnt[cnt[:,:,0].argmax()][0])
        print 'left x' + str(left[0])+ 'y '+ str(left[1])
        print 'right x' + str(right[0])+ 'y '+ str(right[1])
        center = round((right[0]+left[0])/2)
        center = int(center)
        moveMouse(center-10,left[1]+70)
        if (findFromTargeted(template_tg, left, right)):
            autoit.mouse_click('left', center-10, left[1]+70)
            return True
        pyautogui.moveTo(center,left[1]+70)
        moveMouse(center,left[1]+70)
        if (findFromTargeted(template_tg, left, right)):
            autoit.mouse_click('left', center+10, left[1]+70)
            return True
def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text):
    # prepare data
    linkmap = linkmap.copy()
    textmap = textmap.copy()
    img_h, img_w = textmap.shape

    """ labeling method """
    ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
    ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)

    text_score_comb = np.clip(text_score + link_score, 0, 1)
    nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        text_score_comb.astype(np.uint8), connectivity=4
    )

    det = []
    mapper = []
    for k in range(1, nLabels):
        # size filtering
        size = stats[k, cv2.CC_STAT_AREA]
        if size < 10:
            continue

        # thresholding
        if np.max(textmap[labels == k]) < text_threshold:
            continue

        # make segmentation map
        segmap = np.zeros(textmap.shape, dtype=np.uint8)
        segmap[labels == k] = 255

        # remove link area
        segmap[np.logical_and(link_score == 1, text_score == 0)] = 0

        x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
        w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
        niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
        sx, ex, sy, ey = (x - niter, x + w + niter + 1, y - niter, y + h + niter + 1)
        # boundary check
        if sx < 0:
            sx = 0
        if sy < 0:
            sy = 0
        if ex >= img_w:
            ex = img_w
        if ey >= img_h:
            ey = img_h
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1 + niter, 1 + niter))
        segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)

        # make box
        np_temp = np.roll(np.array(np.where(segmap != 0)), 1, axis=0)
        np_contours = np_temp.transpose().reshape(-1, 2)
        rectangle = cv2.minAreaRect(np_contours)
        box = cv2.boxPoints(rectangle)

        # boundary check, because minAreaRect may return out-of-range values
        # (see https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga3d476a3417130ae5154aea421ca7ead9)
        for p in box:
            if p[0] < 0:
                p[0] = 0
            if p[1] < 0:
                p[1] = 0
            if p[0] >= img_w:
                p[0] = img_w
            if p[1] >= img_h:
                p[1] = img_h

        # align diamond-shape
        w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
        box_ratio = max(w, h) / (min(w, h) + 1e-5)
        if abs(1 - box_ratio) <= 0.1:
            l, r = min(np_contours[:, 0]), max(np_contours[:, 0])
            t, b = min(np_contours[:, 1]), max(np_contours[:, 1])
            box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)

        # make clock-wise order
        startidx = box.sum(axis=1).argmin()
        box = np.roll(box, 4 - startidx, 0)
        box = np.array(box)

        det.append(box)
        mapper.append(k)

    return det, labels, mapper
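
A quick way to exercise getDetBoxes_core is with a synthetic score map; the threshold values below are illustrative, not taken from the snippet:

import numpy as np

textmap = np.zeros((64, 128), dtype=np.float32)
linkmap = np.zeros((64, 128), dtype=np.float32)
textmap[20:40, 30:90] = 0.9  # one synthetic word region

det, labels, mapper = getDetBoxes_core(
    textmap, linkmap, text_threshold=0.7, link_threshold=0.4, low_text=0.4)
print(len(det), det[0])  # expect one 4-point box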
Example #32
    def splitImg(self):
        # check whether two contours come within a given distance of each other
        def find_if_close(cnt1, cnt2, distance):
            row1, row2 = cnt1.shape[0], cnt2.shape[0]
            for i in xrange(row1):
                for j in xrange(row2):
                    dist = np.linalg.norm(cnt1[i] - cnt2[j])
                    if abs(dist) < distance:
                        return True
                    elif i == row1 - 1 and j == row2 - 1:
                        return False

        # take an array of contours and return a distance-based grouping level for each
        def getStatus(contours, distance):
            LENGTH = len(contours)
            status = np.zeros((LENGTH, 1))  # stores each contour's group level; contours with the same level are merged

            for i, cnt1 in enumerate(contours):
                x = i
                if i != LENGTH - 1:
                    for j, cnt2 in enumerate(contours[i + 1:]):
                        x = x + 1
                        dist = find_if_close(cnt1, cnt2, distance)
                        if dist == True:
                            val = min(status[i], status[x])
                            status[x] = status[i] = val
                        else:
                            if status[x] == status[i]:
                                status[x] = i + 1
            return status

        # merge the contour groups
        def MergeEachCnts(contours, distance, unified=[], excuteTimes=0):
            '''
            :param contours: contours whose pairwise distances are to be checked
            :param unified:  contours that have already been checked and merged
            :return:
            '''
            # print('============\nexecution count: ' + str(excuteTimes))
            unsucess = []  # contours whose merged area is too large, re-checked on the next pass
            # get the distance-based grouping of the contours
            status = getStatus(contours, distance)
            # print('status:\n')
            # print(status)
            # print('areas and width:\n')
            maximum = int(status.max()) + 1
            for i in xrange(maximum):
                pos = np.where(status == i)[0]
                if pos.size != 0:
                    # merge the contours in this group; indices in pos map to indices in the contour array
                    cont = np.vstack([contours[i] for i in pos])
                    # hull = cv2.convexHull(cont)  # convert the merged contour to a convex hull
                    # if the area exceeds 200, two digits were merged by mistake
                    area = cv2.contourArea(cont)
                    (x, y, w, h) = cv2.boundingRect(cont)
                    # print(area)
                    # print(x, y, w, h)
                    # only add to the mis-merged array when (area > 200 or width > 20 or height > 20) and distance > 0
                    if (area > 200 or w > 20 or h > 20) and distance > 0:
                        for i in pos:
                            unsucess.append(contours[i])
                    # if distance has already dropped to 0 or below, add the original unmerged contours to unified
                    elif area > 200 and distance <= 0:
                        for i in pos:
                            unified.append(contours[i])
                    # if area < 26 and both width and height < 9, treat it as noise
                    elif (area < 26 and w < 9 and h < 9):
                        pass
                    else:
                        unified.append(cont)
            if len(unsucess) > 0:
                return MergeEachCnts(unsucess, distance - 3, unified,
                                     excuteTimes + 1)
            else:
                return unified

        # binarize the image for contour detection
        colorIm = self.im
        self.im = cv2.cvtColor(self.im, cv2.COLOR_BGR2GRAY)
        self.retval, self.im = cv2.threshold(self.im, 200, 255,
                                             cv2.THRESH_BINARY_INV)
        # find the contours
        _, contours, hierarchy = cv2.findContours(self.im.copy(),
                                                  cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)

        # get each contour's bounds and area, and filter out contours that are too small
        contours = [c for c in contours if 4 < cv2.contourArea(c) < 1000]
        # merge neighboring contours
        unified = MergeEachCnts(contours, 7)

        a = colorIm.copy()
        cv2.drawContours(a, contours, -1, (255, 0, 0), 1)
        self.dicImg.update({"找出輪廓(合併前)": a})
        cv2.drawContours(colorIm, unified, -1, (255, 0, 0), 1)
        self.dicImg.update({"找出輪廓(合併後)": colorIm.copy()})

        # sort the contours by x coordinate
        unified = sorted([(c, cv2.boundingRect(c)[0], cv2.contourArea(c))
                          for c in unified],
                         key=lambda x: x[1])
        # then remove contours that are too small
        unified = [c for c, v, a in unified if 15 < a < 200]
        for index, c in enumerate(unified):
            (x, y, w, h) = cv2.boundingRect(c)
            # print('current contour rect: '+str(x)+' '+ str(y)+' '+ str(w)+' '+ str(h))
            try:
                # only keep contours wider and taller than 7 as digits
                if w > 7 and h > 7:
                    add = True
                    for i, img in enumerate(self.arr):
                        # distance between this contour and the others' centroids; if < 10 the two contours overlap (e.g. 6, 0, 9 can cause this misdetection)
                        dist = math.sqrt((x - img[0])**2 + (y - img[1])**2)
                        # print('distance: '+str(dist))
                        if dist <= 10:
                            add = False
                            break
                    if add:
                        self.arr.append((x, y, w, h))

            except IndexError:
                pass
        Imgarr = [self.im[y:y + h, x:x + w] for x, y, w, h in self.arr]
        self.dicImg.update({"圖片切割": Imgarr})
        return Imgarr
Example #33
def newCircles(imgOrg, org, circles,accept):
    newCircles = []
    cir = []
    accept = set(accept)
    imgRes = copy.deepcopy(imgOrg)
    for i in range(len(circles)):
        x0 = circles[i][1][0]
        y0 = circles[i][1][1]
        radius = circles[i][1][2]
        name = (str(x0) + str(y0))
        acc = False
        if (name in accept):
            acc = True
        nimg = np.ones_like(imgOrg)*255
        nimg = np.where(imgOrg == 0, 0, nimg)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
        nimg = cv2.morphologyEx(nimg, cv2.MORPH_ERODE, kernel)

        lin = 1
        if(nimg[x0][y0] == 0):
            lin +=10
            while True:
                if(lin > radius):
                    lin -= 1
                else:
                    break
            while True:
                if(nimg[x0+lin][y0] == 255):
                    break
                else:
                    lin -= 1
        else:
            per = radius*50//100
            while True:
                lin +=1
                r = x0 + lin
                if(nimg[r][y0] == 0):
                    break
                if(r >= (x0+radius-per)):
                    lin = 0
                    break
            while lin > 0:
                lin +=1
                r = x0 + lin
                if(nimg[r][y0] == 255):
                    break
                if(r >= (x0+radius-per)):
                    lin = 0
                    break

        point = (x0+lin,y0)
        label = 100

        nimg, counter = us.bfs4neig(nimg,point,label)
        imgcir = np.ones_like(imgOrg)*255

        img = np.ones_like(imgOrg)*0
        img = np.where(nimg==label,255,img)
        minX = np.min(np.where(nimg == label)[0])
        maxX = np.max(np.where(nimg == label)[0])
        minY = np.min(np.where(nimg == label)[1])
        maxY = np.max(np.where(nimg == label)[1])
        radX = (maxX - minX) // 2
        radY = (maxY - minY) // 2
        medX = radX + minX
        medY = radY + minY
        if (radX > radY):
            rad = radX
        else:
            rad = radY
        orgAr = radius**2 * 3.14
        newAr = rad**2 * 3.14

        imgRealSize = np.ones_like(imgOrg)*255

        imgRealSize = drawCircle(imgRealSize,medX,medY,rad)
        imgRealSize, counter = us.bfs4neig(imgRealSize,(medX,medY),label)
        sizeEll = us.sizeEllipse(newAr)

        sizeImg = imgOrg.shape[0]* imgOrg.shape[1]

        loop = False
        if (sizeImg < newAr):
            loop = True
        if (not loop):
            imgcir = np.where(nimg == label, 0, imgcir)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(sizeEll,sizeEll))
            imgcir = cv2.morphologyEx(imgcir, cv2.MORPH_OPEN, kernel)

            imglabel = np.ones_like(imgOrg)*255
            imglabel = np.where(imgcir==0,org,imglabel)
            imglabel = np.where(imglabel >= 245, 255, imglabel)
            imglabel = cv2.threshold(imglabel,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
            text = ocr.labelToText(imglabel)

            sizeEdge,acc = discoverEdge(imgOrg,acc,minX,maxX,minY,maxY,medX,medY)

            rad = (sizeEdge//3) + rad
            imgcir = cv2.threshold(imgcir,0,255,cv2.THRESH_BINARY_INV)[1]
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(sizeEdge,sizeEdge))
            imgcir = cv2.morphologyEx(imgcir, cv2.MORPH_DILATE, kernel)
            imgRes = imgRes + imgcir

            imgcir = cv2.Canny(imgcir,100,200)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(sizeEdge//3,sizeEdge//3))
            imgcir = cv2.morphologyEx(imgcir, cv2.MORPH_DILATE, kernel)
            label = 255
            minX = np.min(np.where(imgcir == label)[0])
            maxX = np.max(np.where(imgcir == label)[0])
            minY = np.min(np.where(imgcir == label)[1])
            maxY = np.max(np.where(imgcir == label)[1])
            point = (minX,minY)

            cir = set(cir)
            if(str(medX)+str(medY) not in cir):
                newCircles.append((circles[0],(medX,medY,rad),loop,acc,text,point,imgcir[minX:maxX,minY:maxY]))
            cir = list(cir)
            cir.append(str(medX)+str(medY))
            name = 'img'+str(x0)+str(y0)

        else:
            newCircles.append((circles[0],(x0,y0,radius),loop,acc,'',(0,0),[]))

    return newCircles,imgRes
Example #34
# # In the real world, you'd replace this section with code to grab a real
# # CAPTCHA image from a live website.
# captcha_image_files = list(paths.list_images(CAPTCHA_IMAGE_FOLDER))
# captcha_image_files = np.random.choice(captcha_image_files, size=(10,), replace=False)
#
# # loop over the image paths
# for image_file in captcha_image_files:
# Load the image and convert it to grayscale
image = cv2.imread("C:\\Users\\Administrator\\Desktop\\pic.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Add some extra padding around the image
image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)

# threshold the image (convert it to pure black and white)
thresh = cv2.threshold(image, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

# find the contours (continuous blobs of pixels) in the image
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)

# Hack for compatibility with different OpenCV versions
contours = contours[0] if imutils.is_cv2() else contours[1]

letter_image_regions = []

# Now we can loop through each of the four contours and extract the letter
# inside of each one
for contour in contours:
    # Get the rectangle that contains the contour
    (x, y, w, h) = cv2.boundingRect(contour)
    # minimal completion of the truncated loop: keep the letter's bounding box
    letter_image_regions.append((x, y, w, h))

Example #35
    cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                 (frame.shape[1], int(cap_region_y_end * frame.shape[0])), (255, 0, 0), 2)
    cv2.imshow('original', frame)

    #  Main operation
    if isBgCaptured == 1:  # this part wont run until background captured
        img = removeBG(frame)
        img = img[0:int(cap_region_y_end * frame.shape[0]),
                    int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]  # clip the ROI
        cv2.imshow('mask', img)

        # convert the image into binary image
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
        cv2.imshow('blur', blur)
        ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY)
        cv2.imshow('ori', thresh)


        # get the coutours
        thresh1 = copy.deepcopy(thresh)
        _,contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        length = len(contours)
        maxArea = -1
        if length > 0:
            for i in range(length):  # find the biggest contour (according to area)
                temp = contours[i]
                area = cv2.contourArea(temp)
                if area > maxArea:
                    maxArea = area
                    ci = i
Example #36
            cnt += 1
            continue

        next = frame.copy()
        next_gray = cv2.cvtColor(next, cv2.COLOR_BGR2GRAY)
        next_gray = cv2.GaussianBlur(next_gray, (5, 5), 0)
        next_dif = cv2.absdiff(gray, next_gray)

        prev_next_dif = cv2.absdiff(prev_gray, next_gray)

        cv2.imshow('prev', prev)
        cv2.imshow('curr', curr)
        cv2.imshow('next', next)

        # dif=cv2.addWeighted(next_dif,0.5,prev_dif,0.5,0)
        _, next_thr = cv2.threshold(next_dif, 5, 255, cv2.THRESH_BINARY)
        _, prev_thr = cv2.threshold(prev_dif, 5, 255, cv2.THRESH_BINARY)
        _, prev_next_thr = cv2.threshold(prev_next_dif, 5, 255,
                                         cv2.THRESH_BINARY)
        # thr=cv2.bitwise_or(prev_thr,next_thr)
        # _, thr = cv2.threshold(dif, 30, 255, cv2.THRESH_BINARY)

        # print(np.max(np.mean([next_dif,prev_dif])))

        prev_contours, hierachy = cv2.findContours(prev_thr, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        prev_action_mask = np.zeros_like(gray)
        for cntr in prev_contours:
            # if cv2.arcLength(cntr, True) >= 1000:
            # if cv2.contourArea(cntr)<3000:
            cv2.drawContours(image=visual, contours=[cntr], contourIdx=-1,
                             color=255, thickness=-1)  # arguments after image= are assumed; the source line is truncated

Example #37
import cv2
import numpy as np
def nothing(x):
    pass

img = cv2.imread('../debug/circle_ring.jpg',1)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

# 1. the official approach
_,thresh = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# default
image,contours,hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# sharper (keeps every contour point)
# image,contours,hierachy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

image_ = np.copy(img)

# fill the contours; step one done
# cv2.drawContours(image_,contours,2,(215, 215, 180),-1)
# cv2.drawContours(image_,contours,4,(215, 215, 180),-1)
# cv2.drawContours(image_,contours,6,(215, 215, 180),-1)

# outline the contours for the next processing step
cv2.drawContours(image_,contours,2,(0, 0, 255),2)
cv2.drawContours(image_,contours,4,(0, 0, 255),2)
cv2.drawContours(image_,contours,6,(0, 0, 255),2)

cv2.imshow("demo",image_)
cv2.waitKey(0)

Example #38
import numpy as np
import cv2

img = cv2.imread('faces.jpeg',1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h = hsv[:,:,0]
s = hsv[:,:,1]
v = hsv[:,:,2]

hsv_split = np.concatenate((h,s,v), axis=1)
cv2.imshow("Split HSV",hsv_split)

ret, min_sat = cv2.threshold(s,40,255, cv2.THRESH_BINARY)
cv2.imshow("Sat Filter",min_sat)

ret, max_hue = cv2.threshold(h,15, 255, cv2.THRESH_BINARY_INV)
cv2.imshow("Hue Filter",max_hue)

final = cv2.bitwise_and(min_sat,max_hue)
cv2.imshow("Final",final)
cv2.imshow("Original",img)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #39
def warp_process_image(img):
    global nwindows
    global margin
    global minpix
    global lane_bin_th

    blur = cv2.GaussianBlur(img, (5, 5), 0)
    _, L, _ = cv2.split(cv2.cvtColor(blur, cv2.COLOR_BGR2HLS))
    _, lane = cv2.threshold(L, lane_bin_th, 255, cv2.THRESH_BINARY)

    histogram = np.sum(lane[lane.shape[0] // 2:, :], axis=0)
    midpoint = int(histogram.shape[0] / 2)
    leftx_current = np.argmax(histogram[:midpoint])
    rightx_current = np.argmax(histogram[midpoint:]) + midpoint

    window_height = int(lane.shape[0] / nwindows)
    nz = lane.nonzero()

    left_lane_inds = []
    right_lane_inds = []

    lx, ly, rx, ry = [], [], [], []

    out_img = np.dstack((lane, lane, lane)) * 255

    for window in range(nwindows):

        win_yl = lane.shape[0] - (window + 1) * window_height
        win_yh = lane.shape[0] - window * window_height

        win_xll = leftx_current - margin
        win_xlh = leftx_current + margin
        win_xrl = rightx_current - margin
        win_xrh = rightx_current + margin

        cv2.rectangle(out_img, (win_xll, win_yl), (win_xlh, win_yh),
                      (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xrl, win_yl), (win_xrh, win_yh),
                      (0, 255, 0), 2)

        good_left_inds = ((nz[0] >= win_yl) & (nz[0] < win_yh) &
                          (nz[1] >= win_xll) & (nz[1] < win_xlh)).nonzero()[0]
        good_right_inds = ((nz[0] >= win_yl) & (nz[0] < win_yh) &
                           (nz[1] >= win_xrl) & (nz[1] < win_xrh)).nonzero()[0]

        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nz[1][good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nz[1][good_right_inds]))

        lx.append(leftx_current)
        ly.append((win_yl + win_yh) / 2)

        rx.append(rightx_current)
        ry.append((win_yl + win_yh) / 2)

    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    #left_fit = np.polyfit(nz[0][left_lane_inds], nz[1][left_lane_inds], 2)
    #right_fit = np.polyfit(nz[0][right_lane_inds] , nz[1][right_lane_inds], 2)

    lfit = np.polyfit(np.array(ly), np.array(lx), 2)
    rfit = np.polyfit(np.array(ry), np.array(rx), 2)

    out_img[nz[0][left_lane_inds], nz[1][left_lane_inds]] = [255, 0, 0]
    out_img[nz[0][right_lane_inds], nz[1][right_lane_inds]] = [0, 0, 255]
    cv2.imshow("viewer", out_img)

    #return left_fit, right_fit
    return lfit, rfit
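
The fits returned above are second-order polynomials in the image row y, so sampling them gives the lane x positions at any row, e.g. to locate the lane center (warped_frame and the row value are illustrative):

import numpy as np

lfit, rfit = warp_process_image(warped_frame)  # warped_frame: bird's-eye BGR image
y_eval = 400                                   # image row at which to sample
left_x = np.polyval(lfit, y_eval)
right_x = np.polyval(rfit, y_eval)
lane_center_x = (left_x + right_x) / 2.0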
Example #40
	for ind in indices:
		for subset in itertools.combinations(ind, 2):
			a_list.append(tuple(subset))
	a = list(set(a_list))
	Graph.add_edges_from(a)
	return Graph


# Read image
image = cv2.imread("/home/mark/Documents/CorGraphProjectGit/CorticalVision/TCS10.PNG",0)


# Create binary image
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

# Find contours
(_, contours, _) = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

print("Found %d components." % len(contours))
centroids = []
G = nx.Graph()
tempInt = 0

for c in contours:
	M = cv2.moments(c)
	temp = []
	temp.append(int(M["m10"] / M["m00"]))
	temp.append(int(M["m01"] / M["m00"]))
	print(M["m00"])
Example #41
  def execute(self):
    img=self.img

    #gray is used for classification and search, gray_c only for search
    gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,8))
    gray_c = clahe.apply(gray)

    #guess 'white' color
    epsilon=0.0001
    white=np.median(gray)
    white=np.mean(gray[gray>white-epsilon])
    white_c=np.median(gray_c)
    white_c=np.mean(gray_c[gray_c>white_c-epsilon])

    window=21
    #adaptive mean
    th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, window, 2)
    #adaptive gaussian
    th3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, window, 2)
    #Otsu's
    _,th4 = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #adaptive mean
    th_c2 = cv2.adaptiveThreshold(gray_c, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, window, 2)
    #adaptive gaussian
    th_c3 = cv2.adaptiveThreshold(gray_c, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, window, 2)
    #Otsu's
    _,th_c4 = cv2.threshold(gray_c,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

    #extend borders(probably only needed for detectMultiscale)
    img=cv2.copyMakeBorder(img, 4, 4, 2, 2, cv2.BORDER_CONSTANT, value=(int(white), int(white), int(white)))
    gray=cv2.copyMakeBorder(gray, 4, 4, 2, 2, cv2.BORDER_CONSTANT, value=int(white))
    gray_c=cv2.copyMakeBorder(gray_c, 4, 4, 2, 2, cv2.BORDER_CONSTANT, value=int(white_c))
    ths=[th2, th3, th4, th_c2, th_c3, th_c4]
    for i in xrange(len(ths)):
      ths[i]=cv2.copyMakeBorder(ths[i], 4, 4, 2, 2, cv2.BORDER_CONSTANT, value=255)
    th2, th3, th4, th_c2, th_c3, th_c4 = ths

    self.debug(img, "svm_img")
    self.debug(gray, "svm_gr")
    self.debug(gray_c, "svm_gr_c")
    self.debug(th2, "svm_th2")
    self.debug(th3, "svm_th3")
    self.debug(th4, "svm_th4")
    self.debug(th_c2, "svm_th_c2")
    self.debug(th_c3, "svm_th_c3")
    self.debug(th_c4, "svm_th_c4")

    plate=[]

    min_height=10
    min_width=5
    min_area=70
    epsilon=0.00001

    @memoize
    def max_score_hsplit(box, n=3):
      x,y,w,h=box
      l,s=max_score(box)

      if s<0.0:
        l_s=[(s, [(l,s,box)])]
      else:
        l_s=[(epsilon, [(l,epsilon,box)])]

      if n>1:
        for w0 in xrange(1, w):
          if w0*h<min_area or w0<min_width or h<min_height:
            l0, s0=(None, epsilon)
          else:
            l0, s0=max_score((x,y,w0,h))

          if (w-w0)*h<min_area or (w-w0)<min_width or h<min_height:
            s1, ls1=(epsilon, [(None, epsilon, (x+w0,y,w-w0,h))])
          else:
            s1, ls1=max_score_hsplit((x+w0,y,w-w0,h),n-1)

          if s0>epsilon:
            s0=epsilon

          score=(s0*w0+s1*(w-w0)+0.0)/w

          l_s+=[(score, [(l0, s0, (x,y,w0,h))]+ls1)]
      return min(l_s)

    #functions defined as closures to avoid passing multiple and/or complex arguments,
    #which allows memoize_simple use and auto-resets after execute completion
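    # (memoize/memoize_simple are not shown in this snippet; presumably they are
    # dict-backed caching decorators keyed on the box tuple, roughly:
    #   def memoize_simple(f):
    #       cache={}
    #       def g(box):
    #           if box not in cache:
    #               cache[box]=f(box)
    #           return cache[box]
    #       return g
    # )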

    def compute_hog(box):
      X,Y,W,H=box
      gray_=gray[Y-1:Y+H+1, X-1:X+W+1] #FIXME should check area bounds

      winSize = (20, 30)
      blockSize = (4,6)
      blockStride = (2,3)
      cellSize = (2,3)
      nbins=9

      winStride = (20,30)
      padding = (0,0)

      gray_=cv2.resize(gray_, winSize, interpolation = cv2.INTER_CUBIC)

      hog=cv2.HOGDescriptor(winSize, blockSize,blockStride,cellSize, nbins)
      desc = hog.compute(gray_, winStride, padding, ((0, 0),))

      return desc

    letters=['1','2','3','4','5','6','7','8','9','0O','A','B','C','E','H','K','M','P','T','X','Y']
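    # '0O' is presumably a single merged class: digit zero and letter O look identical on plates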
    @memoize_simple
    def max_score(box):
      x,y,w,h=box

      if w*h<min_area or w<min_width or h<min_height:
        return (None, 1.0)

      desc=compute_hog(box)

      l_s=[(l, -self.svm_letters[l].predict(desc, returnDFVal=True)) for l in letters]

      return min(l_s, key=lambda x: x[1])

    letter_ligatures=['8dot', 'dotO', 'dotM', 'dotB', 'dotC', 'dotH', 'dotE', 'dotP']
    @memoize_simple
    def max_score_ligatures(box):
      x,y,w,h=box
      if w*h<min_area or w<min_width or h<min_height:
        return (None, 1.0)

      desc=compute_hog(box)

      l_s=[(l, -self.svm_letters[l].predict(desc, returnDFVal=True)) for l in letter_ligatures]
      return min(l_s, key=lambda x: x[1])

    h1_candidates=[10,5]
    h2_candidates=[16,22]

    @memoize_simple
    def max_score_vsplit(box):
      x,y,w,h=box
      l_s=[]
      min_score=1.0
      min_letter=None
      min_box=(x,y,w,h)

      for h1 in h1_candidates:
        for h2 in h2_candidates:
          l,s=max_score((x,h1,w,h2))
          s=s*h2/(h+0.0)
          if s<min_score:
            min_score=s
            min_letter=l
            min_box=(x,h1,w,h2)

      return min_letter, min_score, min_box

    def max_score_hsplit3(box):
      x,y,w,h=box

      min_score=1.0
      min_letter=None
      min_box=(x,y,w,h)

      for w1 in xrange(0,min(w-min_width,10)):

        for w2 in xrange(min_width, min(w-w1,16)):
          b_=(x+w1,y,w2,h)

          l,s,b=max_score_vsplit(b_)
          s=s/(w+0.0)*w2
          if s<min_score:
            min_score=s
            min_letter=l
            min_box=b

      return min_score, min_letter, min_box

    #replacing original compute_hog with memoized version
    #will be restored after ligatures detection
    compute_hog_raw=compute_hog
    compute_hog=memoize_simple(compute_hog)

    boxes=[]
    for th in ths:
      boxes+=self.get_boxes_from_contour(th, gray)
    boxes=list(set(boxes)) #get uniq boxes

    #annotate each box with name for debug, letter, score, cropped image
    boxes=[box+(str(box), None, 1.0, None) for box in boxes]

    #search all boxes for letters
    boxes_left=[]
    while boxes:
      X,Y,W,H,m,min_letter,min_score,b_img = boxes.pop()

      b_img=gray[Y-1:Y+H+1, X-1:X+W+1]
      self.debug(b_img, "svm1_t_"+str(m))

      min_letter, min_score=max_score((X,Y,W,H))

      if min_score<0:
        self.debug(b_img, "svm1_f_"+min_letter+"_"+str(m))
        plate+=[(min_letter, (X,Y,W,H), -min_score)]
      else:
        boxes_left+=[(X,Y,W,H,m,min_letter,min_score, b_img)]

    #prune plate, destructive to original
    plate=prune_plate(plate, threshold=0.799)

    #are we done?
    #RUSSIAN PLATE TYPE1 SPECIFIC
    alphas, nums, alphanums=get_stats_symbols(plate)
    if alphanums>=9:
      return TaskResultSVMLetterDetector(plate)

    #prune boxes by content
    hranges=get_free_hranges(gray, plate, 2)
    hranges=range_diff_many([(0,gray.shape[1])], [(r[0], r[1]-r[0]+1) for r in hranges])
    hranges=[r for r in hranges if r[1]>0]

    boxes=boxes_left
    boxes_left=[]
    while boxes:
      X,Y,W,H,m,min_letter,min_score,b_img = boxes.pop()

      fr=range_diff_many([(X,W)], hranges)
      for r in fr:
        X, W=r
        if W<min_width:
          continue
        b_img=gray[Y-1:Y+H+1, X-1:X+W+1]
        min_letter, min_score=max_score((X,Y,W,H))
        m_r=str(m)+"_"+str(r)
        boxes_left+=[(X,Y,W,H,m_r,min_letter,min_score, b_img)]
        self.debug(b_img, "svm1_t2_"+str(m_r))

    #search known 'ligatures'
    boxes=boxes_left
    boxes_left=[]
    while boxes:
      X,Y,W,H,m,min_letter,min_score,b_img = boxes.pop()

      min_letter_new, min_score_new=max_score_ligatures((X,Y,W,H))

      if min_score_new<0:
        min_letter=min_letter_new.replace('dot','').replace('O','0O')
        min_score=min_score_new
        self.debug(b_img, "svm1_fl_"+min_letter+"_"+str(m))
        plate+=[(min_letter, (X,Y,W,H), -min_score)]
      else:
        boxes_left+=[(X,Y,W,H,m,min_letter,min_score,b_img)]

    #prune plate, destructive to original
    plate=prune_plate(plate, threshold=0.799)

    #replace score if the ligatured version is better
    #FIXME maybe just recrop?
    for i in xrange(len(plate)):
      letter, box, score = plate[i]
      X,Y,W,H = box
      if letter in ['8', '0O', 'M', 'B', 'C', 'H', 'E', 'P']:
        if letter=='8':
          ligature='8dot'
        elif letter=='0O':
          ligature='dotO'
        else:
          ligature='dot'+letter
        desc=compute_hog(box)
        score=max(score, self.svm_letters[ligature].predict(desc, returnDFVal=True))
        plate[i]=(letter, (X,Y,W,H), score)

    #are we done?
    #RUSSIAN PLATE TYPE1 SPECIFIC
    alphas, nums, alphanums=get_stats_symbols(plate)
    if alphanums>=9:
      return TaskResultSVMLetterDetector(plate)

    #search by splitting
    boxes=boxes_left
    boxes_left=[]
    while boxes:
      X,Y,W,H,m,min_letter,min_score,b_img = boxes.pop()

      s, splt=max_score_hsplit((X,Y,W,H), n=3)

      for k in xrange(len(splt)):
        letter_s, score_s, box_s=splt[k]
        if score_s<0:
          b_img_s=gray[box_s[1]-1:box_s[1]+box_s[3]+1, box_s[0]-1:box_s[0]+box_s[2]+1]
          self.debug(b_img_s, "svm1_fspl_"+letter_s+"_"+str(m)+"_"+str(k))
          plate+=[(letter_s, box_s, -score_s)]

      if s>0:
        self.debug(b_img, "svm1_nf_"+str(m))

    #restore original compute_hog
    compute_hog=compute_hog_raw

    #prune plate, destructive to original
    plate=prune_plate(plate, threshold=0.799) #destructive
    #plate=sorted(plate, key=lambda x: x[1][0]+x[1][2]/2.0)

    #'bruteforce' search
    hranges=[(r[0], r[1]-r[0]+1) for r in get_free_hranges(gray, plate, 2)]
    h1_cnds=list(set([l[1][1] for l in plate]))
    h2_cnds=list(set([l[1][3] for l in plate]))

    if len(h1_cnds)>2:
      h1_candidates=h1_cnds
    if len(h2_cnds)>2:
      h2_candidates=h2_cnds

    ws=[l[1][2] for l in plate]
    if ws:
      min_width=max(min_width, int(np.floor(min(ws)*0.75)))
    max_width=20

    for r in hranges:
      x,w=r
      if w<min_width:
        continue
      if x==0:
        x+=1
      if x+w==gray.shape[1]:
        w-=1

      scores=[max_score_hsplit3((x+i, 0, min(w-i,max_width), gray.shape[0])) for i in xrange(0,w-min_width,3)]
      for s in [s for s in scores if s[0]<0.0]:
        min_letter, min_score=max_score(s[2])
        b_img=gray[s[2][1]-1:s[2][1]+s[2][3]+1, s[2][0]-1:s[2][0]+s[2][2]+1]

        self.debug(b_img, "svm1_fbf_"+min_letter+"_"+str(s[2]))
        plate+=[(min_letter, s[2], -min_score)]

    plate=prune_plate(plate, threshold=0.799) #destructive

    return TaskResultSVMLetterDetector(plate)
Example #42
import cv2
import pytesseract
l = 0
a = []
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract'
img = cv2.imread("C:\\Users\\admin\\Desktop\\1.jpg")
g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
g = cv2.bilateralFilter(g, 250, 90, 190)
ret, thresh = cv2.threshold(g, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
t = pytesseract.image_to_string(thresh, lang="eng")
cv2.imshow("1", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
print("THE NUMBER IS:", t)
for i in range(0, len(t)):
    if t[i] == 'T':
        l = i
        break
for j in range(l, len(t)):
    if t[j].isupper():
        a.append(t[j])
    elif t[j].isnumeric():
        a.append(t[j])
print(*a)
Example #43
import cv2
import numpy as np
import time

img = cv2.imread('lena.jpg')
# -----------------------------------------------------
cv2.setUseOptimized(True) # enable optimized code paths (AVX etc.)
cv2.useOptimized()
# -----------------------------------------------------
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,128,255,cv2.THRESH_BINARY)
tStart = time.time()
# -----------------------------------------------------
for i in range(1, 100):
    kernel    = np.ones((5,5),np.uint8)
    frame_mor = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
    frame_mor = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
# -----------------------------------------------------
tEnd = time.time()
t_total = tEnd - tStart
print("With AVX : %f s" % t_total)
# -----------------------------------------------------
cv2.namedWindow("With_AVX",0)
cv2.imshow('With_AVX', frame_mor)
# ---------------------------------------------------------------------------------------------------------------------
img = cv2.imread('lena.jpg')
# -----------------------------------------------------
cv2.setUseOptimized(False) # disable optimized code paths
cv2.useOptimized()
# -----------------------------------------------------
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
camera = cv2.VideoCapture(0)
background = None
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 4))

while True:
    grabbed, frame = camera.read()

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (25, 25), 3)

    if background is None:
        background = gray_frame
        continue

    diff = cv2.absdiff(background, gray_frame)
    diff = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)[1]
    diff = cv2.dilate(diff, es, iterations=3)

    cv2.putText(frame, "Motion: Undetected", (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(frame,
                datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                0.35, (0, 255, 0), 1)

    cv2.imshow('video', frame)
    cv2.imshow('diff', diff)

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
Example #45
def padding_position(x, y, w, h, p):
    return x - p, y - p, w + p * 2, h + p * 2


### Main processing starts here ###

img = cv2.imread("concat.jpg", 1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite("img_gray.jpg", gray)

gray2 = cv2.bitwise_not(gray)
cv2.imwrite("img_gray2.jpg", gray2)

# Threshold setting
threshold = 80

# Binarization (pixels above the threshold are set to 255)
ret, gray3 = cv2.threshold(gray2, threshold, 255, cv2.THRESH_BINARY)
gray3 = cv2.bitwise_not(gray3)
cv2.imwrite("img_gray3.jpg", gray3)

img_out = cv2.imread("white.jpg", 3)
min_size = 30
# Contour detection
contours, hierarchy = cv2.findContours(gray3, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)

img2 = detect_contour(gray3, img_out, 500)

cv2.imwrite("img_contour.jpg", img2)
# coding=utf-8
# from __future__ import division
import cv2
import numpy as np
# This is a mask overlay effect; it can be used as a mask, but unfortunately it has a bug!
img1 = cv2.imread('abc.jpg')
img2 = cv2.imread('bbc.png')

rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)  # COLOR_BAYER_BG2GRAY was the bug: img2 is BGR, not Bayer data
ret, mask = cv2.threshold(img2gray, 175, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)

img1_bg = cv2.bitwise_and(roi, roi, mask=mask)
img2_fg = cv2.bitwise_and(img2, img2, mask=mask_inv)
dst = cv2.add(img1_bg, img2_fg)
img1[0:rows, 0:cols] = dst

cv2.imshow('img', img1)
Example #47
    def compare_screen_without_areas(
            self, path1, *args, save_folder=save_folder_path, ssim=starts_ssim, image_format=starts_format_image
    ):
        """
        Compares two pictures, which have parts to be ignored
        x1 and y1 = x and y coordinates for the upper left corner of the ignored area square
        x2 and y2 = x and y coordinates for the lower right corner of the square of the ignored part

        Attention! It is always necessary to enter in order x1 y1 x2 y2 x1 y1 x2 y2 etc ...

        Example: Compare screen without areas ../Image1.png 0 0 30 40 50 50 100 100
        creates two ignored areas, (0, 0, 30, 40) and (50, 50, 100, 100)
        """
        self._check_dir(save_folder)
        self._check_ssim(ssim)
        self._check_image_format(image_format)
        save_folder = self.save_folder

        self.seleniumlib.capture_page_screenshot(save_folder + "/test1.png")
        path2 = save_folder + "/test1.png"
        if os.path.exists(path1) and os.path.exists(path2):
            lt = len(args)
            img1 = cv.imread(path1, 1)
            img2 = cv.imread(path2, 1)
            if lt % 4 == 0:
                x = lt / 4
                self.robotlib.log_to_console(x)
                i = 0
                a = 0
                while i < x:
                    color = (0, 0, 0)
                    x1 = int(args[0 + a])
                    y1 = int(args[1 + a])
                    x2 = int(args[2 + a])
                    y2 = int(args[3 + a])

                    cv.rectangle(img1, (x1, y1), (x2, y2), color, -1)
                    cv.rectangle(img2, (x1, y1), (x2, y2), color, -1)
                    a += 4
                    i += 1
                cv.namedWindow("image", cv.WINDOW_NORMAL)

                # convert to grey
                gray_img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
                gray_img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

                # SSIM diff Img
                (self.score, diff) = structural_similarity(
                    gray_img1, gray_img2, full=True
                )
                diff = (diff * 255).astype("uint8")

                # Threshold diff Img
                thresh = cv.threshold(
                    diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU
                )[1]
                cnts = cv.findContours(
                    thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE
                )
                cnts = imutils.grab_contours(cnts)

                # Create frame in diff area
                for c in cnts:
                    (x, y, w, h) = cv.boundingRect(c)
                    cv.rectangle(img1, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv.rectangle(img2, (x, y), (x + w, y + h), (0, 0, 255), 2)

                # Show image
                if float(self.score) < self.ssim:
                    img_diff = cv.hconcat([img1, img2])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + "/Img" + time_ + self.format
                    )
                    cv.imwrite(save_folder + "/Img" + time_ + self.format, img_diff)
                    self.robotlib.fail("Image has diff: {} ".format(self.score))
                else:
                    img_diff = cv.hconcat([img1, img2])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + "/Img" + time_ + self.format
                    )
                    cv.imwrite(save_folder + "/Img" + time_ + self.format, img_diff)
                    self.robotlib.log_to_console(
                        "Image has diff: {} ".format(self.score)
                    )
        else:
            raise AssertionError("The path to the image does not exist")
Example #48
Third argument is the adaptive method:
ADAPTIVE_THRESH_MEAN_C - the threshold value is the mean of the neighborhood area, or
ADAPTIVE_THRESH_GAUSSIAN_C - the threshold value is the weighted sum of neighborhood values, where the weights form a Gaussian window.
Fourth argument is the threshold type - an integer representing the type of thresholding to apply.
Fifth argument is blockSize - an integer giving the size of the pixel neighborhood used to calculate the threshold value.
Sixth argument is C (constant) - a double subtracted from the mean or weighted mean in both methods.

"""

import numpy as np
import cv2
from matplotlib import pyplot as plt


img = cv2.imread('b.jpg', 0)
ret,thresh1 = cv2.threshold(img, 130, 255, cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(img, 150, 255, cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(img, 140, 255, cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(img, 120, 255, cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)

th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2) # ADAPTIVE THRESHOLDING
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
ret2, th4 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) # OTSU'S BINARIZATION
blur = cv2.GaussianBlur(img, (5, 5), 0) # Removal of noise with gaussian for OTSU'S Binarization.
ret3, th5 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)

titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV', 'Adaptive Mean', 'Adaptive Gaussian', 'Otsu', 'Gaussian Otsu']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5, th2, th3, th4, th5]

for i in range(10):
    plt.subplot(2, 5, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i]), plt.xticks([]), plt.yticks([])
plt.show()
def warp_example(dm):
    dm.example_image_warped = np.zeros_like(dm.subject_image)

    def find_index_of_point(point, point_array):
        for i, current_point in enumerate(point_array):
            if np.all(current_point == point):
                return i
        return -1

    # Search index
    for subject_triangle in dm.subject_triangles:
        subject_point1 = subject_triangle[:2]
        subject_point2 = subject_triangle[2:4]
        subject_point3 = subject_triangle[4:6]
        index1 = find_index_of_point(subject_point1, dm.subject_face_landmarks)
        index2 = find_index_of_point(subject_point2, dm.subject_face_landmarks)
        index3 = find_index_of_point(subject_point3, dm.subject_face_landmarks)

        example_point1 = dm.example_face_landmarks[index1]
        example_point2 = dm.example_face_landmarks[index2]
        example_point3 = dm.example_face_landmarks[index3]

        subject_cropped_triangle_coord = np.array(
            [subject_point1, subject_point2, subject_point3], np.int32)
        example_cropped_triangle_coord = np.array(
            [example_point1, example_point2, example_point3], np.int32)

        # Triangulate example
        example_crop_bounding_rect = cv.boundingRect(
            example_cropped_triangle_coord)
        x, y, w, h = example_crop_bounding_rect
        cropped_example_triangle = dm.example_image[y:y + h, x:x + w]
        # cropped_example_triangle_mask = np.zeros((h, w), np.uint8)

        cropped_example_triangle_coord_relative = example_cropped_triangle_coord - np.tile(
            np.array([x, y]), (3, 1))

        # cv.fillConvexPoly(cropped_example_triangle_mask, cropped_example_triangle_coord_relative, 255)

        # Triangulate subject
        subject_crop_bounding_rect = cv.boundingRect(
            subject_cropped_triangle_coord)
        x, y, w, h = subject_crop_bounding_rect
        # cropped_subject_triangle = dm.subject_image[y: y+h, x:x+w]
        cropped_subject_triangle_mask = np.zeros((h, w), np.uint8)

        cropped_subject_triangle_coord_relative = subject_cropped_triangle_coord - np.tile(
            np.array([x, y]), (3, 1))

        cv.fillConvexPoly(cropped_subject_triangle_mask,
                          cropped_subject_triangle_coord_relative, 255)

        # Transform
        M = cv.getAffineTransform(
            cropped_example_triangle_coord_relative.astype(np.float32),
            cropped_subject_triangle_coord_relative.astype(np.float32))
        warped_example_triangle = cv.warpAffine(cropped_example_triangle, M,
                                                (w, h))
        warped_example_triangle = cv.bitwise_and(
            warped_example_triangle,
            warped_example_triangle,
            mask=cropped_subject_triangle_mask)

        # Reconstruct
        # Fix to remove white lines present when adding triangles to places
        result_face_area = dm.example_image_warped[y:y + h, x:x + w]
        result_face_area_gray = cv.cvtColor(result_face_area,
                                            cv.COLOR_BGR2GRAY)
        _, triangle_fix_mask = cv.threshold(result_face_area_gray, 1, 255,
                                            cv.THRESH_BINARY_INV)
        warped_example_triangle = cv.bitwise_and(warped_example_triangle,
                                                 warped_example_triangle,
                                                 mask=triangle_fix_mask)

        result_face_area = cv.add(result_face_area, warped_example_triangle)
        dm.example_image_warped[y:y + h, x:x + w] = result_face_area
        # cropped_subject_triangle_mask_3_channel = np.zeros((cropped_subject_triangle_mask.shape[0], cropped_subject_triangle_mask.shape[1], 3), np.uint8)
        # cropped_subject_triangle_mask_3_channel[:,:,0] = np.array([cropped_subject_triangle_mask])
        # cropped_subject_triangle_mask_3_channel[:,:,1] = np.array([cropped_subject_triangle_mask])
        # cropped_subject_triangle_mask_3_channel[:,:,2] = np.array([cropped_subject_triangle_mask])
        # a = cv.bitwise_and(cropped_subject_triangle_mask_3_channel, warped_example_triangle)
        # dm.example_image_warped[y: y+h, x: x+w] = cv.bitwise_or(dm.example_image_warped[y: y+h, x: x+w], a)
        # e = dm.example_image_warped
        # cv.imshow("e", e)
        # cv.waitKey(100)

    if dm.show_intermediary:
        plt.subplot(1, 2, 1)
        plt.imshow(opencv2matplotlib(dm.subject_image))
        plt.subplot(1, 2, 2)
        plt.imshow(opencv2matplotlib(dm.example_image_warped))
        plt.show()
        cv2.imshow("Card", image)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("p"):
            break
        raw_capture.truncate(0)
    gray = \
        cv2.cvtColor(
            image,
            cv2.COLOR_BGR2GRAY
        )

    blur = cv2.GaussianBlur(gray,(5,5),0)
    _, threshold = \
        cv2.threshold(
            blur,
            100,
            255,
            cv2.THRESH_BINARY
        )

    contours, hierarchy = \
        cv2.findContours(
            threshold,
            cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE
        )

    contours = \
        sorted(
            contours,
            key=cv2.contourArea,
            reverse=True
        )
cap = cv2.VideoCapture(0) 
scaling_factor = 0.5 
 
while True: 
    ret, frame = cap.read() 
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA) 
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 
 
    mouth_rects = mouth_cascade.detectMultiScale(gray, 1.3, 5) 
    if len(mouth_rects) > 0: 
        (x,y,w,h) = mouth_rects[0] 
        h, w = int(0.6*h), int(1.2*w) 
        x -= int(0.05*w)
        y -= int(0.55*h)
        frame_roi = frame[y:y+h, x:x+w] 
        moustache_mask_small = cv2.resize(moustache_mask, (w, h), interpolation=cv2.INTER_AREA) 
 
        gray_mask = cv2.cvtColor(moustache_mask_small, cv2.COLOR_BGR2GRAY) 
        ret, mask = cv2.threshold(gray_mask, 50, 255, cv2.THRESH_BINARY_INV) 
        mask_inv = cv2.bitwise_not(mask) 
        masked_mouth = cv2.bitwise_and(moustache_mask_small, moustache_mask_small, mask=mask) 
        masked_frame = cv2.bitwise_and(frame_roi, frame_roi, mask=mask_inv) 
        frame[y:y+h, x:x+w] = cv2.add(masked_mouth, masked_frame) 

    cv2.imshow('Moustache', frame)
    c = cv2.waitKey(1) 
    if c == 27: 
        break 
 
cap.release() 
cv2.destroyAllWindows()
Example #52
    def compare_screen_areas(
            self, x1, y1, x2, y2, path1, save_folder=save_folder_path, ssim=starts_ssim,
            image_format=starts_format_image
    ):
        """Creates a cut-out from the screen

        Creates a cut-out from the current screen and compares it to a previously created one

        x1 and y1 = x and y coordinates for the upper left corner of the square
        x2 and y2 = x and y coordinates for the bottom right corner of the square
        path1 = Path to an already created viewport with which we want to compare the viewport created by us

        Example: Compare screen areas 0 0 25 25 ../Crop_Image1.png
        crops (0, 0, 25, 25) from the screenshot and compares it with Crop_Image1.png
        """
        self._check_dir(save_folder)
        self._check_ssim(ssim)
        self._check_image_format(image_format)
        save_folder = self.save_folder
        self.seleniumlib.capture_page_screenshot(save_folder + '/test1.png')
        path2 = save_folder + '/test1.png'

        if os.path.exists(path1):
            if os.path.exists(path2):
                # load img
                img1 = cv.imread(path1, 1)  # img from docu
                img2 = cv.imread(path2, 1)  # img from screenshot

                # convert to grey
                gray_img1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
                gray_img2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

                # splitting area
                crop_img = gray_img2[
                           int(y1): int(y2), int(x1): int(x2)
                           ]  # crop (x1, y1)-(x2, y2); NumPy slices as [y1:y2, x1:x2]

                # SSIM diff img
                (self.score, diff) = structural_similarity(
                    gray_img1, crop_img, full=True
                )
                diff = (diff * 255).astype('uint8')

                # Threshold diff img
                thresh = cv.threshold(
                    diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU
                )[1]
                cnts = cv.findContours(
                    thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE
                )
                cnts = imutils.grab_contours(cnts)

                crop_img_color = img2[int(y1): int(y2), int(x1): int(x2)]
                # Create frame in diff area
                for c in cnts:
                    (x, y, w, h) = cv.boundingRect(c)
                    cv.rectangle(img1, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    cv.rectangle(crop_img_color, (x, y), (x + w, y + h), (0, 0, 255), 2)

                # Show image
                if float(self.score) < self.ssim:
                    self.robotlib = BuiltIn().get_library_instance('BuiltIn')
                    img_diff = cv.hconcat([img1, crop_img_color])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + '/img' + time_ + '.png'
                    )
                    cv.imwrite(save_folder + '/img' + time_ + self.format, img_diff)
                    score_percen = float(self.score) * 100
                    self.robotlib.fail('Image has diff: {} %'.format(score_percen))
                else:
                    img_diff = cv.hconcat([img1, crop_img_color])
                    time_ = str(time.time())
                    self.seleniumlib.capture_page_screenshot(
                        save_folder + "/Img" + time_ + self.format
                    )
                    cv.imwrite(save_folder + "/Img" + time_ + self.format, img_diff)
                    self.robotlib.log_to_console(
                        "Image has diff: {} ".format(self.score)
                    )
            else:
                raise AssertionError("New screen doesnt exist anymore")
        else:
            raise AssertionError("The path1 to the image does not exist. Try a other path, than:" + path1)
        if os.path.exists(save_folder + '/test1.png'):
            os.remove(save_folder + '/test1.png')
Example #53
    def predict(self,
                image,
                background,
                beard_file=None,
                glasses_file=None,
                hat_file=None,
                visualization=False,
                threshold=0.5):
        # instance segmention
        solov2_output = self.solov2.predict(image=image,
                                            threshold=threshold,
                                            visualization=visualization)
        # Set background pixel to 0
        im_segm, x0, x1, y0, y1, _, _, _, _, flag_seg = P.visualize_box_mask(
            image, solov2_output, threshold=threshold)

        if flag_seg == 0:
            return im_segm

        h, w = y1 - y0, x1 - x0
        back_json = background[:-3] + 'json'
        stand_box = json.load(open(back_json))
        stand_box = stand_box['outputs']['object'][0]['bndbox']
        stand_xmin, stand_xmax, stand_ymin, stand_ymax = stand_box[
            'xmin'], stand_box['xmax'], stand_box['ymin'], stand_box['ymax']
        im_path = np.asarray(im_segm)

        # face detection
        blaceface_output = self.blaceface.predict(image=im_path,
                                                  threshold=threshold,
                                                  visualization=visualization)
        im_face_kp, p_left, p_right, p_up, p_bottom, h_xmin, h_ymin, h_xmax, h_ymax, flag_face = P.visualize_box_mask(
            im_path,
            blaceface_output,
            threshold=threshold,
            beard_file=beard_file,
            glasses_file=glasses_file,
            hat_file=hat_file)
        if flag_face == 1:
            if x0 > h_xmin:
                shift_x_ = x0 - h_xmin
            else:
                shift_x_ = 0
            if y0 > h_ymin:
                shift_y_ = y0 - h_ymin
            else:
                shift_y_ = 0
            h += p_up + p_bottom + shift_y_
            w += p_left + p_right + shift_x_
            x0 = min(x0, h_xmin)
            y0 = min(y0, h_ymin)
            x1 = max(x1, h_xmax) + shift_x_ + p_left + p_right
            y1 = max(y1, h_ymax) + shift_y_ + p_up + p_bottom
        # Fill the background image
        cropped = im_face_kp.crop((x0, y0, x1, y1))
        resize_scale = min((stand_xmax - stand_xmin) / (x1 - x0),
                           (stand_ymax - stand_ymin) / (y1 - y0))
        h, w = int(h * resize_scale), int(w * resize_scale)
        cropped = cropped.resize((w, h), cv2.INTER_LINEAR)
        cropped = cv2.cvtColor(np.asarray(cropped), cv2.COLOR_RGB2BGR)
        shift_x = int((stand_xmax - stand_xmin - cropped.shape[1]) / 2)
        shift_y = int((stand_ymax - stand_ymin - cropped.shape[0]) / 2)
        out_image = cv2.imread(background)
        e2gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(e2gray, 1, 255, cv2.THRESH_BINARY_INV)
        mask_inv = cv2.bitwise_not(mask)
        roi = out_image[stand_ymin + shift_y:stand_ymin + cropped.shape[0] +
                        shift_y, stand_xmin + shift_x:stand_xmin +
                        cropped.shape[1] + shift_x]
        person_bg = cv2.bitwise_and(roi, roi, mask=mask)
        element_fg = cv2.bitwise_and(cropped, cropped, mask=mask_inv)
        dst = cv2.add(person_bg, element_fg)
        out_image[stand_ymin + shift_y:stand_ymin + cropped.shape[0] + shift_y,
                  stand_xmin + shift_x:stand_xmin + cropped.shape[1] +
                  shift_x] = dst

        return out_image
training_images_loc = data_location + 'Luna/train/image/'  # assumed: referenced below but missing from the snippet
training_label_loc = data_location + 'Luna/train/label/'
testing_images_loc = data_location + 'Luna/test/image/'
testing_label_loc = data_location + 'Luna/test/label/'

train_files = os.listdir(training_images_loc)
train_data = []
train_label = []

for i in train_files:
    train_data.append(
        cv2.resize((mc.imread(training_images_loc + i)), (512, 512)))

    temp = cv2.resize(
        mc.imread(training_label_loc + i.split('.')[0] + '_mask.tif',
                  mode="L"), (512, 512))
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    train_label.append(temp)
train_data = np.array(train_data)

train_label = np.array(train_label)

test_files = os.listdir(testing_images_loc)
test_data = []
test_label = []

for i in test_files:
    test_data.append(
        cv2.resize((mc.imread(testing_images_loc + i)), (512, 512)))
    # Change '_manual1.tiff' to the label name
    temp = cv2.resize(
        mc.imread(testing_label_loc + i.split('.')[0] + '_mask.tif'), (512, 512))
    # assumed continuation, mirroring the training loop above
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    test_label.append(temp)
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 16:33:15 2020

@author: Javier
"""
import time

import cv2
import matplotlib.pyplot as plt
im1 = cv2.imread("F:\\TFM_datasets\\extracted_frames\\001084\\32.jpg")
im2 = cv2.imread("F:\\TFM_datasets\\extracted_frames\\001084\\38.jpg")

def read_gray(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


im1_gray = read_gray(im1)
im2_gray = read_gray(im2)

#plt.imshow(im2_gray,cmap='gray')
start_time = time.time()
diff_image = cv2.absdiff(im1_gray, im2_gray)
ret, thresh = cv2.threshold(diff_image, 30, 255, cv2.THRESH_BINARY)

#plt.imshow(thresh,cmap='gray')
print("--- %s seconds ---" % (time.time() - start_time))
Example #56
while True:
    ret, frame = cap.read()
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    cv2.line(grayscale, (50, 50), (150, 150), (0, 0, 0), 15)
    cv2.rectangle(grayscale, (50, 50), (200, 200), (0, 0, 0), 10)
    cv2.circle(grayscale, (100, 50), 55, (0, 0, 0), -1)
    cv2.putText(grayscale, 'rajat', (400, 400), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (255, 0, 0), 5)

    rows, cols, channels = img.shape
    roi = frame[0:rows, 0:cols]

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img_gray, 220, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    frame_bg = cv2.bitwise_and(roi, roi, mask=mask)
    img_fg = cv2.bitwise_and(img, img, mask=mask_inv)

    add = frame_bg + img_fg
    frame[0:rows, 0:cols] = add

    out.write(frame)
    cv2.imshow('webcam', frame)
    cv2.imshow('grayscale', grayscale)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
def Basket_Detection(image):
    # Import the necessary packages
    import time
    import cv2
    import imutils
    import numpy as np

    # User variables
    area_max = 35000
    area_min = 1500
    apx_min = 1
    apx_max = 5
    threshold_value = 5
    s4_thresh = 150
    s3_thresh = 280
    s2_thresh = 360
    s1_thresh = 440
    s0_thresh = 520
    s_left = 220
    s_right = 290
    # Define range of green color in HSV
    lower_green = np.array([35, 50, 50])
    upper_green = np.array([75, 255, 255])
    # Convert BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    # Bitwise-AND mask and original image
    residual = cv2.bitwise_and(image, image, mask=mask)
    # Contour Detection
    # Convert the residual image to gray scale
    gray = cv2.cvtColor(residual, cv2.COLOR_BGR2GRAY)
    # Blur the image to eliminate high frequency noise
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    # Threshold the image to map green values to white
    threshold = cv2.threshold(blurred, threshold_value, 255,
                              cv2.THRESH_BINARY)[1]
    # Find the contours
    contours = cv2.findContours(threshold, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    # Use the first or second element depending on opencv version
    contours = contours[0] if imutils.is_cv2() else contours[1]
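    # (imutils.grab_contours(contours) handles this version difference automatically)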
    # Define parameters to track the closest ball to the reference point
    prev_distance = 600000
    closest_cX = 0
    closest_cY = 0
    # Loop over the contours
    for c in contours:
        approximate = cv2.approxPolyDP(c, 0.05 * cv2.arcLength(c, True), True)
        area = cv2.contourArea(c)
        if (area > area_min) & (area < area_max):
            # Compute the center of the contour
            M = cv2.moments(c)
            # The if is there to avoid divide-by-zero errors.
            if (M["m00"] == 0):
                M["m00"] = 1
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            distance = (abs(cX - 230)**2) + (abs(544 - cY)**2)
            # Update the values if the next contour is closer
            if (distance < prev_distance):
                prev_distance = distance
                closest_cX = cX
                closest_cY = cY

    if (closest_cX == 230):
        closest_cX = 231
    # Angle (in units of pi) from the reference point (230, 544) to the closest ball
    Arct = np.arctan((544 - closest_cY) / (closest_cX - 230)) / np.pi
    # Decide to turn or go straight
    if (closest_cX == 0):
        operation = "nbx"
    elif ((closest_cX > s_left) and (closest_cX < s_right)):
        if (closest_cY < s4_thresh):
            operation = "s4x"
        elif (closest_cY < s3_thresh):
            operation = "s3x"
        elif (closest_cY < s2_thresh):
            operation = "s2x"
        elif (closest_cY < s1_thresh):
            operation = "s1x"
        elif (closest_cY < s0_thresh):
            operation = "rcx"
        else:
            operation = "nbx"
    elif (Arct < 0):
        if (Arct > -0.5 / 8):
            operation = "l2x"
        else:
            operation = "l1x"
    else:
        if (Arct < 0.5 / 8):
            operation = "r2x"
        else:
            operation = "r1x"
    return operation
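# Usage sketch with hypothetical names: the function expects a BGR frame and
# returns a three-character operation code such as "s1x", "l2x", or "nbx", e.g.:
#
#   cap = cv2.VideoCapture(0)
#   ok, frame = cap.read()
#   if ok:
#       print(Basket_Detection(frame))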
import sys
import random

import cv2
import numpy as np


src = cv2.imread('coins.png', cv2.IMREAD_GRAYSCALE)

if src is None:
    print('Image load failed!')
    sys.exit()

h, w = src.shape[:2]
dst1 = np.zeros((h, w, 3), np.uint8)
dst2 = np.zeros((h, w, 3), np.uint8)

# Preprocessing
src = cv2.blur(src, (3, 3))
_, src_bin = cv2.threshold(src, 0, 255, cv2.THRESH_OTSU)

# Labeling
cnt, labels, stats, centroids = cv2.connectedComponentsWithStats(src_bin)
for i in range(1, cnt):
    c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    dst1[labels == i] = c

# Contour detection
contours, _ = cv2.findContours(src_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

for i in range(len(contours)):
    c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    cv2.drawContours(dst2, contours, i, c, 1)

cv2.imshow('src', src)
Example #59
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) > 2:
        candidates.append(approx)

mask = np.zeros_like(gray_image)
cv2.drawContours(mask, candidates, 0, (255, 255, 255), -1)

#cv2.imshow("mask", mask)

out = np.zeros_like(gray_image)
out[mask == 255] = gray_image[mask == 255]

#cv2.imshow("woo", out)
ret, filter_image = cv2.threshold(gray_image, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
ret, filter_maskimage = cv2.threshold(out, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)

dots = cv2.bilateralFilter(filter_maskimage, 11, 17, 17)
_, cnts, _ = cv2.findContours(dots, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print(len(cnts) - 1)  # -1 because the die itself counts

try:
    cv2.imshow("mask", mask)
    cv2.imshow("woo", out)
    cv2.imshow("Dice", image)
    cv2.imshow("Dice - gray", gray_image)
    cv2.imshow("Dice - filtered", filter_image)
    cv2.imshow("Dice - masked - filtered", filter_maskimage)
    cv2.waitKey(0)
except cv2.error:
    # assumed handler; the original snippet is truncated before its except clause
    pass
Example #60
def create_annotation_info(annotation_id,
                           image_id,
                           category_info,
                           binary_mask,
                           image_size=None,
                           tolerance=2,
                           bounding_box=None):
    if bounding_box is None:
        bounding_box = []
        ret, thresh = cv2.threshold(binary_mask, 127, 255, cv2.THRESH_BINARY_INV)  # flag 1 == THRESH_BINARY_INV
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            xmin = np.min(x)
            xmax = np.max(x + w)
            ymin = np.min(y)
            ymax = np.max(y + h)
            bounding_box.append([xmin, ymin, xmax, ymax])

    # if bounding_box is None:
    #     obj_ids = np.unique(binary_mask)
    #     obj = []
    #     bounding_box = []
    #     for j in obj_ids:
    #         if j < 255:
    #             obj.append(j)
    #     num_obj = len(obj)
    #     if num_obj > 1:
    #         for i in range(num_obj):
    #             masks = binary_mask == obj[i]
    #             pos = np.where(masks)
    #             xmin = np.min(pos[1])
    #             xmax = np.max(pos[1])
    #             ymin = np.min(pos[0])
    #             ymax = np.max(pos[0])
    #             bounding_box.append([xmin, ymin, xmax, ymax])
    #     else:
    #         masks = binary_mask == obj
    #         pos = np.where(masks)
    #         xmin = np.min(pos[1])
    #         xmax = np.max(pos[1])
    #         ymin = np.min(pos[0])
    #         ymax = np.max(pos[0])
    #         bounding_box.append([xmin, ymin, xmax, ymax])

    if category_info["is_crowd"]:
        is_crowd = 1
        segmentation = binary_mask_to_rle(binary_mask)
    else:
        is_crowd = 0
        segmentation = binary_mask_to_polygon(binary_mask, tolerance)
        if not segmentation:
            return None

    if image_size is not None:
        binary_mask = resize_binary_mask(binary_mask, image_size)

    binary_mask_encoded = mask.encode(
        np.asfortranarray(binary_mask.astype(np.uint8)))

    area = mask.area(binary_mask_encoded)
    if area < 1:
        return None

    annotation_info = {
        "id": annotation_id,
        "image_id": image_id,
        "category_id": category_info["id"],
        "iscrowd": is_crowd,
        "area": area.tolist(),
        "bbox": bounding_box,
        "segmentation": segmentation,
        "width": binary_mask.shape[1],
        "height": binary_mask.shape[0],
    }

    return annotation_info
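# Usage sketch with a hypothetical mask and category dict: builds one COCO-style
# annotation record from a binary uint8 mask (is_crowd=0 takes the polygon path):
#
#   binary_mask = np.zeros((64, 64), np.uint8)
#   binary_mask[16:48, 16:48] = 255
#   category_info = {"id": 1, "is_crowd": 0}
#   info = create_annotation_info(annotation_id=1, image_id=1,
#                                 category_info=category_info,
#                                 binary_mask=binary_mask)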