def test(frame):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_white = np.array([0, 0, 200])
    upper_white = np.array([179, 100, 255])  # OpenCV 8-bit HSV: hue tops out at 179, S/V at 255
    # Threshold the HSV image to get only white colors
    mask = cv2.inRange(hsv, lower_white, upper_white)
    gray = mask  # inRange already returns a single-channel mask; COLOR_BAYER_GB2GRAY would misread it as a Bayer pattern
    gradX = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 1, dy = 0, ksize = -1)
    gradY = cv2.Sobel(gray, ddepth = cv2.CV_32F, dx = 0, dy = 1, ksize = -1)
    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations = 4)
    closed = cv2.dilate(closed, None, iterations = 4)

    # OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return only (contours, hierarchy)
    image, contours, hierarchy = cv2.findContours(closed,
                               cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    img= cv2.drawContours(frame, contours, -1,(0,0,0),3)
    io.imshow(img)
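A minimal driver for this function, on the assumption that io above is skimage.io and that lane.png is a stand-in test image:

import cv2
import numpy as np
from skimage import io

frame = cv2.imread('lane.png')  # hypothetical test image
test(frame)
io.show()  # displays the figure queued by io.imshow above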
Example #2
    def __getMask(self, image):
        """ Get binary mask of the image 
            This algorithm has been referenced from 
            http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html
            """
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        lower_blue = np.array([115,50,50])
        upper_blue = np.array([125,255,255])

        # Red sits at the hue wrap-around: with these bounds only hues in [0, 3] pass,
        # so the reds near hue 179 are missed (see the two-range sketch below)
        lower_red = np.array([-3,50,50])
        upper_red = np.array([3,255,255])

        lower_green = np.array([50,50,50])
        upper_green = np.array([70,255,255])

        maskBlue = cv2.inRange(hsv, lower_blue, upper_blue)
        maskBlue = cv2.blur(maskBlue,(20,20))
        ret, maskBlue = cv2.threshold(maskBlue,127,255,cv2.THRESH_BINARY)

        maskRed = cv2.inRange(hsv, lower_red, upper_red)
        maskRed = cv2.blur(maskRed,(20,20))
        ret, maskRed = cv2.threshold(maskRed, 127, 255,cv2.THRESH_BINARY)

        return maskRed, maskBlue
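Because red straddles OpenCV's hue wrap-around (hues 0 and 179 are adjacent), a fuller red mask is usually built from two ranges OR-ed together. A minimal sketch of that idea, independent of the class above:

import cv2
import numpy as np

def red_mask(image):
    # cover both ends of the hue circle and combine them
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 50, 50]), np.array([3, 255, 255]))
    high = cv2.inRange(hsv, np.array([177, 50, 50]), np.array([179, 255, 255]))
    return cv2.bitwise_or(low, high)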
Example #3
 def processImage(self, img):
     heading = 0
     
     if self.size is None or self.size[0] != img.shape[0] or self.size[1] != img.shape[1]:
         h, w = img.shape[:2]
         self.size = (h, w)
         self.bin = np.empty((h, w, 1), dtype=np.uint8)
     temp = img
     targets = []
     HIGH = 80
     LOW = 10
     cv2.blur(img, (7,7), dst=img)
     img = cv2.Canny(img, LOW, HIGH)
     cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
     #return img
     circles = cv2.HoughCircles(img, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, param1=HIGH,param2=5,minRadius=1,maxRadius=500)
     if circles is None:
         print "O nose!!!, nothing in circles!"
         return img
     print circles[0][0][0]
     x = circles[0][0][0]
     y = circles[0][0][1]
     radius = circles[0][0][2]
     cv2.circle(cimg, (int(x), int(y)), 7, self.targetColor, thickness=int(radius))
     return cimg
Example #4
    def process(self, args):
        """
        Apply OpenCV's blur to the chosen colour
        channels of the current image.

        Args:
            | *args* : a list of arguments, e.g. image ndarray

        """
        if (len(args[0].shape) == 2):
            self.result['img'] = cv2.blur(args[0], (self.kernelsize.value*2+1,
                                   self.kernelsize.value*2+1))

        else:
            channels = cv2.split(args[0])
            if self.channel1.value:
                channels[0] = cv2.blur(channels[0], (self.kernelsize.value*2+1,
                                                     self.kernelsize.value*2+1))
            if self.channel2.value:
                channels[1] = cv2.blur(channels[1], (self.kernelsize.value*2+1,
                                                     self.kernelsize.value*2+1))
            if self.channel3.value:
                channels[2] = cv2.blur(channels[2], (self.kernelsize.value*2+1,
                                                     self.kernelsize.value*2+1))
            self.result['img'] = cv2.merge(channels)
Example #5
def processImage(t0, t1, t2):
	d1 = cv2.absdiff(t1, t0)
	d2 = cv2.absdiff(t2, t0)
	image = cv2.bitwise_and(d1, d2)
	image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
	image = cv2.blur(image, (5,5))
	value, image = cv2.threshold(image, 25, 255, cv2.THRESH_BINARY)
	
	#element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
	#image = cv2.erode(image, element)

	image = cv2.blur(image, (5,5))
	#image = cv2.normalize(image, 0., 1.)
	
	#mean, stddev = cv2.meanStdDev(image)
	#if stddev[0] > 40:
	#	return image

	(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(image)
	
	global cameraWidth, cameraHeight, x11
	xDiv = (cameraWidth+1) / 7.
	yDiv = (cameraHeight+1) / 7.
	if maxVal > 100:
		if x11:
			image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
			cv2.rectangle(image, (maxLoc[0]-10,maxLoc[1]-10), (maxLoc[0]+20,maxLoc[1]+20), (0,0,255), 1)
		#print xLoc, yLoc
		global leftEye, rightEye
		leftEye.setPupilSmoothed(maxLoc[0] / xDiv, maxLoc[1] / yDiv)
		rightEye.setPupilSmoothed(maxLoc[0] / xDiv, maxLoc[1] / yDiv)

	return image
Example #6
def get_blur(df_s, path):
    df_s = remove_weird_slice(df_s)
    Dset = []
    for idx in range(len(df_s)):  
        #Get brightest and darkest frames for study number idx
        #idx = 0
        ext = df_s.index[idx]
        #print ext
        BT = np.argmax(df_s['brightness'][idx])
        DK = np.argmin(df_s['brightness'][idx])
        filename1 = os.listdir(path + '/' + ext)[BT]
        filename2 = os.listdir(path + '/' + ext)[DK]

        dcm1 = path + '/' + ext+ '/' + filename1
        dcm2 = path + '/' + ext+ '/' + filename2

        ds1 = dicom.read_file(dcm1)
        ds2 = dicom.read_file(dcm2)

        data1 = ds1.pixel_array
        data2 = ds2.pixel_array
        # plt.imshow(data1, cmap=plt.cm.bone)
        # plt.show()
        # plt.imshow(data2, cmap=plt.cm.bone)
        # plt.show()
        datab1 = cv2.blur(data1,(7,7))
        datab2 = cv2.blur(data2,(7,7))
        #D is delta between brightest and darkest image for a slice
        D = abs(datab2.astype(int) - datab1.astype(int))
        Dset.append(D)
        #plt.imshow(D, cmap=plt.cm.bone)
        #plt.show()

    #Average deltas for all slices gives region of movement
    return np.mean(np.array(Dset), axis = 0)
Example #7
def cropped_bg_sum1():
    """
    desc: pass the cropped image through the background summation algorithm
          "bg_subber_sum1" from yesterday, though blur it first.
    date: 5/2/14
    source: Ian Burnett
    result: It worked. The algorithm barely squeaks by, but cropping the image was super helpful.
            And when I crop a different spot, the whole thing becomes white pretty quickly, which is definitely a good sign.
    """
    fgbg = cv2.BackgroundSubtractorMOG(500, 6, 0.9, 1)
    _, f = video.read()
    f = crop_spot(f)
    f = cv2.blur(f,(18,18)) # this was super effective too
    f = cv2.blur(f,(16,16))
    f = fgbg.apply(f)

    for i in xrange(200):
        _, frame = video.read()
        cropped = crop_spot(frame)
        fg_mask = fgbg.apply(cropped)
        if i % 5 == 0:
            print i,
            f = cv2.add(f, fg_mask)
            cv2.imshow('frame', f)
            c = cv2.waitKey(1)
            if c == 27:
                break

    cv2.destroyAllWindows()
    plt.subplot(1,1,1), plt.imshow(f, 'gray')
    plt.xticks([]), plt.yticks([])
    plt.show()
Example #8
def imgproc(frame):
    # convert color to gray scale and show it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)

    blur = cv2.blur(gray, (5, 5))
    edge = cv2.Canny(blur, 10, 100)
    edge = cv2.blur(edge, (2, 2))
    cv2.imshow('blured edge', edge)

    # convert image to black and white and show it
    thresh1, thresh = cv2.threshold(edge, 60, 120, cv2.THRESH_BINARY)
    cv2.imshow('thresh', thresh)

    # find contours!
    contours, hry = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # draw all the contours
    cpframe = frame.copy()
    cv2.drawContours(cpframe, contours, -1, (0, 255, 0), 3)
    cv2.imshow('cpframe', cpframe)

    # ================== TODO ===================

    # Modify this code to suit your needs
    contours = [ctr for ctr in contours if cv2.contourArea(ctr) > 100]
    contours = [cv2.approxPolyDP(ctr, 5, True) for ctr in contours]
    contours = [ctr for ctr in contours if len(ctr) == 4]
    contours = [ctr for ctr in contours if cv2.isContourConvex(ctr)]

    # ============================================

    # draw on the frame
    cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
    return frame
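A sketch of a capture loop that would drive imgproc, assuming a default webcam at index 0:

import cv2

cap = cv2.VideoCapture(0)  # hypothetical camera index
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow('result', imgproc(frame))
    if cv2.waitKey(1) & 0xFF == ord('q'):  # q to quit
        break
cap.release()
cv2.destroyAllWindows()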
Example #9
def activeContour(image, snaxels):
    """
    Iterate the contour until the energy reaches an equilibrium
    """
    energy_matrix = np.zeros( (_MAX_SNAXELS - 1, _NUM_NEIGHBORS, _NUM_NEIGHBORS), dtype=np.float32)
    position_matrix = np.zeros( (_MAX_SNAXELS - 1, _NUM_NEIGHBORS, _NUM_NEIGHBORS, 2), dtype=np.int32 )
    neighbors = np.array([[i, j] for i in range(-1, 2) for j in range(-1, 2)])
    min_final_energy_prev = float("inf")
    
    counter = 0
    smooth_factor = _INITIAL_SMOOTH 
    iterations = _INITIAL_ITERATIONS
    gradient_image = _gradientImage(image)
    smooth_image = cv.blur(gradient_image, (smooth_factor, smooth_factor))
        
    while True:
        counter += 1
        if not (counter % iterations):
            iterations += _ITERATIONS_DELTA
            if smooth_factor > _SMOOTH_FACTOR_DELTA:
                smooth_factor -= _SMOOTH_FACTOR_DELTA            
            smooth_image = cv.blur(gradient_image, (smooth_factor, smooth_factor))
            print "Deblur step, smooth factor now: ", smooth_factor
        
        _display(smooth_image, snaxels)
        min_final_energy = _iterateContour(image, smooth_image, snaxels, energy_matrix, position_matrix, neighbors)
        
        if (min_final_energy == min_final_energy_prev) or smooth_factor < _SMOOTH_FACTOR_DELTA:
            print "Min energy reached at ", min_final_energy
            print "Final smooth factor ", smooth_factor
            break
        else:
            min_final_energy_prev = min_final_energy
Example #10
def getboxes(img, b_tuple, count):
  lower = np.array(b_tuple[0], dtype = "uint8")
  upper = np.array(b_tuple[1], dtype = "uint8")

  mask = cv2.inRange(img, lower, upper)
  output = cv2.bitwise_and(img, img, mask = mask)

 # output = cv2.resize(output, None, fx = 0.1, fy = 0.1, interpolation = cv2.INTER_CUBIC)
  cv2.imshow("ii", np.hstack([img, output]))
  cv2.waitKey(0)

  gray_img = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
  gray_img = cv2.blur(gray_img, (5,5))
  gray_img = cv2.bilateralFilter(gray_img, 20, 20, 20)
  edged = cv2.Canny(gray_img, 30, 200)
  edged = cv2.blur(edged, (5,5))
  cv2.imshow("ii", edged)
  cv2.waitKey(0)

  contours, _ = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  contours = sorted(contours, key = cv2.contourArea, reverse = True)[:count]
  #cv2.drawContours(img, contours, -1, (0,255,0), 3)
  cv2.imshow("ii", img)
  cv2.waitKey(0)

  boxes = []
  for i in range(0, count):
    rect = cv2.minAreaRect(contours[i])
    box = cv2.cv.BoxPoints(rect)
    box = np.int0(box)
    boxes.append(box)

  #pprint(boxes)
  return boxes
Example #11
def get_slc_blur(solid, brightness):
    #better to just take data?
    #df_s = remove_weird_slice(df_s)
    Dset = []

    BT = np.argmax(brightness)
    DK = np.argmin(brightness)
    #filename1 = fileset[BT]
    #filename2 = fileset[DK]

    #dcm1 = filename1
    #dcm2 = filename2

    
    #ds1 = dicom.read_file(dcm1)
    #ds2 = dicom.read_file(dcm2)

    data1 = solid[BT]
    data2 = solid[DK]
    # plt.imshow(data1, cmap=plt.cm.bone)
    # plt.show()
    # plt.imshow(data2, cmap=plt.cm.bone)
    # plt.show()
    datab1 = cv2.blur(data1,(7,7))
    datab2 = cv2.blur(data2,(7,7))
    #D is delta between brightest and darkest image for a slice
    return abs(datab2.astype(int) - datab1.astype(int))
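A sketch of calling this with synthetic data, under the assumption that solid is an indexable stack of 2-D frames and brightness a matching per-frame sequence:

import numpy as np

solid = np.random.randint(0, 255, (30, 256, 256)).astype(np.uint8)  # hypothetical slice stack
brightness = [im.mean() for im in solid]
delta = get_slc_blur(solid, brightness)  # motion map between brightest and darkest frames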
Example #12
def best_size_offset(image, template, width_nom, threshold, audible = False, fast = False):
    import numpy as np
    import cv2
    
    offsets = np.linspace(-0.3, 0.3, 25)
    best_offset = 0
    most_matches = 0
    n_zeros = 0
    
    for size_off in offsets:
        img3 = cv2.resize(image, (0,0), fx=width_nom + size_off, fy=width_nom + size_off)
        img3 = cv2.blur(img3, (2,2))
        res = cv2.matchTemplate(img3, template, cv2.TM_CCOEFF_NORMED)
        good_matches = sum(sum(res > threshold))
        if good_matches > most_matches:
            most_matches = good_matches
            best_offset = size_off
            n_zeros = 0
        if (most_matches > 0) & (good_matches == 0):
            n_zeros += 1
        if n_zeros > 2:
            break
        if audible:
            print "Sizing offset: " + str(size_off) + " :: " + str(good_matches)
    
    if audible:
        print "--------------------------------"    
        print "Best offset: " + str(best_offset) + "(" + str(most_matches) + ")"
        print "--------------------------------"   
    
    if fast:
        print(round(best_offset, 4))
        return(round(best_offset, 4))

    small_step_offsets = np.linspace(best_offset - 0.015, best_offset + 0.015, 13)
    best_offset = 0
    most_matches = 0

    for size_off in small_step_offsets:
        img3 = cv2.resize(image, (0,0), fx=width_nom + size_off, fy=width_nom + size_off)
        img3 = cv2.blur(img3, (2,2))
        res = cv2.matchTemplate(img3, template, cv2.TM_CCOEFF_NORMED)
        good_matches = sum(sum(res > threshold))
        if good_matches > most_matches:
            most_matches = good_matches
            best_offset = size_off
        if audible:
            print "Sizing offset: " + str(size_off) + " :: " + str(good_matches)

    if audible:
        print "--------------------------------"    
        print "Best offset: " + str(best_offset) + "(" + str(most_matches) + ")"
        print "--------------------------------"
        
    print(round(best_offset, 4))
    return(round(best_offset, 4))
Example #13
def mean(image,params):
    if params['kernel_shape'] == 'rect':
        try:
            return cv2.blur(image,(params['kernel_size_x'],params['kernel_size_y']))
        except:
            return cv2.blur(scale_data(image,16),(params['kernel_size_x'],params['kernel_size_y']))
    else:
        sys.stderr.write("kernel shape handler not implemented: " + params['kernel_shape'])
Example #14
 def _computeCoefficients(self, p):
     r = self._radius
     p_mean = cv2.blur(p, (r, r))
     p_cov = p_mean - self._I_mean * p_mean
     a = p_cov / (self._I_var + self._epsilon)
     b = p_mean - a * self._I_mean
     a_mean = cv2.blur(a, (r, r))
     b_mean = cv2.blur(b, (r, r))
     return a_mean, b_mean
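For context, _I_mean and _I_var above are box-filtered statistics of the guide image I taken at the same radius; a minimal sketch of how they could be precomputed (the helper name is hypothetical, only the attribute roles come from the snippet):

import cv2
import numpy as np

def guide_stats(I, r):
    # box-filtered mean and variance of a single-channel guide image
    I = I.astype(np.float32)
    I_mean = cv2.blur(I, (r, r))
    I_var = cv2.blur(I * I, (r, r)) - I_mean * I_mean  # var = E[I^2] - E[I]^2
    return I_mean, I_var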
Example #15
    def calcul_similarite(self, n=20, p=30):
        """
        Question 1.2

        Trace les fenêtres de plus grandes similarités entre les deux images.

        Parameters
        ----------
        n: largeur des fenêtres
        p: hauteur des fenêtres
        """

        # First, collect the list of all the interest points of images 1 and 2
        corners1 = self.corner_method(self.img1)
        corners2 = self.corner_method(self.img2)

        for corner in corners1:
            x, y = corner.ravel()
            cv2.circle(self.img1, (x, y), 1, 255, -1)
        for corner in corners2:
            x, y = corner.ravel()
            cv2.circle(self.img2, (x, y), 1, 255, -1)

        # Then, for each point in the corner lists, build a window of size (2N+1)x(2P+1)
        # These windows are stored in two lists containing the intensities of the
        # points in each window

        blur1 = cv2.blur(self.img1, (3, 3), 0)
        blur2 = cv2.blur(self.img2, (3, 3), 0)

        list_windows1 = self.window_around(blur1, corners1, n, p)
        list_windows2 = self.window_around(blur2, corners2, n, p)

        # Initialize the maximum with a very high value so that we are sure
        # to obtain a minimum later on
        max_similarity = 10000000000

        # Next, compute the similarity of every possible combination of corner windows in the two images
        # Only the extreme points of each window minimizing the dissimilarity are kept
        for window1, point11, point12 in list_windows1:
            for window2, point21, point22 in list_windows2:

                similarity = self.cost(window1, window2, type=self.type)
                if similarity < max_similarity:
                    max_similarity = similarity
                    print max_similarity
                    image_1_top_left = point11
                    image_1_bot_right = point12
                    image_2_top_left = point21
                    image_2_bot_right = point22

                    # Plot below a certain threshold so that several similarity points are displayed
                    if max_similarity < 150000:
                        self.plot_windows(self.img1, self.img2, image_1_top_left, image_1_bot_right, image_2_top_left,
                              image_2_bot_right)
        print "similarité finale : ", max_similarity
Example #16
def augmentation_wrapper(X_list, y_list):
    import random
    for index, y in enumerate(y_list):
        X = np.copy(X_list[index])
        # Adjust the exposure
        X_Lab = cv2.cvtColor(X, cv2.COLOR_BGR2LAB)
        X_L = X_Lab[:, :, 0].astype(dtype=np.float32)
        # margin = np.min([np.min(X_L), 255.0 - np.max(X_L), 64.0])
        margin = 128.0
        exposure = random.uniform(-margin, margin)
        X_L += exposure
        X_L = np.around(X_L)
        X_L[X_L < 0.0] = 0.0
        X_L[X_L > 255.0] = 255.0
        X_Lab[:, :, 0] = X_L.astype(dtype=X_Lab.dtype)
        X = cv2.cvtColor(X_Lab, cv2.COLOR_LAB2BGR)
        # Rotate and Scale
        h, w, c = X.shape
        degree = random.randint(-30, 30)
        scale = random.uniform(0.80, 1.25)
        padding = np.sqrt((w) ** 2 / 4 - 2 * (w) ** 2 / 16)
        padding /= scale
        padding = int(np.ceil(padding))
        for channel in range(c):
            X_ = X[:, :, channel]
            X_ = np.pad(X_, padding, 'reflect', reflect_type='even')
            h_, w_ = X_.shape
            # Calculate the affine transform
            center = (w_ // 2, h_ // 2)
            A = cv2.getRotationMatrix2D(center, degree, scale)
            X_ = cv2.warpAffine(X_, A, (w_, h_), flags=cv2.INTER_LANCZOS4, borderValue=0)
            X_ = X_[padding: -1 * padding, padding: -1 * padding]
            X[:, :, channel] = X_
        # Horizontal flip
        if random.uniform(0.0, 1.0) <= 0.5:
            X = cv2.flip(X, 1)
            if ':' in y:
                species, viewpoint = y.split(':')
                viewpoint = label_mapping_dict[viewpoint]
                y = '%s:%s' % (species, viewpoint)
        # Blur
        if random.uniform(0.0, 1.0) <= 0.1:
            if random.uniform(0.0, 1.0) <= 0.5:
                X = cv2.blur(X, (3, 3))
            else:
                X = cv2.blur(X, (5, 5))
        # Reshape
        X = X.reshape(X_list[index].shape)
        # Show image
        # canvas = np.hstack((X_list[index], X))
        # cv2.imshow('', canvas)
        # cv2.waitKey(0)
        # Save
        X_list[index] = X
        y_list[index] = y
    return X_list, y_list
Example #17
    def get_blurred_field(video, upper_left, lower_right, blur, brightness):
        middle_mat = np.zeros(video.shape, dtype=np.float32)
        cv2.rectangle(middle_mat, upper_left, lower_right, (1.0, 1, 1), thickness=-1)
        middle_mat = cv2.blur(middle_mat, (20, 20))

        middle_field_vid = cv2.blur(video, (int(blur), int(blur)))
        middle_field_vid = cv2.multiply(middle_field_vid, brightness * middle_mat, dtype=3)
        rest_vid = cv2.multiply(video, 1 - middle_mat, dtype=3)

        return cv2.add(middle_field_vid, rest_vid)
Example #18
def localSD(mat, n):
    
    mat=np.float32(mat)
    mu = cv2.blur(mat,(n,n))
    mdiff=mu-mat
    mat2=cv2.blur(np.float64(mdiff*mdiff),(n,n))
    sd = np.float32(cv2.sqrt(mat2))
    sdn=normalize(sd)

    return sdn
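localSD leans on a module-level normalize helper that is not shown; a stand-in and a usage sketch, under that assumption:

import cv2
import numpy as np

def normalize(mat):
    # hypothetical stand-in for the module's helper: rescale to [0, 1]
    return cv2.normalize(mat, None, 0.0, 1.0, cv2.NORM_MINMAX)

img = cv2.imread('texture.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
sd_map = localSD(img, 11)  # local standard deviation over an 11x11 window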
Example #19
    def getCoordinates(self,target="ball",debug=False):
        """
        This function searches for the target (ball/yellow gate/blue gate).
        It's done by the using thresholds and contour searching.
        Counter with the biggest area is chosen.

        First thresholding
        """
        #Initial ball position (not on the image) 
        x=-1
        y=-1
        
        #Convert to HSV space and blur the image to reduce color noise
        #(a 1x1 kernel is effectively a no-op; a larger kernel would be needed to actually blur)
        hsv_frame=cv2.cvtColor(self.frame,cv2.COLOR_BGR2HSV)
        cv2.blur(hsv_frame, (1,  1), hsv_frame)

        if target == "ball":
            self.thresholded_frame = cv2.inRange(hsv_frame, self.ball_threshold_low, self.ball_threshold_high)
        elif target == "blue gate":
            self.thresholded_frame = cv2.inRange(hsv_frame, self.blue_gate_threshold_low, self.blue_gate_threshold_high)
        else: #yellow gate
            self.thresholded_frame = cv2.inRange(hsv_frame, self.yellow_gate_threshold_low, self.yellow_gate_threshold_high)

        #Erode the image to remove small specks of noise
        frame_contours = cv2.erode(self.thresholded_frame, None)

        if debug:
            cv2.imshow("Thresholded",self.thresholded_frame)

        """
        Finding a contour with the biggest area
        """
        contours, hierarchy = cv2.findContours(frame_contours, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contourareamax = 0
        maxcontour = 0
        for i in contours:
            contourarea = cv2.contourArea(i)
            if contourarea > contourareamax:
                maxcontour = i
                contourareamax = contourarea
            #Find the center point of the contour
            try:
                if contourarea > 5:
                    center, radius = cv2.minEnclosingCircle(maxcontour)
                    #cv2.boundingRect(contourareamax)
                    x, y = center
                    if debug:
                        cv2.circle(self.frame, (int(x), int(y)), int(radius), (100, 100, 255))
            except:
                print "Can't find the minimum enclosing circle"    

        if debug:
            cv2.imshow("Camera",self.frame)

        return x,y
Example #20
    def doThings(self):
        sgbm = cv2.StereoSGBM()
        sgbm.SADWindowSize, numberOfDisparitiesMultiplier, sgbm.preFilterCap, sgbm.minDisparity, \
        sgbm.uniquenessRatio, sgbm.speckleWindowSize, sgbm.P1, sgbm.P2, \
        sgbm.speckleRange = [v for v,_ in self.params.itervalues()]
        sgbm.numberOfDisparities = numberOfDisparitiesMultiplier*16
        sgbm.disp12MaxDiff = -1
        sgbm.fullDP = False
        R1, R2, P1, P2, Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2, 
                                (self.top.shape[1],self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1, 
                                                         (self.top.shape[1],self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2, 
                                                               (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR)
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        top_small = cv2.resize(self.top_r, (self.top_r.shape[1]/2,self.top_r.shape[0]/2))
        bottom_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1]/2,self.bottom_r.shape[0]/2))
        cv2.imshow('top', top_small)
        cv2.imshow('bottom', bottom_small)
        
#        top_r = cv2.equalizeHist(top_r)
        top_r = cv2.blur(self.top_r, (5,5))
#        bottom_r = cv2.equalizeHist(bottom_r)
        bottom_r = cv2.blur(self.bottom_r, (5,5))
        dispTop = sgbm.compute(top_r.T, bottom_r.T).T
        dispTopPositive = dispTop
        dispTopPositive[dispTop<0] = 0
        disp8 = (dispTopPositive / (sgbm.numberOfDisparities * 16.) * 255).astype(np.uint8)
        disp_small = cv2.resize(disp8, (disp8.shape[1]/2, disp8.shape[0]/2))
        cv2.imshow(self.winname, disp_small)
        
        self.disp8 = disp8
        self.xyz = cv2.reprojectImageTo3D(dispTop, Q, handleMissingValues=True)
#        self.xyzrgb = np.zeros((self.xyz.shape[0],self.xyz.shape[1],4))
        
#        import struct
#        def color_to_float(color):
#            if color.size == 1:
#                color = [color]*3
#            rgb = (color[2] << 16 | color[1] << 8 | color[0]);
#            rgb_hex = hex(rgb)[2:-1]
#            s = '0'*(8-len(rgb_hex)) + rgb_hex.capitalize()
##            print color, rgb, hex(rgb)
#            rgb_float = struct.unpack('!f', s.decode('hex'))[0]
##            print rgb_float
#            return rgb_float
        
#        for i in range(self.xyz.shape[0]):
#            for j in range(self.xyz.shape[1]):
#                self.xyzrgb[i,j] = np.append(self.xyz[i,j], color_to_float(self.top[i,j])) 
        
Example #21
 def color_detection_hsv(self, frame, thresholdminvalues, thresholdmaxvalues):
         ''' (np.array np.uint8 3channel, list of 3 ints, list of 3 ints) -> np.array np.uint8 1channel'''
         ''' Return thresholded_frame according to thresholdmin/maxvalues'''
         hsv_frame=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) #convert the image to HSV (Hue, Saturation, Value) so it's easier to determine the color to track (hue)
         cv2.blur(hsv_frame, (3,  3), hsv_frame)  # TESTING needed: does blurring have an effect?
         colormin = np.asarray(thresholdminvalues, np.uint8)
         colormax = np.asarray(thresholdmaxvalues, np.uint8)# ball color
         thresholded_frame = cv2.inRange(hsv_frame, colormin, colormax)
         if self.debug:
             cv2.imshow("Thresholded",thresholded_frame)
         return thresholded_frame
Example #22
	def flow(image):
			global opflow_first
			global previous
			src = image.copy()
			if (opflow_first):
			if (previous is None):
					previous = src.copy()
					opflow_first = 0
					del src
					return

			criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03)
			lk_params = dict(winSize  = (31,31), maxLevel = 5, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03))

			img1 =cv2.cvtColor(previous, cv2.COLOR_BGR2GRAY)
			img1= cv2.blur(img1,(5,5))
			img2 =cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
			img2= cv2.blur(img2,(5,5))
			
			numpoints = 90
			
			p0 = cv2.goodFeaturesToTrack(img1, numpoints, .01, .01)
			
			if (len(p0)>0):
				cv2.cornerSubPix(img1, p0, (15,15), (-1,-1), criteria)
				p1, st, error = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None, **lk_params)

				for i in range(numpoints):
					if (st[i] == 0):
						p = (int(p0[i][0][0]), int(p0[i][0][1]))
						q = (int(p1[i][0][0]), int(p1[i][0][1]))
						cv2.circle(src, p, 5, (255,255,0), -1)
						cv2.circle(src, q, 5, (0,255,255), -1)
					
					line_thickness = 1
					line_color = (0, 0, 255)
					
					
					p = (int(p0[i][0][0]), int(p0[i][0][1]))
					q = (int(p1[i][0][0]), int(p1[i][0][1]))
					cv2.circle(src, p, 5, (0,255,0), -1)
					cv2.circle(src, q, 5, (255,0,0), -1)
					cv2.line(src, p, q, line_color, line_thickness, 0)
						

			
			previous = image.copy()
			
			del img1
			del img2
			
			
			return src
Example #23
    def process_one_video(self, v):
        fn = os.path.basename(v)
        fn = os.path.splitext(fn)[0]  # get name without suffix
        print fn

        # start to process video
        cap = cv2.VideoCapture(v)
        fgbg = cv2.BackgroundSubtractorMOG()
        count = 0
        video_length = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
        while (count < video_length - 10):
            count += 1
            ret, frame = cap.read()
            # use org_frame to generate neg img
            check_frame = frame
            org_frame = frame
            # process after 50 frames
            if count > 50:
                frame = cv2.blur(frame, (3, 3))
                fgmask = fgbg.apply(frame, learningRate=0.001)
                if count > 600:
                    if count % 10 != 0:  # jump every 10 frames
                        continue
                    fgmask = cv2.blur(fgmask, (3, 3))
                    contoured = np.copy(fgmask)
                    contours, _ = cv2.findContours(contoured, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                    tmp_img_name = fn + '_' + str(count) + '.jpg'
                    tmp_save_flag = False
                    for cnt in contours:
                        x, y, w, h = cv2.boundingRect(cnt)
                        if x < ROI_X_MIN or x > ROI_X_MAX or y < ROI_Y_MIN or y > ROI_Y_MAX:
                            continue
                        tmp_size = w * h
                        if tmp_size < PERSON_SIZE_MAX and tmp_size > PERSON_SIZE_MIN:
                            if tmp_img_name not in self.old_pos_dict:
                                self.old_pos_dict[tmp_img_name] = [[x, y, w, h]]
                            else:
                                self.old_pos_dict[tmp_img_name].append([x, y, w, h])
                            # mark box
                            cv2.rectangle(check_frame, (x, y), (x + w, y + h), (255, 0, 0), thickness=2)
                            print tmp_size, count, fn
                            tmp_save_flag = True
                    if tmp_save_flag:
                        # get one neg box
                        tmp_x = randint(0, 350)
                        tmp_y = randint(0, 200)
                        cv2.rectangle(check_frame, (tmp_x, tmp_y), (tmp_x + NEG_IMG_WIDTH, tmp_y + NEG_IMG_WIDTH), (0, 0, 255), thickness=2)
                        self.old_neg_dict[tmp_img_name] = [[tmp_x, tmp_y, NEG_IMG_WIDTH, NEG_IMG_WIDTH]]
                        cv2.imwrite(os.path.join(self.check_img_dir, tmp_img_name), check_frame) # save img for check
                        cv2.imwrite(os.path.join(self.work_img_dir, tmp_img_name), org_frame) # save origin img

            if count % 400 == 0:
                print 'now process frame', count, fn
Example #24
    def opencv_show_detailed(self):
        pos = 0

        image = self.image.copy()
        name = 'image'
        cv2.destroyWindow(name)
        cv2.imshow(name, image)
        cv2.moveWindow(name, pos, 0)
        pos += 150

        name = 'edges: %d %d %d' % cannyrows(self.edges())
        edgesimage = self.edges().copy()
        cv2.circle(edgesimage, self.center(), 1, 255)
        cv2.destroyWindow(name)
        cv2.imshow(name, edgesimage)
        cv2.moveWindow(name, pos, 0)
        pos += 150

        blur = cv2.blur(self.gray(), ksize=(4,4))
        name = 'blur'
        cv2.destroyWindow(name)
        cv2.imshow(name, blur)
        cv2.moveWindow(name, pos, 0)

        blur2 = cv2.blur(self.gray(), ksize=(6,6))
        name = 'blur2'
        cv2.destroyWindow(name)
        cv2.imshow(name, blur2)
        cv2.moveWindow(name, pos, 200)
        pos += 150

        for p in self.params:
            canny = cv2.Canny(blur, 404*p, 156*p, apertureSize=3)
            r1,r2,r3 = cannyrows(canny)
            name = 'bc %.3f %d %d %d' % (p, r1,r2,r3)
            cv2.destroyWindow(name)
            cv2.imshow(name, canny)
            cv2.moveWindow(name, pos, 0)

            canny = cv2.Canny(blur2, 404*p, 156*p, apertureSize=3)
            r1,r2,r3 = cannyrows(canny)
            name = 'bc2 %.3f %d %d %d' % (p, r1,r2,r3)
            cv2.destroyWindow(name)
            cv2.imshow(name, cv2.Canny(blur2, 404*p, 156*p, apertureSize=3))
            cv2.moveWindow(name, pos, 200)

            canny = cv2.Canny(self.gray(), 404*p, 156*p, apertureSize=3)
            r1,r2,r3 = cannyrows(canny)
            name = 'c %.3f %d %d %d' % (p, r1,r2,r3)
            cv2.destroyWindow(name)
            cv2.imshow(name, cv2.Canny(self.gray(), 404*p, 156*p, apertureSize=3))
            cv2.moveWindow(name, pos, 400)
            pos += 150
Example #25
    def _computeCoefficients(self, p):
        r = self._radius
        I = self._I
        Ir, Ig, Ib = I[:, :, 0], I[:, :, 1], I[:, :, 2]

        p_mean = cv2.blur(p, (r, r))

        Ipr_mean = cv2.blur(Ir * p, (r, r))
        Ipg_mean = cv2.blur(Ig * p, (r, r))
        Ipb_mean = cv2.blur(Ib * p, (r, r))

        Ipr_cov = Ipr_mean - self._Ir_mean * p_mean
        Ipg_cov = Ipg_mean - self._Ig_mean * p_mean
        Ipb_cov = Ipb_mean - self._Ib_mean * p_mean

        ar = self._Irr_inv * Ipr_cov + self._Irg_inv * Ipg_cov + self._Irb_inv * Ipb_cov
        ag = self._Irg_inv * Ipr_cov + self._Igg_inv * Ipg_cov + self._Igb_inv * Ipb_cov
        ab = self._Irb_inv * Ipr_cov + self._Igb_inv * Ipg_cov + self._Ibb_inv * Ipb_cov
        b = p_mean - ar * self._Ir_mean - ag * self._Ig_mean - ab * self._Ib_mean

        ar_mean = cv2.blur(ar, (r, r))
        ag_mean = cv2.blur(ag, (r, r))
        ab_mean = cv2.blur(ab, (r, r))
        b_mean = cv2.blur(b, (r, r))

        return ar_mean, ag_mean, ab_mean, b_mean
Example #26
 def score_gauss(self, idata):
     ''' Find the difference between a point and its gaussian
         idata: ImageMeta().data
         yields: 1d integer numpy array for each axis, y then x
     '''
     
     idata = cv2.cvtColor(idata, cv2.cv.CV_BGR2Lab)
     big = numpy.array(cv2.blur(idata, (11, 11)), dtype=numpy.int16)
     little = numpy.array(cv2.blur(idata, (3, 3)), dtype=numpy.int16)
     for axis in [1, 0]:
         # The sum is perpendicular to the axis of interest
         impulse = numpy.sqrt(((little-big)**2).sum(axis=2)).sum(axis=axis)
         #yield scind.median_filter(impulse, 3)
         yield impulse
Example #27
def canny(img, lowThreshold):
    """
    Performs canny edge detection on the provided grayscale image.
    :param img: a grayscale image
    :param lowThreshold: threshold for the canny operation
    :return: binary image containing the edges found by canny
    """

    dst = np.zeros(img.shape, dtype=img.dtype)
    cv2.blur(img, (3, 3), dst)

    # canny recommends that the high threshold be 3 times the low threshold
    # the kernel size is 3 as defined above
    return cv2.Canny(dst, lowThreshold, lowThreshold * 3, dst, 3)
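Usage is straightforward; a short sketch with a hypothetical input file:

import cv2
import numpy as np

img = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)  # hypothetical grayscale input
edges = canny(img, 50)  # high threshold becomes 150 inside the function
cv2.imwrite('edges.png', edges)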
Example #28
    def __init__(self, params, winname, top, bottom):
        self.top = top
        self.bottom = bottom
        
        top_small = cv2.resize(top, (top.shape[1] / 2, top.shape[0] / 2))
        bottom_small = cv2.resize(bottom, (bottom.shape[1] / 2, bottom.shape[0] / 2))
        cv2.imshow('top', top_small)
        cv2.imshow('bottom', bottom_small)

        extrinsic_filepath = config.PROJPATH + 'extrinsics.yml'
        intrinsic_filepath = config.PROJPATH + 'intrinsics.yml'
        self.R = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R'))
        self.T = np.asarray(cv2.cv.Load(extrinsic_filepath, name='T'))
        self.R1 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R1'))
        self.R2 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R2'))
        self.P1 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='P1'))
        self.P2 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='P2'))
        self.Q = np.asarray(cv2.cv.Load(extrinsic_filepath, name='Q'))
        self.M1 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='M1'))
        self.M2 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='M2'))
        self.D1 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='D1'))
        self.D2 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='D2'))
        
        self.do_tune = config.TUNE_DISPARITY_MAP
        
        R1, R2, P1, P2, self.Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2,
                        (self.top.shape[1], self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1,
                                    (self.top.shape[1], self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2,
                                (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR)
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        
        top_r_small = cv2.resize(self.top_r, (self.top_r.shape[1] / 2, self.top_r.shape[0] / 2))
        bottom_r_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1] / 2, self.bottom_r.shape[0] / 2))
        cv2.imshow('top rectified', top_r_small)
        cv2.imshow('bottom rectified', bottom_r_small)
        
        tx1,ty1,tx2,ty2 = topValidRoi
        bx1,by1,bx2,by2 = bottomValidRoi
        self.roi = (max(tx1, bx1), max(ty1, by1), min(tx2, bx2), min(ty2, by2))
        self.top_r = cv2.blur(self.top_r, (5, 5))
        self.bottom_r = cv2.blur(self.bottom_r, (5, 5))
#        top_r = cv2.equalizeHist(self.top_r)
#        bottom_r = cv2.equalizeHist(self.bottom_r)
        
        super(SGBMTuner, self).__init__(params, winname)
Example #29
def circles_iter \
        ( frames
        , blur=None
        , param1 = None
        , param2 = None
        , minFraction = None
        , maxFraction = None
        , debug = None
        ):
    '''iter<ndarray<y,x,bgr>>[, int][, int][, int][, float][, float][, str] -> iter<...>

       Locate a circle in each frame.
        If given, blur with a square kernel `blur` pixels to a side.
        If given, apply param1 and param2 to cv2.HoughCircles.
        If given, return circles with diameter between minFraction and maxFraction of the small side of the frame.
    '''
    ns = None
    for fr in frames:
        if ns is None:
            ns = \
                ( numpy.empty(fr.shape[:2] + (1,), fr.dtype)
                , numpy.empty_like(fr)
                )
        det, debug_im = ns
        # convert to gray; optionally blur
        cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY, det)
        cv2.equalizeHist(det, det)
        if blur is not None:
            cv2.blur(det, (blur, blur), det)
        # find circles
        size = min(fr.shape[:2])

        params = {}
        if param1 is not None:
            params['param1'] = param1
        if param2 is not None:
            params['param2'] = param2
        if minFraction is not None:
            params['minRadius'] = int(size * minFraction / 2)
        if maxFraction is not None:
            params['maxRadius'] = int(size * maxFraction / 2)

        ret = cv2.HoughCircles(det, cv2.cv.CV_HOUGH_GRADIENT, 1, minDist=size, **params)
        circles = numpy.empty((0, 3)) if ret is None else ret[0, ...]
        if debug:
            numpy.copyto(debug_im, fr)
            [annot_target(c[0], c[1], c[2], debug_im) for c in circles]
            cviter._debugWindow(debug, circles_iter.func_name, ns)
        yield circles
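A sketch of feeding the generator from a video file; input.avi is a hypothetical path, and the frames iterable only needs to yield BGR ndarrays:

import cv2

def frames(path):
    cap = cv2.VideoCapture(path)
    while True:
        ok, fr = cap.read()
        if not ok:
            break
        yield fr

for circles in circles_iter(frames('input.avi'), blur=5, minFraction=0.1, maxFraction=0.9):
    print len(circles), 'circle(s) in this frame'  # Python 2, matching the snippet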
Example #30
	def backgroundRemoval(self, image, background):
		try:
			# Blur image and background
			im_blur = cv2.blur(image, (self.blur, self.blur))
			im_bg_blur = cv2.blur(background, (self.blur, self.blur))

			# Convert to YCrCb
			im_YCrCb = cv2.cvtColor(im_blur, cv2.COLOR_BGR2YCR_CB)
			im_bg_YCrCb = cv2.cvtColor(im_bg_blur, cv2.COLOR_BGR2YCR_CB)

			# Subtract background from image
			diff = cv2.absdiff(im_YCrCb, im_bg_YCrCb)

			# Split Y Cr Cb channels
			channels = cv2.split(diff)

			# Threshold on each channel
			y_img = self.thresholding(channels[0], self.th_Y_min, self.th_Y_max)
			cr_img = self.thresholding(channels[1], self.th_CR_min, self.th_CR_max)
			cb_img = self.thresholding(channels[2], self.th_CB_min, self.th_CB_max)

			# Define kernel for morphology edit
			kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (17, 17))

			# Dilate & erode each layer
			y_img = cv2.erode(cv2.dilate(y_img, kernel), kernel)
			cr_img = cv2.erode(cv2.dilate(cr_img, kernel), kernel)
			cb_img = cv2.erode(cv2.dilate(cb_img, kernel), kernel)

			# Sum channels together (cv2.add takes two sources; the third positional arg is dst)
			sum = cv2.add(cv2.add(y_img, cr_img), cb_img)

			# Define new kernel for morphology edit
			#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (17, 17))

			# Dilate & erode	
			sum = cv2.erode(cv2.dilate(sum, kernel), kernel)

			# Bitwise not
			sum = cv2.bitwise_not(sum)

			#total = cv2.merge([y_img, cr_img, cb_img])
			#fg = cv2.add(self.im_orig, sum)
			if self.debugType == 'mask':
				cv2.imshow('debug', sum)
		except Exception as detail:
			print "ERROR: Background removal (", detail, ")"
			sys.exit(1)
		return sum
Example #31
def preprocess(base_path, input_image_path, output_image_path):

    img = cv2.imread(input_image_path, 0)

    # Invert the image
    img = 255 - img

    ret, thresh = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    cv2.imwrite(base_path + 'threshold1.jpg', thresh)
    blur = cv2.blur(thresh, (5, 5))
    cv2.imwrite(base_path + 'blur.jpg', blur)

    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(blur, kernel, iterations=1)
    cv2.imwrite(base_path + 'erosion.jpg', erosion)
    ret, thresh2 = cv2.threshold(erosion, 12, 255, cv2.THRESH_BINARY)
    cv2.imwrite(base_path + 'threshold2.jpg', thresh2)

    kernel = np.ones((3, 2), np.uint8)
    mask = cv2.dilate(thresh2, kernel, iterations=1)
    cv2.imwrite(base_path + 'dilate.jpg', mask)

    rows, cols = mask.shape

    # cropping
    # refPt = []
    # cropping = True

    # def click_and_crop(event, x, y, flags, param):
    #         global refPt, cropping
    #
    #         # if the left mouse button was clicked, record the starting
    #         # (x, y) coordinates and indicate that cropping is being
    #         # performed
    #         if event == cv2.EVENT_LBUTTONDOWN:
    #                 refPt = [(x, y)]
    #                 cropping = True
    #
    #         # check to see if the left mouse button was released
    #         elif event == cv2.EVENT_LBUTTONUP:
    #                 # record the ending (x, y) coordinates and indicate that
    #                 # the cropping operation is finished
    #                 refPt.append((x, y))
    #                 cropping = False
    #
    #                 # draw a rectangle around the region of interest
    #                 cv2.rectangle(mask, refPt[0], refPt[1], (255, 255, 255), 2)
    #                 cv2.namedWindow('image',cv2.WINDOW_NORMAL)
    #                 cv2.resizeWindow('image',  rows,cols)
    #                 cv2.imshow("image", mask)


    # load the image, clone it, and setup the mouse callback function
    # clone = mask.copy()
    cv2.namedWindow("image", cv2.WINDOW_NORMAL)
    cv2.resizeWindow('image', rows, cols)
    # cv2.setMouseCallback("image", click_and_crop)

    # keep looping until the 'q' key is pressed
    # while True:
    #         # display the image and wait for a keypress
    #         cv2.namedWindow("image" ,cv2.WINDOW_NORMAL)
    #         cv2.resizeWindow('image',  rows,cols)
    #         cv2.imshow("image", mask)
    #         key = cv2.waitKey(1) & 0xFF
    #
    #         # if the 'r' key is pressed, reset the cropping region
    #         if key == ord("r"):
    #                 image = clone.copy()
    #
    #         # if the 'c' key is pressed, break from the loop
    #         elif key == ord("c"):
    #                 break
    #
    # if there are two reference points, then crop the region of interest from the image and display it
    # if len(refPt) == 2:
    #         roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
    cv2.imwrite(output_image_path, mask)

    cv2.namedWindow("preprocessed_image", cv2.WINDOW_NORMAL)
    # cv2.resizeWindow('ROI',refPt[0][1]-refPt[1][1], refPt[0][0]-refPt[1][0] )
    # cv2.imshow("ROI", mask)

    # cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #32
sigmaColor = 0.3
sigmaSpace = 75

# with m = 1 the input image will not change
filter = 'b'  # box filter

while True:

    # add noise to image
    N = np.random.rand(*I.shape) * noise_sigma
    N=N.astype(np.float32)
    J = I + N
    J=J.astype(np.float32)

    if filter == 'b':
        K=cv2.blur(J,(m,m))
    elif filter == 'g':
        # filter with a Gaussian filter
        K=cv2.GaussianBlur(J,(gm,gm),0)
    elif filter == 'l':
        # filter with a bilateral filter
        K=cv2.bilateralFilter(J,size, sigmaColor, sigmaSpace)

    # filtered image

    cv2.imshow('demo ', K)
    key = cv2.waitKey(30) & 0xFF

    if key == ord('b'):
        filter = 'b'  # box filter
        print('Box filter')
Example #33
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('images/opencv-logo.png')

blur = cv2.blur(img,(5,5))

plt.subplot(121),plt.imshow(img),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(blur),plt.title('Blurred')
plt.xticks([]), plt.yticks([])
plt.show()
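One caveat: cv2.imread returns BGR while matplotlib expects RGB, so the plot above shows swapped channels; converting first fixes the colours:

img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
blur_rgb = cv2.cvtColor(blur, cv2.COLOR_BGR2RGB)
plt.subplot(121),plt.imshow(img_rgb),plt.title('Original')
plt.subplot(122),plt.imshow(blur_rgb),plt.title('Blurred')
plt.show()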
Example #34
ap.add_argument("-i", "--image", required=True, help="Path to the image")

# parsing the argument
args = vars(ap.parse_args())

# reading the image location through args
# and reading the image using cv2.imread
image = cv2.imread(args["image"])
cv2.imshow("Original", image)

# defining the kernel sizes for the blur operations
kernelSizes = [(3, 3), (9, 9), (15, 15)]

# displaying the different levels of average blurring with the kernels
for (kX, kY) in kernelSizes:
    blurred = cv2.blur(image, (kX, kY))
    cv2.imshow("Average ({},{})".format(kX, kY), blurred)
    cv2.waitKey(0)

# destroying all windows
cv2.destroyAllWindows()
cv2.imshow("Original", image)

# displaying the different levels of gaussian blurring with the kernels
for (kX, kY) in kernelSizes:
    blurred = cv2.GaussianBlur(image, (kX, kY), 0)
    cv2.imshow("Gaussian ({},{})".format(kX, kY), blurred)
    cv2.waitKey(0)

# destroying all windows
cv2.destroyAllWindows()
Example #35
    if out.min() < 0:
        low_clip = -1.
    else:
        low_clip = 0.
    out = np.clip(out, low_clip, 1.0)
    out = np.uint8(out * 255)
    #cv.imshow("gasuss", out)
    return out


gasNoiseImg = gasuss_noise(img, 0.05)
#cv2.imshow('noise',gasNoiseImg)
#cv2.imwrite('gasNoiseImg.png',gasNoiseImg)

#meanfilter
dst1 = cv2.blur(spNoiseImg, (3, 3))
dst2 = cv2.blur(spNoiseImg, (5, 5))
dst3 = cv2.blur(spNoiseImg, (7, 7))

dst4 = cv2.blur(gasNoiseImg, (3, 3))
dst5 = cv2.blur(gasNoiseImg, (5, 5))
dst6 = cv2.blur(gasNoiseImg, (7, 7))

dst7 = cv2.medianBlur(spNoiseImg, 3)
dst8 = cv2.medianBlur(spNoiseImg, 5)
dst9 = cv2.medianBlur(spNoiseImg, 7)

dst10 = cv2.medianBlur(gasNoiseImg, 3)
dst11 = cv2.medianBlur(gasNoiseImg, 5)
dst12 = cv2.medianBlur(gasNoiseImg, 7)
Example #36
import cv2

from qualipy.utils.focus_measure import *

IMAGE = cv2.imread('tests/images/lama.jpg', 0)
BLURRED = cv2.blur(IMAGE, (10, 10))


def test_LAPV_returns_less_for_blurred_image():
    assert LAPV(BLURRED) < LAPV(IMAGE)


def test_LAPM_returns_less_for_blurred_image():
    assert LAPM(BLURRED) < LAPM(IMAGE)


def test_TENG_returns_less_for_blurred_image():
    assert TENG(BLURRED) < TENG(IMAGE)


def test_MLOG_returns_less_for_blurred_image():
    assert MLOG(BLURRED) < MLOG(IMAGE)
Example #37
def imageBlur(filename):
    img = cv.imread(filename)
    blurImg = cv.blur(img, (20, 20))
    return blurImg
Example #38
    def compositeOverlayTopdown(self, base, new, blend_px=21):
        h, w, d = base.shape
        #print "h=%d w=%d d=%d" % ( h, w, d)

        # combine using masks and an add operation (assumes pixel
        # image data will always be at least a little non-zero)

        # create an inverse mask of the current accumulated imagery
        basegray = cv2.cvtColor(base, cv2.COLOR_BGR2GRAY)
        ret, base_mask_inv = cv2.threshold(basegray, 1, 255,
                                           cv2.THRESH_BINARY_INV)
        #cv2.imshow('base_mask_inv', base_mask_inv)

        # create an inverse mask of the new region to be added
        newgray = cv2.cvtColor(new, cv2.COLOR_BGR2GRAY)
        ret, new_mask = cv2.threshold(newgray, 1, 255, cv2.THRESH_BINARY_INV)
        #cv2.imshow('new_mask', new_mask)

        blendsize = (blend_px, blend_px)
        kernel = np.ones(blendsize, 'uint8')
        base_mask_dilate = cv2.dilate(base_mask_inv, kernel)
        #cv2.imshow('base_mask_dilate', base_mask_dilate)
        base_mask_blur = cv2.blur(base_mask_dilate, blendsize)
        #cv2.imshow('base_mask_blur', base_mask_blur)

        base_mask_blur_inv = 255 - base_mask_blur
        #cv2.imshow('base_mask_blur_inv', base_mask_blur_inv)
        base_mask_blur_inv = base_mask_blur_inv | new_mask
        #cv2.imshow('base_mask_blur_inv2', base_mask_blur_inv)

        new[:, :, 0] = new[:, :, 0] * (base_mask_blur / 255.0)
        new[:, :, 1] = new[:, :, 1] * (base_mask_blur / 255.0)
        new[:, :, 2] = new[:, :, 2] * (base_mask_blur / 255.0)
        #cv2.imshow('new masked', new)

        base[:, :, 0] = base[:, :, 0] * (base_mask_blur_inv / 255.0)
        base[:, :, 1] = base[:, :, 1] * (base_mask_blur_inv / 255.0)
        base[:, :, 2] = base[:, :, 2] * (base_mask_blur_inv / 255.0)
        #cv2.imshow('base masked', base)

        fast = True
        if fast:
            # Now clip the new imagery against the area already covered
            #new = cv2.add(base, new, mask=mask_inv)

            # And combine ...
            base = cv2.add(base, new)

        else:
            # alpha blend using the mask as the alpha value, works but
            # is done the hardway because I can't find a native opencv
            # way to do this.
            mask_blur = cv2.blur(base_mask_inv, (50, 50))  # presumably the inverse base mask computed above
            for i in xrange(h):
                for j in xrange(w):
                    #(r0, g0, b0) = base[i][j]
                    #(r1, g1, b1) = new[i][j]
                    #a = mask_blur[i][j] / 255.0
                    #r = r0*(1.0-a) + r1*a
                    #g = g0*(1.0-a) + g1*a
                    #b = b0*(1.0-a) + b1*a
                    #base = (r, g, b)
                    b = base[i][j]
                    n = new[i][j]
                    a = mask_blur[i][j] / 255.0
                    if n[0] + n[1] + n[2] > 0:
                        base[i][j][0] = b[0] * (1.0 - a) + n[0] * a
                        base[i][j][1] = b[1] * (1.0 - a) + n[1] * a
                        base[i][j][2] = b[2] * (1.0 - a) + n[2] * a

        #cv2.imshow('base', base)
        #cv2.waitKey()

        return base
Example #39
# Starting with 100's to prevent error while masking
h,s,v = 100,100,100
def nothing(x): 
    pass

# Creating track bar
cv2.createTrackbar('h', 'HSV_TrackBar',0,179,nothing)
cv2.createTrackbar('s', 'HSV_TrackBar',0,255,nothing)
cv2.createTrackbar('v', 'HSV_TrackBar',0,255,nothing)


# Real Code_Starts_Here
while(1):
    start_time = time.time()
    ret, frame = cam.read() #capture frames from the camera
    blur = cv2.blur(frame,(3,3)) #Blur the image
    hsv = cv2.cvtColor(blur,cv2.COLOR_BGR2HSV) #Convert to HSV color space
    
    #Create a binary image with where white will be skin colors and rest is black
    mask2 = cv2.inRange(hsv,np.array([2,50,50]),np.array([15,255,255]))
    
    #Kernel matrices for morphological transformation    
    kernel_square = np.ones((11,11),np.uint8)
    kernel_ellipse= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    
    #Perform morphological transformations to filter out the background noise
    #Dilation increases the skin color area
    #Erosion shrinks it again, removing small specks of noise
    dilation = cv2.dilate(mask2,kernel_ellipse,iterations = 1)
    erosion = cv2.erode(dilation,kernel_square,iterations = 1)    
    dilation2 = cv2.dilate(erosion,kernel_ellipse,iterations = 1)    
Example #40
cap = cv.VideoCapture(0)
#cv.namedWindow('Trackbar',cv.WINDOW_AUTOSIZE)

cv.createTrackbar('B', 'Trackbar', 0, 255, nothing)
cv.createTrackbar('W', 'Trackbar', 0, 255, nothing)

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    B = cv.getTrackbarPos('B', 'Trackbar')
    W = cv.getTrackbarPos('W', 'Trackbar')

    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    blur = cv.blur(gray, (2, 2))

    _, thres = cv.threshold(blur, 170, 255, cv.THRESH_BINARY_INV)

    #    cnt = contours(thres,frame)
    _, contours, _ = cv.findContours(thres, cv.RETR_TREE,
                                     cv.CHAIN_APPROX_SIMPLE)

    lines = cv.HoughLinesP(thres, 1, np.pi / 180, 100,
                           minLineLength=150, maxLineGap=150)
    #print(lines.shape)
    leftmost = 0
    rightmost = 0
    topmost = 0
    bottommost = 0

    for i in range(0, len(contours)):
Example #41
    fmask = cv2.threshold(fmask, 10, 255, 0)[1]
    ####### Morphological Processing #########
    fmask = cv2.erode(fmask,
                      cv2.getStructuringElement(cv2.MORPH_ERODE, (2, 2)),
                      iterations=2)
    mask1=cv2.morphologyEx(fmask,cv2.MORPH_CLOSE,\
                           cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(4,4)))
    mask1 = cv2.erode(mask1,
                      cv2.getStructuringElement(cv2.MORPH_ERODE, (2, 2)),
                      iterations=2)
    #cv2.imshow('mask1',mask1);
    fg_frame = cv2.bitwise_and(roi, roi, mask=mask1)
    #cv2.imshow('fg_frame',fg_frame);

    gr_frame = cv2.cvtColor(fg_frame, cv2.COLOR_BGR2GRAY)
    gr_frame = cv2.blur(gr_frame, (10, 10))
    bw_frame = cv2.threshold(gr_frame, 50, 255, 0)[1]

    ############ Tracking the hand contour ################

    con = cv2.findContours(bw_frame, cv2.RETR_EXTERNAL,
                           cv2.CHAIN_APPROX_SIMPLE)[0]
    try:
        my_con = max(con, key=cv2.contourArea)
    except:
        my_con = np.array([[[1, 0], [1, 2], [2, 3]]], dtype=np.int32)
        #pass;
    try:
        if cv2.contourArea(my_con) > 90:

            hull = cv2.convexHull(my_con, True)
Example #42
 def process_image(self, frame):
     grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     blur = cv2.blur(grey, (7, 7))
     edges = cv2.Canny(blur, 15.0, 30.0)
     return edges
Example #43
    count = 1935
    for i in range(121):
        img = cv2.imread(path + folder + '/' + str(i) + '.jpg')
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for i in range(5):
            thresh_value = i * 20
            ret1, th1 = cv2.threshold(gray_img, 100 + thresh_value, 255,
                                      cv2.THRESH_BINARY)
            count += 1
            cv2.imwrite(path + folder + '/' + str(count) + '.jpg', th1)
            ret2, th2 = cv2.threshold(gray_img, 100 + thresh_value, 255,
                                      cv2.THRESH_OTSU)
            count += 1
            cv2.imwrite(path + folder + '/' + str(count) + '.jpg', th2)

#Filter variation (Blur and sharpen)
for folder in os.listdir(path):
    count = 3145
    for i in range(121):
        img = cv2.imread(path + folder + '/' + str(i) + '.jpg')
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for i in range(2, 10, 2):
            ksize = (i, i)
            blurredimg = cv2.blur(gray_img, ksize)
            count += 1
            cv2.imwrite(path + folder + '/' + str(count) + '.jpg', blurredimg)
        for i in range(9, 12, 1):
            kernel = np.array([[-1, -1, -1], [-1, i, -1], [-1, -1, -1]])
            sharpen = cv2.filter2D(gray_img, -1, kernel)
            count += 1
            cv2.imwrite(path + folder + '/' + str(count) + '.jpg', sharpen)
Example #44
def trackingthread(argv):
    global positions
    global calibrated
    global fs
    global transform
    global transform_size

    process_arguments(argv)

    # The size of the board in inches, measured between the two
    # robot boundaries:
    board_size = [16, 16]

    # Number of pixels to display per inch in the final transformed image. This
    # was selected somewhat arbitrarily (I chose 17 because it fit on my screen):
    dpi = 17

    transform_size = (int(board_size[0] * dpi), int(board_size[1] * dpi))
    cap = open_camera(camera_id)

    # Calculate the perspective transform matrix
    transform = get_transform_matrix(cap, board_size, dpi, 'calibrations.txt')

    # saturation ranges for targeting desired colour
    sat1 = 70
    sat2 = 40
    sat_range = 20

    cv2.namedWindow('frame', cv2.WINDOW_AUTOSIZE)

    snake_rectangle_list = []
    snake_centroid_list = []

    show_rectangle = True
    show_centroid = True

    # can take a while for camera to focus on first use
    # code below allows for a 2 second delay
    frame = get_frame(cap)
    sleep(1)
    frame = get_frame(cap)
    sleep(1)

    while True:

        # read the frame
        frame = get_frame(cap)
        frame_no_blur = frame.copy()

        # smooth it
        frame = cv2.blur(frame, (3, 3))

        # shows frame without drawings or edits
        #cv2.imshow('frame2',frame)

        # two-point colour tracking
        # convert to hsv and find range of colors
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        thresh_origin, thresh_direction = get_thresholds(
            hsv, sat1, sat2, sat_range)

        #cv2.imshow('dire',thresh_direction)
        cx_origin, cy_origin = find_centroid(thresh_origin, frame)
        cx_direction, cy_direction = find_centroid(thresh_direction, frame)

        if show_centroid:
            if cx_origin is not None and cx_direction is not None:
                # finding centroids of best_cnt and draw a circle there
                cv2.circle(frame, (cx_origin, cy_origin), 5, 255, -1)
                cv2.circle(frame, (cx_direction, cy_direction), 5, 255, -1)

                # draw line
                draw_directed_line(frame_no_blur, cx_origin, cy_origin,
                                   cx_direction, cy_direction)

        # calibrate
        if not calibrated and (cx_origin, cy_origin) != (0, 0):
            set_calibration(cx_origin, cy_origin)
            calibrated = True

        if cx_origin is not None and cx_direction is not None:
            # work out current angle
            angle = calculate_angle(cx_origin, cy_origin, cx_direction,
                                    cy_direction)

            # add position to list
            add_position((get_elapsedtime(), cx_origin, cy_origin, angle,
                          cx_direction, cy_direction))

            # draw path, the last 100 positions
            #draw_path(frame)

            # print position details
            draw_text(frame_no_blur, (get_elapsedtime(), cx_origin, cy_origin,
                                      angle, cx_direction, cy_direction))

        # display current frame
        cv2.imshow('frame', frame_no_blur)

        # Show it, if key pressed is 'Esc', exit the loop
        k = cv2.waitKey(1)
        #print k
        if k == 27:  # esc
            break
        elif k == 114:  # r
            reset_calibration()
        elif k == 97:  # a
            sat1 += 5
        elif k == 122:  # z
            sat1 -= 5
        elif k == 115:  # s
            sat2 += 5
        elif k == 120:  # x
            sat2 -= 5

        # print str(sat1) + ', ' + str(sat2)

    # Clean up everything before leaving
    cleanup(cap)

    # Log positions to file
    write_positions_to_file()
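Helpers such as get_frame, find_centroid and calculate_angle are defined elsewhere in this project and are not shown. Purely as an illustration, calculate_angle could be sketched with atan2 (the angle convention here is an assumption, not the project's actual code):

import math

def calculate_angle(cx_origin, cy_origin, cx_direction, cy_direction):
    # Hypothetical sketch: heading from the origin marker to the direction
    # marker, in degrees, counter-clockwise from the x-axis.
    dx = cx_direction - cx_origin
    dy = cy_direction - cy_origin
    return math.degrees(math.atan2(-dy, dx)) % 360  # -dy: image y grows down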
Example #45
def randomBlur(self, bgr):
    if random.random() < 0.5:
        bgr = cv2.blur(bgr, (5, 5))
    return bgr
Example #46
def graycode_analysis(screen_list, path):

    print('------------------------')
    print('Analyse Graycode Pattern')
    print('------------------------')

    # load configuration
    filename = path + 'graycode_config.json'
    with open(filename, 'r') as f:
        config = json.load(f)

    img_HW = config['camera']['height'], config['camera']['width']

    N = config['num_projectors']
    proj_whole_HW = config['projector_whole_HW']
    proj_x_stack, proj_y_stack = [], []
    azimuth_stack, polar_stack = [], []
    overlap_x, overlap_y = [], []
    overlap_weight = []

    p = util.Propeller()

    for n in range(N):
        print('----- %d/%d -----' % (n + 1, N))

        proj_id = n + 1
        config_sub = config['parameters']['projector_%d' % proj_id]
        proj_HW = config_sub['y_num_pixel'], config_sub['x_num_pixel']
        x_starting = config_sub['x_starting']
        y_starting = config_sub['y_starting']

        scr = screen_list[n]
        i1, i2, j1, j2 = scr.get_evaluation_area_index()

        print('Decoding Gray-code pattern...', end='')
        p.start()

        # reference image
        if not (config_sub['xgraycode_PN'] and config_sub['ygraycode_PN']):
            filename = path + 'gray_proj%d_grey.jpg' % proj_id
            img_ref = imread(filename)
            img_ref = shift_horizontal(img_ref, scr.horizontal_shift)
            img_ref = add_equirectangular_margin(img_ref, margin[1], margin[0])
            img_ref = img_ref[i1:i2, j1:j2, :]

        # ----- x-axis -----
        BGR = config_sub['xgraycode_BGR']
        PN = config_sub['xgraycode_PN']
        num_imgs = config_sub['xgraycode_num_image']
        nbits = config_sub['xgraycode_num_bits']
        offset = config_sub['xgraycode_offset']
        imgs_code = np.empty([i2 - i1, j2 - j1, nbits], dtype=bool)
        for i in range(num_imgs):
            # load image
            filename = path + 'gray_proj%d_x%d_posi.jpg' % (proj_id, i)
            img = imread(filename)
            img = shift_horizontal(img, scr.horizontal_shift)
            img = add_equirectangular_margin(img, margin[1], margin[0])
            img = img[i1:i2, j1:j2, :]

            if PN:
                filename = path + 'gray_proj%d_x%d_nega.jpg' % (proj_id, i)
                img_nega = imread(filename)
                img_nega = shift_horizontal(img_nega, scr.horizontal_shift)
                img_nega = add_equirectangular_margin(img_nega, margin[1],
                                                      margin[0])
                img_nega = img_nega[i1:i2, j1:j2, :]

            # judge 0 or 1
            if BGR:
                if not PN:
                    code = (img > img_ref)
                else:
                    code = (img > img_nega)
                for j in range(3):
                    if (3 * i + j) >= nbits: break
                    imgs_code[:, :, 3 * i + j] = code[:, :, j]
            else:
                if not PN:
                    code = (img[:, :, 1] > img_ref[:, :, 1])  # green layer
                else:
                    code = (img[:, :, 1] > img_nega[:, :, 1])
                imgs_code[:, :, i] = code
        # decode
        imgs_bin = np.empty_like(imgs_code, dtype=bool)
        imgs_bin[:, :, 0] = imgs_code[:, :, 0]
        for i in range(1, nbits):
            imgs_bin[:, :, i] = np.logical_xor(imgs_bin[:, :, i - 1],
                                               imgs_code[:, :, i])
        weight = 2**np.arange(nbits)[::-1].reshape(1, 1, -1)
        proj_x = np.sum(imgs_bin * weight, axis=-1).astype(np.float32)
        proj_x += x_starting - offset

        # ----- y-axis -----
        BGR = config_sub['ygraycode_BGR']
        PN = config_sub['ygraycode_PN']
        num_imgs = config_sub['ygraycode_num_image']
        nbits = config_sub['ygraycode_num_bits']
        offset = config_sub['ygraycode_offset']
        imgs_code = np.empty([i2 - i1, j2 - j1, nbits], dtype=bool)
        for i in range(num_imgs):
            # load image
            filename = path + 'gray_proj%d_y%d_posi.jpg' % (proj_id, i)
            img = imread(filename)
            img = shift_horizontal(img, scr.horizontal_shift)
            img = add_equirectangular_margin(img, margin[1], margin[0])
            img = img[i1:i2, j1:j2, :]

            if PN:
                filename = path + 'gray_proj%d_y%d_nega.jpg' % (proj_id, i)
                img_nega = imread(filename)
                img_nega = shift_horizontal(img_nega, scr.horizontal_shift)
                img_nega = add_equirectangular_margin(img_nega, margin[1],
                                                      margin[0])
                img_nega = img_nega[i1:i2, j1:j2, :]

            # judge 0 or 1
            if BGR:
                if not PN:
                    code = (img > img_ref)
                else:
                    code = (img > img_nega)
                for j in range(3):
                    if (3 * i + j) >= nbits: break
                    imgs_code[:, :, 3 * i + j] = code[:, :, j]
            else:
                if not PN:
                    code = (img[:, :, 1] > img_ref[:, :, 1])  # green layer
                else:
                    code = (img[:, :, 1] > img_nega[:, :, 1])
                imgs_code[:, :, i] = code

        # decode
        imgs_bin = np.empty_like(imgs_code, dtype=bool)
        imgs_bin[:, :, 0] = imgs_code[:, :, 0]
        for i in range(1, nbits):
            imgs_bin[:, :, i] = np.logical_xor(imgs_bin[:, :, i - 1],
                                               imgs_code[:, :, i])
        weight = 2**np.arange(nbits)[::-1].reshape(1, 1, -1)
        proj_y = np.sum(imgs_bin * weight, axis=-1).astype(np.float32)
        proj_y += y_starting - offset

        # remove pulse noise from bit errors
        proj_x = cv2.medianBlur(proj_x, KSIZE_MEDIAN_FILTER)
        proj_y = cv2.medianBlur(proj_y, KSIZE_MEDIAN_FILTER)

        # smoothing
        proj_x = cv2.blur(proj_x, (KSIZE_SMOOTHING_X, KSIZE_SMOOTHING_Y))
        proj_y = cv2.blur(proj_y, (KSIZE_SMOOTHING_X, KSIZE_SMOOTHING_Y))

        plt.subplot(211)
        plt.imshow(proj_x, cmap=plt.cm.jet)
        plt.subplot(212)
        plt.imshow(proj_y, cmap=plt.cm.jet)
        plt.savefig(path + 'plt_decode_%d.pdf' % proj_id)
        plt.close()

        # pixel direction
        polar, azimuth = scr.get_direction_meshgrid()

        #
        index_masker = scr.get_masked_index()
        proj_x_sample = proj_x[index_masker]
        proj_y_sample = proj_y[index_masker]
        polar_sample = polar[index_masker]
        azimuth_sample = azimuth[index_masker]
        points_sample = np.c_[proj_x_sample, proj_y_sample]

        proj_x = proj_x[np.where((x_starting <= proj_x)
                                 & (proj_x <= (x_starting + proj_HW[1])))]
        proj_y = proj_y[np.where((y_starting <= proj_y)
                                 & (proj_y <= (y_starting + proj_HW[0])))]

        p.end()

        # interpolate points candidate
        print('Refining projector pixel...', end='')
        p.start()
        x1 = int(np.ceil(proj_x_sample.min()))
        x2 = int(proj_x_sample.max())
        y1 = int(np.ceil(proj_y_sample.min()))
        y2 = int(proj_y_sample.max())
        proj_y_interp_cand, proj_x_interp_cand = np.mgrid[y1:y2, x1:x2]
        points_interp_cand = np.c_[proj_x_interp_cand.reshape(-1),
                                   proj_y_interp_cand.reshape(-1)]
        # determine interpolate points
        n_poly = 100  # number of polygons (a very rough guideline)
        k = int(2 * (np.sqrt(len(points_sample)) / (n_poly / 4))**2 * np.pi)
        hull = concavehull.concavehull(points_sample, k)
        inside = concavehull.check_inside(points_interp_cand, hull)
        i_inside_hull = np.where(inside)[0]
        points_interp = points_interp_cand[i_inside_hull, :]

        p.end()

        # interpolation
        print('Estimating pixel direction...', end='')
        p.start()
        f = LinearNDInterpolator(points_sample, polar_sample)
        polar_interp = f(points_interp)

        f = LinearNDInterpolator(points_sample, azimuth_sample)
        azimuth_interp = f(points_interp)

        i_inside_area = np.where((scr.area_polar[0] <= polar_interp)
                                 & (polar_interp <= scr.area_polar[1])
                                 & (scr.area_azimuth[0] <= azimuth_interp)
                                 & (azimuth_interp <= scr.area_azimuth[1]))[0]

        proj_x_stack.append(points_interp[i_inside_area, 0])
        proj_y_stack.append(points_interp[i_inside_area, 1])
        azimuth_stack.append(azimuth_interp[i_inside_area])
        polar_stack.append(polar_interp[i_inside_area])

        # overlap weighting
        if scr.overlap_angle >= 0:
            left_side, right_side = scr.area_azimuth
            left_ovlp_end = left_side + scr.overlap_angle
            right_ovlp_end = right_side - scr.overlap_angle

            i_ovlp_left = np.where((left_side <= azimuth_stack[n])
                                   & (azimuth_stack[n] < left_ovlp_end))[0]
            overlap_x.append(proj_x_stack[n][i_ovlp_left])
            overlap_y.append(proj_y_stack[n][i_ovlp_left])
            azim_ovlp = azimuth_stack[n][i_ovlp_left]
            weight = (azim_ovlp - left_side) / (left_ovlp_end - left_side)
            overlap_weight.append(weight)

            i_ovlp_right = np.where((right_ovlp_end < azimuth_stack[n])
                                    & (azimuth_stack[n] <= right_side))[0]
            overlap_x.append(proj_x_stack[n][i_ovlp_right])
            overlap_y.append(proj_y_stack[n][i_ovlp_right])
            azim_ovlp = azimuth_stack[n][i_ovlp_right]
            weight = (azim_ovlp - right_side) / (right_ovlp_end - right_side)
            overlap_weight.append(weight)
        else:
            overlap_x.append([])
            overlap_y.append([])
            overlap_weight.append([])

        # cancel horizontal shift
        azimuth_stack[n] -= scr.horizontal_shift_deg

        p.end()

    proj_x_stack = np.hstack(proj_x_stack).astype(int)
    proj_y_stack = np.hstack(proj_y_stack).astype(int)
    azimuth_stack = np.hstack(azimuth_stack)
    polar_stack = np.hstack(polar_stack)

    overlap_x = np.hstack(overlap_x).astype(int)
    overlap_y = np.hstack(overlap_y).astype(int)
    overlap_weight = np.hstack(overlap_weight)

    overlap_tone_input = scr.tone_input
    overlap_tone_output = scr.tone_output

    mapper = (proj_x_stack, proj_y_stack, polar_stack, azimuth_stack,
              overlap_x, overlap_y, overlap_weight, overlap_tone_input,
              overlap_tone_output, np.array(proj_whole_HW))
    return mapper
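The decode loops above are the standard Gray-to-binary conversion: binary bit i is the running XOR of Gray bits 0..i, and the bit planes are then weighted by descending powers of two. A toy example of the same idea, independent of the function above:

import numpy as np

# Gray code 110 encodes decimal 4: b0 = 1, b1 = 1 ^ 1 = 0, b2 = 0 ^ 0 = 0.
gray_bits = np.array([True, True, False])
bin_bits = np.empty_like(gray_bits)
bin_bits[0] = gray_bits[0]
for i in range(1, len(gray_bits)):
    bin_bits[i] = np.logical_xor(bin_bits[i - 1], gray_bits[i])
weights = 2 ** np.arange(len(bin_bits))[::-1]  # [4, 2, 1]
print(int(np.sum(bin_bits * weights)))  # -> 4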
Example #47
        return count
    else:
        return 1


cap = cv2.VideoCapture('C:/Users/herom/Videos/AKIRA  金田と鉄雄の対決-Vk_AuM6ozks.mp4')
ret, frame = cap.read()
# get the image size
height, width = frame.shape[:2]
print(height)
print(width)
while cap.isOpened():
    ret, frame = cap.read()

    # blur to suppress noise
    blur = cv2.blur(frame, (3, 3))
    # convert to grayscale
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)

    # extract edges with Canny
    edges = cv2.Canny(gray, Edge_min, Edge_max)

    # binarize
    ret, thresh2 = cv2.threshold(edges, 127, 255, cv2.THRESH_BINARY)

    #output_image
    output = thresh2

    # extract the contours
    contours, hierarchy = cv2.findContours(thresh2, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
Example #48
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret:
        frame = resize_frame(frame)
        orig = frame

        bw_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)  # camera frames are BGR, not RGB
        high_thresh, thresh_im = cv.threshold(bw_frame, 0, 255,
                                              cv.THRESH_BINARY + cv.THRESH_OTSU)
        # cv.imshow('thres',thresh_im)
        lowThresh = 0.5*high_thresh
        # print(high_thresh, lowThresh)
        B, G, R = cv.split(frame)  # OpenCV channel order is BGR
        # smooth = cv.bilateralFilter(frame, 10, 25, 51)
        smooth = cv.blur(frame, (2, 2))
        # cv.imshow('test',smooth)

        # GENERATE CANNY EDGES
        canny = cv.Canny(smooth, 100, high_thresh)

        sigma = 0.6
        v = np.median(smooth)
        lower = int(max(0, (1.0 - sigma) * v))
        upper = int(min(255, (1.0 + sigma) * v))
        # print(lower, upper)
        # canny = cv.Canny(smooth, lower, upper)
        #TODO: ADD TO RET FRAMES
        # cv.imshow('canny_new',canny)

        ## FIND CONTOURS
Example #49

parser.add_argument('--option2')
args = parser.parse_args()
##### read the image #############
img_name = os.path.split(args.img)[-1]
img_name, ext = os.path.splitext(img_name)
input = cv.imread(args.img, flags=cv.IMREAD_COLOR)
temp = input
###### processing module ###########
# image perturbation
if args.mode:
    commands = ['blur', 'rotate', 'light', 'scale', 'shift']
    assert (args.mode in commands)
    if args.mode == 'blur':
        if args.option1:
            input = cv.blur(
                input,
                (int(args.option1), int(args.option1)))  # changing the blur size means editing the code
            img_name = img_name + "_blur_" + args.option1
        else:
            input = cv.blur(input, (5, 5))
            img_name = img_name + "_blur_5"
    elif args.mode == 'rotate':
        h, w = input.shape[:2]  # height and width
        center = (w // 2, h // 2)  # rotation center
        if args.option1:
            R_M = cv.getRotationMatrix2D(
                center, float(args.option1), 1.0
            )  # rotation matrix: arg 1 is the center, arg 2 the angle (counter-clockwise), arg 3 the scale (1 keeps the original size)
            print(R_M)
            img_name = img_name + "_rotate_" + args.option1
        else:
Example #50
def stgcn_visualize(pose,
                    edge,
                    feature,
                    video,
                    label=None,
                    label_sequence=None,
                    height=1080):

    _, T, V, M = pose.shape
    T = len(video)
    pos_track = [None] * M
    for t in range(T):
        frame = video[t]

        # image resize
        H, W, c = frame.shape
        frame = cv2.resize(frame, (height * W // H // 2, height // 2))
        H, W, c = frame.shape
        scale_factor = 2 * height / 1080

        # draw skeleton
        skeleton = frame * 0
        text = frame * 0
        for m in range(M):
            score = pose[2, t, :, m].mean()
            if score < 0.3:
                continue

            for i, j in edge:
                xi = pose[0, t, i, m]
                yi = pose[1, t, i, m]
                xj = pose[0, t, j, m]
                yj = pose[1, t, j, m]
                if xi + yi == 0 or xj + yj == 0:
                    continue
                else:
                    xi = int((xi + 0.5) * W)
                    yi = int((yi + 0.5) * H)
                    xj = int((xj + 0.5) * W)
                    yj = int((yj + 0.5) * H)
                cv2.line(skeleton, (xi, yi), (xj, yj), (255, 255, 255),
                         int(np.ceil(2 * scale_factor)))

            body_label = label_sequence[t // 4][m]
            x_nose = int((pose[0, t, 0, m] + 0.5) * W)
            y_nose = int((pose[1, t, 0, m] + 0.5) * H)
            x_neck = int((pose[0, t, 1, m] + 0.5) * W)
            y_neck = int((pose[1, t, 1, m] + 0.5) * H)

            half_head = int(((x_neck - x_nose)**2 + (y_neck - y_nose)**2)**0.5)
            pos = (x_nose + half_head, y_nose - half_head)
            if pos_track[m] is None:
                pos_track[m] = pos
            else:
                new_x = int(pos_track[m][0] + (pos[0] - pos_track[m][0]) * 0.2)
                new_y = int(pos_track[m][1] + (pos[1] - pos_track[m][1]) * 0.2)
                pos_track[m] = (new_x, new_y)
            cv2.putText(text, body_label, pos_track[m],
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5 * scale_factor,
                        (255, 255, 255))

        # generate mask
        mask = frame * 0
        feature = np.abs(feature)
        feature = feature / feature.mean()
        for m in range(M):
            score = pose[2, t, :, m].mean()
            if score < 0.3:
                continue

            f = feature[t // 4, :, m]**5
            if f.mean() != 0:
                f = f / f.mean()
            for v in range(V):
                x = pose[0, t, v, m]
                y = pose[1, t, v, m]
                if x + y == 0:
                    continue
                else:
                    x = int((x + 0.5) * W)
                    y = int((y + 0.5) * H)
                cv2.circle(mask, (x, y), 0, (255, 255, 255),
                           int(np.ceil(f[v]**0.5 * 8 * scale_factor)))
        blurred_mask = cv2.blur(mask, (12, 12))

        skeleton_result = blurred_mask.astype(float) * 0.75
        skeleton_result += skeleton.astype(float) * 0.25
        skeleton_result += text.astype(float)
        skeleton_result[skeleton_result > 255] = 255
        skeleton_result = skeleton_result.astype(np.uint8)

        rgb_result = blurred_mask.astype(float) * 0.75
        rgb_result += frame.astype(float) * 0.5
        rgb_result += skeleton.astype(float) * 0.25
        rgb_result[rgb_result > 255] = 255
        rgb_result = rgb_result.astype(np.uint8)

        put_text(skeleton, 'inputs of st-gcn', (0.1, 0.5))

        text_1 = cv2.imread('./resource/demo_asset/original_video.png', cv2.IMREAD_UNCHANGED)
        text_2 = cv2.imread('./resource/demo_asset/pose_estimation.png', cv2.IMREAD_UNCHANGED)
        text_3 = cv2.imread('./resource/demo_asset/attention+prediction.png', cv2.IMREAD_UNCHANGED)
        text_4 = cv2.imread('./resource/demo_asset/attention+rgb.png', cv2.IMREAD_UNCHANGED)
        
        blend(frame, text_1)
        blend(skeleton, text_2)
        blend(skeleton_result, text_3)
        blend(rgb_result, text_4)

        if label is not None:
            label_name = 'voting result: ' + label
            put_text(skeleton_result, label_name, (0.1, 0.5))

        img0 = np.concatenate((frame, skeleton), axis=1)
        img1 = np.concatenate((skeleton_result, rgb_result), axis=1)
        img = np.concatenate((img0, img1), axis=0)

        yield img
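put_text and blend are project helpers not included in this excerpt. A plausible sketch of blend, assuming the demo-asset PNGs carry an alpha channel (the top-left placement is also an assumption):

import numpy as np

def blend(background, overlay):
    # Hypothetical: alpha-composite a BGRA overlay onto the top-left
    # corner of a BGR frame, in place.
    h, w = overlay.shape[:2]
    alpha = overlay[:, :, 3:4].astype(float) / 255.0
    roi = background[:h, :w].astype(float)
    background[:h, :w] = (overlay[:, :, :3] * alpha
                          + roi * (1.0 - alpha)).astype(np.uint8)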
Example #51
    cv2.rectangle(img, (0, int(img.shape[1] * 0.3)),
                  (int(img.shape[0] * 0.5), img.shape[1]), (255, 0, 0), 2)
    crop_img = img[int(img.shape[1] * 0.3) + aBor:img.shape[0] - aBor,
                   aBor:int(img.shape[0] * 0.5) - aBor]

    if bGuardar:  # save the background image
        cv2.imwrite('fondo_box1.png', crop_img)
        bGuardar = False

    # read the saved background image
    fondo_box1 = cv2.imread('fondo_box1.png')

    # subtract the background from the image
    resta = cv2.subtract(fondo_box1, crop_img)  # plain difference
    #resta = cv2.absdiff(fondo_box1, crop_img)  # absolute difference

    # blur filter
    resta = cv2.blur(resta, (5, 5))

    # convert to grayscale
    grey = cv2.cvtColor(resta, cv2.COLOR_BGR2GRAY)

    # applying gaussian blur
    value = (15, 15)
    blurred = cv2.GaussianBlur(grey, value, 0)

    # thresholding: Otsu's binarization method
    _, thresh1 = cv2.threshold(blurred, 35, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)  

    # check OpenCV version to avoid unpacking error
    (version, _, _) = cv2.__version__.split('.')

    if version == '3':
Example #52
import numpy as np
import cv2
from matplotlib import pyplot as plt
#greyscale
img = cv2.imread('DeepakKanuri.jpg', 0)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img1 = cv2.imread('DeepakKanuri.jpg', 1)
#img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
cv2.imwrite('Deepak.png',img)

plt.imshow(img)
plt.show()
rows,cols = img.shape

#blur
blur = cv2.blur(img1, (20, 20))
cv2.imwrite('blur.png', blur)
blur = cv2.blur(img, (20, 20))
cv2.imwrite('blur1.png', blur)

#translated
trans = np.float32([[1, 0, 500], [0, 1, 100]])
transim = cv2.warpAffine(img, trans, (cols, rows))
cv2.imwrite('translatedimage.png', transim)
trans = np.float32([[1, 0, 500], [0, 1, 100]])
transim = cv2.warpAffine(img1, trans, (cols, rows))
cv2.imwrite('translatedimage1.png', transim)

#rotated
rotate = cv2.getRotationMatrix2D((cols / 2, rows / 2), 180, 1)
rotateim = cv2.warpAffine(img, rotate, (cols, rows))
Example #53
def blur(img, kernel_size=3):
    return cv2.blur(img, (kernel_size, kernel_size))
Example #54
def test_img(img_name):
    img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
    img_start = cv2.imread(img_name)
    white_black(img_name, "res2.jpg", 0.85)
    img = cv2.imread("res2.jpg", cv2.IMREAD_GRAYSCALE)
    blur = cv2.blur(img, (3, 3))  # blur the image
    ret, thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    hull = []
    # calculate points for each contour
    for i in range(len(contours)):
        hull.append(cv2.convexHull(contours[i], False))
    drawing = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)
    canvas = np.ones((img.shape[0], img.shape[1], 3), np.uint8) * 100
    for i in range(len(contours)):
        color_contours = (0, 255, 0)
        #color = (255, 0, 0)
        cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)
        #cv2.drawContours(canvas, hull, i, color, 1, 8)
    cv2.imshow('img1', img)
    cv2.waitKey(0)
    cv2.imshow('countur', drawing)
    cv2.waitKey(0)

    # Loop over all the contours found in the image.
    font = cv2.FONT_HERSHEY_COMPLEX
    page = []
    rotrect = cv2.minAreaRect(contours[0])
    for cnt in contours:
        if cv2.arcLength(cnt, True) > 800:
            approx = cv2.approxPolyDP(cnt, 0.012 * cv2.arcLength(cnt, True),
                                      True)
            cv2.drawContours(canvas, [approx], 0, (0, 0, 255), 5)
            n = approx.ravel()
            i = 0
            for j in n:
                if (i % 2 == 0):
                    help_arr = [n[i], n[i + 1]]
                    page.append(help_arr)
                    x = n[i]
                    y = n[i + 1]
                    string = str(x) + " " + str(y)
                    cv2.putText(canvas, string, (x, y), font, 0.5, (0, 255, 0))
                i = i + 1
            rotrect = cv2.minAreaRect(cnt)
    # EDITING THE ARRAY FOR CORRESPONDENCE
    # rotations are needed here

    # the bounding box
    box = cv2.boxPoints(rotrect)
    box = np.intp(box)  # np.int0 was an alias of np.intp, removed in NumPy 2.0
    cv2.drawContours(canvas, [box], 0, (0, 255, 255), 2)
    cv2.imshow('box', canvas)
    cv2.waitKey(0)

    # transition matrix and perspective transform
    (x1, y1), (x2, y2), angle = rotrect
    box1 = [[0, 0], [0, y2], [x2, y2], [x2, 0]]
    box1 = forvard_back(box1)
    box1 = max_X_sort(box1)
    box = np.array(box, np.float32)
    box1 = np.array(box1, np.float32)
    page = aprox_in_array(find_near(page, box), page)
    page = np.array(page, np.float32)
    matrix = cv2.getPerspectiveTransform(page, box1)
    result = cv2.warpPerspective(img_start, matrix, (int(x2), int(y2)))

    # Wrap the transformed image
    cv2.imshow('img transform', result)
    cv2.imwrite("res.jpg", result)
    cv2.waitKey(0)

    # brighten the paper background, remove small defects and reinforce lines
    # increase the contrast by 20%
    image = Image.open("res.jpg")
    new_image = ImageEnhance.Contrast(image).enhance(1.2)
    result = np.array(new_image)
    cv2.imwrite("res.jpg", result)

    white_black("res.jpg", "res2.jpg", 0.85)
    result = cv2.imread("res2.jpg")
    cv2.imshow('bw-result', result)
    cv2.waitKey(0)

    cv2.destroyAllWindows()

    # thickness
    gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)
    cv2.imshow('gray invert', gray)
    cv2.waitKey(0)

    edges = cv2.Canny(result, 50, 150, apertureSize=3)
    minLineLength = 1
    maxLineGap = 1
    # minLineLength/maxLineGap must be keyword arguments; passed positionally
    # they would land in the unused 'lines' output slot
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 20,
                            minLineLength=minLineLength,
                            maxLineGap=maxLineGap)
    for l in lines:
        for x1, y1, x2, y2 in l:
            cv2.line(result, (x1, y1), (x2, y2), (0, 0, 0), 1)

    cv2.imshow('with countur', result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #55
# read the input image
img_base = cv2.imread("./images/basic_base.png")
img_rotated = cv2.imread("./images/basic_rot45.png")

#     *********************************
#     *                               *
#     *   Detects Circles in Images   *
#     *                               *
#     *********************************

img = cv2.imread("./images/basic_base.png", cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img_base, cv2.COLOR_BGR2GRAY)
output = img.copy()

# Blur using 3 * 3 kernel.
gray_blurred = cv2.blur(gray, (3, 3))

# Apply Hough transform on the blurred image.
detected_circles = cv2.HoughCircles(gray_blurred,
                                    cv2.HOUGH_GRADIENT,
                                    1,
                                    20,
                                    param1=50,
                                    param2=30,
                                    minRadius=200,
                                    maxRadius=220)

max_pt = [0, 0, 0]

# Draw circles that are detected.
if detected_circles is not None:
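    # The snippet is cut off here. A plausible continuation, following the
    # common OpenCV HoughCircles drawing recipe (assumed, since the original
    # tail is not shown), rounds the coordinates and draws each circle:
    detected_circles = np.uint16(np.around(detected_circles))
    for x, y, r in detected_circles[0, :]:
        cv2.circle(output, (x, y), r, (0, 255, 0), 2)  # circumference
        cv2.circle(output, (x, y), 2, (0, 0, 255), 3)  # centre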
Example #56
def test():
    """
    Increase the number of images without using Keras.
    Reference: https://qiita.com/bohemian916/items/9630661cd5292240f8c7
    """
    input_dir = "data/input/gucky/"  # "data/input/gucky/"
    filelist = os.listdir(input_dir)

    output_dir = "data/input/gucky_generate_manual/"  # "data/input/gucky_generate/"
    make_dir(output_dir)

    # contrast adjustment
    min_table = 50
    max_table = 205
    diff_table = max_table - min_table  # 165
    LUT_HC = np.arange(256, dtype='uint8')
    LUT_LC = np.arange(256, dtype='uint8')

    # build the high-contrast LUT
    for i in range(0, min_table):
        LUT_HC[i] = 0
    for i in range(min_table, max_table):
        LUT_HC[i] = 255 * (i - min_table) / diff_table
    for i in range(max_table, 255):
        LUT_HC[i] = 255

    # build the low-contrast LUT
    for i in range(256):
        LUT_LC[i] = min_table + i * (diff_table) / 255

    # smoothing (average filter)
    average_square = (10, 10)

    # Gaussian noise
    mean, sigma = 0, 10

    # salt & pepper noise
    s_vs_p = 0.5
    amount = 0.004

    for i, file in enumerate(filelist):
        img = cv2.imread(input_dir + file)  # load as a numpy array, e.g. (128, 128, 3)
        row, col, ch = img.shape
        filename = os.path.splitext(file)[0]

        high_cont_img = cv2.LUT(img, LUT_HC)  # high contrast
        cv2.imwrite(output_dir + filename + "_LUT_HC" + ".jpg", high_cont_img)

        low_cont_img = cv2.LUT(img, LUT_LC)  # low contrast
        cv2.imwrite(output_dir + filename + "_LUT_LC" + ".jpg", low_cont_img)

        blur_img = cv2.blur(img, average_square)  # smoothing
        cv2.imwrite(output_dir + filename + "_blur" + ".jpg", blur_img)

        gauss = np.random.normal(mean, sigma, (row, col, ch))  # (128, 128, 3)
        gauss = gauss.reshape(row, col, ch)  # reshape to the image shape
        gauss_img = img + gauss  # add Gaussian noise
        cv2.imwrite(output_dir + filename + "_gauss" + ".jpg", gauss_img)

        # salt mode
        sp_img = img.copy()
        num_salt = np.ceil(amount * img.size * s_vs_p)
        coords = [
            np.random.randint(0, i - 1, int(num_salt)) for i in img.shape
        ]
        sp_img[tuple(coords[:-1])] = (255, 255, 255)

        # pepper mode
        num_pepper = np.ceil(amount * img.size * (1. - s_vs_p))
        coords = [
            np.random.randint(0, i - 1, int(num_pepper)) for i in img.shape
        ]
        sp_img[tuple(coords[:-1])] = (0, 0, 0)
        cv2.imwrite(output_dir + filename + "_salt_pepper" + ".jpg", sp_img)

        # flip (left as a placeholder; see the sketch below)
        pass

        # scaling (left as a placeholder; see the sketch below)
        pass
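The flip and scaling branches above are placeholders. A minimal sketch of what they might write inside the same per-file loop (the flip direction, scale factor, and output names are assumptions):

        # Hypothetical flip/scale augmentations for the per-file loop above.
        flip_img = cv2.flip(img, 1)  # 1 = horizontal flip
        cv2.imwrite(output_dir + filename + "_flip" + ".jpg", flip_img)

        scaled = cv2.resize(img, None, fx=1.2, fy=1.2)
        scaled_img = scaled[:row, :col]  # crop back to the original size
        cv2.imwrite(output_dir + filename + "_scale" + ".jpg", scaled_img)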
Example #57

img = cv.imread('imgs/lenna.png')
w, h = int(img.shape[1] * 0.5), int(img.shape[0] * 0.5)
img = cv.resize(img, (w, h))
# cv.imshow('img', img)

rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)

kernel = np.ones((5, 5), np.uint8) / 25

# homogenous 2D convolution using custom kernel
conv2D = cv.filter2D(rgb, -1, kernel)
# cv.imshow('2D conv', cv.cvtColor(conv2D, cv.COLOR_RGB2BGR))

# NB: boxFilter ==> blur ==> filter2D can be used interchangeably
# average smoothing
avg = cv.blur(rgb, (5, 5))
# cv.imshow('avg smoothing', avg)

# gaussian smoothing
gaussian_img = cv.GaussianBlur(rgb, (3, 3), 0)
# cv.imshow('Gaussian blur', gaussian_img)

# median smoothing -- for removing salt/pepper noise
median_img = cv.medianBlur(rgb, 3)
# cv.imshow('median blur', median_img)

# bilateral smoothing -- preserve edges and borders while removing noises
bilateral = cv.bilateralFilter(rgb, 20, 30, 40)
# cv.imshow('bilateral', bilateral)

titles = ['orig', '2D Conv', 'blur', 'gaussian', 'median', 'bilateral']
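The snippet breaks off at the titles list; it presumably ends by showing the six variants side by side. A minimal completion under that assumption:

import matplotlib.pyplot as plt

# Hypothetical display loop pairing each image with its title.
images = [rgb, conv2D, avg, gaussian_img, median_img, bilateral]
for i, (im, title) in enumerate(zip(images, titles)):
    plt.subplot(2, 3, i + 1)
    plt.imshow(im)
    plt.title(title)
    plt.axis('off')
plt.show()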
Example #58
def blur(img):
    return cv2.blur(img, (5, 5))
Example #59
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Grey", gray)

# horizontal and vertical gradients (Sobel)
gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)

# subtract the y-gradient from the x-gradient
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)

cv2.imshow("Gradient", gradient)

# denoise & binarize

blurred = cv2.blur(gradient, (9, 9))  # average blur
(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

cv2.imshow("Blurred", blurred)

cv2.imshow("Thresh", thresh)

# rectangular kernel

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

cv2.imshow("Kernel", kernel)
cv2.imshow("Closed", closed)

# perform erosion and dilation
Example #60
def calculateLightPattern(image):  # light pattern / approximate background
    # blur with a kernel of one third of the image width; cv2.blur needs an
    # integer ksize, and a numpy image stores its width in shape[1]
    # (the original used the C++-style image.cols, which does not exist here)
    ksize = max(3, image.shape[1] // 3)
    return cv2.blur(image, (ksize, ksize))
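A light pattern like this is normally fed into a background-removal step. A hedged sketch of the common difference method (the input file name and the inversion are assumptions):

import cv2

img = cv2.imread('document.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input
pattern = calculateLightPattern(img)
# Subtracting the image from its light pattern and inverting evens out
# uneven illumination on a light background.
clean = 255 - cv2.absdiff(pattern, img)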