Example #1
    def face(self):
        """
        Detects a face in the given image.
        """
        while True:
            self.convertToCV()

            grayscale = cv2.cvtColor(self._array, cv2.COLOR_BGR2GRAY)
            self.faces = faceCascade.detectMultiScale(grayscale, scaleFactor=1.3, minNeighbors=4, minSize=(15, 15), flags=cv2.CASCADE_SCALE_IMAGE)
            self.faces = sorted(self.faces, key=my_cool_sort)
            #print self.faces

            if len(self.faces) > 0:
                c, r, w, h = self.faces[0]  # (x, y, w, h) of the first detected face

                self.track_window = (c,r,w,h)
                roi = self._array[r:r+h, c:c+w]
                hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv_roi, np.array((0.,60.,32.)), np.array((180.,255.,255.)))
                self.roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
                cv2.normalize(self.roi_hist, self.roi_hist, 0, 255, cv2.NORM_MINMAX)

                self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

                break

            else:
                self._updateImage()
                break
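face() above only seeds the tracker state (self.track_window, self.roi_hist, self.term_crit); a minimal follow-up step, sketched here under the assumption that self._array still holds the current BGR frame, would back-project the stored histogram and hand it to CamShift:

    def track_once(self):
        # hedged sketch, not part of the original class: back-project the face
        # histogram onto the current frame and update the window with CamShift
        hsv = cv2.cvtColor(self._array, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], self.roi_hist, [0, 180], 1)
        ret, self.track_window = cv2.CamShift(dst, self.track_window, self.term_crit)
        return self.track_window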
Example #2
 def updateBiggestObjectContour(self, frame, contour):
     
     if contour is None:
         return
         
     height, width = frame.shape[:2]
     
     c,r,w,h = cv2.boundingRect(contour)
     
     if w*h < 20:
         return
     # set up the ROI for tracking
     roi = frame[r:r+h, c:c+w]
     hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
     
     mask2 = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
     mask = np.zeros((height,width), np.uint8)
     cv2.drawContours(mask, [contour], 0, 255, -1)
     maskArea = mask[r:r+h,c:c+w]
     maskArea = cv2.bitwise_and(maskArea, mask2)
     img = cv2.bitwise_and(roi,roi,mask=maskArea)
     #cv2.imshow('maskarea', mask2)
     #roi_hist = cv2.calcHist([hsv_roi],[0,1,2],maskArea,[180,256,256],[0,180,0,256,0,256])
     roi_hist = cv2.calcHist([hsv_roi],[0,1],maskArea,[180,256],[0,180,0,256])
     #roi_hist = cv2.calcHist([hsv_roi],[0],maskArea,[180],[0,180])
     cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
     #print i
     self.biggestObject = (c,r,w,h)
     self.biggestObjectHistogram = roi_hist
Example #3
def draw_window(frame):
    # setup initial location of window
    r,h,c,w = 250,90,400,125  # simply hardcoded the values
    track_window = (c,r,w,h)    

    # set up the ROI for tracking
    roi = frame[r:r+h, c:c+w]
    hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
    cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)    

    # Setup the termination criteria: either 10 iterations, or move by at least 1 pt
    term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)

    # apply meanshift to get the new location
    ret, track_window = cv2.CamShift(dst, track_window, term_crit)
    # Draw it on image
    pts = cv2.boxPoints(ret)
    pts = np.int0(pts)
    img2 = cv2.polylines(frame,[pts],True, 255,2)
    io.imshow(img2)
Example #4
    def skin_mask(self, img, det_face_hsv, face_rect):
        """
        Create a mask of the image which returns a binary image (black and white) based
        on whether we think a section is skin or not. We do this by analyzing the hue and
        saturation from the detected face. From this we can calculate the probability of
        any pixel in the full image occurring in the face image. Then we can filter out
        any values whose probability is below a certain threshold.

        :param img: BGR image from webcam
        :param det_face_hsv: hsv image of the face from the previous detection
        :param face_rect: non-normalized dimensions of face rectangle (left, top, cols, rows)
        :return: 2D array, black and white if pixels are thought to be skin
        """
        #Get the HSV images of the whole thing and the face
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        face_left = face_rect[0]
        face_top = face_rect[1]
        face_width = face_rect[2]
        face_height = face_rect[3]
        #create a Hue-Saturation histogram of the face
        hs_face_hist = cv2.calcHist([det_face_hsv], [0,1], None, [32,32], [0, 180,0, 255])
        cv2.normalize(hs_face_hist, hs_face_hist, 0, 255, cv2.NORM_MINMAX)
        #create a Hue-Saturation BackProjection, and a mask
        #This mask ignores dark pixels (V < 16) and desaturated pixels (S < 32)
        hue_min, sat_min, val_min = 0.0, 32.0, 16.0
        mask = cv2.inRange(img_hsv, np.array((hue_min, sat_min, val_min)), np.array((180., 255., 255.)))
        mask_face = mask[face_top:face_top+face_height, face_left:face_left+face_width]
        masked_hs_hist = cv2.calcHist([det_face_hsv], [0,1], mask_face, [32,32], [0, 180,0, 255])
        cv2.normalize(masked_hs_hist, masked_hs_hist, 0, 255, cv2.NORM_MINMAX)
        masked_hs_prob = cv2.calcBackProject([img_hsv], [0,1], masked_hs_hist, [0, 180,0, 255],1)
        cv2.bitwise_and(masked_hs_prob, mask, dst=masked_hs_prob) #seems to lessen noise???
        thresh = 8.0 #threshold likelihood for being skin, changes a lot based on setting
        _, masked_img = cv2.threshold(masked_hs_prob, thresh, 255, cv2.THRESH_BINARY) #throw out below thresh

        return masked_img
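A hypothetical call site for skin_mask; cap, face_cascade, and tracker are assumptions for illustration, not part of the original:

ok, frame = cap.read()  # BGR frame from a cv2.VideoCapture
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
if len(faces) > 0:
    x, y, w, h = faces[0]
    det_face_hsv = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)
    skin = tracker.skin_mask(frame, det_face_hsv, (x, y, w, h))
    cv2.imshow('skin', skin)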
Example #5
    def updateContours(self, frame, contours):

        self.movingObjects = []
        self.histograms = []
        
        height, width = frame.shape[:2]
        
        i=0
        for contour in contours:
            c,r,w,h = cv2.boundingRect(contour)
            if w*h < 20:
                continue
            # set up the ROI for tracking
            roi = frame[r:r+h, c:c+w]
            hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            
            mask2 = cv2.inRange(hsv_roi, np.array((0., 30.,30.)), np.array((180.,250.,250.)))
            mask = np.zeros((height,width), np.uint8)
            cv2.drawContours(mask, [contour], 0, 255, -1)
            maskArea = mask[r:r+h,c:c+w]
            maskArea = cv2.bitwise_and(maskArea, mask2)
            img = cv2.bitwise_and(roi,roi,mask=maskArea)
            #cv2.imshow('maskarea', mask2)
            roi_hist = cv2.calcHist([hsv_roi],[0,1],maskArea,[180,256],[0,180,0,256])
            #roi_hist = cv2.calcHist([hsv_roi],[0],maskArea,[180],[0,180])
            cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
            #print i
            i+=1
            self.movingObjects.append((c,r,w,h))
            self.histograms.append(roi_hist)
Example #6
    def test_dft(self):

        img = self.get_sample('samples/data/rubberwhale1.png', 0)
        eps = 0.001

        #test direct transform
        refDft = np.fft.fft2(img)
        refDftShift = np.fft.fftshift(refDft)
        refMagnitude = np.log(1.0 + np.abs(refDftShift))

        testDft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT)
        testDftShift = np.fft.fftshift(testDft)
        testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1]))

        refMagnitude = cv2.normalize(refMagnitude, None, 0.0, 1.0, cv2.NORM_MINMAX)
        testMagnitude = cv2.normalize(testMagnitude, None, 0.0, 1.0, cv2.NORM_MINMAX)

        self.assertLess(cv2.norm(refMagnitude - testMagnitude), eps)

        #test inverse transform
        img_back = np.fft.ifft2(refDft)
        img_back = np.abs(img_back)

        img_backTest = cv2.idft(testDft)
        img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1])

        img_backTest = cv2.normalize(img_backTest, None, 0.0, 1.0, cv2.NORM_MINMAX)
        img_back = cv2.normalize(img_back, None, 0.0, 1.0, cv2.NORM_MINMAX)

        self.assertLess(cv2.norm(img_back - img_backTest), eps)
Example #7
    def describeRGB(self, image):
        # compute a multidimensional histogram in the RGB colorspace using the channels
        # whose indices are given by channelIds. Then normalize the histogram so that images
        # with the same content, but either scaled larger or smaller, will have (roughly) the same histogram

        if(self.channelIds.shape[0]==1):
            hist = cv2.calcHist([image], self.channelIds,
               None, self.bins, [0, 256])
            hist = cv2.normalize(hist, hist)

        elif(self.channelIds.shape[0]==2):
            hist = cv2.calcHist([image], self.channelIds,
                None, self.bins, [0, 256, 0, 256])
            hist = cv2.normalize(hist, hist)

        elif(self.channelIds.shape[0]==3):
            hist = cv2.calcHist([image], self.channelIds,
                None, self.bins, [0, 256, 0, 256, 0, 256])
            hist = cv2.normalize(hist, hist)
        else:
            print "WARNING: number of channels must be greate or equal to 1"
            hist = np.zeros([8,1])

        # return out 3D histogram as a flattened array
        return hist.flatten()
Example #8
def Calibrate_NewObject():
    # Give time for object to back away
    time.sleep(.4)
    
    # When the object is moved away from the lens, the StdDev will increase.
    # The image is only captured after the object is moved away
    while(True):
        # take first frame of the video
        ret,frame = cap.read()
        if(Get_Frame_StdDev(frame) > 30):
            break

    # setup initial location of window
    r,h,c,w = int(.5*(camHeight-trackHeight)), trackHeight, int(.5*(camWidth-trackWidth)), trackWidth  # Defined above. used int() to keep type integer
    track_window = (c,r,w,h)
    
    # set up the ROI for tracking
    roi = frame[r:r+h, c:c+w] #create smaller frame
    hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) #convert to HSV

    mask = cv2.inRange(hsv_roi, lower_hue, upper_hue) #np.array((0., 60.,32.)), np.array((180.,255.,255.))
    roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180]) #([hsv_roi],[0],mask,[180],[0,180])
    cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
    
    return track_window, roi_hist
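Calibrate_NewObject depends on a Get_Frame_StdDev helper that is not shown; a plausible one-liner (an assumption inferred from the name and the threshold check above) is:

def Get_Frame_StdDev(frame):
    # hypothetical helper: overall pixel standard deviation of the frame
    return np.std(frame)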
Example #9
def play_vid(vid, wait=50, norm=False):
    import cv2
    for i, img in enumerate(vid):
        if norm: cv2.normalize(img,img, 0, 255, cv2.NORM_MINMAX)
        img = cv2.resize(img.astype("uint8"), (200,200), interpolation=cv2.INTER_NEAREST)
        cv2.imshow("Gesture", img)
        cv2.waitKey(wait)
Example #10
def find_content(img_hsv, hist_sample):
    """ img hsv, hist_sample as np.array, -> 1 channel distance """
    src_img_cp = img_hsv
    # normalize the sample histogram
    cv2.normalize(hist_sample, hist_sample, 0, 179, cv2.NORM_MINMAX)
    distance = cv2.calcBackProject([img_hsv], [0], hist_sample, [0, 180], 0.5)

    print('distance -------------------')
    # show the distance
    ava.cv.utl.show_image_wait_2(distance) # ------------

    # convolve with a circular (elliptical) kernel to smooth, morphology
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    cv2.filter2D(distance, -1, kernel, distance)

    print('==================== distance convolved -------------------')
    # show the smoothed distance
    ava.cv.utl.show_image_wait_2(distance) # ------------

    # threshold
    ret, thresh = cv2.threshold(distance, 55, 180, cv2.THRESH_BINARY)
    # thresh = cv2.merge([thresh, thresh, thresh])

    # do the bitwise_and
    #result = cv2.bitwise_and(src_img_cp, thresh)
    return thresh
Example #11
	def describe(self, image):
		hist = cv2.calcHist([image], [0, 1, 2],
			None, self.bins, [0, 256, 0, 256, 0, 256])
		cv2.normalize(hist, hist)

		# return out 3D histogram as a flattened array
		return hist.flatten()
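A hedged usage sketch for describe(); the descriptor instance and image paths are illustrative, and both images must be described with the same bins for the comparison to be meaningful:

descA = descriptor.describe(cv2.imread('a.jpg'))
descB = descriptor.describe(cv2.imread('b.jpg'))
# HISTCMP_CORREL returns 1.0 for identical color distributions
score = cv2.compareHist(descA, descB, cv2.HISTCMP_CORREL)
print(score)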
Example #12
def sliding_window(image,r1,r2,step,roihist):
    test_path = "/home/sarbajit/PyCharm_Scripts/test/green_pad_same_name_new/final_rotated2/"
    item = image
    if item.endswith(".png") or item.endswith(".PNG"):
        x = test_path+item
        target2 = cv2.imread(x)
        target = target2[90:150, 90:520]
        (winW, winH) = (50, 30)
        for (x, y, window) in sliding_window_test(target,r1,r2,stepSize=step, windowSize=(winW, winH)):
            # if the window does not meet our desired window size, ignore it
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            #this section does the histogram backprojected matching window by window.
            hsvt = cv2.cvtColor(window, cv2.COLOR_BGR2HSV)
            inputImage = cv2.calcHist([hsvt], [0, 1], None, [180, 256], [0, 180, 0, 256])
            cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
            dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
            match = cv2.compareHist(roihist, inputImage, method=0)
            print match
            #the match is printed to see the difference and jumps when the window moves through the landing pad

            # THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW,AND DO THE NECESSARY STEPS

            # we'll just draw the window and show the results
            clone = target.copy()
            cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
            cv2.imshow("window", clone)
            cv2.waitKey(1)
            time.sleep(1)

# sliding_window('2015-08-06_06-27-48.png',28,58,30)
Example #13
    def run(self, cur_frame, next_frame):
        # Setup the termination criteria, either 10 iteration or move by at least 1 pt
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        new_list_of_objects = []

        for obj_tuple in self.list_of_objects:
            hsv_roi = None
            if len(obj_tuple) == 4:
                obj, hsv_roi, n_in_frame, n_not_moving = obj_tuple
            if (hsv_roi is not None) and (obj[2] > 0 or obj[3] > 0):
                mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
                roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
                cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

                # track in next frame
                # backprojection
                hsv = cv2.cvtColor(next_frame, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

                # apply meanshift to get the new location
                ret, obj_new = cv2.meanShift(dst, obj, term_crit)
                n_in_frame += 1
                if PostProcessing.distance_two_squares(obj, obj_new) < 1:
                    n_not_moving += 1
                else:
                    n_not_moving = 0

                x, y, w, h = obj_new
                if n_not_moving < 20:
                    new_list_of_objects.append((obj_new, hsv_roi, n_in_frame, n_not_moving))

                # draw
                cv2.rectangle(next_frame, (x, y), (x + w, y + h), 255, 2)
        self.list_of_objects = new_list_of_objects
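run() calls a PostProcessing.distance_two_squares helper that is not included here; a plausible minimal version (an assumption based on its name and the `< 1` movement check above) is:

import math

def distance_two_squares(rect_a, rect_b):
    # hypothetical helper: Euclidean distance between the top-left corners
    # of two (x, y, w, h) windows
    return math.hypot(rect_a[0] - rect_b[0], rect_a[1] - rect_b[1])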
Example #14
def buffer_data(bag, input_topic, compressed):
    image_buff = []
    time_buff  = []
    start_time = None
    depthData = []
    bridge     = CvBridge()
    #bag = rosbag.Bag(bagFile)
    #Buffer the images, timestamps from the rosbag
    for topic, msg, t in bag.read_messages(topics=[input_topic]):
        depthData+=msg.data
        if start_time is None:
            start_time = t

        #Get the image
        if not compressed:
            try:
                cv_image = bridge.imgmsg_to_cv2(msg, "32FC1")
            except CvBridgeError as e:
                print e
        else:
            nparr = np.frombuffer(msg.data, np.uint8)
            cv_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

        # normalize depth image 0 to 255
        depthImg = np.array(cv_image, dtype=np.float32)
        cv2.normalize(depthImg, depthImg, 0, 255, cv2.NORM_MINMAX)
        time_buff.append(t.to_sec() - start_time.to_sec())
        image_buff.append(depthImg)

    return image_buff, time_buff  
Example #15
	def describe(self, image):
		hist = cv2.calcHist([image], [0, 1, 2],
			None, self.bins, [0, 256, 0, 256, 0, 256])
		cv2.normalize(hist, hist)
		#print type(hist)
		#print type(hist.flatten())
		return hist.flatten()
Example #16
    def CalcPCA(self, colorPatches, K = 1, show = True):
        width, height, _ = colorPatches[0].shape
        N = len(self.components)

        X = np.zeros((width*height*3 , N))
        for i in range(N):
            #x_vec = np.ravel(colorPatches[i][:,:,0]).astype(float)
            x_vec = np.ravel(colorPatches[self.components[i]]).astype(float)
            #x_vec = np.ravel(cv2.cvtColor(colorPatches[self.components[i]], cv2.COLOR_BGR2GRAY)).astype(float)
            x_vec /= np.linalg.norm(x_vec)
            X[:,i] = x_vec
        GrandMean = np.mean(X,1)
        for i in range(N):
            X[:,i] -= GrandMean
        U,s,Vt = np.linalg.svd(np.dot(np.transpose(X), X))
        Wp = np.zeros((width*height*3, K))
        for i in range(K):
            wi = np.dot(X,U[:,i])
            wi /= np.linalg.norm(wi)
            Wp[:,i] = wi

        # Visualize the most significant Eigen-Patterns
        if show == True:
            for k in range(K):
                w = np.reshape(Wp[:,k]+GrandMean, (width,height,3)).astype(np.float64)
                cv2.normalize(w, w, 0, 255, cv2.NORM_MINMAX) # the same
                w = w.astype(np.uint8)
                #wint_color = cv2.applyColorMap(wint, cv2.COLORMAP_JET)
                cv2.imshow('w'+str(k),cv2.resize(w, None, fx=self.enlargeFactor, fy=self.enlargeFactor, interpolation = cv2.INTER_CUBIC))
                #cv2.imshow('wint_color'+str(k),cv2.resize(wint_color, None, fx=self.enlargeFactor, fy=self.enlargeFactor, interpolation = cv2.INTER_CUBIC))
                cv2.waitKey(0)
Example #17
    def __call__(self, data):
        print " - outHisto: display data nr %.0f @t: %f" % (data[self.inp_ch[0]], time.time())
        print " - outHisto: display the Histogram of streamelement %s, channels %s, frame %s" % (
            self.inp_ch[1],
            self.colorchannels,
            self.inp_ch[0],
        )

        hist = cv2.calcHist([data[self.inp_ch[1]]], self.colorchannels, None, [256], [0, 255])
        cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
        # print hist
        bin_count = hist.shape[0]
        bin_w = 2
        bin_max_h = 200
        img = (
            np.ones((int(bin_max_h * 1.1), bin_count * bin_w, 3), np.uint8) * [70, 255, 255] * 255
        )  # last list is background color

        # print hist
        for i in xrange(bin_count):
            val = hist[i]
            h = int(val * bin_max_h)
            # print h
            cv2.rectangle(
                img,
                (i * bin_w + 2, int(bin_max_h * 1.1)),
                ((i + 1) * bin_w - 2, int(bin_max_h * 1.1) - h),
                [int(255 * 255.0 * i / bin_count)] * 3,
                -1,
            )
        # img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow("hist", img)

        print " - outHisto: finished and waiting for key @t: %f" % (time.time())
        cv2.waitKey(1)
Example #18
File: frame.py  Project: FilippoC/pke
    def getPartHistograms(self, px, py):
        if (px, py) in self.part_hists:
            return self.part_hists[(px, py)]

        gray_image = cv2.cvtColor(self.getCVFrame(), cv2.COLOR_BGR2GRAY)
        height, width = gray_image.shape

        size_x = int(math.ceil(float(width)/float(px)))
        size_y = int(math.ceil(float(height)/float(py)))

        hists = []
        for x in range(px):
            for y in range(py):
                
                subimage = gray_image[np.ix_(
                            range(y * size_y, min((y + 1) * size_y, height)),
                            range(x * size_x, min((x + 1) * size_x, width))
                            )]
                h = cv2.calcHist([subimage],[0],None,[256],[0,255])
                cv2.normalize(h,h,0,255,cv2.NORM_MINMAX)

                hists.append(h)

        self.part_hists[(px, py)] = hists

        return hists
Example #19
    def __init__(self, frame_first, gamma=1.0, motion_compensation=False):
        """
        The constructor for a new frameFusion instance

        @param frame_first: initial picture
        @param gamma: contrast parameter
        @param motion_compensation: (boolean flag) compensate motion over time
        """
        # Define settings
        self.n_fused_frames = 0
        self.gamma = gamma
        self.n_max_corners = 400
        self.corners_q_level = 4
        self.motion_comp = motion_compensation
        self.motion_compensation_method = 'orb'
        self.reset = False
        self.reset_ratio = 0.3

        # Allocate buffers
        self.frame_acc = np.float32(frame_first)
        self.frame_acc_disp = np.float32(frame_first)
        self.frame_eq = np.float32(frame_first)
        self.frame_prev = frame_first

        # Do the first accumulation
        cv2.equalizeHist(frame_first, self.frame_acc)
        cv2.normalize(self.frame_acc, self.frame_acc_disp, 0., 1., cv2.NORM_MINMAX)  # just for the display stuff
Example #20
	def get_query_histogram(self):
		# set up the ROI for tracking
		roi = self.query_img[self.query_roi[1]:self.query_roi[3],self.query_roi[0]:self.query_roi[2],:]
		hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
		# play with the number of histogram bins by changing histSize
		self.query_hist = cv2.calcHist([hsv_roi],[0],mask=None,histSize=[256],ranges=[0,255])
		cv2.normalize(self.query_hist,self.query_hist,0,255,cv2.NORM_MINMAX)
Example #21
File: frame.py  Project: FilippoC/pke
    def getHistogram(self):
        if self.histogram is not None:
            return self.histogram

        """
        hist = cv2.calcHist(
            [frame], [0,1,2], 
            None, 
            [256, 256,256], 
            # http://stackoverflow.com/questions/15834602/how-to-calculate-3d-histogram-in-python-using-open-cv
            # [[0, 255], [0,255],[0,255]]
            [0, 255, -255, 255, -255, 255]
        )"""

        # normalization must be done channel by channel...
        b,g,r = cv2.split(self.getCVFrame())
        color = [(255,0,0),(0,255,0),(0,0,255)]

        # what does this do?!
        bins = np.arange(256).reshape(256,1)

        self.histogram = []
        for item,col in zip([b,g,r],color):
            hist_item = cv2.calcHist([item],[0],None,[256],[0,255])
            cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
            self.histogram.append(hist_item)

        return self.histogram
Example #22
def draw_histogram_hsv(hsv_img, bin_width=2):
    
    """ Calculates and plots 2 histograms next to each other: one for hue, and one for saturation and value
    """
    
    sv_hist_img, h_hist_img = np.zeros((300, 256, 3)), np.zeros((300, 360, 3))
    sv_bin_count, h_bin_count = 256 // bin_width, 180 // bin_width
    
    sv_bins = np.arange(sv_bin_count).reshape(sv_bin_count, 1) * bin_width
    h_bins = np.arange(h_bin_count).reshape(h_bin_count, 1) * bin_width * 2
    
    debug_colors = [ (255, 255, 255), (255, 0, 0), (0, 0, 255) ]
    
    # Use ternary conditional for outputting to 2 different hists - a bit of a hack
    for ch, col in enumerate(debug_colors):
        hist_item = cv2.calcHist([hsv_img], [ch], None, [h_bin_count if ch == 0 else sv_bin_count], [0, 180 if ch == 0 else 255])
        cv2.normalize(hist_item, hist_item, 0, 255, cv2.NORM_MINMAX)
        hist = np.int32(np.around(hist_item))
        pts = np.column_stack((h_bins if ch == 0 else sv_bins, hist))
        cv2.polylines(h_hist_img if ch == 0 else sv_hist_img, [pts], False, col)
    
    sv_hist_img, h_hist_img = np.flipud(sv_hist_img), np.flipud(h_hist_img)
    h_hist_img[:, 0] = (0, 255, 0)
    
    cv2.imshow('sat / val hist | hue hist', np.concatenate([sv_hist_img, h_hist_img], axis=1))
Example #23
def find_vertical_lines(gray_pic):
    kernelx = cv2.getStructuringElement(cv2.MORPH_RECT,(2,10))

    dx = cv2.Sobel(gray_pic,cv2.CV_16S,1,0)
    # dx = cv2.Sobel(gray_pic,cv2.CV_32F,1,0)
    # convert from dtype=int16 to dtype=uint8
    dx = cv2.convertScaleAbs(dx)
    cv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)

    # cv2_helper.show_pic(dx)
    # `close` must be defined before the dilation below, so the Otsu threshold is restored
    ret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # close = cv2.adaptiveThreshold(dx,255,
    #     cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV, blockSize=3, C=2)

    # cv2_helper.show_pic(close)
    close = cv2.morphologyEx(close,cv2.MORPH_DILATE,kernelx,iterations = 1)

    contour, hier = cv2.findContours(close,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contour:
        x,y,w,h = cv2.boundingRect(cnt)
        if h/w > 8:
            cv2.drawContours(close,[cnt],0,255,-1)
        else:
            cv2.drawContours(close,[cnt],0,0,-1)
    # close = cv2.morphologyEx(close,cv2.MORPH_CLOSE,None,iterations = 2)
    closex = close.copy()
    # show_pic(closex)
    return closex
Example #24
 def normaliseImg(self, img):
     channel = cv2.split(img)
     for i in channel[1]:
         i += 5  # rows are views into the channel, so this shifts its values in place
     # cv2.normalize(channel[1], channel[1], 0, 255, cv2.NORM_MINMAX)
     cv2.normalize(channel[2], channel[2], 0, 255, cv2.NORM_MINMAX)
     return cv2.merge(channel, img)  
Example #25
    def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if self.prevPrevFrame is None:
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=15)
        th1 = cv.erode(th1, None, iterations=1)

        delta_count = cv.countNonZero(th1)

        cv.imshow("frame_th1", th1)

        self.prevPrevFrame = self.prevFrame
        self.prevFrame = gray

        ret = delta_count > self.threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret
Example #26
    def __sobel_image__(self,image,horizontal):
        """
        apply the sobel operator to a given image on either the vertical or horizontal axis
        basically copied from
        http://stackoverflow.com/questions/10196198/how-to-remove-convexity-defects-in-a-sudoku-square
        :param horizontal:
        :return:
        """
        if horizontal:
            dy = cv2.Sobel(image,cv2.CV_16S,0,2)
            dy = cv2.convertScaleAbs(dy)
            cv2.normalize(dy,dy,0,255,cv2.NORM_MINMAX)
            ret,close = cv2.threshold(dy,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(10,2))
        else:
            dx = cv2.Sobel(image,cv2.CV_16S,2,0)
            dx = cv2.convertScaleAbs(dx)
            cv2.normalize(dx,dx,0,255,cv2.NORM_MINMAX)
            ret,close = cv2.threshold(dx,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,10))

        close = cv2.morphologyEx(close,cv2.MORPH_CLOSE,kernel)

        return close
Example #27
def play_vid(vid, wait=50):
    import cv2
    for i,img in enumerate(vid):
        cv2.normalize(img,img, 0, 255, cv2.NORM_MINMAX)
        img = cv2.resize(img.astype("uint8"), (200,200))
        cv2.imshow("Gesture", img)
        cv2.waitKey(wait)
Example #28
    def detect_object(self, img):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        mask = cv2.inRange(hsv, self.lower, self.upper)
        mask = cv2.blur(mask, (7, 7))

        img_filter = cv2.bitwise_and(img, img, mask=mask)
        gray = cv2.cvtColor(img_filter, cv2.COLOR_BGR2GRAY)
        contours, _ = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        max_idx = -1
        max_area = -1
        for i, cnt in enumerate(contours):
            area = cv2.contourArea(cnt)
            if area > max_area and area > self.min_area:
                max_idx = i
                max_area = area

        if max_idx > -1:
            cnt = contours[max_idx]
            x, y, w, h = cv2.boundingRect(cnt)
            self.track_window = (x, y, w, h)

            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            self.img = img
            # cv2.imshow('track', img)
            # cv2.waitKey(1)

            hsv_roi = hsv[y : y + h, x : x + w]
            mask_roi = mask[y : y + h, x : x + w]

            mask_roi = cv2.inRange(hsv_roi, self.lower, self.upper)
            self.hist_track = cv2.calcHist([hsv_roi], [0], mask_roi, [180], [0, 180])
            cv2.normalize(self.hist_track, self.hist_track, 0, 255, cv2.NORM_MINMAX)
            return self.track_window
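detect_object only initializes self.track_window and self.hist_track; a minimal follow-up tracking step (a sketch under the assumption that the same instance attributes are reused) could be:

    def track(self, img):
        # hedged sketch, not part of the original class: one meanShift update
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], self.hist_track, [0, 180], 1)
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        _, self.track_window = cv2.meanShift(dst, self.track_window, term_crit)
        return self.track_window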
Example #29
    def track(self, img, center, face_rect, det_face_hsv):
        """
        Uses mean shifting to track the users face. Only useful once
        a face has already been detected. The Process
        1) Convert the image to HSV, since we track with Hue
        2) Pull out the Hue values in the detected region, and develop a histogram
        3) Create a Back Projection of the initial image with values equal to the
            probability of that hue appearing in the facial region
        4) Use mean shifting to determine where the new face is

        :param img: BGR image from webcam
        :param center: tuple giving the normalized center of the face
        :param face_rect: non-normalized dimensions of face rectangle (x, y, cols, rows)
        :param det_face_hsv: hsv of the most recently detected face
        :return: (new_position, rect)
        """
        # convert the original image to hsv, and pull out the face
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        hue_face_hist = cv2.calcHist([det_face_hsv],[0], None, [32], [0, 180])
        cv2.normalize(hue_face_hist, hue_face_hist, 0, 255, cv2.NORM_MINMAX)
        #calculate the back projection probabilities
        back_proj = cv2.calcBackProject([img_hsv], [0], hue_face_hist, [0, 180], 1)
        #track face using meanshift
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        track_box, rect = cv2.meanShift(back_proj, face_rect, term_crit)
        #return values
        height, width, x = img.shape #rows, cols, depth
        new_position = ((rect[0] + rect[2]/2)/float(width), (rect[1] + rect[3]/2)/float(height))
        cv2.rectangle(img, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), 255)

        return (new_position, rect)
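A hypothetical driver loop for track(); `tracker`, `cap`, `face_rect`, and `det_face_hsv` are assumed to come from an earlier detection step like the one in Example #4:

center = (0.5, 0.5)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    center, face_rect = tracker.track(frame, center, face_rect, det_face_hsv)
    cv2.imshow('tracking', frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break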
Example #30
def histogram(picture, channels=[0]):
    # get and display histogram
    hist = cv2.calcHist([picture], channels, None, [256], [0, 255])
    cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
    # print hist
    bin_count = hist.shape[0]
    bin_w = 2
    bin_max_h = 200
    hist_max, thres = 0, bin_count - 1  # defaults in case no bin meets the conditions below
    img = (
        np.ones((int(bin_max_h * 1.1), bin_count * bin_w, 3), np.uint8) * [70, 255, 255] * 255
    )  # last list is background color

    # print hist
    for i in xrange(bin_count):
        val = hist[i]
        if val == 1:
            hist_max = i  # find the size and the pos of the max
        h = int(val * bin_max_h)
        # print h
        cv2.rectangle(
            img,
            (i * bin_w + 2, int(bin_max_h * 1.1)),
            ((i + 1) * bin_w - 2, int(bin_max_h * 1.1) - h),
            [int(255 * 255.0 * i / bin_count)] * 3,
            -1,
        )
    # img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    cv2.imshow("hist", img)

    for i in xrange(hist_max, bin_count):
        if hist[i] < 0.2:
            thres = i
            break
    print hist_max, thres
    return hist_max, thres
Example #31
# 0818.py
import cv2
import numpy as np

#1
gray = cv2.imread('../data/lena.jpg', cv2.IMREAD_GRAYSCALE)

#2
gray_sum = cv2.integral(gray)
dst = cv2.normalize(gray_sum, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)

cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
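For reference, the NORM_MINMAX call above maps the smallest integral-image value to 0 and the largest to 255; the same mapping written by hand (a sketch reusing gray_sum from above) is:

gs = gray_sum.astype(np.float64)
# map min -> 0 and max -> 255, then truncate back to 8-bit
dst_manual = ((gs - gs.min()) / (gs.max() - gs.min()) * 255).astype(np.uint8)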
Example #32
    plot_jaccard = []

    output_stack = []
    output_stack_masked = []
    all_PPV = []
    input_im_stack = []
    for i in range(len(examples)):

        input_name = examples[i]['input']
        # NEED TO CONVERT TO np.uint8 if the original input is np.uint16!!!
        # NORMALIZED BECAUSE IMAGE IS uint16 ==> do same when actually running images!!!
        input_im = np.asarray(Image.open(input_name))
        if input_im.dtype == 'uint16':
            input_im = np.asarray(input_im, dtype=np.float32)
            input_im = cv.normalize(input_im, None, 0, 1, cv.NORM_MINMAX)
            input_im = input_im * 255

        input_im = np.asarray(input_im, dtype=np.float32)

        size_whole = input_im.shape[0]

        size = int(size_whole)  # 4775 and 6157 for the newest one
        if resize:
            size = int(
                (size * im_scale) / 0.45)  # 4775 and 6157 for the newest one
            input_im = resize_adaptive(Image.fromarray(input_im),
                                       size,
                                       method=Image.BICUBIC)
            input_im = np.asarray(input_im, dtype=np.float32)
Example #33

def img_median(img):
    pixels = img.shape[0] * img.shape[1]
    hist_r = cv2.calcHist([img], [0], None, [65536], (0, 65535))
    hist_g = cv2.calcHist([img], [1], None, [65536], (0, 65535))
    hist_b = cv2.calcHist([img], [2], None, [65536], (0, 65535))
    return hist_median(hist_r, pixels), hist_median(hist_g,
                                                    pixels), hist_median(
                                                        hist_b, pixels)


# This image is just for diagnosis and verifying.
img_scaled = cv2.normalize(img,
                           dst=None,
                           alpha=0,
                           beta=255,
                           norm_type=cv2.NORM_MINMAX)

outf = None
if args.outfile:
    outf = open(args.outfile, "w+")
    sys.stdout = outf

print('patch', 'r', 'g', 'b')
for patch, b in boxes.items():
    top_left = to_int(b[0])
    bottom_right = to_int(np.add(b[0], b[1]))
    cv2.rectangle(img_scaled, top_left, bottom_right, (255, 0, 0), 1)

    patch_img = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
Example #34
import cv2

image = cv2.imread(r'D:\knee\data\smith\smith.jpg')
image = cv2.resize(image, (800, 800))
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
norm_image = cv2.normalize(gray_image,
                           None,
                           alpha=0,
                           beta=1,
                           norm_type=cv2.NORM_MINMAX,
                           dtype=cv2.CV_32F)

cv2.imshow("img", norm_image)
cv2.waitKey(0)
Example #35
# set up the ROI for tracking
roi = frame[r:r + h, c:c + w]
# conversion to Hue-Saturation-Value space
# 0 < H < 180 ; 0 < S < 255 ; 0 < V < 255
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
# computation mask of the histogram:
# pixels with H<15, S<30, or V<55 are ignored
mask = cv2.inRange(hsv_roi, np.array((15., 30., 55.)),
                   np.array((180., 235., 235.)))
# Marginal histogram of the Hue component
# roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
# Q2: marginal histogram of the Saturation component instead
roi_hist = cv2.calcHist([hsv_roi], [1], mask, [180], [0, 180])
# Histogram values are normalised to [0,255]
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria: either 10 iterations,
# or move by less than 1 pixel
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

cpt = 1
while (1):
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Backproject the model histogram roi_hist onto the
        # current image hsv, i.e. dst(x,y) = roi_hist(hsv(0,x,y))
        # dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
        dst = cv2.calcBackProject([hsv], [1], roi_hist, [0, 180], 1)
Example #36
    prvs = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)
    hsv = np.zeros_like(frame1)
    hsv[..., 1] = 255

    while (1):
        ret, frame2 = cap.read()
        if ret == True:
            next = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)

            flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 1,
                                               5, 1.2, 0)

            mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2
            hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
            bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)

            out.write(bgr)
            cv.imshow('Original', frame2)
            cv.imshow('Dense Optical Flow in OpenCV', bgr)
            k = cv.waitKey(60) & 0xff
            if k == 27:
                break

            prvs = next
        else:
            break

    out.release()
    cap.release()
Example #37
        gx = cv2.Sobel(frame, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(frame, cv2.CV_32F, 0, 1)

        # calculate gradient magnitude and direction (in degrees)

        mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)

        # normalize

        gx = np.abs(gx)
        gy = np.abs(gy)
        angle = np.abs(angle)

        # normalize other values 0 -> 180

        gx = cv2.normalize(gx, None, 0, 255, cv2.NORM_MINMAX)
        gy = cv2.normalize(gy, None, 0, 255, cv2.NORM_MINMAX)
        angle = cv2.normalize(angle, None, 0, 180, cv2.NORM_MINMAX)

        # for the angle take the max across all three channels

        (aB, aG, aR) = cv2.split(angle)
        angle = np.maximum(np.maximum(aR, aG), aB)

        # display images (as 8-bit)

        cv2.imshow(window_nameGx, gx.astype(np.uint8))
        cv2.imshow(window_nameGy, gy.astype(np.uint8))
        cv2.imshow(window_nameAngle, angle.astype(np.uint8))

        # stop the timer and convert to ms. (to see how long processing and display takes)
Example #38
def main():

    cap = cv2.VideoCapture(vid_path)
    status1, previous_frame = cap.read()
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    hsv = np.zeros_like(previous_frame)
    hsv[..., 1] = 255
    t = 20
    dc = 6
    red = 30
    check_red = 1
    start = 0
    radiuce_up_limit = 60
    radiuce_low_limit = 30
    i = 0

    while (i < total_frames - 1):
        ret, frame = cap.read()
        i = i + 1

        frame1 = frame.copy()
        current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        current_frame = cv2.GaussianBlur(current_frame, (var_blur, var_blur),
                                         0)

        # optical Flow
        flow = cv2.calcOpticalFlowFarneback(copy_frame, current_frame, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        grayscaled = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)

        retval2, binary_image2 = cv2.threshold(
            grayscaled, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Background Subtraction
        binary_image3 = fgbg.apply(current_frame)

        # combination of three methods
        final_binary = cv2.bitwise_and(binary_image2, binary_image3)

        lab_val = 255
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(final_binary, connectivity=8,
                                             ltype=cv2.CV_32S)

        if check_red == 1:
            red = red + 10
            if red > radiuce_up_limit:
                check_red = 0
        else:
            red = red - 10
            if red == radiuce_low_limit:
                check_red = 1

        if lab_stats[1:, 4].size > 2:
            start = 1
            dc = dc + 1

            if dc > 6:
                dc = 0
                re = lab_stats[1:, 4].argsort()[-3:][::-1] + 1

                largest_mask = np.zeros(final_binary.shape, dtype=np.uint8)
                largest_mask[img_labeled == re[0]] = lab_val
                cnts1 = cv2.findContours(largest_mask.copy(),
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
                cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]

                largest_mask[img_labeled == re[1]] = lab_val
                cnts2 = cv2.findContours(largest_mask.copy(),
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
                cnts2 = cnts2[0] if imutils.is_cv2() else cnts2[1]

                largest_mask[img_labeled == re[2]] = lab_val
                cnts3 = cv2.findContours(largest_mask.copy(),
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
                cnts3 = cnts3[0] if imutils.is_cv2() else cnts3[1]

            X1 = cnts3[0][0]
            X2 = cnts3[1][0]
            X3 = cnts3[2][0]

            cX1 = X1[0][0]
            cY1 = X1[0][1]
            cX2 = X2[0][0]
            cY2 = X2[0][1]
            cX3 = X3[0][0]
            cY3 = X3[0][1]

            # distance between obj1 and obj2
            dist1 = math.sqrt((cX1 - cX2)**2 + (cY1 - cY2)**2)
            dist2 = math.sqrt((cX1 - cX3)**2 + (cY1 - cY3)**2)
            dist3 = math.sqrt((cX2 - cX3)**2 + (cY2 - cY3)**2)

            if dist1 < 90:
                cX2 = cX1
                cY2 = cY1
                radiuce_up_limit = 100
            else:
                radiuce_up_limit = 60

            if dist2 < 90:
                cX3 = cX2
                cY3 = cY2
                radiuce_up_limit = 100
            else:
                radiuce_up_limit = 60

            if dist3 < 90:
                cX2 = cX3
                cY2 = cY3
                radiuce_up_limit = 100
            else:
                radiuce_up_limit = 60

            cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
            cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
            cv2.circle(frame, (cX3, cY3), red, (0, 255, 255), 3)
            cv2.putText(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (0, 255, 255), 1, cv2.LINE_AA)
            cv2.imshow('Frame', frame)
        else:
            t = t + 1
            if t > 40:
                if lab_stats[1:, 4].size > 0 and start == 1:
                    t = 0
                cv2.putText(frame, 'Not Breathing', (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
                cv2.imshow('Frame', frame)
            else:
                cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
                cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
                cv2.circle(frame, (cX3, cY3), red, (0, 255, 255), 3)
                cv2.putText(frame, 'Breathing', (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.imshow('Frame', frame)
            previous_frame = current_frame
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
Example #39
def gen_dataset(data_dir):
    mask_dir = data_dir + '_mask'
    img_ori_dir = data_dir + '_ori'
    datasets = []
    datasets_name = []
    f_list = os.listdir(img_ori_dir)

    #train_max = 0
    #train_min = 0
    #test_max = 0
    #test_min = 0

    for index, i in enumerate(f_list):
        name, os2, event = os.path.splitext(i)[0].split('_')

        X = np.load(os.path.join(img_ori_dir, i))
        m = np.load(os.path.join(mask_dir, i))
        X0 = X.copy()
        m0 = m.copy()
        #m = (m*255).astype(np.uint8).copy()
        #print (np.max(m), np.min(m), np.unique(m))
        #_, cnts, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        #print (cnts)

        '''
        if data_dir.split('/')[-1] == 'train_single': 
            img_max = np.max(X[X<175])
            img_min = np.min(X[X>-75])
            X[X>175] = img_max
            X[X<-75] = img_min
        elif data_dir.split('/')[-1] == 'test_single': 
            img_max = np.max(X[X<125])
            img_min = np.min(X[X>-125])
            X[X>125] = img_max
            X[X<-125] = img_min
        '''

        img_max = np.max(X[X<125])
        img_min = np.min(X[X>-125])
        X[X>125] = img_max
        X[X<-125] = img_min

        X = np.uint8(cv2.normalize(X, None, 0, 255, cv2.NORM_MINMAX))
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        X = clahe.apply(X)
        X = (X-np.min(X))/(np.max(X)-np.min(X))
        
        #single_img_save_path = data_dir.rstrip('/') + '_img'
        #os.makedirs(single_img_save_path, exist_ok=True)
        #io.imsave(os.path.join(single_img_save_path, f'{name}_{index}.jpg'), X)

        '''
        if data_dir.split('/')[-1] == 'train_single': 
            train_max = max(train_max, X[m>0].max())
            train_min = min(train_min, X[m>0].min())
        elif data_dir.split('/')[-1] == 'test_single': 
            test_max = max(test_max, X[m>0].max())
            test_min = min(test_min, X[m>0].min())

        continue
        '''
        x, y = np.where(m>0)

        #x_mean, y_mean = np.mean(x), np.mean(y)
        #print (x_mean, y_mean)
        w0, h0 = m.shape
        x_min = max(0, int(np.min(x)-5))
        x_max = min(w0, int(np.max(x)+5))
        y_min = max(0, int(np.min(y)-5))
        y_max = min(h0, int(np.max(y)+5))

        #print (x_min, x_max, y_min, y_max)
        m = m[x_min:x_max, y_min:y_max]
        X = X[x_min:x_max, y_min:y_max] 

        X_m_1 = X.copy() 
        #X_m_1[m<1.0] = 0


        #X_m_1 = ((X_m_1-np.min(X_m_1[m>0]))/(np.max(X_m_1[m>0])-np.min(X_m_1[m>0])))*0.9+0.1
        X_m_1 = (X_m_1-np.min(X_m_1[m>0]))/(np.max(X_m_1[m>0])-np.min(X_m_1[m>0]))
        X_m_1[m==0] = 0
        #print (np.unique(X_m_1))
        #raise


        X_m_2 = X.copy() 
        X_m_2[m>0] = 0


        #print (X_m_1.max(), X_m_1.min(), np.unique(X_m_1))
        #print (X_m_1[X_m_1>0].max(), X_m_1[X_m_1>0].min(), np.unique(X_m_1[X_m_1>0]))
        #plt.imshow(X_m_1)
        #plt.show()
        #raise        

        h, w = X_m_1.shape
        #print (w, h)

        if h < w:
            pad_1 = (w - h)//2
            pad_2 = w - pad_1 - h
            X_m_1 = np.lib.pad(X_m_1, ((pad_1, pad_2),(0,0)), 'constant', constant_values=(0, 0))
            m = np.lib.pad(m, ((pad_1, pad_2),(0,0)), 'constant', constant_values=(0, 0))
        elif h >= w:
            pad_1 = (h - w)//2
            pad_2 = h - pad_1 - w
            X_m_1 = np.lib.pad(X_m_1, ((0, 0),(pad_1, pad_2)), 'constant', constant_values=(0, 0))
            m = np.lib.pad(m, ((0, 0),(pad_1, pad_2)), 'constant', constant_values=(0, 0))

        #print (X_m_1.min(), X_m_1.max())

        if X_m_1.shape[0] != 160 or X_m_1.shape[1] != 160:
            #X = cv2.resize(X, (96, 96), interpolation=cv2.INTER_CUBIC)
            #m = cv2.resize(m, (96, 96), interpolation=cv2.INTER_CUBIC)
            #X_m_1 = cv2.resize(X_m_1, (160, 160), interpolation=cv2.INTER_NEAREST)
            X_m_1 = cv2.resize(X_m_1, (160, 160), interpolation=cv2.INTER_CUBIC)
            m = cv2.resize(m, (160, 160), interpolation=cv2.INTER_CUBIC)
            #X_m_2 = cv2.resize(X_m_2, (96, 96), interpolation=cv2.INTER_CUBIC)

        #X_m_1 = (X_m_1-np.min(X_m_1[m>0]))/(np.max(X_m_1[m>0])-np.min(X_m_1[m>0]))
        #X_m_1[m==0] = 0

        #print (X_m_1.max(), X_m_1.min(), X_m_1.shape)
        #print (m.max(), m.min(), m.shape)
        #raise

        if m0.shape[0] != 160 or m0.shape[1] != 160:
            m0 = cv2.resize(m0, (160, 160), interpolation=cv2.INTER_CUBIC)

        #print (X_m_1.min(), X_m_1.max())
        #raise

        #single_img_save_path = data_dir.rstrip('/') + '_img_cut'
        #os.makedirs(single_img_save_path, exist_ok=True)
        #io.imsave(os.path.join(single_img_save_path, f'{name}_{os2}_{event}.jpg'), X_m_1)

        X_m_1 = (X_m_1-np.min(X_m_1[m>0]))/(np.max(X_m_1[m>0])-np.min(X_m_1[m>0]))
        X_m_1[m<=0] = 0
        #print (X.shape, np.max(X_m_1), np.min(X_m_1))
        #raise

        X_m_1 = np.expand_dims(X_m_1, axis=2)
        m = np.expand_dims(m, axis=2)
        m0 = np.expand_dims(m0, axis=2)

        #X_m_2 = np.expand_dims(X_m_2, axis=2)

        XX = np.concatenate((X_m_1, X_m_1, X_m_1), axis=-1) 
        #XX = X_m_1

        datasets.append((XX[None,...], np.array([float(os2)]), np.array([int(event)])))
        datasets_name.append(name)


    set_name = data_dir.split('/')[-1]
    print (f'{set_name}: {len(datasets)}')
    print (f'{set_name}: {len(datasets_name)}')
    return datasets, datasets_name
Example #40
File: 坐标.py  Project: raoyi/workshop
# OpenCV template matching -- single-target matching
import cv2
# read the target image
target = cv2.imread("out.jpg")
# read the template image
template = cv2.imread("mod.jpg")
# get the height and width of the template image
theight, twidth = template.shape[:2]
# run template matching with the cv2.TM_SQDIFF_NORMED method
result = cv2.matchTemplate(target, template, cv2.TM_SQDIFF_NORMED)
# normalize the result
cv2.normalize(result, result, 0, 1, cv2.NORM_MINMAX, -1)
# find the minimum and maximum match values in the result matrix and their locations
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print(min_loc, (min_loc[0] + twidth, min_loc[1] + theight))
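Since cv2.TM_SQDIFF_NORMED treats the lowest value as the best match, a short hedged follow-up to visualize the hit (window name and color are illustrative):

cv2.rectangle(target, min_loc, (min_loc[0] + twidth, min_loc[1] + theight), (0, 0, 255), 2)
cv2.imshow("match", target)
cv2.waitKey(0)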
Example #41
def main():

    # Reference Distance
    L0 = 100.

    cap = cv2.VideoCapture(0)

    ok = False

    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        drone.takeoff()

        # skip first 300 frames
        frame_skip = 300

        #-----------movie size
        #size = (int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH)),
        #int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT)))
        size = (640, 480)
        fps = 30
        fourcc = cv2.VideoWriter_fourcc(*'X264')
        video = cv2.VideoWriter('output.avi', fourcc, fps, size)

        vx = 0
        vy = 0

        while True:
#------------------------------------------for start
            for frame in container.decode(video=0):

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                #ret,image = cap.read()

                start_time = time.time()

                image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)

                # Start timer
                timer = cv2.getTickCount()

                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                #cv2.waitKey(1)

                # Update tracker
                #ok, bbox = tracker.update(image)

                # Calculate Frames per second (FPS)
                #fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);

                term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1) #cmsf

                # Draw bounding box
                if ok == True:
                    #(x,y,w,h) = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))

                    x_mae = x
                    y_mae = y
                    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) #cmsf
                    dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1) #cmsf

                    ret, track_window = cv2.CamShift(dst, track_window, term_crit) #cmsf
                    (x,y,w,h) = track_window #cmsf

                    #CX=int(bbox[0]+0.5*bbox[2]) #Center of X
                    #CY=int(bbox[1]+0.5*bbox[3])
                    S = w*h

                    # Tracking success
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(image, p1, p2, (255,0,0), 2, 1)
                    p10 = (x0, y0)
                    p20 = (x0 + w0, y0 + h0)
                    cv2.rectangle(image, p10, p20, (0,255,0), 2, 1)

                    d = round(L0 * m.sqrt(float(S) / S0)) - L0
                    dx = x + w/2 - CX0
                    dy = y + h/2 - CY0

                    vx = x - x_mae
                    vy = y - y_mae
                    print("CX,CY,S,x,y,S0 =",int(x+0.5*w),int(y+0.5*h),S,x,y,S0)
                    print(d,dx,dy)
                    print("vx,vy =",vx,vy)

                    tracking(drone,d,dx,dy,vx,vy,S,S0)

                else:
                    # Tracking failure
                    cv2.putText(image, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)

                cv2.imshow('Original', image)
                video.write(image)

                key = cv2.waitKey(1)&0xff
                if key == ord('q'):
                    print('Q!')
                    break

                if key == ord('r'):
                    roi_time = time.time()
                    bbox = cv2.selectROI(image, False)
                    print(bbox)
                    (x0,y0,w0,h0) = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))

                    CX0=int(x0+0.5*w0) #Center of X
                    CY0=int(y0+0.5*h0)
                    S0 = w0*h0

                    #camshif--ref_https://qiita.com/MuAuan/items/a6e4aace2a6c0a7cb03d-----------------------------
                    #cap = cv2.VideoCapture(0)

                    track_window = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))
                    roi = image[y0:y0+h0, x0:x0+w0]

                    hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
                    img_mask = cv2.inRange(hsv_roi, numpy.array((0., 60.,32.)), numpy.array((180.,255.,255.)))

                    roi_hist = cv2.calcHist([hsv_roi], [0], img_mask, [180], [0,180])
                    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
                    term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

                    ret,image = cap.read()
                    ok = True

                    #camshif_end--------------------------------------

                    x = x0
                    y = y0

                    #ok = tracker.init(image, bbox)

                if frame.time_base < 1.0/60:
                    time_base = 1.0/60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time)/time_base)

                #print(ok)

#-------------------------------------------------for end
            break
        #print('stop fly')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        drone.land()
        #cap.release()
        #video.release()
        cv2.destroyAllWindows()
Example #42
 def histogram(self, image, mask):
     hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
                         [0, 180, 0, 256, 0, 256])
     hist = cv2.normalize(hist, hist).flatten()
     return hist
Example #43
def rgb2gm(I):
    if (I.shape[2] == 3):
        I = cv2.normalize(I.astype('float64'), None, 0.0, 1.0, cv2.NORM_MINMAX)
        I = (I[:, :, 0] * I[:, :, 1] * I[:, :, 2])**(1 / 3)

    return I
Example #44
				#in_h265d = None

				curr_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
				if curr_time != last_time:
					print(f'{curr_time = }')
					last_time = curr_time

				if args.show_preview:
					# data is originally represented as a flat 1D array, it needs to be converted into HxW form
					depth_h, depth_w = in_depth.getHeight(), in_depth.getWidth()
					if args.debug_img_sizes:
						print(f'{depth_h = } - {depth_w = }')
					depth_frame = in_depth.getData().reshape((depth_h, depth_w)).astype(np.uint8)
					if args.debug_img_sizes:
						print(f'{depth_frame.shape = } - {len(depth_frame) = } - {type(depth_frame) = } - {depth_frame.size = }')
					depth_frame_orig = cv2.normalize(depth_frame, None, 0, 255, cv2.NORM_MINMAX)
					depth_frame = np.ascontiguousarray(depth_frame_orig)
					# depth_frame is transformed, the color map will be applied to highlight the depth info
					depth_frame = apply_colormap(depth_frame, cmap=13)
					# depth_frame is ready to be shown
					cv2.imshow("disparity", depth_frame)
			
					# Retrieve 'bgr' (opencv format) frame
					rgb_frame = in_rgb.getCvFrame()
					if args.debug_img_sizes:
						print(f'{rgb_frame.shape = } - {len(rgb_frame) = } - {type(rgb_frame) = } - {rgb_frame.size = }')
					cv2.imshow("rgb", rgb_frame)
	
					#img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
					depth_frame_th = cv2.adaptiveThreshold(depth_frame_orig, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
					cv2.imshow("disparity th", depth_frame_th)
        covd[i,(i+1):] = var*10**(-tmp/ns)
        covd[(i+1):,i] = covd[i,(i+1):]
    noise = corr_noise(covd,nparams["numdatas"])
    r,c   = nparams["xshape"].shape
    noise_reshape = noise.reshape(r,c)
    return noise_reshape

def corr_noise(covd,numdatas):
    # draw correlated noise: if x = V*sqrt(D)*z with z ~ N(0, I), then
    # cov(x) = V*D*V^T = covd
    npoints = len(covd)
    d,v     = np.linalg.eig(covd)
    d       = np.diag(d)
    orig    = np.random.randn(npoints,numdatas)
    orig    = orig.reshape(len(orig),1)
    noise   = np.matmul(np.matmul(v,np.sqrt(d)),orig)
    noise   = np.real(noise)
    return noise
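A hedged sanity check for corr_noise: with numdatas == 1 (which the reshape above implies), stacking many draws should reproduce the target covariance.

import numpy as np

covd = np.array([[1.0, 0.6],
                 [0.6, 1.0]])                          # small example covariance
draws = np.hstack([corr_noise(covd, 1) for _ in range(5000)])
print(np.cov(draws))                                   # should approach covd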

if __name__ == "__main__":
    
    import matplotlib.pyplot as plt
    n1,n2,d1,d2 = gen_noise_corr_dem()
    img = cv2.normalize(n1,None,alpha=0,beta=255,norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F).astype(np.uint8)
    img = Image.fromarray(img,'L')
    
     
    #plt.imshow(img,cmap='gray')
    fig,ax = plt.subplots()
    im = ax.imshow(img,cmap='jet')
    #fig.colorbar(im,ax=ax)
    #print(np.max(n1),np.max(n2),np.max(n3))
    #plt.colorbar()
示例#46
0
# Using the HSV color space makes histogram statistics and matching more reliable
hsv1 = cv.cvtColor(src1, cv.COLOR_BGR2HSV)
hsv2 = cv.cvtColor(src2, cv.COLOR_BGR2HSV)
hsv3 = cv.cvtColor(src3, cv.COLOR_BGR2HSV)
hsv4 = cv.cvtColor(src4, cv.COLOR_BGR2HSV)

# Compute the histograms; for HSV, the H and S channels carry the most
# comparative meaning. Bins: 60 for H (range 0-180), 64 for S (range 0-256)
hist1 = cv.calcHist([hsv1], [0, 1], None, [60, 64], [0, 180, 0, 256])
hist2 = cv.calcHist([hsv2], [0, 1], None, [60, 64], [0, 180, 0, 256])
hist3 = cv.calcHist([hsv3], [0, 1], None, [60, 64], [0, 180, 0, 256])
hist4 = cv.calcHist([hsv4], [0, 1], None, [60, 64], [0, 180, 0, 256])

# Normalize before comparing
cv.normalize(hist1, hist1, 1.0, 1.0, cv.NORM_INF)
cv.normalize(hist2, hist2, 1.0, 1.0, cv.NORM_INF)
cv.normalize(hist3, hist3, 1.0, 1.0, cv.NORM_INF)
cv.normalize(hist4, hist4, 1.0, 1.0, cv.NORM_INF)

# The four commonly used comparison methods
methods = [
    cv.HISTCMP_CORREL, cv.HISTCMP_CHISQR, cv.HISTCMP_INTERSECT,
    cv.HISTCMP_BHATTACHARYYA
]

str_method = ""

# Correlation and the Bhattacharyya distance are the most commonly used
for method in methods:
    v1 = cv.compareHist(hist1, hist2, method)  # compare the image histograms
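For reference, a hedged sketch labeling the four methods and their reading direction; the method_names dictionary is an illustrative addition, not part of the original.

# higher = more similar for correlation / intersection,
# lower = more similar for chi-square / Bhattacharyya
method_names = {
    cv.HISTCMP_CORREL: 'Correlation (higher is better)',
    cv.HISTCMP_CHISQR: 'Chi-Square (lower is better)',
    cv.HISTCMP_INTERSECT: 'Intersection (higher is better)',
    cv.HISTCMP_BHATTACHARYYA: 'Bhattacharyya (lower is better)',
}
for method in methods:
    v = cv.compareHist(hist1, hist2, method)
    print(f'{method_names[method]}: {v:.4f}')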
示例#47
0
print(j)
print(k)

cv2.imwrite("1.jpg",img)
cv2.imwrite("2.jpg",final_mask)
#chessboard = cv2.bitwise_and(final_mask,img)
#print closing
#print final_mask
#cv2.imwrite('masked.jpg',mask)

cv2.imwrite("binary.jpg",thresh1)

#05/03/2016
#chessBoard = imerode(mask,strel('disk',8)).*im2double(gray_im);
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
chessBoard = cv2.erode(cv2.bitwise_and(final_mask,cv2.normalize(gray_image.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)),kernel)

#figure,imshow(chessBoard); title('Segmented Chess Board');
cv2.imshow('Segmented Chess Board',chessBoard) 

ret1,thresh2 = cv2.threshold(img,60,255,cv2.THRESH_BINARY)
edges1 = cv2.Canny(thresh2,100,200)
cv2.imwrite("canny1.jpg", edges1)
lines1 = cv2.HoughLines(edges1,1,np.pi/180,85)

#out = zeros(size(thrIm));
out = np.zeros(edges1.shape)

#n = size(out,2);
n = out.shape[1]
示例#48
0
def tsmooth(img, lamda=0.01, sigma=3.0, sharpness=0.001):
    # smooth img while keeping structure: texture-weighted linear-system solve
    I = cv2.normalize(img.astype('float64'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    x = np.copy(I)
    wx, wy = computeTextureWeights(x, sigma, sharpness)
    S = solveLinearEquation(I, wx, wy, lamda)
    return S
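A hedged usage sketch for tsmooth, assuming computeTextureWeights and solveLinearEquation are defined elsewhere in the same source file; the input path is an assumption.

import cv2
import numpy as np

img = cv2.imread('textured.jpg')                     # hypothetical input
S = tsmooth(img)                                     # float64 result in [0, 1]
cv2.imshow('smoothed', (S * 255).astype(np.uint8))
cv2.waitKey(0)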
示例#49
0
    elif timelapser is None and timelapse:
        timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
        timelapser.initialize(corners, sizes)
    if timelapse:
        matones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]),
                          np.uint8)
        timelapser.process(image_warped_s, matones, corners[idx])
        pos_s = img_names[idx].rfind("/")
        if pos_s == -1:
            fixedFileName = "fixed_" + img_names[idx]
        else:
            fixedFileName = img_names[idx][:pos_s + 1] + "fixed_" + img_names[
                idx][pos_s + 1:]
        cv.imwrite(fixedFileName, timelapser.getDst())
    else:
        blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
if not timelapse:
    result = None
    result_mask = None
    result, result_mask = blender.blend(result, result_mask)
    cv.imwrite(result_name, result)
    zoomx = 600 / result.shape[1]
    dst = cv.normalize(src=result,
                       dst=None,
                       alpha=255.,
                       norm_type=cv.NORM_MINMAX,
                       dtype=cv.CV_8U)
    dst = cv.resize(dst, dsize=None, fx=zoomx, fy=zoomx)
    cv.imshow(result_name, dst)
    cv.waitKey()
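The display step above relies on a common pattern: min-max scale an arbitrary result into 8-bit before imshow. A self-contained, hedged sketch of that pattern (the random input is a stand-in):

import cv2 as cv
import numpy as np

result = np.random.rand(240, 320).astype(np.float32) * 1000.0   # arbitrary value range
dst = cv.normalize(src=result, dst=None, alpha=0, beta=255,
                   norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
cv.imshow('preview', dst)
cv.waitKey(0)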
示例#50
0
print(HL)
print(HR)

# Rectify images
img_left_rect = cv2.warpPerspective(img_left_undistorted, HL, (C, R))
img_right_rect = cv2.warpPerspective(img_right_undistorted, HR, (C, R))

# Display rectified images
cv2.imshow('Rectified Left Image', img_left_rect)
cv2.imshow('Rectified Right Image', img_right_rect)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
cv2.imwrite('./results/rect_left.png', img_left_rect)
cv2.imwrite('./results/rect_right.png', img_right_rect)

gray_left_rect = cv2.cvtColor(img_left_rect, cv2.COLOR_BGR2GRAY)
gray_right_rect = cv2.cvtColor(img_right_rect, cv2.COLOR_BGR2GRAY)

stereoMatcher = cv2.StereoBM_create(256, 25)
disparity = stereoMatcher.compute(gray_left_rect, gray_right_rect)
disparity = cv2.normalize(src=disparity,
                          dst=disparity,
                          beta=-16,
                          alpha=255,
                          norm_type=cv2.NORM_MINMAX)
disp_color = cv2.applyColorMap(np.uint8(disparity), cv2.COLORMAP_JET)

cv2.imshow('Disparity map', disp_color)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
cv2.imwrite('./results/disparity.png', disp_color)
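One note on the normalize call above: with cv2.NORM_MINMAX, OpenCV uses min(alpha, beta) and max(alpha, beta) as the output range, so alpha=255 / beta=-16 maps disparities into [-16, 255] and the later uint8 cast wraps the negatives. A hedged sketch of the more conventional one-step idiom:

disparity_8u = cv2.normalize(src=disparity, dst=None, alpha=0, beta=255,
                             norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
disp_color = cv2.applyColorMap(disparity_8u, cv2.COLORMAP_JET)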
for z in range(9):
    next = cv.resize(frames[anchor + z + 1], (299, 299))
    next = cv.cvtColor(next, cv.COLOR_BGR2GRAY)
    prvs = cv.medianBlur(prvs, 5)
    next = cv.medianBlur(next, 5)
    # print(prvs.shape,next.shape)
    flow = cv.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 7, 4, 7, 5, 0)
    # flow_stack.append(flow) # 299,299,2,10
    # flow_stack = np.concatenate((flow_stack,flow),axis=-1)
    prvs = next
    mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
    mag = (mag > 1) * mag
    # hue encodes the flow direction (angle halved to fit OpenCV's 0-180 hue range)
    hsv[..., 0] = ang * 180 / np.pi / 2
    # value encodes the flow magnitude, normalized so strong motion appears bright
    hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
    bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
    bgr = cv.medianBlur(bgr, 5)
    gray = cv.cvtColor(bgr, cv.COLOR_BGR2GRAY)
    gray = np.reshape(gray, [299, 299, 1])
    flow_stack = np.concatenate((flow_stack, gray), axis=2)
    cv.imshow('frames', gray)
    cv.imshow('frames2', next)
    cv.waitKey(20)
# print(X[i,].shape, flow_stack.shape)
X = flow_stack
cv.destroyAllWindows()


def BaejiNet_temporal_stream():
示例#52
0
def normaliseImg(img):
    # stretch channels 1 and 2 (S and V for an HSV input) to the full 0-255 range
    channel = cv2.split(img)
    cv2.normalize(channel[1], channel[1], 0, 255, cv2.NORM_MINMAX)
    cv2.normalize(channel[2], channel[2], 0, 255, cv2.NORM_MINMAX)
    return cv2.merge(channel, img)
def camshift():

    cam = video.create_capture(0)
    ret, frame = cam.read()
    cv2.namedWindow('camshift')
    cv2.moveWindow('camshift', 0, 0)
    x = 0
    selection = True
    flag = True
    flagForward = True
    x2 = 0

    while True:
        time.sleep(0.05)
        ret, frame = cam.read()
        vis = frame.copy()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        if selection:
            # initial region
            x0, y0, x1, y1 = (279, 116, 485, 309)
            track_window = (x0, y0, x1 - x0, y1 - y0)
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            hist = hist.reshape(-1)
            vis_roi = vis[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            vis[mask == 0] = 0
        selection = False
        # compute the back-projection probability map
        prob = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)
        prob &= mask
        # iteration termination criteria
        term_crit = (cv2.TERM_CRITERIA_EPS |
                     cv2.TERM_CRITERIA_COUNT, 10, 1)
        track_box, track_window = cv2.CamShift(
            prob, track_window, term_crit)
        # pprint(track_window)
        # pprint(track_box)
        cv2.ellipse(vis, track_box, (0, 0, 255), 2)
        x = track_box[0][0]
        # print x
        if x - 300 > 70 and flag:
            flag = False
            # ser.write('right rotating:12000,10000\r\n')
            # threading.Thread(target=Move().runRight())
            print('rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr')

        if x - 300 < -70 and flag:
            flag = False
            # ser.write('left rotating:12000,10000\r\n')
            # threading.Thread(target=Move().runLeft())
            print('lllllllllllllllllllllllllllllllllllllllllllllllllllllll')

        if -70 < x - 300 < 70 and flag is False and flagForward:
            print('ssssssssssssssssssssssssssssssssssssssssssssssssssssssss')
            flag = True
            # ser.write('stop\r\n')
            # threading.Thread(target=Move().runStop())
        cv2.imshow('camshift', vis)
        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
    cv2.destroyAllWindows()
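The steering logic above hardcodes a 300-pixel center and a 70-pixel deadband; a hedged sketch factoring it into a helper (the names are assumptions):

CENTER_X, DEADBAND = 300, 70

def steer_command(x):
    # return the motion command implied by the tracked x position
    offset = x - CENTER_X
    if offset > DEADBAND:
        return 'right'
    if offset < -DEADBAND:
        return 'left'
    return 'stop'

print(steer_command(400))   # 'right'
print(steer_command(250))   # 'stop' (inside the deadband)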
示例#54
0
        frame = cv.resize(frame, IMG_SIZE)
        cv.imshow("input", frame)
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

        # Calculates dense optical flow by Farneback method
        # https://docs.opencv.org/3.0-beta/modules/video/doc/motion_analysis_and_object_tracking.html#calcopticalflowfarneback
        flow = cv.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 50, 3, 5, 1.1, 0)

        # Computes the magnitude and angle of the 2D vectors
        magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])

        # Sets image hue according to the optical flow direction
        mask[..., 0] = angle * 180 / np.pi / 2

        # Sets image value according to the optical flow magnitude (normalized)
        mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)

        # Converts HSV to BGR (OpenCV's channel order) for display
        rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)

        cv.imshow("dense optical flow", rgb)
        if i % takeFrame == 0:
            cv.imwrite(f'''{folders['flow']}/{i}.png''', rgb)
            cv.imwrite(f'''{folders['frames']}/{i}.png''', frame)
        i+=1
        prev_gray = gray
        # Frames are read by intervals of 1 millisecond. The programs breaks out of the while loop when the user presses the 'q' key
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    # The following frees up resources and closes all windows
    cap.release()
def update(value):
    recovered_image = inverse_filter(blurred_image, kernel, value).astype(np.float32)
    recovered_image = cv2.cvtColor(recovered_image, cv2.COLOR_BGR2RGB)
    recovered_image = cv2.normalize(recovered_image, None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
    recovered_image_plot.set_data(recovered_image)
    fig.canvas.draw_idle()
示例#56
0
def normalize(images):
    # scale each image into [0, 1] as float32
    arr = [cv2.normalize(img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) for img in images]
    return np.array(arr)
wrong = cv2.imread("bubble_8.png")

img2 = imutils.resize(img2, width=int((img2.shape[1]) * r))

histr1, histr2, histr3 = histogram(template)
plt.plot(histr1, 'r')
plt.plot(histr2, 'g')
plt.plot(histr3, 'b')
plt.savefig("templatergb.png")
plt.show()
# originalpts = []
# for x,y,z in zip(histr1,histr2,histr3):
#     originalpts.append((x,y,z).index(max(x,y,z)))
hist = cv2.calcHist([template], [0, 1, 2], None, [8, 8, 8],
                    [0, 256, 0, 256, 0, 256])
h1 = cv2.normalize(hist, hist).flatten()

hist = cv2.calcHist([wrong], [0, 1, 2], None, [8, 8, 8],
                    [0, 256, 0, 256, 0, 256])
h2 = cv2.normalize(hist, hist).flatten()

for (methodName, method) in OPENCV_METHODS:
    results = {}
    reverse = False
    if methodName in ("Correlation", "Intersection"):
        reverse = True
    d = cv2.compareHist(h1, h1, method)  # self-comparison baseline for this method
    print(f"starting {methodName}: baseline result is {d}")

for (methodName, method) in OPENCV_METHODS:
    results = {}
def graySpectrum(amplitude):
    # log-scale the amplitude spectrum for visibility, then normalize to [0, 1]
    amplitude = np.log(amplitude + 1.0)
    spectrum = np.zeros(amplitude.shape, np.float32)
    cv2.normalize(amplitude, spectrum, 0, 1, cv2.NORM_MINMAX)
    return spectrum
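A hedged usage sketch for graySpectrum; the float32 cast matters, since the preallocated float32 dst inside graySpectrum expects a matching input depth. The input path is an assumption.

import cv2
import numpy as np

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)
fft = np.fft.fftshift(np.fft.fft2(gray))             # center the zero-frequency bin
amplitude = np.abs(fft).astype(np.float32)
cv2.imshow('log amplitude spectrum', graySpectrum(amplitude))
cv2.waitKey(0)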
示例#59
0
def iris_encoding(img):

	# reduce / remove noise by applying a Gaussian filter
	blur = cv2.GaussianBlur(img, (3, 3), cv2.BORDER_DEFAULT)

	# contrast enhancement
	normalized = cv2.normalize(blur, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) # from 0-255 -> 0-1
	gamma = 1.2
	contrasted = np.power(normalized, 1/gamma)
	contrasted = cv2.normalize(contrasted, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1) # from 0-1 -> 0-255

	output = img.copy()

	# detection
	(x1, y1, r1) = pupil_detection(contrasted)
	(x2, y2, r2) = iris_contour_detection(contrasted, r1, 2)

	cv2.circle(output, (x1, y1), r1, (0, 255), 2)
	cv2.circle(output, (x2, y2), r2, (0, 255), 2)

	#plt.imshow(output)
	#plt.title('iris detection')
	#plt.show()


	# masking
	pupil_mask = create_circular_mask(img.shape[:2], (x1, y1, r1))
	external_iris_mask = create_circular_mask(img.shape[:2], (x2, y2, r2))
	mask = np.subtract(external_iris_mask, pupil_mask)

	#plt.imshow(mask, cmap='gray')
	#plt.title('mask')
	#plt.show()


	# isolated iris
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	isolated_iris = gray & mask

	#plt.imshow(isolated_iris, cmap='gray')
	#plt.title('isolated_iris')
	#plt.show()


	# iris normalization
	polar_array, noise_array = iris_normalization(isolated_iris, x2, y2, r2, x1, y1, r1, 20, 240)

	#plt.imshow(polar_array, cmap='gray')
	#plt.title('polar_array')
	#plt.show()


	# feature extraction (2D gabor wavelet)
	# generate code (phase quantization)
	minWaveLength = 18
	mult = 1
	sigmaOnf = 0.5
	template, mask = encode(polar_array, noise_array, minWaveLength, mult, sigmaOnf)

	#plt.imshow(template, cmap='gray')
	#plt.title('template')
	#plt.show()

	return template, mask
    fig = plt.figure()
    plt.axis("off")
    recovered_image = inverse_filter(blurred_image,kernel, D_init)
    recovered_image = cv2.normalize(recovered_image, None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
    # recovered_image_plot = plt.imshow(recovered_image)
    recovered_image_plot = plt.imshow(cv2.cvtColor(recovered_image.astype(np.float32), cv2.COLOR_BGR2RGB))
    slider_ax = plt.axes([0.1, 0.05, 0.8, 0.05])
    value_slider = Slider(slider_ax, 'value', D_min, D_max, valinit=D_init)
    # D0_slider.on_changed(update)
    def update(value):
        recovered_image = inverse_filter(blurred_image, kernel, value).astype(np.float32)
        recovered_image = cv2.cvtColor(recovered_image, cv2.COLOR_BGR2RGB)
        recovered_image = cv2.normalize(recovered_image, None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)
        recovered_image_plot.set_data(recovered_image)
        fig.canvas.draw_idle()
    value_slider.on_changed(update)
    plt.show()

# this function was written by me
if __name__ == '__main__':
	# global because I am accessing it in another function
	global ground_truth
	blurred_image = cv2.imread("Blurry1_1.jpg", 1)
	blurred_image = cv2.normalize(blurred_image, None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)

	ground_truth = cv2.imread("GroundTruth1_1_1.jpg", 1)
	ground_truth = cv2.normalize(ground_truth, None, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)

	kernel = cv2.imread("blur_kern_1.png", 1)
	interactive_value(blurred_image, kernel)