Exemplo n.º 1
0
    def askForUserInput(self):
        """Grab one frame, let the user select the contour of interest and
        update the area thresholds around it."""
        #grab a frame and split it into its left/right halves
        ret, frame = self.cam.read()
        left, right = devika_cv.break_left_right(frame)
        candidates = [left, right, frame]

        #pull the current parameters from the settings window
        params = self.readSettingsParameters()
        th, th_max, area_delta = params['th'], params['max'], params['delta']
        dilateSize, erodeSize, LRA = params['dilateSize'], params['erodeSize'], params['LRA']

        #LRA picks left (0), right (1) or the whole frame (2)
        selected = candidates[LRA]

        #keep a single channel (index 1 is green in BGR order, although the
        #original comment called it "blue" -- verify which channel is intended)
        selected = selected[:, :, 1]

        #let the user click on the contour to track
        self.askUserForInput_selectContour(selected)

        #recompute the contour area and set the new thresholds
        self.updatedValuesOfTh(area_delta)
Exemplo n.º 2
0
        # NOTE(review): fragment of a larger event/processing loop -- 'ch',
        # 'control_mode', 'cam', 'templates' and imports are defined earlier
        # in the original file.
        break

    if ch == ord('p'):
        # 'p' toggles between pausing and running the processing
        control_mode = 'pause' if control_mode == 'run' else 'run'

    if control_mode == 'run':

        ret, frame = cam.read()

        # the trackbar stores th scaled down by 1000
        th = cv2.getTrackbarPos('th', 'settings') * 1000

        if ret == False:
            print 'finishing due to end of video'
            break

        left, right = devika_cv.break_left_right(frame)

        # grayscale + histogram equalization + blur before template matching
        input = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        input = cv2.equalizeHist(input)
        input = cv2.blur(input, (5, 5))
        output = cv2.cvtColor(input, cv2.COLOR_GRAY2BGR)

        # Apply template Matching over the six regions named by
        # Top/Bottom x Left/Center/Right ('TL', 'TC', ..., 'BR')
        for TCB in 'TB':
            for LCR in 'LCR':
                name = TCB + LCR
                matches = []
                for tmp in templates[name]:
                    # template dimensions: shape is (rows, cols), reversed to (w, h)
                    w, h = tmp.shape[::-1]
                    # TM_SQDIFF: lower score means better match, so the best
                    # location is min_loc
                    res = cv2.matchTemplate(input, tmp, cv2.TM_SQDIFF)
                    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
def nothing(e):
    """No-op trackbar callback (OpenCV requires one to be supplied)."""
    return None
# register the tuning trackbars on the settings window
# (window_trackbar, cam, filter and the imports are defined earlier in the file)
cv2.createTrackbar('window_size',window_trackbar,5,13,nothing)
cv2.createTrackbar('th_min',window_trackbar,107,255,nothing)
cv2.createTrackbar('th_max',window_trackbar,193,255,nothing)
cv2.createTrackbar('erode',window_trackbar,3,13,nothing)
cv2.createTrackbar('dilate',window_trackbar,4,13,nothing)
cv2.createTrackbar('left/right',window_trackbar,0,1,nothing)


while cam.isOpened():
    ret,frame = cam.read()
    if ret == False:
        print 'finishing due to end of video'
        break
    left, right = devika_cv.break_left_right(frame)

    #read trackbars (clamped to sane minimums where needed)
    window_size = max(3,cv2.getTrackbarPos('window_size', window_trackbar))
    th_min = max(1, cv2.getTrackbarPos('th_min', window_trackbar))
    th_max = max(1, cv2.getTrackbarPos('th_max', window_trackbar))
    erode_p = cv2.getTrackbarPos('erode', window_trackbar)
    dilate_p = cv2.getTrackbarPos('dilate', window_trackbar)
    control_left_right = cv2.getTrackbarPos('left/right', window_trackbar)

    #pick which half of the frame to process (0 = left, 1 = right)
    input = left if control_left_right == 0 else right
    hsv = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(input, cv2.COLOR_BGR2GRAY)

    # NOTE(review): the loop body continues past this fragment
    filter.setParams(window_size, th_min, th_max, erode_p, dilate_p)
Exemplo n.º 4
0
# NOTE(review): script fragment -- the window_* names and the cv2/np imports
# are defined earlier in the original file.
cv2.namedWindow(window_flowMagnitude, cv2.WINDOW_NORMAL)
cv2.namedWindow(window_output, cv2.WINDOW_NORMAL)
cv2.namedWindow(window_flow, cv2.WINDOW_NORMAL)

# tile the three windows side by side along the top of the screen
cv2.moveWindow(window_flowMagnitude, 0, 0)
cv2.moveWindow(window_output, 260, 0)
cv2.moveWindow(window_flow, 520, 0)


#initialize camera
from config import video2load
cam = cv2.VideoCapture(video2load)
#obtain first frame
ret, prev = cam.read()
# break_left_right returns (left, right): the second value is bound to 'prev',
# so 'prev' is the RIGHT half here despite the name -- verify against callers
left,prev = devika_cv.break_left_right(prev)
prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

#define filter parameters
noiseFilterKernelSize = (7,7)

#hsv image used to paint the optical-flow field; saturation fixed at maximum
hsv = np.zeros_like(prev)
hsv[...,1] = 255

#criteria for kmeans: stop after 30 iterations or epsilon 1.0
my_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1.0)

#initialize kalman filter (2D state, 2D measurement, no control input)
kalman = cv2.KalmanFilter(dynamParams = 2, measureParams = 2, controlParams = 0)
# NOTE(review): OpenCV expects statePost as float32 with shape
# (dynamParams, 1); this int 1-D array may be rejected or misused by
# predict()/correct() -- confirm and cast to np.float32 (2,1) if needed
kalman.statePost = np.array([10, 10])
Exemplo n.º 5
0
def main():
    """Track the mouse (rat) in the video.

    Per frame: locate the floor box, mask everything outside it, detect
    candidate contours, pick the one that is the mouse, and display the
    intermediate results.  The 'th'/'max' trackbars are written back by the
    program itself to keep the accepted contour area inside a window of
    +-area_delta around the last known mouse area.  Press 'q' to quit.
    """
    #initialize camera
    cam = cv2.VideoCapture('../../video/avi/myFavoriteVideo.avi')

    #create windows to display output and intermediate results
    window_input = jasf_cv.getNewWindow('input')
    window_otsu = jasf_cv.getNewWindow('otsu')
    window_open = jasf_cv.getNewWindow('open')
    window_small_filter = jasf_cv.getNewWindow('smallFilter')
    window_output = jasf_cv.getNewWindow('output')
    window_settings = jasf_cv.getNewWindow('settings')

    def doNothing(val):
        """No-op function to be passed to createTrackbar."""
        pass

    #create trackbars; 'th' and 'max' are updated by the program itself,
    #'area_delta' is set freely by the user
    cv2.createTrackbar('th', window_settings, 0, 400, doNothing)
    cv2.createTrackbar('max', window_settings, 100, 400, doNothing)
    cv2.createTrackbar('area_delta', window_settings, 4, 10, doNothing)

    def updateAreaThresholds(contour, area_delta):
        """Write 'th'/'max' as a +-area_delta window around the contour area
        (scaled down by 100); floors of 9/16 keep the window sane.
        Extracted from the two identical copies in the original loop."""
        area = cv2.contourArea(contour) / 100
        newTh = max(area - area_delta, 9)
        newTh_max = max(area + area_delta, 16)
        cv2.setTrackbarPos('th', window_settings, int(newTh))
        cv2.setTrackbarPos('max', window_settings, int(newTh_max))

    #---------------------------------------------------------------------------
    #initialize required variables
    #---------------------------------------------------------------------------
    rx, ry, mouse = -1, -1, []
    previous_roi, roi = [], []
    approx, previous_approx = [], []
    #these values were set manually to produce good results
    #alpha: tolerated error when approximating the floor contour by a polygon
    alpha = 34/1000.0
    #max allowed increase in floor area from one iteration to the next
    allowed_floor_jump = 50
    #---------------------------------------------------------------------------
    #main loop
    #---------------------------------------------------------------------------
    while cam.isOpened():
        #read frame; use 'is None' -- comparing a numpy array with '== None'
        #is unreliable (elementwise comparison)
        ret, frame = cam.read()
        if frame is None:
            print('finishing due to end of video')
            break
        left, right = devika_cv.break_left_right(frame)

        #read trackbars ('th'/'max' were set by the program itself)
        th = cv2.getTrackbarPos('th', window_settings)
        th_max = cv2.getTrackbarPos('max', window_settings)
        area_delta = cv2.getTrackbarPos('area_delta', window_settings)

        #single channel of the right half (index 0 = blue in BGR order)
        B = right[:,:,0]
        #-----------------------------------------------------------------
        #Step 1: localize the square box (floor region)
        #-----------------------------------------------------------------
        img = B.copy()
        #keep the two previous states so the detector can reject bad jumps
        previous_approx = deepcopy(approx)
        previous_roi = deepcopy(roi)
        #detectFloor returns the floor contour plus the Otsu threshold that
        #should be reused later (alpha replaces the duplicated 34/1000.0 literal)
        roi, approx, inver, otsu_th = jasf_ratFinder.detectFloor(img, alpha, previous_approx, previous_roi, allowed_floor_jump)
        #turn the contour into a mask: 1 = pixels to consider, 0 = ignore
        floor_mask = np.zeros_like(img)
        floor_mask = cv2.drawContours(floor_mask, [roi], 0, 1, -1)

        #-----------------------------------------------------------------
        #Step 2: find candidates to the rat contour
        #-----------------------------------------------------------------
        img = img * floor_mask
        #first iteration only: ask the user for the initial mouse position
        if (rx, ry) == (-1, -1):
            rx, ry, mouse = initialize(img, th, th_max, otsu_th)
            updateAreaThresholds(mouse, area_delta)

        #find candidates to the rat contour ('opened' renamed from 'open',
        #which shadowed the builtin)
        contours, otsu_threshold, opened, filterSmall = jasf_ratFinder.detectInterestingContours(img, th, th_max, otsu_th)

        #-----------------------------------------------------------------
        #Step 3: select which contour is the real mouse
        #-----------------------------------------------------------------
        rx, ry, new_mouse = filterMouse(contours, (rx, ry))
        #if the mouse was found, re-center the thresholds on its new area
        if type(new_mouse) is not bool:
            mouse = new_mouse
            updateAreaThresholds(mouse, area_delta)

        #-----------------------------------------------------------------
        #Step 4: show output and some intermediate results
        #-----------------------------------------------------------------
        output = myDrawContours(img.copy(), [mouse])

        #binary images scaled to 0/255 for display
        cv2.imshow(window_otsu, 255*otsu_threshold)
        cv2.imshow(window_open, 255*opened)
        cv2.imshow(window_small_filter, 255*filterSmall)
        cv2.imshow(window_input, img)
        cv2.imshow(window_output, output)

        #check if execution should continue or not
        ch = cv2.waitKey(1) & 0xFF
        if ch == ord('q'):
            print('end of execution due to user command')
            break

    cam.release()
    cv2.destroyAllWindows()
Exemplo n.º 6
0
    def update(self):
        """Process one frame: detect the rat contour, update the thresholds
        around its area and push the intermediate images to the GUI.

        Raises:
            VideoInvalid: when the video stream ends (read fails).
        """
        #read frame
        ret, frame = self.cam.read()
        if not ret:
            raise VideoInvalid('finishing due to end of video')
        left, right = devika_cv.break_left_right(frame)
        inputList = [left, right, frame]

        #read parameters from settings window
        settings = self.readSettingsParameters()
        th, th_max, area_delta = settings['th'], settings['max'], settings['delta']
        dilateSize, erodeSize, LRA = settings['dilateSize'], settings['erodeSize'], settings['LRA']

        #select which image to use (LRA: 0=left, 1=right, 2=whole frame)
        img = inputList[LRA]
        #pad the frame with a black border so fixed-size ROIs near the edge
        #still fit; 'pad' replaces the previously duplicated magic number 40
        pad = 40
        h, w, d = img.shape
        extendedInput = np.zeros((h + 2*pad, w + 2*pad, d), dtype=np.uint8)
        extendedInput[pad:pad+h, pad:pad+w, :] = img[:, :, :]

        #get blue component, channel 0 in BGR order (as suggested by Devika)
        B = img[:, :, 0]
        Bextended = extendedInput[:, :, 0]

        #-----------------------------------------------------------------
        #find candidates to rat contour
        #-----------------------------------------------------------------
        work = B.copy()
        self.contourFinder.setParams(dilateSize, erodeSize, th, th_max)
        contours, otsu_threshold, filterSmall = self.contourFinder.detectInterestingContours(work)

        #-----------------------------------------------------------------
        #Step 3: select which contour is the real mouse
        #-----------------------------------------------------------------
        rx, ry, new_mouse = self.contourPicker.pickCorrectContour(contours, {'last_center':(self.rx, self.ry), 'distanceRejectTh':2000})
        #if mouse was found, update its position and the area thresholds
        if type(new_mouse) is not bool:
            self.setMousePosition(rx, ry, new_mouse)
            self.updatedValuesOfTh(area_delta)

        #-----------------------------------------------------------------
        #Step 4: show output and some intermediate results
        #-----------------------------------------------------------------
        output = Bextended.copy()
        #translate mouse coordinates into the padded frame; uses the same
        #'pad' as the border above instead of a second hard-coded 40
        offset = np.empty_like(self.mouse)
        offset.fill(pad)
        translatedMouse = self.mouse + offset
        #draw a fixed-size rectangle around the mouse
        output = jasf_cv.drawFixedDimAroundContourCenter(output, [translatedMouse], (200, 0, 200), np.array((60,60)))
        #crop a fixed-size ROI around the mouse
        mouseImg = jasf.cv.getRoiAroundContour(extendedInput, translatedMouse, dim = np.array((60,60)))

        #grayscale ROI feeds the optical-flow computer
        mousePB = jasf_cv.convertBGR2Gray(mouseImg)
        oldP, newP = self.flowComputer.apply(mousePB)

        self.GUI.setImg(B, 0)
        self.GUI.setImg(255*otsu_threshold, 1)
        self.GUI.setImg(255*filterSmall, 2)
        self.GUI.setImg(output, 3)
        self.GUI.setImg(mouseImg, 4)
Exemplo n.º 7
0
def askUserForInput(frame, modeReadingFromData=False, userData=None):
    """Ask the user to double-click on the rat contour in `frame`.

    Shows the detected contour candidates in a 'user input' window and waits
    until control_mouse is initialized, either by replaying a stored click
    (modeReadingFromData=True, click taken from userData) or by a live
    double-click.  Finally updates the area thresholds.

    frame -- full BGR frame; split into left/right halves
    modeReadingFromData -- replay a stored click instead of waiting for one
    userData -- dict with 'input' (x, y) and 'settings_state' keys; only read
                when modeReadingFromData is True
    """
    global control_mouse
    #None default instead of a shared mutable default argument
    if userData is None:
        userData = {}
    #read image and break into right and left halves
    left, right = devika_cv.break_left_right(frame)
    inputList = [left, right, frame]

    #create window to wait for input
    jasf_cv.getNewWindow('user input', dimension=(160,120))

    cnts = []
    control_mouse.initialized = False

    def grabContours():
        """Read the settings, pick the image and detect candidate contours.
        Returns (contours, single-channel image, area delta).  Extracted from
        the two identical copies in the original function."""
        th, th_max, delta, dilateSize, erodeSize, LRA = readSettings()
        #select which image to use (LRA: 0=left, 1=right, 2=whole frame) and
        #keep channel index 1 (green in BGR order; the original comment said
        #"blue" -- verify which channel is really intended)
        img = inputList[LRA][:,:,1].copy()
        contourFinder.setParams(dilateSize, erodeSize, th, th_max)
        found, otsu_threshold, filterSmall = contourFinder.detectInterestingContours(img)
        return found, img, delta

    def analyseUserInput(x,y):
        """Pick the candidate contour whose center is closest to (x, y).

        Called either directly when replaying stored input, or from the mouse
        callback on a live double-click.  Rejects clicks farther than 20px
        from every candidate center.  Reads `cnts` from the enclosing scope
        (reassigned by the loop below before each call).
        """
        global control_mouse
        #centers of the current candidates and their distances to the click
        centers = [jasf_cv.getCenterOfContour(c) for c in cnts]
        distances = [np.linalg.norm(np.array(c) - np.array((x,y))) for c in centers]
        #the mouse is the candidate closest to the user click
        i = np.argmin(distances)
        rx,ry = centers[i]
        mouse = cnts[i]

        #the user cannot miss badly
        if jasf.math.pointDistance((rx,ry), (x,y)) > 20:
            print('not close enough!')
        else:
            print('position set!')
            control_mouse.setPosition(rx, ry, mouse)
            control_mouse.initialized = True

            #record the click so this session can be replayed later
            userInputData[control_settings['currentVideoFileName']].append({'frame':
                readControlSetting('framesSinceStart'), 'input':(rx,ry), 'settings_state':readSettingsState()})

    def onUserInputDblCklick(event, x, y, flags, params):
        """Mouse callback: forward left double-clicks to analyseUserInput."""
        global control_mouse
        if event == cv2.EVENT_LBUTTONDBLCLK:
            analyseUserInput(x,y)

    if modeReadingFromData:
        rx,ry = userData['input']
        setSettingsState(userData['settings_state'])
        #detect contours once under the replayed settings, then replay the
        #click; on success control_mouse.initialized is set and the loop
        #below is skipped
        cnts, img, delta = grabContours()
        analyseUserInput(rx,ry)
    else:
        #in this case, the loop below should run and wait for a live click
        cv2.setMouseCallback('user input', onUserInputDblCklick)

    #show the candidates until the user picks one
    while not control_mouse.initialized:
        cnts, img, delta = grabContours()
        #draw all contours
        img2show = jasf_cv.drawContours(img, cnts)
        cv2.imshow('user input', img2show)
        #waitKey also pumps the GUI events so the callback can fire
        ch = cv2.waitKey(150) & 0xFF

    cv2.destroyWindow('user input')


    #compute area and set the new thresholds
    updateValuesOfTh(delta)