Example #1
def depth_calibration():
    (init_depth, _) = get_depth()
    timer = time.time()
    marge_noise = 200
    for hd in range(0, 480):
        for wd in range(0, 640):
            if (init_depth[hd][wd] != 2047):
                min_mat[hd][wd] = init_depth[hd][wd]
                max_mat[hd][wd] = init_depth[hd][wd]
    for i in range(0, 5):
        print(i)
        (depthc, _) = get_depth()
        for hd in range(0, 480):
            for wd in range(0, 640):
                if (depthc[hd][wd] != 2047):
                    if depthc[hd][wd] < min_mat[hd][wd]:
                        min_mat[hd][wd] = depthc[hd][wd]
                    if depthc[hd][wd] > max_mat[hd][wd]:
                        max_mat[hd][wd] = depthc[hd][wd]
#Noise Calculation : Max - Min matrix
    for hd in range(0, 480):
        for wd in range(0, 640):
            if (abs(max_mat[hd][wd] - min_mat[hd][wd]) < marge_noise):
                noise[hd][wd] = max_mat[hd][wd] - min_mat[hd][wd]
            else:
                noise[hd][wd] = 0
    timer = time.time() - timer
    print('Calibration Time :', timer)
    return (1)
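The per-pixel Python loops above touch 640x480 pixels per frame; the same min/max accumulation can be written with NumPy array operations. A minimal vectorized sketch, assuming the freenect sync wrapper used throughout these examples and treating 2047 as the invalid-depth sentinel (here min_mat, max_mat, and noise are returned rather than kept as module globals):

import time

import numpy as np
from freenect import sync_get_depth as get_depth


def depth_calibration_vectorized(frames=5, marge_noise=200):
    timer = time.time()
    depth0 = get_depth()[0].astype(np.int32)
    valid = depth0 != 2047                        # 2047 marks an invalid reading
    min_mat = np.where(valid, depth0, np.iinfo(np.int32).max)
    max_mat = np.where(valid, depth0, np.iinfo(np.int32).min)
    for _ in range(frames):
        d = get_depth()[0].astype(np.int32)
        valid = d != 2047
        min_mat = np.where(valid, np.minimum(min_mat, d), min_mat)
        max_mat = np.where(valid, np.maximum(max_mat, d), max_mat)
    spread = max_mat - min_mat                    # noise = max - min, as above
    noise = np.where(np.abs(spread) < marge_noise, spread, 0)
    print('Calibration Time :', time.time() - timer)
    return min_mat, max_mat, noise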
Example #2
def hand_tracker():
    ControlVector = [[0, 0, 0] for _ in range(2)]  # two [row, col, depth] entries; 2*[3*[0]] would alias one shared inner list
    (depth,_) = get_depth()
    centroidList = list() #Initiate centroid list
    #RGB Color tuples (defined as in the hand_tracker example further below)
    BLACK = (0,0,0)
    RED = (255,0,0)
    GREEN = (0,255,0)
    BLUE = (0,0,255)
    YELLOW = (255,255,0)
    pygame.init() #Initiates pygame
    xSize,ySize = 640,480 #Sets size of window
    screen = pygame.display.set_mode((xSize,ySize),pygame.RESIZABLE) #creates main surface
    screenFlipped = pygame.display.set_mode((xSize,ySize),pygame.RESIZABLE) #creates surface that will be flipped (mirror display)
    screen.fill(BLACK) #Make the window black
    done = False #Iterator boolean --> Tells program when to terminate
    while not done:
        screen.fill(BLACK) #Make the window black
        (depth,_) = get_depth() #Get the depth from the kinect
        depth = depth.astype(np.float32) #Convert the depth to a 32 bit float
        _,depthThresh = cv2.threshold(depth, 750, 255, cv2.THRESH_BINARY_INV) #Threshold the depth for a binary image. Thresholded at 750 arbitrary units
        _,back = cv2.threshold(depth, 1000, 255, cv2.THRESH_BINARY_INV) #Threshold the background in order to have an outlined background and segmented foreground
        blobData = BlobAnalysis(depthThresh) #Creates blobData object using BlobAnalysis class
        blobDataBack = BlobAnalysis(back) #Creates blobDataBack object using BlobAnalysis class

        for cont in blobDataBack.contours: #Iterates through contours in the background
            pygame.draw.lines(screen,YELLOW,True,cont,3) #Colors the binary boundaries of the background yellow
        for i in range(blobData.counter): #Iterate from 0 to the number of blobs minus 1
            pygame.draw.circle(screen,BLUE,blobData.centroid[i],10) #Draws a blue circle at each centroid
            centroidList.append(blobData.centroid[i]) #Adds the centroid tuple to the centroidList --> used for drawing
            pygame.draw.lines(screen,RED,True,blobData.cHull[i],3) #Draws the convex hull for each blob
            pygame.draw.lines(screen,GREEN,True,blobData.contours[i],3) #Draws the contour of each blob
            """try:
                print(depth[blobData.centroid[i][1]][blobData.centroid[i][0]])
            except:
                pass"""
        try:
            ControlVector[0][0] = blobData.centroid[0][1]
            ControlVector[0][1] = blobData.centroid[0][0]
            ControlVector[0][2] = depth[ControlVector[0][0]][ControlVector[0][1]]
            ControlVector[1][0] = blobData.centroid[1][1]
            ControlVector[1][1] = blobData.centroid[1][0]
            ControlVector[1][2] = depth[ControlVector[1][0]][ControlVector[1][1]]
        
            print(ControlVector)
        except:
            pass
        """
        #Drawing Loop
        #This draws on the screen lines from the centroids
        #Possible exploration into gesture recognition :D
        for cent in centroidList:
            pygame.draw.circle(screen,BLUE,cent,10)
        """

        pygame.display.set_caption('Kinect Tracking') #Makes the caption of the pygame screen 'Kinect Tracking'
        del depth #Deletes depth --> opencv memory issue
        screenFlipped = pygame.transform.flip(screen,1,0) #Flips the screen so that it is a mirror display
        screen.blit(screenFlipped,(0,0)) #Updates the main screen --> screen
        pygame.display.flip() #Updates everything on the window
        for e in pygame.event.get(): #Iterates through current events
            if e.type == pygame.QUIT: #If the close button is pressed, the while loop ends
                done = True
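BlobAnalysis is a helper class from the source project and is not shown in these examples. A rough, hypothetical sketch of what such a class could compute with plain OpenCV; only the attribute names the tracker reads (contours, counter, centroid, cHull) are taken from the code above, everything else is an assumption:

import cv2
import numpy as np


class BlobAnalysisSketch:
    def __init__(self, binary_img):
        img = binary_img.astype(np.uint8)
        found = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = found[0] if len(found) == 2 else found[1]  # OpenCV 4 vs 3 return shape
        self.contours = [c.reshape(-1, 2) for c in contours]  # boundary points per blob
        self.counter = len(contours)
        self.centroid = []
        self.cHull = []
        for c in contours:
            m = cv2.moments(c)
            if m['m00'] != 0:                     # centroid from image moments
                self.centroid.append((int(m['m10'] / m['m00']),
                                      int(m['m01'] / m['m00'])))
            else:
                self.centroid.append(tuple(c[0][0]))
            self.cHull.append(cv2.convexHull(c).reshape(-1, 2))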
Example #3
def main_engine():
    while True:
        contour_list = []
        (depth, _) = get_depth()
        (rgb, _) = get_video()
        orig = np.array(rgb[::1, ::1, ::-1])
        fgmask = fgbg.apply(orig)
        ret, thresh = cv2.threshold(fgmask, 127, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        kernel = np.ones((5, 5), np.uint8)
        erosion = cv2.erode(thresh, kernel, iterations=1)
        im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        index = 0
        for cnt in contours:
            for node in cnt:
                for elem in node:
                    y = elem[0].astype(int)
                    x = elem[1].astype(int)
                    if (y > min(xlist)) and (y < max(xlist)) and (
                            x > min(ylist)) and (x < max(ylist)):
                        if (depth[x][y] + 8 < min_mat[x][y] - noise[x][y]
                            ) and (depth[x][y] < 2047) and (
                                depth[x][y] != 0) and (depth[x][y] != 255 and
                                                       (min_mat[x][y] < 2047)):
                            contour_list.append([x, y])
                            index += 1
        if (len(contour_list)):
            print(str(contour_list))
Example #4
def doloop():
    global depth, rgb, initdepth
    min = 0
    (initdepth, _) = get_depth()
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        #cv2.rectangle(rgb, (230, 100), (440, 300), (255, 0, 0), 2)
        test = np.array(rgb[::2, ::2, ::-1])
        cv2.imshow('FIRST', test)
        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))
        # Simple Downsample
        cv2.imshow('both', np.array(da[::2, ::2, ::-1]))
        cv2.waitKey(5)
Example #5
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.avi")
    i = len(num)
    video = cv2.VideoWriter(path + 'video' + str(i) + '.avi',
                            cv2.cv.CV_FOURCC('D', 'I', 'V', 'X'), 20,
                            (1280, 480))
    if not video.isOpened():
        print 'error with video opening'
        sys.exit(1)
    print 'press "q" to exit'
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)

        #         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        video.write(da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k) == 'q':
                #                 video.release()
                cv2.destroyAllWindows()
                sys.exit(0)
Example #6
    def getDepth(self):
        #self.depth = np.zeros((ROWS,COLS),dtype=np.uint8)
        self.temp_depth = np.rot90(get_depth()[0])
        for row in range(len(self.depth)):
            for col in range(len(self.depth[0])):
                #print("row:{}, col:{}, indexing from {}:{}, {}:{}".format(row, col, int(row * ROW_DIV), int((row + 1) * ROW_DIV), int(col * COL_DIV), int((col + 1) * COL_DIV)))
                self.depth[row][col] = self.temp_depth[int(row * ROW_DIV):int((row + 1) * ROW_DIV), int(col * COL_DIV):int((col + 1) * COL_DIV)].mean() / 8
                self.depth[row][col] = np.bitwise_xor(self.depth[row][col], 255)

        # Quantize into the original irregular bins: each depth maps to its bin's lower edge
        edges = [0, 32, 56, 72, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 184, 200, 224, 256]
        for lo, hi in zip(edges[:-1], edges[1:]):
            self.depth[(self.depth >= lo) & (self.depth < hi)] = lo
Example #7
def center_detect():
    result = []
    (rgb, _) = get_video()
    (depth, _) = get_depth()
    orig = np.array(rgb[::1, ::1, ::-1])
    fgmask = fgbg.apply(orig)
    ret, thresh = cv2.threshold(fgmask, 127, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(thresh, kernel, iterations=1)
    im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    index = 0
    for cnt in contours[:]:  # iterate over a copy: popping the live list mid-iteration skips entries
        if cv2.arcLength(cnt, True) < 250:
            contours.pop(index)
        else:
            index += 1
            M = cv2.moments(cnt)
            if (M['m00'] != 0):
                y = int(M['m10'] / M['m00'])
                x = int(M['m01'] / M['m00'])
                if (y > min(xlist)) and (y < max(xlist)) and (
                        x > min(ylist)) and (x < max(ylist)):
                    if (depth[x][y] + 8 < min_mat[x][y] - noise[x][y]) and (
                            depth[x][y] < 2047) and (depth[x][y] != 0) and (
                                depth[x][y] != 255 and (min_mat[x][y] < 2047)):
                        result.append([x, y])
    return result
Example #8
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.jpg")
    i = len(num)
    count = 0
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)

        #         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k) == 'q':
                cv2.destroyAllWindows()
                sys.exit(0)
        if (count % total) == 0:
            cv2.imwrite(path + "both" + str(i) + ".jpg", da)
            i += 1
        count += 1
Example #9
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.jpg")
    i = len(num)
    count = 0 
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        
        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)
        
#         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k) == 'q':
                cv2.destroyAllWindows()
                sys.exit(0)
        if (count % total) == 0:
            cv2.imwrite(path + "both" + str(i) + ".jpg", da)
            i += 1
        count += 1
Example #10
def doloop():
    global depth, rgb
    face_cascade = cv2.CascadeClassifier(
        'haarcascade/haarcascade_frontalface_default.xml')

    midx, midy = 0, 0
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        # Build a two panel color image
        mn = np.min(depth)
        mx = np.max(depth)
        output = np.uint8((depth.astype(np.float32) - mn) * 255 / (mx - mn))  # float math avoids uint16 overflow
        gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)  # get_video() returns RGB, not BGR
        gray = cv2.equalizeHist(gray)
        face = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in face:
            midx, midy = x + w / 2, y + h / 2

        ret2, th2 = cv2.threshold(output, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        # da = np.hstack((d3,rgb))

        # Simple Downsample
        # cv2.imshow('both',np.array(da[::2,::2,::-1]))

        cv2.imshow('frame', th2)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break
Example #11
    def update(self):

	rate = rospy.Rate(10) # 10hz
	while not rospy.is_shutdown():
	    # Get a fresh frame
	    depth,_ = get_depth()    	 
	    frame,_ = get_video()
	    
	    self.BD.detect(frame)
	    self.DA.analyse(depth)
	    self.I2M.find(self.BD.get_xpos())

	    centroid = self.DA.get_centroid()
	    obstacle = self.DA.get_obstacle()

	    object_located = self.BD.get_objectLocated()

	    mea = self.I2M.get_pos()  

	    rospy.loginfo("centroid: %d, obstacle: %d", centroid, obstacle)
	    rospy.loginfo("object_located: %d", object_located)
	    rospy.loginfo("im2mea: \n %s", mea)

	    self.pub_depth.publish(centroid, obstacle)
	    self.pub_blob.publish(object_located)
	    self.pub_mea.publish(mea)

	    rate.sleep()
Example #12
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.avi")
    i = len(num)
    video = cv2.VideoWriter(path+'video'+str(i)+'.avi',cv2.cv.CV_FOURCC('D','I','V','X'),20,(1280,480))
    if not video.isOpened():
        print 'error with video opening'
        sys.exit(1)
    print 'press "q" to exit' 
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        
        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)
        
#         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        video.write(da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k)=='q':
#                 video.release()
                cv2.destroyAllWindows()
                sys.exit(0)
Example #13
def update_surface(baseplane,bounds,prev=None,FLOOR=-500,verbose=False):
    """Read updated topography and calibrate for use"""
    #(depth,_)= get_depth()
    d = []
    for i in range(10):
        (depth,_)= get_depth()
        d.append(depth)
        #time.sleep()
    #time average the readings to reduce noise
    #currently taking mean, maybe switch to median
    depth = np.median(d[::],axis=0)#/10.
    topo,pix = calibrate(depth,baseplane,bounds)
    if verbose:
        print 'SURFACE STATS'
        print 'MEAN: %f \t MAX: %f\t MIN: %f\t MEDIAN: %f \n'%(np.mean(topo), np.max(topo),np.min(topo), np.median(topo) )
    #if there are enough pixels above a threshold, ignore only those and show masked linear combo. of previous and new topos
    #this is useful when hands are in the sandbox
    if np.count_nonzero(topo < FLOOR) > 5:  # equivalent to the original doubled np.where count > 10; or np.mean(topo) > 1e3
        if prev is None:  # '== None' misbehaves on NumPy arrays
            return topo #- np.nanmedian(topo)
        else:
            a = np.zeros(prev.shape)
            b = np.ones(prev.shape)
            a[np.where(topo<FLOOR)] = 1
            b[np.where(topo<FLOOR)] = 0

            return prev*a + topo*b
    return topo #- np.nanmedian(topo)
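The masked blend at the end builds complementary 0/1 masks a and b, so prev*a + topo*b is exactly an element-wise select; the same step as a single np.where (a toy-sized equivalence check, not a change to the function):

import numpy as np

FLOOR = -500
topo = np.array([[-600., 10.], [20., -700.]])   # toy topography with two "hand" pixels
prev = np.zeros_like(topo)                      # stand-in for the previous surface

blended = np.where(topo < FLOOR, prev, topo)    # prev where topo < FLOOR, else topo
print(blended)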
Example #14
def calibrate():
    (depth,_) = get_depth()
    background_depth = avg_depth(depth)
    print 'calibrated depth of background: %d' % background_depth
    f = open(BACK_DEPTH_FNAME, 'w')
    pickle.dump(background_depth,f)
    f.close()
Example #15
def qualtest(integer):
    warm_up, _ = get_depth()
    time.sleep(5)
    cf0, _ = get_depth()
    all_data = cf0[np.newaxis, ...]
    ts0 = time.time()
    ts = ts0
    count = 0
    while ts - ts0 < 60:
        cf, _ = get_depth()
        ts = time.time()
        if ts > count * 0.4 + ts0:
            all_data = np.vstack([all_data, cf[np.newaxis, ...]])
            count += 1
    #all_data[all_data == 0] = np.nan
    np.save('bower_test_tank42_depth' + str(integer), all_data)
Example #16
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth,_), (rgb,_) = get_depth(), get_video(0, freenect.VIDEO_IR_8BIT)
        #(depth,_), (rgb,_) = get_depth(), get_video()

        # Build a two panel color image
        #d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        #da = np.hstack((d3,rgb))

        # Simple Downsample
        #cv.ShowImage('both', cv.fromarray(np.array(da[::2,::2,::-1])))
        #rgbarray = cv.fromarray(np.array(rgb))
        rgbarray = np.array(rgb)
        #gray = cv2.cvtColor(rgbarray, cv2.COLOR_BGR2GRAY)
        #blurred = cv.CloneMat(rgbarray)
        #sobeled = cv.CreateMat(rgbarray.rows, rgbarray.cols, cv.CV_32F)

        #cv.Sobel(rgbarray, sobeled, 1, 1)
        #sobeled = cv2.Sobel(blurred, cv.CV_32F, 1, 1)
        _, threshed = cv2.threshold(rgbarray, 250, 255, cv2.THRESH_BINARY)
        blurred = cv2.GaussianBlur(threshed, (255, 255), 0)

        #cv.Sobel(rgbarray2, sobeled, 0, 0, 1)
        cv.ShowImage('both', cv.fromarray(threshed))
        cv.WaitKey(5)
Example #17
def calibrate():
    (depth, _) = get_depth()
    background_depth = avg_depth(depth)
    print 'calibrated depth of background: %d' % background_depth
    f = open(BACK_DEPTH_FNAME, 'w')
    pickle.dump(background_depth, f)
    f.close()
Example #18
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth,_) = get_depth()
        print depth
        # Simple Downsample
        cv.WaitKey(5)
Example #19
def main():
  # Get a fresh frame
  (depth,_), (rgb,_) = get_depth(), get_video()
  # Build a two panel color image
  d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
  da = np.hstack((d3,rgb))
  # Simple Downsample
  cv2.imshow('both',np.array(da[::2,::2,::-1]))
Example #20
def getBinaryImage(l1, l2):
	(d,_) = get_depth(format = DEPTH_MM)
	m1 = d < l2
	m2 = d > l1
	m = np.logical_and(m1, m2)
	m = m.astype(np.uint8)
	m = m*255
	return (m,d)
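A possible usage sketch for getBinaryImage: keep everything in one depth band and count the foreground pixels. The 500-1000 mm band is a made-up value for illustration:

import cv2

(mask, d) = getBinaryImage(500, 1000)            # binary mask of depths in (500, 1000) mm
print('foreground pixels:', cv2.countNonZero(mask))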
Example #21
def get_min_pos_kinect():
    (depth, _) = get_depth()
    minPos = np.argmin(
        depth)  #This is the raw index of the minimum value above
    xPos = np.mod(minPos,
                  sensorWidth)  #This is the x component of the raw index
    yPos = minPos // sensorWidth  #This is the y component of the raw index
    return ((sensorWidth - xPos - 10) * (screenWidth / sensorWidth),
            yPos * (screenHeight / sensorHeight))
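The mod and floor-divide pair recovers (x, y) from the flattened argmin index; np.unravel_index does the same unflattening in one call (sketch with a stand-in array in place of the sensor frame):

import numpy as np

depth = np.random.randint(0, 2048, size=(480, 640))           # stand-in for get_depth()[0]
yPos, xPos = np.unravel_index(np.argmin(depth), depth.shape)  # (row, col) of the minimum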
Example #22
def do_loop():
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Show it
        #show_and_move(rgb, depth)
        show_and_move(rgb, depth)
        show_depth(depth)
        cv.WaitKey(5)
Example #23
    def GetDepth(self):

        # get data from the sensor
        (depth, _), (rgb, _) = get_depth(), get_video()

        # convert data
        depth = depth.astype(np.uint8)
        rgb = cv.cvtColor(rgb, cv.COLOR_RGB2BGR)

        return depth, rgb
Example #24
def do_loop():
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Show it
        #show_and_move(rgb, depth)
        show_and_move(rgb, depth)
        show_depth(depth)
        cv.WaitKey(5)
Example #25
def get_frame():
    frame = get_depth()[0]  # get current depth frame from the Kinect
    frame = np.clip(frame, frame_clip_min, frame_clip_max)
    frame = frame - frame_clip_min
    frame = cv2.resize(frame,
                       dsize=(frame_width,
                              frame_height))  # resize to (frame_width, frame_height)
    frame = frame / (frame_clip_max - frame_clip_min
                     )  # scale each depth between 0-1
    return frame
Example #26
def display():
    global arr, depth, send_lock
    while (True):
        if send_lock.acquire():
            (depth,_),(arr,_)=get_depth(),get_video()
            d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
            da = np.hstack((d3,arr))
            cv.ShowImage('both',cv.fromarray(np.array(da[::2,::2,::-1])))
            send_lock.release()
        cv.WaitKey(5)
Example #27
def doloop():

    capture=cv.CaptureFromCAM(0)
    fourcc = cv.CV_FOURCC('X','V','I','D')
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FPS, 30)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    global depth,ir, rgb
    count = 0

    while True:
        rgb_frame=cv.QueryFrame(capture)
        rgb = iplimage_to_numpy_color(rgb_frame)

        (depth,_), (ir,_) = get_depth(), get_video(format=2)
        np.resize(depth, (300,400))
        np.resize(ir, (300,400))
        np.resize(rgb, (300,400,3))

        # Build a two panel color image
        d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        ir3 = np.dstack((ir, ir, ir)).astype(np.uint8)
        depth_ir = np.hstack((d3,ir3))

        # Form Frame/Image
        image = cv.fromarray(np.array(depth_ir[:,:,::-1]))
        rgb_image = numpy_to_iplimage_color(rgb)

        d_cvMat = cv.fromarray(np.array(d3[:,:,::-1]))
        depth_image = cvMat_to_iplimage_color(d_cvMat)


        opacity = 0.4
        cv.AddWeighted(depth_image, opacity, rgb_image, 1 - opacity, 0, rgb_image)

        # Playback Frame
        cv.ShowImage('Trio',image)
        cv.ShowImage('RGB',rgb_image)
        cv.WaitKey(5)

        # Keyboard interrupt for Exit
        c=cv.WaitKey(2)
        if c==27: #Break if user enters 'Esc'.
            break


        # How downsampling works
        # ::2 means take every second element -> [1,2,3,4] -> [1,3]
        # Downsample col -> cv.fromarray(np.array(rgb[:, ::2, ::-1]))
        # Downsample row -> cv.fromarray(np.array(rgb[::2, :, ::-1]))
        # Color Channel: All Color -> cv.fromarray(np.array(rgb[::2, ::2, ::-1]))
        # Color Channel: Blue      -> cv.fromarray(np.array(rgb[::2, ::2]))
        # Color Channel: Gray      -> cv.fromarray(np.array(rgb[::2, ::2, 0]))
Example #28
File: this.py  Project: ehivan24/freenect
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth,_), (rgb,_) = get_depth(), get_video()
        # Build a two panel color image
        d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        da = np.hstack((d3,rgb))
        # Simple Downsample
        cv.ShowImage('both',cv.fromarray(np.array(da[::2,::2,::-1])))
        cv.WaitKey(5)
Example #29
def doloop():
	global depth, rgb
	for i in range(1,10):
		(depth,_), (rgb,_) = get_depth(), get_video()
		bg=cv.CloneMat(cv.fromarray(depth.astype(numpy.uint8)))
	scratch = cv.CreateImage((640,480),8,1)
	scratch2 = cv.CreateImage((640,480),8,1)
	cv.SaveImage('bg.png',bg)
	
	while True:
		# Get a fresh frame
		(depth,_), (rgb,_) = get_depth(), get_video()
		depth=cv.fromarray(depth.astype(numpy.uint8))
		cv.AbsDiff(bg,depth,scratch)
		cv.Sub(scratch,2,10,scratch2)
		# cv.ConvertScale(scratch,scratch2,50)
		cv.Add(depth,scratch2,scratch2)
        
		# Simple Downsample
		cv.ShowImage('both',scratch2)
		cv.WaitKey(10)
Example #30
def get_depth_object():
    result = []
    (depth, _) = get_depth()
    cv2.rectangle(depth, (min(xlist), min(ylist)), (max(xlist), max(ylist)),
                  (0, 255, 0), 2)
    for wd in range(min(xlist), max(xlist)):
        for hd in range(min(ylist), max(ylist)):
            if (depth[hd][wd] + 8 < min_mat[hd][wd] - noise[hd][wd]) and (
                    depth[hd][wd] < 2047) and (depth[hd][wd] != 0) and (
                        depth[hd][wd] != 255 and (min_mat[hd][wd] < 2047)
                    ):  #8 is the finger width (approximately)
                result.append([hd, wd])
    return result
Example #31
def getCursorPosition():
    (depth,_) = get_depth()
    minVal = np.min(depth) #This is the minimum value from the depth image
    minPos = np.argmin(depth) #This is the raw index of the minimum value above
    xPos = np.mod(minPos, sensorWidth) #This is the x component of the raw index
    yPos = minPos//sensorWidth #This is the y component of the raw index
    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))
    return ((sensorWidth - xPos) * (screenWidth / sensorWidth), yPos * (screenHeight / sensorHeight))
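The append-then-delete bookkeeping on xList/yList is a fixed-length moving average. collections.deque with maxlen does the same windowing automatically (a sketch; the window length of 5 is an assumption, since the original lists are seeded elsewhere in the project):

from collections import deque

import numpy as np

xList = deque([0] * 5, maxlen=5)     # assumed 5-sample window

def smoothed(value, history):
    history.append(value)            # the oldest sample drops off automatically
    return int(np.mean(history))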
Example #32
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # Simple Downsample
        cv.ShowImage('both', np.array(da[::2, ::2, ::-1]))
        cv.WaitKey(5)
Example #33
def hand_tracker(serial_object):
    (depth,_) = get_depth()
    cHullAreaCache = constList(5,12000) #Blank cache list for convex hull area
    areaRatioCache = constList(5,1) #Blank cache list for the area ratio of contour area to convex hull area
    centroidList = list() #Initiate centroid list
    #RGB Color tuples
    BLACK = (0,0,0)
    RED = (255,0,0)
    GREEN = (0,255,0)
    PURPLE = (255,0,255)
    BLUE = (0,0,255)
    WHITE = (255,255,255)
    YELLOW = (255,255,0)
    done = False
    previous_object_no_error = True
    min_delta_pos = 1 # as a percentage of total possibilities
    prev_vert_pos = -20
    while not done:
        (depth,_) = get_depth()
        depth = depth.astype(np.float32)
        _,depthThresh = cv2.threshold(depth, 600, 255, cv2.THRESH_BINARY_INV) #Threshold the depth for a binary image. Thresholded at 600 arbitrary units
        blobData = BlobAnalysis(depthThresh) #Creates blobData object using BlobAnalysis class

        if blobData.counter == 1:
            centroid = blobData.centroid[0]
            previous_object_no_error = True
            vert_pos = get_pos_for_serial(centroid)
            # Check for significant changes
            if np.abs(prev_vert_pos - vert_pos) > min_delta_pos:
                prev_vert_pos = vert_pos
                print str(vert_pos)
                serial_object.write(str(vert_pos)+'\n')
        elif blobData.counter == 0 and previous_object_no_error:
            print 'No tracking objects found'
            previous_object_no_error = False
        elif blobData.counter > 1 and previous_object_no_error:
            print 'Too many tracking objects: {} objects'.format(blobData.counter)
            previous_object_no_error = False
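hand_tracker(serial_object) expects an already-open pyserial port it can write positions to. A hypothetical invocation (the port name and baud rate are assumptions):

import serial

ser = serial.Serial('/dev/ttyUSB0', 9600)   # hypothetical port and baud rate
try:
    hand_tracker(ser)
finally:
    ser.close()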
Example #34
def doloop():
    while True:
        # Get a fresh frame
        (depth,_), (rgb,_) = get_depth(), get_video()

        depth = depth[::2, ::2]
        r,g,b = process(depth)

        # Build a two panel color image
        d3 = np.dstack((r,g,depth/20)).astype(np.uint8)
        da = np.hstack((d3,rgb[::2, ::2]))

        # Simple Downsample
        cv.ShowImage('both',np.array(da[:,:,::-1]))
        cv.WaitKey(5)
Example #35
def stack_depth(stack=1,verbose=True):
    depth = None
    for i in range(stack):
        (temp,_) = get_depth()
        if depth is None:
            depth = temp
            if verbose:
                print "Init invalids: %d" % depth[depth >= INVALID_DEPTH].shape[0]
        else:
            mask1 = (depth >= INVALID_DEPTH)
            mask2 = ((mask1*temp > 0) * (mask1*temp < INVALID_DEPTH))
            depth = depth.copy() + mask2*temp - mask2*INVALID_DEPTH
    if verbose:
        print "Final invalids: %d" % depth[depth >= INVALID_DEPTH].shape[0]
    return depth
Example #36
def stack_depth(stack=1, verbose=True):
    depth = None
    for i in range(stack):
        (temp, _) = get_depth()
        if depth is None:
            depth = temp
            if verbose:
                print "Init invalids: %d" % depth[
                    depth >= INVALID_DEPTH].shape[0]
        else:
            mask1 = (depth >= INVALID_DEPTH)
            mask2 = ((mask1 * temp > 0) * (mask1 * temp < INVALID_DEPTH))
            depth = depth.copy() + mask2 * temp - mask2 * INVALID_DEPTH
    if verbose:
        print "Final invalids: %d" % depth[depth >= INVALID_DEPTH].shape[0]
    return depth
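The mask arithmetic in the else branch fills a still-invalid pixel with the first later frame that read something usable. Assuming invalid pixels hold exactly INVALID_DEPTH, the update collapses to one np.where (toy-sized equivalence check):

import numpy as np

INVALID_DEPTH = 2047
depth = np.array([10, 2047, 2047, 30])    # accumulator with two invalid pixels
temp = np.array([11, 500, 0, 31])         # new frame; 0 is also unusable

fill = (depth >= INVALID_DEPTH) & (temp > 0) & (temp < INVALID_DEPTH)
depth = np.where(fill, temp, depth)
print(depth)                              # [  10  500 2047   30]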
Example #37
def doloop():
	global depth, rgb
	while True:
		# Get a fresh frame
		(depth,_), (rgb,_) = get_depth(), get_video()

		# Build a two panel color image
		d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
		da = np.hstack((d3,rgb))

		# Simple Downsample
		cv2.imshow('both',np.array(da[::2,::2,::-1]))

		print len(rgb), len(rgb[0]), len(rgb[0][0])

		raw_input(':')
Example #38
def readkinect():
    (depth,_), (rgb,_) = get_depth(), get_video()
    if dodepth:
        da=np.dstack((depth,depth,depth)).astype(np.uint8) # this is correct depth 
        frame=np.array(da[::1,::1,::-1]);
    else:
        frame=rgb[::1,::1,::-1]
#    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #frame=np.array() ### !!!! or rgb
    
    
    #fgmask = fgbg.apply(frame)
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame',frame)
    k = cv2.waitKey(3) & 0xff
    return frame
Example #39
def get_min_pos_kinect():
    
    (depth,_) = get_depth()
        
    minVal = np.min(depth) #This is the minimum value from the depth image
    minPos = np.argmin(depth) #This is the raw index of the minimum value above
    xPos = np.mod(minPos, xSize) #This is the x component of the raw index
    yPos = minPos//xSize #This is the y component of the raw index
        
    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))
        
    return (xSize - xPos-10, yPos, minVal)
Example #40
def get_min_pos_kinect():
    
    (depth,_) = get_depth()
        
    minVal = np.min(depth) #This is the minimum value from the depth image
    minPos = np.argmin(depth) #This is the raw index of the minimum value above
    xPos = np.mod(minPos, xSize) #This is the x component of the raw index
    yPos = minPos//xSize #This is the y component of the raw index
        
    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))
        
    return (xSize - xPos-10, yPos, minVal)
Example #41
    def generateSingleFrame(self, interval, start, end, stride=8):
        depth = get_depth()[0]
        depth = self.adjustDepth(depth, stride)
        self.depthCache = depth
        layers = []
        for i in range(start, end, interval):
            layers.append(
                self.generateSingleLayer(i,
                                         depth,
                                         c3=self.generateColour(
                                             (float(i) /
                                              (float(end) - float(start))))))

        for i in range(1, len(layers)):
            layers[0].paste(layers[i], (0, 0), layers[i])
        layers = layers[0]
        self.imageCache = layers
        return layers
Example #42
def get_bg_depth():
    global corners
    corners = get_corners()
    mtx = np.matrix(corners)
    mean = mtx.mean(0)
    middle_coords = (int(mean.item((0, 0))), int(mean.item((0, 1))))
    corners_arr = np.array(corners)
    global lower_bound, upper_bound
    lower_bound = np.amin(corners_arr, axis=0)
    upper_bound = np.amax(corners_arr, axis=0)
    global depth
    (depth,_) = get_depth()
    #d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
    d3 = np.array(depth)
    d3 = np.array([y[lower_bound[0]:upper_bound[0]] 
        for y in d3[lower_bound[1]:upper_bound[1]]])
    mean_vertical = d3.min(axis=0)
    return mean_vertical.min(axis=0)
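Despite the mean_vertical name, the two chained reductions return the overall minimum of the cropped region, so the tail of the function is a crop followed by .min() (toy-sized sketch of the equivalence):

import numpy as np

depth = np.arange(24).reshape(4, 6)                 # toy depth map
lower_bound, upper_bound = np.array([1, 1]), np.array([5, 3])

roi = depth[lower_bound[1]:upper_bound[1], lower_bound[0]:upper_bound[0]]
print(roi.min())                                    # same as roi.min(axis=0).min(axis=0)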
Example #43
    def find_position(self):
	print "Kinect is trying to find the image"
        (kinect_depth,_), (rgb,_) = get_depth(), get_video() 
        self.img = video_cv(rgb)
        depth_img = pretty_depth_cv(kinect_depth)
 
        position = self._get_pos(self.img)

        depth = self._get_depth(depth_img, debug=False)

        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1) 

        fps = 1/(time.time() - self.lasttime)
        s1 = "FPS:%.2f" % fps
        self.lasttime = time.time()
        cv.PutText(self.img,s1, (0,30),font, cv.CV_RGB(255, 0, 0))

        dt = "Depth: %d" % depth
        if position:
            pt = "Pos: X=%d Y=%d" % (position[0], position[1])
        else:
            pt = "Pos: N/A"
        cv.PutText(self.img, dt, (0,60),font, cv.CV_RGB(255, 0, 0))
        cv.PutText(self.img, pt, (0,90),font, cv.CV_RGB(255, 0, 0))

        offset = 120
        for t in self.text:
            cv.PutText(self.img, t, (0,offset),font, cv.CV_RGB(255, 0, 0))
            offset += 30

        cv.Circle(self.img, (self.sp[0], self.sp[1]) , 10, cv.CV_RGB(0, 255, 0), 1)

        cv.ShowImage('RGB', self.img)
        #cv.SaveImage('RGB-%d.png' % (time.time()*100), self.img)
        #cv.ShowImage('DEPTH', depth_img)
        cv.WriteFrame(self.writer, self.img)
        cv.WaitKey(5)

        #cv.ShowImage('depth_mask', depth_mask)
        try:
            return (position[0], position[1], depth)
        except:
            return (None, None, None)
Example #44
def doloop(c, conn, insert_snapshot):
    global depth, rgb, counter
    counter = 0
    while True:
        # Get a fresh frame
        (depth, _) = get_depth()
        fn = './images/{}.png'.format(uuid4())
        cv2.imwrite(fn, frame_convert2.video_cv(freenect.sync_get_video()[0]))
        # Build a two panel color image
        # d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        # da = np.hstack((d3,rgb))
        # print(d3, rgb)
        # time.sleep(3)
        counter += 1
        insert_snapshot(c, fn, depth)
        (ok, ) = c.execute('select count(1) from snapshot;')
        print(ok)
        conn.commit()
        time.sleep(1)
Example #45
def doloop():
    global depth,ir, rgb

    while True:
        """
        ctypedef enum freenect_video_format:
        FREENECT_VIDEO_RGB
        FREENECT_VIDEO_BAYER
        FREENECT_VIDEO_IR_8BIT
        FREENECT_VIDEO_IR_10BIT
        FREENECT_VIDEO_IR_10BIT_PACKED
        FREENECT_VIDEO_YUV_RGB
        FREENECT_VIDEO_YUV_RAW
        """

        (depth,_), (ir,_) = get_depth(), get_video(format=2)


        # How downsampling works
        # ::2 means take every second element -> [1,2,3,4] -> [1,3]
        # Downsample col -> cv.fromarray(np.array(rgb[:, ::2, ::-1]))
        # Downsample row -> cv.fromarray(np.array(rgb[::2, :, ::-1]))
        # Color Channel: All Color -> cv.fromarray(np.array(rgb[::2, ::2, ::-1]))
        # Color Channel: Blue      -> cv.fromarray(np.array(rgb[::2, ::2]))
        # Color Channel: Gray      -> cv.fromarray(np.array(rgb[::2, ::2, 0]))

        # Build a two panel color image
        d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        i3 = np.dstack((ir, ir, ir)).astype(np.uint8)
        da = np.hstack((d3,i3))

        # Form Frame/Image
        image = cv.fromarray(np.array(da[:,:,::-1]))

        # Playback Frame
        cv.ShowImage('Dual',image)
        cv.WaitKey(5)

        # Keyboard interrupt for Exit
        c=cv.WaitKey(2)
        if c==27: #Break if user enters 'Esc'.
            break
Example #46
File: getdata.py  Project: nmagerko/pose
def getData():
    global depth, rgb

    i = 0
    # for the first ten frames
    while i < 10:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        data.append([depth, rgb])
        # data = data + str(depth) + " \n $$$ \n" + str(rgb) + "\n !!! \n"

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # Simple Downsample
        cv.ShowImage("Depth and RGB", cv.fromarray(np.array(da[::2, ::2, ::-1])))
        cv.WaitKey(5)
        i = i + 1
Example #47
def getData():
    global depth, rgb

    i = 0
    #for the first ten frames
    while i < 10:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        data.append([depth, rgb])
        #data = data + str(depth) + " \n $$$ \n" + str(rgb) + "\n !!! \n"

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # Simple Downsample
        cv.ShowImage('Depth and RGB',
                     cv.fromarray(np.array(da[::2, ::2, ::-1])))
        cv.WaitKey(5)
        i = i + 1
Example #48
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # detect apriltag
        data = da[::2, ::2, ::-1]
        #image = cv2.imread(data)
        gray = cv2.cvtColor(data, cv2.COLOR_RGB2GRAY)

        detect_apriltag(gray, data)

        # Simple Downsample
        cv2.imshow('both', np.array(data))
        if cv2.waitKey(1000) == 27:
            break
Example #49
def main():
    print ('[*] Start')
    start = datetime.now()
    folder_name=get_args()
    os.mkdir(folder_name)
    os.chdir(folder_name)
    i=1
    buff=[]
    try:
        while True:
            print '[*] Recording Index %s'%(str(i))
            (depth,_), (rgb,_) = get_depth(), get_video()
            buff.append([depth.copy(),rgb.copy()])
            i+=1
            time.sleep(0.0001)
            if i>=1000000:
                break
    except KeyboardInterrupt:
        sync_stop() # stop the sync_get_video...etc
        print '\n[*] End Buff with following information :'
        duration = str((datetime.now()-start).total_seconds() ).split('.')[0]
        fps = i/((datetime.now()-start).total_seconds())
        print '[*] Duration is { %s }'%(duration)
        print '[*] FPS is { %s }'%(str(fps).split('.')[0])

        print '\n[*] Start Saving IMG from Buff'
        try:
            for j in range(i):
                #list.pop(index)
                depth,rgb = buff.pop(0)
                a=""
                if len(str(j))<frame_length_limit_order:
                    a='0'*(frame_length_limit_order-len(str(j)))
                depth=depth_to_gray(depth)
                # io.imsave to a series of .png
                io.imsave('depth'+a+str(j)+'.png',depth)
                io.imsave('rgb'+a+str(j)+'.png',rgb)
                print '[*] Saving Index %s'%(str(j))
        except:
            print '\n[*] End Saving IMG '
Example #50
    def doloop(self):
        global depth, rgb
        while True:
            (depth, _), (rgb, _) = get_depth(), get_video()

            d3 = np.dstack((depth, depth, depth)).astype(np.uint8)

            redAvg = 0
            greenAvg = 0
            blueAvg = 0
            distanceAvg = 0

            for i in range(len(d3)):
                redAvg += d3[0][i][0]
                greenAvg += d3[0][i][1]
                blueAvg += d3[0][i][2]

            redAvg = redAvg / len(d3)
            greenAvg = greenAvg / len(d3)
            blueAvg = blueAvg / len(d3)
            distanceAvg = (redAvg + blueAvg + greenAvg) / 3

            cv.ShowImage("both", cv.fromarray(np.array(d3[::2, ::2, ::-1])))
            self.sendCommandASCII("128")  # sets mode to passive
            self.sendCommandASCII("131")  # sets mode to safe

            if distanceAvg >= 255 or distanceAvg <= 10:
                self.sendCommandASCII("140 3 1 64 16 141 3")  # beep
                self.callMovementCommand(0, -100)

            elif distanceAvg > distanceThreshold:
                self.sendCommandASCII("140 3 1 64 16 141 3")  # beep
                self.callMovementCommand(100, 100)

            else:
                self.callMovementCommand(0, 0)  # stop
                self.callMovementCommand(0, 100)  # Right turn

            cv.WaitKey(10)
Example #51
def main():
    print ('[*] Start')
    start = datetime.now()
    folder_name=get_args()
    os.mkdir(folder_name)
    os.chdir(folder_name)
    i=1
    try:
        while True:
            (depth,_), (rgb,_) = get_depth(), get_video()
            a=""
            if len(str(i))<frame_length_limit_order:
                a='0'*(frame_length_limit_order-len(str(i)))
            elif len(str(i))>frame_length_limit_order:
                break
            depth=depth_to_gray(depth)
            io.imsave('depth'+a+str(i)+'.png',depth)
            io.imsave('rgb'+a+str(i)+'.png',rgb)
            i+=1
    except KeyboardInterrupt:
        duration=str((datetime.now()-start).total_seconds() ).split('.')[0]
        print '\n[*] Recording Duration is { %s }'%(duration)
Example #52
    def update(self):

	rate = rospy.Rate(10) # 10hz
	while not rospy.is_shutdown():
	    # Get a fresh frame
	    depth,_ = get_depth()    	 
	    frame,_ = get_video()
	    
	    self.BD.detect(frame)
	    self.DA.analyse(depth)

	    centroid = self.DA.get_centroid()
	    obstacle = self.DA.get_obstacle()
	    x_pos = self.BD.get_xpos()
	    object_located = self.BD.get_objectLocated()

	    rospy.loginfo("centroid: %d obstacle: %s", centroid, obstacle)
	    rospy.loginfo("x_pos: %d object_located: %s", x_pos, object_located)

	    self.pub_depth.publish(centroid, obstacle)
	    self.pub_blob.publish(x_pos, object_located)

	    rate.sleep()
Example #53
from opencv.cv import *
from opencv import adaptors
from opencv.highgui import *
from time import time
from freenect import sync_get_depth as get_depth, sync_get_video as get_video, init
import numpy as np

from rx_config import *
from timing_stats import *

#initialize the camera
ctx = init()

# Grab an initial frame to get the video size
global depth, rgb
(depth,_), (rgb,_) = get_depth(), get_video()

rgbFrameSize = cvGetSize(rgb)
depthSize = cvGetSize(depth)
dwnFrameSize = cvSize(rgbFrameSize.width / 2, rgbFrameSize.height / 2)
dwnDepthSize = cvSize(depthSize.width / 2, depthSize.height / 2)

print 'rgbSize = %d %d' % (rgbFrameSize.width, rgbFrameSize.height)
print 'depthSize = %d %d' % (depthSize.width, depthSize.height)


# Allocate processing chain image buffers the same size as
# the video frame
rgbFrame        = cvCreateImage( rgbFrameSize, cv.IPL_DEPTH_8U, 3 )
depthFrame      = cvCreateImage( depthSize,    cv.IPL_DEPTH_16U, 3 )
dwnDepthFrame   = cvCreateImage( dwnDepthSize,    cv.IPL_DEPTH_16U, 1 )#tbd 3 or 1?
Example #54
def get_min_pos_kinect():
    (depth,_) = get_depth()
    minPos = np.argmin(depth) #This is the raw index of the minimum value above
    xPos = np.mod(minPos, sensorWidth) #This is the x component of the raw index
    yPos = minPos//sensorWidth #This is the y component of the raw index
    return ((sensorWidth - xPos-10) * (screenWidth / sensorWidth),yPos * (screenHeight / sensorHeight))
Example #55
def hand_tracker():
    (depth,_) = get_depth()
    cHullAreaCache = constList(5,12000) #Blank cache list for convex hull area
    areaRatioCache = constList(5,1) #Blank cache list for the area ratio of contour area to convex hull area
    centroidList = list() #Initiate centroid list
    #RGB Color tuples
    BLACK = (0,0,0)
    RED = (255,0,0)
    GREEN = (0,255,0)
    PURPLE = (255,0,255)
    BLUE = (0,0,255)
    WHITE = (255,255,255)
    YELLOW = (255,255,0)
    pygame.init() #Initiates pygame
    xSize,ySize = 640,480 #Sets size of window
    screen = pygame.display.set_mode((xSize,ySize),pygame.RESIZABLE) #creates main surface
    screenFlipped = pygame.display.set_mode((xSize,ySize),pygame.RESIZABLE) #creates surface that will be flipped (mirror display)
    screen.fill(BLACK) #Make the window black
    done = False #Iterator boolean --> Tells program when to terminate
    dummy = False #Very important bool for mouse manipulation
    while not done:
        screen.fill(BLACK) #Make the window black
        (depth,_) = get_depth() #Get the depth from the kinect 
        depth = depth.astype(np.float32) #Convert the depth to a 32 bit float
        _,depthThresh = cv2.threshold(depth, 600, 255, cv2.THRESH_BINARY_INV) #Threshold the depth for a binary image. Thresholded at 600 arbitrary units
        _,back = cv2.threshold(depth, 900, 255, cv2.THRESH_BINARY_INV) #Threshold the background in order to have an outlined background and segmented foreground
        blobData = BlobAnalysis(depthThresh) #Creates blobData object using BlobAnalysis class
        blobDataBack = BlobAnalysis(back) #Creates blobDataBack object using BlobAnalysis class
        
        for cont in blobDataBack.contours: #Iterates through contours in the background
            pygame.draw.lines(screen,YELLOW,True,cont,3) #Colors the binary boundaries of the background yellow
        for i in range(blobData.counter): #Iterate from 0 to the number of blobs minus 1
            pygame.draw.circle(screen,BLUE,blobData.centroid[i],10) #Draws a blue circle at each centroid
            centroidList.append(blobData.centroid[i]) #Adds the centroid tuple to the centroidList --> used for drawing
            pygame.draw.lines(screen,RED,True,blobData.cHull[i],3) #Draws the convex hull for each blob
            pygame.draw.lines(screen,GREEN,True,blobData.contours[i],3) #Draws the contour of each blob
            for tips in blobData.cHull[i]: #Iterates through the verticies of the convex hull for each blob
                pygame.draw.circle(screen,PURPLE,tips,5) #Draws the vertices purple
        
        """
        #Drawing Loop
        #This draws on the screen lines from the centroids
        #Possible exploration into gesture recognition :D
        for cent in centroidList:
            pygame.draw.circle(screen,BLUE,cent,10)
        """
        
        pygame.display.set_caption('Kinect Tracking') #Makes the caption of the pygame screen 'Kinect Tracking'
        del depth #Deletes depth --> opencv memory issue
        screenFlipped = pygame.transform.flip(screen,1,0) #Flips the screen so that it is a mirror display
        screen.blit(screenFlipped,(0,0)) #Updates the main screen --> screen
        pygame.display.flip() #Updates everything on the window
        
        #Mouse Try statement
        try:
            centroidX = blobData.centroid[0][0]
            centroidY = blobData.centroid[0][1]
            if dummy:
                mousePtr = display.Display().screen().root.query_pointer()._data #Gets current mouse attributes
                dX = centroidX - strX #Finds the change in X
                dY = strY - centroidY #Finds the change in Y
                if abs(dX) > 1: #If there was a change in X greater than 1...
                    mouseX = mousePtr["root_x"] - 2*dX #New X coordinate of mouse
                if abs(dY) > 1: #If there was a change in Y greater than 1...
                    mouseY = mousePtr["root_y"] - 2*dY #New Y coordinate of mouse
                move_mouse(mouseX,mouseY) #Moves mouse to new location
                strX = centroidX #Makes the new starting X of mouse to current X of newest centroid
                strY = centroidY #Makes the new starting Y of mouse to current Y of newest centroid
                cArea = cacheAppendMean(cHullAreaCache,blobData.cHullArea[0]) #Normalizes (gets rid of noise) in the convex hull area
                areaRatio = cacheAppendMean(areaRatioCache, blobData.contourArea[0]/cArea) #Normalizes the ratio between the contour area and convex hull area
                '''
                if cArea < 10000 and areaRatio > 0.82: #Defines what a click down is. Area must be small and the hand must look like a binary circle (nearly)
                    #click_down(1)
                else:
                    #click_up(1)
                '''
            else:
                strX = centroidX #Initializes the starting X
                strY = centroidY #Initializes the starting Y
                dummy = True #Lets the function continue to the first part of the if statement
        except: #There may be no centroids and therefore blobData.centroid[0] will be out of range
            dummy = False #Waits for a new starting point
            
        for e in pygame.event.get(): #Iterates through current events
            if e.type == pygame.QUIT: #If the close button is pressed, the while loop ends
                done = True