Example No. 1
def main_engine():
    while True:
        contour_list = []
        (depth, _) = get_depth()
        (rgb, _) = get_video()
        orig = np.array(rgb[::1, ::1, ::-1])
        fgmask = fgbg.apply(orig)
        ret, thresh = cv2.threshold(fgmask, 127, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        kernel = np.ones((5, 5), np.uint8)
        erosion = cv2.erode(thresh, kernel, iterations=1)
        im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        index = 0
        for cnt in contours:
            for node in cnt:
                for elem in node:
                    y = elem[0].astype(int)
                    x = elem[1].astype(int)
                    if (y > min(xlist)) and (y < max(xlist)) and (
                            x > min(ylist)) and (x < max(ylist)):
                        if (depth[x][y] + 8 < min_mat[x][y] - noise[x][y]
                            ) and (depth[x][y] < 2047) and (
                                depth[x][y] != 0) and (depth[x][y] != 255 and
                                                       (min_mat[x][y] < 2047)):
                            contour_list.append([x, y])
                            index += 1
        if contour_list:
            print(contour_list)
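
Note: most examples on this page unpack three values from cv2.findContours, which is the OpenCV 3.x signature; OpenCV 2.4 and 4.x return only (contours, hierarchy). A small compatibility sketch (the helper name is ours, not from the snippet):

# Hedged sketch: take the last two return values so the call works on
# OpenCV 2.4, 3.x, and 4.x alike.
import cv2

def find_contours_compat(binary_img, mode=cv2.RETR_TREE,
                         method=cv2.CHAIN_APPROX_SIMPLE):
    res = cv2.findContours(binary_img, mode, method)
    return res[-2], res[-1]  # (contours, hierarchy)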
Example No. 2
def screen_calibration():
    for i in range(0, 5):
        (dst, _) = get_video()
        frame = np.array(dst[::1, ::1, ::-1])
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_red = np.array([0, 0, 180])
        upper_red = np.array([255, 255, 255])
        mask = cv2.inRange(hsv, lower_red, upper_red)
        dst = cv2.bitwise_and(frame, frame, mask=mask)
        kernel = np.ones((5, 5), np.float32) / 25
        rgb = cv2.filter2D(dst, -1, kernel)
        gray_image = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray_image, 127, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        index = 0
        for cnt in contours:
            epsilon = 0.15 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)
            if cv2.arcLength(approx, True) < 15:
                contours.pop(index)
            else:
                if len(approx) == 4:
                    x, y, w, h = cv2.boundingRect(approx)
                    xlist.append(x)
                    xlist.append(x + w)
                    ylist.append(y)
                    ylist.append(y + h)
                    index += 1
    # Report the screen region only after sampling all five frames
    result = [[min(xlist), min(ylist)], [max(xlist), max(ylist)]]
    return result
Example No. 3
def doloop():
    global depth, rgb
    face_cascade = cv2.CascadeClassifier(
        'haarcascade/haarcascade_frontalface_default.xml')

    midx, midy = 0, 0
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        # Build a two panel color image
        mn = np.min(depth)
        mx = np.max(depth)
        output = np.uint8((depth - mn) * 255 / (mx - mn))
        gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        face = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in face:
            midx, midy = x + w / 2, y + h / 2

        ret2, th2 = cv2.threshold(output, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        # da = np.hstack((d3,rgb))

        # Simple Downsample
        # cv2.imshow('both',np.array(da[::2,::2,::-1]))

        cv2.imshow('frame', th2)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break
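
Note: the inline normalization above divides by (mx - mn), which divides by zero on a constant depth frame. A safer sketch (our helper, an assumption rather than the author's code):

# Hedged sketch: 8-bit depth normalization with a guard for mx == mn.
import numpy as np

def normalize_depth_u8(depth):
    d = depth.astype(np.float32)
    mn, mx = d.min(), d.max()
    if mx <= mn:  # constant frame: avoid dividing by zero
        return np.zeros(depth.shape, np.uint8)
    return ((d - mn) * (255.0 / (mx - mn))).astype(np.uint8)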
Example No. 4
def center_detect():
    result = []
    (rgb, _) = get_video()
    (depth, _) = get_depth()
    orig = np.array(rgb[::1, ::1, ::-1])
    fgmask = fgbg.apply(orig)
    ret, thresh = cv2.threshold(fgmask, 127, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(thresh, kernel, iterations=1)
    im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    index = 0
    for cnt in contours:
        if cv2.arcLength(cnt, True) < 250:
            contours.pop(index)
        else:
            index += 1
            M = cv2.moments(cnt)
            if (M['m00'] != 0):
                y = int(M['m10'] / M['m00'])
                x = int(M['m01'] / M['m00'])
                if (y > min(xlist)) and (y < max(xlist)) and (
                        x > min(ylist)) and (x < max(ylist)):
                    if (depth[x][y] + 8 < min_mat[x][y] - noise[x][y]) and (
                            depth[x][y] < 2047) and (depth[x][y] != 0) and (
                                depth[x][y] != 255 and (min_mat[x][y] < 2047)):
                        result.append([x, y])
    return result
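
Note: this example (and several others below) calls contours.pop(index) inside the loop that iterates over contours; removing an item from a list while iterating over it silently skips the element that follows each removal. A list comprehension filters without that hazard (a sketch, not the author's code):

# Hedged sketch: keep only contours with a perimeter of at least 250 px.
kept = [c for c in contours if cv2.arcLength(c, True) >= 250]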
Example No. 5
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.avi")
    i = len(num)
    video = cv2.VideoWriter(path + 'video' + str(i) + '.avi',
                            cv2.cv.CV_FOURCC('D', 'I', 'V', 'X'), 20,
                            (1280, 480))
    if not video.isOpened():
        print 'error with video opening'
        sys.exit(1)
    print 'press "q" to exit' 
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        
        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)
        
#         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        video.write(da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k)=='q':
#                 video.release()
                cv2.destroyAllWindows()
                sys.exit(0)
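
Note: cv2.cv.CV_FOURCC exists only in OpenCV 2.x; the cv2.cv submodule was removed in OpenCV 3. A minimal sketch of the same writer on OpenCV 3+ (path and parameters copied from the snippet):

# Hedged sketch: OpenCV 3+ replacement for cv2.cv.CV_FOURCC.
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video = cv2.VideoWriter('./kpictures/video0.avi', fourcc, 20, (1280, 480))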
Example No. 6
    def check_who_it_is(self):
        (video, _) = get_video()
        res = apiFaceDetectAndClassifier.getPerson(self, video, self.personsDB)
        for name in res:
            print(name, "is here")
        del video
        return res
Example No. 7
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.jpg")
    i = len(num)
    count = 0
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)

        #         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k) == 'q':
                cv2.destroyAllWindows()
                sys.exit(0)
        if (count % total) == 0:
            cv2.imwrite(path + "both" + str(i) + ".jpg", da)
            i += 1
        count += 1
Example No. 8
    def update(self):
        rate = rospy.Rate(10)  # 10hz
        while not rospy.is_shutdown():
            # Get a fresh frame
            depth, _ = get_depth()
            frame, _ = get_video()

            self.BD.detect(frame)
            self.DA.analyse(depth)
            self.I2M.find(self.BD.get_xpos())

            centroid = self.DA.get_centroid()
            obstacle = self.DA.get_obstacle()

            object_located = self.BD.get_objectLocated()

            mea = self.I2M.get_pos()

            rospy.loginfo("centroid: %d, obstacle: %d", centroid, obstacle)
            rospy.loginfo("object_located: %d", object_located)
            rospy.loginfo("im2mea: \n %s", mea)

            self.pub_depth.publish(centroid, obstacle)
            self.pub_blob.publish(object_located)
            self.pub_mea.publish(mea)

            rate.sleep()
Example No. 9
def timed_snapshot(sec=10,name="tmp_",timestamp=True,save=True, \
                    interval=0,ret=False,stack=1):
    start_time = time.time()
    # update depth and rgb each time through loop

    depth = None
    (rgb, _) = get_video()
    # Get a fresh frame
    depth = stack_depth(stack=stack)
    c = 1
    while (time.time() - start_time) < sec:
        # Get a fresh frame
        depth = stack_depth(stack=stack)
        view_frame(normalize_depth(depth), rgb)
        #depth[depth>=INVALID_DEPTH]=0
        print "%d - %d (avg: %s)" % (depth.min(), depth.max(), depth.mean())
        #print depth[235+30:245+30,315:325]

        # saves frame per specified interval
        if save and interval > 0 and (time.time() - start_time) > c * interval:
            c += 1
            fname = "%s%s%s.pkl" % (DATA_DIR, name, int(time.time())) \
                        if timestamp else "%s%s.pkl" % (DATA_DIR, name)
            save_frame((depth, rgb), fname)

    # saves frame at the very end
    if save and interval == 0:
        fname = "%s%s%s.pkl" % (DATA_DIR, name, int(time.time())) \
                    if timestamp else "%s%s.pkl" % (DATA_DIR, name)
        save_frame((depth, rgb), fname)

    # optionally returns frame
    if ret: return (depth, rgb)
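
Note: save_frame is not shown in this example. Given the .pkl filenames, a plausible sketch is a plain pickle dump; this is an assumption, not the author's implementation:

# Hedged sketch of what save_frame might do: pickle the (depth, rgb) pair.
import pickle

def save_frame(frame, fname):
    with open(fname, 'wb') as f:
        pickle.dump(frame, f)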
Example No. 10
def doloop():
    global depth, rgb
    path = "./kpictures/"
    num = glob.glob(str(path) + "*.jpg")
    i = len(num)
    count = 0 
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        
        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8, copy=False)
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        da = np.hstack((d3, bgr)).astype(np.uint8, copy=False)
        
#         src = cv2.cv.fromarray(da)
        cv2.imshow('both', da)
        k = cv2.waitKey(5)
        if (k > -1) and (k < 256):
            if chr(k) == 'q':
                cv2.destroyAllWindows()
                sys.exit(0)
        if (count % total) == 0:
            cv2.imwrite(path + "both" + str(i) + ".jpg", da)
            i += 1
        count += 1
Example No. 13
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video(0, freenect.VIDEO_IR_8BIT)
        #(depth,_), (rgb,_) = get_depth(), get_video()

        # Build a two panel color image
        #d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        #da = np.hstack((d3,rgb))

        # Simple Downsample
        #cv.ShowImage('both', cv.fromarray(np.array(da[::2,::2,::-1])))
        #rgbarray = cv.fromarray(np.array(rgb))
        rgbarray = np.array(rgb)
        #gray = cv2.cvtColor(rgbarray, cv2.COLOR_BGR2GRAY)
        #blurred = cv.CloneMat(rgbarray)
        #sobeled = cv.CreateMat(rgbarray.rows, rgbarray.cols, cv.CV_32F)

        #cv.Sobel(rgbarray, sobeled, 1, 1)
        #sobeled = cv2.Sobel(blurred, cv.CV_32F, 1, 1)
        _, threshed = cv2.threshold(rgbarray, 250, 255, cv2.THRESH_BINARY)
        blurred = cv2.GaussianBlur(threshed, (255, 255), 0)

        #cv.Sobel(rgbarray2, sobeled, 0, 0, 1)
        cv.ShowImage('both', cv.fromarray(threshed))
        cv.WaitKey(5)
Example No. 14
def main():
  # Get a fresh frame
  (depth,_), (rgb,_) = get_depth(), get_video()
  # Build a two panel color image
  d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
  da = np.hstack((d3,rgb))
  # Simple Downsample
  cv2.imshow('both',np.array(da[::2,::2,::-1]))
Example No. 15
def do_loop():
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Show it
        #show_and_move(rgb, depth)
        show_and_move(rgb, depth)
        show_depth(depth)
        cv.WaitKey(5)
Example No. 16
    def GetDepth(self):
        # get data from the sensor
        (depth, _), (rgb, _) = get_depth(), get_video()

        # convert data
        depth = depth.astype(np.uint8)
        rgb = cv.cvtColor(rgb, cv.COLOR_RGB2BGR)

        return depth, rgb
Example No. 17
def display():
    global arr, depth, send_lock
    while True:
        if send_lock.acquire():
            (depth,_),(arr,_)=get_depth(),get_video()
            d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
            da = np.hstack((d3,arr))
            cv.ShowImage('both',cv.fromarray(np.array(da[::2,::2,::-1])))
            send_lock.release()
        cv.WaitKey(5)
Example No. 19
def doloop():

    capture=cv.CaptureFromCAM(0)
    fourcc = cv.CV_FOURCC('X','V','I','D')
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FPS, 30)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    global depth,ir, rgb
    count = 0

    while True:
        rgb_frame=cv.QueryFrame(capture)
        rgb = iplimage_to_numpy_color(rgb_frame)

        (depth,_), (ir,_) = get_depth(), get_video(format=2)
        np.resize(depth, (300,400))
        np.resize(ir, (300,400))
        np.resize(rgb, (300,400,3))

        # Build a two panel color image
        d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        ir3 = np.dstack((ir, ir, ir)).astype(np.uint8)
        depth_ir = np.hstack((d3,ir3))

        # Form Frame/Image
        image = cv.fromarray(np.array(depth_ir[:,:,::-1]))
        rgb_image = numpy_to_iplimage_color(rgb)

        d_cvMat = cv.fromarray(np.array(d3[:,:,::-1]))
        depth_image = cvMat_to_iplimage_color(d_cvMat)


        opacity = 0.4
        cv.AddWeighted(depth_image, opacity, rgb_image, 1 - opacity, 0, rgb_image)

        # Playback Frame
        cv.ShowImage('Trio',image)
        cv.ShowImage('RGB',rgb_image)
        cv.WaitKey(5)

        # Keyboard interrupt for Exit
        c=cv.WaitKey(2)
        if c==27: #Break if user enters 'Esc'.
            break


        # How does Downsample Works
        # ::2, means you skip by 2 -> [1,2,3,4] -> [2,4]
        # Downsample col -> cv.fromarray(np.array(rgb[:, ::2, ::-1]))
        # Downsample row -> cv.fromarray(np.array(rgb[::2, :, ::-1]))
        # Color Chanel: All Color -> cv.fromarray(np.array(rgb[::2, ::2, ::-1]))
        # Color Chanel: Blue      -> cv.fromarray(np.array(rgb[::2, ::2]))
        # Color Chanel: Gray      -> cv.fromarray(np.array(rgb[::2, ::2, 0]))
        """
Example No. 20
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth,_), (rgb,_) = get_depth(), get_video()
        # Build a two panel color image
        d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        da = np.hstack((d3,rgb))
        # Simple Downsample
        cv.ShowImage('both',cv.fromarray(np.array(da[::2,::2,::-1])))
        cv.WaitKey(5)
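
Note: Kinect depth values are 11-bit (0 to 2047), so the bare astype(np.uint8) in this pattern wraps everything above 255. One fix, used in Example No. 46 below, is to clip to 10 bits and shift down to 8 before display (a sketch of that idea):

# Hedged sketch: clip and shift instead of letting astype(np.uint8) wrap.
d8 = (np.clip(depth, 0, 2**10 - 1) >> 2).astype(np.uint8)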
Example No. 21
def doloop():
    global depth, rgb
    for i in range(1, 10):
        (depth, _), (rgb, _) = get_depth(), get_video()
        bg = cv.CloneMat(cv.fromarray(depth.astype(numpy.uint8)))
    scratch = cv.CreateImage((640, 480), 8, 1)
    scratch2 = cv.CreateImage((640, 480), 8, 1)
    cv.SaveImage('bg.png', bg)

    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        depth = cv.fromarray(depth.astype(numpy.uint8))
        cv.AbsDiff(bg, depth, scratch)
        cv.Sub(scratch, 2, 10, scratch2)
        # cv.ConvertScale(scratch,scratch2,50)
        cv.Add(depth, scratch2, scratch2)

        # Simple Downsample
        cv.ShowImage('both', scratch2)
        cv.WaitKey(10)
Example No. 22
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # Simple Downsample
        cv.ShowImage('both', cv.fromarray(np.array(da[::2, ::2, ::-1])))
        cv.WaitKey(5)
Example No. 23
def doloop():
    while True:
        # Get a fresh frame
        (depth,_), (rgb,_) = get_depth(), get_video()

        depth = depth[::2, ::2]
        r,g,b = process(depth)

        # Build a two panel color image
        d3 = np.dstack((r,g,depth/20)).astype(np.uint8)
        da = np.hstack((d3,rgb[::2, ::2]))

        # Simple Downsample
        cv.ShowImage('both', cv.fromarray(np.array(da[:, :, ::-1])))
        cv.WaitKey(5)
Example No. 24
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # Simple Downsample
        cv2.imshow('both', np.array(da[::2, ::2, ::-1]))

        print len(rgb), len(rgb[0]), len(rgb[0][0])

        raw_input(':')
Example No. 25
def readkinect():
    (depth, _), (rgb, _) = get_depth(), get_video()
    if dodepth:
        da = np.dstack((depth, depth, depth)).astype(np.uint8)  # this is correct depth
        frame = np.array(da[::1, ::1, ::-1])
    else:
        frame = rgb[::1, ::1, ::-1]
    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # fgmask = fgbg.apply(frame)
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', frame)
    k = cv2.waitKey(3) & 0xff
    return frame
Example No. 26
def doloop():
    global depth, rgb, initdepth
    min = 0
    (initdepth, _) = get_depth()
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        #cv2.rectangle(rgb, (230, 100), (440, 300), (255, 0, 0), 2)
        test = np.array(rgb[::2, ::2, ::-1])
        cv2.imshow('FIRST', test)
        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))
        # Simple Downsample
        cv2.imshow('both', np.array(da[::2, ::2, ::-1]))
        cv2.waitKey(5)
Example No. 27
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (rgb, _) = get_video()

        # Get average coloring of a center square across for each of RGB
        _r, g, b = np.mean(np.mean(rgb[220:260, 300:340], axis=0), axis=0)
        print _r, g, b
        data_tuple = {}
        data_tuple['sensor_id'] = str(macaddr)
        data_tuple['ts'] = str(calendar.timegm(time.gmtime()))
        data_tuple['r'] = str(_r)
        data_tuple['g'] = str(g)
        data_tuple['b'] = str(b)
        r.publish('proximity', json.dumps(data_tuple))
        time.sleep(0.1)
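
Note: this snippet publishes through an r object and a macaddr that it never defines. A minimal sketch of the context it appears to assume (the Redis connection details are guesses):

# Hedged sketch of the assumed setup: a Redis client named r and the
# machine's MAC address.
import uuid
import redis

r = redis.StrictRedis(host='localhost', port=6379)
macaddr = hex(uuid.getnode())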
Example No. 28
    def find_position(self):
	print "Kinect is trying to find the image"
        (kinect_depth,_), (rgb,_) = get_depth(), get_video() 
        self.img = video_cv(rgb)
        depth_img = pretty_depth_cv(kinect_depth)
 
        position = self._get_pos(self.img)

        depth = self._get_depth(depth_img, debug=False)

        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1) 

        fps = 1/(time.time() - self.lasttime)
        s1 = "FPS:%.2f" % fps
        self.lasttime = time.time()
        cv.PutText(self.img,s1, (0,30),font, cv.CV_RGB(255, 0, 0))

        dt = "Depth: %d" % depth
        if position:
            pt = "Pos: X=%d Y=%d" % (position[0], position[1])
        else:
            pt = "Pos: N/A"
        cv.PutText(self.img, dt, (0,60),font, cv.CV_RGB(255, 0, 0))
        cv.PutText(self.img, pt, (0,90),font, cv.CV_RGB(255, 0, 0))

        offset = 120
        for t in self.text:
            cv.PutText(self.img, t, (0,offset),font, cv.CV_RGB(255, 0, 0))
            offset += 30

        cv.Circle(self.img, (self.sp[0], self.sp[1]) , 10, cv.CV_RGB(0, 255, 0), 1)

        cv.ShowImage('RGB', self.img)
        #cv.SaveImage('RGB-%d.png' % (time.time()*100), self.img)
        #cv.ShowImage('DEPTH', depth_img)
        cv.WriteFrame(self.writer, self.img)
        cv.WaitKey(5)

        #cv.ShowImage('depth_mask', depth_mask)
        try:
            return (position[0], position[1], depth)
        except:
            return (None, None, None)
Example No. 29
def doloop():
    global depth,ir, rgb

    while True:
        """
        ctypedef enum freenect_video_format:
        FREENECT_VIDEO_RGB
        FREENECT_VIDEO_BAYER
        FREENECT_VIDEO_IR_8BIT
        FREENECT_VIDEO_IR_10BIT
        FREENECT_VIDEO_IR_10BIT_PACKED
        FREENECT_VIDEO_YUV_RGB
        FREENECT_VIDEO_YUV_RAW
        """

        (depth,_), (ir,_) = get_depth(), get_video(format=2)


        # How does Downsample Works
        # ::2, means you skip by 2 -> [1,2,3,4] -> [2,4]
        # Downsample col -> cv.fromarray(np.array(rgb[:, ::2, ::-1]))
        # Downsample row -> cv.fromarray(np.array(rgb[::2, :, ::-1]))
        # Color Chanel: All Color -> cv.fromarray(np.array(rgb[::2, ::2, ::-1]))
        # Color Chanel: Blue      -> cv.fromarray(np.array(rgb[::2, ::2]))
        # Color Chanel: Gray      -> cv.fromarray(np.array(rgb[::2, ::2, 0]))

        # Build a two panel color image
        d3 = np.dstack((depth,depth,depth)).astype(np.uint8)
        i3 = np.dstack((ir, ir, ir)).astype(np.uint8)
        da = np.hstack((d3,i3))

        # Form Frame/Image
        image = cv.fromarray(np.array(da[:,:,::-1]))

        # Playback Frame
        cv.ShowImage('Dual',image)
        cv.WaitKey(5)

        # Keyboard interrupt for Exit
        c=cv.WaitKey(2)
        if c==27: #Break if user enters 'Esc'.
            break
Example No. 30
def getData():
    global depth, rgb

    i = 0
    # for the first ten frames
    while i < 10:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        data.append([depth, rgb])
        # data = data + str(depth) + " \n $$$ \n" + str(rgb) + "\n !!! \n"

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # Simple Downsample
        cv.ShowImage("Depth and RGB", cv.fromarray(np.array(da[::2, ::2, ::-1])))
        cv.WaitKey(5)
        i = i + 1
Example No. 32
def doloop():
    global depth, rgb
    while True:
        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()

        # Build a two panel color image
        d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
        da = np.hstack((d3, rgb))

        # detect apriltag
        data = da[::2, ::2, ::-1]
        #image = cv2.imread(data)
        gray = cv2.cvtColor(data, cv2.COLOR_RGB2GRAY)

        detect_apriltag(gray, data)

        # Simple Downsample
        cv2.imshow('both', np.array(data))
        if cv2.waitKey(1000) == 27:
            break
Example No. 33
def main():
    print ('[*] Start')
    start = datetime.now()
    folder_name=get_args()
    os.mkdir(folder_name)
    os.chdir(folder_name)
    i=1
    buff=[]
    try:
        while True:
            print '[*] Recording Index %s'%(str(i))
            (depth,_), (rgb,_) = get_depth(), get_video()
            buff.append([depth.copy(),rgb.copy()])
            i+=1
            time.sleep(0.0001)
            if i>=1000000:
                break
    except KeyboardInterrupt:
        sync_stop() # stop the sync_get_video...etc
        print '\n[*] End Buff with following information :'
        duration = str((datetime.now()-start).total_seconds() ).split('.')[0]
        fps = i/((datetime.now()-start).total_seconds())
        print '[*] Duration is { %s }'%(duration)
        print '[*] FPS is { %s }'%(str(fps).split('.')[0])

        print '\n[*] Start Saving IMG from Buff'
        try:
            for j in range(i):
                #list.pop(index)
                depth,rgb = buff.pop(0)
                a=""
                if len(str(j))<frame_length_limit_order:
                    a='0'*(frame_length_limit_order-len(str(j)))
                depth=depth_to_gray(depth)
                # io.imsave to a series of .png
                io.imsave('depth'+a+str(j)+'.png',depth)
                io.imsave('rgb'+a+str(j)+'.png',rgb)
                print '[*] Saving Index %s'%(str(j))
        except:
            print '\n[*] End Saving IMG '
Example No. 34
def main():
    print ('[*] Start')
    start = datetime.now()
    folder_name=get_args()
    os.mkdir(folder_name)
    os.chdir(folder_name)
    i=1
    try:
        while True:
            (depth,_), (rgb,_) = get_depth(), get_video()
            a=""
            if len(str(i))<frame_length_limit_order:
                a='0'*(frame_length_limit_order-len(str(i)))
            elif len(str(i))>frame_length_limit_order:
                break
            depth=depth_to_gray(depth)
            io.imsave('depth'+a+str(i)+'.png',depth)
            io.imsave('rgb'+a+str(i)+'.png',rgb)
            i+=1
    except KeyboardInterrupt:
        duration = str((datetime.now()-start).total_seconds()).split('.')[0]
        print '\n[*] Recording Duration is { %s }'%(duration)
Example No. 35
    def doloop(self):
        global depth, rgb
        while True:
            (depth, _), (rgb, _) = get_depth(), get_video()

            d3 = np.dstack((depth, depth, depth)).astype(np.uint8)

            redAvg = 0
            greenAvg = 0
            blueAvg = 0
            distanceAvg = 0

            for i in range(len(d3)):
                redAvg += d3[0][i][0]
                greenAvg += d3[0][i][1]
                blueAvg += d3[0][i][2]

            redAvg = redAvg / len(d3)
            greenAvg = greenAvg / len(d3)
            blueAvg = blueAvg / len(d3)
            distanceAvg = (redAvg + blueAvg + greenAvg) / 3

            cv.ShowImage("both", cv.fromarray(np.array(d3[::2, ::2, ::-1])))
            self.sendCommandASCII("128")  # sets mode to passive
            self.sendCommandASCII("131")  # sets mode to safe

            if distanceAvg >= 255 or distanceAvg <= 10:
                self.sendCommandASCII("140 3 1 64 16 141 3")  # beep
                self.callMovementCommand(0, -100)

            elif distanceAvg > distanceThreshold:
                self.sendCommandASCII("140 3 1 64 16 141 3")  # beep
                self.callMovementCommand(100, 100)

            else:
                self.callMovementCommand(0, 0)  # stop
                self.callMovementCommand(0, 100)  # Right turn

            cv.WaitKey(10)
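
Note: the averaging loop above runs i over len(d3) (the 480 rows) but always indexes d3[0][i], so it samples only part of the first row. Averaging the whole depth panel is a one-liner in NumPy (a sketch, not the author's code):

# Hedged sketch: mean over the entire depth panel rather than row 0.
distanceAvg = int(d3.mean())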
Example No. 36
    def update(self):
        rate = rospy.Rate(10)  # 10hz
        while not rospy.is_shutdown():
            # Get a fresh frame
            depth, _ = get_depth()
            frame, _ = get_video()

            self.BD.detect(frame)
            self.DA.analyse(depth)

            centroid = self.DA.get_centroid()
            obstacle = self.DA.get_obstacle()
            x_pos = self.BD.get_xpos()
            object_located = self.BD.get_objectLocated()

            rospy.loginfo("centroid: %d obstacle: %s", centroid, obstacle)
            rospy.loginfo("x_pos: %d object_located: %s", x_pos, object_located)

            self.pub_depth.publish(centroid, obstacle)
            self.pub_blob.publish(x_pos, object_located)

            rate.sleep()
Example No. 37
def center_detect():
    result = []
    (rgb, _) = get_video()
    orig = np.array(rgb[::1, ::1, ::-1])
    fgmask = fgbg.apply(orig)
    cv2.imshow('background eliminated', fgmask)
    ret, thresh = cv2.threshold(fgmask, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(thresh, kernel, iterations=1)
    im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    index = 0
    for cnt in contours:
        if cv2.arcLength(cnt, True) < 250:
            contours.pop(index)
        else:
            index += 1
            hull = cv2.convexHull(cnt)
            M = cv2.moments(cnt)
            if M['m00'] != 0:
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])
                result.append([cx, cy])
    return result
Example No. 38
    def on_message(self, message):
        print("STATE : MESSAGE RECEIVED")
        if message == "edges":
            self.write_message(str(get_screen_params()))
            print("***Edges Sent ***")
        elif message == "go":
            print(status.get_closed())
            while True:
                print("*** sending Contour ***")
                contour_list = []
                (depth, _) = get_depth()
                (rgb, _) = get_video()
                orig = np.array(rgb[::1, ::1, ::-1])
                fgmask = fgbg.apply(orig)
                ret, thresh = cv2.threshold(
                    fgmask, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                kernel = np.ones((5, 5), np.uint8)
                erosion = cv2.erode(thresh, kernel, iterations=1)
                im2, contours, hierarchy = cv2.findContours(
                    erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                index = 0
                for cnt in contours:
                    for node in cnt:
                        for elem in node:
                            y = elem[0].astype(int)
                            x = elem[1].astype(int)
                            if (y > min(xlist)) and (y < max(xlist)) and (
                                    x > min(ylist)) and (x < max(ylist)):
                                if (depth[x][y] + 8 < min_mat[x][y] - noise[x][y]) and (
                                        depth[x][y] < 2047) and (depth[x][y] != 0) and (
                                        depth[x][y] != 255 and (min_mat[x][y] < 2047)):
                                    contour_list.append([x, y])
                                    index += 1
                if len(contour_list) != 0:
                    self.write_message(str(contour_list))
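
Note: the triple nested loop above applies the same per-pixel validity test as Examples No. 1 and No. 4. The whole test vectorizes, assuming depth, min_mat, and noise are 480x640 integer arrays as elsewhere on this page (a sketch):

# Hedged sketch: vectorized version of the per-pixel depth validity test.
import numpy as np

def valid_mask(depth, min_mat, noise):
    return ((depth + 8 < min_mat - noise) &
            (depth < 2047) &
            (depth != 0) &
            (depth != 255) &
            (min_mat < 2047))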
Example No. 39
                if depthc[hd][wd] < min_mat[hd][wd]:
                    min_mat[hd][wd] = depthc[hd][wd]
                if depthc[hd][wd] > max_mat[hd][wd]:
                    max_mat[hd][wd] = depthc[hd][wd]
#Noise Calculation
for hd in range(0, 480):
    for wd in range(0, 640):
        if (abs(max_mat[hd][wd] - min_mat[hd][wd]) < 200):
            noise[hd][wd] = max_mat[hd][wd] - min_mat[hd][wd]
        else:
            noise[hd][wd] = 0
timer = time.time() - timer
print('Calibration Time :', timer)
#Screen Region Calibration
for i in range(0, 5):
    (dst, _) = get_video()
    # -----------
    orig = np.array(dst[::1, ::1, ::-1])
    frame = np.array(dst[::1, ::1, ::-1])
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_red = np.array([0, 0, 183])
    upper_red = np.array([255, 255, 255])

    mask = cv2.inRange(hsv, lower_red, upper_red)
    dst = cv2.bitwise_and(frame, frame, mask=mask)
    # -----------
    kernel = np.ones((5, 5), np.float32) / 25
    rgb = cv2.filter2D(dst, -1, kernel)
    gray_image = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray_image, 127, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
Example No. 40
import numpy as np
import cv, sys
import cv2

fps = 25.0      # so we need to hardcode the FPS
print "Recording at: ", fps, " fps"

frame_size = (640,480)
print "Video size: ", frame_size

writer = cv.CreateVideoWriter("original.avi", cv.CV_FOURCC('X','V','I','D'), fps, frame_size, True)

global depth, ir
while True :

    (depth,_), (ir,_) = get_depth(), get_video(format=2)

    # Process Depth Matrix to Binary Matrix
    binary = (ir>200).astype(float)
    binary = binary*255

    im = imresize(binary, (480,640))
    binary_img = cv.fromarray(im)

    cv.ShowImage("original",binary_img)

    cvMat = cvMat_to_iplimage_grayscale(binary_img)
    image = cv.CreateImage (frame_size, 8, 3)
    bitmap = cv.CreateImageHeader(frame_size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(bitmap, cvMat.tostring())
    cv.WriteFrame(writer, image)
Example No. 41
    # Noise Calculation : Max - Min matrix
    for hd in range(0, 480):
        for wd in range(0, 640):
            if (abs(max_mat[hd][wd] - min_mat[hd][wd]) < 200):
                noise[hd][wd] = max_mat[hd][wd] - min_mat[hd][wd]
            else:
                noise[hd][wd] = 0
    timer = time.time() - timer
    print('Calibration Time :', timer)
    return (1)


screen_calibration()
depth_calibration()
while True:
    (rgb, _) = get_video()
    (depth, _) = get_depth()
    orig = np.array(rgb[::1, ::1, ::-1])
    fgmask = fgbg.apply(orig)
    cv2.imshow('background eliminated', fgmask)
    ret, thresh = cv2.threshold(fgmask, 127, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(thresh, kernel, iterations=1)
    im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    index = 0
    result = []
    curr = time.time()
    for cnt in contours:
        for node in cnt:
Example No. 42
def get_kinect():
    image, _ = get_video()
    depth, _ = get_depth()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image, depth
Example No. 43
def mainfunc():
    lower_red = np.array([0, 0, 69])
    upper_red = np.array([255, 255, 255])
    global rgb, initdepth
    global BM, GM, RM, BMM, GMM, RMM
    BM = 0
    GM = 0
    RM = 180
    BMM = 255
    GMM = 255
    RMM = 255
    (dst, _) = get_video()
    #-----------
    orig = np.array(dst[::1, ::1, ::-1])
    frame = np.array(dst[::1, ::1, ::-1])
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_red = np.array([BM, GM, RM])
    upper_red = np.array([BMM, GMM, RMM])

    mask = cv2.inRange(hsv, lower_red, upper_red)
    dst = cv2.bitwise_and(frame, frame, mask=mask)
    #-----------
    kernel = np.ones((5, 5), np.float32) / 25
    rgb = cv2.filter2D(dst, -1, kernel)
    gray_image = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray_image, 150, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    font = cv2.FONT_HERSHEY_SIMPLEX
    index = 0
    for cnt in contours:
        epsilon = 0.15 * cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, epsilon, True)
        if cv2.arcLength(approx, True) < 20:
            contours.pop(index)
        else:
            if len(approx) == 4:
                x, y, w, h = cv2.boundingRect(approx)
                cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
                index = index + 1
                #cv2.drawContours(rgb, approx, -1, (0, 255, 0), 3)
    cv2.namedWindow('rgb')
    cv2.startWindowThread()
    cv2.createTrackbar('BMin', 'rgb', BM, 255, change_BM)
    cv2.createTrackbar('GMin', 'rgb', GM, 255, change_GM)
    cv2.createTrackbar('RMin', 'rgb', RM, 255, change_RM)
    cv2.createTrackbar('BMax', 'rgb', BMM, 255, change_BMM)
    cv2.createTrackbar('GMax', 'rgb', GMM, 255, change_GMM)
    cv2.createTrackbar('RMax', 'rgb', RMM, 255, change_RMM)
    cv2.createTrackbar('0 : OFF \n1 : ON', 'rgb', 0, 1, nothing)
    while True:
        (dst, _) = get_video()
        # -----------
        frame = np.array(dst[::1, ::1, ::-1])
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        lower_red = np.array([BM, GM, RM])
        upper_red = np.array([BMM, GMM, RMM])
        mask = cv2.inRange(hsv, lower_red, upper_red)
        dst = cv2.bitwise_and(frame, frame, mask=mask)
        # -----------
        kernel = np.ones((5, 5), np.float32) / 25
        rgb = cv2.filter2D(dst, -1, kernel)
        gray_image = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray_image, 150, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        index = 0
        for cnt in contours:
            epsilon = 0.15 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)
            if cv2.arcLength(approx, True) < 20:
                contours.pop(index)
            else:
                if len(approx) == 4:
                    x, y, w, h = cv2.boundingRect(approx)
                    cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    index = index + 1
        cv2.imshow('rgb', rgb)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
Example No. 44
class ObjectContour:
    contour_array_xy = []
    contour_array_depth = []
    contour_center = []
    mouse_clicked = []
    def add_item(self, x, y, z):
        self.contour_array_xy.append([x, y])
        self.contour_array_depth.append(z)

    def add_center(self, cent):
        self.contour_center = cent

    def print_contour(self):
        print(self.contour_array_xy, self.contour_array_depth, self.contour_center)
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
cont = ObjectContour()
while True:
    (depth, _), (rgb, _) = get_depth(), get_video()
    orig = np.array(rgb[::1, ::1, ::-1])
    fgmask = fgbg.apply(orig)
    cv2.imshow('No Background', fgmask)
    ret, thresh = cv2.threshold(fgmask, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(thresh, kernel, iterations=1)
    im2, contours, hierarchy = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    index = 0
    for cnt in contours:
        if cv2.arcLength(cnt, True) < 250:
            contours.pop(index)
        else:
            print("----------------------")
            print(str(index)+":")
            index += 1
Example No. 45
import gui
import cv2
from freenect import sync_get_video as get_video

while True:
    image, _ = get_video()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    gui.display(image)
    try:
        gui.refresh()
    except KeyboardInterrupt:
        break

gui.close()
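
Note: the gui module imported here is not shown. A minimal sketch of display/refresh/close built on cv2's HighGUI, matching how the loop uses them (our guess at the interface, not the actual module):

# Hedged sketch of the assumed gui helpers.
import cv2

def display(image, window='gui'):
    cv2.imshow(window, image)

def refresh(delay_ms=5):
    if cv2.waitKey(delay_ms) & 0xFF == 27:  # treat Esc like Ctrl-C
        raise KeyboardInterrupt

def close():
    cv2.destroyAllWindows()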
Example No. 46
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }
    try:
        nChannels = a.shape[2]
    except:
        nChannels = 1
    cv_im = cv.CreateImageHeader((a.shape[1], a.shape[0]), dtype2depth[str(a.dtype)], nChannels)
    cv.SetData(cv_im, a.tostring(), a.dtype.itemsize * nChannels * a.shape[1])
    return cv_im


while True:

    (raw_depth, _) = get_depth()
    (raw_video, _) = get_video()

    np.clip(raw_depth, 0, (2**10)-1, raw_depth)
    raw_depth = raw_depth >> 2
    raw_depth = raw_depth.astype(np.uint8)#np.dstack((raw_depth, raw_depth, raw_depth))

    video = cv.GetImage(cv.fromarray(raw_video))
    cv.CvtColor(video, video, cv.CV_RGB2BGR) 
    cv.Flip(video, video, 1)

    depth = cv.GetImage(cv.fromarray(raw_depth))
    cv.Flip(depth, depth, 1)

    cv.Threshold(depth, depth, 125, 255, cv.CV_THRESH_BINARY)
   
    temp = cv.CloneImage(depth)
Example No. 47
def doloop():

    #Series of commands to do pointer operations on the kinect (motor, led, accelerometer)
    ctx = init()  #Initiates device
    mdev = open_device(ctx, 0)  #Opens the device for commands
    set_led(mdev, 1)  #Sets LED to green
    close_device(
        mdev)  #Closes device. Device must be closed immediately after usage

    #Mean filter caches
    yList = [0, 0, 0, 0, 0, 0]
    xList = [0, 0, 0, 0, 0, 0]

    #Sets color tuples
    RED = (255, 0, 0)
    BLUE = (0, 0, 255)
    TEAL = (0, 200, 100)
    BLACK = (0, 0, 0)

    #Sets the size of the screen
    xSize = 640
    ySize = 480

    done = False  #Main while loop bool counter
    pygame.init()  #Initiates pygame
    screen = pygame.display.set_mode(
        (xSize, ySize), pygame.RESIZABLE)  #Creates the pygame window
    screen.fill(BLACK)  #Fills the window black

    #Initiates the xTempPos and yTempPos values so that the point will remain stationary
    #if the minimum value is larger than 600
    xTempPos = xSize / 2
    yTempPos = ySize / 2

    global depth, rgb  #Makes the depth and rgb variables global

    while not done:
        screen.fill(BLACK)  #Makes the pygame window black after each iteration

        # Get a fresh frame
        (depth, _) = get_depth()
        (rgb, _) = get_video()

        minVal = np.min(depth)  #This is the minimum value from the depth image
        minPos = np.argmin(
            depth)  #This is the raw index of the minimum value above
        xPos = np.mod(minPos, xSize)  #This is the x component of the raw index
        yPos = minPos // xSize  #This is the y component of the raw index

        #This is the mean filter process
        """
        A mean filter works by collecting values in a cache list and taking the mean of them
        to determine the final value. It works in this case to decrease the amount of
        volatility the minimum position experiences to get a smoother display with a more
        consistent value. My computer works smoothly with a 5 bit cache where as a faster
        computer may need a larger cache and a slower computer may need a smaller cache
        """
        xList.append(xPos)
        del xList[0]
        xPos = int(mean(xList))
        yList.append(yPos)
        del yList[0]
        yPos = int(mean(yList))
        """
        This if statement says that if the minimum value is below 600 to store the minimum
        positions in xTempPos and yTempPos and to make the dot color red. Also if the minimum
        value is larger than 600, xPos and yPos become the last stored minimum and maximum
        positions. It also changes the color to purple
        """
        if minVal < 600:
            xTempPos = xPos
            yTempPos = yPos
            COLOR = cv.RGB(255, 0, 0)
        else:
            xPos = xTempPos
            yPos = yTempPos
            COLOR = cv.RGB(100, 0, 100)

        #cv.Circle(rgb, (xPos, yPos), 2, COLOR, 40) #draws a circle of a certain color at minimum position

        #cv.ShowImage('Image',rgb) #Shows the image
        cv.WaitKey(5)  #Keyboard interrupt
        """
        The if statement below sets up the virtual joystick by basically breaking the pygame
        window into four parts. A dot representing the minimum position is drawn on the window
        and the corresponding button based on the position is "pressed". The quarter of the
        window in which the button "pressed" corresponds to turns teal after being "pressed"

        Top Right   : A
        Bottom Right: B
        Bottom Left : Y
        Top Left    : X
        """
        if xPos <= xSize / 2 and yPos <= ySize / 2:
            command = 'A'
            rect1 = pygame.Rect((xSize / 2, 0), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        elif xPos <= xSize / 2 and yPos > ySize / 2:
            command = 'B'
            rect1 = pygame.Rect((xSize / 2, ySize / 2), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        elif xPos > xSize / 2 and yPos <= ySize / 2:
            command = 'X'
            rect1 = pygame.Rect((0, 0), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        else:
            command = 'Y'
            rect1 = pygame.Rect((0, ySize / 2), (xSize / 2, ySize / 2))
            pygame.draw.rect(screen, TEAL, rect1)
        pygame.draw.line(
            screen, BLUE, (xSize / 2, ySize / 2),
            (xSize - xPos,
             yPos))  #Draws a line from the middle to the minimum position
        pygame.draw.circle(screen, RED, (xSize - xPos, yPos),
                           10)  #Draws the circle on pygame window
        pygame.display.flip()  #Displays the processed pygame window
        print command, minVal  #Prints the "pressed" button and the minimum value
        for e in pygame.event.get():  #Itertates through current events
            if e.type is pygame.QUIT:  #If the close button is pressed, the while loop ends
                done = True
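
Note: the mean filter described in the docstring (append, delete index 0, average) can also be written with collections.deque, which discards the oldest sample automatically; an alternative sketch, not the author's code:

# Hedged sketch: a fixed-size deque implements the same 6-sample mean filter.
from collections import deque

cache = deque([0] * 6, maxlen=6)

def smooth(value):
    cache.append(value)  # the oldest sample drops out automatically
    return int(sum(cache) / len(cache))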
Example No. 48
from opencv.cv import *
from opencv import adaptors
from opencv.highgui import *
from time import time
from freenect import sync_get_depth as get_depth, sync_get_video as get_video, init
import numpy as np

from rx_config import *
from timing_stats import *

#initialize the camera
ctx = init()

# Grab an initial frame to get the video size
global depth, rgb
(depth,_), (rgb,_) = get_depth(), get_video()

rgbFrameSize = cvGetSize(rgb)
depthSize = cvGetSize(depth)
dwnFrameSize = cvSize(rgbFrameSize.width / 2, rgbFrameSize.height / 2)
dwnDepthSize = cvSize(depthSize.width / 2, depthSize.height / 2)

print 'rgbSize = %d %d' % (rgbFrameSize.width, rgbFrameSize.height)
print 'depthSize = %d %d' % (depthSize.width, depthSize.height)


# Allocate processing chain image buffers the same size as
# the video frame
rgbFrame        = cvCreateImage( rgbFrameSize, cv.IPL_DEPTH_8U, 3 )
depthFrame      = cvCreateImage( depthSize,    cv.IPL_DEPTH_16U, 3 )
dwnDepthFrame   = cvCreateImage( dwnDepthSize,    cv.IPL_DEPTH_16U, 1 )#tbd 3 or 1?
Example No. 49
#ctx = fn.init()
#if fn.num_devices(ctx) > 0:
#    dev = fn.open_device(ctx, 0)
#    
#curr_tilt_state = fn.get_tilt_state(dev)
#curr_tilt_degs = fn.get_tilt_degs(curr_tilt_state)
        
#print "Current Angle:", curr_tilt_degs
        
#fn.set_tilt_degs(dev, 15)
#fn.shutdown(ctx)

raw_sample = None
while raw_sample is None:
    (raw_sample, _) = get_video()

sample_screen = cv.GetImage(cv.fromarray(raw_sample))
cv.Flip(sample_screen, sample_screen, 1)
cv.CvtColor(sample_screen, sample_screen, cv.CV_RGB2BGR)
screen_size = cv.GetSize(sample_screen)

colour_image = cv.CreateImage(screen_size, 8, 3)
grey_image = cv.CreateImage(screen_size, cv.IPL_DEPTH_8U, 1)
moving_average = cv.CreateImage(screen_size, cv.IPL_DEPTH_32F, 3)

cv.Smooth(sample_screen, sample_screen, cv.CV_GAUSSIAN, 3, 0)
difference = cv.CloneImage(sample_screen)
temp = cv.CloneImage(sample_screen)
cv.ConvertScale(sample_screen, moving_average, 1.0, 0.0)
Example No. 50
    def run(self):
        heat=0
        storage = cv.CreateMemStorage()
        if not self.solo:
            haar=cv.Load('haarcascades/haarcascade_profileface.xml') 
        else:
            haar=cv.Load('haarcascades/haarcascade_frontalface_default.xml')
        threshold=10
        while not self._stop:
            # Get a fresh frame
            (depth,_), (rgb,_) = get_depth(), get_video()
            #image=array2PIL(rgb, (640, 480))
            
            #o=rgb.astype(numpy.uint8)
            w=depth.astype(numpy.uint8)
            o=cv.fromarray(rgb)
            if self.online:
                grayscale = cv.CreateImage((640,  480), 8, 1)
                cv.CvtColor(o, grayscale, cv.CV_BGR2GRAY)
                storage = cv.CreateMemStorage(0)
                # equalize histogram
                cv.EqualizeHist(grayscale, grayscale)
                
                faces = cv.HaarDetectObjects(grayscale, haar, storage, 1.1, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (30, 30))
                left=None
                right=None
                if self.solo:
                    threshold=20
                if faces:
                    for face in faces:
                        #print "FORCE "+str(face[1])
                        if face[1] > threshold:
                        #face=faces[0]
                            r=face[0]
                            #i=face[0]
                            #draw.rectangle((r[0], r[1],  r[0]+r[2],  r[1]+r[3]), outline="green")
                            #cv.Rectangle(o, ( int(r[0]), int(r[1])),
                            #         (int(r[0]+r[2]), int(r[1]+r[3])),
                            #         cv.RGB(0, 255, 0), 3, 8, 0)
                            cv.Circle(o, (int(r[0]+r[3]/2), int(r[1]+r[3]/2)),r[3]/2,cv.RGB(255, 255, 255), 3, 8, 0)
                            #cv.EllipseBox(o, r, cv.RGB(0, 255, 0))
                            #draw.ellipse((r[0], r[1],  r[0]+r[2],  r[1]+r[3]), fill="green")
                            #draw.ellipse((r[0]+3, r[1]+3,  r[0]+r[2]-3,  r[1]+r[3]-3), fill=None)
                            left=(r[0]+r[2]/2, r[1]+r[3]/2)
                if not self.solo:
                    cv.Flip(grayscale, grayscale, 1)
                    faces = cv.HaarDetectObjects(grayscale, haar, storage, 1.1, 2, cv.CV_HAAR_DO_CANNY_PRUNING, (30, 30))
                    if faces:
                        for face in faces:
                            if face[1] > threshold:
                                r=face[0]
                                cv.Circle(o, (int(640-r[0]-r[3]/2), int(r[1]+r[3]/2)),r[3]/2,cv.RGB(255, 255, 255), 3, 8, 0)
                                right=(640-r[0]-r[2]/2, r[1]+r[3]/2)
                else:
                    right=left
                if left and right and self.paranoic:
                    #print "distancia esquerda:"
                    #print w[left[1]][left[0]]
                    #print "distancia direita:"
                    #print w[right[1]][right[0]]
                    #print heat
                    print str(w[left[1]][left[0]]) + "_"+ str(w[right[1]][right[0]])
                    if w[left[1]][left[0]] - w[right[1]][right[0]] < 50:
                        heat+=1
                        if not self.solo:
                            heat+=10
                        if heat>10:
                            self.emit(QtCore.SIGNAL('save()'))
                            self.disconnect()
                else:
                    heat-=1
                if heat<0:
                    heat=0
            cv.Flip(o,o,1)

            qi=toQImage(numpy.asarray(o),True) #array2PIL(rgb, (640, 480))
            #qi=ImageQt(image.transpose(Image.FLIP_LEFT_RIGHT)) #PILimageToQImage(image)
            self.emit(QtCore.SIGNAL('update(QImage)'), qi)
        return
Example No. 51
from freenect import sync_get_depth as get_depth, sync_get_video as get_video
import cv2
import numpy as np
from matplotlib import pyplot as plt
#camera = cv2.VideoCapture(0)
(img,_) = get_video()
#_,im = camera.read()
#cv2.imwrite('thing.png',camera)
#img = cv2.imread(camera,0)


# thresholding
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
thresh6 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)

titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','ADAPTIVE_THRESH_GAUSSIAN_C']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh6]

for i in xrange(6):
    plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])

plt.show()
# 2d convolution
kernel = np.ones((5,5),np.float32)/25
dst = cv2.filter2D(img,-1,kernel)
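
Note: cv2.adaptiveThreshold requires a single-channel 8-bit image, while get_video() returns a 3-channel RGB frame, so the thresh6 line above should fail as written. Converting to grayscale first would fix it (a sketch):

# Hedged fix sketch: grayscale conversion before adaptive thresholding.
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
thresh6 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)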
Example No. 52
def doloop():
    """
    Primary process loop.
    """
    global depth, rgb, current_target

    current_target = None
    positions = []

    create_windows()

    nothing = lambda x: None
    # cv2.createTrackbar('distance min', 'color', 0, 255, nothing)
    # cv2.createTrackbar('distance max', 'color', 0, 255, nothing)
    cv2.createTrackbar('min', 'color', MIN_A, 500, nothing)
    cv2.createTrackbar('max', 'color', MAX_A, 1000, nothing)
    cv2.createTrackbar('trim', 'color', 0, 200, nothing)
    cv2.createTrackbar('user', 'grid', 0, 640, nothing)
    cv2.createTrackbar('difficulty', 'grid', 0, 4, nothing)
    #cv2.createTrackbar('Top Motor', 'grid', MOTOR1, 1023, nothing)
    #cv2.createTrackbar('Bottom Motor', 'grid', MOTOR2, 1023, nothing)
    cv2.createTrackbar('rotation', 'grid', 90, 180, nothing)

    times = []

    while True:
        #time.sleep(1)
        start = time.time()

        # Trackbar updates
        distance_min = cv2.getTrackbarPos('distance min', 'color')
        distance_max = cv2.getTrackbarPos('distance max', 'color')
        min_a = cv2.getTrackbarPos('min', 'color')
        max_a = cv2.getTrackbarPos('max', 'color')
        trim = cv2.getTrackbarPos('trim', 'color')
        difficulty = cv2.getTrackbarPos('difficulty', 'grid')
        userpos = cv2.getTrackbarPos('user', 'grid')
        if distance_min == -1: distance_min = DISTANCE_MIN
        if distance_max == -1: distance_max = DISTANCE_MAX
        if min_a == -1: min_a = MIN_A
        if max_a == -1: max_a = MAX_A
        if trim == -1: trim = EDGE

        # Get a fresh frame
        (depth, _), (rgb, _) = get_depth(), get_video()
        depth = prepare_depth(depth)

        # Trim frame
        if trim:
            depth = depth[:, trim:depth.shape[1] - trim]
            rgb = rgb[:, trim:rgb.shape[1] - trim]

        # Threshold
        ret, thresh1 = cv2.threshold(depth, distance_min, 255,
                                     cv2.THRESH_TOZERO)
        ret, thresh = cv2.threshold(thresh1, distance_max, 255,
                                    cv2.THRESH_TOZERO_INV)
        if 'd' in windows: cv2.imshow('depth', depth)
        if 't' in windows: cv2.imshow('threshold', thresh)

        # Get target point
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        target = get_contour2(contours, min_a, max_a)
        if target is not None:
            M = cv2.moments(target)
            if M['m00'] == 0: print "0"
            else:
                target_point = (int(M['m10'] / M['m00']),
                                int(M['m01'] / M['m00']))

                x_val = (640.0 / float(target_point[0]) * 255)
                _, radius = cv2.minEnclosingCircle(target)
                #cv2.putText(rgb, str(radius), target_point, cv2.FONT_HERSHEY_SIMPLEX, \
                #1, (0,255,0), 2, cv2.CV_AA)
                cv2.circle(rgb, target_point, 4, (255, 0, 0), -1)
                cv2.drawContours(rgb, [target], -1, (0, 255, 0), 3)

                if userpos == -1: userpos = target_point[0]

        #Clip userpos
        userpos = max(min(2 * userpos - WIDTH / 2, WIDTH), 0)

        if difficulty:
            current_target = pick_target(userpos, difficulty)
            current_speed = pick_speed(userpos, difficulty)

        # Keep a list of the detected positions
        positions.append(current_target)
        if len(positions) == 6:
            # select and send target
            send_target(select_target(positions, difficulty))
            positions = []

        if 'g' in windows:
            cv2.imshow('grid', drawTable(userpos, current_target))

        if 'c' in windows: cv2.imshow('color', video_cv(rgb))
        char = cv2.waitKey(10)
        if char == 27:
            cv2.destroyAllWindows()
            break
        elif char == 115:
            cv2.imwrite('capture.png', video_cv(rgb))
            cv2.namedWindow('saved')
            cv2.imshow('saved', video_cv(rgb))
        elif char == ord('r'):
            if serial_out:
                serial_out.close()
                serial_out.open()

        end = time.time()
        times.append(end - start)
Example No. 53
    cv_video, cv_depth = (cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, cv.CV_16S),
                          cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, cv.CV_16S))

    cv_hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
    track_window = None
    
    depth, near, far=(600, 600, 0)

    while True:
        for e in pygame.event.get():
            if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
                exit(0)
            if e.type == KEYDOWN and e.key == K_r:
                track_window = None
        
        (np_video, _), (np_depth, _) = get_video(), get_depth()

        np_depth = 255 * numpy.logical_and(np_depth >= depth - near,
                                           np_depth <= depth + far)
        np_depth = numpy.dstack((np_depth, np_depth, np_depth)).astype(numpy.uint8).reshape(size[1], size[0], 3)

        cv.SetData(cv_depth, np_depth.tostring())
        cv.SetData(cv_video, np_video.tostring())
        #
        cv_hsv = cv.CreateImage(cv.GetSize(cv_video), 8, 3)
        cv.CvtColor(cv_video, cv_hsv, cv.CV_RGB2HSV)
        cv_hue = cv.CreateImage(cv.GetSize(cv_video), 8, 1)
        cv.Split(cv_hsv, cv_hue, None, None, None)
        cv_backproject = cv.CreateImage(cv.GetSize(cv_video), 8, 1)
        cv.CalcArrBackProject([cv_hue], cv_backproject, cv_hist )
        #
Example No. 54
def save_img():
    global rgb
    (rgb,_) = get_video()
    cv.SaveImage('/tmp/anglerfish-img.jpg', cv.fromarray(np.array(rgb)))
Example No. 55
def qualtest(integer):
    warm_up, _ = get_depth()
    time.sleep(5)
    cf0, _ = get_depth()
    all_data = cf0[np.newaxis, ...]
    ts0 = time.time()
    ts = ts0
    count = 0
    while ts - ts0 < 60:
        cf, _ = get_depth()
        ts = time.time()
        if ts > count * 0.4 + ts0:
            all_data = np.vstack([all_data, cf[np.newaxis, ...]])
            count += 1
    #all_data[all_data == 0] = np.nan
    np.save('bower_test_tank42_depth' + str(integer), all_data)


integer = 1
warm_up, _ = get_depth()
warm_up, _ = get_video()
time.sleep(5)
Color = get_video()
time.sleep(2)
np.save('bower_test_tank42_color', Color)
while integer <= 20:
    ts = time.time()
    qualtest(integer)
    integer += 1
    to = time.time()
    time.sleep(max(0, 300 - (to - ts)))  # avoid passing a negative value to sleep