Code Example #1
    def get_depth(self):
        result = freenect.sync_get_depth()
        if result is not None:
            depth, _ = result
            # normalize the 11-bit raw values into [0, 1)
            return depth.astype(numpy.float32) / 2048.
        return None
Code Example #2
def warmup(time):
    """
    At startup, the kinect depth sensor needs to go through some range
    calibration and focusing. This function accepts a number of seconds to wait
    and grabs frames during that window to "warm up" the sensor.
    """
    dt = .05
    iterations = int(np.around(time / dt))
    for i in range(iterations):
        freenect.sync_get_depth()
        if i % 20 == 0:
            print("Warming up " + str(i/20))
        sleep(dt)
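A hypothetical way to call warmup() before the first real capture (the two-second figure and the surrounding lines are illustrative, not from the source project):

import freenect

warmup(2.0)  # let the sensor settle for ~2 s (40 frames at dt = .05)
depth, _ = freenect.sync_get_depth()  # first frame used for actual processing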
Code Example #3
def get_depth(y, x):
    # nominal Kinect depth-camera intrinsics
    cx = 314.0137
    cy = 247.90585
    fx = 591.1027
    fy = 590.557

    array, _ = freenect.sync_get_depth()
    array = array.astype(np.float32)
    depth_value = array[y, x]

    # common libfreenect approximation: raw 11-bit disparity -> meters
    depth_in_meters = 1.0 / (depth_value * -0.0030711016 + 3.3309495161)

    # back-project the pixel through the pinhole model
    z = depth_in_meters
    wx = (x - cx) * z / fx
    wy = (y - cy) * z / fy

    real_values = [z, -wx, -wy]
    print(z, wx, wy)
    return real_values
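The same raw-to-meters conversion extends to the whole frame at once. A minimal vectorized sketch under the same assumptions (the constants and intrinsics are the ones used above; get_point_cloud itself is illustrative, not from any listed project):

import freenect
import numpy as np

def get_point_cloud():
    cx, cy, fx, fy = 314.0137, 247.90585, 591.1027, 590.557
    raw, _ = freenect.sync_get_depth()
    # common libfreenect approximation: raw 11-bit disparity -> meters
    z = 1.0 / (raw.astype(np.float32) * -0.0030711016 + 3.3309495161)
    # back-project every pixel through the pinhole model
    u, v = np.meshgrid(np.arange(raw.shape[1]), np.arange(raw.shape[0]))
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.dstack((x, y, z))  # 480x640x3 array of metric coordinates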
Code Example #4
def getDepthVal(x, y):  #get depth at a specific point

    if (x > width or x < 0 or y > height
            or y < 0):  #check the request is inside the drone zone
        print("out of bounds")
        return 2047

    box_half_width = int((depth_boxwidth) / 2)
    box_half_height = int((depth_boxheight) / 2)

    depth, timestamp = freenect.sync_get_depth()  #get image
    depth = crop_to_zone(depth)  #crop to drone zone

    #since the depth and rgb images don't align exactly (because the cameras are slightly offset), define a region around the approximate drone location
    drone_area = depth[y - box_half_height:y + box_half_height,
                       x - box_half_width:x + box_half_width]

    val = 2047  # 2047 = failed
    # make sure the drone zone is larger than 7x7 for the Gaussian blur
    if drone_area.shape[0] > 7 and drone_area.shape[1] > 7:
        # blur the pixel values a bit
        drone_area = cv2.GaussianBlur(drone_area, (7, 7), 0)

        # find the extrema of the image - the farthest and closest points
        (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(drone_area)

        # extra processing in case the image needs to be displayed
        np.clip(drone_area, 0, 2**10 - 1, drone_area)
        drone_area >>= 2
        drone_area = drone_area.astype(np.uint8)

        #cv2.imshow("depth", drone_area) #display the depth image if wanted

        val = z_groundheight - minVal  #take the minimum distance (closest to the camera, so it must be the drone) and turn it into a sensible height value
    return val
Code Example #5
def get_depth_arrayEncode():
    array, timestamp = freenect.sync_get_depth()
    # clip values to the usable 245-500 band, then shift so they fit in uint8
    np.clip(array, 245, 500, array)
    array = array - 245
    array = array.astype(np.uint8)
    return array
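Since the encoder maps raw values 245-500 onto the full uint8 range, a hypothetical decoder (not from the source project) only needs to undo the offset:

import numpy as np

def decode_depth_array(encoded):
    # values outside [245, 500] were saturated at encode time and stay clipped
    return encoded.astype(np.uint16) + 245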
Code Example #6
File: methods.py, Project: r2apu/Kinect-Instrument
def def_plane(threshold,current_depth):
	
	depth = freenect.sync_get_depth()[0]	
	depth = 255 * np.logical_and(depth >= current_depth, depth <= current_depth + threshold)
	depth = depth.astype(np.uint8)
	
	return depth
Code Example #7
def show_ent_depth():
    depth = freenect.sync_get_depth()[0]
    depth = depth.astype(np.uint8)

    #~ print "depth "+str(np.shape(depth))

    return depth
Code Example #8
    def __init__(self, dummy=False, mirror=True):
        self.__class__._instances.append(weakref.proxy(self))
        self.id = next(self._ids)
        self.resolution = (640, 480)
        self.dummy = dummy
        self.mirror = mirror

        if not self.dummy:
            print("looking for kinect...")
            self.ctx = freenect.init()
            self.dev = freenect.open_device(self.ctx, self.id)
            print(self.id)
            freenect.close_device(
                self.dev)  # TODO Test if this has to be done!

            self.angle = None
            # grab the first depth frame now (the first call takes much longer
            # than the following ones)
            self.depth = freenect.sync_get_depth(
                index=self.id, format=freenect.DEPTH_MM)[0]
            self.filtered_depth = None
            print("kinect initialized")
        else:
            print(
                "dummy mode. get_frame() will return a synthetic depth frame, other functions may not work"
            )
Code Example #9
File: kinect.py, Project: LuoXin0826/Armbot
    def __init__(self):
        self.currentVideoFrame = np.array([])
        self.currentDepthFrame = np.array([])
        if freenect.sync_get_depth() is None:
            self.kinectConnected = False
        else:
            self.kinectConnected = True
        self.calibrated = False

        # mouse clicks & calibration variables
        self.depth2rgb_affine = np.float32([[1, 0, 0], [0, 1, 0]])
        self.kinectCalibrated = False
        self.last_click = np.array([0, 0])
        self.new_click = False
        self.rgb_click_points = np.zeros((5, 2), int)
        self.depth_click_points = np.zeros((5, 2), int)
        self.grab_click_point = np.zeros((1, 2), int)
        self.place_click_point = np.zeros((1, 2), int)
        self.affine = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
        self.clickandplace = np.zeros((2, 2), int)
        #self.inverseK = np.array([0, 0, 100, np.pi/2])
        """ Extra arrays for colormaping the depth image"""
        self.DepthHSV = np.zeros((480, 640, 3)).astype(np.uint8)
        self.DepthCM = np.array([])
        """ block info """
        self.block_contours = np.array([])
Code Example #10
 def _acquire_frame(self):
     frame, _ = freenect.sync_get_depth()
     # return success if frame size is valid
     if frame is not None:
         return (True, frame)
     else:
         return (False, frame)
Code Example #11
File: demo_cv_threshold2.py, Project: pkropf/exuro
def show_depth():
    global threshold
    global current_depth
    global closest

    depth, timestamp = freenect.sync_get_depth()
    depthm = np.ma.masked_values(depth, 2047)
    amin = depthm.argmin()
    y, x = divmod(amin, depthm.shape[1])
    points.pop()
    points.insert(0, (x, y))
    closest = (sum(z[0] for z in points) / max_points, 
               sum(z[1] for z in points) / max_points)

    #print closest
    depth = 255 * np.logical_and(depth >= current_depth - threshold,
                                 depth <= current_depth + threshold)
    depth = depth.astype(np.uint8)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
    cv.SetData(image, depth.tostring(),
               depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage('Depth', image)
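show_depth() relies on module-level state that the excerpt omits. A plausible setup, purely illustrative (the names match the globals used above; the values are guesses):

threshold = 100      # half-width of the displayed depth band, in raw units
current_depth = 600  # centre of the displayed depth band
max_points = 10      # length of the smoothing history
points = [(0, 0)] * max_points
closest = (0, 0)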
Code Example #12
File: master.py, Project: ccxcxd/kinnectproj
def showlive():
  global count, frames
  cv.NamedWindow('Depth')
  cv.NamedWindow('Video')
  cv.MoveWindow('Depth', 100, 100)
  cv.MoveWindow('Video', 745, 100)

  print('Press ESC in window to stop')
  print('Press Space to convert current to PLY')
  print('Press k to stop live capture')

  while 1:
      imgdepth = fc.depth_cv(freenect.sync_get_depth()[0])
      imgvideo = fc.video_cv(freenect.sync_get_video()[0])

      cv.ShowImage('Depth', imgdepth)
      cv.ShowImage('Video', imgvideo)

      inp = cv.WaitKey(100)

      if inp != -1:
        inp = chr(inp % 1048576)
        if inp == ' ': # space for capture and convert
          print('capturing images')
          captureimage()
          print('done capturing')
        elif inp.isdigit():
          frames = ord(inp) - ord('0')
          print('setting the number of frames to capture to %d' % frames)
        elif inp == 'k':
          break
      count = count + 1

  cv.DestroyWindow('Depth')
  cv.DestroyWindow('Video')
Code Example #13
File: master.py, Project: ccxcxd/kinnectproj
def captureimage():
  global frames

  depthframes = np.zeros((frames, rownum, colnum))
  rgbframes = np.zeros((frames, rownum, colnum, 3))

  for i in range(frames):
    depthframes[i] = freenect.sync_get_depth()[0]
    rgbframes[i] = freenect.sync_get_video()[0]
    time.sleep(0.05)

  arargb   = fc.robustavg(rgbframes)
  aradepth = fc.robustavg(depthframes)
  serial = time.time()

  cv.SaveImage('img/depth%d.png' % serial, fc.depth_cv(aradepth.astype(int)))
  cv.SaveImage('img/video%d.png' % serial, fc.video_cv(arargb.astype(np.uint8)))
  #f = open('poly/poly%d.ply' % serial,'w')
  
    
  meterdepth = fc.meter_depth(aradepth)
  #newrgb2 = fc.matchrgb2(meterdepth, arargb)
  newrgb = fc.matchrgb(meterdepth, arargb)
  
  #meterdepth = ndi.gaussian_filter(fc.meter_depth(aradepth), [sigma, sigma])
  
  meterdepth[meterdepth > 1.5] = -1.
  meterdepth[meterdepth < 0.5] = -1.
  scipy.io.savemat('data/aligned%d.mat' % serial, {'depth':meterdepth, 'rgb':newrgb})
Code Example #14
    def getDepthMat():

        depth, timestamp = freenect.sync_get_depth()

        depth = depth * np.logical_and(depth > 500, depth < 1024)
        #depth=depth*0.2480544747081712

        np.clip(depth, 0, 2**10 - 1, depth)
        depth >>= 2
        depth = depth.astype(np.uint8)
        #edges = cv2.Canny(depth, threshold1=100, threshold2=100)

        depth = cv2.medianBlur(depth, 17)
        depth = cv2.bilateralFilter(depth, 9, 75, 75)
        frame = depth
        laplacian = cv2.Laplacian(frame, cv2.CV_64F)

        #cv2.imshow('Canny',edges)
        cv2.imshow('Original', frame)
        cv2.imshow('laplacian', laplacian)

        depth = cv2.resize(depth, (400, 300), interpolation=cv2.INTER_LINEAR)
        ret, jpeg = cv2.imencode('.jpg', depth, [cv2.IMWRITE_JPEG_QUALITY, 90])
        return jpeg.tobytes()
Code Example #15
def actual_width_in_mm(lb, lt, rb, rt, cxr, cxl):
    """
    * Function Name:actual_width_in_mm()
    * Input:	    co-ordinates of left bottom, left top, right bottom, right top,
                    right contour centroid, left contour centroid
    * Output:		returns actual width of the door
    * Logic:		It takes the actual depth and using filters the black noise spaces are made white
                    The 20 pixels of the area of left and right edges are processed.
                    the minimum value in them is found and the depth is that value.
                    Using pixel knowledge we find the angle and then using cosine rule
                    we find the actual width of the door.
    * Example Call:	actual_width_in_mm(lb, lt, rb, rt, cxr, cxl)
    """
    a = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
    a /= 30.0
    a = a.astype(np.uint8)
    ret, mask = cv2.threshold(a, 1, 255, cv2.THRESH_BINARY_INV)
    ad = a + mask
    pts1 = np.float32([[lt[0] - 30, lt[1]], [lt[0], lt[1]], [lb[0] - 30, lb[1]], [lb[0], lb[1]]])
    pts2 = np.float32([[0, 0], [30, 0], [0, lb[1] - lt[1]], [30, lb[1] - lt[1]]])
    m = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(ad, m, (30, lb[1] - lt[1]))
    left_depth = np.amin(dst) * 30
    pts1 = np.float32([[rt[0], rt[1]], [rt[0] + 30, rt[1]], [rb[0], rb[1]], [rb[0] + 30, rb[1]]])
    pts2 = np.float32([[0, 0], [30, 0], [0, rb[1] - rt[1]], [30, rb[1] - rt[1]]])
    m = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(ad, m, (30, rb[1] - rt[1]))
    right_depth = np.amin(dst) * 30
    pixel_width = cxr - cxl
    angle = (pixel_width / 640.0) * (57 / 180.0) * math.pi
    width = (left_depth * left_depth) + (right_depth * right_depth) - (2 * left_depth * right_depth * math.cos(angle))
    width = math.sqrt(width)
    return width
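The cosine-rule step from the docstring, isolated as a sketch (door_width_mm and its parameters are illustrative, not part of the project; 57 degrees is the Kinect's nominal horizontal field of view, as used above):

import math

def door_width_mm(left_depth, right_depth, pixel_width,
                  h_fov_deg=57.0, frame_width=640):
    # angle subtended by the two door edges, from their pixel separation
    theta = (pixel_width / float(frame_width)) * math.radians(h_fov_deg)
    # law of cosines: w^2 = l^2 + r^2 - 2*l*r*cos(theta)
    return math.sqrt(left_depth ** 2 + right_depth ** 2
                     - 2.0 * left_depth * right_depth * math.cos(theta))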
Code Example #16
File: hand_recog_test.py, Project: nneonneo/pandt
def disp_thresh(lower, upper, show_masked_rgb=True):
  depth, timestamp = freenect.sync_get_depth()
  min_depth = depth.min()
  video,_ = freenect.sync_get_video()

  if show_masked_rgb:
    video = video.astype(np.uint8)
    depthmask = (255*np.logical_and(depth>lower,depth<upper)).reshape(480,640,1)
    depthmask = depthmask.astype(np.uint8)
    masked_video = video & depthmask
    #print reduce(lambda count, curr: curr>0 and count+1 or count,masked_video.flatten(),0)
    cv.ShowImage('RGB',frame_convert.video_cv(masked_video.reshape(480,640,3)))

  depth = 255 * np.logical_and(depth > lower, depth < upper)
  depth = depth.astype(np.uint8)
  image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                               cv.IPL_DEPTH_8U,
                               1)
  cv.SetData(image, depth.tostring(),
             depth.dtype.itemsize * depth.shape[1])

  canny = doCanny(image,150.0,200.0,7)
  templates = template_match(image,template)
  smoothed = smooth(canny)
  cv.ShowImage('Depth', image)
  return depth,canny,min_depth
Code Example #17
def get_depth_snapshot():
    array, _ = freenect.sync_get_depth()
    ts = datetime.datetime.now()
    filenamed = "{}DEPTH11bit".format(ts.strftime("%H_%M_%S"))
    print('[INFO] Saving depth data at ' + filenamed)
    np.save(filenamed, array, allow_pickle=False)
    threading.Timer(60, get_depth_snapshot).start()
Code Example #18
File: kinect.py, Project: choochootrain/kinect-runner
  def update(self):
    depth, timestamp = freenect.sync_get_depth()
    depth = 255 * np.logical_and(depth >= self.current_depth - self.threshold,
                                 depth <= self.current_depth + self.threshold)
    depth = depth.astype(np.uint8)

    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]), cv.IPL_DEPTH_8U, 1)
    cv.SetData(image, depth.tostring(), depth.dtype.itemsize * depth.shape[1])

    cropped = cv.GetSubRect(image, (200, 75, 240, 200))

    features = cv.GoodFeaturesToTrack(cropped, None, None, 7, 0.01, 10, None, 3, 0, 0.04)

    # accumulate the tracked feature coordinates (not idiomatic Python)
    i = 0
    x = 0
    y = 0
    for (a,b) in features:
      i += 1
      x += a
      y += b

    if i != 0:
      if abs(x-self.lx) < self.jitter or x == 0:
        x = self.lx
      if abs(y-self.ly) < self.jitter or y == 0:
        y = self.ly

    self.lx = x
    self.ly = y

    return cropped
Code Example #19
def actual_width_in_mm(lb, lt, rb, rt, cxr, cxl):
    a = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
    a = a / 30.0
    a = a.astype(np.uint8)
    ret, mask = cv2.threshold(a, 1, 255, cv2.THRESH_BINARY_INV)
    ad = a + mask
    pts1 = np.float32([[lt[0] - 30, lt[1]], [lt[0], lt[1]],
                       [lb[0] - 30, lb[1]], [lb[0], lb[1]]])
    pts2 = np.float32([[0, 0], [30, 0], [0, lb[1] - lt[1]],
                       [30, lb[1] - lt[1]]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(ad, M, (30, lb[1] - lt[1]))
    left_depth = np.amin(dst) * 30
    pts1 = np.float32([[rt[0], rt[1]], [rt[0] + 30, rt[1]], [rb[0], rb[1]],
                       [rb[0] + 30, rb[1]]])
    pts2 = np.float32([[0, 0], [30, 0], [0, rb[1] - rt[1]],
                       [30, rb[1] - rt[1]]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(ad, M, (30, rb[1] - rt[1]))
    right_depth = np.amin(dst) * 30
    pixel_width = cxr - cxl
    angle = (pixel_width / 640.0) * (57 / 180.0) * math.pi
    width = (left_depth * left_depth) + (right_depth * right_depth) - (
        2 * left_depth * right_depth * math.cos(angle))
    width = math.sqrt(width)
    return width
Code Example #20
def read_frame() -> Tuple[bool, np.ndarray]:
    frame, timestamp = freenect.sync_get_depth()
    if frame is None:
        return False, None
    frame = np.clip(frame, 0, 2**10 - 1)
    frame >>= 2
    return True, frame.astype(np.uint8)
Code Example #21
def mark_closest():
    depth, timestamp = freenect.sync_get_depth()

    # raw value 2047 means "no reading"; mask it so argmin finds the truly
    # closest valid pixel (argmax would pick the farthest/invalid one)
    masked = np.ma.masked_equal(depth, 2047)
    loc = np.unravel_index(masked.argmin(), depth.shape)
    # draw the marker on a BGR copy; a single-channel depth map cannot hold
    # an RGB triple
    img8 = (np.clip(depth, 0, 2**10 - 1) >> 2).astype(np.uint8)
    img = cv2.cvtColor(img8, cv2.COLOR_GRAY2BGR)
    img[loc[0] - 5:loc[0] + 5, loc[1] - 5:loc[1] + 5] = (0, 255, 0)
    cv2.imshow('Depth', img)
Code Example #23
File: kinectviewer.py, Project: aidanok/kivy
 def run(self):
     q = self.queue
     while not self.quit:
         depths = freenect.sync_get_depth()
         if depths is None:
             continue
         q.appendleft(depths)
Code Example #24
def get_depth(flag):
    global framesd, framesr
    loopnum = 0
    while True:
        print(loopnum)
        # depth is a numpy array with the raw depth value of each captured pixel
        depth, _ = freenect.sync_get_depth()
        # rgbframes is a numpy array with the rgb value of each captured pixel
        rgbframes, _ = freenect.sync_get_video()
        rgbframes = cv2.cvtColor(rgbframes, cv2.COLOR_RGB2BGR)
        #print(depth)
        depth_mask = np.where(depth < 650, 255, 0).astype(np.uint8)
        #cv2.imshow('Segmented Image', depth_mask)
        cv2.imshow('RGB', rgbframes)
        # vectorized check: did any pixel fall below the 650 depth threshold?
        if np.any(depth_mask == 255):
            flag = 1
            cv2.waitKey(100)
        if flag == 1:
            framesd.append(depth_mask)
            framesr.append(rgbframes)
            loopnum = loopnum + 1
            cv2.waitKey(100)
            if (loopnum == 20):
                break
    print('$$$$$')
Code Example #25
    def get_data(self):
        while True:
            (depth, _) = freenect.sync_get_depth()
            (rgb  , _) = freenect.sync_get_video()

            depth8 = self._pretty_depth(depth)
            yield depth, depth8, rgb
Code Example #26
    def getDepthMap(self):
        depth, timestamp = freenect.sync_get_depth()

        np.clip(depth, 0, 2**10 - 1, depth)
        depth >>= 2
        depth = depth.astype(np.uint8)
        return depth
Code Example #27
 def getFrame(self):
     self.when = time.time()
     depth,_ = freenect.sync_get_depth()
     rgb,_ = freenect.sync_get_video()
     # Retain a copy otherwise we crash later
     self.depth = numpy.copy(depth)
     self.rgb = numpy.copy(rgb)
Code Example #28
 def frame(self):
     """ Grab a frame and set it as self._frame.
     Modified by Graham Jones to use libFreenect for kinect depth sensor
     """
     if self._enteredFrame and self._frame is None:
         if (self._uselibFreenect):
             if (self.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                 imgRGB, timestamp = freenect.sync_get_video()
                 imgBGR = imgRGB.copy()  # create a new image by copying the original
                 filters.bgr2rgb(imgRGB, imgBGR)
                 self._frame = imgBGR
             elif (self.channel == depth.CV_CAP_OPENNI_DEPTH_MAP):
                 depthMap, timestamp = freenect.sync_get_depth()
                 #depthMap = depthMap.astype(numpy.uint16)
                 depthMap = depthMap.astype(numpy.uint8)
                 self._frame = depthMap
             else:
                 print "Error - Unrecognised channel %d." % self.channel
                 self._frame = None
         else:
             retVal, self._frame = self._capture.retrieve(
                 channel=self.channel)
             self._nFrames = self._nFrames + 1
             #print retVal, type(self._frame),
             # self._frame.size, frameCount, self._nFrames
             self._frame = cv2.cvtColor(self._frame, cv2.COLOR_BGR2GRAY)
             #self._frame = self._frame.astype(numpy.uint8)
     else:
         pass
         #print self._enteredFrame, self._frame
     return self._frame
Code Example #29
def get_img(mode):
    # This was intended to inhibit the stream warnings to stdout, but did not work.
    #text_trap = io.StringIO()
    #sys.stderr = text_trap
    if (mode == IMG_RGB):
        frame = freenect.sync_get_video()[0]  # gets the Kinect RGB image
        fgMask = backSub.apply(frame, learningRate=-1)
        ret, fgMask = cv2.threshold(fgMask, 127, 255, cv2.THRESH_BINARY)
        #fgMask = cv2.erode(fgMask, kernel, iterations = 1) # morphological erode with 3x3
        fgMask = cv2.morphologyEx(
            fgMask, cv2.MORPH_CLOSE,
            kernel_big)  # closes gaps smaller than 9x9 pixels
    elif (mode == IMG_DEPTH):
        frame = freenect.sync_get_depth()[0]  # gets the Kinect depth image
        frame = 255 * np.logical_and(frame >= DEPTH - THRESHOLD,
                                     frame <= DEPTH + THRESHOLD)
        frame = frame.astype(np.uint8)
        fgMask = backSub_depth.apply(frame, learningRate=-1)
        ret, fgMask = cv2.threshold(fgMask, 127, 255, cv2.THRESH_BINARY)
        fgMask = cv2.erode(fgMask, kernel,
                           iterations=1)  # morphological erode with 3x3
        fgMask = cv2.morphologyEx(
            fgMask, cv2.MORPH_CLOSE,
            kernel_big)  # closes gaps smaller than 9x9 pixels

    # Problem: this function gives us sometimes only one blob instead of two
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(fgMask)
    # This didn't seem to work:
    #for label in range(np.max(labels)):
    #    if 2000 < stats[label, cv2.CC_STAT_AREA] < 900:
    #       labels[labels==label]=0

    # Reset output to stdout:
    #sys.stderr = sys.__stderr__
    return ret, frame, fgMask, labels, stats
Code Example #30
File: kinect.py, Project: zli91/armlab-F18
    def __init__(self):
        self.currentVideoFrame = np.array([])
        self.currentDepthFrame = np.array([])
        self.convert_to_world = np.array([])
        self.convert_to_cam = np.array([])
        self.cubeContours = np.array([])
        self.contoursByDepth = np.array([])
        if freenect.sync_get_depth() is None:
            self.kinectConnected = False
        else:
            self.kinectConnected = True

        # mouse clicks & calibration variables
        self.depth2rgb_affine = np.float32([[1, 0, 0], [0, 1, 0]])
        self.kinectCalibrated = False
        self.last_click = np.array([0, 0])
        self.new_click = False
        self.rgb_click_points = np.zeros((5, 2), int)
        self.depth_click_points = np.zeros((5, 2), int)
        self.worldHeight = 942.0
        self.x_off = 304.88  # distances from center of the bottom of ReArm to world origin
        self.y_off = 301.5

        self.blockDetected = False

        self.blockMessage = False

        self.detectDepth = []
        self.detectedCubeColor = []
        self.cubeOrient = []
        """ Extra arrays for colormaping the depth image"""
        self.DepthHSV = np.zeros((480, 640, 3)).astype(np.uint8)
        self.DepthCM = np.array([])
        """ block info """
        self.block_contours = np.array([])
Code Example #31
def get_depth():
    #data = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    data = freenect.sync_get_depth()[0]
    data = data.astype(np.uint8)
    img = cv2.imencode('.JPEG', data)[1].tobytes()
    enData = base64.b64encode(img).decode('UTF-8')
    return enData
Code Example #32
File: depthmap.py, Project: jackromo/gestures
def getDepthMap():
    depth, timestamp = freenect.sync_get_depth()
    # Decrease all values in depth map to within 8 bits to be uint8
    depth = np.clip(depth, 0, 2**10 - 1)
    depth >>= 2
    depth = cv2.GaussianBlur(depth, (5,5), 0)
    return depth.astype(np.uint8)
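The clip-and-shift used here (and in several other examples) works because clipping caps the values at 10 bits, and dropping the two low bits maps 0..1023 onto 0..255, which fits uint8:

assert (2**10 - 1) >> 2 == 255  # largest clipped value lands exactly at the uint8 maximum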
Code Example #33
File: kinect.py, Project: torshepherd/armlab-f19
    def __init__(self):
        self.currentVideoFrame = np.array([])
        self.currentDepthFrame = np.array([])
        self.currentDepthMeter = np.array([])
        if freenect.sync_get_depth() is None:
            self.kinectConnected = False
        else:
            self.kinectConnected = True

        # mouse clicks & calibration variables
        self.depth2rgb_affine = np.float32([[1, 0, 0], [0, 1, 0]])
        self.kinectCalibrated = False
        self.last_click = np.array([0, 0])
        self.new_click = False
        self.rgb_click_points = np.zeros((5, 2), int)
        self.depth_click_points = np.zeros((5, 2), int)
        self.calib_mat, self.dist_coeff = self.loadCameraCalibration(
            'calibration.cfg')
        self.h = 480
        self.w = 640
        self.fixed_coords = np.float32([[304, 304], [304, -304], [-304, -304],
                                        [-304, 304]])
        self.extrinsic = np.float32([[1, 0, 0], [0, 1, 0]])
        # x: 60.7; y: 60.8
        """ Extra arrays for colormaping the depth image"""
        self.DepthHSV = np.zeros((480, 640, 3)).astype(np.uint8)
        self.DepthCM = np.array([])
        """ block info """
        self.block_contours = np.array([])
Code Example #34
File: demo_cv_reduce.py, Project: pkropf/exuro
def show_depth():
    global threshold
    global current_depth

    depth, timestamp = freenect.sync_get_depth()
    depth = 255 * np.logical_and(depth >= current_depth - threshold, depth <= current_depth + threshold)
    depth = depth.astype(np.uint8)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]), cv.IPL_DEPTH_8U, 1)
    cv.SetData(image, depth.tostring(), depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage("Depth", image)

    print "depth:", type(depth), dir(depth)
    print ""

    print "image:", type(image), dir(image)
    print ""

    x = cv.CreateMat(image.height, image.width, cv.IPL_DEPTH_8U)
    cv.Convert(image, x)

    smaller = cv.CreateMat(image.height / 10, image.width / 10, cv.IPL_DEPTH_8U)
    print "smaller:", type(smaller), dir(smaller)
    print ""

    cv.Resize(image, smaller)
    cv.ShowImage("Reduced", smaller)
Code Example #35
	def get_depth(self):
		if self.depth == 'KINECT':
			frame, timestamp = freenect.sync_get_depth()
			return frame
		else:
			ret, img = self.depth.read()
			return img
Code Example #37
def getDepthMat():

    depth, timestamp = freenect.sync_get_depth()
    depth = cv2.resize(depth, (360, 240))

    #depth = depth * np.logical_and(depth > 500, depth < 1024)
    #depth=depth*0.2480544747081712

    np.clip(depth, 0, 2**10 - 1, depth)
    depth >>= 2
    depth = depth.astype(np.uint8)

    #depth = depth.astype(np.uint8)
    #edges = cv2.Canny(depth, threshold1=100, threshold2=100)

    depth = cv2.medianBlur(depth, 5)
    #Pdepth = cv2.bilateralFilter(depth,9,75,75)
    #frame=depth
    #

    #cv2.imshow('Canny',edges)
    #cv2.imshow('Original',depth)
    #cv2.imshow('laplacian',laplacian)

    return np.array(depth)
Code Example #38
File: kinect.py, Project: PascalLeMerrer/Bug-Arena
def get_buffers():
    '''get_buffers(): returns a KinectData object
    KinectData members:
     - real_kinect (boolean) (true if the data comes from a real kinect)
     - rgb array
     - depth array

     (buffers = numpy arrays)

     The input is taken from a file if the kinect is missing or the library is
     not present. No caching is done.
     '''
    found_kinect = False

    if freenect: # module has been imported
        try:
            # Try to obtain Kinect images.
            (depth, _), (rgb, _) = freenect.sync_get_depth(), freenect.sync_get_video()
            found_kinect = True
        except TypeError:
            pass

    if found_kinect:
        return KinectData(real_kinect=True, rgb=rgb, depth=depth)
    else:
        # Use local data files.
        return _DEFAULT_DATA
Code Example #39
 def do_create(self, offset, length):
     depth, timestamp = freenect.sync_get_depth()
     databuf = numpy.getbuffer(depth)
     self.buf = gst.Buffer(databuf)
     self.buf.timestamp = 0
     self.buf.duration = pow(2, 63) -1
     return gst.FLOW_OK, self.buf
Code Example #41
    def get_depth(self):
        """
        **SUMMARY**

        This method returns the Kinect depth image.

        **RETURNS**

        The Kinect's depth camera image as a grayscale image.

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        ...     d = k.get_depth()
        ...     img = k.get_image()
        ...     result = img.side_by_side(d)
        ...     result.show()
        """

        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        depth = freenect.sync_get_depth(self.device_number)[0]
        self.capture_time = time.time()
        np.clip(depth, 0, 2 ** 10 - 1, depth)
        depth >>= 2
        depth = depth.astype(np.uint8).transpose()

        return Factory.Image(depth, camera=self)
Code Example #42
File: test.py, Project: pepe-roni/codedaysfwinter2016
def update():
    global projpts, rgb, depth
    global hue,sat,val
    global imgHsv,imgHue,imgSat,imgVal
    range2=25.0
    depth,_ = freenect.sync_get_depth()
    rgb,_ = freenect.sync_get_video()
    #convert numpy to opencv image
    img = cv.fromarray(rgb)

    #MAIN IMAGE PROCESSING WORK IN OPENCV HERE
    cv.CvtColor(img, imgHsv, cv.CV_BGR2HSV)
    cv.Split(imgHsv, imgHue, imgSat, imgVal, None)
    cv.InRangeS(imgHue,(hue-range2, 0, 0),(hue+range2, 0, 0),imgHue)
    cv.InRangeS(imgSat,(sat, 0, 0),(255, 0, 0),imgSat)
    cv.InRangeS(imgVal,(val, 0, 0),(255, 0, 0),imgVal)
    cv.And(imgHue,imgSat,imgBin)
    cv.And(imgBin,imgVal,imgBin)
    cv.Erode(imgBin,imgBin,None)
    cv.Dilate(imgBin,imgBin,None)
    cv.CvtColor(imgBin, imgOut, cv.CV_GRAY2BGR)
    cv.ShowImage("Binary",imgOut)
    #FINISH IMAGE PROCESSING
    #return to numpy array
    rgb = np.asarray(imgOut)
    q = depth.astype(np.uint16)
    X,Y = np.meshgrid(range(IMGWIDTH),range(IMGHEIGHT))
    d = 1
    projpts = calib.depth2xyzuv(q[::d,::d],X[::d,::d],Y[::d,::d])
Code Example #43
File: test.py, Project: pepe-roni/codedaysfwinter2016
    def run ( self ):
        #set up opencv windows
        cv.NamedWindow("Camera", 1)
        cv.NamedWindow("Binary", 1)
        cv.NamedWindow("Settings", 1)

        #set up sliders
        cv.CreateTrackbar("Hue", "Settings", hue, 180, on_hueTrackbar)
        cv.CreateTrackbar("Sat", "Settings", sat, 255, on_satTrackbar)
        cv.CreateTrackbar("Val", "Settings", val, 255, on_valTrackbar)

        #run a blocking while loop to capture depth and rgb to opencv window
        while 1:
            #pull in camera data
            (depth,_),(rgb,_)=freenect.sync_get_depth(),freenect.sync_get_video()
            depth=depth.astype(np.uint8)

            h1, w1 = depth.shape[:2]
            h2, w2 = rgb.shape[:2]
            maxHeight= max(h1,h2)
            vis = np.zeros((maxHeight, w1+w2), np.uint8)
            vis2 = np.zeros((h2,w2), np.uint8)
            cv.CvtColor(cv.fromarray(rgb), cv.fromarray(vis2), cv.CV_BGR2GRAY)

            #display in a single window
            vis[:maxHeight, :w1] = depth
            vis[:maxHeight, w1:w1+w2] = vis2
            cv.ShowImage("Camera",cv.fromarray(vis))
            cv.WaitKey(100)
Code Example #44
File: main.py, Project: nmagerko/pose
def GenerateCloud():
    global clouds
    cloudCount = len(clouds)
    (depth,_) = freenect.sync_get_depth()
    # Collects 4800 points/frame
    worldCoordinates = np.arange(14400, dtype = np.float64).reshape(60, 80, 3)
    for i in range(0, 480, 8):
        for j in range(0, 640, 8):
            depthValue = depth[i, j]
            if depthValue < 2047:
                # if the depth value is valid, convert it to world coordinates
                worldCoordinates[i // 8, j // 8] = DepthToWorld(i, j, depthValue)
            else:
                # otherwise, zero the point out
                worldCoordinates[i // 8, j // 8] = 0
    cloud = []
    for row in worldCoordinates:
        for point in row:
            if np.any(point):  # skip the zeroed-out points
                cloud.append([point[0], point[1], point[2]])
    if cloudCount < 1:
        clouds.append(cloud)
    else:
        icp = get_icp_transform(cloud, clouds[cloudCount -1])
        print(icp[1][:, 0:3])
        clouds.append(icp[1][:, 0:3])
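GenerateCloud() calls a DepthToWorld() helper that the excerpt does not include. A plausible sketch, assuming the common libfreenect disparity-to-meters approximation and the nominal intrinsics from Code Example #3 (the actual helper in nmagerko/pose may differ):

def DepthToWorld(row, col, raw_depth):
    # illustrative Kinect intrinsics; see Code Example #3
    cx, cy, fx, fy = 314.0137, 247.90585, 591.1027, 590.557
    z = 1.0 / (raw_depth * -0.0030711016 + 3.3309495161)
    x = (col - cx) * z / fx
    y = (row - cy) * z / fy
    return [x, y, z]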
Code Example #45
 def isOpened(self):
     data = freenect.sync_get_depth()
     if data is None:
         self.isopen = False
         return False
     else:
         self.isopen = True  # set the flag before returning (was unreachable)
         return True
Code Example #46
def capture_images(size):
    """Capture images from the color and depth streams, and
    return them as arrays."""
    w,h = size    
    frame_data, ts = freenect.sync_get_depth(format=freenect.DEPTH_MM)
    # note: image still seems to be rotated 90 degrees after this
    depth_image = frame_data.reshape((w,h)).T 
    return depth_image    
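On the rotation noted in the comment: freenect already returns the depth frame as a (height, width) array, so the reshape-and-transpose reorders pixels rather than rotating them. A sketch of the straightforward alternative (same DEPTH_MM format assumed):

import freenect

def capture_depth_mm():
    # sync_get_depth() already yields a (480, 640) array; no reshape needed
    frame, _ = freenect.sync_get_depth(format=freenect.DEPTH_MM)
    return frame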
Code Example #47
def grayscale():
    a = freenect.sync_get_depth()[0]
    np.clip(a, 0, 2**10 - 1, a)
    a >>= 2
    a = a.astype(np.uint8)
    median = cv2.medianBlur(a,5)
    return median
Code Example #48
def get_depth():
    #saving as 11 bit for csv file
    #frame = freenect.sync_get_depth()[0]
    #good for displaying and saving depth image
    frame = frame_convert2.pretty_depth_cv(freenect.sync_get_depth()[0])
    #print('\nframeDepth =\n')
    #print(frame)
    return frame
Code Example #49
 def run(self):
     q = self.queue
     while not self.quit:
         depths = freenect.sync_get_depth(index=self.index)
         if depths is None:
             sleep(2)
             continue
         q.appendleft(depths)
Code Example #50
def getDepthMap():	
    depth, timestamp = freenect.sync_get_depth()
 
    np.clip(depth, 0, 2 ** 10 - 1, depth)
    depth >>= 2
    depth = depth.astype(np.uint8)
 
    return depth
Code Example #52
File: main.py, Project: mcoenca/interactive-fluid
def getDepthMap():
    depth, timestamp = freenect.sync_get_depth()
    depth = 255 * np.logical_and(depth > lowerDepth, depth < upperDepth)
    # np.clip(depth, 0, 2**10 - 1, depth)
    # depth >>= 2
    depth = depth.astype(np.uint8)

    return depth
Code Example #54
 def sync_get_depth_frame(self):
     depth, timestamp = freenect.sync_get_depth()
     depth = self.pretty_depth(depth)
     image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                  cv.IPL_DEPTH_8U, 1)
     cv.SetData(image, depth.tostring(),
                depth.dtype.itemsize * depth.shape[1])
     return image
Code Example #55
File: depth.py, Project: sdpyljz/sandbox-fm
def depth_images():
    """generate depth images"""
    while True:
        depth, _ = freenect.sync_get_depth()
        depth = np.ma.masked_equal(depth, (2**11) - 1)

        # return as double because we might compute something with it
        yield depth.astype('double')
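A sketch of how the generator might be consumed (itertools.islice and the ten-frame count are illustrative):

import itertools
import numpy as np

# average ten masked depth frames; masked (invalid) pixels are ignored
frames = np.ma.stack(list(itertools.islice(depth_images(), 10)))
mean_depth = frames.mean(axis=0)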
Code Example #56
def sample_bgnd(n):
    """
    Take frames from the kinect to use as the background. Argument n
    corresponds to number of frames to be averaged as background. Currently
    averaging hurts more than it helps.
    """
    depth, _ = freenect.sync_get_depth()
    #bgnd = uniform_filter(depth, mode='constant')
    # accumulate in a wider dtype so the in-place sums cannot overflow uint16
    # or mutate freenect's buffer
    bgnd = depth.astype(np.int64)

    for _ in range(n - 1):
        depth, _ = freenect.sync_get_depth()
        #bgnd += uniform_filter(depth, mode='constant')
        bgnd += depth
    bgnd = np.around(bgnd / float(n)).astype(int)  # np.int was removed in NumPy 1.24
    print("Got Background")
    return bgnd
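A hypothetical use of sample_bgnd(): subtract the averaged background from a live frame to obtain a foreground mask (the 40-raw-unit threshold is an illustrative guess):

import freenect
import numpy as np

bgnd = sample_bgnd(5)
depth, _ = freenect.sync_get_depth()
# pixels at least 40 raw units nearer than the background count as foreground
foreground = (bgnd - depth.astype(np.int64)) > 40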
Code Example #57
File: chapter2.py, Project: EJHortala/books-2
 def _acquire_frame(self):
     """Acquire frame from depth sensor using freenect library"""
     frame, _ = freenect.sync_get_depth()
     # return success if frame size is valid
     if frame is not None:
         return (True, frame)
     else:
         return (False, frame)
Code Example #59
File: depth.py, Project: ArthurAllshire/vision-2015
    def get_depth(self):
        array, _ = freenect.sync_get_depth()

        # scale the 11-bit raw values into the 8-bit range
        array = np.multiply(array, 255.0 / 2048)
        array = array.astype(np.uint8)
        return array