def snap_and_save(args, frames=None):
    if args.photo_mode == args.motion_mode and frames:
        snap = np.vstack(frames[1:][::-1])
        yoff = 930
    else:
        yoff = 450
        if args.photo_mode == 'rgb':
            rgb = freenect.sync_get_video()[0]
            snap = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)  # freenect delivers RGB; OpenCV writes BGR
        else:
            gray = freenect.sync_get_video(0, freenect.VIDEO_IR_8BIT)[0]
            snap = cv2.equalizeHist(gray)

    fname = "snapshots/{0}.jpg".format(str(int(time.time() * 10)))
    
    dt_str = datetime.strftime(
        datetime.now() + timedelta(seconds=-5*3600), 
        '%Y-%m-%d %H:%M:%S')

    cv2.putText(snap, dt_str, (30, yoff), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
        (0, 0, 255), 2)

    cv2.imwrite(fname, snap, [int(cv2.IMWRITE_JPEG_QUALITY), 70])

    return fname
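
The -5*3600 offset above hard-codes a fixed UTC-5 shift. A timezone-aware alternative, sketched here only as an illustration (America/New_York is an assumed zone, not stated by the original):

from datetime import datetime
from zoneinfo import ZoneInfo  # Python 3.9+

# Sketch: explicit time zone instead of a hard-coded -5 h offset.
# 'America/New_York' is an assumed zone, not from the original code.
dt_str = datetime.now(ZoneInfo("America/New_York")).strftime('%Y-%m-%d %H:%M:%S')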
Example #2
def captureimage():
  global frames

  depthframes = np.zeros((frames, rownum, colnum))
  rgbframes = np.zeros((frames, rownum, colnum, 3))

  for i in range(frames):
    depthframes[i] = freenect.sync_get_depth()[0]
    rgbframes[i] = freenect.sync_get_video()[0]
    time.sleep(0.05)

  arargb   = fc.robustavg(rgbframes)
  aradepth = fc.robustavg(depthframes)
  serial = int(time.time())  # integral serial for the %d filename patterns below

  cv.SaveImage('img/depth%d.png' % serial, fc.depth_cv(aradepth.astype(int)))
  cv.SaveImage('img/video%d.png' % serial, fc.video_cv(arargb.astype(np.uint8)))
  #f = open('poly/poly%d.ply' % serial,'w')
  
    
  meterdepth = fc.meter_depth(aradepth)
  #newrgb2 = fc.matchrgb2(meterdepth, arargb)
  newrgb = fc.matchrgb(meterdepth, arargb)
  
  #meterdepth = ndi.gaussian_filter(fc.meter_depth(aradepth), [sigma, sigma])
  
  meterdepth[meterdepth > 1.5] = -1.
  meterdepth[meterdepth < 0.5] = -1.
  scipy.io.savemat('data/aligned%d.mat' % serial, {'depth':meterdepth, 'rgb':newrgb})
Example #3
 def get_video(self):
     frame = freenect.sync_get_video()
     if frame is not None:
         rgb = frame[0]
         rgb = rgb[:, :, ::-1]  # RGB -> BGR
         return rgb.astype(numpy.uint8)  # uint8: int8 would wrap values above 127
         #return frame_convert.video_cv(freenect.sync_get_video()[0])
     return None
Example #4
def main():
    # Dead-zone half-width: |fx_avg| must exceed alpha before
    # we report movement to the left or right. Think of a
    # normal distribution of flow around zero; alpha sets how
    # much displacement we tolerate before declaring that the
    # scene moved in either direction.
    alpha = 0
    
    # Get previous image, resize it, convert it to grayscale.
    prevgray,_ = freenect.sync_get_video()
    prevgray = cv2.cvtColor(prevgray, cv2.COLOR_RGB2GRAY)  # freenect frames are RGB
    prevgray = cv2.resize(prevgray, size)
    
    while True:
        # Get new image, resize it, convert it to grayscale.
        gray,_ = freenect.sync_get_video()
        gray = cv2.cvtColor(gray, cv2.COLOR_RGB2GRAY)
        gray = cv2.resize(gray, size)
        
        # Calculate the optical flow using this handy built in method.
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        
        # Set the newly gathered image to the previous one.
        prevgray = gray
        
        # Grid step for sampling the flow field.
        step = 16
        
        # Sample the flow field on a regular grid.
        h, w = gray.shape[:2]
        y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2, -1).astype(int)
        fx, fy = flow[y,x].T
        
        # Average horizontal displacement across the sample grid;
        # the sign of fx_avg gives the direction of motion.
        fx_avg = np.average(fx)
        
        # Keep it straight!
        if fx_avg > alpha:
            print('Moved right')
            #driveLeft()
        elif fx_avg < -alpha:
            print('Moved left')
            #driveRight()
        else:
            print('Looking straight')
Example #5
def show_video():
    video, timestamp = freenect.sync_get_video()
    #save_video(timestamp, video)
    video = frame_convert.video_cv(video)
    cv.Circle(video, (closest[1], closest[0]), 8, (0, 0, 255))
    cv.Circle(video, (closest[1], closest[0]), 4, (0, 0, 255))
    cv.ShowImage('Video', video)
Example #6
def get_kinect_video():
    if kinect is not None:
        return get_kinect_video_cv()
    video = freenect.sync_get_video()
    if video is None:
        return None
    return video[0][..., 1]  # green channel of the RGB frame
Example #7
	def get_ir(self):
		if self.ir == 'KINECT':
			frame, timestamp = freenect.sync_get_video(format=freenect.VIDEO_IR_10BIT)  # pass the format by keyword; the first positional argument is the device index
			return frame
		else:
			ret, img = self.ir.read()
			return img
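
VIDEO_IR_10BIT frames arrive as uint16 arrays with 10 significant bits, so they render almost black if displayed directly. A small display helper, sketched here under that assumption (not part of the original):

import numpy as np

def ir10_to_8bit(frame):
    # Scale 10-bit IR values (0..1023) into the displayable 8-bit range.
    return (frame >> 2).astype(np.uint8)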
Example #8
def get_buffers():
    '''get_buffers(): returns a KinectData object
    KinectData members:
     - real_kinect (boolean) (true if the data comes from a real Kinect)
     - rgb array
     - depth array

     (buffers = numpy arrays)

     The input is taken from a file if the Kinect is missing or the library is not present.
     No memoization is done.
     '''
    found_kinect = False

    if freenect: # module has been imported
        try:
            # Try to obtain Kinect images.
            (depth, _), (rgb, _) = freenect.sync_get_depth(), freenect.sync_get_video()
            found_kinect = True
        except TypeError:
            pass

    if found_kinect:
        return KinectData(real_kinect=True, rgb=rgb, depth=depth)
    else:
        # Use local data files.
        return _DEFAULT_DATA
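
KinectData and _DEFAULT_DATA are not defined in this excerpt; a minimal sketch consistent with the docstring above (field names taken from the code, everything else assumed):

class KinectData:
    def __init__(self, real_kinect, rgb, depth):
        self.real_kinect = real_kinect  # True if data came from a real Kinect
        self.rgb = rgb                  # HxWx3 uint8 video buffer
        self.depth = depth              # HxW uint16 depth buffer

# Placeholder fallback; the real code presumably loads arrays from files.
_DEFAULT_DATA = KinectData(real_kinect=False, rgb=None, depth=None)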
Example #9
    def get_data(self):
        while True:
            (depth, _) = freenect.sync_get_depth()
            (rgb  , _) = freenect.sync_get_video()

            depth8 = self._pretty_depth(depth)
            yield depth, depth8, rgb
Example #10
 def frame(self):
     """ Grab a frame and set it as self._frame.
     Modified by Graham Jones to use libFreenect for kinect depth sensor
     """
     if self._enteredFrame and self._frame is None:
         if (self._uselibFreenect):
             if (self.channel == depth.CV_CAP_OPENNI_BGR_IMAGE):
                 imgRGB, timestamp = freenect.sync_get_video()
                 imgBGR = imgRGB.copy()  # an actual copy; plain assignment would alias the frame
                 filters.bgr2rgb(imgRGB, imgBGR)
                 self._frame = imgBGR
             elif (self.channel == depth.CV_CAP_OPENNI_DEPTH_MAP):
                 depthMap, timestamp = freenect.sync_get_depth()
                 #depthMap = depthMap.astype(numpy.uint16)
                 depthMap = depthMap.astype(numpy.uint8)
                 self._frame = depthMap
             else:
                 print "Error - Unrecognised channel %d." % self.channel
                 self._frame = None
         else:
             retVal, self._frame = self._capture.retrieve(channel = self.channel)
             self._nFrames = self._nFrames + 1
             #print retVal, type(self._frame), 
             # self._frame.size, frameCount, self._nFrames
             self._frame = cv2.cvtColor(self._frame, cv2.COLOR_BGR2GRAY)
             #self._frame = self._frame.astype(numpy.uint8)
     else:
         pass
         #print self._enteredFrame, self._frame
     return self._frame
Example #11
def update():
    global projpts, rgb, depth
    global hue,sat,val
    global imgHsv,imgHue,imgSat,imgVal
    range2=25.0
    depth,_ = freenect.sync_get_depth()
    rgb,_ = freenect.sync_get_video()
    #convert numpy to opencv image
    img = cv.fromarray(rgb)

    #MAIN IMAGE PROCESSING WORK IN OPENCV HERE
    cv.CvtColor(img, imgHsv, cv.CV_BGR2HSV)
    cv.Split(imgHsv, imgHue, imgSat, imgVal, None)
    cv.InRangeS(imgHue,(hue-range2, 0, 0),(hue+range2, 0, 0),imgHue)
    cv.InRangeS(imgSat,(sat, 0, 0),(255, 0, 0),imgSat)
    cv.InRangeS(imgVal,(val, 0, 0),(255, 0, 0),imgVal)
    cv.And(imgHue,imgSat,imgBin)
    cv.And(imgBin,imgVal,imgBin)
    cv.Erode(imgBin,imgBin,None)
    cv.Dilate(imgBin,imgBin,None)
    cv.CvtColor(imgBin, imgOut, cv.CV_GRAY2BGR)
    cv.ShowImage("Binary",imgOut)
    #FINISH IMAGE PROCESSING
    #return to numpy array
    rgb = np.asarray(imgOut)
    q = depth.astype(np.uint16)
    X,Y = np.meshgrid(range(IMGWIDTH),range(IMGHEIGHT))
    d = 1
    projpts = calib.depth2xyzuv(q[::d,::d],X[::d,::d],Y[::d,::d])
Example #12
    def run ( self ):
        #set up opencv windows
        cv.NamedWindow("Camera", 1)
        cv.NamedWindow("Binary", 1)
        cv.NamedWindow("Settings", 1)

        #set up sliders
        cv.CreateTrackbar("Hue", "Settings", hue, 180, on_hueTrackbar)
        cv.CreateTrackbar("Sat", "Settings", sat, 255, on_satTrackbar)
        cv.CreateTrackbar("Val", "Settings", val, 255, on_valTrackbar)

        #run a blocking while loop to capture depth and rgb to opencv window
        while 1:
            #pull in camera data
            (depth, _), (rgb, _) = freenect.sync_get_depth(), freenect.sync_get_video()
            depth=depth.astype(np.uint8)

            h1, w1 = depth.shape[:2]
            h2, w2 = rgb.shape[:2]
            maxHeight= max(h1,h2)
            vis = np.zeros((maxHeight, w1+w2), np.uint8)
            vis2 = np.zeros((h2,w2), np.uint8)
            cv.CvtColor(cv.fromarray(rgb), cv.fromarray(vis2), cv.CV_BGR2GRAY)

            #display in a single window
            vis[:maxHeight, :w1] = depth
            vis[:maxHeight, w1:w1+w2] = vis2
            cv.ShowImage("Camera",cv.fromarray(vis))
            cv.WaitKey(100)
Example #13
    def controlLoop(self):
        while True:
            if self.loop_should_stop:
                break

            img, _ = freenect.sync_get_video()

            center = self.droneCenterFromImage(img)

            center = (
                int(center[0] - img.shape[1] / 2),
                int(center[1] - img.shape[0] / 2),
                1
            )

            del img

            logger.debug("Position: %s", center)

            self.position = center

            if self.isAtTarget(self.charger.getCoordinates(), 10): # If at charger, slowly charge
                self.battery_level += 0.005
                if self.battery_level > 1.0:
                    self.battery_level = 1.0
Example #14
 def getFrame(self):
     self.when = time.time()
     depth,_ = freenect.sync_get_depth()
     rgb,_ = freenect.sync_get_video()
     # Retain a copy otherwise we crash later
     self.depth = numpy.copy(depth)
     self.rgb = numpy.copy(rgb)
Example #15
    def get_image(self):
        """
        **SUMMARY**

        This method returns the Kinect camera image.

        **RETURNS**

        The Kinect's color camera image.

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        ...     k.get_image().show()

        """
        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        video = freenect.sync_get_video(self.device_number)[0]
        self.capture_time = time.time()
        #video = video[:, :, ::-1]  # RGB -> BGR
        return Factory.Image(video.transpose([1, 0, 2]), camera=self)
Example #16
def disp_thresh(lower, upper, show_masked_rgb=True):
  depth, timestamp = freenect.sync_get_depth()
  min_depth = depth.min()
  video,_ = freenect.sync_get_video()

  if show_masked_rgb:
    video = video.astype(np.uint8)
    depthmask = (255*np.logical_and(depth>lower,depth<upper)).reshape(480,640,1)
    depthmask = depthmask.astype(np.uint8)
    masked_video = video & depthmask
    #print reduce(lambda count, curr: curr>0 and count+1 or count,masked_video.flatten(),0)
    cv.ShowImage('RGB',frame_convert.video_cv(masked_video.reshape(480,640,3)))

  depth = 255 * np.logical_and(depth > lower, depth < upper)
  depth = depth.astype(np.uint8)
  image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                               cv.IPL_DEPTH_8U,
                               1)
  cv.SetData(image, depth.tostring(),
             depth.dtype.itemsize * depth.shape[1])

  canny = doCanny(image,150.0,200.0,7)
  templates = template_match(image,template)
  smoothed = smooth(canny)
  cv.ShowImage('Depth', image)
  return depth,canny,min_depth
Example #17
 def get_frame(self):
     image, timestamp = freenect.sync_get_video()  # the second element is a timestamp, not a status flag
     # We are using Motion JPEG, but OpenCV defaults to capture raw images,
     # so we must encode it into JPEG in order to correctly display the
     # video stream.
     ret, jpeg = cv2.imencode('.jpg', image[:, :, ::-1])
     return jpeg.tobytes()
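
The Motion JPEG comment suggests this get_frame() feeds an HTTP stream. A typical Flask wiring, sketched here as an assumption (Camera and the route are illustrative, not part of the original):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # Yield each JPEG frame as one part of a multipart HTTP response.
    while True:
        frame = camera.get_frame()  # JPEG bytes, as returned above
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    # Camera is the (assumed) class that defines get_frame().
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')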
Example #18
def show_detector():
    image = frame_convert.video_cv(freenect.sync_get_video()[0])

    # cascade classifiers
    face_cascade = cv2.CascadeClassifier('opencv_data/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('opencv_data/haarcascades/haarcascade_eye.xml')

    # convert image to grayscale to use it with the classifiers
    # (assumes video_cv returns a numpy BGR image, as in frame_convert2)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # draw on the frame directly (note: plain assignment does not copy)
    img = image

    # detect and highlight faces
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # detect and highlight eyes
    eyes = eye_cascade.detectMultiScale(gray)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

    # show detector window
    cv2.imshow('Detector', img)
Example #19
def showlive():
  global count, frames
  cv.NamedWindow('Depth')
  cv.NamedWindow('Video')
  cv.MoveWindow('Depth', 100, 100)
  cv.MoveWindow('Video', 745, 100)

  print('Press ESC in window to stop')
  print('Press Space to convert current to PLY')
  print('Press k to stop live capture')

  while 1:
      imgdepth = fc.depth_cv(freenect.sync_get_depth()[0])
      imgvideo = fc.video_cv(freenect.sync_get_video()[0])

      cv.ShowImage('Depth', imgdepth)
      cv.ShowImage('Video', imgvideo)

      inp = cv.WaitKey(100)

      if inp != -1:
        inp = chr(inp % 1048576)
        if inp == ' ': # space for capture and convert
          print('capturing images')
          captureimage()
          print('done capturing')
        elif inp.isdigit():
          frames = ord(inp) - ord('0')
          print('setting the number of frames to capture to %d' % frames)
        elif inp == 'k':
          break
      count = count + 1

  cv.DestroyWindow('Depth')
  cv.DestroyWindow('Video')
Example #20
	def get_bgr(self):
		if self.bgr == 'KINECT':
			frame, timestamp = freenect.sync_get_video()
			bgr = frame[:, :, ::-1]  # RGB -> BGR
			return bgr
		else:
			ret, img = self.bgr.read()
			return img
Example #21
def getVideo():
    video, timestamp = freenect.sync_get_video()
    
    # NOTE: this 10-bit clip is a leftover from depth handling; the RGB
    # frame is already 8-bit, so the clip (and the commented shift) do nothing.
    np.clip(video, 0, 2**10 - 1, video)
#     video >>= 2
    video = video.astype(np.uint8)
    
    return video
Example #22
def detect():
    matches = []
    img = freenect.sync_get_video()[0]
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # freenect frames are RGB, not BGR
    cups = classifier_rect.detectMultiScale(gray, 1.3, 3)
    for (x,y,w,h) in cups:
        matches.append((x,y,w,h))
    return (matches,img)
Example #23
def get_real():
    array = freenect.sync_get_video()[0]
    
    #~ print "real "+str(np.shape(array))
    
    array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
    #array = cv2.cvtColor(array,cv2.COLOR_RGB2GRAY)
    return array
Example #24
def capture_pair(index, ir_img):
    
    rgb_img = freenect.sync_get_video(format=freenect.VIDEO_RGB)[0]
    detected, corners = cv.FindChessboardCorners(prepare_cv(rgb_img), (8,6))
    if not detected:
        return index
    imsave("ir%03d.png"%index, ir_img)
    imsave("rgb%03d.png"%index, rgb_img)
    return index + 1
Example #25
def initialize():
    matrix = np.load('matrix.npz')['mtx']
    dist = np.load('dist.npz')['dist'] 
    while True:
        ret = fiducial.find(freenect.sync_get_video()[0], matrix, dist)
        if ret[0]:
            break
    print ret[1]
    return (matrix, dist, ret[1])
Example #26
def analyzeKinect(modelName):

	#FeatureMatrix = np.load(modelName+".npy")
	HAAR_CASCADE_PATH_FRONTAL = "haarcascade_frontalface_default.xml"
	HAAR_CASCADE_PATH_PROFILE = "haarcascade_frontalface_default.xml"
	cascadeFrontal = cv2.cv.Load(HAAR_CASCADE_PATH_FRONTAL);
	cascadeProfile = cv2.cv.Load(HAAR_CASCADE_PATH_PROFILE);
	storage = cv2.cv.CreateMemStorage()
		
	count = 0	

	start = time.time()
	prevTime = start


	dirName1 = "Real_Scenario"
	if os.path.exists(dirName1):
		shutil.rmtree(dirName1)
	os.makedirs(dirName1)
	


	while 1:
		rgb   = freenect.sync_get_video()[0]
		depth = freenect.sync_get_depth()[0]
		bgr = cv2.cvtColor(rgb, cv2.cv.CV_RGB2BGR)	
		# elapsedTime = "%08.3f" % (time.time() - start)
		elapsedTime = "%08.3f" % (time.time())

		# process depth: convert raw disparity to meters
		# (the original 'depth >> 5' statement discarded its result, so it is omitted)
		meter = 1.0 / (depth * (-0.0030711016) + 3.3309495161)

		faces = detect_faces(rgb, cascadeFrontal, cascadeProfile, storage, rgb.shape[1], 30)
		countFaces = 0
		bgr_plot = bgr.copy()
		print len(faces)
		if len(faces) > 0:
			strFile = dirName1 + os.sep + "image{0:04d}".format(count)
			cv2.imwrite(strFile + '.jpg', bgr)
		cv2.cv.ShowImage("depth",  visual_frame_convert.pretty_depth_cv(np.uint16(depth) )); 			cv2.moveWindow('depth', 720, 0)
		cv2.imshow("bgr",bgr); cv2.moveWindow('rgb', 0, 0)

		count += 1

		curTime = time.time()	
		#print (curTime - prevTime)
		prevTime = curTime

		if cv2.cv.WaitKey(10) == 27:
			break
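
The meter conversion above is the widely circulated raw-disparity fit for the Kinect. Pulled out as a standalone helper (a sketch; the constants come from the snippet itself):

import numpy as np

def raw_disparity_to_meters(raw_depth):
    # Valid for raw 11-bit Kinect disparity (roughly 300-1090);
    # values outside that range produce nonsensical distances.
    return 1.0 / (raw_depth.astype(np.float64) * (-0.0030711016) + 3.3309495161)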
Example #27
def get_frame():

	# Get the depth
	depth_frame, depth_timestamp = freenect.sync_get_depth()

	# Get the color image
	rgb_frame, rgb_timestamp = freenect.sync_get_video()

	return depth_frame, rgb_frame, depth_timestamp 
Example #28
def MatchAllCapture(save, maxdist=200):
    from os.path import isfile, join
    import freenect
    from os import listdir
    import cv2
    import numpy as np
    import itertools
    import sys
    import time
    #Clear all cv windows
    #cv2.destroyAllWindows()

    #Prepare a list of different training images
    pathlarge = "TrainingImages/AxonCups/LargeCup/"
    pathmedium = "TrainingImages/AxonCups/MediumCup/"
    pathsmall = "TrainingImages/AxonCups/SmallCup/"
    pathtest = "TestImages"

    largecups = [f for f in listdir(pathlarge) if isfile(join(pathlarge, f)) and f[0] != "."]
    mediumcups = [f for f in listdir(pathmedium) if isfile(join(pathmedium, f)) and f[0] != "."]
    smallcups = [f for f in listdir(pathsmall) if isfile(join(pathsmall, f)) and f[0] != "."]
    testimages = [f for f in listdir(pathtest) if isfile(join(pathtest, f)) and f[0] != "."]

    #img = cv2.imread(str(pathtest+"/"+testimages[ImageNo]))

    img, timestamp = freenect.sync_get_video()
    depth, timestamp = freenect.sync_get_depth(format=freenect.DEPTH_REGISTERED)

    detector = cv2.FeatureDetector_create("FAST")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    skp = detector.detect(img)
    skp, sd = descriptor.compute(img, skp)
    
    KeyPointsTotalList = []
    DistsTotalList = []

    for i in largecups+mediumcups+smallcups:
        if i in largecups:
            temp = cv2.imread(str(pathlarge+"/"+i))
        elif i in mediumcups:
            temp = cv2.imread(str(pathmedium+"/"+i))
        elif i in smallcups:
            temp = cv2.imread(str(pathsmall+"/"+i))
        #print i
        KeyPointsOut = findKeyPointsDist(img,temp,skp,sd,maxdist)
        KeyPointsTotalList += KeyPointsOut[0]
        DistsTotalList += KeyPointsOut[1]

    indices = range(len(DistsTotalList))
    indices.sort(key=lambda i: DistsTotalList[i])
    DistsTotalList = [DistsTotalList[i] for i in indices]
    KeyPointsTotalList = [KeyPointsTotalList[i] for i in indices]
    img1 = img
    if save == 1:
        saveImageMappedPoints(img, KeyPointsTotalList, 1)
        
    return KeyPointsTotalList, DistsTotalList, img, depth
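
findKeyPointsDist is not defined in this excerpt. A plausible OpenCV 2.4-era sketch (assumed contract: return the scene keypoints, and their distances, whose best template match falls under maxdist):

def findKeyPointsDist(img, template, skp, sd, maxdist):
    # Sketch only: SIFT-describe the template, brute-force match the scene
    # descriptors (sd) against it, and keep matches below maxdist.
    detector = cv2.FeatureDetector_create("FAST")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    tkp = detector.detect(template)
    tkp, td = descriptor.compute(template, tkp)

    matcher = cv2.BFMatcher(cv2.NORM_L2)
    matches = matcher.match(sd, td)

    keypoints, dists = [], []
    for m in matches:
        if m.distance < maxdist:
            keypoints.append(skp[m.queryIdx])
            dists.append(m.distance)
    return keypoints, dists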
Example #29
def show_faces():
    image = video_to_bgr(freenect.sync_get_video()[0])
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    gray = cv.CreateImage((image.width, image.height), 8, 1)
    small_image = cv.CreateImage((cv.Round(image.width / image_scale), cv.Round(image.height / image_scale)), 8, 1)
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_image, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_image, small_image)

    faces = cv.HaarDetectObjects(
        small_image,
        face_cascade,
        cv.CreateMemStorage(0),
        haar_scale,
        min_neighbors,
        haar_flags,
        min_size
    )

    if faces:
        for ((x, y, w, h), n) in faces:
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            cv.SetImageROI(image, (pt1[0], pt1[1], pt2[0] - pt1[0], int((pt2[1] - pt1[1]) * 0.7)))

        eyes = cv.HaarDetectObjects(
            image,
            eye_cascade,
            cv.CreateMemStorage(0),
            haar_scale,
            min_neighbors,
            haar_flags,
            (15, 15)
        )

        if eyes:
            for eye in eyes:
                cv.Rectangle(
                    image,
                    (eye[0][0], eye[0][1]),
                    (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
                    cv.RGB(255, 0, 0),
                    1,
                    8,
                    0
                )

    cv.ResetImageROI(image)

    return image
Example #30
def show_video():
    video = freenect.sync_get_video()[0]
    bgr = frame_convert.video_cv(video)
    video = video / 255.00
    bgra = bgra_from_depth(video, depth) 

    rgb = alpha_blend(bgra, background)
    rgb = cv.fromarray(rgb)

    cv.ShowImage('Video', rgb)
Example #31
def get_video():
    return frame_convert.video_cv(freenect.sync_get_video()[0])
Example #32
# (snippet truncated above: the elided code performed K-means clustering)

if __name__ == '__main__':
    import sys
    import getopt
    import time

    # create main window
    cv2.namedWindow("camera", 1)

    frame_count = 0
    while True:
        ch = 0xFF & cv2.waitKey(10)
        if True:
            # convert the Kinect's RGB frame to BGR for display
            rgb_frame = cv2.cvtColor(freenect.sync_get_video()[0],
                                     cv2.COLOR_RGB2BGR)
            # convert to grayscale for corner detection
            grey_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_BGR2GRAY)
            frames_str = "Frames for Calibration:" + str(frame_count)
            cv2.putText(rgb_frame, frames_str, (50, 50), font, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)
            if ch == 32:  # ord(' ') == 32
                # find location of corners in image, this is really slow if no corners are seen.
                found, corners = cv2.findChessboardCorners(
                    grey_frame, pattern_size, None)
                if found:
                    # find sub pixel estimate for corner location
                    # Refines the corner locations, cv2.cornerSubPix(image, corners, winSize, zeroZone, criteria)
                    cv2.cornerSubPix(grey_frame, corners, (5, 5), (-1, -1),
                                     criteria)
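
The excerpt ends right after cornerSubPix. The usual continuation (assumed, not part of the original) accumulates point pairs and, once several views are collected, calls cv2.calibrateCamera:

import numpy as np

# Sketch with assumed names: pattern_size and frame_count come from the
# snippet; obj_points/img_points/pattern_points are introduced here.
obj_points, img_points = [], []
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)

if found:
    img_points.append(corners.reshape(-1, 2))
    obj_points.append(pattern_points)
    frame_count += 1

# After enough frames:
h, w = grey_frame.shape[:2]
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, (w, h), None, None)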
Example #33
    output_hsv = "H:%d, S:%d, V:%d" % (h, s, v)
    tmp = img.copy()
    cv2.putText(tmp,output_rgb, (10,20), font, 0.5, (255,255,255))
    cv2.putText(tmp,output_hsv, (10,40), font, 0.5, (255,255,255))
    cv2.imshow('window',tmp)
    if event == cv2.EVENT_LBUTTONDOWN:
        print "bgr: (%d,%d,%d) \n hsv: (%d,%d,%d)" % (b,g,r,h,s,v)


if freenect.sync_get_depth() is None:
    kinectConnected = False
else:
    kinectConnected = True

if(kinectConnected):
    rgb_frame = freenect.sync_get_video()[0]
    
    img = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
    hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    kernel = np.ones((5, 5), np.uint8)
    cv2.namedWindow("window",cv2.WINDOW_AUTOSIZE)
    cv2.imshow('window', img)
    yellow_lower_range = np.array([23,200,200])
    yellow_upper_range = np.array([27,255,255])
    orange_lower_range = np.array([6,100,100])
    orange_upper_range = np.array([14,255,255])
    pink_lower_range = np.array([167, 100, 100])
    pink_upper_range = np.array([170, 200, 255])
    black_lower_range = np.array([0, 0, 0])  # a lot of the image is black so TODO find the least area and that will be the block
    black_upper_range = np.array([180, 255, 50])
    purple_lower_range = np.array([155, 90, 100])
Example #34
def realsense_run():
    # ROS node setup
    frame_id = 'Realsense node'

    if not test:
        rospy.init_node(f'Realsense_main', anonymous=True)
        diag_obj = diag_class(frame_id=frame_id,
                              user_id=args.user_id,
                              user_name=args.user_name,
                              queue=1)
        rate = rospy.Rate(10)

    if args.classify:
        try:
            frames = cam.pipeline.wait_for_frames()
            if args.depth:
                color_image, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                color_image = np.array(
                    frame_convert2.video_cv(freenect.sync_get_video()[0]))
                #color_image = cam.colour_frames(frames)

            im_classifier = classifier(args.comp_device, args.weights,
                                       args.img_size, color_image,
                                       args.conf_thres, args.iou_thres)
            if not test:
                obj_obj = obj_class(frame_id=frame_id,
                                    names=im_classifier.names,
                                    queue=1)

        except Exception as e:
            print("**Classifier Load Error**")
            traceback.print_exc(file=sys.stdout)
            if not test:
                diag_obj.publish(2, f"load classifier error: {e}")
            raise

    diag_timer = time.time()
    while (not rospy.is_shutdown()) or test:
        try:
            # Get frameset of color and depth
            frames = cam.pipeline.wait_for_frames()

            if args.depth:
                color_image, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                #color_image = cam.colour_frames(frames)
                color_image = np.array(
                    frame_convert2.video_cv(freenect.sync_get_video()[0]))

            if args.classify:
                try:
                    if args.depth:
                        color_image, det = im_classifier.detect(
                            color_image, depth_image, depth_histogram=False)
                    else:
                        color_image, det = im_classifier.detect(
                            color_image, None)

                    if not test:
                        obj_obj.publish(det)
                except Exception as e:
                    print("**Classifier Detection Error**")
                    traceback.print_exc(file=sys.stdout)
                    if not test:
                        diag_obj.publish(2, f"load classifier error: {e}")

            # Remove background - Set pixels further than clipping_distance to grey
            #grey_color = 153
            #depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
            #bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
            #image = scale(bg_removed)

            if (time.time() - diag_timer) > 1:
                print(time.time())
                if not test:
                    diag_obj.publish(0, f"Running")
                diag_timer = time.time()

        except TypeError as e:
            time.sleep(1)
            if not test:
                diag_obj.publish(2, f"TypeError")
        except Exception as e:
            print("**Get Image Error**")
            if not test:
                diag_obj.publish(2, f"Realsense image error: {e}")
            traceback.print_exc(file=sys.stdout)
            break

        #im_screw_states, tally = im_screw_detect.detect_screws(image, args.disp)
        #im_screw_states = im_screw_states.tolist()
        if args.disp:
            if args.depth:
                disp_im = np.hstack((color_image, depth_colormap))
            else:
                disp_im = color_image

            cv2.namedWindow('Realsense viewer', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Realsense viewer', disp_im)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

        if not test:
            rate.sleep()
        else:
            #time.sleep(1)
            pass
Example #35
def show_video():
    cv.ShowImage('Video', frame_convert.video_cv(freenect.sync_get_video()[0]))
Example #36
def get_video():
    frame = frame_convert2.video_cv(freenect.sync_get_video()[0])
    return frame
Example #37
def get_video():
    video, _ = freenect.sync_get_video()

    return video
Example #38
def get_video():
    array, _ = freenect.sync_get_video()
    array = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)
    ret, jpeg = cv2.imencode('.jpeg', array)
    return jpeg.tobytes()  # tobytes() replaces the deprecated tostring()
Example #39
def get_img(mode):
    # This was intended to inhibit the stream warnings to the console, but it did not work:
    #text_trap = io.StringIO()
    #sys.stderr = text_trap
    if (mode == IMG_RGB):
        # gets one frame from the RGB camera
        frame = freenect.sync_get_video()[0]

        #frame = clahe.apply(frame)

        #background subtraction
        #Learning Rate Parameter / -1 auto, 0 not updated at all, 1 new from last frame
        fgMask = backSub.apply(frame, learningRate=-1)

        #threshold to get rid of any color other than black and white
        #anything lighter than 127 will be set to 255 (white), anything lower to 0 (black)
        # we don't need this if we turn shadows off
        #threshold expects a single-channel image // with shadows enabled it's a greyscale image
        #ret, fgMask = cv2.threshold(fgMask,127,255,cv2.THRESH_BINARY)

        #erosion
        #fgMask = cv2.erode(fgMask, kernel, iterations = 1)

        #dilation

        #fgMask = cv2.dilate(fgMask, kernel, iterations = 1)

        fgMask = cv2.morphologyEx(
            fgMask, cv2.MORPH_CLOSE,
            kernel_big)  # closes gaps smaller than 9x9 pixels

        #change color space from grayscale to BGR so we can draw a colored box later around blobs
        #col = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)

    elif (mode == IMG_DEPTH):
        frame = freenect.sync_get_depth()[0]  # gets the Kinect depth image
        frame = 255 * np.logical_and(frame >= DEPTH - THRESHOLD,
                                     frame <= DEPTH + THRESHOLD)

        # we make sure its a 8-bit single-channel image // do we need this
        #frame = frame.astype(np.uint8)

        #background subtraction // do we need this? // we do this already with the threshold
        fgMask = backSub.apply(frame, learningRate=-1)

        #do we need this // there are no grey colors in the depth image after the threshold
        #ret, fgMask = cv2.threshold(fgMask,127,255,cv2.THRESH_BINARY)

        #erosion
        #fgMask = cv2.erode(fgMask, kernel, iterations = 1) # morphological erode with 3x3

        #dilation

        #fgMask = cv2.dilate(fgMask, kernel, iterations=1)

        # do we need this
        fgMask = cv2.morphologyEx(
            fgMask, cv2.MORPH_CLOSE,
            kernel_big)  # closes gaps smaller than 9x9 pixels

        #change color space from grayscale to BGR so we can draw a colored box later around blobs
        #col = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)

    # Problem: this function gives us sometimes only one blob instead of two
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(fgMask)

    # Reset output to stdout:
    #sys.stderr = sys.__stderr__
    return ret, frame, fgMask, labels, stats, centroids
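
On the "one blob instead of two" problem noted above: touching silhouettes often get merged by MORPH_CLOSE. A hedged post-filter that keeps only components above a minimum area (min_area is an assumed tuning value, not from the original):

def blob_boxes(fgMask, min_area=200):
    # Filter connected components by area before drawing boxes around them.
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(fgMask)
    boxes = []
    for i in range(1, ret):  # label 0 is the background
        x, y, w, h, area = stats[i]
        if area >= min_area:
            boxes.append((x, y, w, h))
    return boxes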
Example #40
    for v in range(rgb.size[1]):
        for u in range(rgb.size[0]):
            color = rgb.getpixel((u,v))
            Z = depth[v][u] * 0.1
            if Z == 0:
                continue
            X = (u - (w / 2.0)) * (Z + minDistance) * scaleFactor
            Y = (v - (h / 2.0)) * (Z + minDistance) * scaleFactor
            points.append("%f %f %f %d %d %d 0\n"%(X,Y,Z,color[0],color[1],color[2]))

    with open(ply_file,"w") as ply:
        ply.write("ply\n" + \
                  "format ascii 1.0\n" + \
                  "element vertex {}\n".format(len(points)) + \
                  "property float x\n" + \
                  "property float y\n" + \
                  "property float z\n" + \
                  "property uchar red\n" + \
                  "property uchar green\n" + \
                  "property uchar blue\n" + \
                  "property uchar alpha\n" + \
                  "end_header\n" + \
                  "{}".format("".join(points)))


rgb = Image.fromarray(freenect.sync_get_video()[0])
KinectDepth,_=freenect.sync_get_depth(0, freenect.DEPTH_MM)
depth=np.array(KinectDepth)

generate_pointcloud(rgb, depth, "PointCloud.ply")
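
The X/Y back-projection above uses an ad-hoc linear model (minDistance and scaleFactor are not defined in this excerpt). A common intrinsics-based alternative, sketched with typical published Kinect values rather than calibrated ones:

import numpy as np

# fx, fy, cx, cy are commonly quoted defaults for the Kinect RGB camera,
# not calibrated values for any particular device.
fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5

def backproject(u, v, z_m):
    # z_m: depth in meters at pixel (u, v); returns camera-frame X, Y, Z.
    x = (u - cx) * z_m / fx
    y = (v - cy) * z_m / fy
    return x, y, z_m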

Example #41
def main():
    print("Running...")

    count = 0
    # grab one frame at first to compare for background subtraction
    frame, timestamp = freenect.sync_get_video()
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
    frame_resized_grayscale = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)

    # initialize centroid
    center = [[frame_resized.shape[1] / 2, frame_resized.shape[0] / 2]]
    center_fix = []
    # defining min cutoff area
    min_area = (480 / 400) * frame_resized.shape[1]

    key = ''
    while key != 113:  # for 'q' key
        # start timer
        timer = cv2.getTickCount()
        starttime = time.time()

        previous_frame = frame_resized_grayscale
        # retrieve new RGB frame image
        frame, timestamp = freenect.sync_get_video()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        frame_resized = imutils.resize(frame, width=min(400, frame.shape[1]))
        frame_resized_grayscale = cv2.cvtColor(frame_resized,
                                               cv2.COLOR_BGR2GRAY)
        temp = background_subtraction(previous_frame, frame_resized_grayscale,
                                      min_area)

        # retrieve depth map
        depth, timestamp = freenect.sync_get_depth()
        depth = imutils.resize(depth, width=min(400, depth.shape[1]))
        # orig = image.copy()
        if temp == 1:
            frame_processed, center_fix = detect_people(frame_resized, center)
            if (len(center_fix) > 0):
                #xnorm = sercomm(center_fix[0][0]) # send x_norm to nucleo
                #print("X_NORM: " + str(xnorm))
                distance = depth[(int)(center_fix[0][1]),
                                 (int)(center_fix[0][0])]
                cv2.putText(frame_processed,
                            "distance: {:.2f}".format(distance),
                            (10, frame_processed.shape[0]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 3)
                #print("Distance: " + str(depth.shape) + str(frame_processed.shape))
            i = 0
            for b in center_fix:
                cv2.putText(
                    frame_processed,
                    "Point " + str(i) + ": " + str(b[0]) + " " + str(b[1]),
                    (10, frame_processed.shape[0] - (i + 1) * 35), font, 0.65,
                    (0, 0, 255), 3)
                #print(b)
                #print("Point "+str(i)+": "+str(b[0])+" "+str(b[1]))
                i = i + 1
            cv2.imshow("Detected Human", frame_processed)
            #cv2.imshow("Depth", depth)
            # cv2.imshow("Original", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("b"):
                break
            endtime = time.time()
            print("Time to process a frame: " + str(starttime - endtime))
        else:
            count = count + 1
            print("Number of frame skipped in the video= " + str(count))

        # cv2.putText(depth, "distance: {:.2f}".format(distance), (10, depth.shape[0] - 120), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 3)

        # compute the fps
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        print("FPS: " + str(fps))

        key = cv2.waitKey(5)

    cv2.destroyAllWindows()
    print("\nFINISH")
Example #42
def show_video():
    array, _ = freenect.sync_get_video()
    array = cv.cvtColor(array, cv.COLOR_RGB2BGR)
    cv.imshow('Video', array)
Example #43
 def __init__(self):
     self.array, _ = freenect.sync_get_video()
Example #44
def get_video(pts):
    array, _ = freenect.sync_get_video()
    array = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)
    return four_point_transform(array, pts)
Example #45
def process(quit, callback, **kwargs):
    size = (-1, -1)

    while quit.value == 0:
        use_depth = kwargs.get('use_depth', True)
        use_video = kwargs.get('use_video', False)
        video_format = kwargs.get('video_format', 'ir8')
        index = kwargs.get('index', 0)
        depth = None
        video = None
        depth_str = None
        video_str = None

        video_channels = 0
        if video_format == 'rgb':
            video_format = freenect.VIDEO_RGB
            video_channels = 3
        elif video_format == 'ir8':
            video_format = freenect.VIDEO_IR_8BIT
            video_channels = 1
        else:
            assert False, 'unsupported video_format'  # the original assert('unsupported') was always true

        if use_depth:
            # fetch depth from freenect
            depth = freenect.sync_get_depth(index=index)
            if depth is None:
                print 'Kinect: kinect not available, restarting in 5 seconds.'
                sleep(5)
                continue


            # extract data & timestamp
            data, timestamp = depth

            # update image size from the first image
            if size == (-1, -1):
                size = data.shape[1], data.shape[0]
                print 'Kinect: size is', size

            data = convert_depth_to_8bit(data)

            # convert to opencv image

            depth = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 1)
            cv.SetData(depth, data.tostring(),
                       data.dtype.itemsize * data.shape[1])

            cv.Flip(depth, depth, 0)
            depth_str = str(depth.tostring())

        if use_video:
            video = freenect.sync_get_video(index=index, format=video_format)
            if video is None:
                print 'Kinect: kinect not available, restarting in 5 seconds.'
                sleep(5)
                continue

            data, timestamp = video

            if size == (-1, -1):
                size = data.shape[1], data.shape[0]
                print 'Kinect: size is', size

            # convert to opencv image
            video = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, video_channels)
            cv.SetData(video, data.tostring(),
                       data.dtype.itemsize * data.shape[1] * video_channels)
            cv.Flip(video, video, 0)
            video_str = video.tostring()

        arg = {
            'depth': depth,
            'depth_str': depth_str,
            'video': video,
            'video_str': video_str,
            'size': size
        }

        if callback(arg, kwargs) is False:
            break

    for q in kwargs.values():
        if q.__class__.__name__ == 'Queue':
            while q.qsize():
                q.get()
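
process() hands each frame to the callback as a dict. A minimal consumer, sketched under assumptions (the 'queue' kwarg and the printout are illustrative, not from the original):

def my_callback(arg, kwargs):
    # arg carries the depth/video images, their raw string buffers, and size.
    if arg['depth'] is not None:
        print 'depth frame, size', arg['size']
    q = kwargs.get('queue')  # assumed kwarg holding a multiprocessing Queue
    if q is not None and arg['depth_str'] is not None:
        q.put(arg['depth_str'])
    return True  # returning False stops the capture loop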
Example #46
def get_video():
	array, _ = freenect.sync_get_video()
	array = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)
	return array
Example #47
def get_video():
    return np.array(frame_convert2.video_cv(freenect.sync_get_video()[0]))
Example #48
def get_video():
    return freenect.sync_get_video()[0]