Example #1
def handle_rgb(dev, data, timestamp):
    global keep_running, rgb, using_depth, rgb_points, depth_points, shots
    global taken, calibrating, waiting, note
    
    if keep_running:
        if using_depth:
            cvdata = frame_convert.video_cv(data[:160,:213])
            resized = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
            cv.Resize(cvdata, resized)
            rgb = resized
        else:
            rgb = frame_convert.video_cv(data)
        
        if not calibrating:
            showrgb()

    if taken == shots and not waiting:
        keep_running = False
    elif calibrating and not waiting:
        findchessboardcorners()
            
        showrgb()

    key = cv.WaitKey(10)
    if key >= 0 and key < 255:
        keypressed(key)
        if stop:
            keep_running = False
        elif not calibrating:
            calibrating = True
            note = "Looking for inner chessboard corners..."
        else:
            waiting = False
            note = "Looking for inner chessboard corners..."
Example #2
    def rgbCallback(self, dev, rgb, timestamp):
        frame = frame_convert.video_cv(rgb)
        # write the frame out so it can be passed by file to the streamer
        cv.SaveImage('frame.jpg', frame)
        # FIXME:
        # For now, pass by file to the streamer. There's got to be a way to get a
        # JPEG out of OpenCV without writing to a file, but I didn't have time to
        # sort it out. For now this works, just less elegantly than I'd prefer;
        # if there's time at the end I'll revisit and figure out a better way to
        # handle this.
        with open('frame.jpg', 'rb') as f:
            self.fileData = f.read()
        reactor.callFromThread(self.wsFactory2.broadcast, self.fileData, True)
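The FIXME above can likely be resolved without the temporary file: the old cv bindings can encode a JPEG in memory. A minimal sketch under that assumption (cv.EncodeImage returns a matrix of encoded bytes; everything else in the callback is unchanged):

    def rgbCallback(self, dev, rgb, timestamp):
        frame = frame_convert.video_cv(rgb)
        # Encode to JPEG in memory instead of round-tripping through frame.jpg.
        jpg = cv.EncodeImage('.jpg', frame).tostring()
        reactor.callFromThread(self.wsFactory2.broadcast, jpg, True)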
Example #3
def show_detector():
    image = frame_convert.video_cv(freenect.sync_get_video()[0])

    # cascade classifiers
    face_cascade = cv2.CascadeClassifier('opencv_data/haarcascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('opencv_data/haarcascades/haarcascade_eye.xml')

    # convert the image to grayscale to use it with the classifiers
    gray = cv2.cvtColor(cv2array(image), cv2.COLOR_BGR2GRAY)

    # draw the detections directly on the image (img is an alias, not a copy)
    img = image

    # detect and highlight faces
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv.Rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # detect and highlight eyes
    eyes = eye_cascade.detectMultiScale(gray)
    for (ex, ey, ew, eh) in eyes:
        cv.Rectangle(img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

    # show detector window
    cv.ShowImage('Detector', img)
Example #5
def video(dev, data, timestamp):
    global kill_me

    cv.ShowImage("Video", frame_convert.video_cv(data))
    key = cv.WaitKey(10)
    if key == 27:
        kill_me = True
Example #6
def display_rgb(dev, data, timestamp):
    global keep_running
    img = frame_convert.video_cv(data)
    cv.ShowImage('RGB', img)
    # data arrives as RGB, but cv2.imwrite expects BGR, so convert before saving
    cv2.imwrite('test.png', cv2.cvtColor(data, cv2.COLOR_RGB2BGR))
    if cv.WaitKey(1000) == 27:
        keep_running = False
Example #8
def showlive():
  global count, frames
  cv.NamedWindow('Depth')
  cv.NamedWindow('Video')
  cv.MoveWindow('Depth', 100, 100)
  cv.MoveWindow('Video', 745, 100)

  print('Press ESC in window to stop')
  print('Press Space to convert current to PLY')
  print('Press k to stop live capture')

  while 1:
      imgdepth = fc.depth_cv(freenect.sync_get_depth()[0])
      imgvideo = fc.video_cv(freenect.sync_get_video()[0])

      cv.ShowImage('Depth', imgdepth)
      cv.ShowImage('Video', imgvideo)

      inp = cv.WaitKey(100)

      if inp != -1:
        inp = chr(inp % 1048576)
        if inp == ' ': # space for capture and convert
          print 'capturing images'
          captureimage()
          print 'done capturing'
        elif inp.isdigit():
          frames = ord(inp) - ord('0')
          print 'setting the number of frames to capture to %d' % frames
        elif inp == 'k':
          break
      count = count + 1

  cv.DestroyWindow('Depth')
  cv.DestroyWindow('Video')
Example #9
def captureimage():
  global frames

  depthframes = np.zeros((frames, rownum, colnum))
  rgbframes = np.zeros((frames, rownum, colnum, 3))

  for i in range(frames):
    depthframes[i] = freenect.sync_get_depth()[0]
    rgbframes[i] = freenect.sync_get_video()[0]
    time.sleep(0.05)

  arargb   = fc.robustavg(rgbframes)
  aradepth = fc.robustavg(depthframes)
  serial = time.time()

  cv.SaveImage('img/depth%d.png' % serial, fc.depth_cv(aradepth.astype(int)))
  cv.SaveImage('img/video%d.png' % serial, fc.video_cv(arargb.astype(np.uint8)))
  #f = open('poly/poly%d.ply' % serial,'w')

  meterdepth = fc.meter_depth(aradepth)
  #newrgb2 = fc.matchrgb2(meterdepth, arargb)
  newrgb = fc.matchrgb(meterdepth, arargb)
  
  #meterdepth = ndi.gaussian_filter(fc.meter_depth(aradepth), [sigma, sigma])
  
  meterdepth[meterdepth > 1.5] = -1.
  meterdepth[meterdepth < 0.5] = -1.
  scipy.io.savemat('data/aligned%d.mat' % serial, {'depth':meterdepth, 'rgb':newrgb})
Example #10
def disp_thresh(lower, upper, show_masked_rgb=True):
  depth, timestamp = freenect.sync_get_depth()
  min_depth = depth.min()
  video,_ = freenect.sync_get_video()

  if show_masked_rgb:
    video = video.astype(np.uint8)
    depthmask = (255*np.logical_and(depth>lower,depth<upper)).reshape(480,640,1)
    depthmask = depthmask.astype(np.uint8)
    masked_video = video & depthmask
    #print reduce(lambda count, curr: curr>0 and count+1 or count,masked_video.flatten(),0)
    cv.ShowImage('RGB',frame_convert.video_cv(masked_video.reshape(480,640,3)))

  depth = 255 * np.logical_and(depth > lower, depth < upper)
  depth = depth.astype(np.uint8)
  image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                               cv.IPL_DEPTH_8U,
                               1)
  cv.SetData(image, depth.tostring(),
             depth.dtype.itemsize * depth.shape[1])

  canny = doCanny(image,150.0,200.0,7)
  templates = template_match(image,template)
  smoothed = smooth(canny)
  cv.ShowImage('Depth', image)
  return depth,canny,min_depth
Example #11
def show_video():
    video, timestamp = freenect.sync_get_video()
    #save_video(timestamp, video)
    video = frame_convert.video_cv(video)
    cv.Circle(video, (closest[1], closest[0]), 8, (0, 0, 255))
    cv.Circle(video, (closest[1], closest[0]), 4, (0, 0, 255))
    cv.ShowImage('Video', video)
Example #12
def display_rgb(dev, data, timestamp):
    global keep_running, rgb, using_depth
    
    if keep_running:
        if using_depth:
            cvdata = frame_convert.video_cv(data[:160,:213])
            gray = cv.CreateImage((213, 160), cv.IPL_DEPTH_8U, 1)
            cv.CvtColor(cvdata, gray, cv.CV_BGR2GRAY)
            resized = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
            cv.Resize(gray, resized)
            rgb = resized
        else:
            rgb = frame_convert.video_cv(data)
        
        showrgb()

    if cv.WaitKey(10) == 27:
        keep_running = False
Example #13
def handle_rgb(dev, data, timestamp):
    """
    Handler for the images from the Kinect. It initiates the finding of the
    chessboard corners.
    
    Input: DevPtr dev, the Kinect object
           np.array data, the image from the Kinect
           int timestamp, the time at which this image was received
    Output: none
    """

    global keep_running, rgb, using_depth, rgb_points, depth_points, shots
    global taken, calibrating, waiting, note, stop, fake
    
    if keep_running:
        if using_depth and not fake:
            cvdata = frame_convert.video_cv(data[:160,:213])
            resized = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
            cv.Resize(cvdata, resized)
            rgb = resized
        else:
            rgb = frame_convert.video_cv(data)
        
        if not calibrating:
            showrgb()

    if found and not waiting:
        keep_running = False
    elif calibrating and not waiting:
        findchessboardcorners()
        
        showrgb()

    key = cv.WaitKey(10)
    if key >= 0 and key < 255:
        keypressed(key)
        if stop:
            keep_running = False
        elif not calibrating:
            calibrating = True
            note = "Looking for inner chessboard corners..."
        else:
            waiting = False
            note = "Looking for inner chessboard corners..."
Example #14
def show_video():
    video = freenect.sync_get_video()[0]
    video = video / 255.00
    bgra = bgra_from_depth(video, depth)

    rgb = alpha_blend(bgra, background)
    rgb = cv.fromarray(rgb)

    cv.ShowImage('Video', rgb)
Example #15
    def process_rgb(self, dev, data, timestamp):
        #global keep_running
        # get an opencv version of video_cv data
        frame = frame_convert.video_cv(data)
        frame_size = cv.GetSize(frame)

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(frame_size, 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(frame_size, 8, 1)

        # split the image into different hues
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        # Run the cam-shift
        backproject = cv.CreateImage(frame_size, 8, 1)
        cv.CalcArrBackProject([self.hue], backproject, self.hist)

        # if we have a tracking window... shift it
        # Track_window => (rectangle of approx hue)
        if self.track_window and is_rect_nonzero(self.track_window):
            # set criteria for backproject iter
            # compute back projections - shifting rectangle in
            # appropriate direction
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect),
             self.track_box) = cv.CamShift(backproject, self.track_window,
                                           crit)
            # set track_window to the newly selected rectangle
            self.track_window = rect

        # if a section is being selected - set the histogram
        if self.debug:
            sel = self.dbg_rgb.check_for_selection(self.track_window,
                                                   self.track_box)

            # sets the histogram if there is a selection
            if sel: self.set_hist(frame, sel)

            self.dbg_rgb.update(frame)
            #if self.track_window:
            #  self.dbg_rgb.add_box(self.track_box)

            self.dbg_rgb.render()

        # Bail out if ESC is pushed
        key = cv.WaitKey(3)
        char = chr(key & 255)

        # k is for KILL
        if char == 'k':
            self.keep_running = False
        else:
            self.curr_classifier().respond_to_key(char)
Example #16
def display_rgb(dev, data, timestamp):
    global keep_running
    cvdata = frame_convert.video_cv(data[:160, :213])
    resized = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
    cv.Resize(cvdata, resized)
    grey = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(resized, grey, cv.CV_BGR2GRAY)
    cv.ShowImage("RGB", resized)
    cv.ShowImage("Depth", grey)
    if cv.WaitKey(10) == 27:
        keep_running = False
Example #17
def display_rgb(dev, data, timestamp):
    global keep_running
    image = frame_convert.video_cv(data)
    cv.ShowImage('RGB', image)
    for x in range(1, 5):
        name = "img%d" % (x)
        # save each frame under its own name instead of overwriting 'name.png'
        cv.SaveImage('%s.png' % name, image)
        time.sleep(1)
    if cv.WaitKey(10) == 27:
        keep_running = False
Example #19
    def find_position(self):
	print "Kinect is trying to find the image"
        (kinect_depth,_), (rgb,_) = get_depth(), get_video() 
        self.img = video_cv(rgb)
        depth_img = pretty_depth_cv(kinect_depth)
 
        position = self._get_pos(self.img)

        depth = self._get_depth(depth_img, debug=False)

        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1) 

        fps = 1/(time.time() - self.lasttime)
        s1 = "FPS:%.2f" % fps
        self.lasttime = time.time()
        cv.PutText(self.img,s1, (0,30),font, cv.CV_RGB(255, 0, 0))

        dt = "Depth: %d" % depth
        if position:
            pt = "Pos: X=%d Y=%d" % (position[0], position[1])
        else:
            pt = "Pos: N/A"
        cv.PutText(self.img, dt, (0,60),font, cv.CV_RGB(255, 0, 0))
        cv.PutText(self.img, pt, (0,90),font, cv.CV_RGB(255, 0, 0))

        offset = 120
        for t in self.text:
            cv.PutText(self.img, t, (0,offset),font, cv.CV_RGB(255, 0, 0))
            offset += 30

        cv.Circle(self.img, (self.sp[0], self.sp[1]) , 10, cv.CV_RGB(0, 255, 0), 1)

        cv.ShowImage('RGB', self.img)
        #cv.SaveImage('RGB-%d.png' % (time.time()*100), self.img)
        #cv.ShowImage('DEPTH', depth_img)
        cv.WriteFrame(self.writer, self.img)
        cv.WaitKey(5)

        #cv.ShowImage('depth_mask', depth_mask)
        try:
            return (position[0], position[1], depth)
        except:
            return (None, None, None)
Example #20
    def video_callback(self, dev, data, timestamp):
        if self.nightvision:
            cv_data = simplify_cv(data)
        else:
            cv_data = video_cv(data)

        if self.debug:
            cv.ShowImage('Video', cv_data)


        if (self._last_img + self.snapshot_secs < time.time() or
            self._snapshot):
            cv.SaveImage('babby-current.jpg', cv_data)
            k = boto.s3.key.Key(self.s3bucket)
            if self._snapshot:
                k.key = '/babby/snapshot-%s.jpg' % self._snapshot
                self._snapshot = False
            else:
                k.key = '/babby/current.jpg'
            k.set_contents_from_filename('babby-current.jpg')
            k.set_acl('public-read')
            self._last_img = time.time()
Example #21
def display_result(depth, data, p, screentitle):
    """
    This function will add the last information to the data that is displayed
    in a screen. In the rgb window, some text is added and in both windows
    the points that are clicked are added.
    """
    global cubic
    im = None
    data_old = np.array(data)
    if depth:
        im = frame_convert.pretty_depth_cv(data_old)        
        if len(p) == 4 and len(cubic) == 0:
            make_cubicle(data)        
    else:
        im = frame_convert.video_cv(data)
    
    displaypoints(im, p)
    im_cp = cv.CloneImage(im)
    if not depth:
        print_instructions(im_cp)
    
    cv.ShowImage(screentitle, im_cp)
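displaypoints and print_instructions are helpers from the same project that aren't shown here. A hypothetical sketch of the former, assuming p holds (x, y) pixel tuples:

def displaypoints(im, points):
    # Hypothetical helper: mark each clicked point on the image.
    for (x, y) in points:
        cv.Circle(im, (int(x), int(y)), 5, cv.CV_RGB(0, 255, 0), 2)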
Example #22
def show_video():
    cv.ShowImage('Video', frame_convert.video_cv(opennpy.sync_get_video()[0]))
Example #23
def display_rgb(dev, data, timestamp):
  cv.ShowImage('RGB', frame_convert.video_cv(data))
  if cv.WaitKey(10) == 27:
    shared_state.terminate()
Example #24
	def getImageData(self):
		npImage, _ = freenect.sync_get_video()
		cvImage = fc.video_cv(npImage)
		return cvImage
Example #25
def show_video():
    image = frame_convert.video_cv(freenect.sync_get_video()[0])
    cv.ShowImage('Video', resize_image(image))
Example #26
def get_video():
    video_data = freenect.sync_get_video()
    return video_data[1], frame_convert.video_cv(video_data[0])
Example #27
def display_rgb(dev, data, timestamp):
    global keep_running
    cv.ShowImage('RGB', frame_convert.video_cv(data))
    if cv.WaitKey(10) == 27:
        keep_running = False
Example #28
    def get_data(self):
        self.raw_depth_image = frame_convert.pretty_depth_cv(freenect.sync_get_depth()[0])
        self.raw_video_image = frame_convert.video_cv(freenect.sync_get_video()[0])
        cv.Flip(self.raw_depth_image, None, -1)
        cv.Flip(self.raw_video_image, None, -1)
Example #30
def motion_detection():

    # Initialization:

    # Save the first frame of the video to obtain the image properties
    frame = frame_convert.video_cv(freenect.sync_get_video()[0])
    frame_size = cv.GetSize(frame)

    # Grayscale image
    grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)

    # Image for OpenCV's running-average algorithm, which needs a 32- or 64-bit image
    running_average_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)
    # For converting the accumulator back to 8 bits
    running_average_image_converted = cv.CloneImage(frame)
    # Constant alpha for cv.RunningAvg, which computes acc = (1 - alpha) * acc + alpha * img
    # Small alpha: fast motion is barely picked up
    # Large alpha: fast motion is picked up
    alpha = 0.320

    # Memory storage for cv.FindContours
    mem_storage = cv.CreateMemStorage(0)

    # Difference image for cv.AbsDiff
    difference = cv.CloneImage(frame)

    while True:

        video = frame_convert.video_cv(freenect.sync_get_video()[0])

        # Copy of the video frame
        color_image = cv.CloneImage(video)
        # Smooth
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

        # Running-average algorithm
        cv.RunningAvg(color_image, running_average_image, alpha, None)
        # The result is a float image, so convert it back
        cv.ConvertScale(running_average_image, running_average_image_converted, 1.0, 0.0)

        # Subtract the running average from the current image
        cv.AbsDiff(color_image, running_average_image_converted, difference)

        # Convert to grayscale
        cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

        # Threshold to obtain a black-and-white image
        cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
        # Further processing
        cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
        cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

        bounding_box_list = []
        # Compute contours to find motion; always advance via h_next so the
        # walk over the contour chain terminates
        contours = cv.FindContours(grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
        while contours:
            if len(list(contours)) > 0:
                cv.DrawContours(color_image, contours, (255, 0, 0), (255, 0, 0), 0, thickness=1)
                bounding_rect = cv.BoundingRect(contours)
                bounding_box_list.append(bounding_rect)
            contours = contours.h_next()

        for box in bounding_box_list:
            (x, y, w, h) = box
            cv.Rectangle(color_image, (x, y), (x + w, y + h), (0, 0, 255))

        # Show the output for inspection
        #cv.ShowImage('Video', video)
        cv.ShowImage('Color Image', color_image)
        #cv.ShowImage('Running Average', running_average_image_converted)
        cv.ShowImage('Difference', grey_image)
        if cv.WaitKey(10) == 27:
            break
Example #31
def capture(dev, data, timestamp):
	global keep_running
	global img
	img = frame_convert.video_cv(data)
	keep_running = False
Example #32
cv.NamedWindow('Video')
print('Press ESC in window to stop')


def get_depth():
    return depth


def get_video():
    return video


while 1:
    depth, timestamp = freenect.sync_get_depth()
    video, _ = freenect.sync_get_video()

    cdepth = frame_convert.pretty_depth_cv(copy.deepcopy(depth))
    video = frame_convert.video_cv(video)

    cv.ShowImage('Depth', cdepth)
    cv.ShowImage('Video', video)

    key = cv.WaitKey(10)
    if key == 27:    # escape
        break

    elif key == 115: # lower case s
        print 'scraping a new depth at', timestamp
        pickle.dump(depth, open('depth.pickle', 'w'))
        cv.SaveImage('depth.jpg', video)
Example #33
def show_video():
    cv.ShowImage('Video', frame_convert.video_cv(freenect.sync_get_video()[0]))
Example #34
def handle_new_capture(depth, rgb):
    """
    This function is responsible for all the calculations. For now, it adds
    points to the image so that we can see the 3 dimensional cubicle in which
    we are working. Later this function will be used to scan for values.
    """
    print "New data captured"
    global cubic, intrinsic_matrix, distortion
    
    
    if intrinsic_matrix is None or distortion is None:
        print "Can't handle the capture because the intrinsic matrix and distortion aren't set yet."
        return

    
    objectpoints = [(0,100,0),(100,100,0),(100,100,200),(0,100,200)]
    npoints = len(objectpoints)
    imagepoints = []
    
    o_points = cv.CreateMat(npoints, 3, cv.CV_32FC1)
    i_points = cv.CreateMat(npoints, 2, cv.CV_32FC1)
    

    for i in xrange(npoints):
        o_points[i, 0] = objectpoints[i][0]
        o_points[i, 1] = objectpoints[i][1]
        o_points[i, 2] = objectpoints[i][2]
        i_points[i, 0] = cubic[i][0]
        i_points[i, 1] = cubic[i][1]
    
    
    rvec = cv.CreateMat(1, 3, cv.CV_32FC1)
    tvec = cv.CreateMat(1, 3, cv.CV_32FC1)
    cv.FindExtrinsicCameraParams2(o_points, i_points, intrinsic_matrix, distortion, rvec, tvec, useExtrinsicGuess=0)
    
    rotation = cv.CreateMat(3, 3, cv.CV_32FC1)
    translation = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Rodrigues2(rvec, rotation)
    cv.Rodrigues2(tvec, translation)
    
    matrix = cv.CloneMat(rotation)
    for i in xrange(3):
       print matrix[i,0],"\t\t", matrix[i,1],"\t\t", matrix[i,2]
       
    rgb_cv = frame_convert.video_cv(rgb)
    dst = cv.CloneImage(rgb_cv)
    
    rvec_rgb = cv.CreateMat(2, 3, cv.CV_32FC1)
    
    for i in xrange(2):
        rvec_rgb[i,0] = rotation[i + 1,0] 
        rvec_rgb[i,1] = rotation[i + 1,1]
        rvec_rgb[i,2] = rotation[i + 1,2]
        
        
    cv.WarpAffine(rgb_cv, dst, rvec_rgb)
    
    cv.SaveImage("original.png", rgb_cv)
    cv.SaveImage("warped.png", dst)    
    
        
    print "New points"
Example #35
def get_video():
    return frame_convert.video_cv(freenect.sync_get_video()[0])
Example #37
def get_video(ind):
    return frame_convert.video_cv(freenect.sync_get_video(ind)[0])
Example #38
    def next_frame(self):
        self.raw_depth = frame_convert.pretty_depth_cv(freenect.sync_get_depth()[0])
        self.raw_video = frame_convert.video_cv(freenect.sync_get_video()[0])
        cv.Flip(self.raw_depth, None, 1)
        cv.Flip(self.raw_video, None, 1)
Example #39
def show_video():
    cv.ShowImage("Video", frame_convert.video_cv(freenect.sync_get_video()[0]))
Example #40
def get_video():
    return frame_convert.video_cv(opennpy.sync_get_video()[0])