def process_depth_info(self, dev, data, timestamp):
    """Depth-frame callback: locate the pointer, then refresh the debug
    viewer when debugging is enabled.
    """
    self.find_pointer(data)
    if self.debug:
        # Convert the raw depth data to a displayable image and push it
        # to the debug window.
        frame = frame_convert.pretty_depth_cv(data)
        self.dbg_depth.update(frame)
        self.dbg_depth.render()
def process_depth_info(self, dev, data, timestamp):
    """Handle one depth frame from the device.

    Runs pointer detection on the frame; if debug mode is on, the frame
    is also rendered in the debug depth window.
    """
    self.find_pointer(data)
    if not self.debug:
        return
    # Debug path: show what the detector saw.
    debug_img = frame_convert.pretty_depth_cv(data)
    self.dbg_depth.update(debug_img)
    self.dbg_depth.render()
def scalar_size_circles(self, depth_array, states, local_to_pixel, state_size_var = "size", colour = 0x00):
    """Render one circle per state, with a per-state scalar radius.

    The config may override both which state field supplies the radius
    ("state_size_var") and the draw colour ("colour"); the keyword
    arguments act as fallbacks.
    """
    canvas = frame_convert.pretty_depth_cv(depth_array)
    # Resolve config overrides once, outside the loop.
    size_key = self.config.get("state_size_var", state_size_var)
    draw_colour = self.config.get("colour", colour)
    for state in states:
        cx, cy = local_to_pixel(state['label'])
        radius = state.get(size_key, 0)
        cv.Circle(canvas, (cx, cy), radius, draw_colour)
    return canvas
def square_areas(self, depth_array, states, local_to_pixel, on_colour = 0x00, off_colour = 0x99, on_thickness=2, off_thickness=1):
    """Draw one rectangle per state: emphasised when its mapped trigger
    is set, subdued otherwise.

    If a "threshold" section exists in the config, the depth data is
    first clipped to [min, min + range]. Config entries override the
    keyword defaults for colour and thickness.
    """
    threshold = self.config.get('threshold')
    if threshold:
        floor = threshold.get("min", 200)
        depth_array = np.clip(depth_array, floor, floor + threshold.get("range", 300))
    canvas = frame_convert.pretty_depth_cv(depth_array)
    for state in states:
        x, y, w, h = local_to_pixel(state['label'])
        triggered = state.get('mapped', {'trigger': False}).get('trigger', False)
        if triggered:
            colour = self.config.get('on_colour', on_colour)
            thickness = self.config.get('on_thickness', on_thickness)
        else:
            colour = self.config.get('off_colour', off_colour)
            thickness = self.config.get('off_thickness', off_thickness)
        cv.Rectangle(canvas, (x, y), (x + w, y + h), colour, thickness)
    return canvas
def binary_circles(self, depth_array, states, local_to_pixel, on_size = 50, on_colour = 0x00, off_size = 25, off_colour = 0x44):
    """Draw one circle per state: large/dark when the state's mapped
    trigger is set, small/light otherwise.

    If a "threshold" section exists in the config, the depth data is
    first clipped to [min, min + range]. Config entries override the
    keyword defaults for sizes and colours.
    """
    threshold = self.config.get('threshold')
    if threshold:
        floor = threshold.get("min", 200)
        depth_array = np.clip(depth_array, floor, floor + threshold.get("range", 300))
    canvas = frame_convert.pretty_depth_cv(depth_array)
    for state in states:
        cx, cy = local_to_pixel(state['label'])
        if state.get('mapped', {'trigger': False}).get('trigger', False):
            cv.Circle(canvas, (cx, cy),
                      self.config.get('on_size', on_size),
                      self.config.get('on_colour', on_colour))
        else:
            cv.Circle(canvas, (cx, cy),
                      self.config.get('off_size', off_size),
                      self.config.get('off_colour', off_colour))
    return canvas
def display_depth(self, data, timestamp):
    """Per-frame punch detector.

    Tracks the nearest pixel in the depth frame, estimates how fast it
    is approaching between frames, and registers a "punch" when that
    rate exceeds self.punch_threshold. A punch applies damage, plays
    sounds, and updates game_state; a cooldown suppresses repeats. When
    not fullscreen, the tracked point is drawn in a debug window.

    Returns (x, y) of the nearest pixel when a punch lands; otherwise
    falls off the end and returns None implicitly.
    """
    img = frame_convert.pretty_depth_cv(data)
    # Nearest point in the frame: argmin over the flattened array.
    min_index = data.argmin()
    width = data.shape[1]
    i, j = divmod(min_index, width)  # i = row, j = column
    min_val = int(data[i][j])
    if not self.current_step:
        # Step 0 of the cycle: skip detection this frame.
        pass
    elif self.prev_time is not None:
        # How much closer the nearest point got since the last sample.
        val_diff = min_val - self.prev_min_val
        # NOTE(review): shadows the callback's timestamp argument with
        # wall-clock time — presumably intentional; confirm.
        timestamp = time.time()
        time_diff = timestamp - self.prev_time
        # no div by zero and no negative time
        if self.punch_cooldown and time_diff > 0.5:
            # Cooldown expired: allow the next punch to register.
            self.punch_cooldown = False
            if self.game_state != 'death':
                self.game_state = 'idle'
        if not self.punch_cooldown:
            #print "val_diff: %d\t time_diff: %d" % (val_diff, time_diff)
            # Approach speed: depth change per second since last frame.
            acc = val_diff / (timestamp - self.last_time)
            #print "acc = % 04.3f" % acc
            self.prev_time = timestamp
            if acc > self.punch_threshold and (self.game_state != 'death' and self.game_state != 'sploded'):
                # Punch detected: enter cooldown and apply damage.
                self.punch_cooldown = True
                self.health = self.health - self.damage
                self.kebab_roar.play()
                if self.health > 0:
                    self.game_state = 'hit'
                else:
                    self.game_state = 'death'
                    self.explosion.play()
                return (j, i)
    else:
        # First usable frame: initialise the timing baseline.
        self.prev_time = time.time()
        self.last_time = time.time()
    self.current_step = (self.current_step + 1) % self.step_size
    self.last_time = time.time()
    self.prev_min_val = min_val
    if not self.fullscreen:
        # Debug view: red box around the tracked nearest point.
        pt1 = (j - 20, i - 20)
        pt2 = (j + 20, i + 20)
        cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
        cv.ShowImage('Depth', img)
        if cv.WaitKey(10) == 27:
            # NOTE(review): assigns a *local* keep_running — this cannot
            # affect a module-level flag without 'global keep_running';
            # confirm the intended shutdown mechanism.
            keep_running = False
def find_position(self): print "Kinect is trying to find the image" (kinect_depth,_), (rgb,_) = get_depth(), get_video() self.img = video_cv(rgb) depth_img = pretty_depth_cv(kinect_depth) position = self._get_pos(self.img) depth = self._get_depth(depth_img, debug=False) font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1) fps = 1/(time.time() - self.lasttime) s1 = "FPS:%.2f" % fps self.lasttime = time.time() cv.PutText(self.img,s1, (0,30),font, cv.CV_RGB(255, 0, 0)) dt = "Depth: %d" % depth if position: pt = "Pos: X=%d Y=%d" % (position[0], position[1]) else: pt = "Pos: N/A" cv.PutText(self.img, dt, (0,60),font, cv.CV_RGB(255, 0, 0)) cv.PutText(self.img, pt, (0,90),font, cv.CV_RGB(255, 0, 0)) offset = 120 for t in self.text: cv.PutText(self.img, t, (0,offset),font, cv.CV_RGB(255, 0, 0)) offset += 30 cv.Circle(self.img, (self.sp[0], self.sp[1]) , 10, cv.CV_RGB(0, 255, 0), 1) cv.ShowImage('RGB', self.img) #cv.SaveImage('RGB-%d.png' % (time.time()*100), self.img) #cv.ShowImage('DEPTH', depth_img) cv.WriteFrame(self.writer, self.img) cv.WaitKey(5) #cv.ShowImage('depth_mask', depth_mask) try: return (position[0], position[1], depth) except: return (None, None, None)
def get_video():
    """Fetch one IR-format video frame from the Kinect.

    When process_flag is set, additionally builds a processed frame: a
    fresh IR capture masked by an accumulated, binarised depth mask,
    stored in the module-level process_frame. A 3-pixel white vertical
    centre line is always drawn on the returned frame.

    Returns the sync_get_video tuple reversed, i.e. (timestamp, frame).
    (Removed: an unreachable second `return rgb_video` that followed the
    first return.)
    """
    global process_flag, process_frame, depth_frame
    rgb_video = freenect.sync_get_video(0, format=freenect.VIDEO_IR_8BIT)
    if process_flag:
        ir_feed = freenect.sync_get_video(0, format=freenect.VIDEO_IR_8BIT)
        ir_feed = ir_feed[1], ir_feed[0]
        # Accumulate 11 depth captures into one mask; values above 2046
        # are invalid sentinels and are zeroed first.
        depth_accumulator = freenect.sync_get_depth()[0]
        depth_accumulator[depth_accumulator > 2046] = 0
        for i in range(10):
            depth_accumulator = combine_depth_frames(
                depth_accumulator, freenect.sync_get_depth()[0])
        # Binarise: any observed depth becomes full intensity.
        depth_accumulator[depth_accumulator > 0] = 255
        # Keep IR only where the depth mask says something was seen.
        ir_feed = numpy.bitwise_and(depth_accumulator.astype(numpy.uint8),
                                    numpy.array(ir_feed[1])).astype(
            numpy.uint8)
        process_frame = frame_convert.pretty_depth_cv(ir_feed)
        process_flag = False
    # Paint a white 3-px centre line as an alignment aid; the centre
    # column is loop-invariant, so compute it once.
    centre = int(len(rgb_video[0][0]) / 2)
    for i in range(len(rgb_video[0])):
        rgb_video[0][i][centre - 1] = 255
        rgb_video[0][i][centre] = 255
        rgb_video[0][i][centre + 1] = 255
    print(len(rgb_video[0]))
    return rgb_video[::-1]
def display_result(depth, data, p, screentitle):
    """Finalise and show one frame in the named window.

    Depth frames are pretty-printed and, once four points have been
    clicked while no cubicle exists yet, trigger cubicle construction;
    RGB frames get on-screen instructions instead. The clicked points
    are drawn on both kinds of frame.
    """
    global cubic
    snapshot = np.array(data)
    if depth:
        frame = frame_convert.pretty_depth_cv(snapshot)
        if len(p) == 4 and len(cubic) == 0:
            make_cubicle(data)
    else:
        frame = frame_convert.video_cv(data)
    displaypoints(frame, p)
    # Annotate a copy so the drawn frame itself stays clean.
    shown = cv.CloneImage(frame)
    if not depth:
        print_instructions(shown)
    cv.ShowImage(screentitle, shown)
def get_video():
    """Return one Kinect IR video frame as (timestamp, frame).

    Side effects: when process_flag is set, captures a fresh IR frame,
    masks it with a binarised accumulation of 11 depth captures, and
    stores the converted result in the module-level process_frame.
    Always draws a 3-pixel-wide white vertical line down the middle of
    the returned frame. The dead `return rgb_video` that previously
    followed the real return has been dropped.
    """
    global process_flag, process_frame, depth_frame
    rgb_video = freenect.sync_get_video(0, format=freenect.VIDEO_IR_8BIT)
    if process_flag:
        ir_feed = freenect.sync_get_video(0, format=freenect.VIDEO_IR_8BIT)
        ir_feed = ir_feed[1], ir_feed[0]
        # Build the depth mask: first capture, invalid (>2046) zeroed,
        # then ten more captures folded in.
        depth_accumulator = freenect.sync_get_depth()[0]
        depth_accumulator[depth_accumulator > 2046] = 0
        for i in range(10):
            depth_accumulator = combine_depth_frames(depth_accumulator,
                                                     freenect.sync_get_depth()[0])
        depth_accumulator[depth_accumulator > 0] = 255
        # Apply the mask to the IR image.
        ir_feed = numpy.bitwise_and(depth_accumulator.astype(numpy.uint8),
                                    numpy.array(ir_feed[1])).astype(numpy.uint8)
        process_frame = frame_convert.pretty_depth_cv(ir_feed)
        process_flag = False
    # Centre column is loop-invariant: compute once, then paint the
    # 3-px alignment line.
    centre = int(len(rgb_video[0][0]) / 2)
    for i in range(len(rgb_video[0])):
        rgb_video[0][i][centre - 1] = 255
        rgb_video[0][i][centre] = 255
        rgb_video[0][i][centre + 1] = 255
    print(len(rgb_video[0]))
    return rgb_video[::-1]
def get_depth():
    """Capture one depth frame, print two debug readings, and return
    the frame converted for display."""
    depth_data = freenect.sync_get_depth()[0]
    # Debug output: metric depth at the frame centre and the raw
    # lookup-table value for the watched pixel.
    print(depth_in_meters_at_pixel(320, 240, depth_data))
    print(lookup_table_depth[int(pixel_to_look_at)])
    frame = frame_convert.pretty_depth_cv(depth_data)
    return frame
def get_data(self):
    """Capture one depth and one video frame, convert both for display,
    and flip each around both axes in place (flipMode=-1); presumably
    the camera is mounted inverted — confirm."""
    depth_raw = freenect.sync_get_depth()[0]
    video_raw = freenect.sync_get_video()[0]
    self.raw_depth_image = frame_convert.pretty_depth_cv(depth_raw)
    self.raw_video_image = frame_convert.video_cv(video_raw)
    cv.Flip(self.raw_depth_image, None, -1)
    cv.Flip(self.raw_video_image, None, -1)
def get_depth(ind): (d, _) = freenect.sync_get_depth(ind) print "depth: ", type(d) return frame_convert.pretty_depth_cv(d)
def get_depth():
    """Grab one Kinect depth frame, converted for display."""
    return frame_convert.pretty_depth_cv(freenect.sync_get_depth()[0])
def get_depth():
    """Grab one depth frame via opennpy, converted for display."""
    raw = opennpy.sync_get_depth()[0]
    return frame_convert.pretty_depth_cv(raw)
def simplify_cv(data):
    """Convert raw depth data into a display-ready OpenCV image."""
    return frame_convert.pretty_depth_cv(data)
def display_depth(dev, data, timestamp):
    """Depth-frame callback: show the frame; ESC requests shutdown via
    the shared state."""
    frame = frame_convert.pretty_depth_cv(data)
    cv.ShowImage('Depth', frame)
    key = cv.WaitKey(10)
    if key == 27:  # ESC
        shared_state.terminate()
def display_depth(dev, data, timestamp):
    """Depth-frame callback: show the frame; ESC clears the module-level
    run flag so the main loop exits."""
    global keep_running
    frame = frame_convert.pretty_depth_cv(data)
    cv.ShowImage('Depth', frame)
    if cv.WaitKey(10) == 27:  # ESC
        keep_running = False
def show_depth():
    """Capture a depth frame into the module-level depth_image and show
    a resized copy in the Depth window."""
    global depth_image
    raw, timestamp = freenect.sync_get_depth()
    depth_image = frame_convert.pretty_depth_cv(raw)
    cv.ShowImage('Depth', resize_image(depth_image))
def get_depth():
    """Capture one depth frame and save it as a PGM file named after the
    current unix time (microsecond precision)."""
    filename = "%1.6f" % time.time() + ".pgm"
    frame = frame_convert.pretty_depth_cv(freenect.sync_get_depth()[0])
    cv.SaveImage(filename, frame)
def getDepthData(self):
    """Return the latest Kinect depth frame converted for OpenCV display."""
    raw_depth, _ = freenect.sync_get_depth()
    return fc.pretty_depth_cv(raw_depth)
def get_depth():
    """Return a display-converted snapshot of the shared depth frame.

    Deep-copies first so conversion cannot touch the shared buffer.
    """
    snapshot = copy.deepcopy(depth)
    return frame_convert.pretty_depth_cv(snapshot)
def get_depth(ind):
    """Return the display-converted depth frame from device *ind*."""
    raw = freenect.sync_get_depth(ind)[0]
    return frame_convert.pretty_depth_cv(raw)
# Window for the RGB feed; the depth window is created implicitly by
# ShowImage below.
cv.NamedWindow('Video')
print('Press ESC in window to stop')


def get_depth():
    # Most recent raw depth frame captured by the loop below.
    return depth


def get_video():
    # Most recent (converted) video frame captured by the loop below.
    return video


# Main capture loop: display depth and video until ESC is pressed;
# 's' pickles the current raw depth frame and saves a JPEG.
while 1:
    depth, timestamp = freenect.sync_get_depth()
    video, timestemp = freenect.sync_get_video()
    # Convert a deep copy so the raw depth stays pickleable untouched.
    cdepth = frame_convert.pretty_depth_cv(copy.deepcopy(depth))
    video = frame_convert.video_cv(video)
    cv.ShowImage('Depth', cdepth)
    cv.ShowImage('Video', video)
    key = cv.WaitKey(10)
    if key == 27:  # escape
        break
    elif key == 115:  # lower case s
        print 'scraping a new depth at', timestamp
        pickle.dump(depth, open('depth.pickle', 'w'))
        # NOTE(review): this saves the *video* frame as depth.jpg —
        # confirm it is not meant to be cdepth.
        cv.SaveImage('depth.jpg', video)
def get_depth():
    """Capture one depth frame, dump its array repr to Output.txt, and
    return the frame converted for display.

    Captures a single frame and reuses it for both the dump and the
    return value — the original grabbed two separate frames, so the
    file contents never matched the returned image — and uses a context
    manager so the file handle is always closed.
    """
    depth_data = freenect.sync_get_depth()[0]
    with open("Output.txt", "w") as text_file:
        text_file.write("%s" % depth_data)
    return frame_convert.pretty_depth_cv(depth_data)
def get_depth():
    """Grab one Kinect depth frame, converted for display."""
    raw_depth = freenect.sync_get_depth()[0]
    return frame_convert.pretty_depth_cv(raw_depth)
def next_frame(self):
    """Capture and convert the next depth/video frame pair, mirroring
    each horizontally in place (flipMode=1 flips around the y-axis)."""
    depth_raw = freenect.sync_get_depth()[0]
    video_raw = freenect.sync_get_video()[0]
    self.raw_depth = frame_convert.pretty_depth_cv(depth_raw)
    self.raw_video = frame_convert.video_cv(video_raw)
    cv.Flip(self.raw_depth, None, 1)
    cv.Flip(self.raw_video, None, 1)