def run(self):
    """Capture-thread main loop.

    Starts the dmcam capture stream, then repeatedly pulls one frame,
    converts it to distance/gray maps stored in the shared globals
    (guarded by ``f_mutex``), and notifies listeners via
    ``frameReadySignal``.  External code stops the loop by setting
    ``self.run = False``; ``captureDoneSignal`` is emitted once on exit.
    """
    global f_dist, f_gray, frame_data, f_mutex

    print(" Start capture ...")
    dmcam.cap_start(dev)  # blocking code goes here
    # NOTE: this rebinds the instance attribute `run` from the method to
    # a bool; callers stop the loop with `self.run = False` (existing
    # external contract, left unchanged).
    self.run = True
    while self.run:
        if dev is None:
            # Device handle not ready: back off and retry.
            # FIX: original fell through after the sleep and called
            # cap_get_frames(None, ...); `continue` was missing.
            time.sleep(0.5)
            continue
        # get one frame (blocking wait inside the SDK)
        finfo = dmcam.frame_t()
        ret = dmcam.cap_get_frames(dev, 1, frame_data, finfo)
        if ret > 0:
            w = finfo.frame_info.width
            h = finfo.frame_info.height
            f_mutex.lock()
            try:
                # Publish distance/gray maps; invalidate them when the
                # SDK returns fewer pixels than a full w*h image.
                dist_cnt, f_dist = dmcam.frame_get_distance(
                    dev, w * h, frame_data, finfo.frame_info)
                gray_cnt, f_gray = dmcam.frame_get_gray(
                    dev, w * h, frame_data, finfo.frame_info)
                if dist_cnt != w * h:
                    f_dist = None
                if gray_cnt != w * h:
                    f_gray = None
            finally:
                # FIX: unlock even if the SDK raises, so the consumer
                # thread can never deadlock on f_mutex.
                f_mutex.unlock()
            self.frameReadySignal.emit(0)
    # self.run is False when the loop exits, so this always emits 0
    # (original behavior, kept as-is).
    self.captureDoneSignal.emit(int(self.run))
def poll_frame(self):
    """Poll one frame from the camera and cache its distance/gray images.

    On success the (h, w)-shaped distance and gray images are appended to
    ``self.frame_dist`` / ``self.frame_gray``, each kept to at most 4
    entries (oldest dropped), and ``self.frame_cnt`` is incremented.

    Returns True when a frame was retrieved, False otherwise.
    """
    info = dmcam.frame_t()
    n_got = dmcam.cap_get_frame(self.dev, self.frame_data, info)
    got_frame = n_got > 0
    if got_frame:
        width = info.frame_info.width
        height = info.frame_info.height
        npix = width * height
        _, dist_img = dmcam.frame_get_distance(
            self.dev, npix, self.frame_data, info.frame_info)
        _, gray_img = dmcam.frame_get_gray(
            self.dev, npix, self.frame_data, info.frame_info)
        # Maintain both rolling buffers the same way: append the new
        # image, then trim from the front past 4 entries.
        for buf, img in ((self.frame_dist, dist_img),
                         (self.frame_gray, gray_img)):
            buf.append(img.reshape(height, width))
            if len(buf) > 4:
                buf.pop(0)
        self.frame_cnt += 1
    return got_frame
def tof_main(head_alarm):
    """ToF main step: grab one frame, classify it, return display image + label.

    Args:
        head_alarm: when 1, biases class 1 of the CNN prediction upward
            by 0.2 before taking the argmax.

    Returns:
        (img_show, show_label) on success; implicitly None when no frame
        was captured or the distance data was incomplete (pre-existing
        contract, kept — callers unpacking the tuple must handle it).
    """
    finfo = dmcam.frame_t()
    ret = dmcam.cap_get_frames(dev, 1, f, finfo)
    if ret > 0:
        w = finfo.frame_info.width
        h = finfo.frame_info.height
        dist_cnt, dist = dmcam.frame_get_distance(dev, w * h, f,
                                                  finfo.frame_info)
        # FIX: dropped the frame_get_gray call — its result (`gray`/`Z`)
        # was never used; the conversion has no other effect here.
        if dist_cnt == w * h:
            # Distance is in metres; scale to millimetres, then compress
            # to 8-bit (1/16 mm per level) for the classifier input.
            dep0 = dist.reshape(h, w) * 1000
            img_dep = cv2.convertScaleAbs(dep0, None, 1 / 16)
            # Trim the noisy left/right borders, replicate to 3 channels.
            img_to_pred = img_dep[:, cut_edge:320 - cut_edge]
            img_to_pred = cv2.merge([img_to_pred] * 3)
            img_show = img_to_pred.copy()
            predict_label = 0
            if use_CNN_model:
                batch = np.expand_dims(img_to_pred, axis=0)
                y_pred = model.predict(batch)[0]
                if head_alarm == 1:
                    y_pred[1] += 0.2
                predict_label = np.argmax(y_pred)
                # Reject low-confidence predictions (threshold 0.55).
                if np.max(y_pred) < 0.55:
                    predict_label = 0
            # Debounce the raw prediction before reporting it.
            show_label = dl.after_delay(predict_label)
            return img_show, show_label
# get one frame finfo = dmcam.frame_t() ret = dmcam.cap_get_frames(dev, 1, f, finfo) # print("get %d frames" % ret) if ret > 0: print(" frame @ %d, %dx%d (%d)" % (finfo.frame_info.frame_idx, finfo.frame_info.width, finfo.frame_info.height, finfo.frame_info.frame_size)) # print the first 16bytes of the frame # print([hex(n) for n in f][:16]) w = finfo.frame_info.width h = finfo.frame_info.height gray_cnt = 0 dist_cnt, dist = dmcam.frame_get_distance(dev, w * h, f, finfo.frame_info) gray_cnt, gray = dmcam.frame_get_gray(dev, w * h, f, finfo.frame_info) if dist_cnt != w * h: dist = None if gray_cnt != w * h: gray = None # dist = dmcam.raw2dist(int(len(f) / 8), f) # gray = dmcam.raw2gray(int(len(f) / 4), f) # amb = dmcam.raw2amb(int(len(f) / 4), f) # --- show in TL sub-window --- if gray is not None: Z = gray.reshape(h, w, 1) Z = (255.0 * Z / Z.max()).astype(int)
def get_frames(self):
    """Grab one depth frame from the dmcam ToF camera and run gesture recognition.

    One call performs: capture a frame, (re)apply the auto-integration
    filter, recover from capture errors by restarting/reconnecting the
    device, then — on a complete distance image — run the static and
    dynamic (trajectory) gesture recognizers and draw results on
    ``self.viewer``.

    Returns:
        False when the viewer reports a 'quit' event, True otherwise
        (and None on the early "device not initialized" path).
    """
    if self.dev is None:
        log.warning("Device not initialized, retry to capture in 0.5s!")
        time.sleep(0.5)
        return
    # Raw frame buffer: 320x240 pixels, 4 phases, 2 bytes per sample
    # — presumably matching the sensor's raw output size; confirm.
    f = bytearray(320 * 240 * 4 * 2)
    finfo = dmcam.frame_t()
    log.debug("dmcam.cap_get_frames")
    ret = dmcam.cap_get_frames(self.dev, 1, f, finfo)
    # NOTE(review): width/height are read before checking ret; on a
    # failed capture these come from an unfilled frame_t.
    w = finfo.frame_info.width
    h = finfo.frame_info.height
    # Re-enable the auto-integration-time filter every call
    # (sat_ratio = 5 — saturation ratio target for auto exposure).
    intg_auto_arg = dmcam.filter_args_u()
    intg_auto_arg.sat_ratio = 5
    if not dmcam.filter_enable(
            self.dev, dmcam.DMCAM_FILTER_ID_AUTO_INTG, intg_auto_arg,
            sys.getsizeof(intg_auto_arg)):
        print(" enable AUTO INTG filter (sat_ratio=%d) failed" %
              intg_auto_arg.sat_ratio)
    if ret <= 0:
        # Capture failed: pick a recovery strategy by error code.
        if ret == -10:
            # -10: USB disconnect — full close/init/start cycle.
            log.warning("dmcam USB cable unplugged, try to reconnect!")
            self._close_dmcam()
            self._init_dmcam()
            self._start_dmcam()
        elif ret == 0:
            # 0: no frame available; after 20 consecutive empty polls,
            # restart the capture stream.
            self.empty_frame_cnt += 1
            if self.empty_frame_cnt > 20:
                log.warning("dmcam capture timeout, restart capture")
                self._stop_dmcam()
                self._start_dmcam()
                self.empty_frame_cnt = 0
        else:
            # Any other negative code: restart capture.
            log.warning("dmcam capture error, errono:{}".format(ret))
            self._stop_dmcam()
            self._start_dmcam()
    else:
        self.empty_frame_cnt = 0
        print()  # NOTE(review): stray blank print — likely leftover debug
        dist_cnt, dist = dmcam.frame_get_distance(self.dev, w * h, f,
                                                  finfo.frame_info)
        # gray_cnt, gray = dmcam.frame_get_gray(self.dev, w * h, f, finfo.frame_info)
        # gray_data = gray.astype(np.uint16)
        # gray = ((gray/16).astype(np.uint8)).reshape(240,320)
        if dist_cnt == w * h:
            # Distance (metres) -> uint16 millimetres, 240x320 image.
            data = (dist * 1000).astype(np.uint16)
            frame = data.reshape(240, 320)
            cv2.imshow('frame', cv2.convertScaleAbs(frame, None, 1 / 20))
            cv2.waitKey(1)

            # ## static gesture recognition
            img_s = frame.copy()
            result_s, result_str = static(img_s)
            # Smooth the static result over a 5-sample window.
            self.result_list = update_win(self.result_list, result_s,
                                          win_len=5)

            ## dynamic (trajectory) gesture recognition
            img_d = frame.copy()
            img_d = img_d.astype(np.float32) / 10000
            self.algo = mainloop_trk(self.algo, img_d, self.viewer, self.bg)
            self.viewer.put_text(str('手势识别结果为:'), 150, 1000)
            if result_s > 0 and self.algo.decision == 'point':
                # A static gesture was detected while the tracker sees a
                # stationary hand: show the static result.
                if self.ENABLE_VIEW:
                    # if len(self.result_list)>0:
                    #     counts = np.bincount(self.result_list)
                    #     idx = np.argmax(counts)
                    #     if idx > 0:
                    #         self.viewer.put_text(str(label[idx]),200,1050)
                    self.viewer.put_text(str(result_str), 200, 1000)
                    # final_result = result_str
            else:
                # Otherwise show the tracked hand position and the
                # recognized trajectory pattern.
                x, y = self.algo.xlist[self.algo.idx], self.algo.ylist[
                    self.algo.idx]
                if self.ENABLE_VIEW:
                    if self.algo.trace_pattern['type'] != 'point':
                        # Map tracker coords to viewer coords
                        # (scale 2, offsets 400/165 — display layout).
                        self.viewer.draw_circle(int(x * 2 + 400),
                                                int(y * 2 + 165),
                                                r=5, line_wid=5)
                        draw_trace_mode(
                            self.viewer,
                            mode=self.algo.trace_pattern['type'],
                            pos=self.algo.trace_pattern['param'])
                    if self.algo.trace_pattern['type'] == 'circle':
                        self.viewer.put_text(str('转圈'), 215, 1050)
                    elif self.algo.trace_pattern['type'] == 'h-line':
                        self.viewer.put_text(str('水平移动'), 215, 1050)
                    elif self.algo.trace_pattern['type'] == 'v-line':
                        self.viewer.put_text(str('竖直移动'), 215, 1050)
                    # self.result_list=[]
            # if self.algo.trace_pattern['type'] == 'point':
            #     final_result = 'None'
            # else:
            #     final_result = self.algo.trace_pattern['type']
            # if final_result == "circle":
            #     pos = self.algo.trace_pattern['param']
            #     update_win(pos_win, pos, win_len=3)
            #     if pos_win[0] < pos_win[1] and pos_win[1]<pos_win[2]:
            #         final_result = "c-circle"
            #     elif pos_win[0] > pos_win[1] and pos_win[1] > pos_win[2]:
            #         final_result = "circle"
            # if final_result == "h-line":
            #     pos = self.algo.trace_pattern['param']
            #     update_win(pos_win, pos, win_len=3)
            #     if pos_win[0] < pos_win[1] and pos_win[1]<pos_win[2]:
            #         final_result = "right"
            #     elif pos_win[0] > pos_win[1] and pos_win[1] > pos_win[2]:
            #         final_result = "left"
            # if final_result == "v-line":
            #     pos = self.algo.trace_pattern['param']
            #     update_win(pos_win, pos, win_len=3)
            #     if pos_win[0] < pos_win[1] and pos_win[1]<pos_win[2]:
            #         final_result = "down"
            #     elif pos_win[0] > pos_win[1] and pos_win[1] > pos_win[2]:
            #         final_result = "up"
    # refresh the on-screen display and handle window events
    if self.ENABLE_VIEW:
        self.viewer.update()
        evt, param = self.viewer.poll_evt()
        if evt == 'quit':
            return False
    return True