def run(self):
    while not self.stop_event.is_set():
        try:
            image = self.cap.get()
            with chainer.using_config('autotune', True), \
                    chainer.using_config('use_ideep', 'auto'):
                feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
            self.queue.put((image, feature_map), timeout=1)
        except queue.Full:
            pass
        except queue.Empty:
            pass
def run(self):
    while not self.stop_event.is_set():
        try:
            image, count = self.cap.get(2)
            print('pred2 getting from cap:' + str(count) + '\n')
            image = cv2.resize(image, self.insize)
            with chainer.using_config('autotune', True), \
                    chainer.using_config('use_ideep', 'auto'):
                feature_map = get_feature(
                    self.model, image.transpose(2, 0, 1).astype(np.float32))
            self.queue.put((image, feature_map), timeout=100)
            print("pred2 queue: ", self.queue.qsize())
        except queue.Full:
            pass
        except queue.Empty:
            pass
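# Both run() variants above push their results into self.queue with a timeout and
# silently drop the frame on queue.Full, so the consumer only has to pull at its own
# pace. Below is a minimal sketch of such a consumer loop, under the assumption that
# the queue is a multiprocessing.Queue shared with another process; the names
# consume_features, result_queue, and stop_event are hypothetical, not part of this
# codebase.
import queue


def consume_features(result_queue, stop_event):
    while not stop_event.is_set():
        try:
            image, feature_map = result_queue.get(timeout=1)
        except queue.Empty:
            continue  # producer dropped a frame or is still running inference
        # downstream use goes here; printing the shapes is only a placeholder
        print('consumed frame', getattr(image, 'shape', None),
              'feature map', getattr(feature_map, 'shape', None))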
def run(self):
    model = create_model(self.modelargs, self.config)
    logger.info('{} started at PID {} - 224x224 model loaded'.format(self.name, self.pid))
    if chainer.backends.cuda.available:
        model = model.to_gpu(1)
    self.model = model
    self.insize = (224, 224)
    self.pipe_end.send(True)  # model loaded sign
    count = 0
    run = False
    if self.pipe_end.recv():
        logger.info("start running 224x224")
        run = True
    # Run the first forward pass so Chainer's autotune settles,
    # then wait for the bigger model to complete its first forward pass.
    image, count = self.queue_in.get(timeout=1)
    image = cv2.resize(image, self.insize)
    with chainer.using_config('autotune', True), \
            chainer.using_config('use_ideep', 'auto'):
        feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
    # self.queue.put((image, feature_map), timeout=1)
    self.queue.put(feature_map, timeout=1)
    if self.pipe_end.recv() == 2:  # blocks until the 1920x1080 model reports its first forward pass
        pass
    while not self.stop_event.is_set():
        try:
            if run:
                t_start = time.time()
                # image, count = self.cap.get(2)
                image, count = self.queue_in.get(timeout=1)
                self.queue_get_time += time.time() - t_start
                try:
                    humans = self.queue_comm.get(timeout=1)
                    # if humans: print(humans)
                    cropped_image_set = self.cut_human(image, humans)
                except queue.Empty:
                    logger.info('humans queue empty')
                    cropped_image_set = self.random_crop(image, 20)
                feature_map = self.model.predict_video(cropped_image_set)
                # logger.info('get img from queue took {} sec'.format(time.time() - t_start))
                # print('pred2 getting from cap:' + str(count) + '\n')
                image = cv2.resize(image, self.insize)
                # print(cropped_image_set)
                t_start = time.time()
                with chainer.using_config('autotune', True), \
                        chainer.using_config('use_ideep', 'auto'):
                    feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
                if not self.queue.empty():
                    self.inf_time = time.time() - t_start
                # self.queue.put((image, feature_map), timeout=1)
                self.queue.put(feature_map, timeout=1)
                # maybe this does not need to be a queue, just internal storage of the process
                # logger.debug("pred2 queue: {}".format(self.queue.qsize()))
            else:
                logger.info("waiting for other model to load....")
                if self.pipe_end.recv() == 'stop':
                    print("STOP received via pipe")
                if self.queue_in.qsize() == 0 and self.queue.qsize() > 0:
                    self.pipe_end.send('stop')
                    self.stop()
        except queue.Full:
            logger.info("queue full")
        except queue.Empty:
            logger.info("queue empty")
            if self.queue.qsize() > 0:  # self.pipe_end.recv() == 'stop':
                self.pipe_end.send('stop')
                self.stop()
        except cv2.error:
            logger.info("CV2 error")
            logger.info('{} exiting'.format(self.name))
            self.pipe_end.send('stop')
            time.sleep(1)
            self.stop()
        except KeyboardInterrupt:
            self.pipe_end.send('stop')
            self.stop()
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
            self.pipe_end.send('stop')
            time.sleep(1)
            self.stop()
            raise
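# cut_human() and random_crop() are methods of this predictor class and their bodies
# are not shown here. Purely as an illustration of the fallback used when no humans
# arrive on queue_comm, the hypothetical helper below produces n random fixed-size
# crops from an HxWxC frame; it is not the project's actual random_crop implementation.
import numpy as np


def random_crops(image, n, size=224):
    """Return a list of n random size x size crops from an HxWxC image."""
    h, w = image.shape[:2]
    crops = []
    for _ in range(n):
        top = np.random.randint(0, max(1, h - size))
        left = np.random.randint(0, max(1, w - size))
        crops.append(image[top:top + size, left:left + size])
    return crops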
def run(self):
    model = create_model(self.modelargs, self.config)
    logger.info('{} started at PID {} - 1920x1080 model loaded'.format(self.name, self.pid))
    if chainer.backends.cuda.available:
        model = model.to_gpu(0)
    self.model = model
    self.insize = (1920, 1080)
    self.pipe_end.send(True)  # model loaded sign
    count = 0
    run = False
    if self.pipe_end.recv():
        logger.info("start running 1920x1080")
        run = True
    while not self.stop_event.is_set():
        try:
            if run and not self.pipe_end.poll():
                t_start = time.time()
                # image, count = self.cap.get(1)
                image, count = self.queue_in.get(timeout=1)
                self.queue_get_time += time.time() - t_start
                # logger.info('get img from queue took {} sec'.format(time.time() - t_start))
                # print('pred1 getting from cap:' + str(count) + '\n')
                image = cv2.resize(image, self.insize)
                t_start = time.time()
                with chainer.using_config('autotune', True), \
                        chainer.using_config('use_ideep', 'auto'):
                    feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
                if not self.queue.empty():
                    self.inf_time += time.time() - t_start
                # self.queue.put((image, feature_map), timeout=1)
                self.queue.put(feature_map, timeout=1)
                humans = get_humans_by_feature(model, feature_map, self.detection_threshold, self.min_num_keypoints)
                if len(humans) > 0:
                    self.queue_comm.put(humans, timeout=1)
                    # cropped_image_set = self.cut_human(image, humans)
                # logger.debug("pred1 queue {}: ".format(self.queue.qsize()))
                if self.queue.qsize() == 1:
                    self.pipe_end.send(2)  # sign that the big model passed its first forward pass
                ## BLOCK ON PURPOSE ##
                block = False
                while block:
                    time.sleep(2)
                ######
            else:
                # logger.info("waiting for other model to load....")
                if self.pipe_end.recv() == 'stop':
                    logger.info("STOP received via pipe")
                    self.stop()
                if self.queue_in.qsize() == 0 and self.queue.qsize() > 0:
                    self.pipe_end.send('stop')
                    self.stop()
        except queue.Full:
            logger.info("queue full")
        except queue.Empty:
            logger.info("queue empty")
            if self.queue.qsize() > 0:
                self.stop()
        except cv2.error:
            logger.info("CV2 error")
            self.pipe_end.send('stop')
            time.sleep(1)
            self.stop()
        except KeyboardInterrupt:
            self.pipe_end.send('stop')
            self.stop()
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
            self.pipe_end.send('stop')
            time.sleep(1)
            self.stop()
            raise
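# The two run() methods above cooperate over a single multiprocessing.Pipe: each side
# sends True once its model is loaded, waits for the peer's True before entering its
# main loop, the 1920x1080 side sends 2 after its first forward pass, and either side
# sends 'stop' so the other can shut down. The toy below models only the load/stop
# handshake; the class name _ToyPredictor, the n_iters argument, and the sleep standing
# in for inference are assumptions for illustration, not code from this project.
import multiprocessing as mp
import time


class _ToyPredictor(mp.Process):
    """Stand-in for one predictor process; only the pipe handshake is modelled."""

    def __init__(self, name, pipe_end, stop_event, n_iters):
        super().__init__(name=name)
        self.pipe_end = pipe_end
        self.stop_event = stop_event
        self.n_iters = n_iters

    def run(self):
        self.pipe_end.send(True)   # "model loaded" sign
        self.pipe_end.recv()       # wait for the peer's "model loaded" sign
        for _ in range(self.n_iters):
            if self.stop_event.is_set():
                break
            if self.pipe_end.poll() and self.pipe_end.recv() == 'stop':
                break              # peer asked us to shut down
            time.sleep(0.1)        # real code would pull a frame and run inference here
        self.pipe_end.send('stop')  # tell the peer we are done


if __name__ == '__main__':
    stop_event = mp.Event()
    end_big, end_small = mp.Pipe()
    pred1 = _ToyPredictor('pred1', end_big, stop_event, n_iters=5)       # 1920x1080 side
    pred2 = _ToyPredictor('pred2', end_small, stop_event, n_iters=1000)  # 224x224 side
    pred1.start()
    pred2.start()
    pred1.join()
    pred2.join()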