def video(args):
    global net_img_g, net_out_g, net_thread_start
    h, w = LPD.size

    if args.trt:
        # TensorRT inference can't run in a thread
        import numpy as np
        from yolo_modules.tensorrt_module import do_inference_wrapper
        from yolo_modules.tensorrt_module import get_engine_wrapper

        engine_wrapper = get_engine_wrapper(args.version)
        net_out_shape = (1, LPD.LP_slice_point[-1],
                         h // 2**LPD.num_downsample,
                         w // 2**LPD.num_downsample)
    else:
        import mxnet
        # mx_resize = mxnet.image.ForceResizeAug((w, h), interp=2)  # not always available
        net = yolo_gluon.init_executor(LPD.export_file, (h, w), LPD.ctx[0])
        yolo_gluon.test_inference_rate(net, (1, 3, h, w), cycles=100, ctx=LPD.ctx[0])

    rospy.init_node("LP_Detection_Video_Node", anonymous=True)
    threading.Thread(target=_video_thread).start()

    net_thread_start = True
    while not rospy.is_shutdown():
        if 'img_g' not in globals() or img_g is None:
            print('Wait For Image')
            time.sleep(1.0)
            continue

        if not net_thread_start:
            time.sleep(0.01)
            continue

        try:
            net_start_time = time.time()  # tic

            net_img = img_g.copy()  # (480, 640, 3), np.float32
            net_img = cv2.resize(net_img, (w, h))  # (320, 512, 3)

            if args.trt:
                # if cuMemcpyHtoDAsync failed: invalid argument, check input image size
                trt_outputs = do_inference_wrapper(engine_wrapper, net_img)
                net_out = np.array(trt_outputs).reshape(net_out_shape)  # list to np.array
            else:
                nd_img = yolo_gluon.cv_img_2_ndarray(net_img, LPD.ctx[0])  # , mxnet_resize=mx_resize)
                net_out = net.forward(is_train=False, data=nd_img)[0]
                net_out[-1].wait_to_read()

            yolo_gluon.switch_print(time.time() - net_start_time, video_verbose)

            net_img_g = net_img
            net_out_g = net_out
            net_thread_start = False

        except Exception as e:
            print(global_variable.red)
            rospy.signal_shutdown('main_thread Error')
            print(e)
            print(global_variable.reset_color)
            sys.exit(0)
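# --- Hedged usage sketch (not part of the original source) --- #
# A minimal example of how video() might be driven from a script entry point.
# The argument names below (version, --trt) are assumptions inferred from how
# args is used above; the project's real argument parser may differ.
import argparse


def _parse_video_args():
    parser = argparse.ArgumentParser(description='LP detection video node')
    parser.add_argument('version',
                        help='model folder containing spec.yaml and export/')
    parser.add_argument('--trt', action='store_true',
                        help='run the TensorRT engine instead of the MXNet executor')
    return parser.parse_args()


if __name__ == '__main__':
    video(_parse_video_args())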
    def __init__(self, args):
        # -------------------- Load args -------------------- #
        self.ctx = yolo_gluon.get_ctx(args.gpu)
        self.export_folder = os.path.join(args.version, 'export')

        # -------------------- Load network spec -------------------- #
        spec_path = os.path.join(args.version, 'spec.yaml')
        with open(spec_path) as f:
            spec = yaml.safe_load(f)

        for key in spec:
            setattr(self, key, spec[key])

        self.all_anchors = nd.array(self.all_anchors)  # anchors in each pyramid layer
        self.num_class = len(self.classes)
        self._init_step()
        self._init_area()
        self._init_syxhw()

        self.version = args.version

        # -------------------- Load "Executor" !!! -------------------- #
        if args.mode in ['video', 'valid', 'valid_Nima']:
            self.trt = args.trt
            if args.trt:
                from yolo_modules.tensorrt_module import get_engine_wrapper
                self.net = get_engine_wrapper(args.version)
            else:
                self.net = yolo_gluon.init_executor(self.export_folder,
                                                    self.size,
                                                    self.ctx[0],
                                                    fp16=self.use_fp16)
            return

        # -------------------- Use gluon Block !!! -------------------- #
        # Export/Train/Render_And_Train
        self.record = args.record
        self._init_net(spec, args.weight)

        if args.mode in ['train', 'render_and_train']:
            self._init_train()
    def __init__(self, args):
        # -------------------- Load args -------------------- #
        self.ctx = yolo_gluon.get_ctx(args.gpu)
        self.export_folder = os.path.join(args.version, 'export')

        # -------------------- Load network spec -------------------- #
        spec_path = os.path.join(args.version, 'spec.yaml')
        with open(spec_path) as f:
            spec = yaml.safe_load(f)

        for key in spec:
            setattr(self, key, spec[key])

        self.all_anchors = nd.array(self.all_anchors)
        self.num_class = len(self.classes)
        self._init_step()
        self._init_area()
        self._init_syxhw()

        self.version = args.version

        # -------------------- Load "Executor" !!! -------------------- #
        if args.mode in ['video', 'valid', 'valid_Nima']:
            self.net = yolo_gluon.init_executor(self.export_folder,
                                                self.size,
                                                self.ctx[0],
                                                fp16=self.use_fp16)

        # -------------------- Use gluon Block !!! -------------------- #
        elif args.mode in ['export', 'train', 'render_and_train']:
            self.record = args.record
            self._init_net(spec, args.weight)

            if args.mode != 'export':
                self._init_train()
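# --- Hedged spec illustration (not part of the original source) --- #
# __init__ above copies every top-level key of spec.yaml onto the instance with
# setattr, so the file effectively defines attributes such as size, classes,
# all_anchors and use_fp16. The values below are placeholders to show the shape
# of that loading loop, not the project's real spec.
import yaml

_example_spec = """
size: [320, 512]          # network input (height, width)
classes: [LP]             # num_class = len(classes)
all_anchors:              # anchors per pyramid layer, later wrapped in nd.array()
  - [[0.25, 0.55]]
use_fp16: false           # forwarded to yolo_gluon.init_executor()
"""


class _SpecHolder(object):
    pass


_holder = _SpecHolder()
for _key, _value in yaml.safe_load(_example_spec).items():
    setattr(_holder, _key, _value)   # same pattern as the loop in __init__

print(_holder.size, _holder.classes)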
LR = 0.001
batch_size = 100
record_step = 1000

num_init_features = 32
growth_rate = 12
block_config = [6, 12, 24]

area = size[1] / 2**(len(block_config) + 1)
score_weight = 0.1
class_weight = 1.0

export_file = args.version + '/export/'

if args.mode == 'video':
    executor = yolo_gluon.init_executor(export_file, size, ctx[0])

    bridge = CvBridge()
    rospy.init_node("OCR_node", anonymous=True)
    pub = rospy.Publisher('YOLO/OCR', String, queue_size=0)
    rospy.Subscriber('/YOLO/clipped_LP', Image, _image_callback)

    print('Image Topic: /YOLO/clipped_LP')
    print('checkpoint file: %s' % export_file)

    r = rospy.Rate(30)
    while not rospy.is_shutdown():
        r.sleep()

else:
    net = OCRDenseNet(num_init_features, growth_rate, block_config, classes=34)
    backup_dir = os.path.join(args.version, 'backup')
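# --- Hedged test-harness sketch (not part of the original source) --- #
# Publishes a synthetic crop to /YOLO/clipped_LP so the OCR node above has input
# to consume. The topic name and 'bgr8' encoding match the subscriber above; the
# image itself is a dummy array, not a real licence plate.
import numpy as np
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image


def _publish_dummy_clipped_LP():
    rospy.init_node('clipped_LP_test_pub', anonymous=True)
    pub = rospy.Publisher('/YOLO/clipped_LP', Image, queue_size=1)
    bridge = CvBridge()
    dummy = np.zeros((160, 380, 3), dtype=np.uint8)  # roughly the clipped-LP aspect ratio
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        pub.publish(bridge.cv2_to_imgmsg(dummy, 'bgr8'))
        rate.sleep()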
    def video(self, record=False):
        LP_size = (int(380 * 1.05), int(160 * 1.05))
        pjct_6d = licence_plate_render.ProjectRectangle6D(*LP_size)

        self.net = yolo_gluon.init_executor(self.export_file,
                                            self.size,
                                            self.ctx[0],
                                            use_tensor_rt=self.tensorrt)
        '''
        self.net = mxnet.contrib.onnx.onnx2mx.import_to_gluon.import_to_gluon(
            'v1/export/onnx/out.onnx', self.ctx[0])
        '''
        # -------------------- ROS -------------------- #
        self.bridge = CvBridge()
        rospy.init_node("LP_Detection", anonymous=True)
        LP_pub = rospy.Publisher(self.pub_clipped_LP, Image, queue_size=0)
        ps_pub = rospy.Publisher(self.pub_LP, Float32MultiArray, queue_size=0)
        rospy.Subscriber(self.topic, Image, self._image_callback)
        # threading.Thread(target=self._get_frame).start()

        pose = Float32MultiArray()
        rate = rospy.Rate(100)
        print('Image Topic: %s' % self.topic)
        print('checkpoint file: %s' % self.export_file)

        # -------------------- video record -------------------- #
        if record:
            start_time = datetime.datetime.now().strftime("%m-%dx%H-%M")
            out_file = os.path.join('video', 'LPD_%s.avi' % start_time)
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            v_size = (640, 480)
            video_out = cv2.VideoWriter(out_file, fourcc, 30, v_size)

        shape = (1, 3, 320, 512)
        # yolo_gluon.test_inference_rate(self.net, shape, cycles=100, ctx=mxnet.gpu(0))
        threading.Thread(target=self._net_thread).start()

        while not rospy.is_shutdown():
            if not hasattr(self, 'net_out') or not hasattr(self, 'net_img'):
                rate.sleep()
                print('Wait For Net')
                continue

            img = self.net_img.copy()
            net_out = self.net_out.copy()
            pred = self.predict_LP(net_out)  # use the snapshot taken above

            ps_pub.publish(pose)  # note: pose.data is not filled from pred here

            if pred[0] > LP_threshold:
                img, clipped_LP = pjct_6d.add_edges(img, pred[1:])
                clipped_LP = self.bridge.cv2_to_imgmsg(clipped_LP, 'bgr8')
                LP_pub.publish(clipped_LP)

            if self.show:
                cv2.imshow('img', img)
                cv2.waitKey(1)

            # video_out.write(ori_img)
            rate.sleep()
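# --- Hedged entry-point sketch (not part of the original source) --- #
# Only the video() method is shown above, so the owning class name
# (LicencePlateDetector) and the Parser() helper below are hypothetical
# placeholders for whatever the project actually instantiates; args.record
# mirrors the record flag read in __init__ above.
if __name__ == '__main__':
    args = Parser()  # hypothetical: the project's argument parser
    detector = LicencePlateDetector(args)  # hypothetical class that defines video()
    detector.video(record=args.record)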