def Infer(img_name, mod):
    system_dict["image"] = img_name
    if system_dict["gpu"]:
        ctx = mx.gpu(int(system_dict["gpu"]))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(
        system_dict["image"],
        short=system_dict["img_short_side"],
        max_size=system_dict["img_long_side"],
        mean=system_dict["img_pixel_means"],
        std=system_dict["img_pixel_stds"])

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=system_dict["rcnn_bbox_stds"],
                    nms_thresh=system_dict["rcnn_nms_thresh"],
                    conf_thresh=system_dict["rcnn_conf_thresh"])

    output = []
    conf_scores = []
    for [cls, conf, x1, y1, x2, y2] in det:
        output.append(
            [system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2]])
        conf_scores.append(conf)
        if cls > 0 and conf > system_dict["vis_thresh"]:
            print(system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2])

    max_index = conf_scores.index(max(conf_scores))
    print(output[max_index])

    if system_dict["vis"]:
        vis_detection(im_orig, det, system_dict["classes"],
                      thresh=system_dict["vis_thresh"])
    save_detection(im_orig, det, system_dict["classes"],
                   thresh=system_dict["vis_thresh"])

    return output
def dummy_data(ctx):
    # relies on module-level globals (path_to_image, img_short_side, img_long_side,
    # img_pixel_means, img_pixel_stds) defined elsewhere in the script
    im_tensor, im_info, im_orig = load_test(path_to_image,
                                            short=img_short_side,
                                            max_size=img_long_side,
                                            mean=img_pixel_means,
                                            std=img_pixel_stds,
                                            ctx=ctx)
    # data_batch = generate_batch(im_tensor, im_info)
    return im_tensor
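# A minimal warm-up sketch (an assumption, not part of the original script): run one
# dummy forward pass through an already-bound Module so that a later timed inference
# (as in the timed demo_net variant below) does not include graph/kernel start-up cost.
# warm_up() and its use of mx.io.DataBatch are illustrative; it reuses dummy_data()
# above and the same module-level globals.
import mxnet as mx

def warm_up(mod, ctx):
    im_tensor = dummy_data(ctx)                       # preprocessed image tensor (1, 3, H, W)
    h, w = im_tensor.shape[2], im_tensor.shape[3]
    im_info = mx.nd.array([[h, w, 1.0]], ctx=ctx)     # (height, width, scale)
    batch = mx.io.DataBatch(data=[im_tensor, im_info],
                            provide_data=[('data', im_tensor.shape),
                                          ('im_info', im_info.shape)])
    mod.forward(batch)
    mx.nd.waitall()                                   # block until the forward pass completes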
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=args.rcnn_bbox_stds,
                    nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh)

    # print out
    for [cls, conf, x1, y1, x2, y2] in det:
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])

    # if vis
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
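# A hedged sketch of the argparse wiring demo_net() expects: the option names mirror
# the attributes read from `args` above, but the default values are illustrative
# placeholders, not the original script's defaults.
import argparse
import ast

def parse_args():
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--image', type=str, default='demo.jpg')
    parser.add_argument('--params', type=str, default='model-0010.params')
    parser.add_argument('--gpu', type=str, default='')          # empty string -> CPU
    parser.add_argument('--img-short-side', type=int, default=600)
    parser.add_argument('--img-long-side', type=int, default=1000)
    parser.add_argument('--img-pixel-means', type=str, default='(0.0, 0.0, 0.0)')
    parser.add_argument('--img-pixel-stds', type=str, default='(1.0, 1.0, 1.0)')
    parser.add_argument('--rcnn-bbox-stds', type=str, default='(0.1, 0.1, 0.2, 0.2)')
    parser.add_argument('--rcnn-nms-thresh', type=float, default=0.3)
    parser.add_argument('--rcnn-conf-thresh', type=float, default=1e-3)
    parser.add_argument('--vis', action='store_true')
    parser.add_argument('--vis-thresh', type=float, default=0.7)
    args = parser.parse_args()
    # tuple-valued options arrive as strings and are evaluated here
    args.img_pixel_means = ast.literal_eval(args.img_pixel_means)
    args.img_pixel_stds = ast.literal_eval(args.img_pixel_stds)
    args.rcnn_bbox_stds = ast.literal_eval(args.rcnn_bbox_stds)
    return args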
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection (optionally with soft-NMS)
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=args.rcnn_bbox_stds,
                    nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh,
                    use_soft_nms=args.use_soft_nms,
                    soft_nms_thresh=args.soft_nms_thresh,
                    max_per_image=args.max_per_image)

    # print out
    for [cls, conf, x1, y1, x2, y2] in det:
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])

    # if vis
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
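# The variant above additionally reads three soft-NMS related attributes from `args`.
# A hypothetical way to register them on the same parser; the option names mirror the
# code above, the defaults are placeholders.
def add_soft_nms_args(parser):
    parser.add_argument('--use-soft-nms', action='store_true',
                        help='decay overlapping scores instead of hard-suppressing them')
    parser.add_argument('--soft-nms-thresh', type=float, default=0.5,
                        help='overlap threshold at which soft-NMS starts decaying scores')
    parser.add_argument('--max-per-image', type=int, default=100,
                        help='keep at most this many detections per image')
    return parser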
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward (timed)
    start = time.time()
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    print("time=", time.time() - start)

    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=args.rcnn_bbox_stds,
                    nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh)

    # print out rotated boxes (centre, size, angle)
    for [cls, conf, x_c, y_c, w, h, theta] in det:
        if cls > 0 and conf > args.vis_thresh:
            print('class_name=', class_names[int(cls)],
                  'conf=', conf, [x_c, y_c, w, h, theta])

    # draw rotated boxes above a fixed confidence threshold
    draw_rotate_box_cv(det, class_names, 0.95)
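# draw_rotate_box_cv() is defined elsewhere in this script. As a hedged, standalone
# sketch, one detection in the (x_c, y_c, w, h, theta) parameterisation printed above
# can be drawn with OpenCV's rotated-rectangle helpers, assuming theta is in degrees
# as cv2.boxPoints expects; this is illustrative, not the original drawing routine.
import cv2
import numpy as np

def draw_one_rotated_box(img, x_c, y_c, w, h, theta, color=(0, 255, 0)):
    # corner points of the rotated rectangle, reshaped into an int32 polygon
    pts = cv2.boxPoints(((float(x_c), float(y_c)), (float(w), float(h)), float(theta)))
    pts = pts.astype(np.int32).reshape((-1, 1, 2))
    cv2.polylines(img, [pts], isClosed=True, color=color, thickness=2)
    return img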
def Infer(img_name, mod):
    system_dict["image"] = img_name
    if system_dict["gpu"]:
        ctx = mx.gpu(int(system_dict["gpu"]))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(
        system_dict["image"],
        short=system_dict["img_short_side"],
        max_size=system_dict["img_long_side"],
        mean=system_dict["img_pixel_means"],
        std=system_dict["img_pixel_stds"])

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=system_dict["rcnn_bbox_stds"],
                    nms_thresh=system_dict["rcnn_nms_thresh"],
                    conf_thresh=system_dict["rcnn_conf_thresh"])

    output = []
    conf_scores = []
    final = [0] * 8
    for [cls, conf, x1, y1, x2, y2] in det:
        output.append([conf, [x1, y1, x2, y2]])
        conf_scores.append(conf)
        if cls > 0 and conf > system_dict["vis_thresh"]:
            print(" Intensity || X1 || X2 : ")
            print(system_dict["classes"][int(cls)], conf, [x1, x2])

            # fixed breakpoints along the x-axis, one per LED segment
            p0 = 0
            p1 = 125
            p2 = 375
            p3 = 500
            p4 = 625
            p5 = 750
            p6 = 875
            p7 = 1000
            tag = [p0, p1, p2, p3, p4, p5, p6, p7]

            # locate the box extent [x1, x2] among the breakpoints and switch on
            # every LED segment the box overlaps
            contag = tag + [x1] + [x2]
            contag.sort()
            pos1 = contag.index(x1)
            pos2 = contag.index(x2)
            led = [0] * len(tag)
            for i in range(pos1 - 1, pos2 - 1):
                led[i] = 1
            print("Binary Output: ", led)
            print('\n')
            print('\n')

            # accumulate activations over all detections
            for i in range(len(tag)):
                final[i] = final[i] | led[i]

    print("LED Summary Activation : ", final)

    max_index = conf_scores.index(max(conf_scores))
    print("Most Intensive : ", output[max_index])

    # if system_dict["vis"]:
    #     vis_detection(im_orig, det, system_dict["classes"],
    #                   thresh=system_dict["vis_thresh"])
    save_detection(im_orig, det, system_dict["classes"],
                   thresh=system_dict["vis_thresh"])

    return output
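# Standalone sketch of the interval-to-LED mapping used in Infer() above, with the same
# hard-coded breakpoints; the call at the bottom is an illustrative example, not output
# from the original script.
def box_to_leds(x1, x2, tag=(0, 125, 375, 500, 625, 750, 875, 1000)):
    contag = sorted(list(tag) + [x1, x2])   # merge the box extent with the breakpoints
    pos1, pos2 = contag.index(x1), contag.index(x2)
    led = [0] * len(tag)
    for i in range(pos1 - 1, pos2 - 1):     # mark every segment the box spans
        led[i] = 1
    return led

# e.g. a detection spanning x1=200, x2=700 activates the segments between 125 and 750
print(box_to_leds(200, 700))   # -> [0, 1, 1, 1, 1, 0, 0, 0]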
def Infer(img_name, mod):
    '''
    User function: Run inference on an image and visualize it.

    Args:
        img_name (str): Relative path to the image file
        mod (mxnet model): Mxnet model returned from the load_model() function

    Returns:
        list: Containing class names, scores and bounding box locations of the predicted objects.
    '''
    system_dict["image"] = img_name
    if system_dict["gpu"]:
        ctx = mx.gpu(int(system_dict["gpu"]))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(
        system_dict["image"],
        short=system_dict["img_short_side"],
        max_size=system_dict["img_long_side"],
        mean=system_dict["img_pixel_means"],
        std=system_dict["img_pixel_stds"])

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=system_dict["rcnn_bbox_stds"],
                    nms_thresh=system_dict["rcnn_nms_thresh"],
                    conf_thresh=system_dict["rcnn_conf_thresh"])

    output = []
    conf_scores = []
    for [cls, conf, x1, y1, x2, y2] in det:
        output.append(
            [system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2]])
        conf_scores.append(conf)
        if cls > 0 and conf > system_dict["vis_thresh"]:
            print(system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2])

    max_index = conf_scores.index(max(conf_scores))
    print(output[max_index])

    if system_dict["vis"]:
        vis_detection(im_orig, det, system_dict["classes"],
                      thresh=system_dict["vis_thresh"])
    save_detection(im_orig, det, system_dict["classes"],
                   thresh=system_dict["vis_thresh"])

    return output
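# Hypothetical usage of Infer(): load_model() is the companion loader the docstring
# refers to, but its exact signature is assumed here, and the image path is a placeholder.
mod = load_model()                                  # bound mxnet Module with weights loaded
detections = Infer("test_images/street.jpg", mod)   # list of [class_name, score, [x1, y1, x2, y2]]
for class_name, score, box in detections:
    print(class_name, float(score), box)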
def demo_net(sym, class_names, args):
    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # iterate over the VOC2007 test split
    f = open(
        "/home/skutukov/datasets/VOCdevkit/VOC2007/ImageSets/Main/test.txt",
        "r")
    for file in tqdm.tqdm(f.readlines()):
        path = os.path.join(args.image, str(file).strip() + '.jpg')
        # debug override: run on a single fixed image instead of the list entry
        path = '/home/skutukov/Pictures/demo.jpg'

        # load single test image
        im_tensor, im_info, im_orig = load_test(path,
                                                short=args.img_short_side,
                                                max_size=args.img_long_side,
                                                mean=args.img_pixel_means,
                                                std=args.img_pixel_stds,
                                                ctx=ctx)

        # generate data batch
        data_batch = generate_batch(im_tensor, im_info)

        # forward
        mod.forward(data_batch)
        rois, scores, bbox_deltas = mod.get_outputs()
        rois = rois[:, 1:]
        scores = scores[0]
        bbox_deltas = bbox_deltas[0]
        im_info = im_info[0]

        # decode detection
        det = im_detect(rois, scores, bbox_deltas, im_info,
                        bbox_stds=args.rcnn_bbox_stds,
                        nms_thresh=args.rcnn_nms_thresh,
                        conf_thresh=args.rcnn_conf_thresh)

        # print out
        for [cls, conf, x1, y1, x2, y2] in det:
            if cls > 0 and conf > args.vis_thresh:
                print(class_names[int(cls)], conf, [x1, y1, x2, y2])

        # if vis
        if args.vis:
            vis_detection(im_orig, det, class_names,
                          thresh=args.vis_thresh, file=file)

        # debug: stop after the first image
        break
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas, mask_prob = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection (boxes and per-ROI masks)
    det, masks = im_detect(rois, scores, bbox_deltas, mask_prob, im_info,
                           bbox_stds=args.rcnn_bbox_stds,
                           nms_thresh=args.rcnn_nms_thresh,
                           conf_thresh=args.rcnn_conf_thresh)

    im = cv2.imread(args.image)
    print(im.shape)
    print(im_info)

    # print out, draw boxes on the original image and dump each ROI mask
    for index, [cls, conf, x1, y1, x2, y2] in enumerate(det):
        print(masks[index].max())
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])
            print((int(x1), int(y1)), (int(x2), int(y2)))
            cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)),
                          (255, 0, 0), 10)
            cv2.imwrite("mask{}.png".format(index),
                        np.uint8(masks[index] * 255))
    cv2.imwrite('demo.png', im)

    # if vis
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
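# The Mask R-CNN variant above writes each ROI mask to its own PNG. A common way to
# instead paint a mask back onto the full image is sketched below; this is a generic
# technique, not this repo's exact post-processing: resize the per-ROI mask probability
# map to the box size, threshold it, and blend a colour into the covered pixels.
import cv2
import numpy as np

def paste_mask(im, mask, x1, y1, x2, y2, thresh=0.5, color=(0, 0, 255)):
    h_im, w_im = im.shape[:2]
    # clip the box to the image and guard against degenerate sizes
    x1, y1 = max(int(x1), 0), max(int(y1), 0)
    x2, y2 = min(int(x2), w_im), min(int(y2), h_im)
    w, h = max(x2 - x1, 1), max(y2 - y1, 1)
    # resize the ROI mask to the box size and binarise it
    roi_mask = cv2.resize(mask.astype(np.float32), (w, h)) > thresh
    overlay = im.copy()
    overlay[y1:y1 + h, x1:x1 + w][roi_mask] = color
    # blend the coloured overlay with the original image
    return cv2.addWeighted(im, 0.5, overlay, 0.5, 0)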