def Infer(img_name, mod):
    system_dict["image"] = img_name
    if system_dict["gpu"]:
        ctx = mx.gpu(int(system_dict["gpu"]))
    else:
        ctx = mx.cpu(0)

    # load single test
    im_tensor, im_info, im_orig = load_test(
        system_dict["image"],
        short=system_dict["img_short_side"],
        max_size=system_dict["img_long_side"],
        mean=system_dict["img_pixel_means"],
        std=system_dict["img_pixel_stds"])

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois,
                    scores,
                    bbox_deltas,
                    im_info,
                    bbox_stds=system_dict["rcnn_bbox_stds"],
                    nms_thresh=system_dict["rcnn_nms_thresh"],
                    conf_thresh=system_dict["rcnn_conf_thresh"])

    output = []
    conf_scores = []
    for [cls, conf, x1, y1, x2, y2] in det:
        output.append(
            [system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2]])
        conf_scores.append(conf)
        if cls > 0 and conf > system_dict["vis_thresh"]:
            print(system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2])

    # report the highest-confidence detection (guard against empty results)
    if conf_scores:
        max_index = conf_scores.index(max(conf_scores))
        print(output[max_index])

    if system_dict["vis"]:
        vis_detection(im_orig,
                      det,
                      system_dict["classes"],
                      thresh=system_dict["vis_thresh"])

    save_detection(im_orig,
                   det,
                   system_dict["classes"],
                   thresh=system_dict["vis_thresh"])

    return output
Example #2
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test
    im_tensor, im_info, im_orig = load_test(args.image, short=args.img_short_side, max_size=args.img_long_side,
                                            mean=args.img_pixel_means, std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # largest possible input shapes
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)), ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=args.rcnn_bbox_stds, nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh)

    # print out
    for [cls, conf, x1, y1, x2, y2] in det:
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])

    # if vis
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
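For reference, here is a hypothetical invocation of demo_net. sym and class_names would come from the symbol- and dataset-building code elsewhere in the repo, and every value below is an illustrative assumption rather than the repo's actual defaults.
import argparse

# illustrative only: attribute names mirror those accessed inside demo_net,
# the values and file names are assumptions
args = argparse.Namespace(
    gpu='0', image='demo.jpg', params='model-0010.params',
    img_short_side=600, img_long_side=1000,
    img_pixel_means=(0.0, 0.0, 0.0), img_pixel_stds=(1.0, 1.0, 1.0),
    rcnn_bbox_stds=(0.1, 0.1, 0.2, 0.2), rcnn_nms_thresh=0.3,
    rcnn_conf_thresh=1e-3, vis=True, vis_thresh=0.7)
demo_net(sym, class_names, args)  # sym / class_names are built elsewhere in the repo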
Example #3
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # largest possible input shapes
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois,
                    scores,
                    bbox_deltas,
                    im_info,
                    bbox_stds=args.rcnn_bbox_stds,
                    nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh,
                    use_soft_nms=args.use_soft_nms,
                    soft_nms_thresh=args.soft_nms_thresh,
                    max_per_image=args.max_per_image)

    # print out
    for [cls, conf, x1, y1, x2, y2] in det:
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])

    # if vis
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
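This variant also forwards use_soft_nms, soft_nms_thresh and max_per_image to im_detect. As a rough illustration of what soft-NMS does (decay the scores of overlapping boxes instead of discarding them outright), here is a standalone NumPy sketch; it is not the repo's implementation, and the function and parameter names are assumptions.
import numpy as np

def soft_nms_sketch(boxes, scores, sigma=0.5, score_thresh=1e-3):
    """Gaussian soft-NMS. boxes: (N, 4) as x1, y1, x2, y2; scores: (N,).
    Returns the indices of the kept boxes, highest score first."""
    boxes = boxes.astype(np.float64)
    scores = scores.astype(np.float64).copy()
    remaining = np.arange(len(scores))
    keep = []
    while remaining.size > 0:
        top = np.argmax(scores[remaining])
        i = remaining[top]
        keep.append(int(i))
        remaining = np.delete(remaining, top)
        if remaining.size == 0:
            break
        # IoU of the selected box with every remaining box
        xx1 = np.maximum(boxes[i, 0], boxes[remaining, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[remaining, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[remaining, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[remaining, 3])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        area_i = (boxes[i, 2] - boxes[i, 0] + 1) * (boxes[i, 3] - boxes[i, 1] + 1)
        area_r = (boxes[remaining, 2] - boxes[remaining, 0] + 1) * \
                 (boxes[remaining, 3] - boxes[remaining, 1] + 1)
        iou = inter / (area_i + area_r - inter)
        # decay, rather than zero out, the scores of overlapping boxes
        scores[remaining] *= np.exp(-(iou ** 2) / sigma)
        remaining = remaining[scores[remaining] > score_thresh]
    return keep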
Example #4
def Infer(img_name, mod):
    '''
    User function: Run inference on an image and visualize the result

    Args:
        img_name (str): Relative path to the image file
        mod (mxnet model): MXNet model returned from the load_model() function

    Returns:
        list: Containing class names, confidence scores and bounding box locations of the predicted objects.
    '''
    system_dict["image"] = img_name
    if system_dict["gpu"]:
        ctx = mx.gpu(int(system_dict["gpu"]))
    else:
        ctx = mx.cpu(0)

    # load single test
    im_tensor, im_info, im_orig = load_test(
        system_dict["image"],
        short=system_dict["img_short_side"],
        max_size=system_dict["img_long_side"],
        mean=system_dict["img_pixel_means"],
        std=system_dict["img_pixel_stds"])

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det = im_detect(rois,
                    scores,
                    bbox_deltas,
                    im_info,
                    bbox_stds=system_dict["rcnn_bbox_stds"],
                    nms_thresh=system_dict["rcnn_nms_thresh"],
                    conf_thresh=system_dict["rcnn_conf_thresh"])

    output = []
    conf_scores = []
    for [cls, conf, x1, y1, x2, y2] in det:
        output.append(
            [system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2]])
        conf_scores.append(conf)
        if cls > 0 and conf > system_dict["vis_thresh"]:
            print(system_dict["classes"][int(cls)], conf, [x1, y1, x2, y2])

    # report the highest-confidence detection (guard against empty results)
    if conf_scores:
        max_index = conf_scores.index(max(conf_scores))
        print(output[max_index])

    if system_dict["vis"]:
        vis_detection(im_orig,
                      det,
                      system_dict["classes"],
                      thresh=system_dict["vis_thresh"])

    save_detection(im_orig,
                   det,
                   system_dict["classes"],
                   thresh=system_dict["vis_thresh"])

    return output
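A minimal usage sketch for Infer, assuming load_model() (mentioned in the docstring) has already populated system_dict and returned a bound module; the image path is illustrative.
# hypothetical usage of Infer(); load_model() and the image path are assumptions
mod = load_model()
detections = Infer("test_images/street.jpg", mod)
for class_name, conf, box in detections:
    print(class_name, conf, box)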
Example #5
def demo_net(sym, class_names, args):
    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # largest possible input shapes
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # iterate over the VOC2007 test split (dataset path is hard-coded)
    f = open(
        "/home/skutukov/datasets/VOCdevkit/VOC2007/ImageSets/Main/test.txt",
        "r")
    for file in tqdm.tqdm(f.readlines()):
        path = os.path.join(args.image, str(file).strip() + '.jpg')
        # load single test
        im_tensor, im_info, im_orig = load_test(path,
                                                short=args.img_short_side,
                                                max_size=args.img_long_side,
                                                mean=args.img_pixel_means,
                                                std=args.img_pixel_stds,
                                                ctx=ctx)

        # generate data batch
        data_batch = generate_batch(im_tensor, im_info)
        # forward
        mod.forward(data_batch)
        rois, scores, bbox_deltas = mod.get_outputs()
        rois = rois[:, 1:]
        scores = scores[0]
        bbox_deltas = bbox_deltas[0]
        im_info = im_info[0]

        # decode detection
        det = im_detect(rois,
                        scores,
                        bbox_deltas,
                        im_info,
                        bbox_stds=args.rcnn_bbox_stds,
                        nms_thresh=args.rcnn_nms_thresh,
                        conf_thresh=args.rcnn_conf_thresh)

        # print out
        for [cls, conf, x1, y1, x2, y2] in det:
            if cls > 0 and conf > args.vis_thresh:
                print(class_names[int(cls)], conf, [x1, y1, x2, y2])

        # if vis
        if args.vis:
            vis_detection(im_orig,
                          det,
                          class_names,
                          thresh=args.vis_thresh,
                          file=file)

        break  # stop after the first listed image
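A tidier shape for the image loop above, sketched under the assumption that the split file path is passed in rather than hard-coded; run_one_image stands in for the forward/detect/visualize body and is purely illustrative.
import os
import tqdm

def iterate_imageset(imageset_path, image_dir, run_one_image):
    # read the image IDs once, closing the split file automatically
    with open(imageset_path) as f:
        image_ids = [line.strip() for line in f if line.strip()]
    for image_id in tqdm.tqdm(image_ids):
        run_one_image(os.path.join(image_dir, image_id + '.jpg'))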
Example #6
def demo_net(sym, class_names, args):
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    if args.gpu:
        ctx = mx.gpu(int(args.gpu))
    else:
        ctx = mx.cpu(0)

    # load single test
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # largest possible input shapes
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward
    mod.forward(data_batch)
    rois, scores, bbox_deltas, mask_prob = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode detection
    det, masks = im_detect(rois,
                           scores,
                           bbox_deltas,
                           mask_prob,
                           im_info,
                           bbox_stds=args.rcnn_bbox_stds,
                           nms_thresh=args.rcnn_nms_thresh,
                           conf_thresh=args.rcnn_conf_thresh)

    im = cv2.imread(args.image)
    print(im.shape)
    print(im_info)
    # print detections, draw boxes and save the per-detection mask maps
    for index, [cls, conf, x1, y1, x2, y2] in enumerate(det):
        print(masks[index].max())
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])
            print((int(x1), int(y1)), (int(x2), int(y2)))
            cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)),
                          (255, 0, 0), 10)
            cv2.imwrite("mask{}.png".format(index),
                        np.uint8(masks[index] * 255))

    cv2.imwrite('demo.png', im)

    # if vis
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
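The masks written out above appear to be per-box probability maps. A hedged helper for pasting such a mask back onto the full image might look like the sketch below; the function name, the 0.5 threshold and the overlay colour are all assumptions, not part of the original code.
import cv2
import numpy as np

def paste_mask(im, mask, box, thresh=0.5):
    """Resize a per-box mask probability map to its box and paint it onto im (in place)."""
    h_im, w_im = im.shape[:2]
    x1, y1, x2, y2 = [int(round(v)) for v in box]
    x1, y1 = max(x1, 0), max(y1, 0)
    x2, y2 = min(x2, w_im - 1), min(y2, h_im - 1)
    if x2 <= x1 or y2 <= y1:
        return im
    resized = cv2.resize(mask.astype(np.float32), (x2 - x1 + 1, y2 - y1 + 1))
    region = im[y1:y2 + 1, x1:x2 + 1]
    region[resized > thresh] = (0, 0, 255)  # BGR: paint masked pixels red
    return im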