def check_quantize(sym, data_shape, check_conv=True):
  """Quantize an FC+softmax head on top of *sym* via the MKLDNN backend and
  compare quantized outputs against the FP32 reference.

  Parameters
  ----------
  sym : Symbol
      Base symbol; an FC(10) + SoftmaxOutput head is appended to it.
  data_shape : tuple of int
      Input shape; ``data_shape[0]`` is the batch size.
  check_conv : bool
      If True, also verify the quantized symbol carries calibration attrs.
  """
  fc = mx.sym.FullyConnected(data=sym, num_hidden=10, flatten=True, name='fc')
  sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
  sym_sg = sym.get_backend_symbol("MKLDNN")
  label_shape = (data_shape[0], 10)
  mod = Module(symbol=sym)
  mod.bind(for_training=False,
           data_shapes=[('data', data_shape)],
           label_shapes=[('softmax_label', label_shape)])
  mod.init_params(mx.init.Normal(0.5))
  arg_params, aux_params = mod.get_params()

  data = [mx.random.uniform(-1, 1, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
  batch = mx.io.DataBatch(data, [])

  # FP32 reference forward pass.
  mod.forward(batch, is_train=False)
  for output in mod.get_outputs():
      output.wait_to_read()
  ref_out = mod.get_outputs()

  excluded_sym_names = []
  if mx.current_context() == mx.cpu():
    excluded_sym_names += ['fc']

  calib_data = mx.nd.random.uniform(shape=data_shape)
  calib_data = NDArrayIter(data=calib_data)
  calib_data = DummyIter(calib_data)

  # PEP 8 (E731): use a def instead of assigning a lambda to a name.
  def calib_layer(name):
    # Calibrate every layer output.
    return name.endswith('_output')

  qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
                                                                   arg_params=arg_params,
                                                                   aux_params=aux_params,
                                                                   ctx=mx.current_context(),
                                                                   excluded_sym_names=excluded_sym_names,
                                                                   quantized_dtype='uint8',
                                                                   calib_mode='naive',
                                                                   calib_data=calib_data,
                                                                   calib_layer=calib_layer,
                                                                   calib_quantize_op=True,
                                                                   num_calib_examples=5)
  qsym = qsym.get_backend_symbol("MKLDNN_POST_QUANTIZE")
  if check_conv:
    check_qsym_calibrated(qsym)
  quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params, batch, data_shape, label_shape)
  # Quantization is lossy, hence the loose absolute tolerance.
  for ref, quantized in zip(ref_out, quantized_out):
    assert_almost_equal(ref.asnumpy(), quantized.asnumpy(), atol=1)
  check_qsym_dummy_forward(qsym, batch, data_shape, label_shape)
# Example no. 2
def demo_net(sym, class_names, args):
    """Run single-image detection with *sym* and print/visualize results."""
    # show the parsed configuration
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # choose the compute device
    ctx = mx.gpu(int(args.gpu)) if args.gpu else mx.cpu(0)

    # load and preprocess the single test image
    im_tensor, im_info, im_orig = load_test(args.image, short=args.img_short_side, max_size=args.img_long_side,
                                            mean=args.img_pixel_means, std=args.img_pixel_stds)

    # wrap the tensors into a data batch
    data_batch = generate_batch(im_tensor, im_info)

    # restore trained weights
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # largest input shapes the module may see
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)), ('im_info', (1, 3))]
    label_shapes = None

    # sanity-check parameter shapes against the symbol
    check_shape(sym, data_shapes, arg_params, aux_params)

    # build and bind the inference module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # single forward pass
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois = rois[:, 1:]
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode raw network outputs into final detections
    det = im_detect(rois, scores, bbox_deltas, im_info,
                    bbox_stds=args.rcnn_bbox_stds, nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh)

    # report detections above the visualization threshold
    for cls, conf, x1, y1, x2, y2 in det:
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])

    # optionally draw the detections
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)
# Example no. 3
def test_net(sym, imdb, args):
    """Detect over every image in *imdb* and evaluate the collected boxes."""
    # dump the configuration for the log
    logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))

    # single-GPU evaluation context
    ctx = mx.gpu(args.gpu)

    # test-time data loader (batch size fixed at 1)
    test_data = TestLoader(imdb.roidb, batch_size=1, short=args.img_short_side, max_size=args.img_long_side,
                           mean=args.img_pixel_means, std=args.img_pixel_stds)

    # restore trained weights
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # largest input shapes the module may see
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)), ('im_info', (1, 3))]
    label_shapes = None

    # sanity-check parameter shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # build the bound inference module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # all_boxes[cls][image] holds an N x 5 array of (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(imdb.num_images)]
                 for _ in range(imdb.num_classes)]

    # run detection over the whole set
    with tqdm(total=imdb.num_images) as pbar:
        for idx, data_batch in enumerate(test_data):
            im_info = data_batch.data[1][0]
            mod.forward(data_batch)
            rois, scores, bbox_deltas = mod.get_outputs()
            det = im_detect(rois[:, 1:], scores[0], bbox_deltas[0], im_info,
                            bbox_stds=args.rcnn_bbox_stds, nms_thresh=args.rcnn_nms_thresh,
                            conf_thresh=args.rcnn_conf_thresh)
            # (boxes, score) columns, then split per class (class 0 = background)
            boxes_with_scores = np.concatenate((det[:, -4:], det[:, [1]]), axis=-1)
            for cls_id in range(1, imdb.num_classes):
                keep = np.where(det[:, 0] == cls_id)[0]
                all_boxes[cls_id][idx] = boxes_with_scores[keep, :]
            pbar.update(data_batch.data[0].shape[0])

    # final evaluation
    imdb.evaluate_detections(all_boxes)
# Example no. 4
    def check_quantize_model(qdtype):
        """Quantize the FP32 test symbol with dtype *qdtype* and validate the
        quantized params, calibration attributes and quantized dtype."""

        def check_params(params, qparams, qsym=None):
            # Without a quantized symbol the params must match one-to-one;
            # with one, compare against the ground-truth quantized params.
            if qsym is None:
                assert len(params) == len(qparams)
                reference = params
            else:
                reference = mx.contrib.quant._quantize_params(qsym, params)
                assert len(qparams) == len(reference)
            for key, value in reference.items():
                assert key in qparams
                assert same(value.asnumpy(), qparams[key].asnumpy())

        def check_qsym_calibrated(qsym):
            # Every requantize node must carry both calibration ranges.
            for op_name, attr in qsym.attr_dict().items():
                if 'requantize_' in op_name:
                    assert 'min_calib_range' in attr
                    assert 'max_calib_range' in attr

        def check_qsym_qdtype(qsym, qdtype):
            # Every quantize node must advertise the requested out_type.
            for op_name, attr in qsym.attr_dict().items():
                if '_quantize' in op_name:
                    assert 'out_type' in attr
                    assert attr['out_type'] == qdtype

        sym = get_fp32_sym()
        mod = Module(symbol=sym)
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)
        mod.bind(data_shapes=[('data', data_shape)], label_shapes=[('softmax_label', label_shape)])
        mod.init_params()
        arg_params, aux_params = mod.get_params()

        # First: quantize without any calibration.
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)

        # Second: quantize again with naive calibration over random data.
        calib_data = DummyIter(NDArrayIter(data=mx.nd.random.uniform(shape=data_shape)))
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='naive',
            calib_data=calib_data,
            num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
# Example no. 5
def train_net(sym, roidb, args):
    """Train a Faster R-CNN network defined by *sym* on the roidb.

    Sets up multi-GPU contexts, the anchor-based data loader, metrics,
    callbacks and the SGD learning-rate schedule, then runs ``Module.fit``.

    Parameters
    ----------
    sym : Symbol
        Full training symbol; must expose 'rpn_cls_score_output' internally.
    roidb : list
        Region-of-interest database produced by the dataset loader.
    args : argparse.Namespace
        Training configuration (gpus, shapes, RPN/RCNN hyper-parameters...).
    """
    # print config
    logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup multi-gpu: effective batch size scales with the number of devices
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    batch_size = args.rcnn_batch_size * len(ctx)

    # load training data; feat_sym is used to infer the RPN feature-map size
    feat_sym = sym.get_internals()['rpn_cls_score_output']
    ag = AnchorGenerator(feat_stride=args.rpn_feat_stride,
                         anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios)
    asp = AnchorSampler(allowed_border=args.rpn_allowed_border, batch_rois=args.rpn_batch_rois,
                        fg_fraction=args.rpn_fg_fraction, fg_overlap=args.rpn_fg_overlap,
                        bg_overlap=args.rpn_bg_overlap)
    train_data = AnchorLoader(roidb, batch_size, args.img_short_side, args.img_long_side,
                              args.img_pixel_means, args.img_pixel_stds, feat_sym, ag, asp, shuffle=True)

    # produce shape max possible (worst-case square image)
    _, out_shape, _ = feat_sym.infer_shape(data=(1, 3, args.img_long_side, args.img_long_side))
    feat_height, feat_width = out_shape[0][-2:]
    rpn_num_anchors = len(args.rpn_anchor_scales) * len(args.rpn_anchor_ratios)
    data_names = ['data', 'im_info', 'gt_boxes']
    label_names = ['label', 'bbox_target', 'bbox_weight']
    data_shapes = [('data', (batch_size, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (batch_size, 3)),
                   ('gt_boxes', (batch_size, 100, 5))]
    label_shapes = [('label', (batch_size, 1, rpn_num_anchors * feat_height, feat_width)),
                    ('bbox_target', (batch_size, 4 * rpn_num_anchors, feat_height, feat_width)),
                    ('bbox_weight', (batch_size, 4 * rpn_num_anchors, feat_height, feat_width))]

    # print shapes
    data_shape_dict, out_shape_dict = infer_data_shape(sym, data_shapes + label_shapes)
    logger.info('max input shape\n%s' % pprint.pformat(data_shape_dict))
    logger.info('max output shape\n%s' % pprint.pformat(out_shape_dict))

    # load and initialize params: resume from a checkpoint, or start from a
    # pretrained backbone and initialize the new FRCNN layers
    if args.resume:
        arg_params, aux_params = load_param(args.resume)
    else:
        arg_params, aux_params = load_param(args.pretrained)
        arg_params, aux_params = initialize_frcnn(sym, data_shapes, arg_params, aux_params)

    # check parameter shapes
    check_shape(sym, data_shapes + label_shapes, arg_params, aux_params)

    # check fixed params (frozen layers that will not be updated)
    fixed_param_names = get_fixed_params(sym, args.net_fixed_params)
    logger.info('locking params\n%s' % pprint.pformat(fixed_param_names))

    # metric: RPN and RCNN accuracy/log-loss/L1-loss combined
    rpn_eval_metric = RPNAccMetric()
    rpn_cls_metric = RPNLogLossMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    eval_metric = RCNNAccMetric()
    cls_metric = RCNNLogLossMetric()
    bbox_metric = RCNNL1LossMetric()
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)

    # callback
    batch_end_callback = mx.callback.Speedometer(batch_size, frequent=args.log_interval, auto_reset=False)
    epoch_end_callback = mx.callback.do_checkpoint(args.save_prefix)

    # learning schedule: decay lr by lr_factor at each epoch in lr_decay_epoch;
    # when resuming past a decay point, start from the already-decayed lr
    base_lr = args.lr
    lr_factor = 0.1
    lr_epoch = [int(epoch) for epoch in args.lr_decay_epoch.split(',')]
    lr_epoch_diff = [epoch - args.start_epoch for epoch in lr_epoch if epoch > args.start_epoch]
    lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
    logger.info('lr %f lr_epoch_diff %s lr_iters %s' % (lr, lr_epoch_diff, lr_iters))
    lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iters, lr_factor)
    # optimizer
    optimizer_params = {'momentum': 0.9,
                        'wd': 0.0005,
                        'learning_rate': lr,
                        'lr_scheduler': lr_scheduler,
                        'rescale_grad': (1.0 / batch_size),
                        'clip_gradient': 5}

    # train
    mod = Module(sym, data_names=data_names, label_names=label_names,
                 logger=logger, context=ctx, work_load_list=None,
                 fixed_param_names=fixed_param_names)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
            batch_end_callback=batch_end_callback, kvstore='device',
            optimizer='sgd', optimizer_params=optimizer_params,
            arg_params=arg_params, aux_params=aux_params, begin_epoch=args.start_epoch, num_epoch=args.epochs)
# Example no. 6
def test_net(sym, imdb, args):
    """Run detection over every image in *imdb* and evaluate the results.

    Variant with optional soft-NMS and a per-image detection cap.

    Parameters
    ----------
    sym : Symbol
        Detection symbol producing (rois, scores, bbox_deltas).
    imdb : image database exposing ``roidb``, ``num_images``,
        ``num_classes`` and ``evaluate_detections``.
    args : argparse.Namespace
        Test configuration (gpu, image sizes, NMS thresholds, ...).
    """
    # print config
    logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    ctx = mx.gpu(args.gpu)

    # load testing data (batch size fixed at 1)
    test_data = TestLoader(imdb.roidb,
                           batch_size=1,
                           short=args.img_short_side,
                           max_size=args.img_long_side,
                           mean=args.img_pixel_means,
                           std=args.img_pixel_stds)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible (worst-case square image)
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(imdb.num_images)]
                 for _ in range(imdb.num_classes)]

    # start detection
    with tqdm(total=imdb.num_images) as pbar:
        for i, data_batch in enumerate(test_data):
            # forward
            im_info = data_batch.data[1][0]
            mod.forward(data_batch)
            rois, scores, bbox_deltas = mod.get_outputs()
            # drop the batch-index column of rois; take the single image
            rois = rois[:, 1:]
            scores = scores[0]
            bbox_deltas = bbox_deltas[0]

            det = im_detect(rois,
                            scores,
                            bbox_deltas,
                            im_info,
                            bbox_stds=args.rcnn_bbox_stds,
                            nms_thresh=args.rcnn_nms_thresh,
                            conf_thresh=args.rcnn_conf_thresh,
                            use_soft_nms=args.use_soft_nms,
                            soft_nms_thresh=args.soft_nms_thresh,
                            max_per_image=args.max_per_image)
            # split detections per class (det column 0 = class id,
            # column 1 = confidence, last 4 columns = box coords)
            for j in range(1, imdb.num_classes):
                indexes = np.where(det[:, 0] == j)[0]
                all_boxes[j][i] = np.concatenate((det[:, -4:], det[:, [1]]),
                                                 axis=-1)[indexes, :]
            pbar.update(data_batch.data[0].shape[0])

    # evaluate model
    imdb.evaluate_detections(all_boxes)
    def check_quantize_model(qdtype):
        """Quantize the FP32 test symbol with dtype *qdtype* and verify the
        quantized params, calibration attributes and output dtype."""

        def check_params(params, qparams, qsym=None):
            # Without a quantized symbol, params must match qparams exactly;
            # with one, compare against the ground-truth quantized params.
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(
                    qsym, params)
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            # Every requantize node must carry both calibration ranges.
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            # Every quantize node must advertise the requested out_type.
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        sym = get_fp32_sym()
        mod = Module(symbol=sym)
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)
        mod.bind(data_shapes=[('data', data_shape)],
                 label_shapes=[('softmax_label', label_shape)])
        mod.init_params()
        arg_params, aux_params = mod.get_params()
        # First pass: quantize without calibration.
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)

        # Second pass: quantize with naive calibration over random data.
        calib_data = mx.nd.random.uniform(shape=data_shape)
        calib_data = NDArrayIter(data=calib_data)
        calib_data = DummyIter(calib_data)
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='naive',
            calib_data=calib_data,
            num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
# Example no. 8
def check_quantize(sym,
                   data_shape,
                   out_type,
                   name='conv',
                   check_calibration=True,
                   gluon_forward=False,
                   check_scale_align=False):
    """Quantize *sym* via the subgraph-quantization backend pass and compare
    the quantized outputs against the FP32 reference within tolerance.

    Parameters
    ----------
    sym : Symbol
        FP32 symbol to quantize.
    data_shape : tuple of int
        Input data shape.
    out_type : str
        Quantized dtype, e.g. 'uint8' or 'int8'.
    name : str
        Operator key looked up in the test ``config`` table.
    check_calibration : bool
        If True, verify calibration attributes on the quantized symbol.
    gluon_forward : bool
        If True, run the quantized symbol through the Gluon forward check.
    check_scale_align : bool
        If True, verify quantization scales are aligned.
    """
    if name in config:
        name = config[name][OP_NAME]
    sym_sg = sym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
    mod = Module(symbol=sym, label_names=None)
    mod.bind(for_training=False, data_shapes=[('data', data_shape)])
    mod.init_params(mx.init.Normal(0.5))
    arg_params, aux_params = mod.get_params()

    # uint8 quantization needs non-negative inputs; otherwise use [-1, 1].
    low = 0.0 if out_type == 'uint8' else -1.0
    data = [
        mx.random.uniform(low, 1.0, shape=shape, ctx=mx.current_context())
        for _, shape in mod.data_shapes
    ]
    batch = mx.io.DataBatch(data, [])

    # FP32 reference forward pass.
    mod.forward(batch, is_train=False)
    for output in mod.get_outputs():
        output.wait_to_read()
    ref_out = mod.get_outputs()

    excluded_sym_names = []
    excluded_op_names = []
    # PEP 8: compare booleans by truth value, not with '== True'.
    if mx.current_context() == mx.cpu() and gluon_forward:
        excluded_op_names += ['_sg_mkldnn_fully_connected']

    calib_data = CalibIter(batch, data_shape, 1)

    qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
        sym=sym_sg,
        arg_params=arg_params,
        aux_params=aux_params,
        ctx=mx.current_context(),
        excluded_sym_names=excluded_sym_names,
        excluded_op_names=excluded_op_names,
        quantized_dtype=out_type,
        calib_mode='naive',
        calib_data=calib_data,
        label_names=None,
        num_calib_examples=1,
        quantize_mode='full')
    qsym = qsym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
    if check_calibration:
        check_qsym_calibrated(qsym, out_type, name=name)
    if check_scale_align:
        check_qsym_scale_align(qsym)
    if gluon_forward:
        check_qsym_gluon_forward(qsym, qarg_params, qaux_params, data_shape)
    else:
        quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params,
                                           batch, data_shape)
        # Tolerance scales with the reference output's dynamic range since
        # quantization error is proportional to it.
        for ref, quantized in zip(ref_out, quantized_out):
            min_range = mx.nd.min(ref).asscalar()
            max_range = mx.nd.max(ref).asscalar()
            atol = 0.1 * max(abs(min_range), abs(max_range))
            assert_almost_equal_with_err(quantized.asnumpy(),
                                         ref.asnumpy(),
                                         rtol=0.1,
                                         atol=atol,
                                         etol=0.2)
        check_qsym_dummy_forward(qsym, batch, data_shape)
# Example no. 9
    def fit(self,
            train_data,
            eval_data=None,
            eval_metric='acc',
            validate_metric=None,
            work_load_list=None,
            epoch_end_callback=None,
            batch_end_callback=None,
            fixed_param_prefix=None,
            initializer=None,
            arg_params=None,
            aux_params=None,
            allow_missing=False,
            optimizer=None,
            optimizer_params=None,
            begin_epoch=0,
            num_epoch=None,
            kvstore='device'):
        """Train ``self.module`` on *train_data* with per-phase timing.

        Binds the module for training, initializes parameters and the
        optimizer, then runs the epoch/batch loop: forward/backward/update,
        metric updates and batch/epoch callbacks. When *eval_data* is given,
        the model is scored with *validate_metric* after every epoch (with a
        special fold-BN re-evaluation path for 'mobilenet_int8_foldbn').

        NOTE(review): the loop below reads ``kvstore.rank``, which assumes a
        KVStore object is passed in; the default string 'device' has no
        ``rank`` attribute — confirm how callers invoke this.
        """

        self.module.bind(data_shapes=self.data_shapes,
                         label_shapes=self.label_shapes,
                         for_training=True)
        self.module.init_params(initializer=initializer,
                                arg_params=arg_params,
                                aux_params=aux_params,
                                allow_missing=allow_missing)
        self.module.init_optimizer(kvstore=kvstore,
                                   optimizer=optimizer,
                                   optimizer_params=optimizer_params)

        # fall back to the training metric for validation when none is given
        if validate_metric is None:
            validate_metric = eval_metric
        if not isinstance(eval_metric, metric.EvalMetric):
            eval_metric = metric.create(eval_metric)

        # global iteration counter across all epochs
        temp_count = 0

        # # test model size by saving params of model
        # arg_params, aux_params = self.module.get_params()
        # for callback in _as_list(epoch_end_callback):
        #     callback(0, self.symbol, arg_params, aux_params)
        # raise NotImplementedError

        # training loop
        for epoch in range(begin_epoch, num_epoch):

            # per-epoch timing meters for each phase of an iteration
            train_time = AverageMeter()
            kvstore_sync_time = AverageMeter()
            get_data_time = AverageMeter()
            iter_total_time = AverageMeter()

            tic = time.time()
            eval_metric.reset()
            nbatch = 0
            data_iter = iter(train_data)
            end_of_batch = False
            # prefetch the first batch; subsequent batches are prefetched
            # inside the loop so data loading overlaps compute
            next_data_batch = next(data_iter)
            while not end_of_batch:
                start_time = time.time()
                data_batch = next_data_batch

                self.module.forward(data_batch, is_train=True)
                self.module.backward()

                # ndarray.waitall()
                train_time.update(time.time() - start_time)

                self.module.update()

                # ndarray.waitall()
                kvstore_sync_time.update(time.time() - start_time)

                # prefetch the next batch; StopIteration ends the epoch
                try:
                    next_data_batch = next(data_iter)
                except StopIteration:
                    end_of_batch = True

                # ndarray.waitall()
                get_data_time.update(time.time() - start_time)

                # pre-sliced batches arrive as a list of per-device batches
                if isinstance(data_batch, list):
                    self.module.update_metric(eval_metric,
                                              [db.label for db in data_batch],
                                              pre_sliced=True)
                else:
                    self.module.update_metric(eval_metric, data_batch.label)

                # ndarray.waitall()
                iter_total_time.update(time.time() - start_time)

                if batch_end_callback is not None:
                    # batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
                    #                                  eval_metric=eval_metric,
                    #                                  locals=locals())

                    # extended BatchEndParam carrying the phase timings
                    batch_end_params = BatchEndParam(
                        epoch=epoch,
                        nbatch=nbatch,
                        eval_metric=eval_metric,
                        locals=locals(),
                        rank=kvstore.rank,
                        total_iter=temp_count,
                        cur_data_time=get_data_time.val,
                        avg_data_time=get_data_time.avg,
                        cur_batch_time=train_time.val,
                        avg_batch_time=train_time.avg,
                        cur_kvstore_sync_time=kvstore_sync_time.val,
                        avg_kvstore_sync_time=kvstore_sync_time.avg,
                        cur_iter_total_time=iter_total_time.val,
                        avg_iter_total_time=iter_total_time.avg)
                    for callback in _as_list(batch_end_callback):
                        callback(batch_end_params)
                nbatch += 1
                temp_count += 1

            # log epoch-level training metrics
            for name, val in eval_metric.get_name_value():
                self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
            toc = time.time()
            self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))

            # sync aggregated params back into the module
            arg_params, aux_params = self.module.get_params()
            self.module.set_params(arg_params, aux_params)

            # checkpoint only on rank 0
            if epoch_end_callback is not None and kvstore.rank == 0:
                for callback in _as_list(epoch_end_callback):
                    callback(epoch, self.symbol, arg_params, aux_params)
            if eval_data:
                if self.config.network == 'mobilenet_int8_foldbn':
                    # for fold bn to create inference symbol
                    total_params_path = "./model/%s-%04d.params" % (
                        self.config.model_prefix, epoch + 1)
                    # total_params_path = "./model/mobilenet_flodbn_0904/mobilenet_int8_flodbn_imagenet_retrain_80_pertensor-fold-0100.params"
                    # _, arg_params, aux_params = mx.model.load_checkpoint('./model/mobilenet_flodbn_0904/mobilenet_int8_flodbn_imagenet_retrain_80_pertensor-fold', 100)
                    import os
                    assert os.path.exists(
                        total_params_path
                    ), "please provide the correct total_params_path for foldbn eval"
                    # NOTE(review): eval() of a config-supplied name builds the
                    # network constructor dynamically — confirm config.network
                    # comes only from trusted configuration.
                    eval_sym = eval(self.config.network)(
                        num_classes=self.config.num_classes,
                        quant_mod=self.config.quant_mod,
                        delay_quant=self.config.delay_quant,
                        is_weight_perchannel=self.config.is_weight_perchannel,
                        total_params_path=total_params_path,
                        quantize_flag=self.config.quantize_flag)
                    # separate inference module for the fold-BN eval symbol
                    eval_module = Module(
                        symbol=eval_sym,
                        data_names=self.data_names,
                        label_names=self.label_names,
                        logger=self.logger,
                        context=self.context,
                        work_load_list=self.work_load_list,
                        fixed_param_names=self.fixed_param_names)
                    eval_module.bind(data_shapes=self.data_shapes,
                                     label_shapes=self.label_shapes,
                                     for_training=False)
                    eval_module.init_params(initializer=initializer,
                                            arg_params=arg_params,
                                            aux_params=aux_params)
                    res = eval_module.score(eval_data,
                                            validate_metric,
                                            score_end_callback=None,
                                            batch_end_callback=None,
                                            reset=True,
                                            epoch=epoch)
                else:
                    res = self.module.score(eval_data,
                                            validate_metric,
                                            score_end_callback=None,
                                            batch_end_callback=None,
                                            reset=True,
                                            epoch=epoch)
                # log validation results
                for name, val in res:
                    self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name,
                                     val)

            train_data.reset()
# Example no. 10
    def check_quantize_model(qdtype):
        """Quantize fp32 models (a residual net and a multi-output net) to
        ``qdtype`` and verify quantized params, calibration attributes and a
        forward pass, with and without the optional excluded layer names.

        Skips configurations not supported on the current backend.
        """
        if is_test_for_native_cpu():
            print(
                'skipped testing test_quantize_model_with_forward for native cpu since it is not supported yet'
            )
            return
        elif qdtype == 'int8' and is_test_for_mkldnn():
            print(
                'skipped testing test_quantize_model_with_forward for mkldnn cpu int8 since it is not supported yet'
            )
            return
        elif qdtype == 'uint8' and is_test_for_gpu():
            print(
                'skipped testing test_quantize_model_with_forward for gpu uint8 since it is not supported yet'
            )
            return

        def check_params(params, qparams, qsym=None):
            # Without qsym the params must be passed through unchanged; with
            # qsym they must match the ground-truth quantization of `params`.
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(
                    qsym, params, th_dict={})
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            # Every requantize node must carry calibrated min/max ranges.
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            # Every quantize node must emit the requested output dtype.
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape,
                               label_shape):
            # Bind the quantized symbol and run one inference batch.
            mod = mx.mod.Module(symbol=qsym, context=mx.current_context())
            mod.bind(for_training=False,
                     data_shapes=[('data', data_shape)],
                     label_shapes=[('softmax_label', label_shape)])
            mod.set_params(qarg_params, qaux_params)
            data = [
                mx.random.uniform(-1.0, 1.0, shape=shape)
                for _, shape in mod.data_shapes
            ]
            batch = mx.io.DataBatch(data, [])
            mod.forward(batch, is_train=False)
            for output in mod.get_outputs():
                output.wait_to_read()

        sym = get_fp32_residual()
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)

        length = batch_size  # specify num of outputs from split op
        msym = get_fp32_sym_with_multiple_outputs(length)
        msym_label_shape = (length, 10)
        msym_data_shape = (length, 4, 4, 10, 10)

        for s, dshape, lshape in zip((sym, msym),
                                     (data_shape, msym_data_shape),
                                     (label_shape, msym_label_shape)):
            mod = Module(symbol=s)
            mod.bind(data_shapes=[('data', dshape)],
                     label_shapes=[('softmax_label', lshape)])

            mod.init_params()
            arg_params, aux_params = mod.get_params()
            excluded_names = []
            if mx.current_context() == mx.cpu():
                excluded_names += ['fc']
            excluded_names += ['concat']

            optional_names = ['pool0']
            # BUG FIX: removed a dead `exclude_sym_names = []` assignment
            # here (misspelling of `excluded_sym_names` that was never read).
            for skip_optional_names in [False, True]:
                if skip_optional_names:
                    excluded_sym_names = excluded_names
                else:
                    excluded_sym_names = excluded_names + optional_names

                # Quantize without calibration and check params + forward.
                qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                    sym=s,
                    arg_params=arg_params,
                    aux_params=aux_params,
                    excluded_sym_names=excluded_sym_names,
                    ctx=mx.current_context(),
                    quantized_dtype=qdtype,
                    calib_mode='none')
                check_params(arg_params, qarg_params, qsym)
                check_params(aux_params, qaux_params)
                check_qsym_forward(qsym, qarg_params, qaux_params, dshape,
                                   lshape)

                # Quantize again with naive calibration on random data.
                calib_data = mx.nd.random.uniform(shape=dshape)
                calib_data = NDArrayIter(data=calib_data,
                                         batch_size=batch_size)
                calib_data = DummyIter(calib_data)
                qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                    sym=s,
                    arg_params=arg_params,
                    aux_params=aux_params,
                    excluded_sym_names=excluded_sym_names,
                    ctx=mx.current_context(),
                    quantized_dtype=qdtype,
                    calib_mode='naive',
                    calib_data=calib_data,
                    num_calib_examples=20)
                check_params(arg_params, qarg_params, qsym)
                check_params(aux_params, qaux_params)
                check_qsym_calibrated(qsym)
                check_qsym_qdtype(qsym, qdtype)
                check_qsym_forward(qsym, qarg_params, qaux_params, dshape,
                                   lshape)
Exemplo n.º 11
0
    def check_quantize_model(qdtype):
        """Quantize two fp32 networks to ``qdtype`` and check the resulting
        params and graph attributes, once uncalibrated and once with naive
        calibration. Unsupported backend/dtype combinations are skipped.
        """
        if is_test_for_native_cpu():
            print(
                'skipped testing quantize_model for native cpu since it is not supported yet'
            )
            return
        if qdtype == 'int8' and is_test_for_mkldnn():
            print(
                'skipped testing quantize_model for mkldnn cpu int8 since it is not supported yet'
            )
            return
        if qdtype == 'uint8' and is_test_for_gpu():
            print(
                'skipped testing quantize_model for gpu uint8 since it is not supported yet'
            )
            return

        def check_params(params, qparams, qsym=None):
            # No qsym: params must pass through unchanged. With qsym: the
            # quantized params must equal the ground-truth quantization.
            if qsym is None:
                assert len(params) == len(qparams)
                reference = params
            else:
                reference = mx.contrib.quant._quantize_params(
                    qsym, params, th_dict={})
                assert len(qparams) == len(reference)
            for key, value in reference.items():
                assert key in qparams
                assert same(value.asnumpy(), qparams[key].asnumpy())

        def check_qsym_calibrated(qsym):
            # Requantize nodes must carry calibrated min/max ranges.
            for node_name, node_attrs in qsym.attr_dict().items():
                if 'requantize_' in node_name:
                    assert 'min_calib_range' in node_attrs
                    assert 'max_calib_range' in node_attrs

        def check_qsym_qdtype(qsym, qdtype):
            # Quantize nodes must emit the requested output dtype.
            for node_name, node_attrs in qsym.attr_dict().items():
                if '_quantize' in node_name:
                    assert 'out_type' in node_attrs
                    assert node_attrs['out_type'] == qdtype

        sym = get_fp32_sym()
        batch_size = 4
        label_shape = (batch_size, 10)
        data_shape = (batch_size, 4, 10, 10)

        length = batch_size  # number of outputs produced by the split op
        msym = get_fp32_sym_with_multiple_outputs(length)
        msym_label_shape = (length, 10)
        msym_data_shape = (length, 4, 4, 10, 10)

        cases = zip((sym, msym),
                    (data_shape, msym_data_shape),
                    (label_shape, msym_label_shape))
        for net, dshape, lshape in cases:
            mod = Module(symbol=net)
            mod.bind(data_shapes=[('data', dshape)],
                     label_shapes=[('softmax_label', lshape)])
            mod.init_params()
            arg_params, aux_params = mod.get_params()

            # First pass: quantize without any calibration.
            qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                sym=net,
                arg_params=arg_params,
                aux_params=aux_params,
                ctx=mx.current_context(),
                quantized_dtype=qdtype,
                calib_mode='none')
            check_params(arg_params, qarg_params, qsym)
            check_params(aux_params, qaux_params)

            # Second pass: naive calibration driven by random data.
            calib_data = mx.nd.random.uniform(shape=dshape)
            calib_data = NDArrayIter(data=calib_data, batch_size=batch_size)
            calib_data = DummyIter(calib_data)
            qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                sym=net,
                arg_params=arg_params,
                aux_params=aux_params,
                ctx=mx.current_context(),
                quantized_dtype=qdtype,
                calib_mode='naive',
                calib_data=calib_data,
                num_calib_examples=20)
            check_params(arg_params, qarg_params, qsym)
            check_params(aux_params, qaux_params)
            check_qsym_calibrated(qsym)
            check_qsym_qdtype(qsym, qdtype)
Exemplo n.º 12
0
    def check_quantize_model(qdtype):
        """Quantize a single-convolution fp32 network to ``qdtype`` and check
        params, calibration attributes and a forward pass through the
        quantized graph. Unsupported backend/dtype combos are skipped.
        """
        if is_test_for_native_cpu():
            print(
                'skipped testing test_quantize_model_with_forward for native cpu since it is not supported yet'
            )
            return
        if qdtype == 'int8' and is_test_for_mkldnn():
            print(
                'skipped testing test_quantize_model_with_forward for mkldnn cpu int8 since it is not supported yet'
            )
            return
        if qdtype == 'uint8' and is_test_for_gpu():
            print(
                'skipped testing test_quantize_model_with_forward for gpu uint8 since it is not supported yet'
            )
            return

        def check_params(params, qparams, qsym=None):
            # No qsym: params must pass through unchanged. With qsym: the
            # quantized params must equal the ground-truth quantization.
            if qsym is None:
                assert len(params) == len(qparams)
                reference = params
            else:
                reference = mx.contrib.quant._quantize_params(
                    qsym, params, th_dict={})
                assert len(qparams) == len(reference)
            for key, value in reference.items():
                assert key in qparams
                assert same(value.asnumpy(), qparams[key].asnumpy())

        def check_qsym_calibrated(qsym):
            # Requantize nodes must carry calibrated min/max ranges.
            for node_name, node_attrs in qsym.attr_dict().items():
                if 'requantize_' in node_name:
                    assert 'min_calib_range' in node_attrs
                    assert 'max_calib_range' in node_attrs

        def check_qsym_qdtype(qsym, qdtype):
            # Quantize nodes must emit the requested output dtype.
            for node_name, node_attrs in qsym.attr_dict().items():
                if '_quantize' in node_name:
                    assert 'out_type' in node_attrs
                    assert node_attrs['out_type'] == qdtype

        def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape):
            # Run a single inference batch through the quantized graph.
            qmod = mx.mod.Module(symbol=qsym,
                                 label_names=None,
                                 context=mx.current_context())
            qmod.bind(for_training=False, data_shapes=[('data', data_shape)])
            qmod.set_params(qarg_params, qaux_params)
            inputs = [
                mx.random.uniform(-1.0, 1.0, shape=shape)
                for _, shape in qmod.data_shapes
            ]
            qmod.forward(mx.io.DataBatch(inputs, []), is_train=False)
            for out in qmod.get_outputs():
                out.wait_to_read()

        batch_size = 4
        dshape = (batch_size, 4, 10, 10)
        data = mx.sym.Variable('data')
        sym = mx.sym.Convolution(data,
                                 kernel=(1, 1),
                                 num_filter=16,
                                 name='conv0')

        mod = Module(symbol=sym, label_names=None)
        mod.bind(data_shapes=[('data', dshape)])
        mod.init_params()
        arg_params, aux_params = mod.get_params()
        excluded_sym_names = []

        # First pass: quantize without any calibration.
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            excluded_sym_names=excluded_sym_names,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_forward(qsym, qarg_params, qaux_params, dshape)

        # Second pass: naive calibration driven by random data.
        calib_data = mx.nd.random.uniform(shape=dshape)
        calib_data = NDArrayIter(data=calib_data, batch_size=batch_size)
        calib_data = DummyIter(calib_data)
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            excluded_sym_names=excluded_sym_names,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='naive',
            calib_data=calib_data,
            num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
        check_qsym_forward(qsym, qarg_params, qaux_params, dshape)
Exemplo n.º 13
0
def demo_net(sym, class_names, args, result_path):
    """Run single-image detection and append the result to a CSV file.

    Parameters
    ----------
    sym : mxnet.symbol.Symbol
        Detection network symbol producing (rois, scores, bbox_deltas).
    class_names : sequence of str
        Class names indexed by integer class id (index 0 is background).
    args : argparse.Namespace
        Parsed CLI arguments (image path, gpu id, thresholds, ...).
    result_path : pathlib.Path
        CSV file to create (with header) or append to.
    """
    # print config
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # setup context
    ctx = mx.gpu(int(args.gpu)) if args.gpu else mx.cpu(0)

    # load single test image
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)

    # generate data batch
    data_batch = generate_batch(im_tensor, im_info)

    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # produce shape max possible
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)

    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # forward (timed)
    forward_starts = time.time()
    mod.forward(data_batch)
    rois, scores, bbox_deltas = mod.get_outputs()
    rois.wait_to_read()
    rois = rois[:, 1:]  # drop the leading batch-index column
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    forward_costs = time.time() - forward_starts
    print("forward costs %.4f" % (forward_costs))

    im_info = im_info[0]
    # decode detection
    det = im_detect(rois,
                    scores,
                    bbox_deltas,
                    im_info,
                    bbox_stds=args.rcnn_bbox_stds,
                    nms_thresh=args.rcnn_nms_thresh,
                    conf_thresh=args.rcnn_conf_thresh)

    # collect kept boxes as "x_y_w_h;" segments
    img_name = Path(args.image).name
    bbox_str = ''
    for [cls, conf, x1, y1, x2, y2] in det:
        if cls > 0 and conf > args.vis_thresh:
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])
            bbox_str += "%d_%d_%d_%d;" % (int(x1), int(y1), int(x2 - x1),
                                          int(y2 - y1))

    # BUG FIX: open the CSV via a context manager so the handle is closed
    # even if writing raises (original leaked the handle on error).
    fieldnames = ['name', 'coordinate']
    new_file = not result_path.exists()
    with result_path.open("w+" if new_file else "a") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if new_file:
            writer.writeheader()
        # trailing ';' stripped via [:-1]; empty string stays empty
        writer.writerow({'name': img_name, 'coordinate': bbox_str[:-1]})
    print("detect image %s" % img_name)

    # if vis
    if args.vis:
        vis_detection(im_orig,
                      det,
                      class_names,
                      thresh=args.vis_thresh,
                      prefix=args.image)
Exemplo n.º 14
0
class FaceD(object):
    """Face detector wrapping a pre-trained MXNet RCNN-style model.

    Loads a symbol from ``config.SYMBOL_PATH``, binds it lazily on the first
    detection and (unless ``config.FIXSIZE``) rebinds for every image so the
    input shape can vary.
    """

    def __init__(self, config):
        # config supplies: SYMBOL_PATH, SCALE, GPU_ID, THRESH, FIXSIZE,
        # MODEL_PATH, FONT_PATH.
        sym = mx.sym.load(config.SYMBOL_PATH)

        self.nms = py_nms_wrapper(0.3)  # NMS with IoU threshold 0.3
        self.scale = config.SCALE
        self.mod = Module(sym, ['data', 'im_info'], [], context=[mx.gpu(config.GPU_ID)])
        self.thresh = config.THRESH
        self.rebind = not config.FIXSIZE  # rebind per image unless fixed size
        self.model_path = config.MODEL_PATH
        self.font = config.FONT_PATH
        self.preprocess = False

    def _bind_for(self, im_tensor):
        # Rebind the module to the current image's spatial shape.
        # BUG FIX: shapes used Python-2-only long literals (1L, 3L), which
        # are a syntax error on Python 3; plain ints behave identically.
        self.mod.bind([('data', (1, 3, im_tensor.shape[2], im_tensor.shape[3])),
                       ('im_info', (1, 3))], None,
                      for_training=False, inputs_need_grad=False,
                      force_rebind=True, shared_module=None)

    def bbox_detect(self, im, im_scale, force_rebind=False):
        """Detect face boxes in a preprocessed image.

        Returns an (N, 5) array of [x1, y1, x2, y2, score] rows kept by NMS.
        """
        im_tensor = transform(im, [103.06, 115.9, 123.15])
        im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)

        data = [mx.nd.array(im_tensor), mx.nd.array(im_info)]
        data_batch = mx.io.DataBatch(data=data, label=[], pad=0, index=0,
                                    provide_data=[[(k, v.shape) for k, v in zip(self.mod.data_names, data)]],
                                    provide_label=[None])

        # Lazily bind and load params on first use.
        if not self.mod.binded:
            arg_params, aux_params = load_param(self.model_path, 0, process=True)
            self._bind_for(im_tensor)
            self.mod.init_params(arg_params=arg_params, aux_params=aux_params)

        # Rebind for variable-size inputs or when explicitly requested.
        if self.rebind or force_rebind:
            self._bind_for(im_tensor)

        scale = data_batch.data[1].asnumpy()[0, 2]
        self.mod.forward(data_batch)
        output = dict(zip(self.mod.output_names, tuple(self.mod.get_outputs(merge_multi_context=False))))

        rois = output['rois_output'][0].asnumpy()[:, 1:]  # drop batch index
        im_shape = data[0].shape

        scores = output['cls_prob_reshape_output'][0].asnumpy()[0]
        bbox_deltas = output['bbox_pred_reshape_output'][0].asnumpy()[0]

        # Decode deltas, clip to the image and undo the resize scaling.
        pred_boxes = bbox_pred(rois, bbox_deltas)
        pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
        pred_boxes = pred_boxes / scale

        pred_boxes = pred_boxes.astype('f')
        scores = scores.astype('f')

        # Keep class-1 (face) detections above the confidence threshold.
        indexes = np.where(scores[:, 1] > self.thresh)[0]
        cls_scores = scores[indexes, 1, np.newaxis]
        cls_boxes = pred_boxes[indexes, 4:8]
        cls_dets = np.hstack((cls_boxes, cls_scores))
        keep = self.nms(cls_dets)
        return cls_dets[keep, :]

    def Detect(self, img):
        """Detect faces at the configured scale."""
        im, im_scale = resize(img, self.scale)
        dets = self.bbox_detect(im, im_scale)
        return dets

    def Detect_raw(self, img):
        """Detect faces at a small fixed scale, forcing a rebind."""
        im, im_scale = resize(img, [200, 400])
        dets = self.bbox_detect(im, im_scale, True)
        return dets

    def reset(self):
        """Force a full rebind (and param reload) on the next detection."""
        self.mod.binded = False

    def vis_detections(self, img, dets, save='./tmp.jpg'):
        """Draw detection rectangles on ``img`` and write it to ``save``."""
        for bbox in dets:
            cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (127, 255, 0), 4)
        cv2.imwrite(save, img)

    def vis_dets(self, img, dets, names, scores=None):
        """Return a copy of ``img`` annotated with boxes, names and scores."""
        img = img.copy()
        num = len(dets)
        for idx, bbox in enumerate(dets):
            cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (127, 255, 0), 4)
            cv2.rectangle(img, (int(bbox[0]-2), int(bbox[1]-25)), (int(bbox[0]+100), int(bbox[1])), (255, 0, 0), -1)
            if scores is not None:
                cv2.putText(img, '%.3f' % scores[idx], (int(bbox[0]-2), int(bbox[1]+20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), thickness=1, lineType=8)
        # Use PIL for non-ASCII label text (cv2.putText cannot render it).
        font = ImageFont.truetype(self.font, 22)
        img_pil = Image.fromarray(img)
        draw = ImageDraw.Draw(img_pil)
        for idx, bbox in enumerate(dets):
            # NOTE(review): assumes names are bytes (Python 2 style);
            # on Python 3 pass str and drop the decode — confirm with callers.
            draw.text((int(bbox[0]), int(bbox[1]-22)), names[idx].decode('utf8'), font=font, fill=(255, 255, 255, 0))
        img = np.array(img_pil)
        return img
Exemplo n.º 15
0
def check_quantize(sym,
                   data_shape,
                   out_type,
                   name='conv',
                   check_calibration=True,
                   gluon_forward=False,
                   check_scale_align=False):
    """Quantize ``sym`` through the subgraph backend and validate the result.

    Appends a FullyConnected head (plus SoftmaxOutput unless
    ``gluon_forward``), records fp32 reference outputs, quantizes with naive
    calibration to ``out_type``, then checks calibration attributes, optional
    scale alignment, and that quantized outputs match fp32 within ``atol=1``.
    """
    sg_pass_name = config[name][SG_PASS_NAME]
    post_sg_pass_name = config[name][POST_SG_PASS_NAME]

    fc = mx.sym.FullyConnected(data=sym,
                               num_hidden=10,
                               flatten=True,
                               name='fc_softmax')
    # Idiom fix: was `if gluon_forward == True:` — compare truthiness.
    if gluon_forward:
        sym = fc
        sym_sg = sym.get_backend_symbol(sg_pass_name)
        mod = Module(symbol=sym, label_names=[])
        mod.bind(for_training=False, data_shapes=[('data', data_shape)])
    else:
        sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
        sym_sg = sym.get_backend_symbol(sg_pass_name)
        label_shape = (data_shape[0], 10)
        mod = Module(symbol=sym)
        mod.bind(for_training=False,
                 data_shapes=[('data', data_shape)],
                 label_shapes=[('softmax_label', label_shape)])
    mod.init_params(mx.init.Normal(0.5))
    arg_params, aux_params = mod.get_params()

    data = [
        mx.random.uniform(-1, 1, shape=shape, ctx=mx.current_context())
        for _, shape in mod.data_shapes
    ]
    batch = mx.io.DataBatch(data, [])

    # fp32 reference outputs for the accuracy comparison below.
    mod.forward(batch, is_train=False)
    for output in mod.get_outputs():
        output.wait_to_read()
    ref_out = mod.get_outputs()

    excluded_sym_names = []
    if mx.current_context() == mx.cpu() and gluon_forward:
        excluded_sym_names += ['sg_mkldnn_fully_connected_0']
        excluded_sym_names += ['fc_softmax']

    calib_data = mx.nd.random.uniform(shape=data_shape)
    calib_data = NDArrayIter(data=calib_data)
    calib_data = DummyIter(calib_data)

    # Idiom fix: named function instead of a lambda assigned to a name.
    def calib_layer(name):
        # Calibrate every layer output.
        return name.endswith('_output')

    qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
        sym=sym_sg,
        arg_params=arg_params,
        aux_params=aux_params,
        ctx=mx.current_context(),
        excluded_sym_names=excluded_sym_names,
        quantized_dtype=out_type,
        calib_mode='naive',
        calib_data=calib_data,
        calib_layer=calib_layer,
        num_calib_examples=5)
    qsym = qsym.get_backend_symbol(post_sg_pass_name)
    if check_calibration:
        check_qsym_calibrated(qsym, out_type, name=name)
    if check_scale_align:
        check_qsym_scale_align(qsym)
    if gluon_forward:
        check_qsym_gluon_forward(qsym, qarg_params, qaux_params, data_shape)
    else:
        check_qsym_dummy_forward(qsym, batch, data_shape, label_shape)
        quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params,
                                           batch, data_shape, label_shape)
        # atol=1 tolerates the quantization error of int8/uint8 outputs.
        for i in range(len(ref_out)):
            assert_almost_equal(ref_out[i].asnumpy(),
                                quantized_out[i].asnumpy(),
                                atol=1)
Exemplo n.º 16
0
    def check_quantize_model(qdtype):
        """Quantize the fp32 residual unit to ``qdtype`` and verify params,
        calibration attributes and a forward pass through the quantized
        graph. Unsupported backend/dtype combinations are skipped.
        """
        if is_test_for_native_cpu():
            print('skipped testing quantized_residual_unit for native cpu since it is not supported yet')
            return
        if qdtype == 'int8' and is_test_for_mkldnn():
            print('skipped testing quantized_residual_unit for mkldnn cpu int8 since it is not supported yet')
            return
        if qdtype == 'uint8' and is_test_for_gpu():
            print('skipped testing quantized_residual_unit for gpu uint8 since it is not supported yet')
            return

        def check_params(params, qparams, qsym=None):
            # No qsym: params must pass through unchanged. With qsym: the
            # quantized params must equal the ground-truth quantization.
            if qsym is None:
                assert len(params) == len(qparams)
                reference = params
            else:
                reference = mx.contrib.quant._quantize_params(qsym, params)
                assert len(qparams) == len(reference)
            for key, value in reference.items():
                assert key in qparams
                assert same(value.asnumpy(), qparams[key].asnumpy())

        def check_qsym_calibrated(qsym):
            # Requantize nodes must carry calibrated min/max ranges.
            for node_name, node_attrs in qsym.attr_dict().items():
                if 'requantize_' in node_name:
                    assert 'min_calib_range' in node_attrs
                    assert 'max_calib_range' in node_attrs

        def check_qsym_qdtype(qsym, qdtype):
            # Quantize nodes must emit the requested output dtype.
            for node_name, node_attrs in qsym.attr_dict().items():
                if '_quantize' in node_name:
                    assert 'out_type' in node_attrs
                    assert node_attrs['out_type'] == qdtype

        def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape):
            # Run a single inference batch through the quantized graph.
            qmod = mx.mod.Module(symbol=qsym, context=mx.current_context())
            qmod.bind(for_training=False,
                      data_shapes=[('data', data_shape)],
                      label_shapes=[('softmax_label', label_shape)])
            qmod.set_params(qarg_params, qaux_params)
            inputs = [mx.random.uniform(-1.0, 1.0, shape=shape) for _, shape in qmod.data_shapes]
            qmod.forward(mx.io.DataBatch(inputs, []), is_train=False)
            for out in qmod.get_outputs():
                out.wait_to_read()

        sym = get_fp32_residual()
        mod = Module(symbol=sym)
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)
        mod.bind(data_shapes=[('data', data_shape)], label_shapes=[('softmax_label', label_shape)])
        mod.init_params()
        arg_params, aux_params = mod.get_params()

        excluded_sym_names = []
        if mx.current_context() == mx.cpu():
            excluded_sym_names += ['fc']

        # First pass: quantize without any calibration.
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym,
                                                                         arg_params=arg_params,
                                                                         aux_params=aux_params,
                                                                         excluded_sym_names=excluded_sym_names,
                                                                         ctx=mx.current_context(),
                                                                         quantized_dtype=qdtype,
                                                                         calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape)

        # Second pass: naive calibration driven by random data.
        calib_data = mx.nd.random.uniform(shape=data_shape)
        calib_data = NDArrayIter(data=calib_data)
        calib_data = DummyIter(calib_data)
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym,
                                                                         arg_params=arg_params,
                                                                         aux_params=aux_params,
                                                                         excluded_sym_names=excluded_sym_names,
                                                                         ctx=mx.current_context(),
                                                                         quantized_dtype=qdtype,
                                                                         calib_mode='naive',
                                                                         calib_data=calib_data,
                                                                         num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
        check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape)
Exemplo n.º 17
0
def train_net(args,
              ctx,
              pretrained,
              epoch,
              prefix,
              begin_epoch,
              end_epoch,
              lr=0.001,
              lr_step='5'):
    """Train a multi-stride face-detection RPN (RetinaFace-style heads).

    Builds the training symbol on top of a backbone loaded from a
    hard-coded EfficientNet-B0 JSON, attaches per-stride accuracy/L1
    metrics, and runs ``Module.fit`` with a manual step-based LR schedule.

    Parameters
    ----------
    args : argparse.Namespace
        Command-line options (image_set, dataset, network, kvstore,
        frequent, no_flip, no_shuffle, work_load_list, ...).
    ctx : list of mx.Context
        Devices to train on; effective batch size is
        ``config.TRAIN.BATCH_IMAGES * len(ctx)``.
    pretrained : str
        Checkpoint prefix for warm-start weights. NOTE(review): currently
        unused — the loading branch below is commented out via the
        triple-quoted string, so training starts from empty param dicts.
    epoch : int
        Checkpoint epoch for ``pretrained`` (unused for the same reason).
    prefix : str
        Output checkpoint prefix used by ``save_model``.
    begin_epoch, end_epoch : int
        Epoch range passed to ``Module.fit``.
    lr : float
        Base learning rate for SGD.
    lr_step : str
        Comma-separated epoch numbers at which the LR is rescaled.
    """
    # setup config
    #init_config()
    #print(config)
    # setup multi-gpu

    # one image batch per device
    input_batch_size = config.TRAIN.BATCH_IMAGES * len(ctx)

    # print config
    logger.info(pprint.pformat(config))

    # load dataset and prepare imdb for training
    image_sets = [iset for iset in args.image_set.split('+')]
    roidbs = [
        load_gt_roidb(args.dataset,
                      image_set,
                      args.root_path,
                      args.dataset_path,
                      flip=not args.no_flip) for image_set in image_sets
    ]
    #roidb = merge_roidb(roidbs)
    #roidb = filter_roidb(roidb)
    # NOTE(review): only the first image set is used; merge/filter is disabled.
    roidb = roidbs[0]

    # load symbol
    #sym = eval('get_' + args.network + '_train')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)
    #feat_sym = sym.get_internals()['rpn_cls_score_output']
    #train_data = AnchorLoader(feat_sym, roidb, batch_size=input_batch_size, shuffle=not args.no_shuffle,
    #                          ctx=ctx, work_load_list=args.work_load_list,
    #                          feat_stride=config.RPN_FEAT_STRIDE, anchor_scales=config.ANCHOR_SCALES,
    #                          anchor_ratios=config.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING)

    # load and initialize params
    #sym = get_mobilenet_v3_large(0.75)
    # Backbone is loaded from a hard-coded path; ``pretrained``/``epoch`` are ignored.
    sym = mx.sym.load('./model/efficientnet-b0-symbol.json')
    #sym = get_symbol(1000,0.25)
    # Start with empty parameter dicts; upsampling weights are filled below,
    # everything else comes from ``initializer`` inside ``mod.fit``.
    arg_params = {}
    aux_params = {}
    '''
    sym = None
   
    if len(pretrained)==0:
        arg_params = {}
        aux_params = {}
    else:
        logger.info('loading %s,%d'%(pretrained, epoch))
        sym, arg_params, aux_params = mx.model.load_checkpoint(pretrained, epoch)
        #arg_params, aux_params = load_param(pretrained, epoch, convert=True)
        #for k in ['rpn_conv_3x3', 'rpn_cls_score', 'rpn_bbox_pred', 'cls_score', 'bbox_pred']:
        #  _k = k+"_weight"
        #  if _k in arg_shape_dict:
        #    v = 0.001 if _k.startswith('bbox_') else 0.01
        #    arg_params[_k] = mx.random.normal(0, v, shape=arg_shape_dict[_k])
        #    print('init %s with normal %.5f'%(_k,v))
        #  _k = k+"_bias"
        #  if _k in arg_shape_dict:
        #    arg_params[_k] = mx.nd.zeros(shape=arg_shape_dict[_k])
        #    print('init %s with zero'%(_k))
    '''
    print('mobilev2: ', sym.get_internals())
    # Wrap the backbone with the network-specific training heads,
    # e.g. get_<network>_train(sym).
    sym = eval('get_' + args.network + '_train')(sym)
    #print(sym.get_internals())
    # Collect the per-stride RPN classification score outputs; these drive
    # anchor/label generation in the loader.
    feat_sym = []
    for stride in config.RPN_FEAT_STRIDE:
        feat_sym.append(
            sym.get_internals()['face_rpn_cls_score_stride%s_output' % stride])

    train_data = CropLoader(feat_sym,
                            roidb,
                            batch_size=input_batch_size,
                            shuffle=not args.no_shuffle,
                            ctx=ctx,
                            work_load_list=args.work_load_list)

    # infer max shape
    # NOTE(review): both H and W use v[1] — presumably SCALES entries are
    # (short, long) pairs and the max long side bounds a square input; confirm.
    max_data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]),
                                max([v[1] for v in config.SCALES])))]
    #max_data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
    max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
    max_data_shape.append(('gt_boxes', (1, roidb[0]['max_num_boxes'], 5)))
    logger.info('providing maximum shape %s %s' %
                (max_data_shape, max_label_shape))

    # infer shape
    data_shape_dict = dict(train_data.provide_data + train_data.provide_label)
    arg_shape, out_shape, aux_shape = sym.infer_shape(**data_shape_dict)
    arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
    out_shape_dict = dict(zip(sym.list_outputs(), out_shape))
    aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
    logger.info('output shape %s' % pprint.pformat(out_shape_dict))

    # Deconvolution ("upsampling") weights get a fixed bilinear
    # initialization instead of the random Xavier init used elsewhere.
    for k, v in arg_shape_dict.items():
        if k.find('upsampling') >= 0:
            print('initializing upsampling_weight', k)
            arg_params[k] = mx.nd.zeros(shape=v)
            init = mx.init.Initializer()
            init._init_bilinear(k, arg_params[k])
            #print(args[k])

    # check parameter shapes
    #for k in sym.list_arguments():
    #    if k in data_shape_dict:
    #        continue
    #    assert k in arg_params, k + ' not initialized'
    #    assert arg_params[k].shape == arg_shape_dict[k], \
    #        'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
    #for k in sym.list_auxiliary_states():
    #    assert k in aux_params, k + ' not initialized'
    #    assert aux_params[k].shape == aux_shape_dict[k], \
    #        'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)

    fixed_param_prefix = config.FIXED_PARAMS
    # create solver
    data_names = [k[0] for k in train_data.provide_data]
    label_names = [k[0] for k in train_data.provide_label]
    fixed_param_names = get_fixed_params(sym, fixed_param_prefix)
    print('fixed', fixed_param_names, file=sys.stderr)
    mod = Module(sym,
                 data_names=data_names,
                 label_names=label_names,
                 logger=logger,
                 context=ctx,
                 work_load_list=args.work_load_list,
                 fixed_param_names=fixed_param_names)

    # metric
    # ``mid`` indexes into the flat list of network outputs; each metric
    # consumes a (pred, label) or (loss, weight) pair, hence mid += 2.
    eval_metrics = mx.metric.CompositeEvalMetric()
    mid = 0
    for m in range(len(config.RPN_FEAT_STRIDE)):
        stride = config.RPN_FEAT_STRIDE[m]
        #mid = m*MSTEP
        _metric = metric.RPNAccMetric(pred_idx=mid,
                                      label_idx=mid + 1,
                                      name='RPNAcc_s%s' % stride)
        eval_metrics.add(_metric)
        mid += 2
        #_metric = metric.RPNLogLossMetric(pred_idx=mid, label_idx=mid+1)
        #eval_metrics.add(_metric)

        _metric = metric.RPNL1LossMetric(loss_idx=mid,
                                         weight_idx=mid + 1,
                                         name='RPNL1Loss_s%s' % stride)
        eval_metrics.add(_metric)
        mid += 2
        if config.FACE_LANDMARK:
            _metric = metric.RPNL1LossMetric(loss_idx=mid,
                                             weight_idx=mid + 1,
                                             name='RPNLandMarkL1Loss_s%s' %
                                             stride)
            eval_metrics.add(_metric)
            mid += 2
        if config.HEAD_BOX:
            _metric = metric.RPNAccMetric(pred_idx=mid,
                                          label_idx=mid + 1,
                                          name='RPNAcc_head_s%s' % stride)
            eval_metrics.add(_metric)
            mid += 2
            #_metric = metric.RPNLogLossMetric(pred_idx=mid, label_idx=mid+1)
            #eval_metrics.add(_metric)

            _metric = metric.RPNL1LossMetric(loss_idx=mid,
                                             weight_idx=mid + 1,
                                             name='RPNL1Loss_head_s%s' %
                                             stride)
            eval_metrics.add(_metric)
            mid += 2

    # callback
    #means = np.tile(np.array(config.TRAIN.BBOX_MEANS), config.NUM_CLASSES)
    #stds = np.tile(np.array(config.TRAIN.BBOX_STDS), config.NUM_CLASSES)
    #epoch_end_callback = callback.do_checkpoint(prefix, means, stds)
    epoch_end_callback = None
    # decide learning rate
    #base_lr = lr
    #lr_factor = 0.1
    #lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))

    # Convert decay epochs to iteration counts relative to begin_epoch.
    lr_epoch = [int(epoch) for epoch in lr_step.split(',')]
    lr_epoch_diff = [
        epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch
    ]
    lr_iters = [
        int(epoch * len(roidb) / input_batch_size) for epoch in lr_epoch_diff
    ]

    # Each lr_steps entry is (iteration, multiplicative factor).
    lr_steps = []
    if len(lr_iters) == 5:
        factors = [0.5, 0.5, 0.4, 0.1, 0.1]
        for i in range(5):
            lr_steps.append((lr_iters[i], factors[i]))
    elif len(lr_iters) == 8:  #warmup
        # 1.5849 ~= 10**0.2, so the five warmup steps raise the LR ~10x
        # overall before the remaining 0.1x decays kick in.
        for li in lr_iters[0:5]:
            lr_steps.append((li, 1.5849))
        for li in lr_iters[5:]:
            lr_steps.append((li, 0.1))
    else:
        for li in lr_iters:
            lr_steps.append((li, 0.1))
    #lr_steps = [ (20,0.1), (40, 0.1) ] #XXX

    #end_epoch = 10000
    curr_epoch = 0
    logger.info('lr %f lr_epoch_diff %s lr_steps %s' %
                (lr, lr_epoch_diff, lr_steps))
    # optimizer
    opt = optimizer.SGD(learning_rate=lr,
                        momentum=0.9,
                        wd=0.0005,
                        rescale_grad=1.0 / len(ctx),
                        clip_gradient=None)
    initializer = mx.init.Xavier()
    #initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) #resnet style

    # Overlap data loading with compute.
    train_data = mx.io.PrefetchingIter(train_data)

    _cb = mx.callback.Speedometer(train_data.batch_size,
                                  frequent=args.frequent,
                                  auto_reset=False)
    # Mutable one-element list so the nested callback can update the counter.
    global_step = [0]

    def save_model(epoch):
        # Rebuild a deploy symbol from the trained graph: per stride, keep
        # softmaxed cls probabilities + bbox (and optionally landmark)
        # regressors, then checkpoint it with the current params.
        arg, aux = mod.get_params()
        all_layers = mod.symbol.get_internals()
        outs = []
        for stride in config.RPN_FEAT_STRIDE:
            num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
            _name = 'face_rpn_cls_score_stride%d_output' % stride
            rpn_cls_score = all_layers[_name]

            # prepare rpn data
            rpn_cls_score_reshape = mx.symbol.Reshape(
                data=rpn_cls_score,
                shape=(0, 2, -1, 0),
                name="face_rpn_cls_score_reshape_stride%d" % stride)

            rpn_cls_prob = mx.symbol.SoftmaxActivation(
                data=rpn_cls_score_reshape,
                mode="channel",
                name="face_rpn_cls_prob_stride%d" % stride)
            rpn_cls_prob_reshape = mx.symbol.Reshape(
                data=rpn_cls_prob,
                shape=(0, 2 * num_anchors, -1, 0),
                name='face_rpn_cls_prob_reshape_stride%d' % stride)
            _name = 'face_rpn_bbox_pred_stride%d_output' % stride
            rpn_bbox_pred = all_layers[_name]
            outs.append(rpn_cls_prob_reshape)
            outs.append(rpn_bbox_pred)
            if config.FACE_LANDMARK:
                _name = 'face_rpn_landmark_pred_stride%d_output' % stride
                rpn_landmark_pred = all_layers[_name]
                outs.append(rpn_landmark_pred)
        _sym = mx.sym.Group(outs)
        mx.model.save_checkpoint(prefix, epoch, _sym, arg, aux)

    def _batch_callback(param):
        # Per-batch: log speed, advance the step counter, and apply any LR
        # step scheduled for this iteration.
        #global global_step
        _cb(param)
        global_step[0] += 1
        mbatch = global_step[0]
        for step in lr_steps:
            if mbatch == step[0]:
                opt.lr *= step[1]
                print('lr change to',
                      opt.lr,
                      ' in batch',
                      mbatch,
                      file=sys.stderr)
                break

        # Training ends at the last scheduled LR step: save a final
        # checkpoint tagged as epoch 999 and exit the process.
        # NOTE(review): raises IndexError if lr_steps is empty.
        if mbatch == lr_steps[-1][0]:
            print('saving final checkpoint', mbatch, file=sys.stderr)
            save_model(999)
            #arg, aux = mod.get_params()
            #mx.model.save_checkpoint(prefix, 99, mod.symbol, arg, aux)
            sys.exit(0)

    def _epoch_callback(epoch, symbol, arg_params, aux_params):
        # Checkpoint the deploy symbol after every epoch.
        save_model(epoch)

    # train
    mod.fit(train_data,
            eval_metric=eval_metrics,
            epoch_end_callback=_epoch_callback,
            batch_end_callback=_batch_callback,
            kvstore=args.kvstore,
            optimizer=opt,
            initializer=initializer,
            allow_missing=True,
            arg_params=arg_params,
            aux_params=aux_params,
            begin_epoch=begin_epoch,
            num_epoch=end_epoch)
# --- Example #18 ---
def train_net(sym, roidb, args, config):
    """Train a Faster R-CNN style network end to end with ``Module.fit``.

    Parameters
    ----------
    sym : mx.sym.Symbol
        Full training symbol; must expose an internal
        ``rpn_cls_score_output`` used for anchor generation.
    roidb : list of dict
        Prepared ROI database (one record per training image).
    args : argparse.Namespace
        Command-line options (gpus, rcnn_batch_size, lr, epochs,
        save_prefix, pretrained/resume checkpoints, ...).
    config : object
        Project config with ``rpn``, ``transform`` and ``train_param``
        sections consumed below.
    """
    # Mirror all logging into <save_prefix>/train.log as well.
    logger.addHandler(logging.FileHandler("{0}/{1}".format(args.save_prefix, 'train.log')))
    # print config
    logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))
    # setup multi-gpu
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    # per-device batch size scaled by the number of GPUs
    batch_size = args.rcnn_batch_size * len(ctx)

    # config = Config('configs/vgg_step_{}.yml'.format(args.step))
    # load training data
    feat_sym = sym.get_internals()['rpn_cls_score_output']
    ag = AnchorGenerator(feat_stride=config.rpn['rpn_feat_stride'],
                         anchor_scales=config.rpn['rpn_anchor_scales'], anchor_ratios=config.rpn['rpn_anchor_ratios'])
    asp = AnchorSampler(allowed_border=args.rpn_allowed_border, batch_rois=args.rpn_batch_rois,
                        fg_fraction=config.rpn['rpn_fg_fraction'], fg_overlap=config.rpn['rpn_fg_overlap'],
                        bg_overlap=config.rpn['rpn_bg_overlap'])
    train_data = AnchorLoader(roidb, batch_size, args.img_short_side, args.img_long_side,
                              config.transform['img_pixel_means'],
                              config.transform['img_pixel_stds'], feat_sym, ag, asp, shuffle=True)

    # produce shape max possible
    # Bind at the largest (square, long-side) input so every image fits.
    _, out_shape, _ = feat_sym.infer_shape(data=(1, 3, args.img_long_side, args.img_long_side))
    feat_height, feat_width = out_shape[0][-2:]
    rpn_num_anchors = len(config.rpn['rpn_anchor_scales']) * len(config.rpn['rpn_anchor_ratios'])
    data_names = ['data', 'im_info', 'gt_boxes']
    label_names = ['label', 'bbox_target', 'bbox_weight']
    data_shapes = [('data', (batch_size, 3, args.img_long_side, args.img_long_side)),
                   ('im_info', (batch_size, 3)),
                   ('gt_boxes', (batch_size, 100, 5))]
    label_shapes = [('label', (batch_size, 1, rpn_num_anchors * feat_height, feat_width)),
                    ('bbox_target', (batch_size, 4 * rpn_num_anchors, feat_height, feat_width)),
                    ('bbox_weight', (batch_size, 4 * rpn_num_anchors, feat_height, feat_width))]

    # print shapes
    data_shape_dict, out_shape_dict = infer_data_shape(sym, data_shapes + label_shapes)
    logger.info('max input shape\n%s' % pprint.pformat(data_shape_dict))
    logger.info('max output shape\n%s' % pprint.pformat(out_shape_dict))

    # load and initialize params
    # Resume continues from a full checkpoint; otherwise start from a
    # pretrained backbone and initialize the new detector heads.
    if args.resume:
        arg_params, aux_params = load_param(args.resume)
        # arg_params, aux_params = initialize_bias(sym, data_shapes, arg_params, aux_params)
    else:
        arg_params, aux_params = load_param(args.pretrained)
        arg_params, aux_params = initialize_frcnn(sym, data_shapes, arg_params, aux_params)
        arg_params, aux_params = initialize_bias(sym, data_shapes, arg_params, aux_params)
    # check parameter shapes
    check_shape(sym, data_shapes + label_shapes, arg_params, aux_params)

    # check fixed params
    fixed_param_names = get_fixed_params(sym, config.train_param['net_fixed_params'])
    logger.info('locking params\n%s' % pprint.pformat(fixed_param_names))

    # metric
    rpn_eval_metric = RPNAccMetric()
    rpn_cls_metric = RPNLogLossMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    eval_metric = RCNNAccMetric()
    cls_metric = RCNNLogLossMetric()
    bbox_metric = RCNNL1LossMetric()
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)

    # callback
    batch_end_callback = mx.callback.Speedometer(batch_size, frequent=args.log_interval, auto_reset=False)
    epoch_end_callback = mx.callback.do_checkpoint(args.save_prefix)

    # learning schedule
    # Decays already "used up" before start_epoch shrink the base LR first;
    # the remaining decay epochs become a MultiFactorScheduler in iterations.
    base_lr = args.lr
    lr_factor = 0.1
    lr_epoch = [int(epoch) for epoch in args.lr_decay_epoch.split(',')]
    lr_epoch_diff = [epoch - args.start_epoch for epoch in lr_epoch if epoch > args.start_epoch]
    lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
    logger.info('lr %f lr_epoch_diff %s lr_iters %s' % (lr, lr_epoch_diff, lr_iters))
    lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iters, lr_factor)
    # optimizer
    optimizer_params = {'momentum': 0.9,
                        'wd': 0.0005,
                        'learning_rate': lr,
                        'lr_scheduler': lr_scheduler,
                        'rescale_grad': (1.0 / batch_size),
                        'clip_gradient': 5}

    # train
    mod = Module(sym, data_names=data_names, label_names=label_names,
                 logger=logger, context=ctx, work_load_list=None,
                 fixed_param_names=fixed_param_names)
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
            batch_end_callback=batch_end_callback, kvstore='device',
            optimizer='sgd', optimizer_params=optimizer_params,
            arg_params=arg_params, aux_params=aux_params, begin_epoch=args.start_epoch, num_epoch=args.epochs)
# --- Example #19 ---
def demo_net(sym, class_names, args):
    """Run single-image detection and mask extraction with a trained symbol.

    Loads one test image, performs a single forward pass, decodes boxes
    and masks, draws confident detections onto the image, and writes
    ``demo.png`` plus one ``mask<i>.png`` per kept detection. Optionally
    shows an interactive visualization when ``args.vis`` is set.

    Parameters
    ----------
    sym : mx.sym.Symbol
        Inference symbol producing (rois, scores, bbox_deltas, mask_prob).
    class_names : sequence of str
        Class index -> name mapping for printing.
    args : argparse.Namespace
        Image path, model params path, preprocessing and threshold options.
    """
    # echo the parsed arguments for reproducibility
    print('called with args\n{}'.format(pprint.pformat(vars(args))))

    # pick the compute device: requested GPU if given, else CPU 0
    ctx = mx.gpu(int(args.gpu)) if args.gpu else mx.cpu(0)

    # load and preprocess the single test image
    im_tensor, im_info, im_orig = load_test(args.image,
                                            short=args.img_short_side,
                                            max_size=args.img_long_side,
                                            mean=args.img_pixel_means,
                                            std=args.img_pixel_stds)
    data_batch = generate_batch(im_tensor, im_info)

    # restore trained weights onto the chosen device
    arg_params, aux_params = load_param(args.params, ctx=ctx)

    # bind at the largest possible (square, long-side) input shape
    data_names = ['data', 'im_info']
    label_names = None
    long_side = args.img_long_side
    data_shapes = [('data', (1, 3, long_side, long_side)),
                   ('im_info', (1, 3))]
    label_shapes = None

    # verify params match the symbol before binding
    check_shape(sym, data_shapes, arg_params, aux_params)

    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)

    # single forward pass
    mod.forward(data_batch)
    rois, scores, bbox_deltas, mask_prob = mod.get_outputs()
    rois = rois[:, 1:]  # drop the leading batch-index column
    scores = scores[0]
    bbox_deltas = bbox_deltas[0]
    im_info = im_info[0]

    # decode final detections + instance masks from the raw outputs
    det, masks = im_detect(rois,
                           scores,
                           bbox_deltas,
                           mask_prob,
                           im_info,
                           bbox_stds=args.rcnn_bbox_stds,
                           nms_thresh=args.rcnn_nms_thresh,
                           conf_thresh=args.rcnn_conf_thresh)

    im = cv2.imread(args.image)
    print(im.shape)
    print(im_info)
    # draw every confident, non-background detection and dump its mask
    for index, (cls, conf, x1, y1, x2, y2) in enumerate(det):
        print(masks[index].max())
        if cls > 0 and conf > args.vis_thresh:
            top_left = (int(x1), int(y1))
            bottom_right = (int(x2), int(y2))
            print(class_names[int(cls)], conf, [x1, y1, x2, y2])
            print(top_left, bottom_right)
            cv2.rectangle(im, top_left, bottom_right, (255, 0, 0), 10)
            cv2.imwrite("mask{}.png".format(index),
                        np.uint8(masks[index] * 255))

    cv2.imwrite('demo.png', im)

    # optional on-screen visualization
    if args.vis:
        vis_detection(im_orig, det, class_names, thresh=args.vis_thresh)