Beispiel #1
0
def main():
    """Convert a RepVGG classifier checkpoint from training mode to
    deployment mode and store the result as a new ``.pth`` file.

    Command-line arguments:
        config_path: Config file of the network containing the repvgg block.
        checkpoint_path: Training-mode checkpoint for that network.
        save_path: Destination ``.pth`` file for the converted checkpoint.
    """
    parser = argparse.ArgumentParser(
        description='Convert the parameters of the repvgg block '
        'from training mode to deployment mode.')
    parser.add_argument(
        'config_path',
        help='The path to the configuration file of the network '
        'containing the repvgg block.')
    parser.add_argument(
        'checkpoint_path',
        help='The path to the checkpoint file corresponding to the model.')
    parser.add_argument(
        'save_path',
        help='The path where the converted checkpoint file is stored.')
    args = parser.parse_args()

    save_path = Path(args.save_path)
    if save_path.suffix != '.pth':
        # Use sys.exit(1) instead of the bare `exit()` builtin: `exit` is
        # injected by the `site` module (absent under `python -S`) and
        # exits with status 0, hiding the failure from callers/scripts.
        import sys
        print('The path should contain the name of the pth format file.')
        sys.exit(1)
    save_path.parent.mkdir(parents=True, exist_ok=True)

    model = init_model(args.config_path,
                       checkpoint=args.checkpoint_path,
                       device='cpu')
    # Explicit raise instead of `assert`: asserts are stripped when Python
    # runs with optimizations enabled (-O), silently skipping validation.
    if not isinstance(model, ImageClassifier):
        raise TypeError(
            '`model` must be a `mmcls.classifiers.ImageClassifier` instance.')

    convert_classifier_to_deploy(model, args.save_path)
Beispiel #2
0
def convert_repvggblock_param(config_path, checkpoint_path, save_path):
    """Fuse a RepVGG backbone into deployment form and save the weights.

    Args:
        config_path: Path to the model config file.
        checkpoint_path: Path to the training-mode checkpoint.
        save_path: Destination path for the converted state dict.
    """
    repvgg_model = init_model(config_path, checkpoint=checkpoint_path)
    print('Converting...')

    # Reparameterize the training-time branches in place, then persist.
    repvgg_model.backbone.switch_to_deploy()
    torch.save(repvgg_model.state_dict(), save_path)

    print('Done! Save at path "{}"'.format(save_path))
Beispiel #3
0
def main():
    """Visualize a class activation map (CAM) for a single image.

    Builds the classifier from the given config/checkpoint, resolves the
    target layers, runs the CAM method selected on the command line and
    shows or saves the resulting visualization.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # build the model from a config file and a checkpoint file
    model = init_model(cfg, args.checkpoint, device=args.device)
    if args.preview_model:
        # Only print the model structure so the user can pick target layers.
        print(model)
        print('\n Please remove `--preview-model` to get the CAM.')
        return

    # apply transform and prepare data
    data, src_img = apply_transforms(args.img, cfg.data.test.pipeline)

    # build target layers
    if args.target_layers:
        target_layers = [
            get_layer(layer, model) for layer in args.target_layers
        ]
    else:
        # Fall back to automatically chosen layers when none are given.
        target_layers = get_default_traget_layers(model, args)

    # init a cam grad calculator
    use_cuda = ('cuda' in args.device)
    reshape_transform = build_reshape_transform(model, args)
    cam = init_cam(args.method, model, target_layers, use_cuda,
                   reshape_transform)

    # wrap the target_category with ClassifierOutputTarget in grad_cam>=1.3.7,
    # to fix the bug in #654.
    targets = None
    if args.target_category:
        grad_cam_v = pkg_resources.get_distribution('grad_cam').version
        if digit_version(grad_cam_v) >= digit_version('1.3.7'):
            # Local import: this class only exists in grad_cam >= 1.3.7.
            from pytorch_grad_cam.utils.model_targets import \
                ClassifierOutputTarget
            targets = [ClassifierOutputTarget(c) for c in args.target_category]
        else:
            # Older grad_cam versions accept plain category indices.
            targets = args.target_category

    # calculate cam grads and show|save the visualization image
    grayscale_cam = cam(data['img'].unsqueeze(0),
                        targets,
                        eigen_smooth=args.eigen_smooth,
                        aug_smooth=args.aug_smooth)
    show_cam_grad(grayscale_cam,
                  src_img,
                  title=args.method,
                  out_path=args.save_path)
    def initialize(self, context):
        """Load the classification model for serving.

        Args:
            context: Serving context exposing ``system_properties`` (with
                ``model_dir`` and ``gpu_id``) and a model ``manifest`` —
                this matches the TorchServe handler interface
                (NOTE(review): confirm against the actual deployment).
        """
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        # On CUDA hosts, target the specific device id assigned by the
        # serving runtime ('cuda:<gpu_id>'); otherwise use plain 'cpu'.
        self.device = torch.device(self.map_location + ':' +
                                   str(properties.get('gpu_id')) if torch.cuda.
                                   is_available() else self.map_location)
        self.manifest = context.manifest

        # The checkpoint name comes from the manifest; the config file is
        # expected to sit next to it as 'config.py' in the model archive.
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_model(self.config_file, checkpoint, self.device)
        self.initialized = True
Beispiel #5
0
def main(args):
    """Check that a TorchServe deployment reproduces native inference.

    Runs the same image through the local model and through the
    TorchServe HTTP endpoint, visualizes both predictions and asserts
    that the predicted scores agree.
    """
    # Native inference with the locally built model.
    model = init_model(args.config, args.checkpoint, device=args.device)
    local_result = inference_model(model, args.img)
    show_result_pyplot(model, args.img, local_result, title='pytorch_result')

    # Remote inference through the TorchServe prediction endpoint.
    endpoint = ('http://' + args.inference_addr + '/predictions/' +
                args.model_name)
    with open(args.img, 'rb') as image:
        remote_result = requests.post(endpoint, image).json()
    show_result_pyplot(model, args.img, remote_result, title='server_result')

    assert np.allclose(local_result['pred_score'], remote_result['pred_score'])
    print('Test complete, the results of PyTorch and TorchServe are the same.')
Beispiel #6
0
    def __init__(self, cls_c, cls_w, device):
        """Load a classification model and set up preprocessing constants.

        Args:
            cls_c: Path to the classifier config file.
            cls_w: Path to the classifier weights file.
            device: Device identifier to run the model on.
        """
        self.model_w = cls_w
        self.device = device
        self.cls_model = init_model(cls_c, cls_w, device=device)
        # Export mode: the model returns the raw convolution result.
        self.cls_model.export = True
        self.short_size = 256
        # Network input resolution, stored as [height, width].
        self.dst_w = 224
        self.dst_h = 224
        self.input_size = [self.dst_h, self.dst_w]

        # Per-channel normalization constants (and precomputed reciprocal
        # of std so normalization can multiply instead of divide).
        self.mean = np.array([123.675, 116.28, 103.53])
        self.std = np.array([58.395, 57.12, 57.375])
        self.std_inv = 1.0 / self.std

        self.cls_name = self.cls_model.CLASSES
        self.result = {}
def main():
    """Classify a single image from the command line and print the result."""
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device',
                        default='cuda:0',
                        help='Device used for inference')
    args = parser.parse_args()

    # Build the classifier, run it on the requested image and print the
    # prediction to the terminal.
    model = init_model(args.config, args.checkpoint, device=args.device)
    print(inference_model(model, args.img))
Beispiel #8
0
def inference(config_file, checkpoint, classes, args):
    """Classify ``args.img`` with the given model and collect statistics.

    Args:
        config_file: Path to the model config (NOTE(review): ``.stem`` is
            used below, so a ``pathlib.Path`` is expected — confirm at
            the call site).
        checkpoint: Path to the checkpoint file.
        classes: Sequence of class names assigned to ``model.CLASSES``.
        args: Parsed CLI namespace; this function reads ``device``,
            ``img`` and ``inference_time``.

    Returns:
        dict with prediction label/score/class, the input resolution, the
        model name and, when ``args.inference_time`` is set, timing
        statistics in milliseconds.
    """
    cfg = Config.fromfile(config_file)

    model = init_model(cfg, checkpoint, device=args.device)
    model.CLASSES = classes

    # build the data pipeline
    if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
        cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
    if cfg.data.test.type in ['CIFAR10', 'CIFAR100']:
        # The image shape of CIFAR is (32, 32, 3)
        cfg.data.test.pipeline.insert(1, dict(type='Resize', size=32))

    data = dict(img_info=dict(filename=args.img), img_prefix=None)

    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    # Spatial dims of the transformed image, recorded before batching
    # (assumes a channel-first tensor — TODO confirm pipeline output).
    resolution = tuple(data['img'].shape[1:])
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [args.device])[0]

    # forward the model
    result = {'resolution': resolution}
    with torch.no_grad():
        if args.inference_time:
            time_record = []
            # Run 10 passes; the first and last samples are dropped below
            # to reduce warm-up and tail noise in the statistics.
            for _ in range(10):
                start = time()
                scores = model(return_loss=False, **data)
                time_record.append((time() - start) * 1000)
            result['time_mean'] = np.mean(time_record[1:-1])
            result['time_std'] = np.std(time_record[1:-1])
        else:
            scores = model(return_loss=False, **data)

        # Top-1 prediction from the per-class score array (batch size 1).
        pred_score = np.max(scores, axis=1)[0]
        pred_label = np.argmax(scores, axis=1)[0]
        result['pred_label'] = pred_label
        result['pred_score'] = float(pred_score)
    result['pred_class'] = model.CLASSES[result['pred_label']]

    result['model'] = config_file.stem

    return result
def deploy_model():
    """Build the inference model from global settings and publish it on ``g``."""
    model = init_model(g.local_model_config_path,
                       g.local_weights_path,
                       device=g.device)
    # Class names ordered by the index stored in the gt_labels mapping.
    label_map = g.gt_labels
    model.CLASSES = sorted(label_map, key=label_map.get)
    g.model = model
    sly.logger.info("🟩 Model has been successfully deployed")