Example #1
0
def main():
    """Export a detector checkpoint to an ONNX file."""
    args = parse_args()
    cfg = Config.fromfile(args.config)

    # Prefer the current CUDA device when available; fall back to CPU.
    device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'

    model = build_detector(cfg.model)
    load_weights(model, args.checkpoint)
    model.to(device)
    # Route tracing through the export-friendly forward implementation.
    model.forward = model.forward_impl

    # Parse e.g. "3,800,1344" into per-dimension sizes; a batch axis of 1
    # is prepended when building the dummy input.
    input_dims = [int(dim) for dim in args.dummy_input_shape.split(',')]
    dummy_input = torch.randn(1, *input_dims)

    if args.dynamic_shape:
        print(f'Convert to Onnx with dynamic input shape and '
              f'opset version {args.opset_version}')
    else:
        print(f'Convert to Onnx with constant input shape '
              f'{args.dummy_input_shape} and '
              f'opset version {args.opset_version}')

    torch2onnx(model,
               dummy_input,
               args.out,
               dynamic_shape=args.dynamic_shape,
               opset_version=args.opset_version,
               do_constant_folding=args.do_constant_folding,
               verbose=args.verbose)

    print(
        f'Convert successfully, saved onnx file: {os.path.abspath(args.out)}')
Example #2
0
def prepare(cfg):
    """Build the inference engine and data pipeline described by ``cfg``.

    Returns:
        A ``(engine, data_pipeline, device)`` tuple, where ``device`` is the
        current CUDA device id when CUDA is available, else ``'cpu'``.
    """
    # Bind the target device once so both the detector and the caller agree.
    device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'

    engine = build_engine(cfg.infer_engine)
    engine.detector.to(device)
    load_weights(engine.detector, cfg.weights.filepath)

    data_pipeline = Compose(cfg.data_pipeline)
    return engine, data_pipeline, device
Example #3
0
File: test.py Project: zfxu/vedadet
def prepare(cfg, checkpoint):
    """Build a validation engine and its dataloader.

    Args:
        cfg: parsed config providing ``val_engine`` and ``data.val`` sections.
        checkpoint: path to the weights file loaded into ``engine.model``.

    Returns:
        An ``(engine, dataloader)`` tuple; the engine is wrapped in
        ``MMDataParallel`` on the current CUDA device.
    """
    engine = build_engine(cfg.val_engine)
    load_weights(engine.model, checkpoint, map_location='cpu')

    # Query the device id once and reuse it, instead of calling
    # torch.cuda.current_device() a second time for device_ids.
    device = torch.cuda.current_device()
    engine = MMDataParallel(engine.to(device), device_ids=[device])

    dataset = build_dataset(cfg.data.val, dict(test_mode=True))
    dataloader = build_dataloader(dataset, 1, 1, dist=False, shuffle=False)

    return engine, dataloader
Example #4
0
    def load_weights(self,
                     filepath,
                     map_location='cpu',
                     strict=False,
                     prefix=None):
        """Load checkpoint weights from ``filepath`` into every engine's model.

        Args:
            filepath: path to the checkpoint file.
            map_location: storage mapping passed to the loader; replaced by a
                CUDA-mapping callable when CUDA is available.
            strict: whether to require an exact state-dict key match.
            prefix: optional key prefix forwarded to ``load_weights``.
        """
        if torch.cuda.is_available():
            device_id = torch.cuda.current_device()

            # Map each storage onto the current CUDA device.  The original
            # nested function was missing the ``return`` (compare the
            # commented-out lambda it replaced), so it always returned None
            # and storages silently fell back to their default location.
            def map_location(storage, loc):
                return storage.cuda(device_id)

        self.logger.info('Loading weights from %s', filepath)
        # Whether we load through the train or the val engine does not
        # matter: they share the same model.
        for engine in self.engines.values():
            if is_module_wrapper(engine):
                engine = engine.module
            model = engine.model
            load_weights(model, filepath, map_location, strict, self.logger,
                         prefix)