Example #1
import os
import os.path as osp

import torch
import torch.distributed as dist

# parse_args, set_cfg_from_file, setup_logger and evaluate are defined in the
# project's own modules and are assumed to be importable here.

def main():
    args = parse_args()
    cfg = set_cfg_from_file(args.config)
    # Initialize single-node distributed evaluation when a local rank is given
    # (local_rank == -1 means plain single-GPU evaluation).
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl',
                                init_method='tcp://127.0.0.1:{}'.format(
                                    args.port),
                                world_size=torch.cuda.device_count(),
                                rank=args.local_rank)
    # Create the result directory and a logger before running evaluation.
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-eval'.format(cfg.model_type), cfg.respth)
    evaluate(cfg, args.weight_pth)
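
Example #1 relies on a parse_args() helper defined elsewhere in the project. A rough sketch of what it is assumed to provide, mirroring the options the snippet reads (the --port and --local_rank names and their defaults are assumptions, not the project's actual code):

import argparse

def parse_args():
    parse = argparse.ArgumentParser()
    parse.add_argument('--config', dest='config', type=str,
                       default='configs/bisenetv2.py')
    parse.add_argument('--weight-path', dest='weight_pth', type=str,
                       default='model_final.pth')
    parse.add_argument('--port', dest='port', type=int, default=44553)  # assumed default
    parse.add_argument('--local_rank', dest='local_rank', type=int, default=-1)
    return parse.parse_args()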
Example #2
import argparse

import torch

# set_cfg_from_file and model_factory come from the project's own modules.

# Gradients are never needed for export, so disable them globally.
torch.set_grad_enabled(False)

parse = argparse.ArgumentParser()
parse.add_argument(
    '--config',
    dest='config',
    type=str,
    default='configs/bisenetv2.py',
)
parse.add_argument('--weight-path',
                   dest='weight_pth',
                   type=str,
                   default='model_final.pth')
parse.add_argument('--outpath', dest='out_pth', type=str, default='model.pt')
args = parse.parse_args()

cfg = set_cfg_from_file(args.config)
# Export runs in a single process, so synchronized BatchNorm must be turned off.
cfg.use_sync_bn = False

# aux_mode='pred' builds the network so that its forward pass returns the
# per-pixel class prediction instead of training-time auxiliary outputs.
net = model_factory[cfg.model_type](cfg.n_cats, aux_mode='pred')
net.load_state_dict(torch.load(args.weight_pth, map_location='cpu'),
                    strict=False)
net.eval()

#  dummy_input = torch.randn(1, 3, *cfg.crop_size)
# Trace the model with a fixed-size dummy input (Cityscapes resolution);
# the traced graph is specialized to this input shape.
dummy_input = torch.randn(1, 3, 1024, 2048)
script_module = torch.jit.trace(net, dummy_input)
#  script_module.save(args.out_pth, _use_new_zipfile_serialization=False)
script_module.save(args.out_pth)
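
The saved TorchScript file can be loaded again without the Python model definition. A minimal sketch of loading and running it (preprocessing is omitted, and the 1024x2048 input size simply mirrors the dummy input above):

import torch

net = torch.jit.load('model.pt')
net.eval()

# Stand-in for a preprocessed image batch; a real input must be normalized the
# same way as during training.
img = torch.randn(1, 3, 1024, 2048)
with torch.no_grad():
    pred = net(img)  # with aux_mode='pred', this is a per-pixel class-id map
print(pred.shape)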