Example #1
    if distributed:
        # 'nccl' backend is an assumption here (the usual choice for CUDA DDP)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
    w, h = cfg['dataset']['w'], cfg['dataset']['h']
    net = parsingNet(network=cfg['network'], datasets=cfg['dataset']).cuda()
    if distributed:
        net = torch.nn.parallel.DistributedDataParallel(
            net, device_ids=[args.local_rank])
    # try:
    #     from thop import profile
    #     macs, params = profile(net, inputs=(torch.zeros(1, 3, h, w).cuda(),))
    #     ms = 'FLOPs:  %.2f GFLOPS, Params: %.2f M' % (macs / 1E9, params / 1E6)
    # except Exception:
    #     ms = 'Model profile error'
    # logger.log(ms)
    train_loader = get_train_loader(cfg['dataset'], args.local_rank)
    test_loader = get_test_loader(cfg['dataset'], args.local_rank)
    optimizer = get_optimizer(net, cfg['train'])

    if cfg['finetune'] is not None:
        state_all = torch.load(cfg['finetune'])['model']
        state_clip = {}  # only use backbone parameters
        for k, v in state_all.items():
            if 'model' in k:
                state_clip[k] = v
        net.load_state_dict(state_clip, strict=False)
    if cfg['resume'] is not None:
        logger.log('==> Resume model from ' + cfg['resume'])
        resume_dict = torch.load(cfg['resume'], map_location='cpu')
        net.load_state_dict(resume_dict['model'])
        if 'optimizer' in resume_dict.keys():
            optimizer.load_state_dict(resume_dict['optimizer'])
        # checkpoint filenames are assumed to follow an 'epXXX.pth' pattern, so
        # characters 2..4 of the basename give the epoch that was saved
        resume_epoch = int(os.path.split(cfg['resume'])[1][2:5]) + 1
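
The resume branch above expects a checkpoint saved as a dict with 'model' and 'optimizer' keys, under a filename whose characters 2..4 encode the epoch (e.g. ep014.pth). Below is a minimal sketch of a matching save helper; the name save_checkpoint and the work_dir argument are assumptions for illustration, not part of the original code.

import os
import torch

def save_checkpoint(net, optimizer, epoch, work_dir):
    # Hypothetical helper: writes a checkpoint in the layout the resume/finetune
    # logic above expects -- a dict with 'model' and 'optimizer' keys, and a
    # filename like 'ep014.pth' so that int(basename[2:5]) recovers the epoch.
    state = {'model': net.state_dict(), 'optimizer': optimizer.state_dict()}
    path = os.path.join(work_dir, 'ep%03d.pth' % epoch)
    torch.save(state, path)
    return path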
Example #2
    ]

    train_loader, cls_num_per_lane = get_train_loader(
        cfg.batch_size, cfg.data_root, cfg.griding_num, cfg.dataset,
        cfg.use_aux, distributed, cfg.num_lanes)

    net = parsingNet(pretrained=True,
                     backbone=cfg.backbone,
                     cls_dim=(cfg.griding_num + 1, cls_num_per_lane,
                              cfg.num_lanes),
                     use_aux=cfg.use_aux).cuda()

    if distributed:
        net = torch.nn.parallel.DistributedDataParallel(
            net, device_ids=[args.local_rank])
    optimizer = get_optimizer(net, cfg)

    if cfg.finetune is not None:
        dist_print('finetune from ', cfg.finetune)
        state_all = torch.load(cfg.finetune)['model']
        state_clip = {}  # only use backbone parameters
        for k, v in state_all.items():
            if 'model' in k:
                state_clip[k] = v
        net.load_state_dict(state_clip, strict=False)
    if cfg.resume is not None:
        dist_print('==> Resume model from ' + cfg.resume)
        resume_dict = torch.load(cfg.resume, map_location='cpu')
        net.load_state_dict(resume_dict['model'])
        if 'optimizer' in resume_dict.keys():
            optimizer.load_state_dict(resume_dict['optimizer'])
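
Both examples assume that distributed and args.local_rank are already set before the model is wrapped in DistributedDataParallel. A minimal sketch of that setup, assuming the script is launched with python -m torch.distributed.launch --nproc_per_node=<num_gpus> train.py, which passes --local_rank to the script and sets WORLD_SIZE in the environment:

import os
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)  # filled in by the launcher
args, _ = parser.parse_known_args()

# more than one process means distributed training
distributed = int(os.environ.get('WORLD_SIZE', '1')) > 1
if distributed:
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')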
Example #3
    def configure_optimizers(self):
        optimizer, scheduler = get_optimizer(
            cfg=self.cfg.optimizer, model_params=self.net.parameters())
        return [optimizer], [scheduler]
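
Here get_optimizer is expected to return an (optimizer, scheduler) pair that configure_optimizers hands back to PyTorch Lightning. A minimal sketch under that assumption; the cfg fields (lr, weight_decay, step_size) are invented for illustration and are not the project's actual config schema.

import torch

def get_optimizer(cfg, model_params):
    # Hypothetical implementation: cfg.lr, cfg.weight_decay and cfg.step_size
    # are assumed field names chosen only to make the sketch self-contained.
    optimizer = torch.optim.Adam(model_params, lr=cfg.lr,
                                 weight_decay=cfg.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=cfg.step_size,
                                                gamma=0.1)
    return optimizer, scheduler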