Example #1
import os

import torch
import torch.nn as nn


def modelDeploy(args, model, optimizer, scheduler, logger):
    if args.num_gpus >= 1:
        from torch.nn.parallel import DataParallel
        # Wrap the model for (multi-)GPU data parallelism, then move it to GPU.
        model = DataParallel(model)
        model = model.cuda()

    if torch.backends.cudnn.is_available():
        import torch.backends.cudnn as cudnn
        # Note: these two flags pull in opposite directions. benchmark=True
        # autotunes convolution algorithms for speed, while deterministic=True
        # restricts cuDNN to reproducible ones; keep only the flag that
        # matches your needs if speed or reproducibility matters.
        cudnn.benchmark = True
        cudnn.deterministic = True

    # Training bookkeeping: current epoch, loss/mIoU history, validation
    # results, and the best mIoU seen so far.
    trainData = {'epoch': 0, 'loss': [], 'miou': [], 'val': [], 'bestMiou': 0}

    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume,
                                    map_location=torch.device('cpu'))

            # Restore model and optimizer state.
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])

            # Restore the stopping point and replay the LR schedule so the
            # scheduler is aligned with the resumed epoch.
            trainData = checkpoint['trainData']
            for _ in range(trainData['epoch']):
                scheduler.step()

            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, trainData['epoch']))

        else:
            logger.error("=> no checkpoint found at '{}'".format(args.resume))
            raise FileNotFoundError(
                "=> no checkpoint found at '{}'".format(args.resume))

    if args.finetune:
        if os.path.isfile(args.finetune):
            logger.info("=> finetuning checkpoint '{}'".format(args.finetune))
            state_all = torch.load(args.finetune, map_location='cpu')['model']
            # Collect the weights to load. As written this copies every key;
            # filter by key prefix here if only part of the model (e.g. the
            # backbone) should be initialized from the checkpoint.
            state_clip = {}
            for k, v in state_all.items():
                state_clip[k] = v
            # strict=False tolerates missing/unexpected keys, so layers that
            # differ from the checkpoint keep their initial values.
            model.load_state_dict(state_clip, strict=False)
        else:
            logger.warning(
                "=> no finetune checkpoint found at '{}'".format(args.finetune))

    if args.freeze_bn:
        logger.warning('Freezing batch normalization layers')
        for m in model.modules():
            if isinstance(m, nn.BatchNorm2d):
                # eval() stops running-stat updates; beware that a later
                # model.train() call switches BN back to train mode, so
                # re-apply this after each such call if needed.
                m.eval()
                m.weight.requires_grad = False
                m.bias.requires_grad = False

    return model, trainData
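
The resume branch above fixes the checkpoint schema: a dict with 'model', 'optimizer', and 'trainData' keys. Below is a minimal sketch of the matching save side and a CPU-only call to modelDeploy; the helper name save_checkpoint, the toy model, and the argparse flags are illustrative assumptions, not part of the original code.

import argparse
import logging

import torch
import torch.nn as nn
import torch.optim as optim


def save_checkpoint(path, model, optimizer, trainData):
    # modelDeploy loads checkpoint['model'] strictly into the (possibly
    # DataParallel-wrapped) model, so save from the same wrapping that will
    # be used at resume time to keep the 'module.' key prefixes consistent.
    torch.save({'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'trainData': trainData}, path)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_gpus', type=int, default=0)  # 0: CPU-only demo
    parser.add_argument('--resume', default='')
    parser.add_argument('--finetune', default='')
    parser.add_argument('--freeze_bn', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('deploy')

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10)

    model, trainData = modelDeploy(args, model, optimizer, scheduler, logger)

    # ... one training epoch would run here ...
    trainData['epoch'] += 1
    save_checkpoint('checkpoint.pth', model, optimizer, trainData)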