Example #1
def profile(opt, lr_size, test_speed=False):
    # basic configs
    scale = opt['scale']
    device = torch.device(opt['device'])
    msg = '\n'

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = False

    # logging
    base_utils.print_options(opt['model']['generator'])

    lr_size_lst = tuple(map(int, lr_size.split('x')))
    hr_size = f'{lr_size_lst[0]}x{lr_size_lst[1]*scale}x{lr_size_lst[2]*scale}'
    msg += f'{"*"*40}\nResolution: {lr_size} -> {hr_size} ({scale}x SR)'

    # create model
    from models.networks import define_generator
    net_G = define_generator(opt).to(device)
    # base_utils.log_info(f'\n{net_G.__str__()}')

    # profile
    lr_size = lr_size_lst  # reuse the (C, H, W) tuple parsed above
    gflops_dict, params_dict = net_G.profile(lr_size, device)

    gflops_all, params_all = 0, 0
    for module_name in gflops_dict.keys():
        gflops, params = gflops_dict[module_name], params_dict[module_name]
        msg += f'\n{"-"*40}\nModule: [{module_name}]'
        msg += f'\n    FLOPs (10^9): {gflops:.3f}'
        msg += f'\n    Parameters (10^6): {params/1e6:.3f}'
        gflops_all += gflops
        params_all += params
    msg += f'\n{"-"*40}\nOverall'
    msg += f'\n    FLOPs (10^9): {gflops_all:.3f}'
    msg += f'\n    Parameters (10^6): {params_all/1e6:.3f}\n{"*"*40}'

    # test running speed
    if test_speed:
        n_test, tot_time = 30, 0
        for i in range(n_test):
            dummy_input_list = net_G.generate_dummy_data(lr_size, device)

            start_time = time.time()
            # ---
            net_G.eval()
            with torch.no_grad():
                _ = net_G.step(*dummy_input_list)
            torch.cuda.synchronize()
            # ---
            end_time = time.time()
            tot_time += end_time - start_time
        msg += f'\nSpeed: {n_test/tot_time:.3f} FPS (averaged over {n_test} runs)\n{"*"*40}'

    base_utils.log_info(msg)
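
A minimal call site for this helper might look like the following. The option keys mirror the ones read above ('scale', 'device', 'model' -> 'generator'), but the exact layout of the project's config files is an assumption.

import torch

# hypothetical options dict; the real project loads these from a YAML config
opt = {
    'scale': 4,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'model': {
        'generator': {
            # generator-specific settings consumed by define_generator() (assumed)
            'name': 'FRNet',
            'in_nc': 3,
        },
    },
}

# profile a 3x180x320 LR input and also measure running speed
profile(opt, lr_size='3x180x320', test_speed=True)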
Example #2
def profile(opt, lr_size, test_speed=False):
    # logging
    logger = base_utils.get_logger('base')
    logger.info('{} Model Information {}'.format('='*20, '='*20))
    base_utils.print_options(opt['model']['generator'], logger)

    # basic configs
    scale = opt['scale']
    device = torch.device(opt['device'])

    # create model
    net_G = define_generator(opt).to(device)

    # get dummy input
    dummy_input_dict = net_G.generate_dummy_input(lr_size)
    for key in dummy_input_dict.keys():
        dummy_input_dict[key] = dummy_input_dict[key].to(device)

    # profile
    register(net_G, dummy_input_dict)
    gflops, params = profile_model(net_G)

    logger.info('-' * 40)
    logger.info('Super-resolving data from {}x{}x{} to {}x{}x{}'.format(
        *lr_size, lr_size[0], lr_size[1]*scale, lr_size[2]*scale))
    logger.info('Parameters (x10^6): {:.3f}'.format(params/1e6))
    logger.info('FLOPs (x10^9): {:.3f}'.format(gflops))
    logger.info('-' * 40)

    # test running speed
    if test_speed:
        n_test = 3
        tot_time = 0

        for i in range(n_test):
            start_time = time.time()
            with torch.no_grad():
                _ = net_G(**dummy_input_dict)
            end_time = time.time()
            tot_time += end_time - start_time

        logger.info('Speed (FPS): {:.3f} (averaged for {} runs)'.format(
            n_test / tot_time, n_test))
        logger.info('-' * 40)
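
register and profile_model are project helpers that are not shown in this example. Below is a rough sketch of how such a hook-based counter could work; it only covers Conv2d layers and is an assumption about the approach, not the project's actual implementation.

import torch
import torch.nn as nn

_flop_log = []

def _conv2d_hook(module, inputs, output):
    # MACs of one Conv2d call: kh * kw * (C_in / groups) * C_out * H_out * W_out
    out_h, out_w = output.shape[-2:]
    kh, kw = module.kernel_size
    macs = kh * kw * (module.in_channels // module.groups) * module.out_channels * out_h * out_w
    _flop_log.append(2 * macs)  # count multiply and add separately

def register(net, dummy_input_dict):
    # attach hooks and run one forward pass so every hook fires once
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            m.register_forward_hook(_conv2d_hook)
    with torch.no_grad():
        net(**dummy_input_dict)

def profile_model(net):
    gflops = sum(_flop_log) / 1e9
    params = sum(p.numel() for p in net.parameters())
    return gflops, params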
Example #3
File: torch2onnx.py  Project: Thmen/EGVSR
            # torch.backends.cudnn.deterministic = True
            # torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.benchmark = True
            opt['device'] = 'cuda'
        else:
            opt['device'] = 'cpu'
    else:
        opt['device'] = 'cpu'

    # ----------------- test ----------------- #
    # basic configs
    scale = opt['scale']
    device = torch.device(opt['device'])

    # create model
    net_G = define_generator(opt).to(device)

    from models.networks.tecogan_nets import FNet, SRNet

    fnet = FNet(in_nc=opt['model']['generator']['in_nc']).to(device)
    srnet = SRNet(in_nc=opt['model']['generator']['in_nc'],
                  out_nc=3,
                  nf=64,
                  nb=10,
                  upsample_func=None,
                  scale=4).to(device)

    # get dummy input
    lr_size = tuple(map(int, args.lr_size.split('x')))
    dummy_input_dict = net_G.generate_dummy_input(lr_size)
    for key in dummy_input_dict.keys():
        dummy_input_dict[key] = dummy_input_dict[key].to(device)
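
    # --- Sketch (not part of the original snippet): the remainder of
    # torch2onnx.py presumably exports the sub-networks with torch.onnx.export.
    # The forward signature assumed for FNet (two LR frames), the output file
    # names and the opset version are assumptions, not the project's confirmed
    # interface.
    lr_curr = torch.rand(1, lr_size[0], lr_size[1], lr_size[2], device=device)
    lr_prev = torch.rand(1, lr_size[0], lr_size[1], lr_size[2], device=device)
    torch.onnx.export(
        fnet, (lr_curr, lr_prev), 'fnet.onnx',
        input_names=['lr_curr', 'lr_prev'], output_names=['flow'],
        opset_version=11)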
Example #4
    def __init__(self, opt):
        super(TailorGAN, self).__init__()
        self.isTrain = opt.isTrain
        self.srcE = networks.define_srcEncoder(norm='instance')
        self.edgeE = networks.define_edgeEncoder(norm='instance')
        self.netG = networks.define_generator('instance', opt.n_blocks,
                                              opt.use_dropout)
        if self.isTrain:
            if opt.step == 'step2':
                self.srcE.load_state_dict(
                    torch.load('./checkpoints/Recon/collarRecon_srcE_%s.pth' %
                               opt.num_epoch,
                               map_location="cuda:%d" % opt.gpuid))
                self.edgeE.load_state_dict(
                    torch.load('./checkpoints/Recon/collarRecon_srcE_%s.pth' %
                               opt.num_epoch,
                               map_location="cuda:%d" % opt.gpuid))
                self.netG.load_state_dict(
                    torch.load('./checkpoints/Recon/collarRecon_srcE_%s.pth' %
                               opt.num_epoch,
                               map_location="cuda:%d" % opt.gpuid))
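                # note: all three load_state_dict calls above read the same
                # srcE checkpoint; presumably the matching edgeE/netG files
                # were intended (the sleeve example below repeats this pattern)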
                self.netD = networks.define_discriminator(opt.num_collar,
                                                          input_nc=3,
                                                          ndf=32,
                                                          n_layers_D=3,
                                                          norm='instance',
                                                          num_D=1)
                self.optimizer_netD = torch.optim.Adam(self.netD.parameters(),
                                                       lr=opt.lr * 3,
                                                       betas=(opt.beta1,
                                                              0.999))

                print('Model load successful!')

            # self.optimizer_srcE = torch.optim.Adam(self.srcE.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            # self.optimizer_edgeE = torch.optim.Adam(self.edgeE.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            # self.optimizer_netG = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

            params = []
            params += self.srcE.parameters()
            params += self.edgeE.parameters()
            params += self.netG.parameters()
            self.optimizer = torch.optim.Adam(params,
                                              lr=opt.lr,
                                              betas=(opt.beta1, 0.999))

            self.class_loss = nn.CrossEntropyLoss()
            # self.recon_loss = networks.vggloss(opt)
            self.recon_loss = nn.L1Loss()
            self.concept_loss = nn.L1Loss()
            self.VGGloss = networks.vggloss(opt)
            self.adv_loss = networks.GANLOSS()
        else:
            self.edgeE.load_state_dict(
                torch.load(
                    './checkpoints/FullModel/FullModel_collar_edgeE.pth',
                    map_location="cuda:%d" % opt.gpuid))
            self.srcE.load_state_dict(
                torch.load('./checkpoints/FullModel/FullModel_collar_srcE.pth',
                           map_location="cuda:%d" % opt.gpuid))
            self.netG.load_state_dict(
                torch.load('./checkpoints/FullModel/FullModel_collar_netG.pth',
                           map_location="cuda:%d" % opt.gpuid))
Example #5
    def __init__(self, opt):
        super(SleeveGAN, self).__init__()
        self.isTrain = opt.isTrain
        self.srcE = networks.define_srcEncoder(norm='instance')
        self.edgeE = networks.define_edgeEncoder(norm='instance')
        self.netG = networks.define_generator('instance', opt.n_blocks,
                                              opt.use_dropout)
        if self.isTrain:
            if opt.step == 'step2':
                self.srcE.load_state_dict(
                    torch.load('./checkpoints/Recon/sleeveRecon_srcE_%s.pth' %
                               opt.num_epoch,
                               map_location="cuda:%d" % opt.gpuid))
                self.edgeE.load_state_dict(
                    torch.load('./checkpoints/Recon/sleeveRecon_srcE_%s.pth' %
                               opt.num_epoch,
                               map_location="cuda:%d" % opt.gpuid))
                self.netG.load_state_dict(
                    torch.load('./checkpoints/Recon/sleeveRecon_srcE_%s.pth' %
                               opt.num_epoch,
                               map_location="cuda:%d" % opt.gpuid))
                self.netD = networks.define_discriminator(opt.num_sleeve,
                                                          input_nc=3,
                                                          ndf=32,
                                                          n_layers_D=3,
                                                          norm='instance',
                                                          num_D=1)
                self.optimizer_netD = torch.optim.Adam(self.netD.parameters(),
                                                       lr=opt.lr,
                                                       betas=(opt.beta1,
                                                              0.999))

                print('Model load successful!')
            if opt.enable_classifier:
                if opt.type_classifier == 'collar':
                    self.classifier = networks.define_classifier(
                        opt.num_collar)
                else:
                    self.classifier = networks.define_classifier(
                        opt.num_sleeve)
                self.classifier.load_state_dict(
                    torch.load(
                        './checkpoints/classifier/path/classifier_%s_%s.pth' %
                        (opt.type_classifier, opt.num_epoch),
                        map_location="cuda:%d" % opt.gpuid))
                for param in self.classifier.parameters():
                    param.requires_grad = False
            self.optimizer_srcE = torch.optim.Adam(self.srcE.parameters(),
                                                   lr=opt.lr,
                                                   betas=(opt.beta1, 0.999))
            self.optimizer_edgeE = torch.optim.Adam(self.edgeE.parameters(),
                                                    lr=opt.lr,
                                                    betas=(opt.beta1, 0.999))
            self.optimizer_netG = torch.optim.Adam(self.netG.parameters(),
                                                   lr=opt.lr,
                                                   betas=(opt.beta1, 0.999))

            self.class_loss = nn.CrossEntropyLoss()
            self.recon_loss = nn.L1Loss()
            self.concept_loss = nn.L1Loss()
            self.VGGloss = networks.vggloss(opt)
            self.adv_loss = networks.GANLOSS()
        else:
            self.edgeE.load_state_dict(
                torch.load(
                    './checkpoints/FullModel/FullModel_sleeve_edgeE.pth',
                    map_location="cuda:%d" % opt.gpuid))
            self.srcE.load_state_dict(
                torch.load('./checkpoints/FullModel/FullModel_sleeve_srcE.pth',
                           map_location="cuda:%d" % opt.gpuid))
            self.netG.load_state_dict(
                torch.load('./checkpoints/FullModel/FullModel_sleeve_netG.pth',
                           map_location="cuda:%d" % opt.gpuid))
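
For reference, the constructor above reads its settings from opt; at test time (isTrain False) a minimal option object could look like this. The field values are guesses, since the real project builds opt with argparse.

from types import SimpleNamespace

# hypothetical test-time options; real values come from the project's argparse setup
opt = SimpleNamespace(
    isTrain=False,      # load the FullModel_* checkpoints instead of training
    n_blocks=9,         # generator depth passed to define_generator (assumed)
    use_dropout=False,
    gpuid=0,            # checkpoints are remapped onto cuda:<gpuid>
)
model = SleeveGAN(opt)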