Example #1
    def __init__(self, opt):
        super(SRSolver, self).__init__(opt)
        self.train_opt = opt['solver']
        self.LR = self.Tensor()
        self.HR = self.Tensor()
        self.SR = None

        self.records = {'train_loss': [],
                        'val_loss': [],
                        'psnr': [],
                        'ssim': [],
                        'lr': []}

        self.model = create_model(opt)
        self.print_network()

        if self.is_train:
            self.model.train()

            # set cl_loss
            if self.use_cl:
                self.cl_weights = self.opt['solver']['cl_weights']
                assert self.cl_weights, "[Error] 'cl_weights' is not declared when 'use_cl' is true"

            # set loss
            loss_type = self.train_opt['loss_type']
            if loss_type == 'l1':
                self.criterion_pix = nn.L1Loss()
            elif loss_type == 'l2':
                self.criterion_pix = nn.MSELoss()
            else:
                raise NotImplementedError('Loss type [%s] is not implemented!'%loss_type)

            if self.use_gpu:
                self.criterion_pix = self.criterion_pix.cuda()

            # set optimizer
            weight_decay = self.train_opt['weight_decay'] if self.train_opt['weight_decay'] else 0
            optim_type = self.train_opt['type'].upper()
            if optim_type == "ADAM":
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.train_opt['learning_rate'], weight_decay=weight_decay)
            else:
                raise NotImplementedError('Optimizer type [%s] is not implemented!' % optim_type)

            # set lr_scheduler
            if self.train_opt['lr_scheme'].lower() == 'multisteplr':
                self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                                self.train_opt['lr_steps'],
                                                                self.train_opt['lr_gamma'])
            else:
                raise NotImplementedError('Only MultiStepLR scheme is supported!')

        self.load()

        print('===> Solver Initialized : [%s] || Use CL : [%s] || Use GPU : [%s]'%(self.__class__.__name__,
                                                                                       self.use_cl, self.use_gpu))
        if self.is_train:
            print("optimizer: ", self.optimizer)
            print("lr_scheduler milestones: %s   gamma: %f"%(self.scheduler.milestones, self.scheduler.gamma))
Example #2
    def __init__(self, opt):
        super(SRSolver, self).__init__(opt)
        self.train_opt = opt['solver']
        self.LR = self.Tensor()
        self.HR = self.Tensor()
        self.SR = None

        self.records = {
            'client_idx': [],
            'client_loss': [],
            'agg_loss': [],
            'val_loss': [],
            'psnr': [],
            'ssim': []
        }

        self.model = create_model(opt)
        self.print_network()

        if self.is_train:
            self.model.train()

            # set cl_loss
            if self.use_cl:
                self.cl_weights = self.opt['solver']['cl_weights']
                assert self.cl_weights, "[Error] 'cl_weights' is not declared when 'use_cl' is true"

            # set loss
            loss_type = self.train_opt['loss_type']
            if loss_type == 'l1':
                self.criterion_pix = nn.L1Loss()
            elif loss_type == 'l2':
                self.criterion_pix = nn.MSELoss()
            else:
                raise NotImplementedError(
                    'Loss type [%s] is not implemented!' % loss_type)

            if self.use_gpu:
                self.criterion_pix = self.criterion_pix.cuda()

            # set optimizer
            weight_decay = self.train_opt['weight_decay'] if self.train_opt[
                'weight_decay'] else 0
            optim_type = self.train_opt['type'].upper()
            if optim_type == "ADAM":
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.train_opt['learning_rate'],
                                            betas=(0.9, 0.999),
                                            eps=1e-8,
                                            weight_decay=weight_decay)
#########################################################################
### add AdamW
            elif optim_type == 'ADAMW':
                self.optimizer = optim.AdamW(
                    self.model.parameters(),
                    lr=self.train_opt['learning_rate'],
                    weight_decay=0.01)
#########################################################################
### add SGD

            elif optim_type == 'SGD':
                self.optimizer = optim.SGD(self.model.parameters(),
                                           lr=self.train_opt['learning_rate'],
                                           weight_decay=1e-5)
#########################################################################
            else:
                raise NotImplementedError(
                    'Optimizer type [%s] is not implemented!' % optim_type)

            # set lr_scheduler
            if self.train_opt['lr_scheme'].lower() == 'multisteplr':
                self.scheduler = optim.lr_scheduler.MultiStepLR(
                    self.optimizer, self.train_opt['lr_steps'],
                    self.train_opt['lr_gamma'])
#########################################################################
### add cosine_lr
            elif self.train_opt['lr_scheme'].lower() == 'cosinelr':
                self.scheduler = CosineLRScheduler(
                    self.optimizer, self.train_opt['num_epochs'])
#########################################################################
            else:
                raise NotImplementedError(
                    'Only MultiStepLR and CosineLR schemes are supported!')
        if not self.is_train:
            self.load()

        print(
            '===> Solver Initialized : [%s] || Use CL : [%s] || Use GPU : [%s]'
            % (self.__class__.__name__, self.use_cl, self.use_gpu))
        if self.is_train:
            print("optimizer: ", self.optimizer)
Example #3
    def __init__(self, opt):
        super(SRSolver, self).__init__(opt)
        self.train_opt = opt['solver']
        self.LR = self.Tensor()
        self.HR = self.Tensor()
        # self.patch_size = opt['datasets']['train']['LR_size']
        self.SR = None

        self.records = {
            'train_loss': [],
            'val_loss': [],
            'psnr': [],
            'ssim': [],
            'lr': []
        }

        self.model = create_model(opt)
        self.print_network()

        if self.is_train:
            self.model.train()

            # set loss
            loss_type = self.train_opt['loss_type']
            if loss_type == 'l1':
                self.criterion_pix = nn.L1Loss()
            elif loss_type == 'l2':
                self.criterion_pix = nn.MSELoss()
            elif loss_type == 'myloss':
                import importlib
                network = importlib.import_module(
                    'networks.' + opt['networks']['which_model'])
                self.criterion_pix = network.myloss()
            else:
                raise NotImplementedError(
                    'Loss type [%s] is not implemented!' % loss_type)

            if self.use_gpu:
                self.criterion_pix = self.criterion_pix.cuda()

            # set optimizer
            weight_decay = self.train_opt['weight_decay'] if self.train_opt[
                'weight_decay'] else 0
            optim_type = self.train_opt['type'].upper()
            if optim_type == "ADAM":
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.train_opt['learning_rate'],
                                            weight_decay=weight_decay)
            else:
                raise NotImplementedError(
                    'Optimizer type [%s] is not implemented!' % optim_type)

            # set lr_scheduler
            if self.train_opt['lr_scheme'].lower() == 'multisteplr':
                self.scheduler = optim.lr_scheduler.MultiStepLR(
                    self.optimizer, self.train_opt['lr_steps'],
                    self.train_opt['lr_gamma'])
            else:
                raise NotImplementedError(
                    'Only MultiStepLR scheme is supported!')

        self.load()
        # self.optimizer.param_groups[0]['lr'] = 0.00005
        # if optim_type == "ADAM":
        #     self.optimizer = optim.Adam(self.model.parameters(),
        #                                 lr=self.train_opt['learning_rate'], weight_decay=weight_decay)

        print(
            '===> Solver Initialized : [%s] || Use CL : [%s] || Use GPU : [%s]'
            % (self.__class__.__name__, self.use_cl, self.use_gpu))
        if self.is_train:
            print("optimizer: ", self.optimizer)
            print("lr_scheduler milestones: %s   gamma: %f" %
                  (self.scheduler.milestones, self.scheduler.gamma))
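The 'myloss' branch above expects the module networks.<which_model> to expose a myloss() factory, which is not shown here. A minimal shape such a module could take, using a Charbonnier-style pixel loss purely as an illustration:

import torch.nn as nn

# Hypothetical contents of networks/<which_model>.py:
class myloss(nn.Module):
    # Illustrative custom loss: a Charbonnier (smooth L1) pixel penalty.
    def __init__(self, eps=1e-6):
        super(myloss, self).__init__()
        self.eps = eps

    def forward(self, sr, hr):
        diff = sr - hr
        return (diff * diff + self.eps).sqrt().mean()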
Example #4
    model key             -> architecture          total params  trainable params  non-trainable params
    'seresnet34'          -> SE-ResNet-34            21,461,952        21,446,720                15,232
    'seresnet50'          -> SE-ResNet-50            26,087,360        26,041,920                45,440
    'seresnet101'         -> SE-ResNet-101           47,988,672        47,887,936               100,736
    'seresnet154'         -> SE-ResNet-154           64,884,672        64,740,928               143,744
    'seresnetsaul'        -> SE-ResNet-Saul          22,072,768        22,055,744                17,024
    'seinceptionv3'       -> SE-Inception-v3         23,480,365        23,445,933                34,432
    'seinceptionresnetv2' -> SE-Inception-ResNet-v2  64,094,445        64,033,901                60,544
    'seresnext'           -> SE-ResNeXt              97,869,632        97,869,632               100,480
    'mobilenet'           -> MobileNet                3,242,189         3,220,301                21,888
    'densenet121'         -> DenseNet-121             7,050,829         6,967,181                83,648
    'densenet169'         -> DenseNet-169            12,664,525        12,506,125               158,400
    'densenet201'         -> DenseNet-201            18,346,957        18,117,901               229,056
    other                 -> Custom model
    '''

    aux_model = networks.create_model(network=ARCHITECTURE, input_shape=input_shape)
    aux_model.layers.pop() # remove the last layer of the model

    weight_decay = 1e-4

    x = [None]*OUTPUTS

    if OUTPUTS == 1:
        x[0] = Dense(13, use_bias=False, kernel_regularizer=l2(weight_decay),
                     activation='softmax', name='categories')(aux_model.layers[-1].output)
    elif OUTPUTS == 5:
        x[0] = Dense(4, use_bias=False, kernel_regularizer=l2(weight_decay),
                     activation='softmax', name='flavour')(aux_model.layers[-1].output)
        x[1] = Dense(4, use_bias=False, kernel_regularizer=l2(weight_decay),
                     activation='softmax', name='protons')(aux_model.layers[-1].output)
        x[2] = Dense(4, use_bias=False, kernel_regularizer=l2(weight_decay),
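The fragment cuts off before the remaining heads and the final model assembly. A minimal sketch of how such a multi-output head is typically completed in Keras; the head names and sizes are illustrative, not the original ones:

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

# Illustrative only: attach the remaining softmax heads to the same
# backbone output, then wrap everything in a single multi-output Model.
for i in range(2, OUTPUTS):
    x[i] = Dense(4, use_bias=False, kernel_regularizer=l2(weight_decay),
                 activation='softmax', name='head_%d' % i)(aux_model.layers[-1].output)

model = Model(inputs=aux_model.input, outputs=x)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # applied per output by default
              metrics=['accuracy'])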
Example #5
    def __init__(self, opt):
        super(SRLandmarkSolver, self).__init__(opt)
        self.train_opt = opt['solver']
        self.LR = self.Tensor()
        self.HR = self.Tensor()
        self.gt_heatmap = self.Tensor()
        self.SR = None
        self.hg_require_grad = True

        self.unnorm = Normalize(
            (-0.509 * opt['rgb_range'], -0.424 * opt['rgb_range'],
             -0.378 * opt['rgb_range']), (1.0, 1.0, 1.0))

        self.records = {
            'val_loss_pix': [],
            'val_loss_total': [],
            'psnr': [],
            'ssim': [],
            'lr': []
        }

        self.model = create_model(opt)

        if self.is_train:
            self.model.train()

            # set loss
            self.loss_dict = {}
            for k, v in self.train_opt['loss'].items():
                self.loss_dict[k] = {}
                loss_type = v['loss_type']
                if loss_type == 'l1':
                    self.loss_dict[k]['criterion'] = nn.L1Loss()
                elif loss_type == 'l2':
                    self.loss_dict[k]['criterion'] = nn.MSELoss()
                else:
                    raise NotImplementedError(
                        '%s loss type [%s] is not implemented!' %
                        (k, loss_type))
                self.loss_dict[k]['weight'] = v['weight']

            if self.use_gpu:
                for k in self.loss_dict.keys():
                    self.loss_dict[k]['criterion'] = self.loss_dict[k][
                        'criterion'].cuda()

            # set optimizer
            weight_decay = self.train_opt['weight_decay'] if self.train_opt[
                'weight_decay'] else 0
            optim_type = self.train_opt['type'].upper()
            if optim_type == "ADAM":
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.train_opt['learning_rate'],
                                            weight_decay=weight_decay)
            else:
                raise NotImplementedError(
                    'Optimizer type [%s] is not implemented!' % optim_type)

            # set lr_scheduler
            if self.train_opt['lr_scheme'].lower() == 'multisteplr':
                self.scheduler = optim.lr_scheduler.MultiStepLR(
                    self.optimizer, self.train_opt['lr_steps'],
                    self.train_opt['lr_gamma'])
            else:
                raise NotImplementedError(
                    'Only MultiStepLR scheme is supported!')

        self.print_network()
        self.load()

        print('===> Solver Initialized : [%s] || Use GPU : [%s]' %
              (self.__class__.__name__, self.use_gpu))
        if self.is_train:
            print("optimizer: ", self.optimizer)
            print("lr_scheduler milestones: %s   gamma: %f" %
                  (self.scheduler.milestones, self.scheduler.gamma))
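Nothing in this example shows how loss_dict is consumed; the criterion/weight pairs set up above imply a weighted sum along these lines (the function and its argument names are assumptions):

def compute_total_loss(loss_dict, outputs, targets):
    # Assumed usage: the total loss is the weighted sum over all configured
    # criteria, with one prediction/target pair per loss key (e.g. 'pix').
    total = 0.0
    for name, entry in loss_dict.items():
        total = total + entry['weight'] * entry['criterion'](outputs[name], targets[name])
    return total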
Example #6
        test = self.transform(img)
        test_embedding = self.model(test)

        l2_dist = self.l2_dist(self.anchor_emdedding, test_embedding)
        print(l2_dist.detach().item())

        if l2_dist < 10:
            print("correct")
        else:
            print("wrong")
        print("=" * 55)


if __name__ == "__main__":
    cfg = get_configuration()
    model = create_model(cfg)
    valload = create_dataset(cfg, "train", transform=ToTensor())
    restore_path = "./checkpoints/facenet/checkpoint_1000.pth"
    # dataloader = create_dataset(cfg, 'test', transform=ToTensor())
    inference_engine = InferenceEngine(cfg, model, valload)
    inference_engine.setup(restore_path)
    text = "=" * 80
    print("\033[33m{}\033[0m".format(text))
    # test single image
    # img_path = input("Enter the picture you want to save:")
    # inference_engine.save_code_base(img_path)
    # while True:
    #     test_path = input("Enter the picture you want to test:")
    #     if test_path == "q":
    #         break
    #     inference_engine.calculate_similarity(test_path)
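self.l2_dist is used above but never defined in this fragment; a plausible minimal implementation, consistent with the scalar comparison against the threshold of 10, would be (an assumption, not the original code):

import torch

def l2_dist(anchor_embedding, test_embedding):
    # Euclidean distance between two embedding tensors of the same shape.
    return torch.norm(anchor_embedding - test_embedding, p=2)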
Example #7
    def __init__(self, opt):
        super(SRSolver, self).__init__(opt)
        self.train_opt = opt['solver']
        self.LR = self.Tensor()
        self.HR = self.Tensor()
        self.SR = None

        self.records = {'train_loss': [],
                        'val_loss': [],
                        'psnr': [],
                        'ssim': [],
                        'lr': []}

        self.which_model = opt['networks']['which_model'].upper()
        if self.which_model.find('WSR') >= 0:
            self.wavelet_num = int(np.log2(opt['scale']) + 1)
        self.model = create_model(opt)

        if self.is_train:
            self.model.train()

            if self.which_model.find('WSR') >= 0:
                self.xfm = DWTForward(J=self.wavelet_num - 1, wave='db1', mode='symmetric')
                if torch.cuda.is_available():
                    self.xfm = nn.DataParallel(self.xfm).cuda()
                self.xfm.eval()

                # set wl_loss
                self.wl_weights = self.opt['solver']['wl_weights']
                assert self.wl_weights, "[Error] 'wl_weights' is not declared"
                assert len(
                    self.wl_weights) == self.wavelet_num, "[Error] The number of 'wl_weights' does not match the scale factor"

            # set cl_loss
            if self.use_cl:
                self.cl_weights = self.opt['solver']['cl_weights']
                assert self.cl_weights, "[Error] 'cl_weights' is not declared when 'use_cl' is true"

            # set loss
            loss_type = self.train_opt['loss_type']
            if loss_type == 'l1':
                self.criterion_pix = nn.L1Loss()
            elif loss_type == 'l2':
                self.criterion_pix = nn.MSELoss()
            else:
                raise NotImplementedError('Loss type [%s] is not implemented!' % loss_type)

            if self.use_gpu:
                self.criterion_pix = self.criterion_pix.cuda()

            # set optimizer
            weight_decay = self.train_opt['weight_decay'] if self.train_opt['weight_decay'] else 0
            optim_type = self.train_opt['type'].upper()
            if optim_type == "ADAM":
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.train_opt['learning_rate'], weight_decay=weight_decay)
            else:
                raise NotImplementedError('Optimizer type [%s] is not implemented!' % optim_type)

            # set lr_scheduler
            if self.train_opt['lr_scheme'].lower() == 'multisteplr':
                self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                                self.train_opt['lr_steps'],
                                                                self.train_opt['lr_gamma'])
            else:
                raise NotImplementedError('Only MultiStepLR scheme is supported!')

        self.load()
        self.print_network()

        print('===> Solver Initialized : [%s] || Use CL : [%s] || Use GPU : [%s]' % (self.__class__.__name__,
                                                                                     self.use_cl, self.use_gpu))
        if self.is_train:
            print("optimizer: ", self.optimizer)
            print("lr_scheduler milestones: %s   gamma: %f" % (self.scheduler.milestones, self.scheduler.gamma))
Example #8
    def __init__(self, opt):
        super(SRSolver, self).__init__(opt)
        self.train_opt = opt['solver']
        self.LR = self.Tensor()
        self.HR = self.Tensor()
        self.SR = None
        self.loss_func = myloss_func(opt)

        self.records = {'train_loss': [],
                        'val_loss': [],
                        'psnr': [],
                        'ssim': [],
                        'lr': []}

        self.model = create_model(opt).to(self.device)

        if self.is_train:
            self.model.train()
            self.discriminator = define_D(opt)
            self.discriminator.train()


            # set loss
            loss_type = self.train_opt['loss_type']
            if loss_type == 'l1':
                self.criterion_pix = nn.L1Loss()
            elif loss_type == 'l2':
                self.criterion_pix = nn.MSELoss()
            else:
                raise NotImplementedError('Loss type [%s] is not implemented!'%loss_type)

            if self.use_gpu:
                self.criterion_pix = self.criterion_pix.cuda()

            # set optimizer
            weight_decay = self.train_opt['weight_decay'] if self.train_opt['weight_decay'] else 0
            optim_type = self.train_opt['type'].upper()
            if optim_type == "ADAM":
                wd_D = self.train_opt['weight_decay_D'] if self.train_opt['weight_decay_D'] else 0
                self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=self.train_opt['lr_D'], \
                    weight_decay=wd_D, betas=(self.train_opt['beta1_D'], 0.999))
                self.optimizer = optim.Adam(self.model.parameters(),
                                            lr=self.train_opt['learning_rate'], weight_decay=weight_decay)
            else:
                raise NotImplementedError('Optimizer type [%s] is not implemented!' % optim_type)
            

            # D_update_ratio and D_init_iters are for WGAN
            self.D_update_ratio = self.train_opt['D_update_ratio'] if self.train_opt['D_update_ratio'] else 1
            self.D_init_iters = self.train_opt['D_init_iters'] if self.train_opt['D_init_iters'] else 0

            # set lr_scheduler
            if self.train_opt['lr_scheme'].lower() == 'multisteplr':
                self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer,
                                                                self.train_opt['lr_steps'],
                                                                self.train_opt['lr_gamma'])
            else:
                raise NotImplementedError('Only MultiStepLR scheme is supported!')

        self.load()
        self.print_network()

        print('===> Solver Initialized : [%s] || Use CL : [%s] || Use GPU : [%s]'%(self.__class__.__name__,
                                                                                       self.use_cl, self.use_gpu))
        if self.is_train:
            print("optimizer: ", self.optimizer)
            print("lr_scheduler milestones: %s   gamma: %f"%(self.scheduler.milestones, self.scheduler.gamma))