Example #1
    def init_optimizer(self):
        def group_param_func(named_params):
            # Parameters of the pretrained backbone train at the base lr.
            base = {
                "params": [(n, p) for n, p in named_params
                           if n.startswith("base_model")],
                "lr": self.config["lr"],
            }
            # The task head gets its own (typically larger) learning rate.
            head = {
                "params": [(n, p) for n, p in named_params
                           if not n.startswith("base_model")],
                "lr": self.config["lr_head"],
            }
            return [head, base]

        self.optimizer = get_optimizer(self.model,
                                       self.config,
                                       group_param_func=group_param_func)
Example #2
    def init_optimizer(self):
        # With no group_param_func, all parameters share the default lr.
        self.optimizer = get_optimizer(self.model, self.config)
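
Both examples rely on a project-level get_optimizer helper whose body is not shown. A minimal sketch of how such a helper might consume group_param_func; the use of torch.optim.AdamW and the weight_decay key are assumptions, not the repository's actual implementation:

import torch

def get_optimizer(model, config, group_param_func=None):
    # Hypothetical helper: build per-group options, strip the names, and
    # hand plain parameter lists to the optimizer.
    if group_param_func is None:
        groups = [{"params": list(model.parameters()), "lr": config["lr"]}]
    else:
        groups = []
        for group in group_param_func(model.named_parameters()):
            # Each group's "params" holds (name, param) pairs; PyTorch
            # optimizers expect bare parameters, so drop the names here.
            groups.append({**group, "params": [p for _, p in group["params"]]})
    return torch.optim.AdamW(groups, weight_decay=config.get("weight_decay", 0.0))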
Example #3
import os

import torch
from torch.utils.tensorboard import SummaryWriter

# deep_autoencoder, get_dataloader, get_optimizer, get_lr_scheduler and
# get_log_dir are project helpers defined elsewhere in the repository;
# `args` and `act` come from the script's argument parsing and activation
# setup earlier in the file.

# init model
encoder_sizes = [28 * 28, 1000, 500, 250, 30]
decoder_sizes = [30, 250, 500, 1000, 28 * 28]

net = deep_autoencoder(encoder_sizes=encoder_sizes,
                       decoder_sizes=decoder_sizes,
                       activation=act).to(args.device)

# init dataloader
trainloader, testloader = get_dataloader(dataset=args.dataset,
                                         train_batch_size=args.batch_size,
                                         test_batch_size=256)
# init optimizer
optim_name = args.optimizer.lower()
tag = optim_name
optimizer = get_optimizer(optim_name, net, args)

# init lr scheduler
lr_scheduler = get_lr_scheduler(optimizer, args)

# init criterion
criterion = torch.nn.BCEWithLogitsLoss()

# init summary writer
log_dir = get_log_dir(optim_name, args)
os.makedirs(log_dir, exist_ok=True)
writer = SummaryWriter(log_dir)

# create output directory for visualized results
# visualization_dir = f"visuals/mnist/{optim_name}"
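
The listing ends after setup. For context, a minimal sketch of the training loop these objects feed into; args.epochs, the image flattening, and reconstructing the input pixels as the loss target are assumptions based on the autoencoder sizes and BCEWithLogitsLoss above:

net.train()
step = 0
for epoch in range(args.epochs):  # args.epochs assumed from the argparse setup
    for images, _ in trainloader:
        # Flatten 28x28 images to match the 784-unit input layer.
        x = images.view(images.size(0), -1).to(args.device)
        optimizer.zero_grad()
        loss = criterion(net(x), x)  # autoencoder: target is the input itself
        loss.backward()
        optimizer.step()
        writer.add_scalar("train/loss", loss.item(), step)
        step += 1
    lr_scheduler.step()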