def setup(self, config):
    model = model_creator(config)
    optimizer = optimizer_creator(model, config)
    train_loader, val_loader = data_creator(config)

    # Keyword arguments in ``ddp_args`` are forwarded to
    # torch.nn.parallel.DistributedDataParallel when the model is wrapped.
    self.model, self.optimizer = self.register(
        models=model,
        optimizers=optimizer,
        ddp_args={"find_unused_parameters": True})
    self.register_data(train_loader=train_loader, validation_loader=val_loader)

    # The flag is now set on the wrapped DDP model.
    assert self.model.find_unused_parameters
def setup(self, config):
    model = nn.Sequential(nn.Linear(1, config.get("hidden_size", 1)))
    optimizer = torch.optim.SGD(model.parameters(), lr=config.get("lr", 1e-2))
    criterion = nn.MSELoss()

    self.model, self.optimizer, self.criterion = self.register(
        models=model, optimizers=optimizer, criterion=criterion)

    train_loader, val_loader = data_creator(config)
    self.register_data(train_loader=train_loader, validation_loader=val_loader)
def setup(self, config):
    model = model_creator(config)
    optimizer = optimizer_creator(model, config)
    train_loader, val_loader = data_creator(config)
    scheduler = scheduler_creator(optimizer, config)
    loss = nn.MSELoss()

    self.model, self.optimizer, self.criterion, self.scheduler = self.register(
        models=model,
        optimizers=optimizer,
        criterion=loss,
        schedulers=scheduler)
    self.register_data(train_loader=train_loader, validation_loader=val_loader)
def setup(self, config):
    # Register two models and one optimizer per model in a single call.
    models = (nn.Linear(1, 1), nn.Linear(1, 1))
    opts = [
        torch.optim.SGD(model.parameters(), lr=0.0001) for model in models
    ]
    loss = nn.MSELoss()
    train_loader, val_loader = data_creator(config)

    self.models, self.optimizers, self.criterion = self.register(
        models=models, optimizers=opts, criterion=loss)
    self.register_data(train_loader=train_loader, validation_loader=val_loader)
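With several models registered, the default training loop no longer knows how to drive them, so a setup like the one above is usually paired with a custom batch step. Below is a minimal sketch, assuming Ray SGD's train_batch(self, batch, batch_info) override point; the update shown (each model stepped independently on the same batch) is purely illustrative:

def train_batch(self, batch, batch_info):
    # Illustrative override: step every registered model/optimizer pair
    # on the same batch and report the mean loss.
    features, target = batch
    losses = []
    for model, optimizer in zip(self.models, self.optimizers):
        optimizer.zero_grad()
        loss = self.criterion(model(features), target)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return {"loss": sum(losses) / len(losses)}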
def setup(self, config):
    # This variant stores its loaders and loss directly as attributes
    # instead of going through register()/register_data().
    self.train_loader, self.val_loader = data_creator(config)
    self.loss = nn.MSELoss()
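For context, here is a hedged end-to-end sketch of how one of these operators would be run, assuming Ray SGD's TorchTrainer API; the class name CustomOperator, the worker count, and the config values are illustrative:

import ray
from ray.util.sgd import TorchTrainer

ray.init()

# CustomOperator stands in for a TrainingOperator subclass whose setup()
# matches one of the examples above.
trainer = TorchTrainer(
    training_operator_cls=CustomOperator,
    num_workers=2,
    config={"lr": 1e-2, "hidden_size": 1},
    scheduler_step_freq="epoch")  # relevant only when a scheduler is registered

stats = trainer.train()  # one pass over the registered train_loader
print(stats)
trainer.shutdown()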