def test_larc_mixed_precision(self):
    for opt_level in ["O0", "O1", "O2", "O3"]:
        model = MyModel(1)

        optimizer = LARC(
            torch.optim.SGD(
                [{"params": model.parameters(), "lr": 0.25}], momentum=0.125
            )
        )

        model, optimizer = amp.initialize(
            model, optimizer, opt_level=opt_level, verbosity=0
        )

        optimizer.zero_grad()
        loss = model(self.x)
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()
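# The test above exercises apex's LARC wrapper under each amp opt_level.
# For orientation, the sketch below illustrates the per-parameter "trust
# ratio" that LARC uses to rescale the learning rate before the wrapped
# optimizer steps (clip=False behaviour). It is only an illustration, not
# apex's implementation, and the helper name `larc_adaptive_lr` is made up
# for this example.
import torch


def larc_adaptive_lr(param: torch.Tensor,
                     base_lr: float,
                     trust_coefficient: float = 0.001,
                     weight_decay: float = 0.0,
                     eps: float = 1e-8) -> float:
    """Return the LARC-scaled learning rate for a single parameter."""
    param_norm = torch.norm(param.data)
    grad_norm = torch.norm(param.grad.data)
    if param_norm == 0 or grad_norm == 0:
        # Degenerate norms: fall back to the unscaled learning rate.
        return base_lr
    # trust ratio = eta * ||w|| / (||dw|| + wd * ||w||)
    trust_ratio = trust_coefficient * param_norm / (
        grad_norm + weight_decay * param_norm + eps)
    return base_lr * float(trust_ratio)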
def learning(
    cfg: OmegaConf,
    training_data_loader: torch.utils.data.DataLoader,
    validation_data_loader: torch.utils.data.DataLoader,
    model: SupervisedModel,
) -> None:
    """
    Learning function including evaluation.

    :param cfg: Hydra's config instance
    :param training_data_loader: Training data loader
    :param validation_data_loader: Validation data loader
    :param model: Model
    :return: None
    """

    local_rank = cfg["distributed"]["local_rank"]
    num_gpus = cfg["distributed"]["world_size"]
    epochs = cfg["parameter"]["epochs"]
    num_training_samples = len(training_data_loader.dataset.data)
    # partial batches are discarded because drop_last=True in the training data loader
    steps_per_epoch = int(
        num_training_samples / (cfg["experiment"]["batches"] * num_gpus))
    total_steps = cfg["parameter"]["epochs"] * steps_per_epoch
    warmup_steps = cfg["parameter"]["warmup_epochs"] * steps_per_epoch
    current_step = 0
    best_metric = np.finfo(np.float64).max

    optimizer = torch.optim.SGD(
        params=model.parameters(),
        lr=calculate_initial_lr(cfg),
        momentum=cfg["parameter"]["momentum"],
        nesterov=False,
        weight_decay=cfg["experiment"]["decay"],
    )

    # https://github.com/google-research/simclr/blob/master/lars_optimizer.py#L26
    optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)

    cos_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer.optim,
        T_max=total_steps - warmup_steps,
    )

    for epoch in range(1, epochs + 1):
        # training
        model.train()
        training_data_loader.sampler.set_epoch(epoch)

        for data, targets in training_data_loader:
            # adjust learning rate by applying linear warmup
            if current_step <= warmup_steps:
                lr = calculate_lr(cfg, warmup_steps, current_step)
                for param_group in optimizer.param_groups:
                    param_group["lr"] = lr

            optimizer.zero_grad()
            data, targets = data.to(local_rank), targets.to(local_rank)
            unnormalized_features = model(data)
            loss = torch.nn.functional.cross_entropy(unnormalized_features, targets)
            loss.backward()
            optimizer.step()

            # adjust learning rate by applying cosine annealing
            if current_step > warmup_steps:
                cos_lr_scheduler.step()

            current_step += 1

        if local_rank == 0:
            logger_line = "Epoch:{}/{} progress:{:.3f} loss:{:.3f}, lr:{:.7f}".format(
                epoch, epochs, epoch / epochs, loss.item(),
                optimizer.param_groups[0]["lr"])

        # validation runs every epoch; losses and correct counts are reduced onto rank 0
        sum_val_loss, num_val_corrects = validation(validation_data_loader, model, local_rank)

        torch.distributed.barrier()
        torch.distributed.reduce(sum_val_loss, dst=0)
        torch.distributed.reduce(num_val_corrects, dst=0)
        num_val_samples = len(validation_data_loader.dataset)

        # logging and save checkpoint
        if local_rank == 0:
            validation_loss = sum_val_loss.item() / num_val_samples
            validation_acc = num_val_corrects.item() / num_val_samples

            logging.info(logger_line + " val loss:{:.3f}, val acc:{:.2f}%".format(
                validation_loss, validation_acc * 100.))

            if cfg["parameter"]["metric"] == "loss":
                metric = validation_loss
            else:
                metric = 1. - validation_acc

            if metric <= best_metric:
                best_metric = metric
                # remove the previous best checkpoint before saving a new one
                if "save_fname" in locals():
                    if os.path.exists(save_fname):
                        os.remove(save_fname)

                save_fname = "epoch={}-{}".format(
                    epoch, cfg["experiment"]["output_model_name"])
                torch.save(model.state_dict(), save_fname)
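# `calculate_initial_lr` and `calculate_lr` are not defined in this file. They
# are assumed to implement the usual SimCLR-style scaled base learning rate and
# linear warmup; the sketch below is one plausible version under that
# assumption (linear scaling by global batch size / 256). The config key
# "base_lr" is a guess, not necessarily the key used by this repository.
from omegaconf import OmegaConf


def calculate_initial_lr(cfg: OmegaConf) -> float:
    """Scale the configured base learning rate linearly with the global batch size."""
    global_batch_size = cfg["experiment"]["batches"] * cfg["distributed"]["world_size"]
    return cfg["parameter"]["base_lr"] * global_batch_size / 256.


def calculate_lr(cfg: OmegaConf, warmup_steps: int, current_step: int) -> float:
    """Linearly ramp the learning rate from zero to the initial rate over warmup."""
    return calculate_initial_lr(cfg) * current_step / max(warmup_steps, 1)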
def train(
    cfg: OmegaConf,
    training_data_loader: torch.utils.data.DataLoader,
    model: ContrastiveModel,
) -> None:
    """
    Training function.

    :param cfg: Hydra's config instance
    :param training_data_loader: Training data loader for contrastive learning
    :param model: Contrastive model based on resnet
    :return: None
    """

    local_rank = cfg["distributed"]["local_rank"]
    num_gpus = cfg["distributed"]["world_size"]
    epochs = cfg["parameter"]["epochs"]
    num_training_samples = len(training_data_loader.dataset.data)
    # partial batches are discarded because drop_last=True in the training data loader
    steps_per_epoch = int(
        num_training_samples / (cfg["experiment"]["batches"] * num_gpus))
    total_steps = cfg["parameter"]["epochs"] * steps_per_epoch
    warmup_steps = cfg["parameter"]["warmup_epochs"] * steps_per_epoch
    current_step = 0

    model.train()

    nt_cross_entropy_loss = NT_Xent(
        temperature=cfg["parameter"]["temperature"], device=local_rank)

    optimizer = torch.optim.SGD(
        params=exclude_from_wt_decay(
            model.named_parameters(), weight_decay=cfg["experiment"]["decay"]),
        lr=calculate_initial_lr(cfg),
        momentum=cfg["parameter"]["momentum"],
        nesterov=False,
        weight_decay=0.,
    )

    # https://github.com/google-research/simclr/blob/master/lars_optimizer.py#L26
    optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)

    cos_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer.optim,
        T_max=total_steps - warmup_steps,
    )

    for epoch in range(1, epochs + 1):
        training_data_loader.sampler.set_epoch(epoch)

        for (view0, view1), _ in training_data_loader:
            # adjust learning rate by applying linear warmup
            if current_step <= warmup_steps:
                lr = calculate_lr(cfg, warmup_steps, current_step)
                for param_group in optimizer.param_groups:
                    param_group["lr"] = lr

            optimizer.zero_grad()
            z0 = model(view0.to(local_rank))
            z1 = model(view1.to(local_rank))
            loss = nt_cross_entropy_loss(z0, z1)
            loss.backward()
            optimizer.step()

            # adjust learning rate by applying cosine annealing
            if current_step > warmup_steps:
                cos_lr_scheduler.step()

            current_step += 1

        if local_rank == 0:
            logging.info(
                "Epoch:{}/{} progress:{:.3f} loss:{:.3f}, lr:{:.7f}".format(
                    epoch, epochs, epoch / epochs, loss.item(),
                    optimizer.param_groups[0]["lr"]))

            if epoch % cfg["experiment"]["save_model_epoch"] == 0:
                save_fname = "epoch={}-{}".format(
                    epoch, cfg["experiment"]["output_model_name"])
                torch.save(model.state_dict(), save_fname)
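# `exclude_from_wt_decay` is also not defined here. It is assumed to build the
# two parameter groups commonly used with LARS/LARC: weights receive weight
# decay, while biases and batch-norm parameters are placed in a no-decay group.
# A minimal sketch under that assumption (the skip_list contents are a guess):
from typing import Iterable, List, Tuple

import torch


def exclude_from_wt_decay(
    named_params: Iterable[Tuple[str, torch.nn.Parameter]],
    weight_decay: float,
    skip_list: Tuple[str, ...] = ("bias", "bn"),
) -> List[dict]:
    params, excluded_params = [], []
    for name, param in named_params:
        if not param.requires_grad:
            continue
        # Route biases and batch-norm parameters into the no-decay group.
        if any(skip_name in name for skip_name in skip_list):
            excluded_params.append(param)
        else:
            params.append(param)
    return [
        {"params": params, "weight_decay": weight_decay},
        {"params": excluded_params, "weight_decay": 0.0},
    ]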