def init(self):
    """Prepare metric tracking, output logging, and per-mode state."""
    # Print accuracy and loss metrics to the console/dashboard
    tracker.set_scalar("accuracy.*", True)
    tracker.set_scalar("loss.*", True)
    # Log the outputs of every module in the model
    hook_model_outputs(self.mode, self.model, 'model')
    # Keep accuracy stats and memories separate between training and validation
    self.state_modules = [self.accuracy, self.memory]
def init(self):
    """Configure the tracker, hook model outputs, and register state modules."""
    # Metrics printed as scalars
    for pattern in ("accuracy.*", "loss.*"):
        tracker.set_scalar(pattern, True)
    # Monitor layer outputs of the model
    hook_model_outputs(self.mode, self.model, 'model')
    # State modules are meant for carrying state (e.g. RNN hidden state)
    # between training and validation; registering the accuracy metric and
    # memory here keeps their stats separate per mode.
    self.state_modules = [self.accuracy, self.memory]
def init(self):
    """
    Initializations
    """
    # No state needs to be carried between training and validation
    self.state_modules = []
    # Monitor layer outputs of both networks
    for net, label in ((self.generator, 'generator'),
                       (self.discriminator, 'discriminator')):
        hook_model_outputs(self.mode, net, label)
    # Print generator/discriminator losses as scalars
    tracker.set_scalar("loss.generator.*", True)
    tracker.set_scalar("loss.discriminator.*", True)
    # Save generated image samples at a 1/100 rate
    tracker.set_image("generated", True, 1 / 100)
def init(self):
    """Build the GAN models and losses, and configure tracking."""
    # Nothing to carry between training and validation
    self.state_modules = []
    device = self.device
    # Networks, moved to the configured device
    self.generator = Generator().to(device)
    self.discriminator = Discriminator().to(device)
    # Logits-based losses with label smoothing
    self.generator_loss = GeneratorLogitsLoss(self.label_smoothing).to(device)
    self.discriminator_loss = DiscriminatorLogitsLoss(self.label_smoothing).to(device)
    # Monitor layer outputs of both networks
    hook_model_outputs(self.mode, self.generator, 'generator')
    hook_model_outputs(self.mode, self.discriminator, 'discriminator')
    # Print losses as scalars and save generated samples at a 1/100 rate
    tracker.set_scalar("loss.generator.*", True)
    tracker.set_scalar("loss.discriminator.*", True)
    tracker.set_image("generated", True, 1 / 100)
def init(self):
    """Build models, optimizer, sampler, datasets, loaders, and tracking."""
    # Encoder & decoder RNNs on the configured device
    self.encoder = EncoderRNN(self.d_z, self.enc_hidden_size).to(self.device)
    self.decoder = DecoderRNN(self.d_z, self.dec_hidden_size,
                              self.n_distributions).to(self.device)

    # Optimizer over the joint parameter list of both networks;
    # optimizer type and learning rate come from OptimizerConfigs.
    opt = OptimizerConfigs()
    opt.parameters = list(self.encoder.parameters()) + list(self.decoder.parameters())
    self.optimizer = opt

    # Sampler for generating sketches from the trained models
    self.sampler = Sampler(self.encoder, self.decoder)

    # Dataset lives at `data/sketch/[DATASET NAME].npz`
    npz_path = lab.get_data_path() / 'sketch' / f'{self.dataset_name}.npz'
    raw = np.load(str(npz_path), encoding='latin1', allow_pickle=True)

    # Training split; the validation split reuses the training scale
    self.train_dataset = StrokesDataset(raw['train'], self.max_seq_length)
    self.valid_dataset = StrokesDataset(raw['valid'], self.max_seq_length,
                                        self.train_dataset.scale)

    # Data loaders (training is shuffled)
    self.train_loader = DataLoader(self.train_dataset, self.batch_size, shuffle=True)
    self.valid_loader = DataLoader(self.valid_dataset, self.batch_size)

    # Monitor layer outputs on Tensorboard
    hook_model_outputs(self.mode, self.encoder, 'encoder')
    hook_model_outputs(self.mode, self.decoder, 'decoder')

    # Print the total train/validation loss
    tracker.set_scalar("loss.total.*", True)

    # No state modules are needed
    self.state_modules = []
def __init__(self, *, discriminator: Module, generator: Module,
             discriminator_optimizer: Optional[torch.optim.Adam],
             generator_optimizer: Optional[torch.optim.Adam],
             discriminator_loss: DiscriminatorLogitsLoss,
             generator_loss: GeneratorLogitsLoss,
             discriminator_k: int):
    """Store the GAN components and configure output logging & tracking."""
    # Number of discriminator updates per generator update
    self.discriminator_k = discriminator_k
    # Networks
    self.generator = generator
    self.discriminator = discriminator
    # Losses
    self.generator_loss = generator_loss
    self.discriminator_loss = discriminator_loss
    # Optimizers (may be None)
    self.generator_optimizer = generator_optimizer
    self.discriminator_optimizer = discriminator_optimizer
    # Monitor layer outputs of both networks.
    # NOTE(review): these calls pass 2 arguments while other call sites pass
    # a leading `mode` argument — confirm this matches the
    # `hook_model_outputs` signature in use here.
    hook_model_outputs(self.generator, 'generator')
    hook_model_outputs(self.discriminator, 'discriminator')
    # Print losses as scalars and save generated samples at a 1/100 rate
    tracker.set_scalar("loss.generator.*", True)
    tracker.set_scalar("loss.discriminator.*", True)
    tracker.set_image("generated", True, 1 / 100)
def init(self):
    """
    ### Initialize

    Build the dataset pipeline, the StyleGAN2 networks, losses, and
    optimizers, and set tracker configurations.
    """
    # Dataset and a data loader that drops incomplete batches
    ds = Dataset(self.dataset_path, self.image_size)
    dl = torch.utils.data.DataLoader(ds,
                                     batch_size=self.batch_size,
                                     num_workers=8,
                                     shuffle=True,
                                     drop_last=True,
                                     pin_memory=True)
    # Continuous [cyclic loader](../../utils.html#cycle_dataloader)
    self.loader = cycle_dataloader(dl)

    # $\log_2$ of image resolution
    log_resolution = int(math.log2(self.image_size))

    # Discriminator and generator
    self.discriminator = Discriminator(log_resolution).to(self.device)
    self.generator = Generator(log_resolution, self.d_latent).to(self.device)
    # Number of generator blocks, needed for style and noise inputs
    self.n_gen_blocks = self.generator.n_blocks
    # Mapping network from latent $z$ to style $w$
    self.mapping_network = MappingNetwork(self.d_latent,
                                          self.mapping_network_layers).to(self.device)
    # Path length penalty (exponential moving average with beta 0.99)
    self.path_length_penalty = PathLengthPenalty(0.99).to(self.device)

    # Optionally hook all three networks to monitor layer outputs
    if self.log_layer_outputs:
        hook_model_outputs(self.mode, self.discriminator, 'discriminator')
        hook_model_outputs(self.mode, self.generator, 'generator')
        hook_model_outputs(self.mode, self.mapping_network, 'mapping_network')

    # Losses
    self.discriminator_loss = DiscriminatorLoss().to(self.device)
    self.generator_loss = GeneratorLoss().to(self.device)

    # Adam optimizers; the mapping network uses its own learning rate
    self.discriminator_optimizer = torch.optim.Adam(
        self.discriminator.parameters(),
        lr=self.learning_rate, betas=self.adam_betas)
    self.generator_optimizer = torch.optim.Adam(
        self.generator.parameters(),
        lr=self.learning_rate, betas=self.adam_betas)
    self.mapping_network_optimizer = torch.optim.Adam(
        self.mapping_network.parameters(),
        lr=self.mapping_network_learning_rate, betas=self.adam_betas)

    # Save generated image samples
    tracker.set_image("generated", True)
def init(self):
    """Configure metric tracking and register the accuracy state module."""
    # Keep a sliding queue of the last 20 loss values and print it
    tracker.set_queue("loss.*", 20, True)
    # Print accuracy as a scalar
    tracker.set_scalar("accuracy.*", True)
    # Log the outputs of every module in the model
    hook_model_outputs(self.mode, self.model, 'model')
    # Keep accuracy stats separate between training and validation
    self.state_modules = [self.accuracy_func]