def __init__(self, config):
    """Initialize the progressive-GAN trainer from a parsed config object.

    Reads hyperparameters from `config`, builds the generator/discriminator
    pair, restores a checkpoint if one exists (otherwise grows the models),
    sets up optimizers, logging counters, and the train/validation
    dataloaders.

    Args:
        config: project config namespace; must expose `train_config`,
            `models`, `logging`, dataset/checkpoint paths, and
            `use_full_validation` / `load_fraction_of_dataset` flags.
    """
    # Set Hyperparameters
    # batch size is a schedule keyed by image size (progressive growing).
    self.batch_size_schedule = config.train_config.batch_size_schedule
    self.dataset = config.dataset
    self.learning_rate = config.train_config.learning_rate
    # EMA decay for the running-average copy of the generator.
    self.running_average_generator_decay = config.models.generator.running_average_decay
    self.pose_size = config.models.pose_size
    # Discriminator architecture selector (string from config) — passed to init_model.
    self.discriminator_model = config.models.discriminator.structure
    self.full_validation = config.use_full_validation
    self.load_fraction_of_dataset = config.load_fraction_of_dataset

    # Image settings
    # Training starts at 4x4 and the resolution grows toward max_imsize.
    self.current_imsize = 4
    self.image_channels = 3
    self.max_imsize = config.max_imsize

    # Logging variables
    self.checkpoint_dir = config.checkpoint_dir
    # Second-to-last path component — assumes checkpoint_dir ends with a
    # trailing "/"; TODO(review): confirm this holds for all configs.
    self.model_name = self.checkpoint_dir.split("/")[-2]
    self.config_path = config.config_path
    # Counts images seen (not optimizer steps) — used for all log/save scheduling below.
    self.global_step = 0
    self.logger = logger.Logger(config.summaries_dir,
                                config.generated_data_dir)

    # Transition settings (fade-in between resolutions)
    self.transition_variable = 1.
    self.transition_iters = config.train_config.transition_iters
    self.is_transitioning = False
    self.transition_step = 0
    self.start_channel_size = config.models.start_channel_size
    self.latest_switch = 0
    # NVIDIA Apex AMP optimization level (e.g. "O0".."O3") — forwarded to the loss.
    self.opt_level = config.train_config.amp_opt_level
    self.start_time = time.time()

    # Build the networks, then the EMA generator, then the WGAN criterion.
    self.discriminator, self.generator = init_model(
        self.pose_size, config.models.start_channel_size,
        self.image_channels, self.discriminator_model)
    self.init_running_average_generator()
    self.criterion = loss.WGANLoss(self.discriminator, self.generator,
                                   self.opt_level)

    # Resume from checkpoint if possible; otherwise grow the fresh models
    # to the appropriate starting resolution.
    if not self.load_checkpoint():
        print("Could not load checkpoint, so extending the models")
        self.extend_models()
    # Optimizers must be (re)built after the model structure is final.
    self.init_optimizers()

    self.batch_size = self.batch_size_schedule[self.current_imsize]
    self.update_running_average_beta()
    self.logger.log_variable("stats/batch_size", self.batch_size)

    # Scheduling counters: each "next_*" point is measured in images seen.
    self.num_ims_per_log = config.logging.num_ims_per_log
    self.next_log_point = self.global_step
    self.num_ims_per_save_image = config.logging.num_ims_per_save_image
    self.next_image_save_point = self.global_step
    self.num_ims_per_checkpoint = config.logging.num_ims_per_checkpoint
    self.next_validation_checkpoint = self.global_step

    self.dataloader_train, self.dataloader_val = load_dataset(
        self.dataset, self.batch_size, self.current_imsize,
        self.full_validation, self.pose_size,
        self.load_fraction_of_dataset)

    # Fixed latent batch for reproducible sample images.
    # NOTE(review): shape (8, 32, 4, 4) is hard-coded — presumably 8 samples
    # with 32 latent channels at the 4x4 base resolution; confirm against
    # the generator's expected input.
    self.static_z = to_cuda(torch.randn((8, 32, 4, 4)))
    self.num_skipped_steps = 0
def __init__(self, config):
    """Initialize the progressive-GAN trainer from a parsed config object.

    Annotated variant of the trainer constructor: reads hyperparameters,
    builds the generator/discriminator, restores or extends the models,
    and prepares optimizers, logging counters, and dataloaders.

    Args:
        config: project config namespace; must expose `train_config`,
            `models`, `logging`, dataset/checkpoint paths, and
            `use_full_validation` / `load_fraction_of_dataset` flags.
    """
    # Set Hyperparameters
    # Per-resolution batch sizes (progressive growing schedule).
    self.batch_size_schedule = config.train_config.batch_size_schedule
    self.dataset = config.dataset
    self.learning_rate = config.train_config.learning_rate
    # EMA decay for the running-average generator copy.
    self.running_average_generator_decay = config.models.generator.running_average_decay
    # Used in ProgressiveBaseModel, which both G and D are based on.
    # TODO(review): document what pose_size actually controls.
    self.pose_size = config.models.pose_size
    # Discriminator structure selector ("normal" vs. "deep").
    # TODO(review): document the difference between the variants.
    self.discriminator_model = config.models.discriminator.structure
    # Default = False.
    self.full_validation = config.use_full_validation
    # Loads only part of the dataset — presumably useful for quick tests; confirm.
    self.load_fraction_of_dataset = config.load_fraction_of_dataset

    # Image settings
    # Training begins at 4x4 and the resolution is progressively upsampled.
    self.current_imsize = 4
    self.image_channels = 3
    # Final image resolution after the series of upsamplings
    # (DeepPrivacy typically uses 128x128).
    self.max_imsize = config.max_imsize

    # Logging variables
    self.checkpoint_dir = config.checkpoint_dir
    # Second-to-last path component — assumes a trailing "/" on checkpoint_dir.
    self.model_name = self.checkpoint_dir.split("/")[-2]
    self.config_path = config.config_path
    # Counts images seen; drives all log/save scheduling below.
    self.global_step = 0
    self.logger = logger.Logger(config.summaries_dir,
                                config.generated_data_dir)

    # Transition settings (fade-in between resolutions)
    self.transition_variable = 1.
    self.transition_iters = config.train_config.transition_iters
    self.is_transitioning = False
    self.transition_step = 0
    self.start_channel_size = config.models.start_channel_size
    self.latest_switch = 0
    # NVIDIA Apex AMP optimization level — forwarded to the loss.
    self.opt_level = config.train_config.amp_opt_level
    self.start_time = time.time()

    # Build networks, then the EMA generator, then the WGAN criterion.
    self.discriminator, self.generator = init_model(self.pose_size,
                                                    config.models.start_channel_size,
                                                    self.image_channels,
                                                    self.discriminator_model)
    self.init_running_average_generator()
    self.criterion = loss.WGANLoss(self.discriminator, self.generator,
                                   self.opt_level)

    # Resume from checkpoint if possible; otherwise grow the fresh models.
    if not self.load_checkpoint():
        self.extend_models()
    # Optimizers must be (re)built after the model structure is final.
    self.init_optimizers()

    self.batch_size = self.batch_size_schedule[self.current_imsize]
    self.update_running_average_beta()
    self.logger.log_variable("stats/batch_size", self.batch_size)

    # Scheduling counters: each "next_*" point is measured in images seen.
    self.num_ims_per_log = config.logging.num_ims_per_log
    self.next_log_point = self.global_step
    self.num_ims_per_save_image = config.logging.num_ims_per_save_image
    self.next_image_save_point = self.global_step
    self.num_ims_per_checkpoint = config.logging.num_ims_per_checkpoint
    self.next_validation_checkpoint = self.global_step

    self.dataloader_train, self.dataloader_val = load_dataset(
        self.dataset, self.batch_size, self.current_imsize,
        self.full_validation, self.pose_size,
        self.load_fraction_of_dataset)

    # Fixed latent batch for reproducible sample images.
    # NOTE(review): shape (8, 32, 4, 4) is hard-coded — presumably 8 samples,
    # 32 latent channels at the 4x4 base resolution; confirm against the
    # generator's expected input.
    self.static_z = to_cuda(torch.randn((8, 32, 4, 4)))
    self.num_skipped_steps = 0