def __init__(self, model_dir, num_gpu, input_shape=None, encoder_dim=None,
             training_image_size=256, trainer_name='original', predict=False):
    self.model_dir = model_dir
    self.num_gpu = num_gpu
    self.blocks = NNBlocks()
    self.input_shape = input_shape
    self.output_shape = None  # set after model is compiled
    self.encoder_dim = encoder_dim
    self.trainer_name = trainer_name
    self.predict = predict
    self.state = State(self.model_dir, self.trainer_name, training_image_size)

    self.networks = dict()  # Networks for the model
    self.predictors = dict()  # Predictors for model
    self.history = dict()  # Loss history per save iteration

    # Training information specific to the model should be placed in this
    # dict for reference by the trainer.
    self.training_opts = {'training_size': training_image_size}

    self.build()
def __init__(self, model_dir, gpus, no_logs=False, warp_to_landmarks=False,
             no_flip=False, training_image_size=256, alignments_paths=None,
             preview_scale=100, input_shape=None, encoder_dim=None,
             trainer="original", pingpong=False, memory_saving_gradients=False,
             predict=False):
    logger.debug("Initializing ModelBase (%s): (model_dir: '%s', gpus: %s, no_logs: %s, "
                 "training_image_size: %s, alignments_paths: %s, preview_scale: %s, "
                 "input_shape: %s, encoder_dim: %s, trainer: %s, pingpong: %s, "
                 "memory_saving_gradients: %s, predict: %s)",
                 self.__class__.__name__, model_dir, gpus, no_logs, training_image_size,
                 alignments_paths, preview_scale, input_shape, encoder_dim, trainer,
                 pingpong, memory_saving_gradients, predict)
    self.predict = predict
    self.model_dir = model_dir
    self.gpus = gpus
    self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"],
                           use_icnr_init=self.config["icnr_init"],
                           use_reflect_padding=self.config["reflect_padding"])
    self.input_shape = input_shape
    self.output_shape = None  # set after model is compiled
    self.encoder_dim = encoder_dim
    self.trainer = trainer

    self.state = State(self.model_dir, self.name, no_logs, pingpong, training_image_size)
    self.is_legacy = False
    self.rename_legacy()
    self.load_state_info()

    self.networks = dict()  # Networks for the model
    self.predictors = dict()  # Predictors for model
    self.history = dict()  # Loss history per save iteration

    # Training information specific to the model should be placed in this
    # dict for reference by the trainer.
    self.training_opts = {"alignments": alignments_paths,
                          "preview_scaling": preview_scale / 100,
                          "warp_to_landmarks": warp_to_landmarks,
                          "no_flip": no_flip,
                          "pingpong": pingpong}

    self.set_gradient_type(memory_saving_gradients)
    self.build()
    self.set_training_data()
    logger.debug("Initialized ModelBase (%s)", self.__class__.__name__)
def test_blocks(use_icnr_init, use_convaware_init, use_reflect_padding):
    """ Test for all blocks contained within the NNBlocks Class """
    cls_ = NNBlocks(use_icnr_init=use_icnr_init,
                    use_convaware_init=use_convaware_init,
                    use_reflect_padding=use_reflect_padding)
    block_test(cls_.conv2d, input_shape=(2, 5, 5, 128), kwargs=dict(filters=1024, kernel_size=3))
    block_test(cls_.conv, input_shape=(2, 8, 8, 32), kwargs=dict(filters=64))
    block_test(cls_.conv_sep, input_shape=(2, 8, 8, 32), kwargs=dict(filters=64))
    block_test(cls_.upscale, input_shape=(2, 4, 4, 128), kwargs=dict(filters=64))
    block_test(cls_.res_block, input_shape=(2, 2, 2, 64), kwargs=dict(filters=64))
    block_test(cls_.upscale2x, input_shape=(2, 4, 4, 128), kwargs=dict(filters=64, fast=False))
    block_test(cls_.upscale2x, input_shape=(2, 4, 4, 128), kwargs=dict(filters=64, fast=True))
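The `block_test` helper used above is defined elsewhere in the test module. A minimal sketch of what such a helper might look like follows; it only checks that a block wires into a tiny Keras model and produces finite output, so treat the names and exact behaviour as assumptions rather than the project's real implementation.

import numpy as np
from keras.layers import Input
from keras.models import Model


def block_test(layer_func, input_shape=None, kwargs=None):
    """ Hypothetical sketch: wrap a single NNBlocks layer function in a
    throwaway Keras model and confirm a forward pass returns finite values. """
    kwargs = kwargs or dict()
    batch_size, *shape = input_shape
    inp = Input(shape=shape)
    out = layer_func(inp, **kwargs)          # NNBlocks methods take a tensor and return a tensor
    model = Model(inp, out)
    dummy = np.random.random((batch_size, ) + tuple(shape)).astype("float32")
    preds = model.predict(dummy)
    assert preds.shape[0] == batch_size
    assert not np.any(np.isnan(preds)), "block produced NaN output"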
def __init__(self, model_dir, gpus=1, configfile=None, snapshot_interval=0, no_logs=False,
             warp_to_landmarks=False, augment_color=True, no_flip=False,
             training_image_size=256, alignments_paths=None, preview_scale=100,
             input_shape=None, encoder_dim=None, trainer="original", pingpong=False,
             memory_saving_gradients=False, optimizer_savings=False, predict=False):
    logger.debug("Initializing ModelBase (%s): (model_dir: '%s', gpus: %s, configfile: %s, "
                 "snapshot_interval: %s, no_logs: %s, warp_to_landmarks: %s, augment_color: "
                 "%s, no_flip: %s, training_image_size: %s, alignments_paths: %s, "
                 "preview_scale: %s, input_shape: %s, encoder_dim: %s, trainer: %s, "
                 "pingpong: %s, memory_saving_gradients: %s, optimizer_savings: %s, "
                 "predict: %s)",
                 self.__class__.__name__, model_dir, gpus, configfile, snapshot_interval,
                 no_logs, warp_to_landmarks, augment_color, no_flip, training_image_size,
                 alignments_paths, preview_scale, input_shape, encoder_dim, trainer,
                 pingpong, memory_saving_gradients, optimizer_savings, predict)
    self.predict = predict
    self.model_dir = model_dir
    self.vram_savings = VRAMSavings(pingpong, optimizer_savings, memory_saving_gradients)

    self.backup = Backup(self.model_dir, self.name)
    self.gpus = gpus
    self.configfile = configfile
    self.input_shape = input_shape
    self.encoder_dim = encoder_dim
    self.trainer = trainer

    self.load_config()  # Load config if plugin has not already referenced it
    self.state = State(self.model_dir,
                       self.name,
                       self.config_changeable_items,
                       no_logs,
                       self.vram_savings.pingpong,
                       training_image_size)

    self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"],
                           use_icnr_init=self.config["icnr_init"],
                           use_convaware_init=self.config["conv_aware_init"],
                           use_reflect_padding=self.config["reflect_padding"],
                           first_run=self.state.first_run)

    self.is_legacy = False
    self.rename_legacy()
    self.load_state_info()

    self.networks = dict()  # Networks for the model
    self.predictors = dict()  # Predictors for model
    self.history = dict()  # Loss history per save iteration

    # Training information specific to the model should be placed in this
    # dict for reference by the trainer.
    self.training_opts = {"alignments": alignments_paths,
                          "preview_scaling": preview_scale / 100,
                          "warp_to_landmarks": warp_to_landmarks,
                          "augment_color": augment_color,
                          "no_flip": no_flip,
                          "pingpong": self.vram_savings.pingpong,
                          "snapshot_interval": snapshot_interval,
                          "training_size": self.state.training_size,
                          "no_logs": self.state.current_session["no_logs"],
                          "coverage_ratio": self.calculate_coverage_ratio(),
                          "mask_type": self.config["mask_type"],
                          "mask_blur_kernel": self.config["mask_blur_kernel"],
                          "mask_threshold": self.config["mask_threshold"],
                          "learn_mask": (self.config["learn_mask"] and
                                         self.config["mask_type"] is not None),
                          "penalized_mask_loss": (self.config["penalized_mask_loss"] and
                                                  self.config["mask_type"] is not None)}
    logger.debug("training_opts: %s", self.training_opts)

    if self.multiple_models_in_folder:
        deprecation_warning("Support for multiple model types within the same folder",
                            additional_info="Please split each model into separate folders to "
                                            "avoid issues in future.")

    self.build()
    logger.debug("Initialized ModelBase (%s)", self.__class__.__name__)
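`calculate_coverage_ratio` is defined elsewhere on the class. A plausible sketch is shown below, assuming coverage is configured as a percentage and snapped so the cropped training region is a whole, even number of pixels; this is an illustration only, not the project's actual method.

def calculate_coverage_ratio(self):
    """ Hypothetical sketch: convert the configured coverage percentage into the
    ratio of the training image that is actually fed to the model. """
    requested = self.config.get("coverage", 62.5) / 100   # assumed config key and default
    # Snap the discarded border to whole pixels on each side, then recompute the ratio
    cutoff = int(self.state.training_size * (1 - requested)) // 2
    return (self.state.training_size - (cutoff * 2)) / self.state.training_size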
def __init__(self, model_dir, gpus, configfile=None, snapshot_interval=0, no_logs=False,
             warp_to_landmarks=False, augment_color=True, no_flip=False,
             training_image_size=256, alignments_paths=None, preview_scale=100,
             input_shape=None, encoder_dim=None, trainer="original", pingpong=False,
             memory_saving_gradients=False, predict=False):
    logger.debug("Initializing ModelBase (%s): (model_dir: '%s', gpus: %s, configfile: %s, "
                 "snapshot_interval: %s, no_logs: %s, warp_to_landmarks: %s, augment_color: "
                 "%s, no_flip: %s, training_image_size: %s, alignments_paths: %s, "
                 "preview_scale: %s, input_shape: %s, encoder_dim: %s, trainer: %s, "
                 "pingpong: %s, memory_saving_gradients: %s, predict: %s)",
                 self.__class__.__name__, model_dir, gpus, configfile, snapshot_interval,
                 no_logs, warp_to_landmarks, augment_color, no_flip, training_image_size,
                 alignments_paths, preview_scale, input_shape, encoder_dim, trainer,
                 pingpong, memory_saving_gradients, predict)
    self.predict = predict
    self.model_dir = model_dir
    self.backup = Backup(self.model_dir, self.name)
    self.gpus = gpus
    self.configfile = configfile
    self.blocks = NNBlocks(use_subpixel=self.config["subpixel_upscaling"],
                           use_icnr_init=self.config["icnr_init"],
                           use_reflect_padding=self.config["reflect_padding"])
    self.input_shape = input_shape
    self.output_shape = None  # set after model is compiled
    self.encoder_dim = encoder_dim
    self.trainer = trainer

    self.state = State(self.model_dir,
                       self.name,
                       self.config_changeable_items,
                       no_logs,
                       pingpong,
                       training_image_size)
    self.is_legacy = False
    self.rename_legacy()
    self.load_state_info()

    self.networks = dict()  # Networks for the model
    self.predictors = dict()  # Predictors for model
    self.history = dict()  # Loss history per save iteration

    # Training information specific to the model should be placed in this
    # dict for reference by the trainer.
    self.training_opts = {"alignments": alignments_paths,
                          "preview_scaling": preview_scale / 100,
                          "warp_to_landmarks": warp_to_landmarks,
                          "augment_color": augment_color,
                          "no_flip": no_flip,
                          "pingpong": pingpong,
                          "snapshot_interval": snapshot_interval}

    self.set_gradient_type(memory_saving_gradients)

    if self.multiple_models_in_folder:
        deprecation_warning("Support for multiple model types within the same folder",
                            additional_info="Please split each model into separate folders to "
                                            "avoid issues in future.")

    self.build()
    self.set_training_data()
    logger.debug("Initialized ModelBase (%s)", self.__class__.__name__)
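For context, a concrete model plugin would typically inject its own `input_shape` and `encoder_dim` before delegating to one of the base initializers above. The sketch below is a hypothetical example of that pattern, with illustrative values; the class name, resolution, and bottleneck size are assumptions, not the actual plugin code.

class Model(ModelBase):
    """ Hypothetical plugin subclass showing how the base initializer is driven. """

    def __init__(self, *args, **kwargs):
        kwargs["input_shape"] = (64, 64, 3)   # example input resolution (assumption)
        kwargs["encoder_dim"] = 1024          # example encoder bottleneck size (assumption)
        super().__init__(*args, **kwargs)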