def __init__(
    self,
    generative_model,
    clustering_model,
    discriminative_model,
    discriminator_loss,
    generator_loss,
    consistency_loss,
    feature_matching_loss=None,
    learning_rate=1e-05,
    ctx="cpu",
):
    '''
    Init.

    Args:
        generative_model:       is-a `GenerativeModel`.
        clustering_model:       is-a `GenerativeModel`.
        discriminative_model:   is-a `DiscriminativeModel`.
        generator_loss:         is-a `GeneratorLoss`.
        discriminator_loss:     is-a `GANDiscriminatorLoss`.
        consistency_loss:       is-a `Loss`.
        feature_matching_loss:  is-a `GANFeatureMatchingLoss`.
        learning_rate:          `float` of learning rate.
        ctx:                    device context, e.g. `"cpu"`.
    '''
    logger = getLogger("accelbrainbase")
    self.__logger = logger

    init_deferred_flag = self.init_deferred_flag
    self.init_deferred_flag = True

    if generator_loss is None:
        _generator_loss = GeneratorLoss()
    else:
        _generator_loss = generator_loss

    super().__init__(
        true_sampler=TrueSampler(),
        generative_model=clustering_model,
        discriminative_model=discriminative_model,
        generator_loss=_generator_loss,
        discriminator_loss=discriminator_loss,
        feature_matching_loss=feature_matching_loss,
        learning_rate=learning_rate,
        ctx=ctx,
    )
    self.init_deferred_flag = init_deferred_flag

    self.__generative_model = generative_model
    self.__clustering_model = clustering_model
    self.__discriminative_model = discriminative_model
    self.__discriminator_loss = discriminator_loss
    self.__generator_loss = generator_loss
    self.__consistency_loss = consistency_loss
    self.__feature_matching_loss = feature_matching_loss
    self.__learning_rate = learning_rate
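A minimal usage sketch follows. The class name `ClusterGANController` and the pre-built component variables are assumptions for illustration; only the keyword arguments and the `generator_loss=None` fallback are taken from the constructor above.

# Hedged usage sketch (not from the library's docs). `ClusterGANController` is an
# assumed name for the class that owns this __init__; `g_model`, `c_model`, and
# `d_model` stand for GenerativeModel / GenerativeModel / DiscriminativeModel
# instances built elsewhere.
controller = ClusterGANController(
    generative_model=g_model,
    clustering_model=c_model,        # passed to the parent GAN controller as its generator (see super().__init__ above)
    discriminative_model=d_model,
    discriminator_loss=DiscriminatorLoss(weight=1.0),
    generator_loss=None,             # `None` falls back to `GeneratorLoss()` inside __init__
    consistency_loss=L2NormLoss(weight=1.0),
    feature_matching_loss=None,
    learning_rate=1e-05,
    ctx="cpu",
)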
def __init__(
    self,
    dir_list,
    width=28,
    height=28,
    channel=1,
    initializer=None,
    batch_size=40,
    learning_rate=1e-03,
    ctx=mx.gpu(),
    discriminative_model=None,
    generative_model=None,
):
    '''
    Init.

    If you are not satisfied with this simple default setting,
    delegate a `discriminative_model` and a `generative_model` of your own design.

    Args:
        dir_list:       `list` of `str` of path to image files.
        width:          `int` of image width.
        height:         `int` of image height.
        channel:        `int` of image channel.
        initializer:    is-a `mxnet.initializer.Initializer` for the parameters of the model. If `None`, parameters are drawn from the Xavier distribution.
        batch_size:     `int` of batch size.
        learning_rate:  `float` of learning rate.
        ctx:            `mx.gpu()` or `mx.cpu()`.
        discriminative_model:   is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.discriminative_model.DiscriminativeModel`.
        generative_model:       is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.generative_model.GenerativeModel`.
    '''
    image_extractor = ImageExtractor(
        width=width,
        height=height,
        channel=channel,
        ctx=ctx
    )

    unlabeled_image_iterator = UnlabeledImageIterator(
        image_extractor=image_extractor,
        dir_list=dir_list,
        batch_size=batch_size,
        norm_mode="z_score",
        scale=1.0,
        noiseable_data=GaussNoise(sigma=1e-03, mu=0.0),
    )

    true_sampler = TrueSampler()
    true_sampler.iteratorable_data = unlabeled_image_iterator

    condition_sampler = ConditionSampler()
    condition_sampler.true_sampler = true_sampler

    computable_loss = L2NormLoss()

    if discriminative_model is None:
        output_nn = NeuralNetworks(
            computable_loss=computable_loss,
            initializer=initializer,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            units_list=[100, 1],
            dropout_rate_list=[0.5, 0.0],
            optimizer_name="SGD",
            activation_list=["relu", "sigmoid"],
            hidden_batch_norm_list=[BatchNorm(), None],
            ctx=ctx,
            hybridize_flag=True,
            regularizatable_data_list=[],
            scale=1.0,
            output_no_bias_flag=True,
            all_no_bias_flag=True,
            not_init_flag=False,
        )

        d_model = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer=initializer,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hidden_units_list=[
                Conv2D(channels=16, kernel_size=6, strides=(2, 2), padding=(1, 1)),
                Conv2D(channels=32, kernel_size=3, strides=(2, 2), padding=(1, 1)),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=output_nn,
            hidden_dropout_rate_list=[0.5, 0.5],
            hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
            optimizer_name="SGD",
            hidden_activation_list=["relu", "relu"],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            hybridize_flag=True,
            regularizatable_data_list=[],
            scale=1.0,
        )

        discriminative_model = DiscriminativeModel(
            model=d_model,
            initializer=None,
            learning_rate=learning_rate,
            optimizer_name="SGD",
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
        )
    else:
        if isinstance(discriminative_model, DiscriminativeModel) is False:
            raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")

    if generative_model is None:
        g_model = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer=initializer,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hidden_units_list=[
                Conv2DTranspose(channels=16, kernel_size=6, strides=(1, 1), padding=(1, 1)),
                Conv2DTranspose(channels=1, kernel_size=3, strides=(1, 1), padding=(1, 1)),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=None,
            hidden_dropout_rate_list=[0.5, 0.0],
            hidden_batch_norm_list=[BatchNorm(), None],
            optimizer_name="SGD",
            hidden_activation_list=["relu", "identity"],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            hybridize_flag=True,
            regularizatable_data_list=[],
            scale=1.0,
        )

        generative_model = GenerativeModel(
            noise_sampler=UniformNoiseSampler(
                low=-1e-05,
                high=1e-05,
                batch_size=batch_size,
                seq_len=0,
                channel=channel,
                height=height,
                width=width,
                ctx=ctx
            ),
            model=g_model,
            initializer=None,
            condition_sampler=condition_sampler,
            conditonal_dim=1,
            learning_rate=learning_rate,
            optimizer_name="SGD",
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
        )
    else:
        if isinstance(generative_model, GenerativeModel) is False:
            raise TypeError("The type of `generative_model` must be `GenerativeModel`.")

    GAN = GANController(
        true_sampler=true_sampler,
        generative_model=generative_model,
        discriminative_model=discriminative_model,
        generator_loss=GeneratorLoss(weight=1.0),
        discriminator_loss=DiscriminatorLoss(weight=1.0),
        feature_matching_loss=L2NormLoss(weight=1.0),
        optimizer_name="SGD",
        learning_rate=learning_rate,
        learning_attenuate_rate=1.0,
        attenuate_epoch=50,
        hybridize_flag=True,
        scale=1.0,
        ctx=ctx,
        initializer=initializer,
    )

    self.GAN = GAN
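A hedged usage sketch of the default setting built above. The enclosing class name, written here as `GANImageGenerator`, is an assumption; the keyword arguments and the `GAN` attribute are taken from the constructor.

# Hypothetical usage of the builder defined above (class name assumed).
gan_image_generator = GANImageGenerator(
    dir_list=["train/images/"],   # placeholder path(s) to training images
    width=28,
    height=28,
    channel=1,
    batch_size=40,
    learning_rate=1e-03,
    ctx=mx.gpu(),
)
# The configured controller is exposed as the `GAN` attribute (see `self.GAN = GAN` above).
gan_controller = gan_image_generator.GAN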
def __init__(
    self,
    dir_list,
    test_dir_list,
    width=28,
    height=28,
    channel=1,
    initializer_f=None,
    optimizer_f=None,
    batch_size=40,
    learning_rate=0.0002,
    ctx="cpu",
    discriminative_model=None,
    generative_model=None,
    re_encoder_model=None,
    advarsarial_loss_weight=1.0,
    encoding_loss_weight=1.0,
    contextual_loss_weight=1.0,
    discriminator_loss_weight=1.0,
):
    '''
    Init.

    If you are not satisfied with this simple default setting,
    delegate a `discriminative_model` and a `generative_model` of your own design.

    Args:
        dir_list:       `list` of `str` of path to image files.
        test_dir_list:  `list` of `str` of path to image files for test.
        width:          `int` of image width.
        height:         `int` of image height.
        channel:        `int` of image channel.
        initializer_f:  `func` of initializer for the parameters of the model. If `None`, a default initializer is used.
        optimizer_f:    `func` of optimizer for the parameters of the model. If `None`, a default optimizer is used.
        batch_size:     `int` of batch size.
        learning_rate:  `float` of learning rate.
        ctx:            device context, e.g. `"cpu"`.
        discriminative_model:   is-a `accelbrainbase.observabledata._torch.adversarialmodel.discriminative_model.DiscriminativeModel`.
        generative_model:       is-a `accelbrainbase.observabledata._torch.adversarialmodel.generative_model.GenerativeModel`.
        re_encoder_model:       is-a re-encoder network such as `ConvolutionalNeuralNetworks`.
        advarsarial_loss_weight:    `float` of weight for adversarial loss.
        encoding_loss_weight:       `float` of weight for encoding loss.
        contextual_loss_weight:     `float` of weight for contextual loss.
        discriminator_loss_weight:  `float` of weight for discriminator loss.
    '''
    image_extractor = ImageExtractor(
        width=width,
        height=height,
        channel=channel,
        ctx=ctx
    )

    unlabeled_image_iterator = UnlabeledImageIterator(
        image_extractor=image_extractor,
        dir_list=dir_list,
        batch_size=batch_size,
        norm_mode="z_score",
        #scale=1/1000,
        noiseable_data=GaussNoise(sigma=1e-08, mu=0.0),
    )

    test_unlabeled_image_iterator = UnlabeledImageIterator(
        image_extractor=image_extractor,
        dir_list=test_dir_list,
        batch_size=batch_size,
        norm_mode="z_score",
        #scale=1/1000,
        noiseable_data=GaussNoise(sigma=1e-08, mu=0.0),
    )

    true_sampler = TrueSampler()
    true_sampler.iteratorable_data = unlabeled_image_iterator

    condition_sampler = ConditionSampler()
    condition_sampler.true_sampler = true_sampler

    computable_loss = L2NormLoss()

    if discriminative_model is None:
        output_nn = NeuralNetworks(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            optimizer_f=optimizer_f,
            learning_rate=learning_rate,
            units_list=[1],
            dropout_rate_list=[0.0],
            activation_list=[torch.nn.Sigmoid()],
            hidden_batch_norm_list=[None],
            ctx=ctx,
            regularizatable_data_list=[],
            output_no_bias_flag=True,
            all_no_bias_flag=True,
            not_init_flag=False,
        )

        d_model = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            optimizer_f=optimizer_f,
            learning_rate=learning_rate,
            hidden_units_list=[
                torch.nn.Conv2d(in_channels=channel, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=output_nn,
            hidden_dropout_rate_list=[0.5, 0.5],
            hidden_batch_norm_list=[
                torch.nn.BatchNorm2d(16),
                torch.nn.BatchNorm2d(32)
            ],
            hidden_activation_list=[torch.nn.ReLU(), torch.nn.ReLU()],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            regularizatable_data_list=[],
        )

        discriminative_model = DiscriminativeModel(
            model=d_model,
            learning_rate=learning_rate,
            ctx=ctx,
        )
    else:
        if isinstance(discriminative_model, DiscriminativeModel) is False:
            raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")

    if re_encoder_model is None:
        re_encoder_model = ConvolutionalNeuralNetworks(
            # is-a `ComputableLoss` or a comparable loss object.
            computable_loss=computable_loss,
            # `list` of hidden layers (`torch.nn.Conv2d`).
            hidden_units_list=[
                torch.nn.Conv2d(in_channels=channel, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            ],
            # `list` of activation functions in the hidden layers.
            hidden_activation_list=[torch.nn.ReLU(), torch.nn.ReLU()],
            # `list` of `float` of dropout rate.
            hidden_dropout_rate_list=[0.5, 0.5],
            # `list` of `torch.nn.BatchNorm2d`.
            hidden_batch_norm_list=[
                torch.nn.BatchNorm2d(16),
                torch.nn.BatchNorm2d(32)
            ],
            # Device context.
            ctx=ctx,
        )

    if generative_model is None:
        encoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            hidden_units_list=[
                torch.nn.Conv2d(in_channels=channel, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            ],
            hidden_activation_list=[torch.nn.ReLU(), torch.nn.ReLU()],
            hidden_dropout_rate_list=[0.5, 0.5],
            hidden_batch_norm_list=[
                torch.nn.BatchNorm2d(16),
                torch.nn.BatchNorm2d(32)
            ],
            ctx=ctx,
        )

        decoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            hidden_units_list=[
                torch.nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.ConvTranspose2d(in_channels=16, out_channels=channel, kernel_size=3, stride=1, padding=1),
            ],
            hidden_activation_list=[
                torch.nn.ReLU(),
                "identity",
            ],
            hidden_dropout_rate_list=[0.5, 0.0],
            hidden_batch_norm_list=[torch.nn.BatchNorm2d(16), None],
            ctx=ctx,
        )

        g_model = ConvolutionalAutoEncoder(
            # is-a `ConvolutionalNeuralNetworks`.
            encoder=encoder,
            # is-a `ConvolutionalNeuralNetworks`.
            decoder=decoder,
            computable_loss=computable_loss,
            learning_rate=learning_rate,
            ctx=ctx,
            regularizatable_data_list=[],
        )

        generative_model = GenerativeModel(
            noise_sampler=UniformNoiseSampler(
                low=-1e-05,
                high=1e-05,
                batch_size=batch_size,
                seq_len=0,
                channel=channel,
                height=height,
                width=width,
                ctx=ctx
            ),
            model=g_model,
            condition_sampler=condition_sampler,
            conditonal_dim=1,
            learning_rate=learning_rate,
            ctx=ctx,
        )
    else:
        if isinstance(generative_model, GenerativeModel) is False:
            raise TypeError("The type of `generative_model` must be `GenerativeModel`.")

    ganomaly_controller = GanomalyController(
        generative_model=generative_model,
        re_encoder_model=re_encoder_model,
        discriminative_model=discriminative_model,
        advarsarial_loss=L2NormLoss(weight=advarsarial_loss_weight),
        encoding_loss=L2NormLoss(weight=encoding_loss_weight),
        contextual_loss=L2NormLoss(weight=contextual_loss_weight),
        discriminator_loss=DiscriminatorLoss(weight=discriminator_loss_weight),
        feature_matching_loss=None,
        learning_rate=learning_rate,
        ctx=ctx,
    )

    self.ganomaly_controller = ganomaly_controller
    self.test_unlabeled_image_iterator = test_unlabeled_image_iterator
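A hedged usage sketch of the GANomaly builder above. The class name `GanomalyImageGenerator` is an assumption; the keyword arguments and the exposed attributes come from the constructor.

# Hypothetical usage of the GANomaly builder above (class name assumed).
ganomaly_image_generator = GanomalyImageGenerator(
    dir_list=["train/images/"],        # placeholder training image directory
    test_dir_list=["test/images/"],    # placeholder held-out image directory
    width=28,
    height=28,
    channel=1,
    batch_size=40,
    learning_rate=0.0002,
    ctx="cpu",
    advarsarial_loss_weight=1.0,
    encoding_loss_weight=1.0,
    contextual_loss_weight=1.0,
    discriminator_loss_weight=1.0,
)
# The wired-up controller and the test iterator are exposed as attributes.
controller = ganomaly_image_generator.ganomaly_controller
test_iter = ganomaly_image_generator.test_unlabeled_image_iterator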
def __init__(
    self,
    generative_model,
    re_encoder_model,
    discriminative_model,
    advarsarial_loss,
    encoding_loss,
    contextual_loss,
    discriminator_loss,
    feature_matching_loss=None,
    learning_rate=1e-05,
    ctx="cpu",
    anomaly_score_lambda=0.5,
):
    '''
    Init.

    Args:
        generative_model:       is-a `GenerativeModel`.
        re_encoder_model:       is-a re-encoder network such as `ConvolutionalNeuralNetworks`.
        discriminative_model:   is-a `DiscriminativeModel`.
        advarsarial_loss:       is-a loss function for the adversarial loss (e.g. `L2NormLoss`).
        encoding_loss:          is-a loss function for the encoding loss (e.g. `L2NormLoss`).
        contextual_loss:        is-a loss function for the contextual loss (e.g. `L2NormLoss`).
        discriminator_loss:     is-a `DiscriminatorLoss`.
        feature_matching_loss:  is-a loss function for feature matching.
        learning_rate:          `float` of learning rate.
        ctx:                    device context, e.g. `"cpu"`.
        anomaly_score_lambda:   `float` of trade-off parameter for computing the anomaly scores.
                                Anomaly score = `anomaly_score_lambda` * `contextual_loss` + (1 - `anomaly_score_lambda`) * `encoding_loss`.
    '''
    if isinstance(generative_model, GenerativeModel) is False:
        raise TypeError("The type of `generative_model` must be `GenerativeModel`.")
    if isinstance(discriminative_model, DiscriminativeModel) is False:
        raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")
    if isinstance(discriminator_loss, DiscriminatorLoss) is False:
        raise TypeError("The type of `discriminator_loss` must be `DiscriminatorLoss`.")

    super().__init__(
        true_sampler=TrueSampler(),
        generative_model=generative_model,
        discriminative_model=discriminative_model,
        discriminator_loss=discriminator_loss,
        generator_loss=GeneratorLoss(),
        feature_matching_loss=feature_matching_loss,
        learning_rate=learning_rate,
        ctx=ctx,
    )
    self.__re_encoder_model = re_encoder_model
    self.__advarsarial_loss = advarsarial_loss
    self.__encoding_loss = encoding_loss
    self.__contextual_loss = contextual_loss
    self.__anomaly_score_lambda = anomaly_score_lambda

    logger = getLogger("accelbrainbase")
    self.__logger = logger
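The docstring defines the anomaly score as a convex combination of the contextual loss and the encoding loss. A minimal, framework-free sketch of that formula (the helper name is illustrative, not part of the library):

def anomaly_score(contextual_loss, encoding_loss, anomaly_score_lambda=0.5):
    # Anomaly score = lambda * contextual_loss + (1 - lambda) * encoding_loss,
    # as stated in the docstring of `anomaly_score_lambda`.
    return anomaly_score_lambda * contextual_loss + (1 - anomaly_score_lambda) * encoding_loss

# e.g. contextual_loss=0.8, encoding_loss=0.2, anomaly_score_lambda=0.5 -> score 0.5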
def __init__(
    self,
    dir_list,
    width=28,
    height=28,
    channel=1,
    normal_height=14,
    normal_width=14,
    normal_channel=32,
    initializer=None,
    batch_size=40,
    learning_rate=1e-03,
    ctx=mx.gpu(),
    discriminative_model=None,
    generative_model=None,
    discriminator_loss_weight=1.0,
    reconstruction_loss_weight=1.0,
    feature_matching_loss_weight=1.0,
):
    '''
    Init.

    If you are not satisfied with this simple default setting,
    delegate a `discriminative_model` and a `generative_model` of your own design.

    Args:
        dir_list:       `list` of `str` of path to image files.
        width:          `int` of image width.
        height:         `int` of image height.
        channel:        `int` of image channel.
        normal_width:   `int` of width of image drawn from normal distribution, p(z).
        normal_height:  `int` of height of image drawn from normal distribution, p(z).
        normal_channel: `int` of channel of image drawn from normal distribution, p(z).
        initializer:    is-a `mxnet.initializer.Initializer` for the parameters of the model. If `None`, `mxnet.initializer.Uniform()` is used.
        batch_size:     `int` of batch size.
        learning_rate:  `float` of learning rate.
        ctx:            `mx.gpu()` or `mx.cpu()`.
        discriminative_model:   is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.discriminative_model.discriminativemodel.eb_discriminative_model.EBDiscriminativeModel`.
        generative_model:       is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.generative_model.GenerativeModel`.
        discriminator_loss_weight:      `float` of weight for discriminator loss.
        reconstruction_loss_weight:     `float` of weight for reconstruction loss.
        feature_matching_loss_weight:   `float` of weight for feature matching loss.
    '''
    image_extractor = ImageExtractor(
        width=width,
        height=height,
        channel=channel,
        ctx=ctx
    )

    unlabeled_image_iterator = UnlabeledImageIterator(
        image_extractor=image_extractor,
        dir_list=dir_list,
        batch_size=batch_size,
        norm_mode="z_score",
        scale=1.0,
        noiseable_data=GaussNoise(sigma=1e-03, mu=0.0),
    )

    computable_loss = L2NormLoss()

    if initializer is None:
        initializer = mx.initializer.Uniform()
    else:
        if isinstance(initializer, mx.initializer.Initializer) is False:
            raise TypeError("The type of `initializer` must be `mxnet.initializer.Initializer`.")

    if discriminative_model is None:
        d_encoder = ConvolutionalNeuralNetworks(
            # is-a `ComputableLoss` or `mxnet.gluon.loss`.
            computable_loss=computable_loss,
            # `list` of `mxnet.gluon.nn.Conv2D` hidden layers.
            hidden_units_list=[
                Conv2D(channels=16, kernel_size=6, strides=(2, 2), padding=(1, 1)),
                Conv2D(channels=32, kernel_size=3, strides=(1, 1), padding=(1, 1)),
            ],
            # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`.
            hidden_activation_list=["relu", "relu"],
            # `list` of `float` of dropout rate.
            hidden_dropout_rate_list=[0.5, 0.5],
            # `list` of `mxnet.gluon.nn.BatchNorm`.
            hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
            # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
            hybridize_flag=True,
            # `mx.gpu()` or `mx.cpu()`.
            ctx=ctx,
        )

        d_decoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            # `list` of `mxnet.gluon.nn.Conv2DTranspose` hidden layers.
            hidden_units_list=[
                Conv2DTranspose(channels=16, kernel_size=3, strides=(1, 1), padding=(1, 1)),
                Conv2DTranspose(channels=32, kernel_size=6, strides=(2, 2), padding=(1, 1)),
            ],
            hidden_activation_list=["identity", "identity"],
            hidden_dropout_rate_list=[0.0, 0.0],
            hidden_batch_norm_list=[BatchNorm(), None],
            hybridize_flag=True,
            ctx=ctx,
        )

        d_model = ConvolutionalAutoEncoder(
            # is-a `ConvolutionalNeuralNetworks`.
            encoder=d_encoder,
            # is-a `ConvolutionalNeuralNetworks`.
            decoder=d_decoder,
            computable_loss=computable_loss,
            # `bool` of flag to tie weights or not.
            tied_weights_flag=True,
            hybridize_flag=True,
            ctx=ctx,
        )
        d_model.batch_size = batch_size

        discriminative_model = EBDiscriminativeModel(
            # is-a `ConvolutionalAutoEncoder`.
            model=d_model,
            hybridize_flag=True,
            ctx=ctx,
        )
    else:
        if isinstance(discriminative_model, DiscriminativeModel) is False:
            raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")

    if generative_model is None:
        encoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer=initializer,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hidden_units_list=[
                Conv2D(channels=16, kernel_size=6, strides=(2, 2), padding=(0, 0)),
                Conv2D(channels=32, kernel_size=3, strides=(1, 1), padding=(1, 1)),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=None,
            hidden_dropout_rate_list=[0.5, 0.5],
            hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
            optimizer_name="SGD",
            hidden_activation_list=["relu", "relu"],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            hybridize_flag=True,
            regularizatable_data_list=[],
            scale=1.0,
        )

        decoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer=initializer,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hidden_units_list=[
                Conv2DTranspose(channels=16, kernel_size=3, strides=(1, 1), padding=(1, 1)),
                Conv2DTranspose(channels=channel, kernel_size=6, strides=(2, 2), padding=(0, 0)),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=None,
            hidden_dropout_rate_list=[0.0, 0.0],
            hidden_batch_norm_list=[BatchNorm(), None],
            optimizer_name="SGD",
            hidden_activation_list=["identity", "identity"],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            hybridize_flag=True,
            regularizatable_data_list=[],
            scale=1.0,
        )

        g_model = ConvolutionalAutoEncoder(
            encoder=encoder,
            decoder=decoder,
            computable_loss=computable_loss,
            initializer=initializer,
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            optimizer_name="SGD",
            ctx=ctx,
            hybridize_flag=True,
            regularizatable_data_list=[],
            scale=1.0,
        )
        g_model.batch_size = batch_size

        true_sampler = TrueSampler()
        true_sampler.iteratorable_data = unlabeled_image_iterator

        condition_sampler = ConditionSampler()
        condition_sampler.true_sampler = true_sampler

        generative_model = GenerativeModel(
            noise_sampler=UniformNoiseSampler(
                low=-1e-03,
                high=1e-03,
                batch_size=batch_size,
                seq_len=0,
                channel=channel,
                height=height,
                width=width,
                ctx=ctx
            ),
            model=g_model,
            initializer=initializer,
            condition_sampler=condition_sampler,
            conditonal_dim=1,
            learning_rate=learning_rate,
            optimizer_name="SGD",
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
        )
    else:
        if isinstance(generative_model, GenerativeModel) is False:
            raise TypeError("The type of `generative_model` must be `GenerativeModel`.")

    normal_true_sampler = NormalTrueSampler(
        batch_size=batch_size,
        seq_len=0,
        channel=normal_channel,
        height=normal_height,
        width=normal_width,
        ctx=ctx
    )

    EBAAE = EBAAEController(
        true_sampler=normal_true_sampler,
        generative_model=generative_model,
        discriminative_model=discriminative_model,
        discriminator_loss=EBDiscriminatorLoss(weight=discriminator_loss_weight),
        reconstruction_loss=L2NormLoss(weight=reconstruction_loss_weight),
        feature_matching_loss=L2NormLoss(weight=feature_matching_loss_weight),
        optimizer_name="SGD",
        learning_rate=learning_rate,
        learning_attenuate_rate=1.0,
        attenuate_epoch=50,
        hybridize_flag=True,
        scale=1.0,
        ctx=ctx,
        initializer=initializer,
    )

    self.EBAAE = EBAAE
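A hedged usage sketch of the energy-based adversarial auto-encoder builder above. The class name `EBAAEImageGenerator` is an assumption; the `normal_*` arguments set the shape of samples drawn from p(z) by `NormalTrueSampler`, as in the code.

# Hypothetical usage of the EBAAE builder above (class name assumed).
ebaae_image_generator = EBAAEImageGenerator(
    dir_list=["train/images/"],   # placeholder training image directory
    width=28,
    height=28,
    channel=1,
    normal_height=14,             # shape of samples drawn from p(z)
    normal_width=14,
    normal_channel=32,
    batch_size=40,
    learning_rate=1e-03,
    ctx=mx.gpu(),
)
controller = ebaae_image_generator.EBAAE   # the configured `EBAAEController`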
def __init__(
    self,
    dir_list,
    width=28,
    height=28,
    channel=1,
    initializer_f=None,
    optimizer_f=None,
    batch_size=40,
    learning_rate=1e-03,
    ctx="cpu",
    discriminative_model=None,
    generative_model=None,
    discriminator_loss_weight=1.0,
    feature_matching_loss_weight=1.0,
):
    '''
    Init.

    If you are not satisfied with this simple default setting,
    delegate a `discriminative_model` and a `generative_model` of your own design.

    Args:
        dir_list:       `list` of `str` of path to image files.
        width:          `int` of image width.
        height:         `int` of image height.
        channel:        `int` of image channel.
        initializer_f:  `func` of initializer for the parameters of the model. If `None`, a default initializer is used.
        optimizer_f:    `func` of optimizer for the parameters of the model. If `None`, a default optimizer is used.
        batch_size:     `int` of batch size.
        learning_rate:  `float` of learning rate.
        ctx:            device context, e.g. `"cpu"`.
        discriminative_model:   is-a `accelbrainbase.observabledata._torch.adversarialmodel.discriminative_model.discriminativemodel.eb_discriminative_model.EBDiscriminativeModel`.
        generative_model:       is-a `accelbrainbase.observabledata._torch.adversarialmodel.generative_model.GenerativeModel`.
        discriminator_loss_weight:      `float` of weight for discriminator loss.
        feature_matching_loss_weight:   `float` of weight for feature matching loss.
    '''
    image_extractor = ImageExtractor(
        width=width,
        height=height,
        channel=channel,
        ctx=ctx
    )

    unlabeled_image_iterator = UnlabeledImageIterator(
        image_extractor=image_extractor,
        dir_list=dir_list,
        batch_size=batch_size,
        norm_mode="min_max",
        scale=0.9,
        noiseable_data=GaussNoise(sigma=1e-08, mu=0.005),
    )

    true_sampler = TrueSampler()
    true_sampler.iteratorable_data = unlabeled_image_iterator

    computable_loss = L2NormLoss()

    if discriminative_model is None:
        encoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            optimizer_f=optimizer_f,
            learning_rate=learning_rate,
            hidden_units_list=[
                torch.nn.Conv2d(in_channels=channel, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=None,
            hidden_dropout_rate_list=[0.5, 0.5],
            hidden_batch_norm_list=[
                torch.nn.BatchNorm2d(16),
                torch.nn.BatchNorm2d(32)
            ],
            hidden_activation_list=[torch.nn.ReLU(), torch.nn.ReLU()],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            regularizatable_data_list=[],
        )

        decoder = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            learning_rate=learning_rate,
            hidden_units_list=[
                torch.nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.ConvTranspose2d(in_channels=16, out_channels=channel, kernel_size=3, stride=1, padding=1),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=None,
            hidden_dropout_rate_list=[0.5, 0.0],
            hidden_batch_norm_list=[torch.nn.BatchNorm2d(16), None],
            hidden_activation_list=[torch.nn.ReLU(), "identity"],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            regularizatable_data_list=[],
        )

        d_model = ConvolutionalAutoEncoder(
            encoder=encoder,
            decoder=decoder,
            computable_loss=computable_loss,
            learning_rate=learning_rate,
            ctx=ctx,
            regularizatable_data_list=[],
        )
        d_model.batch_size = batch_size

        discriminative_model = EBDiscriminativeModel(
            model=d_model,
            learning_rate=learning_rate,
            ctx=ctx,
        )
    else:
        if isinstance(discriminative_model, EBDiscriminativeModel) is False:
            raise TypeError("The type of `discriminative_model` must be `EBDiscriminativeModel`.")

    if generative_model is None:
        g_model = ConvolutionalNeuralNetworks(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            optimizer_f=optimizer_f,
            learning_rate=learning_rate,
            hidden_units_list=[
                torch.nn.ConvTranspose2d(in_channels=channel, out_channels=16, kernel_size=3, stride=1, padding=1),
                torch.nn.ConvTranspose2d(in_channels=16, out_channels=channel, kernel_size=3, stride=1, padding=1),
            ],
            input_nn=None,
            input_result_height=None,
            input_result_width=None,
            input_result_channel=None,
            output_nn=None,
            hidden_dropout_rate_list=[0.5, 0.0],
            hidden_batch_norm_list=[torch.nn.BatchNorm2d(16), None],
            hidden_activation_list=[
                torch.nn.ReLU(),
                torch.nn.Sigmoid(),
            ],
            hidden_residual_flag=False,
            hidden_dense_flag=False,
            dense_axis=1,
            ctx=ctx,
            regularizatable_data_list=[],
        )

        condition_sampler = ConditionSampler()
        condition_sampler.true_sampler = true_sampler

        generative_model = GenerativeModel(
            noise_sampler=UniformNoiseSampler(
                low=-1e-05,
                high=1e-05,
                batch_size=batch_size,
                seq_len=0,
                channel=channel,
                height=height,
                width=width,
                ctx=ctx
            ),
            model=g_model,
            condition_sampler=condition_sampler,
            conditonal_dim=1,
            learning_rate=learning_rate,
            ctx=ctx,
        )
    else:
        if isinstance(generative_model, GenerativeModel) is False:
            raise TypeError("The type of `generative_model` must be `GenerativeModel`.")

    EBGAN = EBGANController(
        true_sampler=true_sampler,
        generative_model=generative_model,
        discriminative_model=discriminative_model,
        discriminator_loss=EBDiscriminatorLoss(weight=discriminator_loss_weight),
        feature_matching_loss=L2NormLoss(weight=feature_matching_loss_weight),
        learning_rate=learning_rate,
        ctx=ctx,
    )

    self.EBGAN = EBGAN
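A hedged usage sketch of the EBGAN builder above; the class name `EBGANImageGenerator` is an assumption, and the `EBGAN` attribute is the one set in the last line of the constructor.

# Hypothetical usage of the EBGAN builder above (class name assumed).
ebgan_image_generator = EBGANImageGenerator(
    dir_list=["train/images/"],   # placeholder training image directory
    width=28,
    height=28,
    channel=1,
    batch_size=40,
    learning_rate=1e-03,
    ctx="cpu",
    discriminator_loss_weight=1.0,
    feature_matching_loss_weight=1.0,
)
controller = ebgan_image_generator.EBGAN   # the configured `EBGANController`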
def __init__(
    self,
    generative_model,
    clustering_model,
    discriminative_model,
    discriminator_loss,
    generator_loss,
    consistency_loss,
    feature_matching_loss=None,
    initializer=None,
    optimizer_name="SGD",
    learning_rate=1e-05,
    learning_attenuate_rate=1.0,
    attenuate_epoch=50,
    hybridize_flag=True,
    scale=1.0,
    wd=0.07,
    ctx=mx.cpu(),
    **kwargs
):
    '''
    Init.

    Args:
        generative_model:       is-a `GenerativeModel`.
        clustering_model:       is-a `GenerativeModel`.
        discriminative_model:   is-a `DiscriminativeModel`.
        generator_loss:         is-a `GeneratorLoss`.
        discriminator_loss:     is-a `GANDiscriminatorLoss`.
        consistency_loss:       is-a `Loss`.
        feature_matching_loss:  is-a `GANFeatureMatchingLoss`.
        initializer:            is-a `mxnet.initializer.Initializer` for the parameters of the model. If `None`, `mxnet.initializer.Xavier` is used.
        optimizer_name:         `str` of name of optimizer.
        learning_rate:          `float` of learning rate.
        learning_attenuate_rate:    `float` of attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
        attenuate_epoch:        `int` of attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                Additionally, in relation to regularization, this class constrains weight matrices every `attenuate_epoch`.
        hybridize_flag:         Call `mxnet.gluon.HybridBlock.hybridize()` or not.
        scale:                  `float` of scaling factor for initial parameters.
        wd:                     `float` of weight decay passed to `mxnet.gluon.Trainer`.
        ctx:                    `mx.cpu()` or `mx.gpu()`.
    '''
    logger = getLogger("accelbrainbase")
    self.__logger = logger

    init_deferred_flag = self.init_deferred_flag
    self.init_deferred_flag = True

    if generator_loss is None:
        _generator_loss = GeneratorLoss()
    else:
        _generator_loss = generator_loss

    super().__init__(
        true_sampler=TrueSampler(),
        generative_model=clustering_model,
        discriminative_model=discriminative_model,
        generator_loss=_generator_loss,
        discriminator_loss=discriminator_loss,
        feature_matching_loss=feature_matching_loss,
        optimizer_name=optimizer_name,
        learning_rate=learning_rate,
        learning_attenuate_rate=learning_attenuate_rate,
        attenuate_epoch=attenuate_epoch,
        hybridize_flag=hybridize_flag,
        scale=scale,
        ctx=ctx,
        **kwargs
    )
    self.init_deferred_flag = init_deferred_flag

    self.__generative_model = generative_model
    self.__clustering_model = clustering_model
    self.__discriminative_model = discriminative_model
    self.__discriminator_loss = discriminator_loss
    self.__generator_loss = generator_loss
    self.__consistency_loss = consistency_loss
    self.__feature_matching_loss = feature_matching_loss

    if initializer is None:
        self.initializer = mx.initializer.Xavier(
            rnd_type="gaussian",
            factor_type="in",
            magnitude=2
        )
    else:
        if isinstance(initializer, mx.initializer.Initializer) is False:
            raise TypeError("The type of `initializer` must be `mxnet.initializer.Initializer`.")
        self.initializer = initializer

    if self.init_deferred_flag is False:
        try:
            self.collect_params().initialize(self.initializer, force_reinit=False, ctx=ctx)
            self.__clustering_model.collect_params().initialize(self.initializer, force_reinit=False, ctx=ctx)
            self.__generative_model.collect_params().initialize(self.initializer, force_reinit=False, ctx=ctx)
            self.__discriminative_model.collect_params().initialize(self.initializer, force_reinit=False, ctx=ctx)

            self.clusterer_trainer = gluon.Trainer(
                self.__clustering_model.collect_params(),
                optimizer_name,
                {
                    "learning_rate": learning_rate,
                    "wd": wd
                }
            )
            self.generator_trainer = gluon.Trainer(
                self.__generative_model.collect_params(),
                optimizer_name,
                {
                    "learning_rate": learning_rate,
                    "wd": wd
                }
            )
            self.discriminator_trainer = gluon.Trainer(
                self.__discriminative_model.collect_params(),
                optimizer_name,
                {
                    "learning_rate": learning_rate,
                    "wd": wd
                }
            )

            if hybridize_flag is True:
                self.generative_model.hybridize()
                self.generative_model.model.hybridize()
                self.clustering_model.hybridize()
                self.clustering_model.model.hybridize()
                if self.clustering_model.model.output_nn is not None:
                    self.clustering_model.model.output_nn.hybridize()
                self.discriminative_model.hybridize()
                self.discriminative_model.model.hybridize()
        except InitDeferredError:
            self.__logger.debug("The initialization should be deferred.")

    self.__learning_rate = learning_rate
    self.__learning_attenuate_rate = learning_attenuate_rate
    self.__attenuate_epoch = attenuate_epoch
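The docstring describes attenuating the `learning_rate` by `learning_attenuate_rate` every `attenuate_epoch` epochs. A hedged sketch of how such a schedule could be applied to the three `gluon.Trainer` objects created above; `controller` and `epoch` are placeholders from a training loop written elsewhere, and this illustrates the docstring, not the controller's actual update code.

# Illustrative only: attenuate the learning rate of the three trainers once
# every `attenuate_epoch` epochs, as described in the docstring.
if epoch != 0 and epoch % attenuate_epoch == 0:
    new_learning_rate = controller.clusterer_trainer.learning_rate * learning_attenuate_rate
    controller.clusterer_trainer.set_learning_rate(new_learning_rate)
    controller.generator_trainer.set_learning_rate(new_learning_rate)
    controller.discriminator_trainer.set_learning_rate(new_learning_rate)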