def __init__(
        self,
        extractable_historical_data,
        ticker_list,
        start_date,
        end_date,
        batch_size=50,
        seq_len=30,
        learning_rate=1e-04,
        target_features_list=None,
        ctx=mx.gpu(),
        g_params_path="params/recursive_seq_2_seq_model_model.params",
        re_e_params_path="params/recursive_seq_2_seq_model_re_encoder_model.params",
        d_params_path="params/discriminative_model_model.params",
        transfer_flag=True,
        diff_mode=True,
        log_mode=True,
        multi_fractal_mode=True,
        long_term_seq_len=30,
    ):
        '''
        Init.

        Args:
            extractable_historical_data:        is-a `ExtractableHistoricalData`.
            ticker_list:                        `list` of tickers.
            start_date:                         `str` of start date.
            end_date:                           `str` of end date.
            batch_size:                         `int` of batch size.
            seq_len:                            `int` of the length of sequence.
            learning_rate:                      `float` of learning rate.
            target_features_list:               `list` of `str`. Each value is ...
                                                    - `adjusted_close`: adjusted close.
                                                    - `close`: close.
                                                    - `high`: high value.
                                                    - `low`: low value.
                                                    - `open`: open.
                                                    - `volume`: volume.

            ctx:                                `mx.gpu()` or `mx.cpu()`.
            g_params_path:                      `str` of path to generator's learned parameters.
            re_e_params_path:                   `str` of path to re-encoder's learned parameters.
            d_params_path:                      `str` of path to discriminator's learned parameters.
            transfer_flag:                      `bool`. If `True`, this class will do transfer learning.
            diff_mode:                          `bool`. If `True`, this class outputs difference sequences.
            log_mode:                           `bool`. If `True`, this class outputs logarithmic rates of change.
            multi_fractal_mode:                 `bool`. If `True`, the generator is a `MultiFractalSeq2SeqModel`; otherwise a `RecursiveSeq2SeqModel`.
            long_term_seq_len:                  `int` of the length of the long-term sequence.
        '''
        if isinstance(extractable_historical_data, ExtractableHistoricalData) is False:
            raise TypeError(
                "The type of `extractable_historical_data` must be `ExtractableHistoricalData`."
            )
        self.__extractable_historical_data = extractable_historical_data

        if target_features_list is None:
            target_features_list = self.__target_features_list

        computable_loss = L2NormLoss()
        initializer = mx.initializer.Uniform(0.3)
        hybridize_flag = True

        volatility_conditional_true_sampler = VolatilityConditionalTrueSampler(
            extractable_historical_data=extractable_historical_data,
            ticker_list=ticker_list,
            start_date=start_date,
            end_date=end_date,
            batch_size=batch_size, 
            seq_len=seq_len, 
            channel=len(ticker_list),
            target_features_list=target_features_list,
            diff_mode=diff_mode,
            log_mode=log_mode,
            ctx=ctx,
            lstm_mode=True,
            expand_dims_flag=False
        )

        volatility_conditional_noise_sampler = VolatilityConditionalNoiseSampler(
            extractable_historical_data=extractable_historical_data,
            ticker_list=ticker_list,
            start_date=start_date,
            end_date=end_date,
            batch_size=batch_size, 
            seq_len=seq_len, 
            channel=len(ticker_list),
            target_features_list=target_features_list,
            diff_mode=diff_mode,
            log_mode=log_mode,
            ctx=ctx,
            lstm_mode=True
        )

        if multi_fractal_mode is True:
            _Seq2Seq = MultiFractalSeq2SeqModel
        else:
            _Seq2Seq = RecursiveSeq2SeqModel
            print("not multi-fractal mode.")

        generative_model = _Seq2Seq(
            batch_size=batch_size,
            seq_len=seq_len,
            output_n=len(ticker_list),
            hidden_n=len(ticker_list),
            noise_sampler=volatility_conditional_noise_sampler, 
            model=None, 
            initializer=None,
            computable_loss=None,
            condition_sampler=None,
            conditonal_dim=1,
            learning_rate=learning_rate,
            optimizer_name="SGD",
            hybridize_flag=hybridize_flag,
            scale=1.0, 
            ctx=ctx, 
            channel=len(ticker_list),
            diff_mode=diff_mode,
            log_mode=log_mode,
            expand_dims_flag=False
        )

        generative_model.long_term_seq_len = long_term_seq_len

        o_act = "sigmoid"

        d_model = LSTMNetworks(
            # is-a `ComputableLoss` or `mxnet.gluon.loss`.
            computable_loss=computable_loss,
            # `int` of batch size.
            batch_size=batch_size,
            # `int` of the length of series.
            seq_len=seq_len*2,
            # `int` of the number of units in hidden layer.
            hidden_n=len(ticker_list),
            # `int` of the number of units in output layer.
            output_n=1,
            # `float` of dropout rate.
            dropout_rate=0.0,
            # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` 
            # that activates observed data points.
            observed_activation="tanh",
            # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in input gate.
            input_gate_activation="sigmoid",
            # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in forget gate.
            forget_gate_activation="sigmoid",
            # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output gate.
            output_gate_activation="sigmoid",
            # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layer.
            hidden_activation="tanh",
            # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output layer.
            output_activation=o_act,
            # `bool` that means this class has output layer or not.
            output_layer_flag=True,
            # `bool` for using bias or not in output layer (last hidden layer).
            output_no_bias_flag=False,
            # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
            hybridize_flag=hybridize_flag,
            # `mx.cpu()` or `mx.gpu()`.
            ctx=ctx,
            input_adjusted_flag=False
        )

        discriminative_model = DiscriminativeModel(
            model=d_model, 
            learning_rate=learning_rate,
            optimizer_name="SGD",
            hybridize_flag=hybridize_flag,
            scale=1.0, 
            ctx=ctx, 
        )

        if transfer_flag is True:
            if g_params_path is not None:
                try:
                    generative_model.model.load_parameters(g_params_path)
                except Exception:
                    print("Could not load the generator's parameters from " + str(g_params_path) + ".")
            if re_e_params_path is not None:
                try:
                    generative_model.re_encoder_model.load_parameters(re_e_params_path)
                except Exception:
                    print("Could not load the re-encoder's parameters from " + str(re_e_params_path) + ".")
            if d_params_path is not None:
                try:
                    discriminative_model.model.load_parameters(d_params_path)
                except Exception:
                    print("Could not load the discriminator's parameters from " + str(d_params_path) + ".")

        GAN = VolatilityGANController(
            true_sampler=volatility_conditional_true_sampler,
            generative_model=generative_model,
            discriminative_model=discriminative_model,
            generator_loss=GeneratorLoss(weight=1.0),
            discriminator_loss=DiscriminatorLoss(weight=1.0),
            feature_matching_loss=L2NormLoss(weight=1.0),
            mean_regression_loss=L2NormLoss(weight=0.01),
            mean_regression_weight=1.0,
            similar_loss=L2NormLoss(weight=1.0),
            similar_weight=1.0,
            optimizer_name="SGD",
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hybridize_flag=hybridize_flag,
            scale=1.0,
            ctx=ctx,
            initializer=initializer,
        )

        self.__diff_mode = diff_mode
        self.__noise_sampler = volatility_conditional_noise_sampler
        self.__true_sampler = volatility_conditional_true_sampler
        self.__generative_model = generative_model
        self.__discriminative_model = discriminative_model
        self.__GAN = GAN
        self.__ticker_list = ticker_list
        self.__seq_len = seq_len
        self.__log_mode = log_mode
        self.__g_params_path = g_params_path
        self.__re_e_params_path = re_e_params_path
        self.__d_params_path = d_params_path
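
# A minimal usage sketch for the constructor above. The enclosing class name is
# not visible in this excerpt, so `VolatilityGAN` and `YourHistoricalData` (a
# concrete `ExtractableHistoricalData` implementation) are illustrative
# placeholders, not names confirmed by the library.
#
# volatility_gan = VolatilityGAN(
#     extractable_historical_data=YourHistoricalData(),
#     ticker_list=["AAPL", "MSFT"],
#     start_date="2010-01-01",
#     end_date="2020-12-31",
#     batch_size=50,
#     seq_len=30,
#     ctx=mx.cpu(),         # fall back to CPU if no GPU is available.
#     transfer_flag=False,  # skip loading pre-trained parameters.
# )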
    def __init__(
            self,
            midi_path_list,
            batch_size=20,
            seq_len=8,
            time_fraction=1.0,
            learning_rate=1e-10,
            learning_attenuate_rate=0.1,
            attenuate_epoch=50,
            generative_model=None,
            discriminative_model=None,
            ctx=mx.gpu(),
            initializer=None,
    ):
        '''
        Init.

        Args:
            midi_path_list:                 `list` of paths to MIDI files.
            batch_size:                     Batch size.
            seq_len:                        The length of sequence that LSTM networks will observe.
            time_fraction:                  Time fraction or time resolution (seconds).

            learning_rate:                  Learning rate in `Generator` and `Discriminator`.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.

            generative_model:               is-a `GenerativeModel`.
            discriminative_model:           is-a `DiscriminativeModel`.
            ctx:                            `mx.cpu()` or `mx.gpu()`.
            initializer:                    is-a `mxnet.initializer` for parameters of model. If `None`, it is drawing from the Xavier distribution.
        '''
        computable_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss(
            sparse_label=False)

        self.__midi_controller = MidiController()
        self.__midi_df_list = [
            self.__midi_controller.extract(midi_path)
            for midi_path in midi_path_list
        ]

        bar_gram = BarGram(midi_df_list=self.__midi_df_list,
                           time_fraction=time_fraction)
        self.__bar_gram = bar_gram
        dim = self.__bar_gram.dim

        c_true_sampler = ConditionalBarGramTrueSampler(
            bar_gram=bar_gram,
            midi_df_list=self.__midi_df_list,
            batch_size=batch_size,
            seq_len=seq_len,
            time_fraction=time_fraction)

        true_sampler = BarGramTrueSampler(bar_gram=bar_gram,
                                          midi_df_list=self.__midi_df_list,
                                          batch_size=batch_size,
                                          seq_len=seq_len,
                                          time_fraction=time_fraction)

        if generative_model is None:
            condition_sampler = ConditionSampler()
            condition_sampler.true_sampler = true_sampler

            c_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=len(true_sampler.program_list),
                        kernel_size=6,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                hidden_dropout_rate_list=[0.5, 0.0],
                hidden_batch_norm_list=[BatchNorm(), None],
                optimizer_name="SGD",
                hidden_activation_list=["relu", "identity"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )
            condition_sampler.model = c_model
            g_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2DTranspose(
                        channels=16,
                        kernel_size=6,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                    Conv2DTranspose(
                        channels=len(true_sampler.program_list),
                        kernel_size=6,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                hidden_dropout_rate_list=[0.5, 0.0],
                hidden_batch_norm_list=[BatchNorm(), None],
                optimizer_name="SGD",
                hidden_activation_list=["relu", "identity"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            generative_model = GenerativeModel(
                noise_sampler=None,
                model=g_model,
                initializer=None,
                condition_sampler=condition_sampler,
                conditonal_dim=1,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if isinstance(generative_model, GenerativeModel) is False:
                raise TypeError(
                    "The type of `generative_model` must be `GenerativeModel`."
                )

        if discriminative_model is None:
            output_nn = NeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                units_list=[100, 1],
                dropout_rate_list=[0.5, 0.0],
                optimizer_name="SGD",
                activation_list=["relu", "sigmoid"],
                hidden_batch_norm_list=[BatchNorm(), None],
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
                output_no_bias_flag=True,
                all_no_bias_flag=True,
                not_init_flag=False,
            )

            d_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=3,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=output_nn,
                hidden_dropout_rate_list=[0.5, 0.5],
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                optimizer_name="SGD",
                hidden_activation_list=["relu", "relu"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            discriminative_model = DiscriminativeModel(
                model=d_model,
                initializer=None,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if isinstance(discriminative_model, DiscriminativeModel) is False:
                raise TypeError(
                    "The type of `discriminative_model` must be `DiscriminativeModel`."
                )

        GAN = GANController(
            true_sampler=c_true_sampler,
            generative_model=generative_model,
            discriminative_model=discriminative_model,
            generator_loss=GeneratorLoss(weight=1.0),
            discriminator_loss=DiscriminatorLoss(weight=1.0),
            feature_matching_loss=L2NormLoss(weight=1.0),
            optimizer_name="SGD",
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
            initializer=initializer,
        )

        self.__true_sampler = true_sampler
        self.__generative_model = generative_model
        self.__discriminative_model = discriminative_model
        self.__GAN = GAN
        self.__time_fraction = time_fraction
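
# A minimal usage sketch for the constructor above. `MidiGANComposer` is an
# illustrative placeholder for the enclosing class name, which is not visible
# in this excerpt.
#
# composer = MidiGANComposer(
#     midi_path_list=["your_song.mid"],
#     batch_size=20,
#     seq_len=8,
#     time_fraction=0.5,  # a time resolution of 0.5 seconds.
#     ctx=mx.cpu(),
# )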
    def __init__(
        self,
        dir_list,
        width=28,
        height=28,
        channel=1,
        initializer=None,
        batch_size=40,
        learning_rate=1e-03,
        ctx=mx.gpu(),
        discriminative_model=None,
        generative_model=None,
    ):
        '''
        Init.

        If you are not satisfied with this simple default setting,
        delegate `discriminative_model` and `generative_model` designed by yourself.

        Args:
            dir_list:       `list` of `str` of paths to directories of image files.
            width:          `int` of image width.
            height:         `int` of image height.
            channel:        `int` of image channel.
            initializer:    is-a `mxnet.initializer` for parameters of model.
                            If `None`, it is drawing from the Xavier distribution.
            
            batch_size:     `int` of batch size.
            learning_rate:  `float` of learning rate.
            ctx:            `mx.gpu()` or `mx.cpu()`.

            discriminative_model:       is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.discriminative_model.DiscriminativeModel`.
            generative_model:           is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.generative_model.GenerativeModel`.

        '''
        image_extractor = ImageExtractor(
            width=width,
            height=height,
            channel=channel,
            ctx=ctx
        )

        unlabeled_image_iterator = UnlabeledImageIterator(
            image_extractor=image_extractor,
            dir_list=dir_list,
            batch_size=batch_size,
            norm_mode="z_score",
            scale=1.0,
            noiseable_data=GaussNoise(sigma=1e-03, mu=0.0),
        )

        true_sampler = TrueSampler()
        true_sampler.iteratorable_data = unlabeled_image_iterator

        condition_sampler = ConditionSampler()
        condition_sampler.true_sampler = true_sampler

        computable_loss = L2NormLoss()

        if discriminative_model is None:
            output_nn = NeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                units_list=[100, 1],
                dropout_rate_list=[0.5, 0.0],
                optimizer_name="SGD",
                activation_list=["relu", "sigmoid"],
                hidden_batch_norm_list=[BatchNorm(), None],
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
                output_no_bias_flag=True,
                all_no_bias_flag=True,
                not_init_flag=False,
            )

            d_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ), 
                    Conv2D(
                        channels=32,
                        kernel_size=3,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=output_nn,
                hidden_dropout_rate_list=[0.5, 0.5],
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                optimizer_name="SGD",
                hidden_activation_list=["relu", "relu"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            discriminative_model = DiscriminativeModel(
                model=d_model, 
                initializer=None,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0, 
                ctx=ctx, 
            )
        else:
            if isinstance(discriminative_model, DiscriminativeModel) is False:
                raise TypeError("The type of `discriminative_model` must be `DiscriminativeModel`.")

        if generative_model is None:
            g_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2DTranspose(
                        channels=16,
                        kernel_size=6,
                        strides=(1, 1),
                        padding=(1, 1),
                    ), 
                    Conv2DTranspose(
                        channels=1,
                        kernel_size=3,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                hidden_dropout_rate_list=[0.5, 0.0],
                hidden_batch_norm_list=[BatchNorm(), None],
                optimizer_name="SGD",
                hidden_activation_list=["relu", "identity"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            generative_model = GenerativeModel(
                noise_sampler=UniformNoiseSampler(
                    low=-1e-05,
                    high=1e-05,
                    batch_size=batch_size,
                    seq_len=0,
                    channel=channel,
                    height=height,
                    width=width,
                    ctx=ctx
                ), 
                model=g_model, 
                initializer=None,
                condition_sampler=condition_sampler,
                conditonal_dim=1,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0, 
                ctx=ctx, 
            )
        else:
            if isinstance(generative_model, GenerativeModel) is False:
                raise TypeError("The type of `generative_model` must be `GenerativeModel`.")

        GAN = GANController(
            true_sampler=true_sampler,
            generative_model=generative_model,
            discriminative_model=discriminative_model,
            generator_loss=GeneratorLoss(weight=1.0),
            discriminator_loss=DiscriminatorLoss(weight=1.0),
            feature_matching_loss=L2NormLoss(weight=1.0),
            optimizer_name="SGD",
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
            initializer=initializer,
        )

        self.GAN = GAN
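
# Because the controller is exposed as `self.GAN`, training can be driven
# directly from it. A hedged sketch: `SimpleGANImageGenerator` is an
# illustrative name for the enclosing class, and the `learn(iter_n, k_step)`
# signature of `GANController` is assumed from the library's usual interface,
# not confirmed by this excerpt.
#
# image_gan = SimpleGANImageGenerator(
#     dir_list=["your_image_dir/"],
#     width=28,
#     height=28,
#     channel=1,
#     batch_size=40,
#     ctx=mx.cpu(),
# )
# image_gan.GAN.learn(iter_n=500, k_step=10)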
    def __init__(self,
                 batch_size,
                 seq_len,
                 output_n,
                 noise_sampler,
                 model=None,
                 re_encoder_model=None,
                 initializer=None,
                 computable_loss=None,
                 condition_sampler=None,
                 conditonal_dim=2,
                 learning_rate=1e-05,
                 optimizer_name="SGD",
                 hybridize_flag=True,
                 scale=1.0,
                 ctx=mx.gpu(),
                 channel=1000,
                 diff_mode=True,
                 log_mode=True,
                 hidden_n=200,
                 expand_dims_flag=True,
                 **kwargs):
        '''
        Init.

        Args:
            batch_size:                     `int` of batch size.
            seq_len:                        `int` of the length of sequence.
            output_n:                       `int` of the dimension of outputs.
            noise_sampler:                  is-a `NoiseSampler`.
            model:                          model.
            re_encoder_model:               is-a `ReEncoderModel`.
            initializer:                    is-a `mxnet.initializer` for parameters of model. If `None`, it is drawing from the Xavier distribution.
            computable_loss:                is-a `ComputableLoss`.
            condition_sampler:              is-a `ConditionSampler`.
            conditonal_dim:                 `int` of the dimension of conditions.
            learning_rate:                  `float` of learning rate.
            optimizer_name:                 `str` of optimizer's name.
            hybridize_flag:                 Call `mxnet.gluon.HybridBlock.hybridize()` or not. 
            scale:                          `float` of scales.
            ctx:                            `mx.cpu()` or `mx.gpu()`.
            channel:                        `int` of the number of channels of observed data points.
            diff_mode:                      `bool`. If `True`, this class outputs difference sequences.
            log_mode:                       `bool`. If `True`, this class outputs logarithmic rates of change.
            hidden_n:                       `int` of the number of hidden units.
            expand_dims_flag:               `bool`. If `True`, this class expands dimensions of output data (axis=1).

        '''
        if computable_loss is None:
            computable_loss = L2NormLoss()

        if model is None:
            if log_mode is True:
                o_act = "tanh"
            else:
                o_act = "identity"

            encoder = LSTMNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `int` of batch size.
                batch_size=batch_size,
                # `int` of the length of series.
                seq_len=seq_len,
                # `int` of the number of units in hidden layer.
                hidden_n=hidden_n,
                # `int` of the number of units in output layer.
                output_n=output_n,
                # `float` of dropout rate.
                dropout_rate=0.0,
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`
                # that activates observed data points.
                observed_activation="tanh",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in input gate.
                input_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in forget gate.
                forget_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output gate.
                output_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layer.
                hidden_activation="tanh",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output layer.
                output_activation=o_act,
                # `bool` that means this class has output layer or not.
                output_layer_flag=True,
                # `bool` for using bias or not in output layer (last hidden layer).
                output_no_bias_flag=False,
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.cpu()` or `mx.gpu()`.
                ctx=ctx,
                input_adjusted_flag=False)

            decoder = LSTMNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `int` of batch size.
                batch_size=batch_size,
                # `int` of the length of series.
                seq_len=seq_len,
                # `int` of the number of units in hidden layer.
                hidden_n=hidden_n,
                # `int` of the number of units in output layer.
                output_n=output_n,
                # `float` of dropout rate.
                dropout_rate=0.0,
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`
                # that activates observed data points.
                observed_activation="tanh",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in input gate.
                input_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in forget gate.
                forget_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output gate.
                output_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layer.
                hidden_activation="tanh",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output layer.
                output_activation=o_act,
                # `bool` that means this class has output layer or not.
                output_layer_flag=True,
                # `bool` for using bias or not in output layer (last hidden layer).
                output_no_bias_flag=False,
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.cpu()` or `mx.gpu()`.
                ctx=ctx,
                input_adjusted_flag=False)

            model = EncoderDecoder(
                # is-a `LSTMNetworks`.
                encoder=encoder,
                # is-a `LSTMNetworks`.
                decoder=decoder,
                # `int` of batch size.
                batch_size=batch_size,
                # `int` of the length of series.
                seq_len=seq_len,
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # is-a `mxnet.initializer` for parameters of model. If `None`, it is drawing from the Xavier distribution.
                initializer=initializer,
                # `float` of learning rate.
                learning_rate=learning_rate,
                # `float` of attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
                learning_attenuate_rate=1.0,
                # `int` of attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                attenuate_epoch=50,
                # `str` of name of optimizer.
                optimizer_name=optimizer_name,
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.cpu()` or `mx.gpu()`.
                ctx=ctx,
                generating_flag=False)

        if re_encoder_model is None:
            re_encoder_model = LSTMNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `int` of batch size.
                batch_size=batch_size,
                # `int` of the length of series.
                seq_len=seq_len,
                # `int` of the number of units in hidden layer.
                hidden_n=hidden_n,
                # `int` of the number of units in output layer.
                output_n=output_n,
                # `float` of dropout rate.
                dropout_rate=0.0,
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`
                # that activates observed data points.
                observed_activation="tanh",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in input gate.
                input_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in forget gate.
                forget_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output gate.
                output_gate_activation="sigmoid",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layer.
                hidden_activation="tanh",
                # `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in output layer.
                output_activation=o_act,
                # `bool` that means this class has output layer or not.
                output_layer_flag=True,
                # `bool` for using bias or not in output layer (last hidden layer).
                output_no_bias_flag=False,
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.cpu()` or `mx.gpu()`.
                ctx=ctx,
                input_adjusted_flag=False)

        init_deferred_flag = self.init_deferred_flag
        self.init_deferred_flag = True

        super().__init__(noise_sampler=noise_sampler,
                         model=model,
                         initializer=initializer,
                         condition_sampler=condition_sampler,
                         conditonal_dim=conditonal_dim,
                         learning_rate=learning_rate,
                         optimizer_name=optimizer_name,
                         hybridize_flag=hybridize_flag,
                         scale=1.0,
                         ctx=ctx,
                         **kwargs)
        self.init_deferred_flag = init_deferred_flag

        self.re_encoder_model = re_encoder_model

        if initializer is None:
            self.initializer = mx.initializer.Xavier(rnd_type="gaussian",
                                                     factor_type="in",
                                                     magnitude=2)
        else:
            if isinstance(initializer, mx.initializer.Initializer) is False:
                raise TypeError(
                    "The type of `initializer` must be `mxnet.initializer.Initializer`."
                )
            self.initializer = initializer

        with self.name_scope():
            self.register_child(self.re_encoder_model)

        logger = getLogger("accelbrainbase")
        self.__logger = logger

        if self.init_deferred_flag is False:
            try:
                self.collect_params().initialize(self.initializer,
                                                 force_reinit=True,
                                                 ctx=ctx)
                self.trainer = gluon.Trainer(self.collect_params(),
                                             optimizer_name,
                                             {"learning_rate": learning_rate})
                if hybridize_flag is True:
                    self.model.hybridize()
                    self.re_encoder_model.hybridize()
                    if self.condition_sampler is not None:
                        if self.condition_sampler.model is not None:
                            self.condition_sampler.model.hybridize()
            except InitDeferredError:
                # `self.__logger` is assigned above, before this handler can run.
                self.__logger.debug("The initialization should be deferred.")

        self.__learning_rate = learning_rate

        self.__cnn = model
        self.__condition_sampler = condition_sampler
        self.__computable_loss = computable_loss

        self.__q_shape = None
        self.__loss_list = []
        self.__epoch_counter = 0

        self.conditonal_dim = conditonal_dim
        self.__expand_dims_flag = expand_dims_flag
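
# The `init_deferred_flag` dance above lets a subclass register extra children
# (here, `re_encoder_model`) before parameters are initialized exactly once.
# Below is a minimal, self-contained sketch of that pattern in plain Python;
# `Base` and `Child` are illustrative names, not library classes.

class Base:
    init_deferred_flag = False

    def __init__(self):
        # The parent would normally initialize parameters here.
        if self.init_deferred_flag is False:
            self.initialize_parameters()

    def initialize_parameters(self):
        print("parameters initialized once.")

class Child(Base):
    def __init__(self):
        # Temporarily defer the parent's initialization.
        init_deferred_flag = self.init_deferred_flag
        self.init_deferred_flag = True
        super().__init__()
        self.init_deferred_flag = init_deferred_flag
        # ... register additional sub-models here, then initialize once.
        if self.init_deferred_flag is False:
            self.initialize_parameters()

Child()  # prints "parameters initialized once." exactly once.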
    def __init__(
        self,
        dir_list,
        test_dir_list,
        width=28,
        height=28,
        channel=1,
        initializer=None,
        batch_size=40,
        learning_rate=0.0002,
        ctx=mx.gpu(),
        discriminative_model=None,
        generative_model=None,
        re_encoder_model=None,
    ):
        '''
        Init.

        If you are not satisfied with this simple default setting,
        delegate `discriminative_model` and `generative_model` designed by yourself.

        Args:
            dir_list:       `list` of `str` of paths to directories of image files.
            test_dir_list:  `list` of `str` of paths to directories of image files for testing.
            width:          `int` of image width.
            height:         `int` of image height.
            channel:        `int` of image channel.
            initializer:    is-a `mxnet.initializer` for parameters of model.
                            If `None`, it is drawing from the Xavier distribution.
            
            batch_size:     `int` of batch size.
            learning_rate:  `float` of learning rate.
            ctx:            `mx.gpu()` or `mx.cpu()`.

            discriminative_model:       is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.discriminative_model.DiscriminativeModel`.
            generative_model:           is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.generative_model.GenerativeModel`.
            re_encoder_model:           is-a `HybridBlock`.

        '''
        image_extractor = ImageExtractor(width=width,
                                         height=height,
                                         channel=channel,
                                         ctx=ctx)

        unlabeled_image_iterator = UnlabeledImageIterator(
            image_extractor=image_extractor,
            dir_list=dir_list,
            batch_size=batch_size,
            norm_mode="z_score",
            scale=1 / 1000,
            noiseable_data=GaussNoise(sigma=1e-08, mu=0.0),
        )

        test_unlabeled_image_iterator = UnlabeledImageIterator(
            image_extractor=image_extractor,
            dir_list=test_dir_list,
            batch_size=batch_size,
            norm_mode="z_score",
            scale=1 / 1000,
            noiseable_data=GaussNoise(sigma=1e-08, mu=0.0),
        )

        true_sampler = TrueSampler()
        true_sampler.iteratorable_data = unlabeled_image_iterator

        condition_sampler = ConditionSampler()
        condition_sampler.true_sampler = true_sampler

        computable_loss = L2NormLoss()

        if discriminative_model is None:
            output_nn = NeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                units_list=[1],
                dropout_rate_list=[0.0],
                optimizer_name="SGD",
                activation_list=["sigmoid"],
                hidden_batch_norm_list=[None],
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
                output_no_bias_flag=True,
                all_no_bias_flag=True,
                not_init_flag=False,
            )

            d_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=3,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=output_nn,
                hidden_dropout_rate_list=[
                    0.5,
                    0.5,
                ],
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                optimizer_name="SGD",
                hidden_activation_list=[
                    "relu",
                    "relu",
                ],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            discriminative_model = DiscriminativeModel(
                model=d_model,
                initializer=None,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if isinstance(discriminative_model, DiscriminativeModel) is False:
                raise TypeError(
                    "The type of `discriminative_model` must be `DiscriminativeModel`."
                )

        if re_encoder_model is None:
            re_encoder_model = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `int` of the number of units in hidden layers.
                hidden_units_list=[
                    # `mxnet.gluon.nn.Conv2D`.
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layers.
                hidden_activation_list=[
                    "relu",
                    "relu",
                ],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[
                    0.5,
                    0.5,
                ],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )

        if generative_model is None:
            encoder = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `int` of the number of units in hidden layers.
                hidden_units_list=[
                    # `mxnet.gluon.nn.Conv2D`.
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layers.
                hidden_activation_list=[
                    "relu",
                    "relu",
                ],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[
                    0.5,
                    0.5,
                ],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )
            decoder = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `int` of the number of units in hidden layers.
                hidden_units_list=[
                    Conv2DTranspose(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2DTranspose(
                        channels=channel,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(0, 0),
                    ),
                ],
                # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layers.
                hidden_activation_list=["relu", "tanh"],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[0.5, 0.0],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), None],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )

            g_model = ConvolutionalAutoEncoder(
                # is-a `ConvolutionalNeuralNetworks`.
                encoder=encoder,
                # is-a `ConvolutionalNeuralNetworks`.
                decoder=decoder,
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                optimizer_name="SGD",
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            generative_model = GenerativeModel(
                noise_sampler=UniformNoiseSampler(low=-1e-05,
                                                  high=1e-05,
                                                  batch_size=batch_size,
                                                  seq_len=0,
                                                  channel=channel,
                                                  height=height,
                                                  width=width,
                                                  ctx=ctx),
                model=g_model,
                initializer=None,
                condition_sampler=condition_sampler,
                conditonal_dim=1,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if isinstance(generative_model, GenerativeModel) is False:
                raise TypeError(
                    "The type of `generative_model` must be `GenerativeModel`."
                )

        ganomaly_controller = GanomalyController(
            generative_model=generative_model,
            re_encoder_model=re_encoder_model,
            discriminative_model=discriminative_model,
            advarsarial_loss=L2NormLoss(weight=0.015),
            encoding_loss=L2NormLoss(weight=0.015),
            contextual_loss=L1Loss(weight=0.5),
            discriminator_loss=DiscriminatorLoss(weight=0.015),
            feature_matching_loss=None,
            optimizer_name="SGD",
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
            initializer=initializer,
        )

        self.ganomaly_controller = ganomaly_controller
        self.test_unlabeled_image_iterator = test_unlabeled_image_iterator
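
# At inference time, GANomaly flags anomalies by re-encoding: an input x is
# mapped to a latent code z by the generator's encoder, decoded back to x_hat,
# and x_hat is mapped to z_hat by the re-encoder; a large distance between z
# and z_hat marks x as anomalous. A minimal numeric sketch with plain
# `mxnet.ndarray` values (the scoring rule follows the GANomaly paper; it is
# not a specific method of this class):
z = mx.nd.random.normal(shape=(40, 32))      # latent codes of a test batch.
z_hat = mx.nd.random.normal(shape=(40, 32))  # re-encoded latent codes.
anomaly_score = mx.nd.sqrt(
    mx.nd.sum(mx.nd.square(z - z_hat), axis=1)
)  # one L2 distance per image; threshold it to detect anomalies.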
    def __init__(self,
                 initializer=None,
                 computable_loss=None,
                 margin_param=0.01,
                 retrospective_lambda=0.5,
                 retrospective_eta=0.5,
                 encoder_decoder_controller=None,
                 retrospective_encoder=None,
                 hidden_neuron_count=20,
                 output_neuron_count=20,
                 dropout_rate=0.5,
                 batch_size=20,
                 learning_rate=1e-05,
                 learning_attenuate_rate=1.0,
                 attenuate_epoch=50,
                 optimizer_name="sgd",
                 grad_clip_threshold=1e+10,
                 seq_len=8,
                 ctx=mx.gpu(),
                 **kwargs):
        '''
        Init.

        Args:
            initializer:                    is-a `mxnet.initializer` for parameters of model. If `None`, it is drawing from the Xavier distribution.
            computable_loss:                is-a `ComputableLoss`.
            margin_param:                   A margin parameter for the mismatched pairs penalty.
            retrospective_lambda:           Tradeoff parameter for loss function.
            retrospective_eta:              Tradeoff parameter for loss function.
            encoder_decoder_controller:     is-a `EncoderDecoderController`.
            retrospective_encoder:          is-a `LSTMModel` as a retrospective encoder (or re-encoder).
            hidden_neuron_count:            The number of units in hidden layers.
            output_neuron_count:            The number of units in output layers.

            dropout_rate:                   Probability of dropout.

            batch_size:                     Batch size.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                            Additionally, in relation to regularization,
                                            this class constrains weight matrices every `attenuate_epoch`.

            optimizer_name:                 `str` of optimizer's name.
            grad_clip_threshold:            Threshold of the gradient clipping.
            seq_len:                        The length of sequences in the Decoder with Attention model.
            ctx:                            `mx.cpu()` or `mx.gpu()`.

        '''
        super(ReSeq2Seq, self).__init__()
        self.__ctx = ctx
        if computable_loss is None:
            computable_loss = L2NormLoss()

        if isinstance(margin_param, float) is False:
            raise TypeError("The type of `margin_param` must be `float`.")
        if margin_param <= 0:
            raise ValueError(
                "The value of `margin_param` must be more than `0`.")

        self.__margin_param = margin_param

        if isinstance(retrospective_lambda, float) is False or isinstance(
                retrospective_eta, float) is False:
            raise TypeError(
                "The type of `retrospective_lambda` and `retrospective_eta` must be `float`."
            )

        if retrospective_lambda < 0 or retrospective_eta < 0:
            raise ValueError(
                "The values of `retrospective_lambda` and `retrospective_eta` must not be less than `0`."
            )
        # Compare with a small tolerance: an exact `!=` can reject valid pairs
        # (e.g., `0.3 + 0.7`) because of floating-point rounding.
        if abs(retrospective_lambda + retrospective_eta - 1.0) > 1e-08:
            raise ValueError(
                "The sum of `retrospective_lambda` and `retrospective_eta` must be `1`."
            )

        self.__retrospective_lambda = retrospective_lambda
        self.__retrospective_eta = retrospective_eta

        if encoder_decoder_controller is None:
            encoder_decoder_controller = self.__build_encoder_decoder_controller(
                computable_loss=computable_loss,
                hidden_neuron_count=hidden_neuron_count,
                output_neuron_count=output_neuron_count,
                dropout_rate=dropout_rate,
                batch_size=batch_size,
                learning_rate=learning_rate,
                attenuate_epoch=attenuate_epoch,
                learning_attenuate_rate=learning_attenuate_rate,
                seq_len=seq_len,
            )
        else:
            if isinstance(encoder_decoder_controller,
                          EncoderDecoderController) is False:
                raise TypeError(
                    "The type of `encoder_decoder_controller` must be `EncoderDecoderController`."
                )

        if retrospective_encoder is None:
            retrospective_encoder = self.__build_retrospective_encoder(
                computable_loss=computable_loss,
                hidden_neuron_count=hidden_neuron_count,
                output_neuron_count=output_neuron_count,
                dropout_rate=dropout_rate,
                batch_size=batch_size,
                learning_rate=learning_rate,
                seq_len=seq_len)
        else:
            if isinstance(retrospective_encoder, LSTMModel) is False:
                raise TypeError(
                    "The type of `retrospective_encoder` must be `LSTMModel`."
                )

        self.__encoder_decoder_controller = encoder_decoder_controller
        self.__retrospective_encoder = retrospective_encoder
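        # The Encoder/Decoder reconstructs input sequences, while the retrospective
        # encoder re-encodes the decoder's outputs so that their summarized
        # representation can be matched against the original encoding (presumably
        # the "retrospective" objective that gives this model its name).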

        if initializer is None:
            self.initializer = mx.initializer.Xavier(rnd_type="gaussian",
                                                     factor_type="in",
                                                     magnitude=1)
        else:
            if isinstance(initializer, mx.initializer.Initializer) is False:
                raise TypeError(
                    "The type of `initializer` must be `mxnet.initializer.Initializer`."
                )

            self.initializer = initializer

        self.collect_params().initialize(self.initializer,
                                         force_reinit=True,
                                         ctx=ctx)
        self.trainer = gluon.Trainer(self.collect_params(), optimizer_name,
                                     {"learning_rate": learning_rate})

        self.__batch_size = batch_size
        self.__learning_rate = learning_rate
        self.__attenuate_epoch = attenuate_epoch
        self.__learning_attenuate_rate = learning_attenuate_rate
        self.__grad_clip_threshold = grad_clip_threshold

        self.__output_neuron_count = output_neuron_count
        self.__hidden_neuron_count = hidden_neuron_count

        logger = getLogger("accelbrainbase")
        self.__logger = logger
        self.__logs_tuple_list = []
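    # A minimal construction sketch (illustrative only; the keyword names follow
    # this `__init__`, but the values are assumptions, not taken from the snippet):
    #
    #     model = ReSeq2Seq(
    #         computable_loss=L2NormLoss(),
    #         margin_param=0.01,
    #         retrospective_lambda=0.5,
    #         retrospective_eta=0.5,
    #         hidden_neuron_count=20,
    #         output_neuron_count=20,
    #         batch_size=20,
    #         learning_rate=1e-05,
    #         ctx=mx.cpu(),
    #     )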
# Example no. 7
    def __init__(
        self,
        computable_loss=None,
        normal_prior_flag=False,
        encoder_decoder_controller=None,
        hidden_neuron_count=20,
        output_neuron_count=20,
        dropout_rate=0.5,
        epochs=100,
        batch_size=20,
        learning_rate=1e-05,
        learning_attenuate_rate=1.0,
        attenuate_epoch=50,
        seq_len=8,
    ):
        '''
        Init.

        Args:
            computable_loss:                is-a `ComputableLoss`.
            normal_prior_flag:              If `True`, this class selects abstract sentences
                                            from the sentences with low reconstruction error.

            encoder_decoder_controller:     is-a `EncoderDecoderController`.
            hidden_neuron_count:            The number of units in hidden layers.
            output_neuron_count:            The number of units in output layers.

            dropout_rate:                   Probability of dropout.
            epochs:                         The number of epochs of mini-batch training for the Encoder/Decoder
                                            and the retrospective encoder.
            batch_size:                     Batch size.
            learning_rate:                  Learning rate.
            learning_attenuate_rate:        Attenuate the `learning_rate` by a factor of this value every `attenuate_epoch`.
            attenuate_epoch:                Attenuate the `learning_rate` by a factor of `learning_attenuate_rate` every `attenuate_epoch`.
                                            

            seq_len:                        The length of sequences in the Decoder with Attention model.

        '''
        if computable_loss is None:
            computable_loss = L2NormLoss()

        self.__normal_prior_flag = normal_prior_flag
        if encoder_decoder_controller is None:
            encoder_decoder_controller = self.__build_encoder_decoder_controller(
                computable_loss=computable_loss,
                hidden_neuron_count=hidden_neuron_count,
                output_neuron_count=output_neuron_count,
                dropout_rate=dropout_rate,
                batch_size=batch_size,
                learning_rate=learning_rate,
                attenuate_epoch=attenuate_epoch,
                learning_attenuate_rate=learning_attenuate_rate,
                seq_len=seq_len,
            )
        else:
            if isinstance(encoder_decoder_controller,
                          EncoderDecoderController) is False:
                raise TypeError(
                    "The type of `encoder_decoder_controller` must be `EncoderDecoderController`."
                )

        self.__encoder_decoder_controller = encoder_decoder_controller

        logger = getLogger("accelbrainbase")
        self.__logger = logger
        self.__logs_tuple_list = []
        self.__computable_loss = computable_loss
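    # A minimal construction sketch (illustrative; the enclosing class name is not
    # shown in this snippet, so `RetrospectiveController` below is a placeholder):
    #
    #     controller = RetrospectiveController(
    #         normal_prior_flag=True,
    #         epochs=100,
    #         batch_size=20,
    #         seq_len=8,
    #     )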
    def __init__(
        self,
        dir_list,
        width=28,
        height=28,
        channel=1,
        normal_height=14,
        normal_width=14,
        normal_channel=32,
        initializer=None,
        batch_size=40,
        learning_rate=1e-03,
        ctx=mx.gpu(),
        discriminative_model=None,
        generative_model=None,
        discriminator_loss_weight=1.0,
        reconstruction_loss_weight=1.0,
        feature_matching_loss_weight=1.0,
    ):
        '''
        Init.

        If you are not satisfied with these simple default settings,
        pass a `discriminative_model` and a `generative_model` designed by yourself.

        Args:
            dir_list:       `list` of `str` of paths to directories containing image files.
            width:          `int` of image width.
            height:         `int` of image height.
            channel:        `int` of image channel.

            normal_width:   `int` of width of image drawn from normal distribution, p(z).
            normal_height:  `int` of height of image drawn from normal distribution, p(z).
            normal_channel: `int` of channel of image drawn from normal distribution, p(z).

            initializer:    is-a `mxnet.initializer` for parameters of model.
                            If `None`, parameters are drawn from a uniform distribution (`mx.initializer.Uniform()`).
            
            batch_size:     `int` of batch size.
            learning_rate:  `float` of learning rate.
            ctx:            `mx.gpu()` or `mx.cpu()`.

            discriminative_model:       is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.discriminative_model.discriminativemodel.eb_discriminative_model.EBDiscriminativeModel`.
            generative_model:           is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.generative_model.GenerativeModel`.

            discriminator_loss_weight:      `float` of weight for discriminator loss.
            reconstruction_loss_weight:     `float` of weight for reconstruction loss.
            feature_matching_loss_weight:   `float` of weight for feature matching loss.
        '''
        image_extractor = ImageExtractor(width=width,
                                         height=height,
                                         channel=channel,
                                         ctx=ctx)

        unlabeled_image_iterator = UnlabeledImageIterator(
            image_extractor=image_extractor,
            dir_list=dir_list,
            batch_size=batch_size,
            norm_mode="z_score",
            scale=1.0,
            noiseable_data=GaussNoise(sigma=1e-03, mu=0.0),
        )
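        # The iterator streams mini-batches of unlabeled images, z-score normalizes
        # them, and perturbs them with additive Gaussian noise (`sigma=1e-03`) as a
        # light regularizer.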

        computable_loss = L2NormLoss()

        if initializer is None:
            initializer = mx.initializer.Uniform()
        else:
            if isinstance(initializer, mx.initializer.Initializer) is False:
                raise TypeError(
                    "The type of `initializer` must be `mxnet.initializer.Initializer`."
                )

        if discriminative_model is None:
            d_encoder = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `mxnet.gluon.nn.Conv2D` of hidden layers.
                hidden_units_list=[
                    # `mxnet.gluon.nn.Conv2D`.
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=3,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                ],
                # `list` of `act_type` for `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layers.
                hidden_activation_list=["relu", "relu"],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[0.5, 0.5],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )

            d_decoder = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `mxnet.gluon.nn.Conv2DTranspose` of hidden layers.
                hidden_units_list=[
                    # `mxnet.gluon.nn.Conv2DTranspose`.
                    Conv2DTranspose(
                        channels=16,
                        kernel_size=3,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                    Conv2DTranspose(
                        channels=32,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                # `list` of `act_type` for `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in hidden layers.
                hidden_activation_list=["identity", "identity"],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[0.0, 0.0],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), None],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )
            d_model = ConvolutionalAutoEncoder(
                # is-a `ConvolutionalNeuralNetworks`.
                encoder=d_encoder,
                # is-a `ConvolutionalNeuralNetworks`.
                decoder=d_decoder,
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `bool` flag that determines whether the decoder weights are tied to the encoder's.
                tied_weights_flag=True,
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )
            d_model.batch_size = batch_size

            discriminative_model = EBDiscriminativeModel(
                # is-a `ConvolutionalAutoEncoder`.
                model=d_model,
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )
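            # Following the energy-based GAN formulation, this discriminator is an
            # auto-encoder: the reconstruction error of `d_model` serves as the
            # "energy" assigned to a sample, low for real inputs and high for fakes.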
        else:
            if isinstance(discriminative_model, DiscriminativeModel) is False:
                raise TypeError(
                    "The type of `discriminative_model` must be `DiscriminativeModel`."
                )

        if generative_model is None:
            encoder = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(0, 0),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=3,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                hidden_dropout_rate_list=[0.5, 0.5],
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                optimizer_name="SGD",
                hidden_activation_list=["relu", "relu"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )
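            # The encoder maps each (`channel`, `height`, `width`) image to a latent
            # feature map; for the adversarial comparison, its output shape should
            # line up with the prior samples of shape
            # (`normal_channel`, `normal_height`, `normal_width`) drawn further below.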

            decoder = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2DTranspose(
                        channels=16,
                        kernel_size=3,
                        strides=(1, 1),
                        padding=(1, 1),
                    ),
                    Conv2DTranspose(
                        channels=channel,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(0, 0),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                hidden_dropout_rate_list=[0.0, 0.0],
                hidden_batch_norm_list=[BatchNorm(), None],
                optimizer_name="SGD",
                hidden_activation_list=["identity", "identity"],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            g_model = ConvolutionalAutoEncoder(
                encoder=encoder,
                decoder=decoder,
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                optimizer_name="SGD",
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )
            g_model.batch_size = batch_size

            true_sampler = TrueSampler()
            true_sampler.iteratorable_data = unlabeled_image_iterator

            condition_sampler = ConditionSampler()
            condition_sampler.true_sampler = true_sampler
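            # The `TrueSampler` draws real images from `unlabeled_image_iterator`,
            # and the `ConditionSampler` passes them to the generator as its
            # condition; the small uniform noise sampled below is combined with
            # these observations.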

            generative_model = GenerativeModel(
                noise_sampler=UniformNoiseSampler(low=-1e-03,
                                                  high=1e-03,
                                                  batch_size=batch_size,
                                                  seq_len=0,
                                                  channel=channel,
                                                  height=height,
                                                  width=width,
                                                  ctx=ctx),
                model=g_model,
                initializer=initializer,
                condition_sampler=condition_sampler,
                conditonal_dim=1,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if isinstance(generative_model, GenerativeModel) is False:
                raise TypeError(
                    "The type of `generative_model` must be `GenerativeModel`."
                )

        normal_true_sampler = NormalTrueSampler(batch_size=batch_size,
                                                seq_len=0,
                                                channel=normal_channel,
                                                height=normal_height,
                                                width=normal_width,
                                                ctx=ctx)
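        # `normal_true_sampler` plays the role of the prior p(z): the discriminator
        # learns to assign low energy to these normally distributed latent maps and
        # high energy to the latent maps inferred by the generator's encoder.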

        EBAAE = EBAAEController(
            true_sampler=normal_true_sampler,
            generative_model=generative_model,
            discriminative_model=discriminative_model,
            discriminator_loss=EBDiscriminatorLoss(
                weight=discriminator_loss_weight),
            reconstruction_loss=L2NormLoss(weight=reconstruction_loss_weight),
            feature_matching_loss=L2NormLoss(
                weight=feature_matching_loss_weight),
            optimizer_name="SGD",
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
            initializer=initializer,
        )
        self.EBAAE = EBAAE
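        # Training would then be driven through `self.EBAAE`; the exact training
        # call (e.g. a `learn(...)`-style method on the controller) is not shown in
        # this snippet, so its signature is an assumption to be checked against the
        # accelbrainbase documentation.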