Example #1
def __init__(self, alpha: float, beta_vgg: float, beta_pix: float, context=None) -> None:
    self._alpha = alpha
    self._bce = SigmoidBCELoss()
    self._beta_vgg = beta_vgg
    self._beta_pix = beta_pix
    self._l1 = L1Loss()
    self._vgg = VggLoss(context)
Example #2
def __init__(self, weight=100, batch_axis=0, **kwargs):
    """
    :param weight: weight applied to the L1 loss term
    :param batch_axis: the axis that represents the mini-batch
    :param kwargs: additional keyword arguments forwarded to the parent Loss
    """
    super(GeneratorCriterion, self).__init__(weight, batch_axis, **kwargs)
    self.bce_loss = SigmoidBinaryCrossEntropyLoss(from_sigmoid=True,
                                                  batch_axis=batch_axis)
    self.l1_loss = L1Loss(weight=weight, batch_axis=batch_axis)
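A note on the arguments used above: in Gluon, every `Loss` subclass multiplies its result by `weight`, so `L1Loss(weight=100)` simply returns 100 times the per-sample mean absolute error, and `from_sigmoid=True` tells the BCE loss that its inputs are already probabilities rather than raw logits. A minimal check of the weighting (values chosen arbitrarily):

import mxnet as mx
from mxnet.gluon.loss import L1Loss

pred = mx.nd.array([[2.0]])
label = mx.nd.array([[0.0]])
print(L1Loss()(pred, label))            # [2.]
print(L1Loss(weight=100)(pred, label))  # [200.]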
Example #3
    def get_optimizer(self):
        trainer_actor = gluon.Trainer(
            self.actor.collect_params(), 'adadelta', {
                'learning_rate': self.hparams.actor_learning_rate,
                'rho': 0.99,
                'epsilon': 0.01
            })
        trainer_critic = gluon.Trainer(
            self.critic.collect_params(), 'adadelta', {
                # note: the critic trainer reuses the actor's learning rate
                'learning_rate': self.hparams.actor_learning_rate,
                'rho': 0.99,
                'epsilon': 0.01
            })
        loss_actor = CrossEntropy()
        loss_critic = L1Loss()

        return trainer_actor, loss_actor, trainer_critic, loss_critic
Example #4
def __init__(self, context) -> None:
    self.vgg19 = vision.vgg19(pretrained=True, ctx=context)
    self.vgg_layer = 22
    self._l1 = L1Loss()
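Example #4 only stores the network and the loss; the forward pass is not part of the snippet. A minimal sketch of how such a VGG perceptual loss is usually evaluated, assuming the class exposes a `__call__` like the hypothetical one below and that `vgg_layer = 22` counts blocks of `self.vgg19.features`:

def __call__(self, prediction, target):
    # run both tensors through the first `vgg_layer` blocks of VGG-19
    # and compare the resulting feature maps with L1
    pred_feat, target_feat = prediction, target
    for i in range(self.vgg_layer):
        pred_feat = self.vgg19.features[i](pred_feat)
        target_feat = self.vgg19.features[i](target_feat)
    return self._l1(pred_feat, target_feat)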
Example #5
import math

import mxnet as mx
from mxnet import nd
from mxnet.gluon.loss import L1Loss, L2Loss

from data import AsianFaceDatasets, IMDBWIKIDatasets
from model import Net

epochs = 60
lr = 0.001
mom = 0.9
weight_decay = 0.0005

data_ctx = mx.gpu(0)
model_ctx = mx.gpu(0)
scale = nd.arange(0, 101, ctx=model_ctx).reshape((101, 1))

l1_loss = L1Loss()
# l1_loss = L2Loss()
net = Net()

# training_datasets = AsianFaceDatasets(csv_path='/home/gdshen/datasets/face/asian/train.csv',
#                                       img_dir='/home/gdshen/datasets/face/asian/images')
# test_datasets = AsianFaceDatasets(csv_path='/home/gdshen/datasets/face/asian/test.csv',
#                                   img_dir='/home/gdshen/datasets/face/asian/images', train=False)

training_datasets = IMDBWIKIDatasets(
    csv_path='/home/gdshen/datasets/face/processed/train.csv', train=True)
test_datasets = IMDBWIKIDatasets(
    csv_path='/home/gdshen/datasets/face/processed/test.csv', train=False)


def evaluate_accuracy(data_iterator, net):
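The script breaks off at the evaluation helper. For reference, the two losses it chooses between behave quite differently on the same error: `L1Loss` returns the mean absolute difference per sample, while `L2Loss` returns the mean of half the squared difference. A quick comparison (values arbitrary):

import mxnet as mx
from mxnet.gluon.loss import L1Loss, L2Loss

pred = mx.nd.array([[4.0]])
label = mx.nd.array([[1.0]])
print(L1Loss()(pred, label))  # [3.]   mean |pred - label|
print(L2Loss()(pred, label))  # [4.5]  mean 0.5 * (pred - label)**2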
Example #6
def __init__(self, alpha: float, beta: float) -> None:
    self._alpha = alpha
    self._bce = SigmoidBCELoss()
    self._beta = beta
    self._l1 = L1Loss()
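The forward pass is again not shown. In pix2pix-style generators this pair of losses is typically combined as a weighted sum of an adversarial BCE term and an L1 reconstruction term; a sketch of that convention, with the signature and the use of `alpha`/`beta` being guesses rather than the author's code:

from mxnet import nd

def __call__(self, disc_out, prediction, target):
    # hypothetical combination (not from the source): adversarial BCE
    # against an all-ones target, plus beta-weighted L1 reconstruction
    return (self._alpha * self._bce(disc_out, nd.ones_like(disc_out))
            + self._beta * self._l1(prediction, target))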
Example #7
def __init__(self, context) -> None:
    # self.resizer_224 = mx.gluon.data.vision.transforms.Resize(224)
    self.vgg19 = vision.vgg19(pretrained=True, ctx=context)
    self.vgg_layer = 22
    self._l1 = L1Loss()
Example #8

    def __init__(
        self,
        dir_list,
        test_dir_list,
        width=28,
        height=28,
        channel=1,
        initializer=None,
        batch_size=40,
        learning_rate=0.0002,
        ctx=mx.gpu(),
        discriminative_model=None,
        generative_model=None,
        re_encoder_model=None,
    ):
        '''
        Init.

        If this simple default setting does not fit your needs,
        pass in a `discriminative_model` and `generative_model` of your own design.

        Args:
            dir_list:       `list` of `str` of path to image files.
            test_dir_list:  `list` of `str` of path to image files for test.
            width:          `int` of image width.
            height:         `int` of image height.
            channel:        `int` of image channel.
            initializer:    is-a `mxnet.initializer` for parameters of the model.
                            If `None`, parameters are drawn from the Xavier distribution.

            batch_size:     `int` of batch size.
            learning_rate:  `float` of learning rate.
            ctx:            `mx.gpu()` or `mx.cpu()`.

            discriminative_model:       is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.discriminative_model.DiscriminativeModel`.
            generative_model:           is-a `accelbrainbase.observabledata._mxnet.adversarialmodel.generative_model.GenerativeModel`.
            re_encoder_model:           is-a `HybridBlock`.

        '''
        image_extractor = ImageExtractor(width=width,
                                         height=height,
                                         channel=channel,
                                         ctx=ctx)

        unlabeled_image_iterator = UnlabeledImageIterator(
            image_extractor=image_extractor,
            dir_list=dir_list,
            batch_size=batch_size,
            norm_mode="z_score",
            scale=1 / 1000,
            noiseable_data=GaussNoise(sigma=1e-08, mu=0.0),
        )

        test_unlabeled_image_iterator = UnlabeledImageIterator(
            image_extractor=image_extractor,
            dir_list=test_dir_list,
            batch_size=batch_size,
            norm_mode="z_score",
            scale=1 / 1000,
            noiseable_data=GaussNoise(sigma=1e-08, mu=0.0),
        )

        true_sampler = TrueSampler()
        true_sampler.iteratorable_data = unlabeled_image_iterator

        condition_sampler = ConditionSampler()
        condition_sampler.true_sampler = true_sampler

        computable_loss = L2NormLoss()

        if discriminative_model is None:
            output_nn = NeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                units_list=[1],
                dropout_rate_list=[0.0],
                optimizer_name="SGD",
                activation_list=["sigmoid"],
                hidden_batch_norm_list=[None],
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
                output_no_bias_flag=True,
                all_no_bias_flag=True,
                not_init_flag=False,
            )

            d_model = ConvolutionalNeuralNetworks(
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                hidden_units_list=[
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=3,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=output_nn,
                hidden_dropout_rate_list=[
                    0.5,
                    0.5,
                ],
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                optimizer_name="SGD",
                hidden_activation_list=[
                    "relu",
                    "relu",
                ],
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            discriminative_model = DiscriminativeModel(
                model=d_model,
                initializer=None,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if not isinstance(discriminative_model, DiscriminativeModel):
                raise TypeError(
                    "The type of `discriminative_model` must be `DiscriminativeModel`."
                )

        if re_encoder_model is None:
            re_encoder_model = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `int` of the number of units in hidden layers.
                hidden_units_list=[
                    # `mxnet.gluon.nn.Conv2D`.
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`.
                hidden_activation_list=[
                    "relu",
                    "relu",
                ],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[
                    0.5,
                    0.5,
                ],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )

        if generative_model is None:
            encoder = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `int` of the number of units in hidden layers.
                hidden_units_list=[
                    # `mxnet.gluon.nn.Conv2D`.
                    Conv2D(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2D(
                        channels=32,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                ],
                # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`.
                hidden_activation_list=[
                    "relu",
                    "relu",
                ],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[
                    0.5,
                    0.5,
                ],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), BatchNorm()],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )
            decoder = ConvolutionalNeuralNetworks(
                # is-a `ComputableLoss` or `mxnet.gluon.loss`.
                computable_loss=computable_loss,
                # `list` of `int` of the number of units in hidden layers.
                hidden_units_list=[
                    Conv2DTranspose(
                        channels=16,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(1, 1),
                    ),
                    Conv2DTranspose(
                        channels=channel,
                        kernel_size=6,
                        strides=(2, 2),
                        padding=(0, 0),
                    ),
                ],
                # `list` of `act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation`.
                hidden_activation_list=["relu", "tanh"],
                # `list` of `float` of dropout rate.
                hidden_dropout_rate_list=[0.5, 0.0],
                # `list` of `mxnet.gluon.nn.BatchNorm`.
                hidden_batch_norm_list=[BatchNorm(), None],
                # Call `mxnet.gluon.HybridBlock.hybridize()` or not.
                hybridize_flag=True,
                # `mx.gpu()` or `mx.cpu()`.
                ctx=ctx,
            )

            g_model = ConvolutionalAutoEncoder(
                # is-a `ConvolutionalNeuralNetworks`.
                encoder=encoder,
                # is-a `ConvolutionalNeuralNetworks`.
                decoder=decoder,
                computable_loss=computable_loss,
                initializer=initializer,
                learning_rate=learning_rate,
                learning_attenuate_rate=1.0,
                attenuate_epoch=50,
                input_nn=None,
                input_result_height=None,
                input_result_width=None,
                input_result_channel=None,
                output_nn=None,
                optimizer_name="SGD",
                hidden_residual_flag=False,
                hidden_dense_flag=False,
                dense_axis=1,
                ctx=ctx,
                hybridize_flag=True,
                regularizatable_data_list=[],
                scale=1.0,
            )

            generative_model = GenerativeModel(
                noise_sampler=UniformNoiseSampler(low=-1e-05,
                                                  high=1e-05,
                                                  batch_size=batch_size,
                                                  seq_len=0,
                                                  channel=channel,
                                                  height=height,
                                                  width=width,
                                                  ctx=ctx),
                model=g_model,
                initializer=None,
                condition_sampler=condition_sampler,
                conditonal_dim=1,
                learning_rate=learning_rate,
                optimizer_name="SGD",
                hybridize_flag=True,
                scale=1.0,
                ctx=ctx,
            )
        else:
            if not isinstance(generative_model, GenerativeModel):
                raise TypeError(
                    "The type of `generative_model` must be `GenerativeModel`."
                )

        ganomaly_controller = GanomalyController(
            generative_model=generative_model,
            re_encoder_model=re_encoder_model,
            discriminative_model=discriminative_model,
            advarsarial_loss=L2NormLoss(weight=0.015),
            encoding_loss=L2NormLoss(weight=0.015),
            contextual_loss=L1Loss(weight=0.5),
            discriminator_loss=DiscriminatorLoss(weight=0.015),
            feature_matching_loss=None,
            optimizer_name="SGD",
            learning_rate=learning_rate,
            learning_attenuate_rate=1.0,
            attenuate_epoch=50,
            hybridize_flag=True,
            scale=1.0,
            ctx=ctx,
            initializer=initializer,
        )

        self.ganomaly_controller = ganomaly_controller
        self.test_unlabeled_image_iterator = test_unlabeled_image_iterator
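In this GANomaly-style setup the `L1Loss(weight=0.5)` passed as `contextual_loss` penalizes the absolute pixel difference between an input image and its reconstruction, while the adversarial and encoding terms use L2 norms, which matches the loss decomposition in the original GANomaly paper; the particular weights are just the ones this example chose.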
Example #9
        .random_split(datasets["train"],
                      frac=[config["HYPER_PARAMS"]["TRAIN_SPLIT"],
                            1 - config["HYPER_PARAMS"]["TRAIN_SPLIT"]],
                      random_state=config["HYPER_PARAMS"]["SEED"],
                      shuffle=True)

    # Dump data to memory
    print("\nDumping data to memory...")
    datasets["train"] = datasets["train"].compute(num_workers=cpu_count())
    datasets["validation"] = datasets["validation"].compute(
        num_workers=cpu_count())
    datasets["test"] = datasets["test"].compute(num_workers=cpu_count())
    print("Data dumped!")

    # Define locals for MXNet
    model_mgr: ModelManager = ModelManager(loss_fn=L1Loss(),
                                           model_style=config["STYLE"].lower(),
                                           writer=SummaryWriter(
                                               logdir=config["PATH"]["LOGS"],
                                               flush_secs=1))

    # Prepare data
    batch_data["train"] = model_mgr.get_batch_data(
        data=datasets["train"],
        workers=cpu_count(),
        cols=config["COLUMNS"],
        batch_size=config["HYPER_PARAMS"]["BATCH_SIZE"],
        label="train")
    batch_data["validation"] = model_mgr.get_batch_data(
        data=datasets["validation"],
        workers=cpu_count(),