import logging


class LogDistributionsCallback(object):
    """
    This function has been deprecated because it consumes too much time.
    The faster way is to use "ParseLogCallback" with a 'iter_monitor' flag

    Log metrics periodically in TensorBoard.
    This callback works almost same as `callback.Speedometer`, but write TensorBoard event file
    for visualization.
    logging_dir : str
        where the tensorboard file will be created
    layers_list : list[str]
        list of layers to be tracked
    """
    def __init__(self, logging_dir, prefix=None, layers_list=None):
        self.prefix = prefix
        self.layers_list = layers_list
        try:
            from tensorboard import SummaryWriter
            self.summary_writer = SummaryWriter(logging_dir)
        except ImportError:
            logging.error('You can install tensorboard via `pip install tensorboard`.')

    def __call__(self, param):
        """Callback to log layers' distributions in TensorBoard."""
        if param.locals is None:
            return
        for name, value in param.locals['arg_params'].items():
            # Only track layers named in layers_list (the original left this
            # as a TODO; filtering like this is one plausible reading).
            if self.layers_list is not None and name not in self.layers_list:
                continue
            if self.prefix is not None:
                name = '%s-%s' % (self.prefix, name)
            self.summary_writer.add_histogram(name, value.asnumpy().flatten())
Example #2
import logging

import numpy as np


class ParseLogCallback(object):
    """
    1. log distribution's std to tensorboard (as distribution)
    This function make use of mxnet's "monitor" module, and it's output to a log file.
    while training, it is possible to specify layers to be monitored.
    these layers will be printed to a given log file,
    their values are computed **asynchronously**.

    2. log training loss to tensorboard (as scalar)

    Currently - does not support resume training..
    """
    def __init__(self, dist_logging_dir=None, scalar_logging_dir=None,
                 logfile_path=None, batch_size=None, iter_monitor=0,
                 frequent=None, prefix='ssd'):
        self.scalar_logging_dir = scalar_logging_dir
        self.dist_logging_dir = dist_logging_dir
        self.logfile_path = logfile_path
        self.batch_size = batch_size
        self.iter_monitor = iter_monitor
        self.frequent = frequent
        self.prefix = prefix
        self.batch = 0
        self.line_idx = 0
        try:
            from tensorboard import SummaryWriter
            self.dist_summary_writer = SummaryWriter(dist_logging_dir)
            self.scalar_summary_writer = SummaryWriter(scalar_logging_dir)
        except ImportError:
            logging.error('You can install tensorboard via `pip install tensorboard`.')

    def __call__(self, param):
        """Callback to parse a log file and and add params to TensorBoard."""

        # save distributions from the monitor output log
        if self.iter_monitor != 0 and self.batch % self.iter_monitor == 0:
            with open(self.logfile_path) as fp:
                # skip lines that were already parsed on a previous call
                for i in range(self.line_idx):
                    next(fp)
                for line in fp:
                    if line.startswith('Batch'):
                        line = line.split(' ')
                        line = [x for x in line if x]
                        layer_name = line[2]
                        layer_value = np.array(float(line[3].split('\t')[0])).flatten()
                        if np.isfinite(layer_value):
                            self.dist_summary_writer.add_histogram(layer_name, layer_value)
                    self.line_idx += 1

        # save training loss
        if self.frequent and self.batch % self.frequent == 0:
            if param.eval_metric is None:
                return
            name_value = param.eval_metric.get_name_value()
            for name, value in name_value:
                if self.prefix is not None:
                    name = '%s-%s' % (self.prefix, name)
                self.scalar_summary_writer.add_scalar(name, value, global_step=self.batch)
        self.batch += 1
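A minimal usage sketch (not part of the original snippet) of how ParseLogCallback could be wired into MXNet's `Module.fit` together with `mx.mon.Monitor`. The log-file path, monitor interval, pattern, and directory names below are illustrative assumptions.

import logging

import mxnet as mx

# Assumption: MXNet's Monitor emits its "Batch ..." statistics through the
# logging module, so routing the root logger to a file gives ParseLogCallback
# something to parse.
logging.basicConfig(filename='monitor.log', level=logging.INFO)

# Monitor all weight arrays every 100 batches (interval/pattern are assumed).
monitor = mx.mon.Monitor(interval=100, pattern='.*weight')

parse_cb = ParseLogCallback(dist_logging_dir='logs/dist',
                            scalar_logging_dir='logs/scalar',
                            logfile_path='monitor.log',
                            batch_size=32,
                            iter_monitor=100,
                            frequent=20)

# `module` and `train_iter` are assumed to already exist.
module.fit(train_iter,
           num_epoch=10,
           monitor=monitor,
           batch_end_callback=parse_cb)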
Example #4
        # LOGGING
        progress.update(progress.value + 1,
                        loss=loss_value.data.cpu().numpy()[0],
                        accuracy=accuracy_value.data.cpu().numpy()[0],
                        epoch=i + 1)

        if j % logging_step == 0:
            # LOSS ACCURACY
            writer.add_scalar('loss', loss_value.data[0], i * batch_number + j)
            writer.add_scalar('accuracy', accuracy_value.data[0],
                              i * batch_number + j)
            # PARAMS
            for name, param in net.named_parameters():
                writer.add_histogram(name,
                                     param.clone().cpu().data.numpy(),
                                     i * batch_number + j)

        if j % logging_text_step == 0:
            net.train(False)
            # STEP
            s = "non sopporto i giocatori di biliardo, i soprannomi, gli indecisi, i no"[
                0:75]
            s_final = s
            s = numpy.asarray([
                CHARS.index(c) if c in CHARS else CHARS.index(FILL_CHAR)
                for c in s
            ])
            s = to_categorical(s, num_classes=features_size)
            for k in range(500):
                c = net(
Example #5
import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from datetime import datetime
from tensorboard import SummaryWriter
resnet18 = models.resnet18(True)
writer = SummaryWriter('runs/' + datetime.now().strftime('%B%d  %H:%M:%S'))
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
for n_iter in range(100):
    M_global = torch.rand(1)  # value to keep
    writer.add_scalar('M_global', M_global[0], n_iter)
    x = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(x, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)
        x = torch.zeros(sample_rate * 2)
        for i in range(x.size(0)):
            x[i] = np.cos(
                freqs[n_iter // 10] * np.pi * float(i) /
                float(sample_rate))  # sound amplitude should in [-1, 1]
        writer.add_audio('Audio', x, n_iter)
        for name, param in resnet18.named_parameters():
            writer.add_histogram(name,
                                 param.clone().cpu().data.numpy(), n_iter)
        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
        writer.add_text('another Text',
                        'another text logged at step:' + str(n_iter), n_iter)

writer.close()
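After running this script, the event files land under `runs/`; launch `tensorboard --logdir runs` to browse the logged scalars, images, audio clips, histograms, and text.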
Example #6
            for param_group in optimG.param_groups:
                param_group['lr'] = opt.lr  # param_group['lr'] / 2

        if n_iter % 1000 == 1:
            writer.add_scalar('misc/learning', opt.lr, n_iter)
            print('dumping histogram')
            xG = netG(Variable(z).cuda())
            reconstructed, z = netD(xR)
            rec = reconstructed.data.cpu() / 2 + 0.5
            x = torch.cat([
                vutils.make_grid(rec, normalize=True, scale_each=True),
                vutils.make_grid(rec, normalize=False, scale_each=False)
            ], 2)
            writer.add_image('reconstructed real image', x, n_iter)
            reconstructed, z = netD(xG)
            rec = reconstructed.data.cpu() / 2 + 0.5
            x = torch.cat([
                vutils.make_grid(rec, normalize=True, scale_each=True),
                vutils.make_grid(rec, normalize=False, scale_each=False)
            ], 2)
            writer.add_image('reconstructed fake image', x, n_iter)
            fake = xG.data.cpu() / 2 + 0.5
            x = torch.cat([
                vutils.make_grid(fake, normalize=True, scale_each=True),
                vutils.make_grid(fake, normalize=False, scale_each=False)
            ], 2)
            writer.add_image('generated fake image', x, n_iter)
            fixed = netG(fixedNoise).data.cpu() / 2 + 0.5
            x = torch.cat([
                vutils.make_grid(fixed, normalize=True, scale_each=True),
                vutils.make_grid(fixed, normalize=False, scale_each=False)
            ], 2)
            writer.add_image('generated fake image with fixed noise', x, n_iter)
            torch.save(netD, 'netD' + socket.gethostname() + '.pth')
            torch.save(netG, 'netG' + socket.gethostname() + '.pth')
            for name, param in netG.named_parameters():
                if 'bn' in name:
                    continue
                writer.add_histogram('weight_G/' + name,
                                     param.clone().cpu().data.numpy(), n_iter)
                writer.add_histogram('grad_G/' + name,
                                     param.grad.clone().cpu().data.numpy(),
                                     n_iter)

            for name, param in netD.named_parameters():
                if 'bn' in name:
                    continue
                writer.add_histogram('weight_D/' + name,
                                     param.clone().cpu().data.numpy(), n_iter)
                writer.add_histogram('grad_D/' + name,
                                     param.grad.clone().cpu().data.numpy(),
                                     n_iter)
class LogMetricsCallback(object):
    """Log metrics periodically in TensorBoard.
    This callback works almost same as `callback.Speedometer`, but write TensorBoard event file
    for visualization. For more usage, please refer https://github.com/dmlc/tensorboard

    Parameters
    ----------
    logging_dir : str
        TensorBoard event file directory.
        After that, use `tensorboard --logdir=path/to/logs` to launch TensorBoard visualization.
    prefix : str
        Prefix for a metric name of `scalar` value.
        You might want to use this param to leverage the TensorBoard plot feature,
        where TensorBoard plots different curves in one graph when they have the same `name`.
        The following example shows the usage (how to compare a train and an eval metric
        in the same graph).

    Examples
    --------
    >>> # log train and eval metrics under different directories.
    >>> training_log = 'logs/train'
    >>> evaluation_log = 'logs/eval'
    >>> # in this case, each train/eval metric pair has the same name; you can add a
    >>> # prefix to separate them.
    >>> batch_end_callbacks = [mx.tensorboard.LogMetricsCallback(training_log)]
    >>> eval_end_callbacks = [mx.tensorboard.LogMetricsCallback(evaluation_log)]
    >>> # run
    >>> model.fit(train,
    >>>     ...
    >>>     batch_end_callback = batch_end_callbacks,
    >>>     eval_end_callback  = eval_end_callbacks)
    >>> # Then use `tensorboard --logdir=logs/` to launch TensorBoard visualization.
    """
    def __init__(self, logging_dir, score_store=False, prefix=None):
        self.prefix = prefix
        self.step = 0
        self.score_store = score_store
        try:
            from tensorboard import SummaryWriter
            self.summary_writer = SummaryWriter(logging_dir)
        except ImportError:
            logging.error(
                'You can install tensorboard via `pip install tensorboard`.')

    def __call__(self, param):
        """Callback to log training speed and metrics in TensorBoard."""
        self.step += 1
        if param.eval_metric is None:
            return
        name_value = param.eval_metric.get_name_value()
        if self.step % 20 == 0:
            for name, value in name_value:
                if self.prefix is not None:
                    name = '%s-%s' % (self.prefix, name)
                self.summary_writer.add_scalar(name, value, self.step)
        if self.step % 1000 == 0:
            im_ori = param.locals['data_batch'].label[0].asnumpy()
            im_rec = (param.locals['rec_img'])[0].asnumpy()
            im_ori = imageFromTensor(im_ori)
            im_rec = imageFromTensor(im_rec)
            self.summary_writer.add_image('im_ori', im_ori, self.step)
            self.summary_writer.add_image('im_rec', im_rec, self.step)

            if self.score_store:
                facenet_scores = param.locals['facenet_scores']
                self.summary_writer.add_scalar('scores_mean',
                                               facenet_scores.mean(),
                                               self.step)
                self.summary_writer.add_histogram('facenet_scores',
                                                  facenet_scores, self.step)
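The image-logging branch above relies on an imageFromTensor helper that the snippet does not include. A purely hypothetical sketch, assuming CHW float images in [-1, 1] (the (x + 1.0) * 127.5 convention used elsewhere on this page):

import numpy as np


def imageFromTensor(tensor):
    # Hypothetical helper, not from the original source: map a CHW float
    # array in [-1, 1] to an HWC uint8 image for SummaryWriter.add_image.
    img = (tensor + 1.0) * 127.5
    img = np.clip(img, 0, 255).astype(np.uint8)
    return img.transpose(1, 2, 0)  # CHW -> HWC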
Example #8
import cv2
import matplotlib.pyplot as plt
import mxnet as mx
import numpy as np
from tensorboard import SummaryWriter

# Generator, Discriminator, Mnist_Data_Processing, and Image_Data_Processing
# are project-local helpers that this snippet assumes are defined elsewhere.


def DCGAN(epoch, noise_size, batch_size, save_period, dataset):

    if dataset == 'MNIST':
        '''location of tensorboard save file'''
        logdir = 'tensorboard/MNIST/'
        summary_writer = SummaryWriter(logdir)
        train_iter, train_data_number = Mnist_Data_Processing(batch_size)  #all

    elif dataset == 'CIFAR10':
        '''location of tensorboard save file'''
        logdir = 'tensorboard/CIFAR10/'
        summary_writer = SummaryWriter(logdir)
        train_iter, train_data_number = Image_Data_Processing(
            batch_size, "CIFAR10")  #class by class

    elif dataset == 'ImageNet':
        '''location of tensorboard save file'''
        logdir = 'tensorboard/IMAGENET/'
        summary_writer = SummaryWriter(logdir)
        train_iter, train_data_number = Image_Data_Processing(
            batch_size, "ImageNet")  #face
    else:
        raise ValueError("no input data!!!")  # train_iter would be undefined below

    # Not needed, but must be declared.
    label = mx.nd.zeros((batch_size, ))
    '''Network'''
    generator = Generator()
    discriminator = Discriminator()
    context = mx.gpu(0)
    '''In the code below, the 'inputs_need_grad' parameter in the 'mod.bind' function is very important.'''

    # =============module G=============
    modG = mx.mod.Module(symbol=generator,
                         data_names=['noise'],
                         label_names=None,
                         context=context)
    modG.bind(data_shapes=[('noise', (batch_size, noise_size, 1, 1))],
              label_shapes=None,
              for_training=True)

    if dataset == 'MNIST':
        try:
            # load the saved modG data
            modG.load_params("MNIST_Weights/modG-10.params")
        except:
            pass

    if dataset == 'CIFAR10':
        try:
            # load the saved modG data
            modG.load_params("CIFAR10_Weights/modG-300.params")
        except:
            pass

    if dataset == 'ImageNet':
        try:
            #pass
            # load the saved modG data
            modG.load_params("ImageNet_Weights/modG-1000.params")
        except:
            pass

    modG.init_params(initializer=mx.initializer.Normal(sigma=0.02))
    modG.init_optimizer(optimizer='adam',
                        optimizer_params={
                            'learning_rate': 0.0002,
                            'beta1': 0.5
                        })

    # =============module discriminator[0],discriminator[1]=============
    modD_0 = mx.mod.Module(symbol=discriminator[0],
                           data_names=['data'],
                           label_names=None,
                           context=context)
    modD_0.bind(data_shapes=train_iter.provide_data,
                label_shapes=None,
                for_training=True,
                inputs_need_grad=True)

    if dataset == 'MNIST':
        try:
            # load the saved modD_0 data
            modD_0.load_params("MNIST_Weights/modD_0-10.params")
        except:
            pass
    if dataset == 'CIFAR10':
        try:
            # load the saved modD_0 data
            modD_0.load_params("CIFAR10_Weights/modD_0-200.params")
        except:
            pass

    if dataset == 'ImageNet':
        #pass
        try:
            # load the saved modD_0 data
            modD_0.load_params("ImageNet_Weights/modD_0-1000.params")
        except:
            pass

    modD_0.init_params(initializer=mx.initializer.Normal(sigma=0.02))
    modD_0.init_optimizer(optimizer='adam',
                          optimizer_params={
                              'learning_rate': 0.0002,
                              'beta1': 0.5
                          })
    """
    Parameters
    shared_module : Module
        Default is `None`. This is used in bucketing. When not `None`, the shared module
        essentially corresponds to a different bucket -- a module with different symbol
        but with the same sets of parameters (e.g. unrolled RNNs with different lengths).

    Here, to share the Discriminator parameters, we must use shared_module=modD_0
    """
    modD_1 = mx.mod.Module(symbol=discriminator[1],
                           data_names=['data'],
                           label_names=None,
                           context=context)
    modD_1.bind(data_shapes=train_iter.provide_data,
                label_shapes=None,
                for_training=True,
                inputs_need_grad=True,
                shared_module=modD_0)
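    # Note (assumed behavior, consistent with the shared_module docstring
    # above): because modD_1 is bound with shared_module=modD_0, both modules
    # hold the same parameter arrays, so an update made through either module
    # is immediately visible in the other.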

    # =============generate image=============
    column_size = 10
    row_size = 10
    test_mod = mx.mod.Module(symbol=generator,
                             data_names=['noise'],
                             label_names=None,
                             context=context)
    test_mod.bind(data_shapes=[
        mx.io.DataDesc(name='noise',
                       shape=(column_size * row_size, noise_size, 1, 1))
    ],
                  label_shapes=None,
                  shared_module=modG,
                  for_training=False,
                  grad_req='null')
    '''############ Although not strictly required, the following code should be declared. #################'''
    '''make evaluation method 1 - Using existing ones.
        metrics = {
        'acc': Accuracy,
        'accuracy': Accuracy,
        'ce': CrossEntropy,
        'f1': F1,
        'mae': MAE,
        'mse': MSE,
        'rmse': RMSE,
        'top_k_accuracy': TopKAccuracy
    }'''

    metric = mx.metric.create(['acc', 'mse'])
    '''make evaluation method 2 - Making new things.'''
    '''
    Custom evaluation metric that takes a NDArray function.
    Parameters:
    •feval (callable(label, pred)) – Customized evaluation function.
    •name (str, optional) – The name of the metric.
    •allow_extra_outputs (bool) – If true, the prediction outputs can have extra outputs.
    This is useful in RNN, where the states are also produced in outputs for forwarding.
    '''
    def zero(label, pred):
        return 0

    null = mx.metric.CustomMetric(zero)

    ####################################training loop############################################
    # =============train===============
    for epoch in range(1, epoch + 1):
        Max_cost_0 = 0
        Max_cost_1 = 0
        Min_cost = 0
        total_batch_number = np.ceil(train_data_number / (batch_size * 1.0))
        train_iter.reset()
        for batch in train_iter:

            noise = mx.random.uniform(low=-1.0,
                                      high=1.0,
                                      shape=(batch_size, noise_size, 1, 1),
                                      ctx=context)
            modG.forward(data_batch=mx.io.DataBatch(data=[noise], label=None),
                         is_train=True)
            modG_output = modG.get_outputs()

            ################################updating only parameters related to modD.########################################
            # update discriminator on noise data
            '''MAX : modD_1 : cost : (-mx.symbol.log(1-discriminator2))  - noise data Discriminator update , bigger and bigger -> smaller and smaller discriminator2'''

            modD_1.forward(data_batch=mx.io.DataBatch(data=modG_output,
                                                      label=None),
                           is_train=True)
            '''Max_Cost of noise data Discriminator'''
            Max_cost_1 += modD_1.get_outputs()[0].asnumpy().astype(np.float32)
            modD_1.backward()
            modD_1.update()

            # updating discriminator on real data
            '''MAX : modD_0 : cost: (-mx.symbol.log(discriminator2)) real data Discriminator update , bigger and bigger discriminator2'''
            modD_0.forward(data_batch=batch, is_train=True)
            '''Max_Cost of real data Discriminator'''
            Max_cost_0 += modD_0.get_outputs()[0].asnumpy().astype(np.float32)
            modD_0.backward()
            modD_0.update()

            ################################updating only parameters related to modG.########################################
            # update generator on noise data
            '''MIN : modD_0 : cost : (-mx.symbol.log(discriminator2)) - noise data Discriminator update  , bigger and bigger discriminator2'''
            modD_0.forward(data_batch=mx.io.DataBatch(data=modG_output,
                                                      label=None),
                           is_train=True)
            modD_0.backward()
            '''Min_cost of the Generator on noise data'''
            Min_cost += modD_0.get_outputs()[0].asnumpy().astype(np.float32)

            diff_v = modD_0.get_input_grads()
            modG.backward(diff_v)
            modG.update()
        '''tensorboard part'''
        Max_C = ((Max_cost_0 + Max_cost_1) / total_batch_number).mean()
        Min_C = (Min_cost / total_batch_number).mean()

        arg_params, aux_params = modG.get_params()
        #write scalar values

        summary_writer.add_scalar(name="Max_cost",
                                  scalar_value=Max_C,
                                  global_step=epoch)
        summary_writer.add_scalar(name="Min_cost",
                                  scalar_value=Min_C,
                                  global_step=epoch)

        #write matrix values

        summary_writer.add_histogram(
            name="g1_weight", values=arg_params["g1_weight"].asnumpy().ravel())
        summary_writer.add_histogram(
            name="g2_weight", values=arg_params["g2_weight"].asnumpy().ravel())
        summary_writer.add_histogram(
            name="g3_weight", values=arg_params["g3_weight"].asnumpy().ravel())
        summary_writer.add_histogram(
            name="g4_weight", values=arg_params["g4_weight"].asnumpy().ravel())
        summary_writer.add_histogram(
            name="g5_weight", values=arg_params["g5_weight"].asnumpy().ravel())

        # cost print
        print("epoch : {}".format(epoch))
        print("Max Discriminator Cost : {}".format(Max_C))
        print("Min Generator Cost : {}".format(Min_C))

        #Save the data
        if epoch % save_period == 0:

            # write image values
            generate_image = modG_output[0][0].asnumpy()  # only one image
            generate_image = (generate_image + 1.0) * 127.5
            '''
            Args:
            tag: A name for the generated node. Will also serve as a series name in
            TensorBoard.
            tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
            channels]` where `channels` is 1, 3, or 4.
            '''
            # only dtype uint8 is handled here; clipping before the cast
            # (as in the commented-out line further below) would be safer.
            generate_image = generate_image.astype(np.uint8)
            summary_writer.add_image(
                tag='generate_image_epoch_{}'.format(epoch),
                img_tensor=generate_image.transpose(1, 2, 0))

            print('Saving weights')
            if dataset == "MNIST":
                modG.save_params("MNIST_Weights/modG-{}.params".format(epoch))
                modD_0.save_params(
                    "MNIST_Weights/modD_0-{}.params".format(epoch))
            elif dataset == "CIFAR10":
                modG.save_params(
                    "CIFAR10_Weights/modG-{}.params".format(epoch))
                modD_0.save_params(
                    "CIFAR10_Weights/modD_0-{}.params".format(epoch))
            elif dataset == 'ImageNet':
                modG.save_params(
                    "ImageNet_Weights/modG-{}.params".format(epoch))
                modD_0.save_params(
                    "ImageNet_Weights/modD_0-{}.params".format(epoch))
            '''test_method-2'''
            test = mx.random.uniform(low=-1.0,
                                     high=1.0,
                                     shape=(column_size * row_size, noise_size,
                                            1, 1),
                                     ctx=context)
            test_mod.forward(
                data_batch=mx.io.DataBatch(data=[test], label=None))
            result = test_mod.get_outputs()[0]
            result = result.asnumpy()
            '''range adjustment: -1 ~ 1 -> 0 ~ 2 -> 0 ~ 255'''
            # result = np.clip((result + 1.0) * (255.0 / 2.0), 0, 255).astype(np.uint8)
            result = ((result + 1.0) * 127.5).astype(np.uint8)
            '''Upscale each image 2x in both dimensions (4x the pixel count)'''
            result = np.asarray([[
                cv2.resize(i, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)
                for i in im
            ] for im in result])

            result = result.transpose((0, 2, 3, 1))
            '''visualization'''
            fig, ax = plt.subplots(row_size,
                                   column_size,
                                   figsize=(column_size, row_size))
            fig.suptitle('generator')
            for j in range(row_size):
                for i in range(column_size):
                    ax[j][i].set_axis_off()
                    if dataset == "MNIST":
                        ax[j][i].imshow(result[i + j * column_size],
                                        cmap='gray')
                    elif dataset == "CIFAR10":
                        ax[j][i].imshow(result[i + j * column_size])
                    elif dataset == 'ImageNet':
                        ax[j][i].imshow(result[i + j * column_size])

            if dataset == "MNIST":
                fig.savefig(
                    "Generate_Image/DCGAN_MNIST_Epoch_{}.png".format(epoch))
            elif dataset == "CIFAR10":
                fig.savefig(
                    "Generate_Image/DCGAN_CIFAR10_Epoch_{}.png".format(epoch))
            elif dataset == 'ImageNet':
                fig.savefig(
                    "Generate_Image/DCGAN_ImageNet_Epoch_{}.png".format(epoch))

            plt.close(fig)

    print "Optimization complete."
    '''tensorboard_part'''
    summary_writer.close()

    #################################Generating Image####################################
    '''load method1 - load the training mod.get_params() directly'''
    #arg_params, aux_params = mod.get_params()
    '''Comment this out during training; uncomment only when running test data with 'load method2'.'''
    #test_mod.set_params(arg_params=arg_params, aux_params=aux_params)
    '''test_method-1'''
    '''
    noise = next(noise_iter)
    test_mod.forward(noise, is_train=False)
    result = test_mod.get_outputs()[0]
    result = result.asnumpy()
    print(np.shape(result))
    '''
    '''load method2 - using the shared_module'''
    """
    Parameters
    shared_module : Module
        Default is `None`. This is used in bucketing. When not `None`, the shared module
        essentially corresponds to a different bucket -- a module with different symbol
        but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
    """
    '''test_method-2'''
    test = mx.random.uniform(low=-1.0,
                             high=1.0,
                             shape=(column_size * row_size, noise_size, 1, 1),
                             ctx=context)
    test_mod.forward(data_batch=mx.io.DataBatch(data=[test], label=None))
    result = test_mod.get_outputs()[0]
    result = result.asnumpy()
    '''range adjustment: -1 ~ 1 -> 0 ~ 2 -> 0 ~ 255'''
    #result = np.clip((result + 1.0) * (255.0 / 2.0), 0, 255).astype(np.uint8)
    result = ((result + 1.0) * 127.5).astype(np.uint8)
    '''Upscale each image 2x in both dimensions (4x the pixel count)'''
    result = np.asarray([[
        cv2.resize(i, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)
        for i in im
    ] for im in result])

    result = result.transpose((0, 2, 3, 1))
    '''visualization'''
    fig, ax = plt.subplots(row_size,
                           column_size,
                           figsize=(column_size, row_size))
    fig.suptitle('generator')
    for j in range(row_size):
        for i in range(column_size):
            ax[j][i].set_axis_off()
            if dataset == "MNIST":
                ax[j][i].imshow(result[i + j * column_size], cmap='gray')
            elif dataset == "CIFAR10":
                ax[j][i].imshow(result[i + j * column_size])
            elif dataset == 'ImageNet':
                ax[j][i].imshow(result[i + j * column_size])

    if dataset == "MNIST":
        fig.savefig("Generate_Image/DCGAN_MNIST_Final.png")
    elif dataset == "CIFAR10":
        fig.savefig("Generate_Image/DCGAN_CIFAR10_Final.png")
    elif dataset == 'ImageNet':
        fig.savefig("Generate_Image/DCGAN_ImageNet_Final.png")

    plt.show()
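A hedged invocation sketch; the hyperparameter values below are illustrative assumptions, not taken from the original project:

if __name__ == "__main__":
    DCGAN(epoch=100,        # number of training epochs (assumed)
          noise_size=100,   # generator noise dimensionality (assumed)
          batch_size=128,   # assumed
          save_period=10,   # save weights/images every 10 epochs (assumed)
          dataset='MNIST')  # one of 'MNIST', 'CIFAR10', 'ImageNet'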