Code Example #1
 def test_reconstruction_error(self):
     """Tests the function `reconstruction_error`.
     
     A curve is saved at
     "tools/pseudo_visualization/reconstruction_error.png".
     The test is successful if the curve
     looks like the evolution of the cross
     entropy between a visible unit fixed to
     1.0 and a reconstruction moving from 0.0 to 1.0.
     
     """
     nb_points = 999
     
     # The visible unit is modeled as
     # the probability that a Bernoulli
     # random variable turns on.
     visible_unit = 1.
     reconstruction = numpy.linspace(0.001, 0.999, num=nb_points)
     rec_error = numpy.reshape(-visible_unit*numpy.log(reconstruction) -
         (1. - visible_unit)*numpy.log(1. - reconstruction), (1, nb_points))
     tls.plot_graphs(reconstruction,
                     rec_error,
                     'reconstruction',
                     'reconstruction error',
                     ['visible unit = 1.'],
                     ['b'],
                     'Evolution of the reconstruction error with the reconstruction',
                     'tools/pseudo_visualization/reconstruction_error.png')
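
The function `reconstruction_error` itself is not shown in this excerpt; the curve checked above is simply the binary cross entropy evaluated point by point. A minimal numpy sketch of a cross-entropy based `reconstruction_error`, consistent with the formula used in the test (the exact signature and reduction in `tls` are assumptions), could look like this:

import numpy

def reconstruction_error(visible_units, reconstruction):
    """Computes the mean binary cross entropy between the visible units
    and their reconstruction.

    The reconstruction must contain values in the open interval ]0, 1[
    so that the logarithms stay finite.
    """
    cross_entropy = -visible_units*numpy.log(reconstruction) \
        - (1. - visible_units)*numpy.log(1. - reconstruction)
    return numpy.mean(cross_entropy)
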
Code Example #2
 def test_kl_divergence(self):
     """Tests the function `kl_divergence`.
     
     A curve is saved at
     "tools/pseudo_visualization/kl_divergence.png".
     The test is successful if the curve
     is convex and its minimum is 0.
     
     """
     nb_points = 201
     z_log_std_squared = numpy.linspace(-5., 5., num=nb_points)
     kl_divergence = numpy.reshape(0.5*(-1. - z_log_std_squared +
         numpy.exp(z_log_std_squared)), (1, nb_points))
     tls.plot_graphs(z_log_std_squared,
                     kl_divergence,
                     'log of the std squared',
                     'KL divergence',
                     ['zero mean'],
                     ['b'],
                     'Evolution of the KL divergence with the log of the std squared',
                     'tools/pseudo_visualization/kl_divergence.png')
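
The curve in this test is the zero-mean case of the closed-form KL divergence between a diagonal Gaussian N(mean, std^2) and the standard normal prior, i.e. 0.5*(mean^2 + std^2 - 1 - log(std^2)). A minimal sketch consistent with that formula (the exact signature and reduction of `kl_divergence` in `tls` are assumptions) could be:

import numpy

def kl_divergence(z_mean, z_log_std_squared):
    """Computes the mean KL divergence of the diagonal Gaussian
    N(z_mean, exp(z_log_std_squared)) from the standard normal prior.

    For each latent variable, the divergence is
    0.5*(mean**2 + std**2 - 1 - log(std**2)).
    """
    kl = 0.5*(numpy.square(z_mean) + numpy.exp(z_log_std_squared)
              - 1. - z_log_std_squared)
    return numpy.mean(kl)
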
Code Example #3
 def test_plot_graphs(self):
     """Tests the function `plot_graphs`.
     
     A plot is saved at
     "tools/pseudo_visualization/plot_graphs.png".
     The test is successful if the two
     graphs in the plot are consistent
     with the legend.
     
     """
     x_values = numpy.linspace(-5., 5., num=101)
     y_values = numpy.zeros((2, 101))
     y_values[0, :] = 1.7159*numpy.tanh((2./3)*x_values)
     y_values[1, :] = 1./(1. + numpy.exp(-x_values))
     tls.plot_graphs(x_values,
                     y_values,
                     'input',
                     'neural activation',
                     ['scaled tanh', 'sigmoid'],
                     ['b', 'r'],
                     'Evolution of the neural activation with the input',
                     'tools/pseudo_visualization/plot_graphs.png')
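
`tls.plot_graphs` is not part of this excerpt. Judging from how it is called in these tests (a 1D array of x-values, a 2D array with one row of y-values per graph, axis labels, a list of legend entries, a list of matplotlib color codes, a title and a save path), a minimal matplotlib sketch of it could be the following; the real implementation in `tls` may differ:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

def plot_graphs(x_values, y_values, x_label, y_label, legend, colors,
                title, path):
    """Plots several graphs sharing the same x-axis and saves the figure.

    `y_values` has one row per graph; `legend` and `colors` each have
    one entry per row.
    """
    plt.figure()
    for i in range(y_values.shape[0]):
        plt.plot(x_values, y_values[i, :], colors[i], label=legend[i])
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend()
    plt.savefig(path)
    plt.close()
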
Code Example #4
 def test_relu(self):
     """Tests the function `relu`.
     
     A plot is saved at
     "tools/pseudo_visualization/relu.png".
     The test is successful if the curve
     of ReLU is consistent with the curve
     of its derivative.
     
     """
     nb_x = 1001
     x_values = numpy.linspace(-5., 5., num=nb_x)
     y_values = numpy.zeros((2, nb_x))
     y_values[0, :] = tls.relu(x_values)
     y_values[1, :] = tls.relu_derivative(x_values)
     tls.plot_graphs(x_values,
                     y_values,
                     '$x$',
                     '$y$',
                     ['$f(x) = $ReLU$(x)$', r'$\partial f(x) / \partial x$'],
                     ['r', 'b'],
                     'ReLU and its derivative',
                     'tools/pseudo_visualization/relu.png')
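
`tls.relu` and `tls.relu_derivative` are also not shown here. Minimal numpy versions consistent with the test above (the derivative at 0 is conventionally set to 0; the actual implementations in `tls` may differ) would be:

import numpy

def relu(x_values):
    """Computes ReLU element-wise: max(x, 0)."""
    return numpy.maximum(x_values, 0.)

def relu_derivative(x_values):
    """Computes the derivative of ReLU element-wise: 1 if x > 0, 0 otherwise."""
    return (x_values > 0.).astype(x_values.dtype)
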
Code Example #5
            print('Training loss of density approximation: {}'.format(
                loss_density_approx[0, counter]))
            print('Quantization bin width: {}'.format(entropy_ae.bin_width))
            counter += 1
        entropy_ae.training_fct(training_float64)
        entropy_ae.training_eae_bw(training_float64)

    evenly_spaced = numpy.linspace(100,
                                   args.nb_epochs_training,
                                   num=nb_measures - 1,
                                   dtype=numpy.int32)
    x_values = numpy.concatenate(
        (numpy.ones(1, dtype=numpy.int32), evenly_spaced))
    tls.plot_graphs(
        x_values, scaled_approx_entropy, 'epoch',
        'scaled approximate entropy of the quantized latent variables',
        ['training'], ['r'],
        'Evolution of the scaled approximate entropy over epochs',
        os.path.join(path_to_checking_l, 'scaled_approximate_entropy.png'))
    tls.plot_graphs(
        x_values, rec_error, 'epoch', 'reconstruction error', ['training'],
        ['r'], 'Evolution of the reconstruction error over epochs',
        os.path.join(path_to_checking_l, 'reconstruction_error.png'))
    tls.plot_graphs(
        x_values, loss_density_approx, 'epoch',
        'loss of density approximation', ['training'], ['r'],
        'Evolution of the loss of density approximation over epochs',
        os.path.join(path_to_checking_l, 'loss_density_approximation.png'))
    t_stop = time.time()
    nb_minutes = int((t_stop - t_start) / 60)
    print('\nTraining time: {} minutes.'.format(nb_minutes))
Code Example #6
                entropy_ae.checking_p_3(
                    'encoder', 1,
                    os.path.join(path_to_checking_p,
                                 'image_gamma_1_' + str_epoch + '.png'))
            entropy_ae.save(sess, path_to_model, path_to_nb_itvs_per_side_save)

    # The optional argument `dtype` in
    # the function `numpy.linspace` was
    # introduced in Numpy 1.9.0.
    x_values = numpy.linspace(1,
                              args.nb_epochs_training,
                              num=args.nb_epochs_training,
                              dtype=numpy.int32)
    tls.plot_graphs(
        x_values, mean_approx_entropy, 'epoch',
        'mean approximate entropy of the quantized latent variables',
        ['training', 'validation'], ['r', 'b'],
        'Evolution of the mean approximate entropy over epochs',
        os.path.join(path_to_checking_l, 'mean_approximate_entropy.png'))
    tls.plot_graphs(x_values, mean_disc_entropy, 'epoch',
                    'mean entropy of the quantized latent variables',
                    ['training', 'validation'], ['r', 'b'],
                    'Evolution of the mean entropy over epochs',
                    os.path.join(path_to_checking_l, 'mean_entropy.png'))
    tls.plot_graphs(
        x_values, scaled_approx_entropy, 'epoch',
        'scaled cumulated approximate entropy of the quantized latent variables',
        ['training', 'validation'], ['r', 'b'],
        'Evolution of the scaled cumulated approximate entropy over epochs',
        os.path.join(path_to_checking_l, 'scaled_approximate_entropy.png'))
    tls.plot_graphs(
        x_values, rec_error, 'epoch', 'reconstruction error',
Code Example #7
                             'ar_mean_' + str_epoch + '.png'),
                os.path.join(path_to_checking_a,
                             'ar_log_std_squared_' + str_epoch + '.png'),
                os.path.join(path_to_checking_a,
                             'image_ar_l1_' + str_epoch + '.png'))

    # The optional argument `dtype` in the
    # function `numpy.linspace` was introduced
    # in Numpy 1.9.0.
    x_values = numpy.linspace(1,
                              args.nb_epochs_training,
                              num=args.nb_epochs_training,
                              dtype=numpy.int32)
    tls.plot_graphs(
        x_values, scaled_kld, 'epoch',
        'scaled KL divergence of the approximate posterior from the prior',
        ['training', 'validation'], ['r', 'b'],
        'Evolution of scaled KL divergence over epochs',
        os.path.join(path_to_checking_l, 'scaled_kld.png'))
    tls.plot_graphs(
        x_values, rec_error, 'epoch', 'reconstruction error',
        ['training', 'validation'], ['r', 'b'],
        'Evolution of the reconstruction error over epochs',
        os.path.join(path_to_checking_l, 'reconstruction_error.png'))
    tls.plot_graphs(x_values, w_decay, 'epoch', 'weight decay',
                    ['l2-norm weight decay'], ['b'],
                    'Evolution of the weight decay over epochs',
                    os.path.join(path_to_checking_l, 'weight_decay.png'))
    tls.plot_graphs(
        x_values, mean_magnitude, 'epoch', 'mean magnitude ratio', [
            'recognition l1', 'recognition mean',
            'recognition log std squared', 'generation l1', 'generation mean'
Code Example #8
                             'ae_latent_' + str_epoch + '.png'),
                os.path.join(path_to_checking_a,
                             'ae_noisy_latent_' + str_epoch + '.png'),
                os.path.join(path_to_checking_a,
                             'image_dead_zone_' + str_epoch + '.png'))

    # The optional argument `dtype` in the
    # function `numpy.linspace` was introduced
    # in Numpy 1.9.0.
    x_values = numpy.linspace(1,
                              args.nb_epochs_training,
                              num=args.nb_epochs_training,
                              dtype=numpy.int32)
    tls.plot_graphs(
        x_values, approx_entropy, 'epoch',
        'approximate entropy of the quantized latent variables',
        ['training', 'validation'], ['r', 'b'],
        'Evolution of the approximate entropy over epochs',
        os.path.join(path_to_checking_l, 'approximate_entropy.png'))
    tls.plot_graphs(x_values, disc_entropy, 'epoch',
                    'entropy of the quantized latent variables',
                    ['training', 'validation'], ['r', 'b'],
                    'Evolution of the entropy over epochs',
                    os.path.join(path_to_checking_l, 'entropy.png'))
    tls.plot_graphs(
        x_values, scaled_approx_entropy, 'epoch',
        'scaled approximate entropy of the quantized latent variables',
        ['training', 'validation'], ['r', 'b'],
        'Evolution of the scaled approximate entropy over epochs',
        os.path.join(path_to_checking_l, 'scaled_approximate_entropy.png'))
    tls.plot_graphs(
        x_values, rec_error, 'epoch', 'reconstruction error',