    def test_float_to_str(self):
        """Tests the function `float_to_str`.

        The test is successful if, for each
        float to be converted, "." is replaced
        by "dot" if the float is not a whole
        number and "-" is replaced by "minus".

        """
        float_0 = 2.3
        print('1st float to be converted: {}'.format(float_0))
        print('1st string: {}'.format(tls.float_to_str(float_0)))
        float_1 = -0.01
        print('2nd float to be converted: {}'.format(float_1))
        print('2nd string: {}'.format(tls.float_to_str(float_1)))
        float_2 = 3.
        print('3rd float to be converted: {}'.format(float_2))
        print('3rd string: {}'.format(tls.float_to_str(float_2)))
        float_3 = 0.
        print('4th float to be converted: {}'.format(float_3))
        print('4th string: {}'.format(tls.float_to_str(float_3)))
        float_4 = -4.
        print('5th float to be converted: {}'.format(float_4))
        print('5th string: {}'.format(tls.float_to_str(float_4)))
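
# A minimal sketch (not the original implementation) of the behavior that
# `test_float_to_str` expects from `tls.float_to_str`, assuming the docstring
# above describes it fully; `float_to_str_sketch` is a hypothetical name.
def float_to_str_sketch(value):
    """Converts a float into a string without "." and "-".

    A whole number is written without "dot": 3.0 yields "3" and
    -4.0 yields "minus4". Otherwise, "." is replaced by "dot":
    2.3 yields "2dot3" and -0.01 yields "minus0dot01".

    """
    if value == int(value):
        text = str(int(value))
    else:
        text = str(value).replace('.', 'dot')
    return text.replace('-', 'minus')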
    def test_save_statistics(self):
        """Tests the function `save_statistics` in the file "lossless/stats.py".
        
        The test is successful if the directory at "lossless/pseudo_data/save_statistics/"
        contains the files "'map_mean.npy", "idx_map_exception.pkl", "binary_probabilities_0dot5.npy",
        and "binary_probabilities_20.npy".
        
        """
        batch_size = 2
        multipliers = numpy.array([0.5, 20.], dtype=numpy.float32)

        # The entropy autoencoder is randomly initialized.
        path_to_nb_itvs_per_side_load = ''
        path_to_restore = ''
        path_to_stats = 'lossless/pseudo_data/save_statistics/'
        path_to_map_mean = os.path.join(path_to_stats, 'map_mean.npy')
        path_to_idx_map_exception = os.path.join(path_to_stats,
                                                 'idx_map_exception.pkl')
        paths_to_binary_probabilities = [
            os.path.join(
                path_to_stats, 'binary_probabilities_{}.npy'.format(
                    tls.float_to_str(multipliers[i].item())))
            for i in range(multipliers.size)
        ]
        luminances_uint8 = numpy.random.randint(0,
                                                high=256,
                                                size=(2 * batch_size, 32, 64,
                                                      1),
                                                dtype=numpy.uint8)

        # The 4th, 5th, and 7th arguments have no importance.
        entropy_ae = EntropyAutoencoder(batch_size, luminances_uint8.shape[1],
                                        luminances_uint8.shape[2], 1., 10000.,
                                        path_to_nb_itvs_per_side_load, False)
        with tf.Session() as sess:
            entropy_ae.initialization(sess, path_to_restore)
            lossless.stats.save_statistics(luminances_uint8, sess, entropy_ae,
                                           batch_size, multipliers, 10,
                                           path_to_map_mean,
                                           path_to_idx_map_exception,
                                           paths_to_binary_probabilities)
        print('Files in the directory at {}:'.format(path_to_stats))
        print(os.listdir(path_to_stats))
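
# A hedged helper sketch (an addition, not in the original file): the
# docstring of `test_save_statistics` lists the four files that
# `save_statistics` is expected to write, so their presence can be asserted
# instead of being inspected visually via `os.listdir`.
def assert_statistics_files_exist(path_to_stats):
    """Raises an `AssertionError` if an expected statistics file is missing."""
    expected_files = ('map_mean.npy',
                      'idx_map_exception.pkl',
                      'binary_probabilities_0dot5.npy',
                      'binary_probabilities_20.npy')
    for filename in expected_files:
        assert os.path.isfile(os.path.join(path_to_stats, filename)), \
            '"{}" is missing.'.format(filename)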
    parser.add_argument('--nb_epochs_training',
                        help='number of training epochs',
                        type=parsing.parsing.int_strictly_positive,
                        default=5000,
                        metavar='')
    parser.add_argument(
        '--nb_training',
        help='number of examples in the small portion of the SVHN training set',
        type=parsing.parsing.int_strictly_positive,
        default=10,
        metavar='')
    args = parser.parse_args()

    path_to_training_data = 'svhn/results/training_data.npy'
    path_to_mean_training = 'svhn/results/mean_training.npy'
    path_to_std_training = 'svhn/results/std_training.npy'
    if args.learn_bin_width:
        suffix = 'learning_bw_{0}_{1}'.format(
            tls.float_to_str(args.bin_width_init),
            tls.float_to_str(args.gamma))
    else:
        suffix = '{0}_{1}'.format(tls.float_to_str(args.bin_width_init),
                                  tls.float_to_str(args.gamma))
    path_to_checking_l = os.path.join(
        'eae/visualization/overfitting/checking_loss/', suffix)
    if not os.path.isdir(path_to_checking_l):
        os.makedirs(path_to_checking_l)
    path_to_checking_c = os.path.join(
        'eae/visualization/overfitting/checking_compression/', suffix)
    if not os.path.isdir(path_to_checking_c):
        os.makedirs(path_to_checking_c)
    if args.nb_epochs_training % 100 != 0:
        raise ValueError(
            'The number of training epochs is not divisible by 100.')
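
# A hedged sketch (an assumption, not the original code) of what
# `parsing.parsing.int_strictly_positive` presumably does: an argparse type
# function that rejects any value that is not a strictly positive integer.
# It assumes `import argparse`; `int_strictly_positive_sketch` is a
# hypothetical name.
def int_strictly_positive_sketch(string):
    """Converts a string into a strictly positive integer for argparse."""
    value = int(string)
    if value <= 0:
        raise argparse.ArgumentTypeError(
            '{} is not a strictly positive integer.'.format(string))
    return value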
    parser.add_argument('--activation_value',
                        help='activation value',
                        type=float,
                        default=8.,
                        metavar='')
    args = parser.parse_args()

    # The height and the width of the decoder
    # output are not important. They must be
    # larger than 64.
    h_in = 256
    w_in = 256
    if args.learn_bin_widths:
        suffix = 'learning_bw_{0}_{1}'.format(
            tls.float_to_str(args.bin_width_init),
            tls.float_to_str(args.gamma_scaling))
    else:
        suffix = '{0}_{1}'.format(tls.float_to_str(args.bin_width_init),
                                  tls.float_to_str(args.gamma_scaling))
    suffix_idx_training = '{0}/training_index_{1}'.format(
        suffix, args.idx_training)
    path_to_nb_itvs_per_side_load = 'eae/results/{0}/nb_itvs_per_side_{1}.pkl'.format(
        suffix, args.idx_training)
    path_to_restore = 'eae/results/{0}/model_{1}.ckpt'.format(
        suffix, args.idx_training)
    path_to_map_mean = 'lossless/results/{}/map_mean.npy'.format(
        suffix_idx_training)
    path_to_directory_crop = os.path.join(
        'eae/visualization/test/checking_activating/', suffix_idx_training,
        '{0}_{1}'.format(args.idx_map_activation + 1,
                         tls.float_to_str(args.activation_value)))
def vary_gamma_fix_bin_widths(reference_uint8, bin_width_init, idxs_training,
                              gammas_scaling, batch_size, path_to_checking_r,
                              list_rotation, positions_top_left):
    """Computes a series of pairs (rate, PSNR).
    
    Several entropy autoencoders, each trained with a
    different scaling coefficient, are considered. At
    training time, the quantization bin widths were fixed.
    For each scaling coefficient, for each luminance
    image, the pair (rate, PSNR) associated to the
    compression of the luminance image via the entropy
    autoencoder trained with the scaling coefficient
    is computed.
    
    Parameters
    ----------
    reference_uint8 : numpy.ndarray
        3D array with data-type `numpy.uint8`.
        Luminance images. `reference_uint8[i, :, :]`
        is the ith luminance image.
    bin_width_init : float
        Value of the quantization bin widths
        at the beginning of the 1st training.
        In this function, the quantization bin
        widths are the same at training time
        and at test time.
    idxs_training : numpy.ndarray
        1D array with data-type `numpy.int32`.
        Its ith element is the training phase
        index of the entropy autoencoder trained
        with the ith scaling coefficient.
    gammas_scaling : numpy.ndarray
        1D array with data-type `numpy.float64`.
        Scaling coefficients.
    batch_size : int
        Size of the mini-batches for encoding
        and decoding via the entropy autoencoders.
    path_to_checking_r : str
        Path to the folder containing the
        luminance images before/after being
        compressed via entropy autoencoders.
    list_rotation : list
        Each integer in this list is the index
        of a rotated luminance image.
    positions_top_left : numpy.ndarray
        2D array with data-type `numpy.int32`.
        This array is dedicated to visualization.
        `positions_top_left[:, i]` contains the
        row and the column of the image pixel at
        the top-left of the ith crop of each
        luminance image after being compressed
        via entropy autoencoders.
    
    Returns
    -------
    tuple
        numpy.ndarray
            2D array with data-type `numpy.float64`.
            The element at the position [i, j] in this
            array is the rate associated to the compression
            of the jth luminance image via the entropy
            autoencoder trained with the ith scaling
            coefficient.
        numpy.ndarray
            2D array with data-type `numpy.float64`.
            The element at the position [i, j] in this
            array is the PSNR associated to the compression
            of the jth luminance image via the entropy
            autoencoder trained with the ith scaling
            coefficient.
    
    Raises
    ------
    ValueError
        If `gammas_scaling.size` is not equal to
        `idxs_training.size`.
    
    """
    nb_points = gammas_scaling.size
    if idxs_training.size != nb_points:
        raise ValueError(
            '`gammas_scaling.size` is not equal to `idxs_training.size`.')
    (nb_images, h_in, w_in) = reference_uint8.shape
    rate = numpy.zeros((nb_points, nb_images))
    psnr = numpy.zeros((nb_points, nb_images))
    for i in range(nb_points):
        gamma_scaling = gammas_scaling[i].item()
        idx_training = idxs_training[i].item()
        suffix = '{0}_{1}'.format(tls.float_to_str(bin_width_init),
                                  tls.float_to_str(gamma_scaling))
        path_to_nb_itvs_per_side_load = 'eae/results/{0}/nb_itvs_per_side_{1}.pkl'.format(
            suffix, idx_training)
        path_to_restore = 'eae/results/{0}/model_{1}.ckpt'.format(
            suffix, idx_training)
        path_to_storage = os.path.join(
            path_to_checking_r, 'reconstruction_vary_gamma_fix_bin_widths',
            suffix)
        if not os.path.isdir(path_to_storage):
            os.makedirs(path_to_storage)

        # Every time `gamma_scaling` changes, a new
        # entropy autoencoder is created.
        entropy_ae = EntropyAutoencoder(batch_size, h_in, w_in, bin_width_init,
                                        gamma_scaling,
                                        path_to_nb_itvs_per_side_load, False)
        with tf.Session() as sess:
            entropy_ae.initialization(sess, path_to_restore)
            y_float32 = eae.batching.encode_mini_batches(
                numpy.expand_dims(reference_uint8, axis=3), sess, entropy_ae,
                batch_size)

            # `bin_widths` are the quantization bin widths
            # at training time.
            bin_widths = entropy_ae.get_bin_widths()

        # The graph of the entropy autoencoder is destroyed.
        tf.reset_default_graph()

        # Every time `gamma_scaling` changes, a new
        # decoder is created.
        isolated_decoder = IsolatedDecoder(batch_size, h_in, w_in, False)
        quantized_y_float32 = tls.quantize_per_map(y_float32, bin_widths)
        with tf.Session() as sess:
            isolated_decoder.initialization(sess, path_to_restore)
            expanded_reconstruction_uint8 = eae.batching.decode_mini_batches(
                quantized_y_float32, sess, isolated_decoder, batch_size)

        # The elements of `reconstruction_uint8` span
        # the range [|16, 235|].
        reconstruction_uint8 = numpy.squeeze(expanded_reconstruction_uint8,
                                             axis=3)

        # The graph of the decoder is destroyed.
        tf.reset_default_graph()
        for j in range(nb_images):
            rate[i, j] = tls.rate_3d(quantized_y_float32[j, :, :, :],
                                     bin_widths, h_in, w_in)
            psnr[i, j] = tls.psnr_2d(reference_uint8[j, :, :],
                                     reconstruction_uint8[j, :, :])

            paths = [
                os.path.join(path_to_storage,
                             'reconstruction_{}.png'.format(j))
            ]
            paths += [
                os.path.join(
                    path_to_storage,
                    'reconstruction_{0}_crop_{1}.png'.format(j, index_crop))
                for index_crop in range(positions_top_left.shape[1])
            ]
            tls.visualize_rotated_luminance(reconstruction_uint8[j, :, :],
                                            j in list_rotation,
                                            positions_top_left, paths)
    return (rate, psnr)
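
# A minimal sketch of `tls.psnr_2d` under the standard definition of PSNR
# for 8-bit images (an assumption about the original implementation, which
# is not shown here): the peak value is 255 and the distortion is the mean
# squared error; `psnr_2d_sketch` is a hypothetical name.
def psnr_2d_sketch(reference_uint8, reconstruction_uint8):
    """Computes the PSNR in dB between two 8-bit grayscale images."""
    difference_float64 = reference_uint8.astype(numpy.float64) \
        - reconstruction_uint8.astype(numpy.float64)
    mse = numpy.mean(difference_float64**2)

    # If the two images are identical, `mse` is 0 and the PSNR is infinite.
    return 10.*numpy.log10((255.**2)/mse)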
def fix_gamma(reference_uint8, bin_width_init, multipliers, idx_training,
              gamma_scaling, batch_size, are_bin_widths_learned, is_lossless,
              path_to_checking_r, list_rotation, positions_top_left):
    """Computes a series of pairs (rate, PSNR).
    
    A single entropy autoencoder is considered. At training
    time, the quantization bin widths were either fixed or
    learned.
    For each multiplier, the quantization bin widths
    at the end of the training are multiplied by the
    multiplier, yielding a set of test quantization bin
    widths. Then, for each set of test quantization bin
    widths, for each luminance image, the pair (rate, PSNR)
    associated to the compression of the luminance image
    via the single entropy autoencoder and the set of
    test quantization bin widths is computed.
    
    Parameters
    ----------
    reference_uint8 : numpy.ndarray
        3D array with data-type `numpy.uint8`.
        Luminance images. `reference_uint8[i, :, :]`
        is the ith luminance image.
    bin_width_init : float
        Value of the quantization bin widths
        at the beginning of the 1st training.
    multipliers : numpy.ndarray
        1D array with data-type `numpy.float32`.
        Multipliers.
    idx_training : int
        Training phase index of the single
        entropy autoencoder.
    gamma_scaling : float
        Scaling coefficient of the single
        entropy autoencoder.
    batch_size : int
        Size of the mini-batches for encoding
        and decoding via the single entropy
        autoencoder.
    are_bin_widths_learned : bool
        Were the quantization bin widths learned
        at training time?
    is_lossless : bool
        Are the quantized latent variables coded
        losslessly?
    path_to_checking_r : str
        Path to the folder containing the luminance
        images before/after being compressed via the
        single entropy autoencoder.
    list_rotation : list
        Each integer in this list is the index
        of a rotated luminance image.
    positions_top_left : numpy.ndarray
        2D array with data-type `numpy.int32`.
        This array is dedicated to visualization.
        `positions_top_left[:, i]` contains the
        row and the column of the image pixel at
        the top-left of the ith crop of each
        luminance image after being compressed
        via the single entropy autoencoder.
    
    Returns
    -------
    tuple
        numpy.ndarray
            2D array with data-type `numpy.float64`.
            The element at the position [i, j] in this
            array is the rate associated to the compression
            of the jth luminance image via the single
            entropy autoencoder and the ith set of test
            quantization bin widths.
        numpy.ndarray
            2D array with data-type `numpy.float64`.
            The element at the position [i, j] in this
            array is the PSNR associated to the compression
            of the jth luminance image via the single
            entropy autoencoder and the ith set of test
            quantization bin widths.
    
    """
    nb_points = multipliers.size
    (nb_images, h_in, w_in) = reference_uint8.shape
    rate = numpy.zeros((nb_points, nb_images))
    psnr = numpy.zeros((nb_points, nb_images))
    if are_bin_widths_learned:
        suffix = 'learning_bw_{0}_{1}'.format(tls.float_to_str(bin_width_init),
                                              tls.float_to_str(gamma_scaling))
    else:
        suffix = '{0}_{1}'.format(tls.float_to_str(bin_width_init),
                                  tls.float_to_str(gamma_scaling))
    path_to_nb_itvs_per_side_load = 'eae/results/{0}/nb_itvs_per_side_{1}.pkl'.format(
        suffix, idx_training)
    path_to_restore = 'eae/results/{0}/model_{1}.ckpt'.format(
        suffix, idx_training)
    if is_lossless:
        path_to_vis = os.path.join(path_to_checking_r,
                                   'reconstruction_fix_gamma', suffix,
                                   'lossless')
    else:
        path_to_vis = os.path.join(path_to_checking_r,
                                   'reconstruction_fix_gamma', suffix,
                                   'approx')
    path_to_stats = 'lossless/results/{0}/training_index_{1}/'.format(
        suffix, idx_training)
    path_to_map_mean = os.path.join(path_to_stats, 'map_mean.npy')

    # A single entropy autoencoder is created.
    entropy_ae = EntropyAutoencoder(batch_size, h_in, w_in, bin_width_init,
                                    gamma_scaling,
                                    path_to_nb_itvs_per_side_load,
                                    are_bin_widths_learned)
    with tf.Session() as sess:
        entropy_ae.initialization(sess, path_to_restore)
        y_float32 = eae.batching.encode_mini_batches(
            numpy.expand_dims(reference_uint8, axis=3), sess, entropy_ae,
            batch_size)

        # `bin_widths` are the quantization bin widths
        # at the end of the training.
        bin_widths = entropy_ae.get_bin_widths()

    # The graph of the entropy autoencoder is destroyed.
    tf.reset_default_graph()

    # A single decoder is created.
    isolated_decoder = IsolatedDecoder(batch_size, h_in, w_in,
                                       are_bin_widths_learned)

    # `array_nb_deads[i, j]` stores the number of dead feature
    # maps at the rate of index i for the luminance image of
    # index j.
    array_nb_deads = numpy.zeros((nb_points, nb_images), dtype=numpy.int32)

    # `map_mean[i]` is the approximate mean of the latent
    # variable feature map of index i. It was computed on
    # the extra set.
    map_mean = numpy.load(path_to_map_mean)
    tiled_map_mean = numpy.tile(
        map_mean, (nb_images, y_float32.shape[1], y_float32.shape[2], 1))

    # `idx_map_exception` was also computed on the extra set.
    if is_lossless:
        with open(os.path.join(path_to_stats, 'idx_map_exception.pkl'),
                  'rb') as file:
            idx_map_exception = pickle.load(file)
    centered_y_float32 = y_float32 - tiled_map_mean
    with tf.Session() as sess:
        isolated_decoder.initialization(sess, path_to_restore)
        for i in range(nb_points):
            multiplier = multipliers[i].item()
            str_multiplier = tls.float_to_str(multiplier)
            bin_widths_test = multiplier * bin_widths
            centered_quantized_y_float32 = tls.quantize_per_map(
                centered_y_float32, bin_widths_test)

            # For a given luminance image, if at least a coefficient
            # of a feature map is different from 0.0, this feature map
            # is viewed as not dead.
            array_nb_deads[i, :] = tls.count_nb_deads(
                centered_quantized_y_float32)
            off_centered_quantized_y_float32 = centered_quantized_y_float32 + tiled_map_mean
            expanded_reconstruction_uint8 = eae.batching.decode_mini_batches(
                off_centered_quantized_y_float32, sess, isolated_decoder,
                batch_size)

            # The elements of `reconstruction_uint8` span
            # the range [|16, 235|].
            reconstruction_uint8 = numpy.squeeze(expanded_reconstruction_uint8,
                                                 axis=3)

            # The binary probabilities were also computed
            # on the extra set.
            if is_lossless:
                path_to_binary_probabilities = os.path.join(
                    path_to_stats,
                    'binary_probabilities_{}.npy'.format(str_multiplier))
            path_to_storage = os.path.join(
                path_to_vis, 'multiplier_{}'.format(str_multiplier))
            if not os.path.isdir(path_to_storage):
                os.makedirs(path_to_storage)
            for j in range(nb_images):
                if is_lossless:
                    nb_bits = lossless.compression.rescale_compress_lossless_maps(
                        centered_quantized_y_float32[j, :, :, :],
                        bin_widths_test,
                        path_to_binary_probabilities,
                        idx_map_exception=idx_map_exception)
                    rate[i, j] = float(nb_bits) / (h_in * w_in)
                else:
                    rate[i, j] = tls.rate_3d(
                        centered_quantized_y_float32[j, :, :, :],
                        bin_widths_test, h_in, w_in)
                psnr[i, j] = tls.psnr_2d(reference_uint8[j, :, :],
                                         reconstruction_uint8[j, :, :])

                paths = [
                    os.path.join(path_to_storage,
                                 'reconstruction_{}.png'.format(j))
                ]
                paths += [
                    os.path.join(
                        path_to_storage,
                        'reconstruction_{0}_crop_{1}.png'.format(
                            j, index_crop))
                    for index_crop in range(positions_top_left.shape[1])
                ]
                tls.visualize_rotated_luminance(reconstruction_uint8[j, :, :],
                                                j in list_rotation,
                                                positions_top_left, paths)

    # The graph of the decoder is destroyed.
    tf.reset_default_graph()
    path_to_directory_nb_dead = os.path.join(path_to_vis, 'nb_dead')
    if not os.path.isdir(path_to_directory_nb_dead):
        os.makedirs(path_to_directory_nb_dead)
    plot_nb_dead_feature_maps(rate, array_nb_deads, [
        os.path.join(path_to_directory_nb_dead, 'nb_dead_{}.png'.format(i))
        for i in range(nb_images)
    ])
    return (rate, psnr)
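
# A hedged sketch of `tls.quantize_per_map` (an assumption about the original
# implementation): uniform scalar quantization where each latent variable
# feature map has its own quantization bin width; `quantize_per_map_sketch`
# is a hypothetical name.
def quantize_per_map_sketch(y_float32, bin_widths):
    """Quantizes each feature map of `y_float32` with its own bin width.

    `y_float32` has shape (nb_images, height, width, nb_maps) and
    `bin_widths` has shape (nb_maps,). Broadcasting applies the ith
    bin width to the ith feature map.

    """
    return bin_widths*numpy.round(y_float32/bin_widths)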
def fix_gamma_fix_bin_width(reference_uint8, mean_training, std_training, bin_width_init,
                            multipliers, gamma, path_to_checking_r):
    """Computes a series of pairs (mean rate, mean PSNR).
    
    A single entropy autoencoder is considered.
    At training time, the quantization bin width
    was fixed.
    For each multiplier, the quantization bin width
    at training time is multiplied by the multiplier,
    yielding a test quantization bin width. Then, for
    each test quantization bin width, the pair
    (mean rate, mean PSNR) associated to the compression
    of the RGB digits via the single entropy autoencoder
    and the test quantization bin width is computed.
    
    Parameters
    ----------
    reference_uint8 : numpy.ndarray
        2D array with data-type `numpy.uint8`.
        RGB digits. `reference_uint8[i, :]`
        contains the ith RGB digit.
    mean_training : numpy.ndarray
        1D array with data-type `numpy.float64`.
        Mean of each pixel over all training images.
    std_training : numpy.float64
        Mean of the standard deviation of each pixel
        over all training images.
    bin_width_init : float
        Value of the quantization bin width at the
        beginning of the training.
    multipliers : numpy.ndarray
        1D array with data-type `numpy.float64`.
        Multipliers.
    gamma : float
        Scaling coefficient of the single
        entropy autoencoder.
    path_to_checking_r : str
        Path to the folder containing the RGB
        digits after being compressed via
        entropy autoencoders.
    
    Returns
    -------
    tuple
        numpy.ndarray
            1D array with data-type `numpy.float64`.
            Its ith element is the mean rate associated
            to the compression of the RGB digits via the
            single entropy autoencoder and the ith test
            quantization bin width.
        numpy.ndarray
            1D array with data-type `numpy.float64`.
            Its ith element is the mean PSNR associated
            to the compression of the RGB digits via the
            single entropy autoencoder and the ith test
            quantization bin width.
    
    Raises
    ------
    OSError
        If the scaling coefficient written in the
        name of the ".pkl" file is incorrect.
    OSError
        If the quantization bin width written in the
        name of the ".pkl" file is incorrect.
    
    """
    suffix = '{0}_{1}'.format(tls.float_to_str(bin_width_init),
                              tls.float_to_str(gamma))
    path_to_storage = os.path.join(path_to_checking_r,
                                   'reconstruction_fix_gamma_fix_bin_width',
                                   suffix)
    path_to_model = 'eae/results/eae_svhn_{}.pkl'.format(suffix)
    with open(path_to_model, 'rb') as file:
        entropy_ae = pickle.load(file)
    if entropy_ae.gamma != gamma:
        raise OSError(
            'The file name is {0} whereas the scaling coefficient is {1}.'.format(
                path_to_model, entropy_ae.gamma))
    if entropy_ae.bin_width != bin_width_init:
        raise OSError(
            'The file name is {0} whereas the quantization bin width is {1}.'.format(
                path_to_model, entropy_ae.bin_width))
    nb_points = multipliers.size
    rate = numpy.zeros(nb_points)
    psnr = numpy.zeros(nb_points)
    for i in range(nb_points):
        multiplier = multipliers[i].item()
        bin_width_test = multiplier * bin_width_init
        path_to_directory_reconstruction = os.path.join(
            path_to_storage,
            'multiplier_{}'.format(tls.float_to_str(multiplier)))
        if not os.path.isdir(path_to_directory_reconstruction):
            os.makedirs(path_to_directory_reconstruction)
        path_to_reconstruction = os.path.join(path_to_directory_reconstruction,
                                              'reconstruction.png')
        (rate[i], psnr[i]) = eae.utils.compute_rate_psnr(reference_uint8,
                                                         mean_training,
                                                         std_training,
                                                         entropy_ae,
                                                         bin_width_test,
                                                         10,
                                                         path_to_reconstruction)
    return (rate, psnr)
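
# A toy illustration (an addition, not from the original code) of the
# rate-distortion tradeoff that `fix_gamma_fix_bin_width` sweeps: a larger
# test quantization bin width yields fewer distinct quantized values, hence
# a lower rate, at the cost of a larger reconstruction error.
def illustrate_bin_width_tradeoff(bin_width_init, multipliers):
    """Prints, for each multiplier, the number of distinct quantized values
    and the mean squared quantization error on Gaussian samples."""
    samples = numpy.random.normal(size=10000)
    for multiplier in multipliers:
        bin_width_test = multiplier*bin_width_init
        quantized = bin_width_test*numpy.round(samples/bin_width_test)
        print('multiplier {}: {} distinct values, MSE {:.4f}'.format(
            multiplier, numpy.unique(quantized).size,
            numpy.mean((samples - quantized)**2)))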
def vary_gamma_learn_bin_width(reference_uint8, mean_training, std_training, bin_width_init,
                               gammas, path_to_checking_r):
    """Computes a series of pairs (mean rate, mean PSNR).
    
    Several entropy autoencoders, each trained
    with a different scaling coefficient, are
    considered. At training time, the quantization
    bin width was learned.
    For each scaling coefficient, the pair
    (mean rate, mean PSNR) associated to the compression
    of the RGB digits via the entropy autoencoder trained
    with the scaling coefficient is computed.
    
    Parameters
    ----------
    reference_uint8 : numpy.ndarray
        2D array with data-type `numpy.uint8`.
        RGB digits. `reference_uint8[i, :]`
        contains the ith RGB digit.
    mean_training : numpy.ndarray
        1D array with data-type `numpy.float64`.
        Mean of each pixel over all training images.
    std_training : numpy.float64
        Mean of the standard deviation of each pixel
        over all training images.
    bin_width_init : float
        Value of the quantization bin width at the
        beginning of the training.
    gammas : numpy.ndarray
        1D array with data-type `numpy.float64`.
        Scaling coefficients.
    path_to_checking_r : str
        Path to the folder containing the RGB
        digits after being compressed via
        entropy autoencoders.
    
    Returns
    -------
    tuple
        numpy.ndarray
            1D array with data-type `numpy.float64`.
            Its ith element is the mean rate associated
            to the compression of the RGB digits via the
            entropy autoencoder trained with the ith
            scaling coefficient.
        numpy.ndarray
            1D array with data-type `numpy.float64`.
            Its ith element is the mean PSNR associated
            to the compression of the RGB digits via the
            entropy autoencoder trained with the ith
            scaling coefficient.
    
    Raises
    ------
    OSError
        If, for a file ".pkl", the scaling coefficient
        written in the file name is incorrect.
    
    """
    nb_points = gammas.size
    rate = numpy.zeros(nb_points)
    psnr = numpy.zeros(nb_points)
    for i in range(nb_points):
        suffix = 'learning_bw_{0}_{1}'.format(
            tls.float_to_str(bin_width_init),
            tls.float_to_str(gammas[i].item()))
        path_to_model = 'eae/results/eae_svhn_{}.pkl'.format(suffix)
        with open(path_to_model, 'rb') as file:
            entropy_ae = pickle.load(file)
        if entropy_ae.gamma != gammas[i].item():
            raise OSError(
                'The file name is {0} whereas the scaling coefficient is {1}.'.format(
                    path_to_model, entropy_ae.gamma))
        
        # The quantization bin width at the end
        # of the training is rounded to the 1st
        # digit after the decimal point, yielding
        # the test quantization bin width.
        bin_width_test = round(entropy_ae.bin_width, 1)
        path_to_directory_reconstruction = os.path.join(path_to_checking_r,
                                                        'reconstruction_vary_gamma_learn_bin_width',
                                                        suffix)
        if not os.path.isdir(path_to_directory_reconstruction):
            os.makedirs(path_to_directory_reconstruction)
        path_to_reconstruction = os.path.join(path_to_directory_reconstruction,
                                              'reconstruction.png')
        (rate[i], psnr[i]) = eae.utils.compute_rate_psnr(reference_uint8,
                                                         mean_training,
                                                         std_training,
                                                         entropy_ae,
                                                         bin_width_test,
                                                         10,
                                                         path_to_reconstruction)
    return (rate, psnr)
    def test_activate_latent_variables(self):
        """Tests the function `activate_latent_variable` in the file "eae/analysis.py".

        The test is successful if the images saved in the directory
        at "eae/pseudo_visualization/activate_latent_variables/" do
        not exhibit any structure.

        """
        h_in = 256
        w_in = 384
        idx_map_activation = 4
        tuple_activation_values = (-200., -20., 20., 200.)

        isolated_decoder = IsolatedDecoder(1,
                                           h_in,
                                           w_in,
                                           False)

        # As the bin widths are not learned, they are all set to 1.
        bin_widths = numpy.ones(csts.NB_MAPS_3, dtype=numpy.float32)

        # At initialization, the feature map mean over many
        # luminance images must be close to 0.
        map_mean = numpy.zeros(csts.NB_MAPS_3, dtype=numpy.float32)
        tuple_pairs_row_col = (
            (1, 1),
            (6, 6)
        )
        with tf.Session() as sess:
            isolated_decoder.initialization(sess, '')
            for activation_value in tuple_activation_values:
                path_to_directory_crop = os.path.join(
                    'eae/pseudo_visualization/activate_latent_variables/',
                    '{0}_{1}'.format(idx_map_activation + 1,
                                     tls.float_to_str(activation_value)))
                if not os.path.isdir(path_to_directory_crop):
                    os.makedirs(path_to_directory_crop)
                for (row_activation, col_activation) in tuple_pairs_row_col:
                    eae.analysis.activate_latent_variable(
                        sess,
                        isolated_decoder,
                        h_in,
                        w_in,
                        bin_widths,
                        row_activation,
                        col_activation,
                        idx_map_activation,
                        activation_value,
                        map_mean,
                        64,
                        64,
                        os.path.join(path_to_directory_crop,
                                     '{0}_{1}.png'.format(row_activation,
                                                          col_activation)))