def preliminary_fitting(training_uint8, sess, entropy_ae, batch_size,
                        nb_epochs_fitting):
    """"Pre-trains the parameters of the piecewise linear functions.
    
    Parameters
    ----------
    training_uint8 : numpy.ndarray
        4D array with data-type `numpy.uint8`.
        Training set. `training_uint8[i, :, :, :]` is
        the ith training luminance image. The last
        dimension of `training_uint8` is equal to 1.
    sess : Session
        Session that runs the graph.
    entropy_ae : EntropyAutoencoder
        Entropy autoencoder trained with a specific
        scaling coefficient.
    batch_size : int
        Size of the mini-batches.
    nb_epochs_fitting : int
        Number of fitting epochs.
    
    """
    nb_batches = tls.subdivide_set(training_uint8.shape[0], batch_size)
    for _ in range(nb_epochs_fitting):
        for j in range(nb_batches):
            batch_float32 = training_uint8[j * batch_size:(j + 1) *
                                           batch_size, :, :, :].astype(
                                               numpy.float32)
            entropy_ae.training_fct(sess, batch_float32)
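
# `tls.subdivide_set` is used throughout this module to split a set of
# examples into a whole number of mini-batches. A minimal sketch of its
# presumable behavior, inferred from how it is called and tested below,
# is given here; `_subdivide_set_sketch` is a hypothetical name and the
# exact exception type is an assumption, not the library code.
def _subdivide_set_sketch(nb_examples, batch_size):
    """Computes the number of mini-batches, raising if the division is not exact."""
    if nb_examples % batch_size != 0:
        raise ValueError('{} examples cannot be divided into a whole '
                         'number of mini-batches of size {}.'.format(
                             nb_examples, batch_size))
    return nb_examples // batch_size
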
def decode_mini_batches(quantized_y_float32, sess, isolated_decoder,
                        batch_size):
    """Computes the reconstruction of the luminance images from the quantized latent variables, one mini-batch at a time.
    
    Parameters
    ----------
    quantized_y_float32 : numpy.ndarray
        4D array with data-type `numpy.float32`.
        Quantized latent variables. `quantized_y_float32[i, :, :, :]`
        contains the quantized latent variables
        previously computed from the ith luminance
        image.
    sess : Session
        Session that runs the graph.
    isolated_decoder : IsolatedDecoder
        Isolated decoder.
    batch_size : int
        Size of the mini-batches.
    
    Returns
    -------
    numpy.ndarray
        4D array with data-type `numpy.uint8`.
        Reconstruction of the luminance images. The
        4th array dimension is equal to 1.
    
    """
    # If `quantized_y_float32.ndim` is not equal to 4,
    # the unpacking below raises a `ValueError` exception.
    (nb_images, h_in, w_in, _) = quantized_y_float32.shape
    nb_batches = tls.subdivide_set(nb_images, batch_size)

    # The height of a quantized latent variable feature map
    # is `csts.STRIDE_PROD` times smaller than the height
    # of the luminance image. Similarly, the width of a
    # quantized latent variable feature map is `csts.STRIDE_PROD`
    # times smaller than the width of the luminance image.
    expanded_reconstruction_uint8 = numpy.zeros(
        (nb_images, h_in * csts.STRIDE_PROD, w_in * csts.STRIDE_PROD, 1),
        dtype=numpy.uint8)
    for i in range(nb_batches):
        reconstruction_float32 = sess.run(
            isolated_decoder.node_reconstruction,
            feed_dict={
                isolated_decoder.node_quantized_y:
                quantized_y_float32[i * batch_size:(i + 1) *
                                    batch_size, :, :, :]
            })
        expanded_reconstruction_uint8[i * batch_size:(i + 1) *
                                      batch_size, :, :, :] = tls.cast_bt601(
                                          reconstruction_float32)
    return expanded_reconstruction_uint8
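
# `tls.cast_bt601` converts the float reconstruction back to 8-bit
# luminance. A minimal sketch of its presumable behavior is given here;
# `_cast_bt601_sketch` is a hypothetical name, and the clipping bounds
# are an assumption based on BT.601, in which legal luma values lie in
# [16, 235].
def _cast_bt601_sketch(array_float32):
    """Rounds, clips to the assumed BT.601 luma range and casts to `numpy.uint8`."""
    return numpy.clip(numpy.round(array_float32), 16., 235.).astype(numpy.uint8)
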
def encode_mini_batches(luminances_uint8, sess, entropy_ae, batch_size):
    """Computes the latent variables from the luminance images via the entropy autoencoder, one mini-batch at a time.
    
    Parameters
    ----------
    luminances_uint8 : numpy.ndarray
        4D array with data-type `numpy.uint8`.
        Luminance images. `luminances_uint8[i, :, :, :]`
        is the ith luminance image. The last dimension
        of `luminances_uint8` is equal to 1.
    sess : Session
        Session that runs the graph.
    entropy_ae : EntropyAutoencoder
        Entropy autoencoder trained with a specific
        scaling coefficient.
    batch_size : int
        Size of the mini-batches.
    
    Returns
    -------
    numpy.ndarray
        4D array with data-type `numpy.float32`.
        Latent variables.
    
    Raises
    ------
    TypeError
        If `luminances_uint8.dtype` is not equal to `numpy.uint8`.
    
    """
    if luminances_uint8.dtype != numpy.uint8:
        raise TypeError(
            '`luminances_uint8.dtype` is not equal to `numpy.uint8`.')

    # If `luminances_uint8.ndim` is not equal to 4,
    # the unpacking below raises a `ValueError` exception.
    (nb_images, h_in, w_in, _) = luminances_uint8.shape
    nb_batches = tls.subdivide_set(nb_images, batch_size)
    y_float32 = numpy.zeros((nb_images, h_in // csts.STRIDE_PROD,
                             w_in // csts.STRIDE_PROD, csts.NB_MAPS_3),
                            dtype=numpy.float32)
    for i in range(nb_batches):
        batch_float32 = luminances_uint8[i * batch_size:(i + 1) *
                                         batch_size, :, :, :].astype(
                                             numpy.float32)
        y_float32[i * batch_size:(i + 1) * batch_size, :, :, :] = sess.run(
            entropy_ae.node_y,
            feed_dict={entropy_ae.node_visible_units: batch_float32})
    return y_float32
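
# Usage sketch tying `encode_mini_batches` and `decode_mini_batches`
# together. The uniform quantization step in the middle is an assumption:
# this module only shows that the decoder expects quantized latent
# variables. `_round_trip_sketch` and `bin_width` are hypothetical.
def _round_trip_sketch(luminances_uint8, sess, entropy_ae, isolated_decoder,
                       batch_size, bin_width):
    """Encodes, uniformly quantizes and decodes, one mini-batch at a time."""
    y_float32 = encode_mini_batches(luminances_uint8, sess, entropy_ae,
                                    batch_size)
    quantized_y_float32 = bin_width * numpy.round(y_float32 / bin_width)
    return decode_mini_batches(quantized_y_float32, sess, isolated_decoder,
                               batch_size)
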
    def test_subdivide_set(self):
        """Tests the function `subdivide_set`.

        `subdivide_set` raises an exception when the
        number of examples cannot be divided into a
        whole number of mini-batches. Here, 400 examples
        are divided into mini-batches of size 20, so the
        test is successful if the printed number of
        mini-batches is equal to 20.

        """
        nb_examples = 400
        batch_size = 20

        nb_batches = tls.subdivide_set(nb_examples, batch_size)
        print('Number of examples: {}'.format(nb_examples))
        print('Size of the mini-batches: {}'.format(batch_size))
        print('Number of mini-batches: {}'.format(nb_batches))


def preliminary_fitting(training_uint8, mean_training, std_training,
                        entropy_ae, batch_size, nb_epochs_fitting):
    """Pre-trains the parameters of the piecewise linear function.
    
    Parameters
    ----------
    training_uint8 : numpy.ndarray
        2D array with data-type `numpy.uint8`.
        Training set. `training_uint8[i, :]`
        contains the ith training image.
    mean_training : numpy.ndarray
        1D array with data-type `numpy.float64`.
        Mean of each pixel over all training images.
    std_training : numpy.float64
        Mean of the standard deviation of each pixel
        over all training images.
    entropy_ae : EntropyAutoencoder
        Entropy autoencoder trained with a
        specific scaling coefficient.
    batch_size : int
        Size of the mini-batches.
    nb_epochs_fitting : int
        Number of fitting epochs.
    
    """
    nb_batches = tls.subdivide_set(training_uint8.shape[0], batch_size)
    for _ in range(nb_epochs_fitting):
        for j in range(nb_batches):
            batch_uint8 = training_uint8[j * batch_size:(j + 1) *
                                         batch_size, :]

            # The function `svhn.svhn.preprocess_svhn` checks
            # that `batch_uint8.dtype` is equal to `numpy.uint8`
            # and `batch_uint8.ndim` is equal to 2.
            batch_float64 = svhn.svhn.preprocess_svhn(batch_uint8,
                                                      mean_training,
                                                      std_training)
            entropy_ae.training_fct(batch_float64)

    path_to_nb_itvs_per_side_save = os.path.join(
        path_to_directory_model,
        'nb_itvs_per_side_{}.pkl'.format(args.idx_training + 1))
    if os.path.isfile(path_to_model):
        print('"{}" already exists.'.format(path_to_model))
        print('Delete the model manually to retrain.')
        exit()
    elif os.path.isfile(path_to_meta):
        print('"{}" already exists.'.format(path_to_meta))
        print('Delete the metadata manually to retrain.')
        exit()

    # `training_uint8.dtype` is equal to `numpy.uint8`.
    training_uint8 = numpy.load(path_to_training_data)
    (nb_training, h_in, w_in, _) = training_uint8.shape
    nb_batches = tls.subdivide_set(nb_training, batch_size)
    print('Number of training examples: {}'.format(nb_training))
    print('Size of the mini-batches: {}'.format(batch_size))
    print('Number of mini-batches: {}'.format(nb_batches))

    # `validation_float32.dtype` and `training_portion_float32.dtype`
    # are equal to `numpy.float32`.
    validation_float32 = numpy.load(path_to_validation_data).astype(
        numpy.float32)
    if validation_float32.shape[0] != batch_size:
        raise ValueError(
            'The number of validation examples is not equal to {}.'.format(
                batch_size))
    print('Number of validation examples: {}'.format(batch_size))
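    # A fixed portion of the training set, one mini-batch in size, is
    # cast to float32 below, presumably to track the objective on training
    # data alongside `validation_float32` (an assumption; its use is not
    # shown here).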
    training_portion_float32 = training_uint8[0:batch_size, :, :, :].astype(
        numpy.float32)
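

# A minimal sketch of what `svhn.svhn.preprocess_svhn`, called in the
# second `preliminary_fitting` above, presumably does, inferred from the
# documentation of `mean_training` and `std_training`: each pixel is
# centered with the per-pixel mean and rescaled by the mean standard
# deviation. `_preprocess_svhn_sketch` is a hypothetical name; this is
# not the real function.
def _preprocess_svhn_sketch(batch_uint8, mean_training, std_training):
    """Subtracts the per-pixel mean and divides by the mean standard deviation."""
    return (batch_uint8.astype(numpy.float64) - mean_training) / std_training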