Code example #1
def _quasi_uniform(dim, sample_shape, random_type, dtype, seed=None, **kwargs):
    """Quasi random draws from a uniform distribution on [0, 1)."""
    # Shape of the output
    output_shape = tf.concat([sample_shape] + [[dim]], -1)
    # Number of quasi random samples
    num_samples = tf.reduce_prod(sample_shape)
    # Number of initial low discrepancy sequence numbers to skip
    skip = kwargs.get('skip', 0)
    if random_type == RandomType.SOBOL:
        # Shape [num_samples, dim] of the Sobol samples
        low_discrepancy_seq = sobol.sample(dim=dim,
                                           num_results=num_samples,
                                           skip=skip,
                                           dtype=dtype)
        # TODO(b/148005344): Remove the tf.reshape below once the bug is fixed.
        low_discrepancy_seq = tf.reshape(low_discrepancy_seq,
                                         [num_samples, dim])
    else:  # HALTON or HALTON_RANDOMIZED random_type
        randomization_params = kwargs.get('randomization_params', None)
        randomized = random_type == RandomType.HALTON_RANDOMIZED
        # Shape [num_samples, dim] of the Halton samples
        low_discrepancy_seq, _ = halton.sample(
            dim=dim,
            sequence_indices=tf.range(skip, skip + num_samples),
            randomized=randomized,
            randomization_params=randomization_params,
            seed=seed,
            dtype=dtype)
    return tf.reshape(low_discrepancy_seq, output_shape)
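A minimal usage sketch for the helper above (purely illustrative; it assumes TensorFlow is imported as tf and that RandomType is the enum referenced by _quasi_uniform):

import tensorflow as tf

# Draw 1000 two-dimensional Sobol points on [0, 1), skipping the first 16
# entries of the sequence; the `skip` keyword is forwarded through **kwargs.
uniform_draws = _quasi_uniform(
    dim=2,
    sample_shape=[1000],
    random_type=RandomType.SOBOL,
    dtype=tf.float64,
    skip=16)
# `uniform_draws` has shape [1000, 2] with entries in [0, 1).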
Code example #2
def _mvnormal_quasi(sample_shape,
                    mean,
                    random_type,
                    seed,
                    covariance_matrix=None,
                    scale_matrix=None,
                    validate_args=False,
                    dtype=None,
                    **kwargs):
    """Returns normal draws using low-discrepancy sequences."""
    if scale_matrix is None and covariance_matrix is None:
        scale_matrix = tf.linalg.eye(tf.shape(mean)[-1], dtype=mean.dtype)
    elif scale_matrix is None and covariance_matrix is not None:
        covariance_matrix = tf.convert_to_tensor(covariance_matrix,
                                                 dtype=dtype,
                                                 name='covariance_matrix')
        scale_matrix = tf.linalg.cholesky(covariance_matrix)
    else:
        scale_matrix = tf.convert_to_tensor(scale_matrix,
                                            dtype=dtype,
                                            name='scale_matrix')
    scale_shape = scale_matrix.shape.as_list()
    dim = scale_shape[-1]
    if mean is None:
        mean = tf.zeros([dim], dtype=scale_matrix.dtype)
    distribution = tfp.distributions.Normal(loc=tf.zeros_like(mean),
                                            scale=tf.ones(
                                                scale_shape[:-1],
                                                dtype=scale_matrix.dtype),
                                            validate_args=validate_args)
    # Batch shape of the output
    batch_shape = distribution.batch_shape
    # Reverse elements of the batch shape
    batch_shape_reverse = tf.reverse_sequence(
        tf.expand_dims(batch_shape, 0),
        seq_lengths=[tf.size(batch_shape)],
        seq_dim=1)
    batch_shape_reverse = tf.squeeze(batch_shape_reverse, 0)
    # Reverse elements of the sample shape
    sample_shape_reverse = tf.reverse_sequence(
        tf.expand_dims(sample_shape, 0),
        seq_lengths=[tf.size(sample_shape)],
        seq_dim=1)
    sample_shape_reverse = tf.squeeze(sample_shape_reverse, 0)
    # Transposed shape of the output
    output_shape_t = tf.concat([batch_shape_reverse, sample_shape_reverse], -1)
    # Number of quasi random samples
    num_samples = tf.reduce_prod(output_shape_t) // dim
    # Number of initial low discrepancy sequence numbers to skip
    skip = kwargs.get('skip', 0)
    if random_type == RandomType.SOBOL:
        # Shape [num_samples, dim] of the Sobol samples
        low_discrepancy_seq = sobol.sample(dim=dim,
                                           num_results=num_samples,
                                           skip=skip,
                                           dtype=distribution.dtype)
    else:  # HALTON or HALTON_RANDOMIZED random_type
        randomization_params = kwargs.get('randomization_params', None)
        randomized = random_type == RandomType.HALTON_RANDOMIZED
        # Shape [num_samples, dim] of the Halton samples
        low_discrepancy_seq, _ = halton.sample(
            dim=dim,
            sequence_indices=tf.range(skip, skip + num_samples),
            randomized=randomized,
            randomization_params=randomization_params,
            seed=seed,
            dtype=distribution.dtype)

    # Transpose to the shape [dim, num_samples]
    low_discrepancy_seq = tf.transpose(low_discrepancy_seq)
    size_sample = tf.size(sample_shape)
    size_batch = tf.size(batch_shape) - 1
    # Permutation for `output_shape_t` to the output shape
    permutation = tf.concat([
        tf.range(size_batch + size_sample, size_batch, -1),
        tf.range(size_batch, -1, -1)
    ], -1)
    # Reshape the low-discrepancy samples to the correct output shape
    low_discrepancy_seq = tf.transpose(
        tf.reshape(low_discrepancy_seq, output_shape_t), permutation)
    # Apply the inverse Normal CDF to the low-discrepancy samples to obtain the
    # corresponding Normal samples
    samples = distribution.quantile(low_discrepancy_seq)
    return mean + tf.linalg.matvec(scale_matrix, samples)
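The last two statements carry the core idea of the routine: uniform low-discrepancy draws are pushed through the inverse Normal CDF (distribution.quantile) to obtain standard Normal draws, which are then colored with the Cholesky factor of the covariance via mean + matvec(scale_matrix, samples). A self-contained sketch of that transformation, with ordinary pseudo-random uniforms standing in for the quasi-random sequence (all names here are illustrative):

import tensorflow as tf
import tensorflow_probability as tfp

dtype = tf.float64
# Illustrative 2x2 covariance matrix and its Cholesky factor (the scale matrix).
covariance = tf.constant([[1.0, 0.5], [0.5, 2.0]], dtype=dtype)
scale = tf.linalg.cholesky(covariance)
mean = tf.constant([1.0, -1.0], dtype=dtype)

# Pseudo-random stand-in for the [num_samples, dim] low-discrepancy sequence.
uniform = tf.random.uniform([1000, 2], dtype=dtype)
standard_normal = tfp.distributions.Normal(
    loc=tf.constant(0.0, dtype),
    scale=tf.constant(1.0, dtype)).quantile(uniform)

# mean + scale @ z yields draws with (approximately) the target covariance.
correlated_draws = mean + tf.linalg.matvec(scale, standard_normal)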
Code example #3
def _mvnormal_quasi(sample_shape,
                    mean,
                    random_type,
                    seed,
                    covariance_matrix=None,
                    scale_matrix=None,
                    validate_args=False,
                    dtype=None,
                    **kwargs):
    """Returns normal draws using low-discrepancy sequences."""
    (mean, scale_matrix, batch_shape, dim,
     dtype) = _process_mean_scale(mean, scale_matrix, covariance_matrix, dtype)
    # Reverse elements of the batch shape
    batch_shape_reverse = tf.reverse(batch_shape, [0])
    # Transposed shape of the output
    output_shape_t = tf.concat([batch_shape_reverse, sample_shape], -1)
    # Number of quasi random samples
    num_samples = tf.reduce_prod(output_shape_t) // dim
    # Number of initial low discrepancy sequence numbers to skip
    skip = kwargs.get('skip', 0)
    if random_type == RandomType.SOBOL:
        # TODO(b/182621549): For Sobol sequences, dimension should be known at graph
        # construction time.
        dim = tf.get_static_value(dim)
        if dim is None:
            raise ValueError(
                'For Sobol sequences, dimension should be known at graph'
                ' construction time.')
        # Shape [num_samples, dim] of the Sobol samples
        low_discrepancy_seq = sobol.sample(dim=dim,
                                           num_results=num_samples,
                                           skip=skip,
                                           dtype=dtype)
    else:  # HALTON or HALTON_RANDOMIZED random_type
        randomization_params = kwargs.get('randomization_params', None)
        randomized = random_type == RandomType.HALTON_RANDOMIZED
        # Shape [num_samples, dim] of the Halton samples
        low_discrepancy_seq, _ = halton.sample(
            dim=dim,
            sequence_indices=tf.range(skip, skip + num_samples),
            randomized=randomized,
            randomization_params=randomization_params,
            seed=seed,
            validate_args=validate_args,
            dtype=dtype)

    # Transpose to the shape [dim, num_samples]
    low_discrepancy_seq = tf.transpose(low_discrepancy_seq)
    size_sample = tf.size(sample_shape)
    size_batch = tf.size(batch_shape)
    # Permutation for `output_shape_t` to the output shape
    permutation = tf.concat([
        tf.range(size_batch, size_batch + size_sample),
        tf.range(size_batch - 1, -1, -1)
    ], -1)
    # Reshape the low-discrepancy samples to the correct output shape
    low_discrepancy_seq = tf.transpose(
        tf.reshape(low_discrepancy_seq, output_shape_t), permutation)
    # Apply the inverse Normal CDF to the low-discrepancy samples to obtain the
    # corresponding Normal samples
    samples = tf.math.erfinv((low_discrepancy_seq - 0.5) * 2) * _SQRT_2
    if scale_matrix is None:
        return mean + samples
    else:
        return mean + tf.linalg.matvec(scale_matrix, samples)
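Code example #3 differs from #2 mainly in how the inverse Normal CDF is applied: instead of constructing a tfp.distributions.Normal and calling quantile, it uses the identity Phi^{-1}(u) = sqrt(2) * erfinv(2*u - 1) directly, so no distribution object is needed. A short illustrative check of that identity (the variable names are chosen for the example):

import math

import tensorflow as tf
import tensorflow_probability as tfp

_SQRT_2 = math.sqrt(2.0)
u = tf.constant([0.1, 0.25, 0.5, 0.75, 0.9], dtype=tf.float64)

# Inverse standard Normal CDF via the error function, as in code example #3.
z_erfinv = tf.math.erfinv((u - 0.5) * 2) * _SQRT_2
# The same quantity via the Normal quantile used in code example #2; the two
# agree up to floating-point error (e.g. both give 0.0 at u = 0.5).
z_quantile = tfp.distributions.Normal(
    loc=tf.constant(0.0, tf.float64),
    scale=tf.constant(1.0, tf.float64)).quantile(u)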