def test_2d_scalar_valued_function(self):
    x_ref_min = np.array([0., 1.], dtype=np.float32)
    x_ref_max = np.array([1.3, 2.], dtype=np.float32)
    ny = [100, 110]

    # Build y_ref.
    x0s, x1s = tf.meshgrid(
        tf.linspace(x_ref_min[0], x_ref_max[0], ny[0]),
        tf.linspace(x_ref_min[1], x_ref_max[1], ny[1]),
        indexing='ij')

    def func(x0, x1):
      return tf.sin(x0) * tf.cos(x1)

    # Shape ny
    y_ref = self.evaluate(func(x0s, x1s))

    # Shape [10, 2]
    seed = test_util.test_seed_stream()
    x = tf.stack([
        tf.random.uniform(
            shape=(10,), minval=x_ref_min[0], maxval=x_ref_max[0], seed=seed()),
        tf.random.uniform(
            shape=(10,), minval=x_ref_min[1], maxval=x_ref_max[1], seed=seed()),
    ],
                 axis=-1)

    x = self.evaluate(x)

    expected_y = func(x[:, 0], x[:, 1])
    actual_y = tfp.math.batch_interp_regular_nd_grid(
        x=x, x_ref_min=x_ref_min, x_ref_max=x_ref_max, y_ref=y_ref, axis=-2)

    self.assertAllClose(
        self.evaluate(expected_y), self.evaluate(actual_y), atol=0.02)
 def first_order_coeff_fn(t, coord_grid):
     del t
     y, x = tf.meshgrid(*coord_grid, indexing='ij')
     return [
         y**2 * (1 + 3 * x**2) + 2 * tf.math.sin(y),
         x * (1 + 2 * x**2 * y) - 2 * tf.math.cos(x)
     ]
  def _generate_boxes(self):
    """Generates multiscale anchor boxes.

    Returns:
      A `Tensor` of shape [N, 4], representing anchor boxes of all levels
      concatenated together.
    """
    boxes_all = []
    for level in range(self.min_level, self.max_level + 1):
      boxes_l = []
      for scale in range(self.num_scales):
        for aspect_ratio in self.aspect_ratios:
          stride = 2 ** level
          intermediate_scale = 2 ** (scale / float(self.num_scales))
          base_anchor_size = self.anchor_size * stride * intermediate_scale
          aspect_x = aspect_ratio ** 0.5
          aspect_y = aspect_ratio ** -0.5
          half_anchor_size_x = base_anchor_size * aspect_x / 2.0
          half_anchor_size_y = base_anchor_size * aspect_y / 2.0
          x = tf.range(stride / 2, self.image_size[1], stride)
          y = tf.range(stride / 2, self.image_size[0], stride)
          xv, yv = tf.meshgrid(x, y)
          xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
          yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
          # Tensor shape Nx4.
          boxes = tf.stack([yv - half_anchor_size_y, xv - half_anchor_size_x,
                            yv + half_anchor_size_y, xv + half_anchor_size_x],
                           axis=1)
          boxes_l.append(boxes)
      # Concat anchors on the same level to tensor shape NxAx4.
      boxes_l = tf.stack(boxes_l, axis=1)
      boxes_l = tf.reshape(boxes_l, [-1, 4])
      boxes_all.append(boxes_l)
    return tf.concat(boxes_all, axis=0)
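The center-grid idiom above can be checked in isolation. A minimal standalone sketch, assuming TF2 eager execution (the stride and image size are illustrative, not taken from the class):

import tensorflow as tf

stride, height, width = 8, 32, 32
x = tf.range(stride / 2, width, stride)   # column centers: [4., 12., 20., 28.]
y = tf.range(stride / 2, height, stride)  # row centers: [4., 12., 20., 28.]
xv, yv = tf.meshgrid(x, y)                # default 'xy' indexing, each [4, 4]
centers = tf.stack([tf.reshape(yv, [-1]), tf.reshape(xv, [-1])], axis=1)
print(centers.shape)  # (16, 2): one (y, x) center per 8x8 cell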
Example #4
def binary_image_to_points(features, normalize_coords=True, keys=("image", )):
    """Converts a (binary) image into a 2D point cloud.

  Args:
    features: Dictionary of data features to preprocess.
    normalize_coords: Whether to normalize coords to [0, 1] while preserving
                      the aspect ratio.
    keys: On which keys to apply this function.

  Returns:
    Features with the image as a point cloud.
  """

    for key in keys:
        image = features[key]  # [HxW] or [HxWx1]
        image = tf.reshape(image, [image.shape[0], image.shape[1], 1])
        # We map background pixels to the origin, which may be suboptimal
        # but saves us some engineering work.
        coords = tf.cast(
            tf.stack(tf.meshgrid(tf.range(image.shape[0]),
                                 tf.range(image.shape[1]),
                                 indexing="ij"),
                     axis=-1), tf.float32)
        if normalize_coords:
            coords /= tf.cast(tf.reduce_max(image.shape[:2]), tf.float32)
        mask = tf.tile(image > 0, [1, 1, 2])
        features[key] = tf.reshape(tf.cast(mask, tf.float32) * coords, [-1, 2])
    return features
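A hedged toy call of the function above, assuming TF2 eager execution (the 3x3 image is made up):

import tensorflow as tf

features = {"image": tf.constant([[0., 1., 0.],
                                  [0., 0., 0.],
                                  [0., 0., 1.]])}
points = binary_image_to_points(features)["image"]
print(points.shape)  # (9, 2): one (row, col) point per pixel
# Foreground pixels keep their normalized coordinates; background maps to (0, 0).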
Example #5
    def _generate_boxes(self):
        """Generates multiscale anchor boxes.

    Returns:
      A `Tensor` of shape [N, 4], representing anchor boxes of all levels
      concatenated together.
    """
        boxes_all = []
        for level in range(self.min_level, self.max_level + 1):
            boxes_l = []
            for anchor_size in anchors[self.anchor_masks[level - 2]]:
                stride = 2**level
                half_anchor_size_x = anchor_size / 2.0
                half_anchor_size_y = anchor_size / 2.0
                x = tf.range(stride / 2, self.image_size[1], stride)
                y = tf.range(stride / 2, self.image_size[0], stride)
                xv, yv = tf.meshgrid(x, y)
                xv = tf.cast(tf.reshape(xv, [-1]), dtype=tf.float32)
                yv = tf.cast(tf.reshape(yv, [-1]), dtype=tf.float32)
                # Tensor shape Nx4.
                boxes = tf.stack([
                    yv - half_anchor_size_y, xv - half_anchor_size_x,
                    yv + half_anchor_size_y, xv + half_anchor_size_x
                ],
                                 axis=1)
                boxes_l.append(boxes)

            # Concat anchors on the same level to tensor shape NxAx4.
            boxes_l = tf.stack(boxes_l, axis=1)
            boxes_l = tf.reshape(boxes_l, [-1, 4])
            boxes_all.append(boxes_l)
        return tf.concat(boxes_all, axis=0)
Example #6
    def basis(sample_paths, time_index):
        """Computes polynomial basis expansion at the given sample points.

    Args:
      sample_paths: A `Tensor` of either `float32` or `float64` dtype and of
        shape `[num_samples, num_times, dim]`.
      time_index: An integer scalar `Tensor` that corresponds to the time
        coordinate at which the basis function is computed.

    Returns:
      A `Tensor` of shape `[(degree + 1)**dim, num_samples]`.
    """
        sample_paths = tf.convert_to_tensor(sample_paths, name="sample_paths")
        shape = sample_paths.shape.as_list()
        num_samples = shape[0]
        dim = shape[-1]
        slice_samples = tf.slice(sample_paths, [0, time_index, 0],
                                 [num_samples, 1, dim])
        slice_samples = tf.squeeze(slice_samples, 1)
        samples_centered = slice_samples - tf.math.reduce_mean(slice_samples,
                                                               axis=0)
        samples_centered = tf.expand_dims(samples_centered, axis=-2)
        grid = tf.range(degree + 1, dtype=samples_centered.dtype)
        # Creates a grid of 'power' expansions, i.e., a `Tensor` of shape
        # [(degree + 1)**dim, dim] with entries [k_1, .., k_dim] where
        # 0 <= k_i <= degree.
        grid = tf.meshgrid(*(dim * [grid]))
        # Shape [(degree + 1)**dim, dim]
        grid = tf.reshape(tf.stack(grid, -1), [-1, dim])
        # `samples_centered` has shape [num_samples, 1, dim],
        # `samples_centered**grid` has shape `[num_samples, (degree + 1)**dim, dim]`
        # so that the output shape is [num_samples, (degree + 1)**dim]
        basis_expansion = tf.reduce_prod(samples_centered**grid, -1)
        return tf.transpose(basis_expansion)
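A shape-only sketch of calling this closure, assuming the enclosing factory was built with `degree = 1` and TF2 eager execution (the path shape is illustrative):

import tensorflow as tf

paths = tf.random.normal([5, 4, 2])  # [num_samples, num_times, dim]
expansion = basis(paths, time_index=3)
print(expansion.shape)  # ((degree + 1)**dim, num_samples) = (4, 5)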
Example #7
    def plot_prior_latent(self, intervals, figsize=None, **kwargs):
        if len(intervals) != self.ndim_latent or self.ndim_latent not in (1, 2):
            raise ValueError(
                "This method is only defined for 1D or 2D models.")

        y = [
            tf.linspace(float(i[0]), float(i[1]), int(i[2])) for i in intervals
        ]
        y = tf.meshgrid(*y, indexing="ij")
        y = tf.stack(y, axis=-1)
        prob = tfpd.Independent(self.prior(**kwargs), 1).prob(y)

        if self.ndim_latent == 1:
            plt.figure(figsize=figsize or (16, 6))
            plt.plot(y, prob)
            plt.xlabel("latent space")
            plt.ylabel("prior")
            plt.grid(True)
            plt.xticks(np.arange(*np.ceil(intervals[0][:2])))
        else:
            plt.figure(figsize=figsize or (16, 14))
            plt.imshow(prob,
                       vmin=0,
                       vmax=np.max(prob),
                       origin="lower",
                       extent=(y[0, 0, 1], y[0, -1, 1],
                               y[0, 0, 0], y[-1, 0, 0]))
            plt.axis("image")
            plt.grid(False)
            plt.xlim(y[0, 0, 1], y[0, -1, 1])
            plt.ylim(y[0, 0, 0], y[-1, 0, 0])
            plt.xlabel("latent dimension 1")
            plt.ylabel("latent dimension 2")
Example #8
def _cartesian_product(*supports):
    """Construct "cartesian product" of tensors.

  Args:
    *supports: a sequence of tensors `s1, ..., sn`.

  Returns:
    This function computes a tensor analogous to the cartesian
    product of sets.
    If `t = _cartesian_product(s1, ..., sn)` then
    `t[i1, ..., in] = s1[i1] s2[i2] ... sn[in]`
    where the elements on the right hand side are concatenated
    together.

    In particular, if `s1, ..., sn` are the supports of `n`
    distributions, the cartesian product represents the support of the
    product distribution.

    For example if `a = [0, 1]`, `b = [10, 20]` and
    `c = _cartesian_product(a, b)` then
    `c = [[[0, 10], [0, 20]], [[1, 10], [1, 20]]]`.
    In this case note (for example) that
    `a[0] = 0`, `b[1] = 20` and so `c[0, 1] = [0, 20]`.
  """

    return tf.stack(tf.meshgrid(*supports, indexing='ij'), axis=-1)
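The docstring example can be verified directly (TF2 eager assumed):

import tensorflow as tf

a = tf.constant([0, 1])
b = tf.constant([10, 20])
c = _cartesian_product(a, b)
print(c.numpy())        # [[[ 0 10], [ 0 20]], [[ 1 10], [ 1 20]]]
print(c[0, 1].numpy())  # [ 0 20], i.e. (a[0], b[1])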
Example #9
def _coord_grid_to_mesh_grid(coord_grid):
    if len(coord_grid) == 1:
        return tf.expand_dims(coord_grid[0], 0)
    x_meshgrid = tf.stack(values=tf.meshgrid(*coord_grid, indexing='ij'),
                          axis=-1)
    perm = [len(coord_grid)] + list(range(len(coord_grid)))
    return tf.transpose(x_meshgrid, perm=perm)
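A shape-only sketch, assuming the helper above is in scope and TF2 eager execution: for a 2-D grid it returns the mesh with the dimension axis moved to the front.

import tensorflow as tf

coord_grid = [tf.linspace(0., 1., 3), tf.linspace(0., 2., 4)]
mesh = _coord_grid_to_mesh_grid(coord_grid)
print(mesh.shape)  # (2, 3, 4): [dim, nx, ny]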
Example #10
    def create_hmm(self, num_steps):
        """Same as the original CREPE viterbi decdoding, but in TF."""
        # Initial distribution is uniform.
        initial_distribution = tfp.distributions.Categorical(
            probs=tf.ones([360]) / 360)

        # Transition probabilities inducing continuous pitch.
        bins = tf.range(360, dtype=tf.float32)
        xx, yy = tf.meshgrid(bins, bins)
        min_transition = 1e-5  # For training stability.
        transition = tf.maximum(12 - abs(xx - yy), min_transition)
        transition = transition / tf.reduce_sum(transition, axis=1)[:, None]
        transition = tf.cast(transition, tf.float32)
        transition_distribution = tfp.distributions.Categorical(
            probs=transition)

        # Emission probability = fixed probability for self, evenly distribute the
        # others.
        self_emission = 0.1
        emission = (tf.eye(360) * self_emission + tf.ones(shape=(360, 360)) *
                    ((1 - self_emission) / 360.))
        emission = tf.cast(emission, tf.float32)[None, ...]
        observation_distribution = tfp.distributions.Multinomial(
            total_count=1, probs=emission)

        return tfp.distributions.HiddenMarkovModel(
            initial_distribution=initial_distribution,
            transition_distribution=transition_distribution,
            observation_distribution=observation_distribution,
            num_steps=num_steps,
        )
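The band-limited transition construction scales down for inspection. A sketch of the same idiom with 5 bins and a made-up bandwidth of 2 (instead of 360 and 12):

import tensorflow as tf

bins = tf.range(5, dtype=tf.float32)
xx, yy = tf.meshgrid(bins, bins)
transition = tf.maximum(2. - tf.abs(xx - yy), 1e-5)
transition /= tf.reduce_sum(transition, axis=1, keepdims=True)
# Each row is a distribution peaked on the diagonal: adjacent pitch bins
# get most of the mass, large jumps get probability ~1e-5.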
Example #11
def _two_d_integration(grid, value_grid):
    """Perform 2-D integration numerically."""
    log_spot_grid, variance_grid = tf.meshgrid(*grid)
    delta_v = variance_grid[1:, :] - variance_grid[:-1, :]
    delta_s = log_spot_grid[:, 1:] - log_spot_grid[:, :-1]
    integral = tf.math.reduce_sum(value_grid[0, :-1, :] * delta_v, axis=0)
    integral = tf.math.reduce_sum(integral[:-1] * delta_s[0, :])
    return integral
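A sanity check under the obvious assumption: integrating the constant 1 over the unit square should return roughly 1 (the sums are left Riemann sums, hence the `[:-1]` slices):

import tensorflow as tf

grid = [tf.linspace(0., 1., 101), tf.linspace(0., 1., 101)]
value_grid = tf.ones([1, 101, 101])
print(_two_d_integration(grid, value_grid).numpy())  # ~1.0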
Example #12
    def call(self, inputs):
        value, index = inputs
        if self.cache.shape == inputs[0].shape:
            self.cache = value
            return value

        shape = self.cache.shape.as_list()
        num_index_axes = index.shape[0]
        num_batch_axes = self.num_batch_axes
        num_feature_axes = len(shape) - num_index_axes - num_batch_axes
        features_shape = shape[num_batch_axes + num_index_axes:]
        batch_shape = shape[:num_batch_axes]

        value_index_shape = tf.shape(value)[num_batch_axes:-num_feature_axes]
        if tf.reduce_max(value_index_shape) > 1:
            # This is a block update starting at index.
            value_ranges = []
            for i, s in enumerate(tf.unstack(value_index_shape)):
                curr_range = tf.range(index[i], index[i] + s)
                value_ranges.append(curr_range)

            batch_ranges = [tf.range(s) for s in batch_shape]

            mesh = tf.meshgrid(*(batch_ranges + value_ranges), indexing='ij')
            indices = tf.stack(mesh, axis=-1)
            indices = tf.reshape(indices,
                                 [-1, num_index_axes + num_batch_axes])
        else:
            # This is a single update at index position.
            batch_ranges = [tf.range(s) for s in batch_shape]
            mesh = tf.meshgrid(*batch_ranges, indexing='ij')
            batch_indices = tf.stack(mesh, axis=-1)
            batch_indices = tf.reshape(batch_indices, [-1, num_batch_axes])

            # Add leading axes to nd-index and tile to get batched indices.
            shape_indices = tf.reshape(index, [1] * num_batch_axes + [-1])
            shape_indices = tf.tile(shape_indices, batch_shape + [1])
            shape_indices = tf.reshape(shape_indices, [-1, num_index_axes])

            indices = tf.concat([batch_indices, shape_indices], axis=-1)

        # We need to squeeze nd-axes from value before updating.
        value = tf.reshape(value, [-1] + features_shape)
        self.cache = tf.tensor_scatter_nd_update(self.cache, indices, value)
        return self.cache
 def second_order_coeff_fn(t, location_grid):
     del t
     z, y, x = tf.meshgrid(*location_grid, indexing='ij')
     u_zz = 1
     u_xx = 1
     u_yy = 1
     u_xy = tf.math.sin(x) * tf.math.cos(y) / 2
     u_yz = tf.math.cos(y) * tf.math.cos(z) / 2
     return [[u_zz, u_yz, None], [u_yz, u_yy, u_xy], [None, u_xy, u_xx]]
Example #14
    def test_3d_vector_valued_function_and_fill_value(self):
        x_ref_min = np.array([1.0, 0.0, -1.2], dtype=np.float32)
        x_ref_max = np.array([2.3, 3.0, 1.0], dtype=np.float32)
        ny = [200, 210, 211]

        # Build y_ref.
        x0s, x1s, x2s = tf.meshgrid(tf.linspace(x_ref_min[0], x_ref_max[0],
                                                ny[0]),
                                    tf.linspace(x_ref_min[1], x_ref_max[1],
                                                ny[1]),
                                    tf.linspace(x_ref_min[2], x_ref_max[2],
                                                ny[2]),
                                    indexing='ij')

        def func(x0, x1, x2):
            # Shape [..., 2] output.
            return tf.stack([tf.sin(x0 * x1 * x2),
                             tf.cos(x0 * x1 * x2)],
                            axis=-1)

        # Shape ny + [2]
        y_ref = self.evaluate(func(x0s, x1s, x2s))

        seed = test_util.test_seed_stream()
        # Shape [10, 3]
        x = tf.stack([
            tf.random.uniform(shape=(10, ),
                              minval=x_ref_min[0],
                              maxval=x_ref_max[0],
                              seed=seed()),
            tf.random.uniform(shape=(10, ),
                              minval=x_ref_min[1],
                              maxval=x_ref_max[1],
                              seed=seed()),
            tf.random.uniform(shape=(10, ),
                              minval=x_ref_min[2],
                              maxval=x_ref_max[2],
                              seed=seed()),
        ],
                     axis=-1)

        x = np.array(self.evaluate(x))
        x[0, 0] = -3  # Outside the grid, so `fill_value` will be imputed.

        expected_y = np.array(self.evaluate(func(x[:, 0], x[:, 1], x[:, 2])))
        fill_value = -42
        expected_y[0, :] = fill_value

        actual_y = tfp.math.batch_interp_regular_nd_grid(x=x,
                                                         x_ref_min=x_ref_min,
                                                         x_ref_max=x_ref_max,
                                                         y_ref=y_ref,
                                                         axis=-4,
                                                         fill_value=fill_value)

        self.assertAllClose(expected_y, self.evaluate(actual_y), atol=0.02)
Example #15
 def call(self, x, training=None):
     x = self._conv2(x)
     x = self._conv3(x)
     x = self._conv4(x)
     posx, posy = tf.meshgrid(tf.linspace(-1., 1., num=16),
                              tf.linspace(-1., 1., num=16))
     stacked_pos = tf.expand_dims(tf.stack([posx, posy], axis=-1), axis=0)
     stacked_pos = tf.tile(stacked_pos, [tf.shape(x)[0], 1, 1, 1])
     x = tf.concat([x, stacked_pos], axis=-1)
     x = self._conv5(x)
     return self._reshape(x)
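The position-channel trick in `call` also works standalone. A minimal sketch with made-up feature shapes (TF2 eager assumed):

import tensorflow as tf

features = tf.zeros([2, 16, 16, 8])  # [batch, height, width, channels]
posx, posy = tf.meshgrid(tf.linspace(-1., 1., 16), tf.linspace(-1., 1., 16))
pos = tf.expand_dims(tf.stack([posx, posy], axis=-1), axis=0)
pos = tf.tile(pos, [tf.shape(features)[0], 1, 1, 1])
augmented = tf.concat([features, pos], axis=-1)
print(augmented.shape)  # (2, 16, 16, 10): two extra coordinate channels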
 def test_multidim_with_x(self):
     y = tf.ones((4, 5), dtype=tf.float64)
     v0 = tf.cast(tf.linspace(0., 1., 4), tf.float64)
     v1 = tf.cast(tf.linspace(0., 4.2, 5), tf.float64)
     x0, x1 = tf.meshgrid(v0, v1, indexing='ij')
     integral_0 = self.evaluate(tfp_math.trapz(y, x0, axis=0))
     integral_1 = self.evaluate(tfp_math.trapz(y, x1, axis=1))
     self.assertTupleEqual(integral_0.shape, (5, ))
     self.assertTupleEqual(integral_1.shape, (4, ))
     self.assertAllClose(integral_0, np.ones((5, )) * 1.0)
     self.assertAllClose(integral_1, np.ones((4, )) * 4.2)
Example #17
    def _initial_value():
        """Computes initial value as a delta function delta(log_spot(t), var(0))."""
        log_spot, variance = tf.meshgrid(*grid)

        init_value = tf.where(
            tf.math.logical_and(
                tf.math.abs(log_spot - scaled_initial_point[0]) <
                delta_x + pde_grid_tol,
                tf.math.abs(variance - scaled_initial_point[1]) <
                delta_y + pde_grid_tol), 1.0 / (delta_x * delta_y * 4), 0.0)
        # init_value.shape = (1, num_grid_x, num_grid_y)
        return tf.expand_dims(init_value, axis=0)
    def test_compare_monte_carlo_to_backward_pde(self):
        dtype = tf.float64
        kappa = 0.3
        theta = 0.05
        epsilon = 0.02
        rho = 0.1
        maturity_time = 1.0
        initial_log_spot = 3.0
        initial_vol = 0.05
        strike = 15
        discounting = 0.5

        heston = HestonModel(kappa=kappa,
                             theta=theta,
                             epsilon=epsilon,
                             rho=rho,
                             dtype=dtype)
        initial_state = np.array([initial_log_spot, initial_vol])
        samples = heston.sample_paths(
            times=[maturity_time / 2, maturity_time],
            initial_state=initial_state,
            time_step=0.01,
            num_samples=1000,
            random_type=tff.math.random.RandomType.PSEUDO_ANTITHETIC,
            seed=42)
        self.assertEqual(samples.shape, [1000, 2, 2])
        log_spots = samples[:, -1, 0]
        monte_carlo_price = (
            tf.constant(np.exp(-discounting * maturity_time), dtype=dtype) *
            tf.math.reduce_mean(tf.nn.relu(tf.math.exp(log_spots) - strike)))

        s_min, s_max = 2, 4
        v_min, v_max = 0.03, 0.07
        grid_size_s, grid_size_v = 101, 101
        time_step = 0.01

        grid = grids.uniform_grid(minimums=[s_min, v_min],
                                  maximums=[s_max, v_max],
                                  sizes=[grid_size_s, grid_size_v],
                                  dtype=dtype)

        s_mesh, _ = tf.meshgrid(grid[0], grid[1], indexing="ij")
        final_value_grid = tf.nn.relu(tf.math.exp(s_mesh) - strike)
        value_grid = heston.fd_solver_backward(
            start_time=1.0,
            end_time=0.0,
            coord_grid=grid,
            values_grid=final_value_grid,
            time_step=time_step,
            discounting=lambda *args: discounting)[0]
        pde_price = value_grid[int(grid_size_s / 2), int(grid_size_v / 2)]

        self.assertAllClose(monte_carlo_price, pde_price, atol=0.1, rtol=0.1)
    def testReferenceEquation_WithTransformationYieldingMixedTerm(self):
        """Tests an equation with mixed terms against exact solution.

    Take the reference equation `v_{t} = v_{xx} + v_{yy}` and substitute
    `v(x, y, t) = u(x, 2y - x, t)`. This yields
    `u_{t} = u_{xx} + 5u_{zz} - 2u_{xz}`, where `z = 2y - x`.
    Having `u(x, z, t) = v(x, (x+z)/2, t)` where `v(x, y, t)` is the known
    solution of the reference equation, we derive the boundary conditions
    and the expected solution for `u(x, z, t)`.
    """
        grid = grids.uniform_grid(minimums=[0, 0],
                                  maximums=[1, 1],
                                  sizes=[201, 251],
                                  dtype=tf.float32)

        final_t = 0.1
        time_step = 0.002

        def second_order_coeff_fn(t, coord_grid):
            del t, coord_grid
            return [[-5, 1], [None, -1]]

        @dirichlet
        def boundary_lower_z(t, coord_grid):
            x = coord_grid[1]
            return _reference_pde_solution(x, t) * _reference_pde_solution(
                x / 2, t)

        @dirichlet
        def boundary_upper_z(t, coord_grid):
            x = coord_grid[1]
            return _reference_pde_solution(x, t) * _reference_pde_solution(
                (x + 1) / 2, t)

        z_mesh, x_mesh = tf.meshgrid(grid[0], grid[1], indexing='ij')
        initial = (_reference_pde_initial_cond(x_mesh) *
                   _reference_pde_initial_cond((x_mesh + z_mesh) / 2))
        expected = (_reference_pde_solution(x_mesh, final_t) *
                    _reference_pde_solution((x_mesh + z_mesh) / 2, final_t))

        actual = fd_solvers.solve_forward(
            start_time=0,
            end_time=final_t,
            coord_grid=grid,
            values_grid=initial,
            time_step=time_step,
            second_order_coeff_fn=second_order_coeff_fn,
            boundary_conditions=[(boundary_lower_z, boundary_upper_z),
                                 (_zero_boundary, _zero_boundary)])[0]

        self.assertAllClose(expected, actual, atol=1e-3, rtol=1e-3)
 def testLogitNormalMeanAndVariance(self):
     locs, scales = tf.meshgrid(tf.linspace(-10.0, 10.0, 10),
                                tf.exp(tf.linspace(-3.0, 3.0, 10)))
     dist = tfd.LogitNormal(loc=locs,
                            scale=scales,
                            validate_args=True,
                            gauss_hermite_scale_limit=1.,
                            num_probit_terms_approx=6)
     means = dist.mean_approx()
     trap_means = logit_normal_mean_trapezoid(locs, scales)
     self.assertAllClose(trap_means, means, rtol=1e-4)
     variances = dist.variance_approx()
     trap_variances = logit_normal_variance_trapezoid(locs, scales)
     self.assertAllClose(trap_variances, variances, rtol=1e-4)
Example #21
    def test_2d_vector_valued_function_with_batch_dims(self):
        # No batch dims, will broadcast.
        x_ref_min = np.array([0., 0.], dtype=np.float32)

        # No batch dims, will broadcast.
        x_ref_max = np.array([1., 1.], dtype=np.float32)
        ny = [200, 210]

        # Build y_ref.

        # First step is to build up two batches of x0 and x1.
        x0s, x1s = tf.meshgrid(tf.linspace(x_ref_min[0], x_ref_max[0], ny[0]),
                               tf.linspace(x_ref_min[1], x_ref_max[1], ny[1]),
                               indexing='ij')
        x0s = tf.stack([x0s, x0s], axis=0)
        x1s = tf.stack([x1s, x1s], axis=0)

        def func(batch_of_x0, batch_of_x1):
            """Function that does something different for batch 0 and batch 1."""
            # batch_0_result.shape = [..., 2].
            x0, x1 = batch_of_x0[0, ...], batch_of_x1[0, ...]
            batch_0_result = tf.stack(
                [tf.sin(x0 * x1), tf.cos(x0 * x1)], axis=-1)

            x0, x1 = batch_of_x0[1, ...], batch_of_x1[1, ...]
            batch_1_result = tf.stack(
                [tf.sin(2 * x0), tf.cos(2 * x1)], axis=-1)

            return tf.stack([batch_0_result, batch_1_result], axis=0)

        # Shape [2] + ny + [2]
        y_ref = self.evaluate(func(x0s, x1s))

        # Shape [2, 10, 2].  The batch shape is [2], the [10] is the number of
        # interpolants per batch.
        x = tf.random.uniform(shape=[2, 10, 2], seed=test_util.test_seed())

        x = self.evaluate(x)

        expected_y = func(x[..., 0], x[..., 1])
        actual_y = tfp.math.batch_interp_regular_nd_grid(x=x,
                                                         x_ref_min=x_ref_min,
                                                         x_ref_max=x_ref_max,
                                                         y_ref=y_ref,
                                                         axis=-3)

        self.assertAllClose(self.evaluate(expected_y),
                            self.evaluate(actual_y),
                            atol=0.02)
Example #22
def _conditional_expected_variance_from_pde_solution(grid, value_grid, dtype):
    """Computes E[variance|log_spot=k]."""
    # value_grid.shape = [1, num_x, num_y]
    log_spot_grid, variance_grid = tf.meshgrid(*grid)
    delta_s = variance_grid[1:, :] - variance_grid[:-1, :]
    # Calculate I(0)
    integral_0 = tf.math.reduce_sum(value_grid[0, :-1, :] * delta_s, axis=0)
    # Calculate I(1)
    integral_1 = tf.math.reduce_sum(variance_grid[:-1, :] *
                                    value_grid[0, :-1, :] * delta_s,
                                    axis=0)
    variance_given_logspot = tf.math.divide_no_nan(integral_1, integral_0)
    return functools.partial(linear.interpolate,
                             x_data=log_spot_grid[0, :],
                             y_data=variance_given_logspot,
                             dtype=dtype)
Example #23
def meshgrid(*xi, **kwargs):
    """This currently requires copy=True and sparse=False."""
    sparse = kwargs.get('sparse', False)
    if sparse:
        raise ValueError("tf.numpy doesn't support returning sparse arrays yet")

    copy = kwargs.get('copy', True)
    if not copy:
        raise ValueError('tf.numpy only supports copy=True')

    indexing = kwargs.get('indexing', 'xy')

    xi = [array_ops.asarray(arg).data for arg in xi]
    kwargs = {'indexing': indexing}

    outputs = tf.meshgrid(*xi, **kwargs)
    outputs = [utils.tensor_to_ndarray(output) for output in outputs]

    return outputs
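Assuming the tf.numpy helper modules used above (`array_ops`, `utils`) are importable, usage mirrors `np.meshgrid` with its default `'xy'` indexing:

xx, yy = meshgrid([1, 2, 3], [10, 20])
# xx.shape == yy.shape == (2, 3); xx repeats [1, 2, 3] along rows,
# yy repeats [10, 20] along columns.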
Example #24
 def __getitem__(self, slices):
   if not isinstance(slices, tuple):
     slices = [slices]
   else:
     slices = list(slices)
   if Ellipsis in slices:
     idx = slices.index(Ellipsis)
     slices[idx:idx+1] = [slice(None)] * (self.rank - len(slices) + 1)
   # Remove trailing full slices for performance.
   while (slices
          and isinstance(slices[-1], slice)
          and slices[-1] == slice(None)):
     slices.pop()
   grid = tf.meshgrid(*(rng[sl] for rng, sl in zip(self.ranges, slices)),
                      indexing='ij')
   stack = tf.stack(grid, axis=-1)
   to_squeeze = [i for i, sl in enumerate(slices) if not isinstance(sl, slice)]
   if to_squeeze:
     stack = tf.squeeze(stack, axis=to_squeeze)
   return stack
Example #25
    def basis(sample_paths):
        """Computes polynomial basis expansion at the given sample points.

    Args:
      sample_paths: A `Tensor` of either `float32` or `float64` dtype and of
        shape `[num_samples, dim]` where `dim` has to be statically known.

    Returns:
      A `Tensor` of shape `[(degree + 1)**dim, num_samples]`.
    """
        samples = tf.convert_to_tensor(sample_paths)
        dim = samples.shape.as_list()[-1]
        grid = tf.range(0, degree + 1, dtype=samples.dtype)

        samples_centered = samples - tf.math.reduce_mean(samples, axis=0)
        samples_centered = tf.expand_dims(samples_centered, -2)
        grid = tf.meshgrid(*(dim * [grid]))
        grid = tf.reshape(tf.stack(grid, -1), [-1, dim])
        # Shape [num_samples, (degree + 1)**dim]
        basis_expansion = tf.reduce_prod(samples_centered**grid, -1)
        return tf.transpose(basis_expansion)
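A shape-only sketch, assuming the enclosing factory was built with `degree = 2` (the `[7, 3]` sample shape is illustrative):

import tensorflow as tf

samples = tf.random.normal([7, 3])  # [num_samples, dim]
print(basis(samples).shape)  # ((degree + 1)**dim, num_samples) = (27, 7)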
    def test_2d_vector_valued_function(self):
        x_ref_min = [1., 0.]
        x_ref_max = [2.3, 1.]
        ny = [200, 210]

        # Build y_ref.
        x0s, x1s = tf.meshgrid(tf.linspace(x_ref_min[0], x_ref_max[0], ny[0]),
                               tf.linspace(x_ref_min[1], x_ref_max[1], ny[1]),
                               indexing='ij')

        def func(x0, x1):
            # Shape [..., 2] output.
            return tf.stack([tf.sin(x0 * x1), tf.cos(x0 * x1)], axis=-1)

        # Shape ny + [2]
        y_ref = self.evaluate(func(x0s, x1s))

        # Shape [10, 2]
        x = tf.stack([
            tf.random.uniform(
                shape=(10, ), minval=x_ref_min[0], maxval=x_ref_max[0],
                seed=0),
            tf.random.uniform(
                shape=(10, ), minval=x_ref_min[1], maxval=x_ref_max[1],
                seed=1),
        ],
                     axis=-1)

        x = self.evaluate(x)

        expected_y = func(x[:, 0], x[:, 1])
        actual_y = tfp.math.batch_interp_regular_nd_grid(x=x,
                                                         x_ref_min=x_ref_min,
                                                         x_ref_max=x_ref_max,
                                                         y_ref=y_ref,
                                                         axis=-3)

        self.assertAllClose(self.evaluate(expected_y),
                            self.evaluate(actual_y),
                            atol=0.02)
Example #27
    def basis(sample_paths: types.RealTensor,
              time_index: types.IntTensor) -> types.RealTensor:
        """Computes polynomial basis expansion at the given sample points.

    Args:
      sample_paths: A `Tensor` of either `float32` or `float64` dtype and of
        either shape `[num_samples, num_times, dim]` or
        `[batch_size, num_samples, num_times, dim]`.
      time_index: An integer scalar `Tensor` that corresponds to the time
        coordinate at which the basis function is computed.

    Returns:
      A `Tensor` of shape `[batch_size, (degree + 1)**dim, num_samples]`.
    """
        sample_paths = tf.convert_to_tensor(sample_paths, name="sample_paths")
        if sample_paths.shape.rank == 3:
            sample_paths = tf.expand_dims(sample_paths, axis=0)
        shape = tf.shape(sample_paths)
        num_samples = shape[1]
        batch_size = shape[0]
        dim = sample_paths.shape[-1]  # Dimension should be statically known.
        # Shape [batch_size, num_samples, 1, dim]
        slice_samples = tf.slice(sample_paths, [0, 0, time_index, 0],
                                 [batch_size, num_samples, 1, dim])
        # Shape [batch_size, num_samples, 1, dim]
        samples_centered = slice_samples - tf.math.reduce_mean(
            slice_samples, axis=1, keepdims=True)
        grid = tf.range(degree + 1, dtype=samples_centered.dtype)
        # Creates a grid of 'power' expansions, i.e., a `Tensor` of shape
        # [(degree + 1)**dim, dim] with entries [k_1, .., k_dim] where
        # 0 <= k_i <= degree.
        grid = tf.meshgrid(*(dim * [grid]))
        # Shape [(degree + 1)**dim, dim]
        grid = tf.reshape(tf.stack(grid, -1), [-1, dim])
        # `samples_centered` has shape [batch_size, num_samples, 1, dim],
        # `samples_centered**grid` has shape
        # `[batch_size, num_samples, (degree + 1)**dim, dim]`
        # so that the output shape is `[batch_size, num_samples, (degree + 1)**dim]`
        basis_expansion = tf.reduce_prod(samples_centered**grid, axis=-1)
        return tf.transpose(basis_expansion, [0, 2, 1])
    def testLogProb(self):
        # Test that numerically integrating over some portion of the domain yields a
        # normalization constant of close to 1.
        # pyformat: disable
        scale = tf.linalg.LinearOperatorFullMatrix(
            self._input([[1., -0.5], [-0.5, 1.]]))
        # pyformat: enable
        dist = tfd.MultivariateStudentTLinearOperator(
            loc=self._input([1., 1.]),
            df=self._input(5.),
            scale=scale)

        spacings = tf.cast(tf.linspace(-20., 20., 100), self.dtype)
        x, y = tf.meshgrid(spacings, spacings)
        points = tf.concat([x[..., tf.newaxis], y[..., tf.newaxis]], -1)
        log_probs = dist.log_prob(points)
        normalization = tf.exp(
            tf.reduce_logsumexp(log_probs)) * (spacings[1] - spacings[0])**2
        self.assertAllClose(1., self.evaluate(normalization), atol=1e-3)

        mode_log_prob = dist.log_prob(dist.mode())
        self.assertTrue(np.all(self.evaluate(mode_log_prob >= log_probs)))
 def testLogitNormalVarianceGH(self):
     locs, scales = tf.meshgrid(tf.linspace(-10.0, 10.0, 10),
                                tf.exp(tf.linspace(-3.0, 0.0, 10)))
     ghs = ln_lib.logit_normal_variance_gh(locs, scales, deg=50)
     traps = logit_normal_variance_trapezoid(locs, scales)
     self.assertAllClose(traps, ghs, rtol=1e-4)
Example #30
    def call(self, net, training):
        keep_prob = self.keep_prob
        dropblock_size = self.dropblock_size
        data_format = self.data_format
        if not training or keep_prob is None:
            return net

        tf.logging.info(
            'Applying DropBlock: dropblock_size {}, net.shape {}'.format(
                dropblock_size, net.shape))

        if data_format == 'channels_last':
            _, width, height, _ = net.get_shape().as_list()
        else:
            _, _, width, height = net.get_shape().as_list()
        if width != height:
            raise ValueError(
                'Input tensor with width!=height is not supported.')

        dropblock_size = min(dropblock_size, width)
        # seed_drop_rate is the gamma parameter of DropBlock.
        seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
            width - dropblock_size + 1)**2

        # Forces the block to be inside the feature map.
        w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
        valid_block_center = tf.logical_and(
            tf.logical_and(w_i >= int(dropblock_size // 2),
                           w_i < width - (dropblock_size - 1) // 2),
            tf.logical_and(h_i >= int(dropblock_size // 2),
                           h_i < width - (dropblock_size - 1) // 2))

        valid_block_center = tf.expand_dims(valid_block_center, 0)
        valid_block_center = tf.expand_dims(
            valid_block_center, -1 if data_format == 'channels_last' else 0)

        randnoise = tf.random_uniform(net.shape, dtype=tf.float32)
        block_pattern = (
            1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
                (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
        block_pattern = tf.cast(block_pattern, dtype=tf.float32)

        if dropblock_size == width:
            block_pattern = tf.reduce_min(
                block_pattern,
                axis=[1, 2] if data_format == 'channels_last' else [2, 3],
                keepdims=True)
        else:
            if data_format == 'channels_last':
                ksize = [1, dropblock_size, dropblock_size, 1]
            else:
                ksize = [1, 1, dropblock_size, dropblock_size]
            block_pattern = -tf.nn.max_pool(-block_pattern,
                                            ksize=ksize,
                                            strides=[1, 1, 1, 1],
                                            padding='SAME',
                                            data_format='NHWC' if data_format
                                            == 'channels_last' else 'NCHW')

        percent_ones = (tf.cast(tf.reduce_sum((block_pattern)), tf.float32) /
                        tf.cast(tf.size(block_pattern), tf.float32))

        net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
            block_pattern, net.dtype)
        return net
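The meshgrid-based validity mask is worth isolating. A hedged sketch for `width = 6`, `dropblock_size = 3` (TF2 eager assumed; the original snippet runs under TF1):

import tensorflow as tf

width, dropblock_size = 6, 3
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
    tf.logical_and(w_i >= dropblock_size // 2,
                   w_i < width - (dropblock_size - 1) // 2),
    tf.logical_and(h_i >= dropblock_size // 2,
                   h_i < width - (dropblock_size - 1) // 2))
# True only for centers whose block fits entirely inside the map,
# i.e. rows and columns 1..4 in this 6x6 example.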