Example #1
  def testNanFromGradsDontPropagate(self):
    """Test that update with NaN gradients does not cause NaN in results."""
    if tf1.control_flow_v2_enabled():
      self.skipTest('b/138796859')
    if tf.executing_eagerly(): return
    def _nan_log_prob_with_nan_gradient(x):
      return np.nan * tf.reduce_sum(x)

    initial_x = tf.linspace(0.01, 5, 10)
    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=_nan_log_prob_with_nan_gradient,
        step_size=2.,
        num_leapfrog_steps=5)
    updated_x, kernel_results = hmc.one_step(
        current_state=initial_x,
        previous_kernel_results=hmc.bootstrap_results(initial_x),
        seed=test_util.test_seed())
    initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
        [initial_x, updated_x, kernel_results.log_accept_ratio])
    acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))

    logging.vlog(1, 'initial_x = {}'.format(initial_x_))
    logging.vlog(1, 'updated_x = {}'.format(updated_x_))
    logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))

    self.assertAllEqual(initial_x_, updated_x_)
    self.assertEqual(acceptance_probs, 0.)

    self.assertAllEqual([True], [
        g is None for g in tf.gradients(
            ys=kernel_results.proposed_results.grads_target_log_prob,
            xs=initial_x)
    ])
    self.assertAllFinite(
        self.evaluate(tf.gradients(ys=updated_x, xs=initial_x)[0]))
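A note on the acceptance check above: when the proposal's target log prob is NaN, the kernel rejects it, and the assertions imply `log_accept_ratio` comes out as -inf, so `exp(min(log_accept_ratio, 0))` is exactly zero. A minimal NumPy sketch of that arithmetic (the -inf value is an assumption drawn from the assertions, not shown by the snippet itself):

import numpy as np

# A proposal rejected due to NaN surfaces as log_accept_ratio = -inf;
# exp(min(-inf, 0)) collapses to an acceptance probability of exactly 0.
log_accept_ratio = np.float32(-np.inf)
acceptance_prob = np.exp(np.minimum(log_accept_ratio, 0.))
assert acceptance_prob == 0.0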
Example #2
 def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
     """Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
     with self._name_and_control_scope(name):
         # Linspace only takes scalars, so we'll add in the offset afterwards.
         seq = tf.linspace(tf.constant(0., dtype=self.dtype), 0.5 - 0.5 * p,
                           tf.cast(p, tf.int32))
          return seq + tf.expand_dims(a, -1)
Example #3
 def trapz_sin_fn(self, x_min, x_max, n, expected, rtol):
     pi = tf.constant(np.pi, dtype=tf.float32)
     x = tf.cast(tf.linspace(x_min, x_max, n), tf.float32)
     y = tf.sin(pi * x)
     np.testing.assert_allclose(self.evaluate(tfp_math.trapz(y, x)),
                                expected,
                                rtol=rtol)
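The helper above checks `tfp_math.trapz` against known integrals of sin(pi*x). A self-contained NumPy sketch of the same idea, using the exact integral over [0, 1], which is 2/pi:

import numpy as np

# Trapezoidal integration of sin(pi*x) on [0, 1]; the exact answer is 2/pi.
x = np.linspace(0., 1., 1001)
approx = np.trapz(np.sin(np.pi * x), x)
assert abs(approx - 2. / np.pi) < 1e-5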
Example #4
    def plot_prior_latent(self, intervals, figsize=None, **kwargs):
        if (len(intervals) != self.ndim_latent or
                self.ndim_latent not in (1, 2)):
            raise ValueError(
                "This method is only defined for 1D or 2D models.")

        y = [
            tf.linspace(float(i[0]), float(i[1]), int(i[2])) for i in intervals
        ]
        y = tf.meshgrid(*y, indexing="ij")
        y = tf.stack(y, axis=-1)
        prob = tfpd.Independent(self.prior(**kwargs), 1).prob(y)

        if self.ndim_latent == 1:
            plt.figure(figsize=figsize or (16, 6))
            plt.plot(y, prob)
            plt.xlabel("latent space")
            plt.ylabel("prior")
            plt.grid(True)
            plt.xticks(np.arange(*np.ceil(intervals[0][:2])))
        else:
            plt.figure(figsize=figsize or (16, 14))
            plt.imshow(prob,
                       vmin=0,
                       vmax=np.max(prob),
                       origin="lower",
                       extent=(y[0, 0, 1], y[0, -1, 1],
                               y[0, 0, 0], y[-1, 0, 0]))
            plt.axis("image")
            plt.grid(False)
            plt.xlim(y[0, 0, 1], y[0, -1, 1])
            plt.ylim(y[0, 0, 0], y[-1, 0, 0])
            plt.xlabel("latent dimension 1")
            plt.ylabel("latent dimension 2")
Example #5
    def test_frequencies_controls_are_bounded(self):
        depth = 10

        def freq_scale_fn(x):
            return core.frequencies_sigmoid(x,
                                            depth=depth,
                                            hz_min=0.0,
                                            hz_max=8000.0)

        synthesizer = synths.Sinusoidal(n_samples=32000,
                                        sample_rate=16000,
                                        freq_scale_fn=freq_scale_fn)
        batch_size = 3
        num_frames = 10
        n_partials = 100
        amps = tf.zeros((batch_size, num_frames, n_partials), dtype=tf.float32)
        freqs = tf.linspace(-100.0, 100.0, n_partials)
        freqs = tf.tile(freqs[tf.newaxis, tf.newaxis, :, tf.newaxis],
                        [batch_size, num_frames, 1, depth])

        controls = synthesizer.get_controls(amps, freqs)
        freqs = controls['frequencies']
        lt_nyquist = (freqs <= 8000.0)
        gt_zero = (freqs >= 0.0)
        both_conditions = np.logical_and(lt_nyquist, gt_zero)

        self.assertTrue(np.all(both_conditions))
Example #6
 def testBijector(self, lower, upper):
   bijector = tfb.Reciprocal()
   self.assertStartsWith(bijector.name, 'reciprocal')
   x = tf.linspace(lower, upper, 100)
   y = 1. / x
   self.assertAllClose(self.evaluate(y), self.evaluate(bijector.forward(x)))
   self.assertAllClose(self.evaluate(x), self.evaluate(bijector.inverse(y)))
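The test relies on `Reciprocal` being its own inverse. A standalone sketch of the round trip in plain TensorFlow (with a grid chosen to exclude zero):

import tensorflow as tf

# y = 1/x applied twice recovers x, up to float rounding.
x = tf.linspace(0.5, 2.0, 100)
y = 1. / x
tf.debugging.assert_near(1. / y, x)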
Example #7
  def __init__(self,
         synth,
         n_samples,
         synth_params=('f0', 'amp'),
         fft_sizes=(2048, 1024, 512, 256, 128, 64),
         loss_type='L1',
         mag_weight=1.0,
         delta_time_weight=0.0,
         delta_freq_weight=0.0,
         cumsum_freq_weight=0.0,
         loudness_weight=0.0,
         spectral_centroid_weight=0.0,
         blurred_spectral_weight=0.0,
         rate=16000,
         name='spectral_loss'):
    super().__init__()
    self.loss_type = loss_type
    self.mag_weight = mag_weight
    self.delta_time_weight = delta_time_weight
    self.delta_freq_weight = delta_freq_weight
    self.cumsum_freq_weight = cumsum_freq_weight
    self.loudness_weight = loudness_weight
    self.spectral_centroid_weight = spectral_centroid_weight
    self.blurred_spectral_weight = blurred_spectral_weight

    self.synth_params = synth_params
    self.n_samples = n_samples
    self.synth = synth
    self.spec_layer = nn.AudioToMultiSpec(n_samples=n_samples,
                                          fft_sizes=fft_sizes)
    self.t_bins, self.f_bins, self.channels = self.spec_layer._output_shape
    self.f_quantization_bins = tf.cast(tf.linspace(0, rate//2, 100), tf.float32)

    self.optimizer = tf.keras.optimizers.Adam()
    self.dense = tfkl.Dense(self.f_bins * self.channels, activation='relu')
    self.conv1d = tfkl.DepthwiseConv2D((128, 1), strides=(1, 1), padding='same', activation='relu')
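One dtype detail in the constructor above: the underlying LinSpace kernel is registered only for float dtypes, so `tf.linspace(0, rate//2, 100)` leans on the integer endpoints being implicitly converted. A sketch of an equivalent bin construction that spells the endpoints as floats and so makes the cast unnecessary:

import tensorflow as tf

# 100 frequency-quantization bins from 0 Hz to Nyquist; with float
# endpoints, tf.linspace yields float32 directly.
rate = 16000
f_quantization_bins = tf.linspace(0.0, rate / 2, 100)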
Example #8
    def activations_to_f0_and_confidence(cls, activations, centers=None):
        """Convert network outputs (activations) to f0 predictions."""
        cent_mapping = tf.cast(
            tf.linspace(0, 7180, 360) + 1997.3794084376191, tf.float32)

        # The confidence of voicing activity and the argmax bin.
        confidence = tf.reduce_max(activations, axis=-1, keepdims=True)
        if centers is None:
            centers = tf.math.argmax(activations, axis=-1)
        centers = tf.cast(centers, tf.int32)

        # Slice the local neighborhood around the argmax bin.
        start = centers - 4
        idx_list = tf.range(0, 10)
        idx_list = start[:, None] + idx_list[None, :]

        # Bound to [0, 359].
        idx_list = tf.where(idx_list > 0, idx_list, 0)
        idx_list = tf.where(idx_list < 359, idx_list, 359)

        # Gather and weight activations.
        weights = tf.gather(activations, idx_list, batch_dims=1)
        cents = tf.gather(cent_mapping, idx_list, batch_dims=0)
        f0_cent = tf.reduce_sum(weights * cents, axis=-1) / tf.reduce_sum(
            weights, axis=-1)
        f0_hz = 10 * 2**(f0_cent / 1200.)

        return f0_hz, confidence
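The final line converts cents back to hertz via f0_hz = 10 * 2**(cents / 1200): 1200 cents is one octave, with 10 Hz as the reference frequency. A quick worked check:

import numpy as np

# 6000 cents is 5 octaves above the 10 Hz reference: 10 * 2**5 = 320 Hz.
assert 10 * 2 ** (6000 / 1200.) == 320.0
# Conversely, 440 Hz sits 1200 * log2(440 / 10) cents above the reference.
cents_a440 = 1200 * np.log2(440. / 10.)
assert abs(10 * 2 ** (cents_a440 / 1200.) - 440.) < 1e-9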
Example #9
def trapezoid(integral, x_start, x_stop, dx):
    """
    Integrate a function using the trapezoid rule, as TensorFlow's odeint() is
    much slower.

    Args:
        integral (function): Function that takes 'x' as argument and returns y
        x_start (tensor): Rank-0 tensor of x value to start at
        x_stop (tensor): Rank-0 tensor of x value to stop at
        dx (tensor): Rank-0 tensor giving the width of each trapezoid

    Returns:
        tensor: Rank-0 tensor of the integrated value. Has the same dtype as
            integral() returns
    """
    # Generate points to integrate
    steps = tf.cast((x_stop - x_start) / dx, dtype=tf.int32)
    x = tf.linspace(x_start, x_stop, steps + 1)

    # Get tensor of y values for our x points
    y = integral(x)

    # Make tensors of the start and end y values for our trapezoids
    # So if y=[10.0, 25.0, 45.0, 20.0]
    y_start = y[:-1]  # ... [10.0, 25.0, 45.0]
    y_stop = y[1:]  # ... [25.0, 45.0, 20.0]

    # Make a tensor of our trapezoid areas
    y_combined = tf.math.add(y_start, y_stop)
    trapezoids = (y_combined / tf.constant(
        2.0, dtype=y_combined.dtype)) * tf.cast(dx, dtype=y_combined.dtype)

    # Add the trapezoids together
    return tf.reduce_sum(trapezoids)
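Usage sketch for the helper above: integrate x**2 over [0, 3], whose exact value is 9. The trapezoid rule converges at O(dx**2), so the error here is tiny.

import tensorflow as tf

approx = trapezoid(lambda x: x ** 2, tf.constant(0.), tf.constant(3.),
                   tf.constant(0.01))
# abs(approx - 9.0) is on the order of 1e-4 or smaller here.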
Example #10
    def test_adapt_step_size_default(self):
        """Test that step size adaptation finds the theoretical optimal step size.

        See _calculate_expected_step_size for formula details, but roughly, for
        a high dimensional Gaussian posterior, we can calculate the approximate
        step size to achieve a given target accept rate. For such a posterior,
        `PreconditionedHMC` mimics the dynamics of sampling from an isotropic
        standard normal distribution, and so should adapt to the step size
        where the scales are all ones.

        In the example below, `expected_step` is around 0.00002, so there is
        significantly different behavior when conditioning.
        """
        dims = 100
        target_accept = 0.75
        scale_diag = tf.linspace(1e-5, 1., dims)
        _, step_size = self._run_hmc_with_step_size(scale_diag,
                                                    target_accept,
                                                    precondition=False,
                                                    num_adaptation_steps=500,
                                                    num_results=1)

        expected_step = self._calculate_expected_step_size(
            scale_diag, target_accept)

        self.assertAllClose(step_size[-1], expected_step, atol=0.001)
Example #11
 def test_scalar_valued_function_and_get_matrix_of_results(self):
   y_ref = tf.exp(tf.linspace(start=0., stop=10., num=200))
   x = [[1.1, 1.2], [2.1, 2.2]]
   y = tfp.math.interp_regular_1d_grid(
       x, x_ref_min=0., x_ref_max=10., y_ref=y_ref)
   self.assertAllEqual((2, 2), y.shape)
   self.assertAllClose(np.exp(x), self.evaluate(y), rtol=1e-3)
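The same lookup is easy to cross-check with NumPy: `np.interp` over the regular grid reproduces the interpolated exponential to the test's tolerance.

import numpy as np

grid = np.linspace(0., 10., 200)
y = np.interp(1.1, grid, np.exp(grid))
assert abs(y - np.exp(1.1)) / np.exp(1.1) < 1e-3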
Example #12
    def test_1d_scalar_valued_function(self):
        x_ref_min = [-2.]
        x_ref_max = [1.3]
        ny = [100]

        # Build y_ref.
        x0s = tf.linspace(x_ref_min[0], x_ref_max[0], ny[0])

        def func(x0):
            return tf.sin(x0)**2

        # Shape ny
        y_ref = self.evaluate(func(x0s))

        # Shape [10, 1]
        x = tf.random.uniform(shape=(10, 1),
                              minval=x_ref_min[0],
                              maxval=x_ref_max[0],
                              seed=0)

        x = self.evaluate(x)

        expected_y = func(x[:, 0])
        actual_y = tfp.math.batch_interp_regular_nd_grid(x=x,
                                                         x_ref_min=x_ref_min,
                                                         x_ref_max=x_ref_max,
                                                         y_ref=y_ref,
                                                         axis=-1)

        self.assertAllClose(self.evaluate(expected_y),
                            self.evaluate(actual_y),
                            atol=0.02)
Example #13
    def test_crps_increases_with_increasing_deviation_in_mean(self):
        """Assert that the CRPS score increases when we increase the mean.
    """
        tf.random.set_seed(1)

        nspacing = 10
        npredictive_samples = 10000
        ntrue_samples = 1000

        # (nspacing, npredictive_samples) samples from N(mu_i, 1)
        predictive_samples = tf.random.normal((nspacing, npredictive_samples))
        predictive_samples += tf.expand_dims(tf.linspace(0.0, 5.0, nspacing),
                                             1)

        crps_samples = []
        for _ in range(ntrue_samples):
            labels = tf.random.normal((nspacing, ))
            crps_sample = regression.crps_score(
                labels=labels, predictive_samples=predictive_samples)
            crps_samples.append(crps_sample)

        crps_samples = tf.stack(crps_samples, 1)
        crps_average = tf.reduce_mean(crps_samples, axis=1)
        crps_average = crps_average.numpy()

        # The average should be monotonically increasing
        for i in range(1, len(crps_average)):
            crps_cur = crps_average[i]
            crps_prev = crps_average[i - 1]
            logging.info("CRPS cur %.5f, prev %.5f", crps_cur, crps_prev)
            self.assertLessEqual(crps_prev,
                                 crps_cur,
                                 msg="CRPS violates monotonicity in mean")
Example #14
    def testNanRejection(self):
        """Tests that an update that yields NaN potentials gets rejected.

    We run HMC with a target distribution that returns NaN
    log-likelihoods if any element of x < 0, and unit-scale
    exponential log-likelihoods otherwise. The exponential potential
    pushes x towards 0, ensuring that any reasonably large update will
    push us over the edge into NaN territory.
    """
        def _unbounded_exponential_log_prob(x):
            """An exponential distribution with log-likelihood NaN for x < 0."""
            per_element_potentials = tf.where(x < 0.,
                                              tf.constant(np.nan, x.dtype), -x)
            return tf.reduce_sum(per_element_potentials)

        initial_x = tf.linspace(0.01, 5, 10)
        hmc = tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=_unbounded_exponential_log_prob,
            step_size=2.,
            num_leapfrog_steps=5,
            seed=_set_seed(46))
        updated_x, kernel_results = hmc.one_step(
            current_state=initial_x,
            previous_kernel_results=hmc.bootstrap_results(initial_x))
        initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
            [initial_x, updated_x, kernel_results.log_accept_ratio])
        acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))

        tf1.logging.vlog(1, 'initial_x = {}'.format(initial_x_))
        tf1.logging.vlog(1, 'updated_x = {}'.format(updated_x_))
        tf1.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))

        self.assertAllEqual(initial_x_, updated_x_)
        self.assertEqual(acceptance_probs, 0.)
Example #15
 def _multi_gamma_sequence(self, a, p, name='multi_gamma_sequence'):
     """Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
     with tf.name_scope(name):
         # Linspace only takes scalars, so we'll add in the offset afterwards.
         seq = tf.linspace(tf.constant(0., dtype=self.dtype), 0.5 - 0.5 * p,
                           tf.cast(p, tf.int32))
         return seq + a[..., tf.newaxis]
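A worked shape check for `_multi_gamma_sequence`: with p = 3 the offset sequence is linspace(0, 0.5 - 0.5*3, 3) = [0, -0.5, -1], and broadcasting against a[..., newaxis] yields shape(a) + [p]. The same arithmetic in NumPy:

import numpy as np

a = np.array([2.0, 5.0])
seq = np.linspace(0., 0.5 - 0.5 * 3, 3)     # [0., -0.5, -1.]
out = a[..., np.newaxis] + seq              # shape (2, 3)
print(out)  # [[2.  1.5 1. ]
            #  [5.  4.5 4. ]]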
Example #16
    def test_grads_at_non_sample_pts_with_yes_preserve_gradients(self):
        # Here we use linspace, *not* random samples.  Why?  Because we want
        # the quantiles to be nicely spaced all of the time...we don't want
        # sudden jumps.
        x = tf.linspace(0., 100., num=100)
        q = tf.constant(50.1234)  # Not a sample point

        sample_pct, d_sample_pct_dq = tfp.math.value_and_gradient(
            lambda q_: tfp.stats.percentile(  # pylint: disable=g-long-lambda
                x,
                q_,
                interpolation='linear',
                preserve_gradients=True),
            q)

        [
            sample_pct,
            d_sample_pct_dq,
        ] = self.evaluate([
            sample_pct,
            d_sample_pct_dq,
        ])

        # Since `x` is evenly distributed between 0 and 100, the percentiles are as
        # well.
        self.assertAllClose(50.1234, sample_pct)
        self.assertAllClose(1, d_sample_pct_dq)
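The expected values follow because for x evenly spaced over [0, 100], the q-th linearly interpolated percentile is q itself: the fractional index is q/100 * (n-1) and the grid spacing is 100/(n-1), so the two factors cancel. A NumPy check:

import numpy as np

x = np.linspace(0., 100., num=100)
assert np.isclose(np.percentile(x, 50.1234), 50.1234)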
Example #17
    def testQuantilesBroadcasting(self):
        loc = tf.constant([0.1, 0.2])[:, tf.newaxis, tf.newaxis]
        scale = tf.constant([0.9, 1., 1.1])[:, tf.newaxis]
        concentration = tf.constant([0.0, 0.4, 0.5, 0.6, 1.0])

        d = tfd.GeneralizedPareto(loc=loc,
                                  scale=scale,
                                  concentration=concentration,
                                  validate_args=True)
        p = tf.linspace(0., 1., 1000)[1:-1][:, tf.newaxis, tf.newaxis,
                                            tf.newaxis]
        q = d.quantile(p)
        self.assertAllFinite(q)

        loc_numpy = self.evaluate(loc)
        scale_numpy = self.evaluate(scale)
        conc_numpy = self.evaluate(concentration)

        q_scipys = []
        for i in range(5):
            q_scipys.append(
                sp_stats.genpareto.ppf(
                    np.linspace(0., 1., 1000)[1:-1].reshape(998, 1, 1, 1),
                    conc_numpy[i].reshape(1, 1, 1, 1),
                    loc_numpy.reshape(1, 2, 1, 1),
                    scale_numpy.reshape(1, 1, 3, 1)))
        q_scipy = np.concatenate(q_scipys, axis=-1)

        self.assertAllClose(q, q_scipy, rtol=1.e-5)
Example #18
def frequencies_softmax(freqs: tf.Tensor,
                        depth: int = 1,
                        hz_min: float = 20.0,
                        hz_max: float = 8000.0) -> tf.Tensor:
    """Softmax to logarithmically scale network outputs to frequencies.

  Args:
    freqs: Neural network outputs, [batch, time, n_sinusoids * depth] or
      [batch, time, n_sinusoids, depth].
    depth: If freqs is 3-D, the number of softmax components per a sinusoid to
      unroll from the last dimension.
    hz_min: Lowest frequency to consider.
    hz_max: Highest frequency to consider.

  Returns:
    A tensor of frequencies in hertz [batch, time, n_sinusoids].
  """
    if len(freqs.shape) == 3:
        # Add depth: [B, T, N*D] -> [B, T, N, D]
        freqs = _add_depth_axis(freqs, depth)
    else:
        depth = int(freqs.shape[-1])

    # Probs: [B, T, N, D].
    f_probs = tf.nn.softmax(freqs, axis=-1)

    # [1, 1, 1, D]
    unit_bins = tf.linspace(0.0, 1.0, depth)
    unit_bins = unit_bins[tf.newaxis, tf.newaxis, tf.newaxis, :]

    # [B, T, N]
    f_unit = tf.reduce_sum(unit_bins * f_probs, axis=-1, keepdims=False)
    return unit_to_hz(f_unit, hz_min=hz_min, hz_max=hz_max)
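The decode above is a soft-argmax: softmax probabilities weight D fixed bin centers from linspace(0, 1, D), giving a differentiable expected position in [0, 1] that is then mapped to hertz. A tiny sketch with D = 3 and made-up logits:

import tensorflow as tf

logits = tf.constant([[-2.0, 0.0, 4.0]])         # [1, D], D = 3
probs = tf.nn.softmax(logits, axis=-1)
bins = tf.linspace(0.0, 1.0, 3)                  # [0.0, 0.5, 1.0]
f_unit = tf.reduce_sum(bins * probs, axis=-1)    # ~0.99: last bin dominates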
Example #19
  def test_3d_vector_valued_function_and_fill_value(self):
    x_ref_min = np.array([1.0, 0.0, -1.2], dtype=np.float32)
    x_ref_max = np.array([2.3, 3.0, 1.0], dtype=np.float32)
    ny = [200, 210, 211]

    # Build y_ref.
    x0s, x1s, x2s = tf.meshgrid(
        tf.linspace(x_ref_min[0], x_ref_max[0], ny[0]),
        tf.linspace(x_ref_min[1], x_ref_max[1], ny[1]),
        tf.linspace(x_ref_min[2], x_ref_max[2], ny[2]),
        indexing='ij')

    def func(x0, x1, x2):
      # Shape [..., 2] output.
      return tf.stack([tf.sin(x0 * x1 * x2), tf.cos(x0 * x1 * x2)], axis=-1)

    # Shape ny + [2]
    y_ref = self.evaluate(func(x0s, x1s, x2s))

    seed = test_util.test_seed_stream()
    # Shape [10, 3]
    x = tf.stack([
        tf.random.uniform(
            shape=(10,), minval=x_ref_min[0], maxval=x_ref_max[0], seed=seed()),
        tf.random.uniform(
            shape=(10,), minval=x_ref_min[1], maxval=x_ref_max[1], seed=seed()),
        tf.random.uniform(
            shape=(10,), minval=x_ref_min[2], maxval=x_ref_max[2], seed=seed()),
    ],
                 axis=-1)

    x = onp.array(self.evaluate(x))
    x[0, 0] = -3  # Outside the grid, so `fill_value` will be imputed.

    expected_y = onp.array(self.evaluate(func(x[:, 0], x[:, 1], x[:, 2])))
    fill_value = -42
    expected_y[0, :] = fill_value

    actual_y = tfp.math.batch_interp_regular_nd_grid(
        x=x,
        x_ref_min=x_ref_min,
        x_ref_max=x_ref_max,
        y_ref=y_ref,
        axis=-4,
        fill_value=fill_value)

    self.assertAllClose(expected_y, self.evaluate(actual_y), atol=0.02)
Example #20
 def _get_ir(self, gain, decay):
   """Simple exponential decay of white noise."""
   gain = self._scale_fn(gain)
   decay_exponent = 2.0 + tf.exp(decay)
   time = tf.linspace(0.0, 1.0, self._reverb_length)[tf.newaxis, :]
   noise = tf.random.uniform([1, self._reverb_length], minval=-1.0, maxval=1.0)
   ir = gain * tf.exp(-decay_exponent * time) * noise
   return ir
Example #21
 def test_quartiles_of_vector(self):
     x = tf.linspace(0., 1000., 10000)
     cut_points = tfp.stats.quantiles(x, num_quantiles=4)
     self.assertAllEqual((5, ), cut_points.shape)
     cut_points_ = self.evaluate(cut_points)
     self.assertAllClose([0., 250., 500., 750., 1000.],
                         cut_points_,
                         rtol=0.002)
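With num_quantiles=4, the cut points are the 0/25/50/75/100th percentiles, which for an evenly spaced vector land exactly on [0, 250, 500, 750, 1000]. The NumPy cross-check:

import numpy as np

x = np.linspace(0., 1000., 10000)
cuts = np.percentile(x, [0, 25, 50, 75, 100])
assert np.allclose(cuts, [0., 250., 500., 750., 1000.], rtol=2e-3)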
Example #22
    def test_2d_vector_valued_function_with_batch_dims(self):
        x_ref_min = [0., 0.]  # No batch dims, will broadcast.
        x_ref_max = [1., 1.]  # No batch dims, will broadcast.
        ny = [200, 210]

        # Build y_ref.

        # First step is to build up two batches of x0 and x1.
        x0s, x1s = tf.meshgrid(tf.linspace(x_ref_min[0], x_ref_max[0], ny[0]),
                               tf.linspace(x_ref_min[1], x_ref_max[1], ny[1]),
                               indexing='ij')
        x0s = tf.stack([x0s, x0s], axis=0)
        x1s = tf.stack([x1s, x1s], axis=0)

        def func(batch_of_x0, batch_of_x1):
            """Function that does something different for batch 0 and batch 1."""
            # batch_0_result.shape = [..., 2].
            x0, x1 = batch_of_x0[0, ...], batch_of_x1[0, ...]
            batch_0_result = tf.stack(
                [tf.sin(x0 * x1), tf.cos(x0 * x1)], axis=-1)

            x0, x1 = batch_of_x0[1, ...], batch_of_x1[1, ...]
            batch_1_result = tf.stack(
                [tf.sin(2 * x0), tf.cos(2 * x1)], axis=-1)

            return tf.stack([batch_0_result, batch_1_result], axis=0)

        # Shape [2] + ny + [2]
        y_ref = self.evaluate(func(x0s, x1s))

        # Shape [2, 10, 2].  The batch shape is [2], the [10] is the number of
        # interpolants per batch.
        x = tf.random.uniform(shape=[2, 10, 2], seed=0)

        x = self.evaluate(x)

        expected_y = func(x[..., 0], x[..., 1])
        actual_y = tfp.math.batch_interp_regular_nd_grid(x=x,
                                                         x_ref_min=x_ref_min,
                                                         x_ref_max=x_ref_max,
                                                         y_ref=y_ref,
                                                         axis=-3)

        self.assertAllClose(self.evaluate(expected_y),
                            self.evaluate(actual_y),
                            atol=0.02)
Example #23
 def test_multidim_broadcast_1d_x(self):
     # To use trapz() with a 1d x array, first broadcast it with the shape of y
     y = tf.ones((4, 5), dtype=tf.float64)
     x1 = tf.cast(tf.linspace(0., 4.2, 5), tf.float64)
     x = x1 * tf.ones((4, 5), dtype=tf.float64)
     integral = self.evaluate(tfp_math.trapz(y, x, axis=1))
     self.assertTupleEqual(integral.shape, (4, ))
     self.assertAllClose(integral, np.ones((4, )) * 4.2)
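The NumPy analogue of the broadcast-x pattern above: unit height over a base of length 4.2 integrates to 4.2 along each of the 4 rows.

import numpy as np

y = np.ones((4, 5))
x = np.linspace(0., 4.2, 5) * np.ones((4, 5))
np.testing.assert_allclose(np.trapz(y, x, axis=1), np.full(4, 4.2))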
Example #24
 def test_uniform_is_special_case(self):
     # With the scale parameter going to zero, the adapted distribution should
     # approach a unit-width uniform distribution. As a side effect, this tests
     # that `mean()` is defined (because not for all distributions the mean
     # coincides with the location parameter).
     dist = self.dist_cls(loc=10, scale=1e-7)
     mean = dist.mean()
     x = tf.linspace(mean - 1, mean + 1, 10)
     self.assertAllClose(dist.prob(x), [0, 0, 0, 1, 1, 1, 1, 0, 0, 0])
Example #25
def generate_notes(n_batch,
                   n_timesteps,
                   n_harmonics=100,
                   n_mags=65,
                   get_controls=True):
  """Generate self-supervision signal of discrete notes."""
  n_notes = uniform_int(1, 20)

  # Amplitudes.
  method = 'nearest' if flip(0.5) else 'linear'
  harm_amp = uniform_generator([n_batch, n_notes, 1], n_timesteps,
                               minval=-2, maxval=2, method=method)
  if get_controls:
    harm_amp = ddsp.core.exp_sigmoid(harm_amp)

  # Frequencies.
  note_midi = uniform_generator([n_batch, n_notes, 1], n_timesteps,
                                minval=24.0, maxval=84.0, method='nearest')
  f0_hz = ddsp.core.midi_to_hz(note_midi)

  # Harmonic Distribution
  method = 'nearest' if flip(0.5) else 'linear'
  n_lines = 10
  exponents = [uniform_float(1.0, 6.0) for _ in range(n_lines)]
  harm_dist_lines = [-tf.linspace(0.0, float(i), n_harmonics)**exponents[i]
                     for i in range(n_lines)]
  harm_dist_lines = tf.stack(harm_dist_lines)
  lines_dist = uniform_generator([n_batch, n_notes, n_lines], n_timesteps,
                                 minval=0.0, maxval=1.0, method=method)
  harm_dist = (lines_dist[..., tf.newaxis] *
               harm_dist_lines[tf.newaxis, tf.newaxis, :])
  harm_dist = tf.reduce_sum(harm_dist, axis=-2)

  if get_controls:
    harm_dist = ddsp.core.exp_sigmoid(harm_dist)
    harm_dist = ddsp.core.remove_above_nyquist(f0_hz, harm_dist)
    harm_dist = ddsp.core.safe_divide(
        harm_dist, tf.reduce_sum(harm_dist, axis=-1, keepdims=True))

  # Noise Magnitudes.
  method = 'nearest' if flip(0.5) else 'linear'
  mags = uniform_generator([n_batch, n_notes, n_mags], n_timesteps,
                           minval=-6.0, maxval=uniform_float(-4.0, 0.0),
                           method=method)
  if get_controls:
    mags = ddsp.core.exp_sigmoid(mags)

  sin_amps, sin_freqs = ddsp.core.harmonic_to_sinusoidal(
      harm_amp, harm_dist, f0_hz)

  controls = {'harm_amp': harm_amp,
              'harm_dist': harm_dist,
              'f0_hz': f0_hz,
              'sin_amps': sin_amps,
              'sin_freqs': sin_freqs,
              'noise_magnitudes': mags}
  return controls
Example #26
 def test_logistic_is_special_case(self, method):
     # With no hidden units, the density should collapse to a logistic
     # distribution.
     df = deep_factorized.DeepFactorized(num_filters=(), init_scale=1)
     logistic = tfp.distributions.Logistic(loc=-df._biases[0][0, 0],
                                           scale=1.)
     x = tf.linspace(-5., 5., 20)
     val_df = getattr(df, method)(x)
     val_logistic = getattr(logistic, method)(x)
     self.assertAllClose(val_df, val_logistic)
Example #27
 def test_logistic_is_special_case_log_cdf(self):
     # With no hidden units, the density should collapse to a logistic
     # distribution.
     df = deep_factorized.DeepFactorized(num_filters=(), init_scale=1)
     logistic = tfp.distributions.Logistic(loc=-df._biases[0][0, 0],
                                           scale=1.)
     x = tf.linspace(-5000., 5000., 1000)
     log_cdf_df = df.log_cdf(x)
     log_cdf_logistic = logistic.log_cdf(x)
     self.assertAllClose(log_cdf_df, log_cdf_logistic)
Example #28
 def test_logistic_is_special_case(self):
     # With no hidden units, the density should collapse to a logistic
     # distribution convolved with a standard uniform distribution.
     df = deep_factorized.NoisyDeepFactorized(num_filters=(), init_scale=1)
     logistic = tfp.distributions.Logistic(loc=-df.base._biases[0][0, 0],
                                           scale=1.)
     x = tf.linspace(-5., 5., 20)
     prob_df = df.prob(x)
     prob_log = logistic.cdf(x + .5) - logistic.cdf(x - .5)
     self.assertAllClose(prob_df, prob_log)
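The identity being tested: convolving a density with a unit-width uniform integrates it over [x - 0.5, x + 0.5], so prob(x) = cdf(x + .5) - cdf(x - .5), and the standard logistic cdf is the sigmoid. A NumPy sketch for loc = 0, scale = 1:

import numpy as np

sigmoid = lambda t: 1. / (1. + np.exp(-t))
x = np.linspace(-5., 5., 20)
prob = sigmoid(x + .5) - sigmoid(x - .5)    # per-point convolved probabilities
assert np.all((prob > 0.) & (prob < 1.))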
Example #29
    def test_2d_vector_valued_function(self):
        x_ref_min = np.array([1., 0.], dtype=np.float32)
        x_ref_max = np.array([2.3, 1.], dtype=np.float32)
        ny = [200, 210]

        # Build y_ref.
        x0s, x1s = tf.meshgrid(tf.linspace(x_ref_min[0], x_ref_max[0], ny[0]),
                               tf.linspace(x_ref_min[1], x_ref_max[1], ny[1]),
                               indexing='ij')

        def func(x0, x1):
            # Shape [..., 2] output.
            return tf.stack([tf.sin(x0 * x1), tf.cos(x0 * x1)], axis=-1)

        # Shape ny + [2]
        y_ref = self.evaluate(func(x0s, x1s))

        # Shape [10, 2]
        seed = test_util.test_seed_stream()
        x = tf.stack([
            tf.random.uniform(shape=(10, ),
                              minval=x_ref_min[0],
                              maxval=x_ref_max[0],
                              seed=seed()),
            tf.random.uniform(shape=(10, ),
                              minval=x_ref_min[1],
                              maxval=x_ref_max[1],
                              seed=seed()),
        ],
                     axis=-1)

        x = self.evaluate(x)

        expected_y = func(x[:, 0], x[:, 1])
        actual_y = tfp.math.batch_interp_regular_nd_grid(x=x,
                                                         x_ref_min=x_ref_min,
                                                         x_ref_max=x_ref_max,
                                                         y_ref=y_ref,
                                                         axis=-3)

        self.assertAllClose(self.evaluate(expected_y),
                            self.evaluate(actual_y),
                            atol=0.02)
Example #30
def assign_bboxes(
    pillar_map_size=(256, 256),
    pillar_map_range=(75.2, 75.2),
    bboxes=None,
    bboxes_mask=None,
    bboxes_label=None,
):
    """Assign bboxes to birds-eye view pillars."""
    half_size_height = pillar_map_range[0] * 2 / pillar_map_size[0] / 2
    half_size_width = pillar_map_range[1] * 2 / pillar_map_size[1] / 2
    height_range = tf.linspace(-pillar_map_range[0] + half_size_height,
                               pillar_map_range[0] - half_size_height,
                               pillar_map_size[0])
    width_range = tf.linspace(-pillar_map_range[1] + half_size_width,
                              pillar_map_range[1] - half_size_width,
                              pillar_map_size[1])
    height_range = tf.reshape(height_range, [pillar_map_size[0], 1])
    width_range = tf.reshape(width_range, [1, pillar_map_size[1]])
    height_range = tf.tile(height_range, [1, pillar_map_size[1]])
    width_range = tf.tile(width_range, [pillar_map_size[0], 1])
    z_range = tf.zeros_like(height_range)
    pillar_map_xyz = tf.stack([height_range, width_range, z_range], axis=2)
    pillar_map_xyz = tf.reshape(pillar_map_xyz, [-1, 3])
    (pillar_map_bboxes, pillar_map_bboxes_label, pillar_map_if_in_bboxes,
     pillar_map_centerness,
     pillar_map_bboxes_index) = add_points_bboxes(pillar_map_xyz,
                                                  bboxes,
                                                  bboxes_label,
                                                  bboxes_mask,
                                                  is_2d=True)
    pillar_map_xyz = tf.reshape(pillar_map_xyz,
                                [pillar_map_size[0], pillar_map_size[1], 3])
    pillar_map_bboxes = tf.reshape(pillar_map_bboxes,
                                   [pillar_map_size[0], pillar_map_size[1], 7])
    pillar_map_bboxes_label = tf.reshape(
        pillar_map_bboxes_label, [pillar_map_size[0], pillar_map_size[1]])
    pillar_map_if_in_bboxes = tf.reshape(
        pillar_map_if_in_bboxes, [pillar_map_size[0], pillar_map_size[1]])
    pillar_map_bboxes_index = tf.reshape(
        pillar_map_bboxes_index, [pillar_map_size[0], pillar_map_size[1]])
    return (pillar_map_xyz, pillar_map_bboxes, pillar_map_bboxes_label,
            pillar_map_if_in_bboxes, pillar_map_centerness,
            pillar_map_bboxes_index)
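The half-size offsets in `assign_bboxes` place the linspace points at pillar centers rather than at cell edges. A minimal check with a hypothetical 4-cell map spanning [-2, 2]:

import tensorflow as tf

size, extent = 4, 2.0
half = extent * 2 / size / 2                # cell half-width: 0.5
centers = tf.linspace(-extent + half, extent - half, size)
print(centers.numpy())                      # [-1.5 -0.5  0.5  1.5]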