Example #1
    def testVonMisesSampleMoments(self):
        locs_v = np.array([-1., 0.3, 2.3])
        concentrations_v = np.array([1.0, 2.0, 10.0])
        von_mises = tfd.VonMises(self.make_tensor(locs_v),
                                 self.make_tensor(concentrations_v))

        n = 10000
        seed = tfp_test_util.test_seed()
        samples = von_mises.sample(n, seed=seed)

        expected_mean = von_mises.mean()
        actual_mean = tf.atan2(
            tf.reduce_mean(input_tensor=tf.sin(samples), axis=0),
            tf.reduce_mean(input_tensor=tf.cos(samples), axis=0))

        expected_variance = von_mises.variance()
        standardized_samples = samples - tf.expand_dims(von_mises.mean(), 0)
        actual_variance = 1. - tf.reduce_mean(
            input_tensor=tf.cos(standardized_samples), axis=0)

        [
            expected_mean_val, expected_variance_val, actual_mean_val,
            actual_variance_val
        ] = self.evaluate(
            [expected_mean, expected_variance, actual_mean, actual_variance])

        self.assertAllClose(expected_mean_val, actual_mean_val, rtol=0.1)
        self.assertAllClose(expected_variance_val,
                            actual_variance_val,
                            rtol=0.1)
Example #2
 def trapz_sin_fn(x_min, x_max, n, expected, rtol):
   pi = tf.constant(np.pi, dtype=tf.float32)
   s = np.linspace(0, 1, n)**1.5
   x = tf.convert_to_tensor(x_min + s * (x_max - x_min), tf.float32)
   y = tf.sin(pi * x)
   np.testing.assert_allclose(
       self.evaluate(tfp_math.trapz(y, x)), expected, rtol=rtol)
Example #3
def _rotate_on_ellipse(state_parts, vectors, angle):
    new_state_parts = []
    padded_angle = _right_pad_with_ones(angle, tf.rank(state_parts[0]))
    for state, vector in zip(state_parts, vectors):
        new_state_parts.append(state * tf.cos(padded_angle) +
                               vector * tf.sin(padded_angle))
    return new_state_parts
Example #4
 def trapz_sin_fn(x_min, x_max, n, expected, rtol):
     pi = tf.constant(np.pi, dtype=tf.float32)
     x = tf.cast(tf.linspace(x_min, x_max, n), tf.float32)
     y = tf.sin(pi * x)
     np.testing.assert_allclose(self.evaluate(tfp_math.trapz(y, x)),
                                expected,
                                rtol=rtol)
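
As a point of reference (not part of the snippets above), here is a minimal standalone check of `tfp.math.trapz`, assuming `tensorflow_probability` is importable as in these tests, against the analytic value of the integral of sin over [0, pi], which is exactly 2:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.linspace(0.0, float(np.pi), 1000)
y = tf.sin(x)
# Trapezoidal rule with 1000 evenly spaced points is accurate to ~1e-6 here.
np.testing.assert_allclose(tfp.math.trapz(y, x), 2.0, rtol=1e-4)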
Example #5
def oscillator_bank(frequency_envelopes: tf.Tensor,
                    amplitude_envelopes: tf.Tensor,
                    sample_rate: int = 16000) -> tf.Tensor:
  """Generates audio from sample-wise frequencies for a bank of oscillators.

  Args:
    frequency_envelopes: Sample-wise oscillator frequencies (Hz). Shape
      [batch_size, n_samples, n_sinusoids].
    amplitude_envelopes: Sample-wise oscillator amplitudes. Shape [batch_size,
      n_samples, n_sinusoids].
    sample_rate: Sample rate in samples per second.

  Returns:
    wav: Sample-wise audio, summed over sinusoids. Shape [batch_size, n_samples].
  """
  frequency_envelopes = tf_float32(frequency_envelopes)
  amplitude_envelopes = tf_float32(amplitude_envelopes)

  # Don't exceed Nyquist.
  amplitude_envelopes = remove_above_nyquist(frequency_envelopes,
                                             amplitude_envelopes,
                                             sample_rate)

  # Change Hz to radians per sample.
  omegas = frequency_envelopes * (2.0 * np.pi)  # rad / sec
  omegas = omegas / float(sample_rate)  # rad / sample

  # Accumulate phase and synthesize.
  phases = cumsum(omegas, axis=1)
  wavs = tf.sin(phases)
  harmonic_audio = amplitude_envelopes * wavs  # [mb, n_samples, n_sinusoids]
  audio = tf.reduce_sum(harmonic_audio, axis=-1)  # [mb, n_samples]
  return audio
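
A hedged usage sketch, assuming the function above is importable as `ddsp.core.oscillator_bank` (adjust the import for your version of the ddsp package): synthesize one second of a single 440 Hz sinusoid at half amplitude.

import tensorflow as tf
from ddsp import core  # assumption: this snippet lives in ddsp.core

n_samples = 16000
frequencies = 440.0 * tf.ones([1, n_samples, 1])  # [batch, n_samples, n_sinusoids]
amplitudes = 0.5 * tf.ones([1, n_samples, 1])
audio = core.oscillator_bank(frequencies, amplitudes, sample_rate=16000)
print(audio.shape)  # (1, 16000): sinusoids are summed over the last axis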
Example #6
    def testVonMisesSampleMoments(self):
        locs_v = np.array([-1., 0.3, 2.3])
        concentrations_v = np.array([1., 2., 10.])
        von_mises = tfd.VonMises(self.make_tensor(locs_v),
                                 self.make_tensor(concentrations_v),
                                 validate_args=True)

        n = 10000
        seed = test_util.test_seed()
        samples = von_mises.sample(n, seed=seed)

        expected_mean = von_mises.mean()
        actual_mean = tf.atan2(tf.reduce_mean(tf.sin(samples), axis=0),
                               tf.reduce_mean(tf.cos(samples), axis=0))

        expected_variance = von_mises.variance()
        standardized_samples = samples - tf.expand_dims(von_mises.mean(), 0)
        variance_samples = 1. - tf.cos(standardized_samples)

        [
            expected_mean_val, expected_variance_val, actual_mean_val,
            variance_samples_
        ] = self.evaluate(
            [expected_mean, expected_variance, actual_mean, variance_samples])

        # TODO(axch, cgs): atan2(means) is not mean(atan2), but maybe there
        # is a formulation of what this is testing that does use IID samples
        # and is amenable to assertAllMeansClose?
        self.assertAllClose(actual_mean_val, expected_mean_val, rtol=0.1)
        self.assertAllMeansClose(variance_samples_,
                                 expected_variance_val,
                                 axis=0,
                                 rtol=0.1)
Example #7
    def _apply(self, x1, x2, example_ndims=0):
        difference = np.pi * tf.abs(x1 - x2)

        if self.period is not None:
            period = tf.convert_to_tensor(self.period)
            # period acts as a batch of periods, and hence we must additionally
            # pad the shape with self.feature_ndims number of ones.
            period = util.pad_shape_with_ones(period,
                                              ndims=(example_ndims +
                                                     self.feature_ndims))
            difference /= period
        log_kernel = util.sum_rightmost_ndims_preserving_shape(
            -2 * tf.sin(difference)**2, ndims=self.feature_ndims)

        if self.length_scale is not None:
            length_scale = tf.convert_to_tensor(self.length_scale)
            length_scale = util.pad_shape_with_ones(length_scale,
                                                    ndims=example_ndims)
            log_kernel /= length_scale**2

        if self.amplitude is not None:
            amplitude = tf.convert_to_tensor(self.amplitude)
            amplitude = util.pad_shape_with_ones(amplitude,
                                                 ndims=example_ndims)
            log_kernel += 2. * tf.math.log(amplitude)
        return tf.exp(log_kernel)
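
For orientation (not part of the snippet): the quantity assembled in log space above corresponds, for a single scalar feature, to the usual periodic (ExpSinSquared) kernel. A minimal direct sketch, with parameter names borrowed from the code:

import numpy as np
import tensorflow as tf

def periodic_kernel(x1, x2, amplitude=1.0, length_scale=1.0, period=1.0):
  # k(x1, x2) = amplitude**2 * exp(-2 * sin(pi * |x1 - x2| / period)**2 / length_scale**2)
  d = np.pi * tf.abs(x1 - x2) / period
  return amplitude**2 * tf.exp(-2.0 * tf.sin(d)**2 / length_scale**2)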
Example #8
  def test_basic_statistics_no_latent_variance_one_frequency(self):
    # Fix the latent variables at the value 1 so the results are deterministic.
    num_timesteps = 10
    period = 42
    frequency_multipliers = [3]
    drift_scale = 0.

    initial_state_loc = self._build_placeholder(np.ones([2]))
    initial_state_scale = tf.zeros_like(initial_state_loc)

    initial_state_prior = tfd.MultivariateNormalDiag(
        loc=initial_state_loc, scale_diag=initial_state_scale)

    ssm = SmoothSeasonalStateSpaceModel(
        num_timesteps=num_timesteps,
        period=period,
        frequency_multipliers=frequency_multipliers,
        drift_scale=drift_scale,
        initial_state_prior=initial_state_prior)

    two_pi = 6.283185307179586
    sine_terms = tf.sin(two_pi * 3 * tf.range(
        0, num_timesteps, dtype=tf.float32) / 42)
    cosine_terms = tf.cos(two_pi * 3 * tf.range(
        0, num_timesteps, dtype=tf.float32) / 42)
    predicted_time_series_ = self.evaluate(
        (sine_terms + cosine_terms)[..., tf.newaxis])

    self.assertAllClose(self.evaluate(ssm.mean()), predicted_time_series_)
    self.assertAllClose(*self.evaluate((ssm.stddev(),
                                        tf.zeros_like(predicted_time_series_))))
Example #9
    def setUp(self):
        super(MultiplexerDataProviderTest, self).setUp()
        self.logdir = self.get_temp_dir()

        logdir = os.path.join(self.logdir, "polynomials")
        with tf.summary.create_file_writer(logdir).as_default():
            for i in xrange(10):
                scalar_summary.scalar("square",
                                      i**2,
                                      step=2 * i,
                                      description="boxen")
                scalar_summary.scalar("cube", i**3, step=3 * i)

        logdir = os.path.join(self.logdir, "waves")
        with tf.summary.create_file_writer(logdir).as_default():
            for i in xrange(10):
                scalar_summary.scalar("sine", tf.sin(float(i)), step=i)
                scalar_summary.scalar("square",
                                      tf.sign(tf.sin(float(i))),
                                      step=i)
                # Summary with rank-0 data but not owned by the scalars plugin.
                metadata = summary_pb2.SummaryMetadata()
                metadata.plugin_data.plugin_name = "marigraphs"
                tf.summary.write("high_tide",
                                 tensor=i,
                                 step=i,
                                 metadata=metadata)

        logdir = os.path.join(self.logdir, "pictures")
        with tf.summary.create_file_writer(logdir).as_default():
            colors = [
                ("`#F0F`", (255, 0, 255), "purple"),
                ("`#0F0`", (0, 255, 0), "green"),
            ]
            for (description, rgb, name) in colors:
                pixel = tf.constant([[list(rgb)]], dtype=tf.uint8)
                for i in xrange(1, 11):
                    pixels = [tf.tile(pixel, [i, i, 1])]
                    image_summary.image(name,
                                        pixels,
                                        step=i,
                                        description=description)
Example #10
File: ftm.py Project: lylyhan/ddsp
    def get_gaus_k(self, m1, m2, omega, f1, f2, x1, x2, l, alpha):
        # Takes int, int, then tensors: omega, f1, f2, x1, x2, l, alpha.
        # f1, f2 are tensors of shape (1, m1) and (1, m2).
        m1 = tf.reshape(core.tf_float32(tf.range(1, m1 + 1, 1)), [1, m1])
        m2 = tf.reshape(core.tf_float32(tf.range(1, m2 + 1, 1)), [1, m2])

        l2 = l * alpha
        if float(x1) > float(l):
            x1 = 0
        if float(x2) > float(l2):
            x2 = 0

        k = (tf.linalg.matrix_transpose(f1) *
             f2) * (tf.linalg.matrix_transpose(tf.sin(m1 * np.pi * x1 / l)) *
                    tf.sin(m2 * np.pi * x2 / l2)
                    ) / omega  #assuming x1,x2 at center of the surface

        #k = np.round(k,10)
        return k  #tensor of m by m
Example #11
    def setUp(self):
        super(MultiplexerDataProviderTest, self).setUp()
        self.logdir = self.get_temp_dir()

        logdir = os.path.join(self.logdir, "polynomials")
        with tf.summary.create_file_writer(logdir).as_default():
            for i in xrange(10):
                scalar_summary.scalar("square",
                                      i**2,
                                      step=2 * i,
                                      description="boxen")
                scalar_summary.scalar("cube", i**3, step=3 * i)

        logdir = os.path.join(self.logdir, "waves")
        with tf.summary.create_file_writer(logdir).as_default():
            for i in xrange(10):
                scalar_summary.scalar("sine", tf.sin(float(i)), step=i)
                scalar_summary.scalar("square",
                                      tf.sign(tf.sin(float(i))),
                                      step=i)
                # Summary with rank-0 data but not owned by the scalars plugin.
                metadata = summary_pb2.SummaryMetadata()
                metadata.plugin_data.plugin_name = "marigraphs"
                tf.summary.write("high_tide",
                                 tensor=i,
                                 step=i,
                                 metadata=metadata)

        logdir = os.path.join(self.logdir, "lebesgue")
        with tf.summary.create_file_writer(logdir).as_default():
            data = [
                ("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"),
                ("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"),
            ]
            for (description, distribution, name) in data:
                tensor = tf.constant([distribution], dtype=tf.float64)
                for i in xrange(1, 11):
                    histogram_summary.histogram(name,
                                                tensor * i,
                                                step=i,
                                                description=description)
Example #12
File: ftm.py Project: lylyhan/ddsp
    def get_gaus_f(self, m, l, tau, r):  # takes int, tensor, int, float
        # Trapezoid rule to integrate f(x) * sin(m * pi * x) from 0 to l:
        # each panel contributes (f(a) + f(b)) * (b - a) / 2.

        num_m = m
        m = tf.reshape(core.tf_float32(np.arange(1, m + 1, 1)), [1, m])
        x = self.approxnorm(l, l * r, 0.1,
                            tau)  #l is a tensor here, x is a tensor too
        h = l / tau

        t = tf.reshape(core.tf_float32(tf.range(0, tau, 1)),
                       [1, tau])  #to replace the i
        # m by tau
        integral = (core.tf_float32(x[:-1]) *
                    tf.sin(tf.linalg.matrix_transpose(m) * np.pi * t * h / l) +
                    core.tf_float32(x[1:]) * tf.sin(
                        tf.linalg.matrix_transpose(m) * np.pi *
                        (t + 1) * h / l)) * h / 2
        integral_sum = tf.reduce_sum(integral, axis=1) * 2 / l

        return tf.reshape(integral_sum, [1, num_m])  #return tensor
Example #13
def get_rotation_matrix(angles, image_height, image_width, name=None):
  """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of images) a
      vector with an angle to rotate each image in the batch. The rank must be
      statically known (the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.
    name: The name of the op.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be given
      to operation `image_projective_transform_v2`. If one row of transforms is
       [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
       `(x, y)` to a transformed *input* point
       `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
       where `k = c0 x + c1 y + 1`.
  """
  with backend.name_scope(name or 'rotation_matrix'):
    x_offset = ((image_width - 1) - (tf.cos(angles) *
                                     (image_width - 1) - tf.sin(angles) *
                                     (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) - (tf.sin(angles) *
                                      (image_width - 1) + tf.cos(angles) *
                                      (image_height - 1))) / 2.0
    num_angles = tf.compat.v1.shape(angles)[0]
    return tf.concat(
        values=[
            tf.cos(angles)[:, None],
            -tf.sin(angles)[:, None],
            x_offset[:, None],
            tf.sin(angles)[:, None],
            tf.cos(angles)[:, None],
            y_offset[:, None],
            tf.zeros((num_angles, 2), tf.float32),
        ],
        axis=1)
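
A small hedged check, assuming `get_rotation_matrix` above is in scope along with its surrounding Keras imports: a zero angle should yield the identity transform.

import numpy as np
import tensorflow as tf

angles = tf.constant([0.0, np.pi / 2], dtype=tf.float32)
transforms = get_rotation_matrix(angles, image_height=4, image_width=4)
print(transforms.shape)  # (2, 8): one 8-parameter projective transform per angle
print(transforms[0])     # ~[1, 0, 0, 0, 1, 0, 0, 0]: rotating by 0 is the identity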
Example #14
    def loop_body(n, rn, drn_dconcentration, vn, dvn_dconcentration):
        """One iteration of the series loop."""

        denominator = 2. * n / concentration + rn
        ddenominator_dk = -2. * n / concentration**2 + drn_dconcentration
        rn = 1. / denominator
        drn_dconcentration = -ddenominator_dk / denominator**2

        multiplier = tf.sin(n * x) / n + vn
        vn = rn * multiplier
        dvn_dconcentration = (drn_dconcentration * multiplier +
                              rn * dvn_dconcentration)
        n = n - 1.

        return n, rn, drn_dconcentration, vn, dvn_dconcentration
Example #15
 def _process_step_num(self, single_input, max_step):
     if self._step_encoding == 'one_hot':
         return tf.one_hot(single_input, max_step + 1)
     if self._step_encoding == 'sinusoid':
         i = tf.range(self._d_step_emb, dtype=tf.float32)[tf.newaxis, :]
         step_num = tf.cast(single_input, tf.float32)[:, tf.newaxis]
         rads = step_num / tf.math.pow(
             1.0e4, 2 * (i // 2) / tf.cast(self._d_step_emb, tf.float32))
         return tf.concat([tf.sin(rads[:, 0::2]),
                           tf.cos(rads[:, 1::2])],
                          axis=-1)
     if self._step_encoding == 'learned':
         return self._step_embedding_layer(
             tf.one_hot(single_input, max_step + 1))
     raise ValueError(
         'Step encoding must be one of ["one_hot", "sinusoid", "learned"].')
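
The 'sinusoid' branch is the standard transformer-style positional encoding; a hedged standalone restatement (hypothetical helper name, explicit d_emb instead of self._d_step_emb):

import tensorflow as tf

def sinusoid_step_encoding(step_nums, d_emb):
  # step_nums: integer tensor of shape [batch]; returns [batch, d_emb].
  i = tf.range(d_emb, dtype=tf.float32)[tf.newaxis, :]
  steps = tf.cast(step_nums, tf.float32)[:, tf.newaxis]
  rads = steps / tf.math.pow(1.0e4, 2 * (i // 2) / float(d_emb))
  return tf.concat([tf.sin(rads[:, 0::2]), tf.cos(rads[:, 1::2])], axis=-1)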
Example #16
def points_rotate(features,
                  max_rotation,
                  min_rotation=0.0,
                  axis="z",
                  keys=("image", )):
    """Randomly rotate points on a given axis.

  Args:
    features: Dictionary of data features to preprocess.
    max_rotation: The maximum possible rotation in radians.
    min_rotation: The minimum possible rotation in radians.
    axis: The rotation axis.
    keys: On which keys to apply this function.

  Returns:
    Features with rotated points.
  """
    assert axis in {"x", "y", "z"}, "invalid rotation axis"

    for key in keys:
        phi = tf.random.uniform(shape=(1, ),
                                minval=min_rotation,
                                maxval=max_rotation)
        cos, sin, zero, one = (tf.cos(phi), tf.sin(phi), tf.zeros(
            (1, )), tf.ones((1, )))
        # Matrices from
        # https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations.
        if axis == "x":
            rotation_matrix = [
                one, zero, zero, zero, cos, -sin, zero, sin, cos
            ]
        elif axis == "y":
            rotation_matrix = [
                cos, zero, sin, zero, one, zero, -sin, zero, cos
            ]
        elif axis == "z":
            rotation_matrix = [
                cos, -sin, zero, sin, cos, zero, zero, zero, one
            ]
        rotate = tf.reshape(tf.stack(rotation_matrix, axis=0), [3, 3])
        features[key] = tf.matmul(features[key], rotate)

    return features
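
A hedged usage sketch, assuming `points_rotate` above is in scope and the point cloud is stored under the default "image" key as an [N, 3] tensor:

import numpy as np
import tensorflow as tf

features = {"image": tf.random.uniform([128, 3])}  # 128 points in 3D
rotated = points_rotate(features, max_rotation=np.pi, axis="z")
print(rotated["image"].shape)  # (128, 3): same points, randomly rotated about z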
Example #17
  def compute_loss(self, inputs, preds):
    reg_labels = inputs['pillar_map_bboxes']
    cls_labels = inputs['pillar_map_if_in_bboxes']
    reg_xyz = inputs['pillar_map_xyz']
    reg_logits = preds['reg_logits']
    cls_logits = preds['cls_logits']
    cls_loss = loss.sigmoid_cross_entropy_focal_loss(cls_logits, cls_labels)
    cls_loss = tf.reduce_sum(cls_loss, axis=1)

    angle_pred = reg_logits[:, :, 6]
    angle_label = reg_labels[:, :, 6]

    rot_diff = tf.sin(angle_label - angle_pred)
    rot_loss = tf.math.multiply_no_nan(
        loss.smooth_l1_loss(rot_diff, tf.zeros_like(rot_diff)), cls_labels)

    size_prior = tf.convert_to_tensor(self.size_prior)
    size_prior = tf.reshape(size_prior, [1, 1, 3])
    size_pred = reg_logits[:, :, 3:6]
    size_label = reg_labels[:, :, 3:6]

    size_loss = tf.reduce_sum(
        loss.smooth_l1_loss(size_pred, tf.math.log(
            tf.clip_by_value(size_label / size_prior, 1e-8, 1e10))),
        axis=-1)
    size_loss = tf.math.multiply_no_nan(size_loss, cls_labels)

    pos_pred = reg_logits[:, :, 0:3]
    pos_label = reg_labels[:, :, 0:3]
    pos_loss = tf.reduce_sum(
        loss.smooth_l1_loss(
            pos_pred,
            (pos_label - reg_xyz) / size_prior),
        axis=-1)
    pos_loss = tf.math.multiply_no_nan(pos_loss, cls_labels)
    reg_loss = rot_loss + size_loss + pos_loss

    has_pos = tf.reduce_max(cls_labels, axis=1)
    reg_loss = tf.reduce_sum(reg_loss, axis=1)
    reg_loss = tf.math.multiply_no_nan(reg_loss, has_pos)

    return cls_loss, reg_loss
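
A brief hedged note on the heading term above: regressing sin(angle_label - angle_pred) toward zero makes the residual insensitive to full 2*pi wraps, and a pi heading flip also incurs no cost, e.g.:

import numpy as np
import tensorflow as tf

a = tf.constant(0.1)
print(tf.sin(a - (a + 2.0 * np.pi)))  # ~0: a full-turn difference costs nothing
print(tf.sin(a - (a + np.pi)))        # ~0: a flipped heading also costs ~nothing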
Example #18
File: ftm.py Project: lylyhan/ddsp
    def getsounds_imp_gaus(self, m1, m2, r1, r2, w11, tau11, p, D, alpha, sr):
        """
        Calculate the sound of an FTM 2D rectangular drum with shape parameters {w, tau, p, D, alpha}.
        Excitation function: (Laplace) spatially Gaussian, temporally delta.
        Modes: m1, m2 along each direction.
        Excitation position: r1, r2.
        """

        l = core.tf_float32(np.pi)  #does this need to be tensor too?
        l2 = l * core.tf_float32(alpha)
        w11 = core.tf_float32(w11)
        s11 = -1 / core.tf_float32(tau11)
        p = core.tf_float32(p)
        D = core.tf_float32(D)
        alpha = core.tf_float32(alpha)

        #position of striking
        x1 = l * r1
        x2 = l2 * r2
        #calculate intermediate variables sigma, omega, k
        sigma = self.getsigma(m1, m2, alpha, p, s11)
        omega = self.getomega(m1, m2, alpha, p, D, w11, s11)
        k = self.get_gaus_k(m1, m2, omega, self.get_gaus_f(m1, 1, 300, r1),
                            self.get_gaus_f(m2, alpha, 300, r2), x1, x2, l,
                            alpha)

        # define total number of samples
        dur = self.n_samples

        #sound can be roughly calculated as k * exp(sigma*t)*sin(omega*t)
        t = tf.reshape(core.tf_float32(tf.range(0, dur, 1)), [1, 1, dur])
        y = tf.reduce_sum(tf.reduce_sum(
            tf.reshape(k, [m1, m2, 1]) *
            tf.math.exp(tf.reshape(sigma, [m1, m2, 1]) * t / sr) *
            tf.sin(tf.reshape(omega, [m1, m2, 1]) * t / sr),
            axis=0),
                          axis=0)

        #normalize output sound
        y = y / max(y)

        return core.tf_float32(y)
Example #19
  def cdf_func(concentration):
    """A helper function that is passed to value_and_gradient."""
    # z is an "almost Normally distributed" random variable.
    z = ((np.sqrt(2. / np.pi) / tf.math.bessel_i0e(concentration)) *
         tf.sin(.5 * x))

    # This is the correction described in [1] which reduces the error
    # of the Normal approximation.
    z2 = z ** 2
    z3 = z2 * z
    z4 = z2 ** 2
    c = 24. * concentration
    c1 = 56.

    xi = z - z3 / ((c - 2. * z2 - 16.) / 3. -
                   (z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)) ** 2

    distrib = normal.Normal(tf.cast(0., dtype), tf.cast(1., dtype))

    return distrib.cdf(xi)
Example #20
    def _test_batches_and_types(self, integrate_function, args):
        """Checks handling batches and dtypes."""
        dtypes = [np.float32, np.float64, np.complex64, np.complex128]
        a = [[0.0, 0.0], [0.0, 0.0]]
        b = [[np.pi / 2, np.pi], [1.5 * np.pi, 2 * np.pi]]
        a = [a, a]
        b = [b, b]
        k = tf.constant([[[[1.0]]], [[[2.0]]]])
        func = lambda x: tf.cast(k, dtype=x.dtype) * tf.sin(x)
        ans = [[[1.0, 2.0], [1.0, 0.0]], [[2.0, 4.0], [2.0, 0.0]]]

        results = []
        for dtype in dtypes:
            lower = tf.constant(a, dtype=dtype)
            upper = tf.constant(b, dtype=dtype)
            results.append(integrate_function(func, lower, upper, **args))

        results = self.evaluate(results)

        for i in range(len(results)):
            assert results[i].dtype == dtypes[i]
            assert np.allclose(results[i], ans, atol=1e-3)
Example #21
def build_smooth_seasonal_transition_matrix(period, frequency_multipliers,
                                            dtype):
    """Build the transition matrix for a SmoothSeasonalStateSpaceModel."""

    two_pi = tf.constant(2. * np.pi, dtype=dtype)
    frequencies = two_pi * frequency_multipliers / period
    num_frequencies = static_num_frequencies(frequency_multipliers)

    sin_frequencies = tf.sin(frequencies)
    cos_frequencies = tf.cos(frequencies)

    trigonometric_values = tf.stack(
        [cos_frequencies, sin_frequencies, -sin_frequencies, cos_frequencies],
        axis=-1)

    transition_matrix = tf.linalg.LinearOperatorBlockDiag([
        tf.linalg.LinearOperatorFullMatrix(matrix=tf.reshape(
            trigonometric_values[i], [2, 2]),
                                           is_square=True)
        for i in range(num_frequencies)
    ])

    return transition_matrix
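
For clarity (hedged sketch, names local to this example): each diagonal block stacked above is a plain 2x2 rotation by omega = 2 * pi * multiplier / period radians per timestep.

import numpy as np
import tensorflow as tf

omega = tf.constant(2.0 * np.pi * 3.0 / 42.0)  # e.g. frequency multiplier 3, period 42
block = tf.reshape(
    tf.stack([tf.cos(omega), tf.sin(omega), -tf.sin(omega), tf.cos(omega)]), [2, 2])
# `block` is a 2x2 rotation by omega radians; one such block per frequency sits
# on the diagonal of the smooth-seasonal transition matrix.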
Example #22
  def test_equivalent_with_librosa(self, range_db):
    """Tests the finite difference function."""
    x = tf.sin(tf.linspace(0.0, 100 * np.pi, 16000))**2.0

    # Amplitude.
    librosa_db = librosa.amplitude_to_db(x, top_db=range_db)
    ddsp_db = core.amplitude_to_db(x, range_db=range_db)
    self.assertAllClose(librosa_db, ddsp_db, rtol=1e-6, atol=1e-6)

    # Back to linear.
    librosa_x = librosa.db_to_amplitude(librosa_db)
    ddsp_x = core.db_to_amplitude(ddsp_db)
    self.assertAllClose(librosa_x, ddsp_x, rtol=1e-6, atol=1e-6)

    # Power.
    librosa_power_db = librosa.power_to_db(x, top_db=range_db)
    ddsp_power_db = core.power_to_db(x, range_db=range_db)
    self.assertAllClose(librosa_power_db, ddsp_power_db, rtol=1e-6, atol=1e-6)

    # Back to linear.
    librosa_power_x = librosa.db_to_power(librosa_power_db)
    ddsp_power_x = core.db_to_power(ddsp_power_db)
    self.assertAllClose(librosa_power_x, ddsp_power_x, rtol=1e-6, atol=1e-6)
Example #23
 def func(x0):
     return tf.sin(x0)**2
Example #24
def _mc_cormick(coord):
    """See https://www.sfu.ca/~ssurjano/mccorm.html."""
    x = coord[0]
    y = coord[1]
    return tf.sin(x + y) + tf.square(x - y) - 1.5 * x + 2.5 * y + 1
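
A quick hedged sanity check at the known global minimum of the McCormick function, roughly (-0.54719, -1.54719) with value about -1.9133 (per the page linked in the docstring):

import tensorflow as tf

print(_mc_cormick(tf.constant([-0.54719, -1.54719])))  # ~ -1.9133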
Example #25
def sinc(x, threshold=1e-20):
    """Normalized zero phase version (peak at zero)."""
    x = tf_float32(x)
    x = tf.where(tf.abs(x) < threshold, threshold * tf.ones_like(x), x)
    x = np.pi * x
    return tf.sin(x) / x
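
A hedged usage sketch (assumes `tf_float32` from the surrounding module, e.g. ddsp.core, is available): the threshold keeps sinc(0) finite and equal to 1.

import tensorflow as tf

print(sinc(tf.constant([0.0, 0.5, 1.0])))  # ~[1.0, 0.6366, 0.0]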
Example #26
 def expected_result_fn(x):
   return tf.sin(x)
Example #27
 def expected_result_fn(x):
   return np.exp(-final_time) * tf.sin(x)
Example #28
 def initial_cond_fn(x):
   return tf.sin(x)
Example #29
 def func(x0, x1, x2):
     # Shape [..., 2] output.
     return tf.stack([tf.sin(x0 * x1 * x2),
                      tf.cos(x0 * x1 * x2)],
                     axis=-1)
Example #30
 def func(x0, x1):
     return tf.sin(x0) * tf.cos(x1)