Example #1
    def __init__(self,
                 centered_returns,
                 name='stochastic_volatility',
                 pretty_name='Stochastic Volatility'):
        """Construct the stochastic volatility model.

    Args:
      centered_returns: Float `Tensor` of shape `[num_timesteps]` giving the
        mean-adjusted return (change in asset price, minus the average change)
        observed at each step.
      name: Python `str` name prefixed to Ops created by this class.
      pretty_name: A Python `str`. The pretty name of this model.
    """
        with tf.name_scope(name):
            num_timesteps = ps.size0(centered_returns)
            if tf.is_tensor(num_timesteps):
                raise ValueError(
                    'Returns series length must be static, but saw '
                    'shape {}.'.format(centered_returns.shape))

            self._prior_dist = tfd.JointDistributionCoroutine(
                functools.partial(stochastic_volatility_prior_fn,
                                  num_timesteps=num_timesteps))

            self._log_likelihood_fn = functools.partial(
                stochastic_volatility_log_likelihood_fn,
                centered_returns=centered_returns)

            def _ext_identity(params):
                res = collections.OrderedDict()
                res['persistence_of_volatility'] = params[0]
                res['mean_log_volatility'] = params[1]
                res['white_noise_shock_scale'] = params[2]
                res['log_volatility'] = tf.stack(params[3], axis=-1)
                return res

            sample_transformations = {
                'identity':
                model.Model.SampleTransformation(
                    fn=_ext_identity,
                    pretty_name='Identity',
                )
            }

        super(StochasticVolatility, self).__init__(
            default_event_space_bijector=(
                (tfb.Sigmoid(-1., 1.), tfb.Identity(), tfb.Softplus()) +
                ((tfb.Identity(),) * num_timesteps,)),
            event_shape=self._prior_dist.event_shape,
            dtype=self._prior_dist.dtype,
            name=name,
            pretty_name=pretty_name,
            sample_transformations=sample_transformations,
        )
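The constructor binds `stochastic_volatility_log_likelihood_fn`, which is not shown in this listing. A minimal sketch of what it might look like, assuming the standard stochastic-volatility observation model in which each centered return is Gaussian with scale `exp(log_volatility / 2)`:

def stochastic_volatility_log_likelihood_fn(values, centered_returns):
    """Sketch: log-likelihood of the returns given sampled latents."""
    # The last element of `values` holds the per-timestep log-volatility
    # samples; stack them along the final axis to align with the returns.
    log_volatility = tf.stack(values[-1], axis=-1)
    likelihood = tfd.Normal(loc=0., scale=tf.exp(log_volatility / 2.))
    return tf.reduce_sum(likelihood.log_prob(centered_returns), axis=-1)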
Example #2
def sample_from_banana(seed):
    def banana_model():
        x0 = yield tfd.JointDistributionCoroutine.Root(tfd.Normal(0., 10.))
        _ = yield tfd.Normal(0.03 * (tf.square(x0) - 100.), 1.)

    banana = tfd.JointDistributionCoroutine(banana_model)
    kernel = tfp.mcmc.NoUTurnSampler(banana.log_prob, step_size=0.35)
    # Trace the log acceptance ratio at each step; `sample_chain` returns
    # (states, trace), and we keep only the trace.
    trace_fn = lambda _, pkr: pkr.log_accept_ratio
    return tfp.mcmc.sample_chain(num_results=50,
                                 current_state=[0., 0.],
                                 kernel=kernel,
                                 trace_fn=trace_fn,
                                 seed=seed)[1]
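A short usage sketch; the two-integer stateless seed below is an assumption, and any seed format accepted by `tfp.mcmc.sample_chain` works:

# Usage sketch: 50 log-acceptance ratios, one per NUTS step.
log_accept_ratio = sample_from_banana(seed=[0, 1])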
Example #3
def stochastic_volatility_prior_fn(num_timesteps):
    """Generative process for the stochastic volatility model."""
    persistence_of_volatility = yield Root(
        tfb.Shift(-1.)(tfb.Scale(2.)(tfd.Beta(
            concentration1=20.,
            concentration0=1.5,
            name='persistence_of_volatility'))))
    mean_log_volatility = yield Root(
        tfd.Cauchy(loc=0., scale=5., name='mean_log_volatility'))
    white_noise_shock_scale = yield Root(
        tfd.HalfCauchy(loc=0., scale=2., name='white_noise_shock_scale'))

    _ = yield tfd.JointDistributionCoroutine(
        functools.partial(autoregressive_series_fn,
                          num_timesteps=num_timesteps,
                          mean=mean_log_volatility,
                          noise_scale=white_noise_shock_scale,
                          persistence=persistence_of_volatility),
        name='log_volatility')
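`autoregressive_series_fn` is referenced but not defined here. A minimal sketch under the usual AR(1) assumptions, with the first step drawn from the process's stationary distribution:

def autoregressive_series_fn(num_timesteps, mean, noise_scale, persistence):
    """Sketch: generative process for an order-1 autoregressive series."""
    # The stationary distribution of an AR(1) process has scale
    # noise_scale / sqrt(1 - persistence**2).
    x_t = yield Root(
        tfd.Normal(loc=mean,
                   scale=noise_scale / tf.math.sqrt(1. - persistence**2),
                   name='x_{:06d}'.format(0)))
    for t in range(1, num_timesteps):
        x_t = yield tfd.Normal(loc=mean + persistence * (x_t - mean),
                               scale=noise_scale,
                               name='x_{:06d}'.format(t))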
Example #4
    def __init__(self,
                 train_features,
                 train_labels,
                 test_features=None,
                 test_labels=None,
                 name='logistic_regression',
                 pretty_name='Logistic Regression'):
        """Construct the logistic regression model.

    Args:
      train_features: Floating-point `Tensor` with shape `[num_train_points,
        num_features]`. Training features.
      train_labels: Integer `Tensor` with shape `[num_train_points]`. Training
        labels.
      test_features: Floating-point `Tensor` with shape `[num_test_points,
        num_features]`. Testing features. Can be `None`, in which case
        test-related sample transformations are not computed.
      test_labels: Integer `Tensor` with shape `[num_test_points]`. Testing
        labels. Can be `None`, in which case test-related sample transformations
        are not computed.
      name: Python `str` name prefixed to Ops created by this class.
      pretty_name: A Python `str`. The pretty name of this model.
    """
        with tf.name_scope(name):
            train_features = _add_bias(train_features)
            train_labels = tf.convert_to_tensor(train_labels)
            num_features = int(train_features.shape[1])

            root = tfd.JointDistributionCoroutine.Root
            zero = tf.zeros(num_features)
            one = tf.ones(num_features)

            def model_fn(features):
                weights = yield root(tfd.Independent(tfd.Normal(zero, one), 1))
                logits = tf.einsum('nd,...d->...n', features, weights)
                yield tfd.Independent(tfd.Bernoulli(logits=logits), 1)

            train_joint_dist = tfd.JointDistributionCoroutine(
                functools.partial(model_fn, features=train_features))

            sample_transformations = {
                'identity':
                bayesian_model.BayesianModel.SampleTransformation(
                    fn=lambda params: params,
                    pretty_name='Identity',
                )
            }
            if test_features is not None and test_labels is not None:
                test_features = _add_bias(test_features)
                test_labels = tf.convert_to_tensor(test_labels)
                test_joint_dist = tfd.JointDistributionCoroutine(
                    functools.partial(model_fn, features=test_features))

                def _get_label_dist(weights):
                    # TODO(b/150897904): The seed does nothing since the model is fully
                    # conditioned.
                    distributions, _ = test_joint_dist.sample_distributions(
                        value=[weights, test_labels], seed=42)
                    return distributions[-1]

                sample_transformations['test_nll'] = (
                    bayesian_model.BayesianModel.SampleTransformation(
                        fn=lambda weights: -(  # pylint: disable=g-long-lambda
                            _get_label_dist(weights).log_prob(test_labels)),
                        pretty_name='Test NLL',
                    ))
                sample_transformations['per_example_test_nll'] = (
                    bayesian_model.BayesianModel.SampleTransformation(
                        fn=lambda weights: -(  # pylint: disable=g-long-lambda
                            _get_label_dist(weights).distribution.log_prob(
                                test_labels)),
                        pretty_name='Per-example Test NLL',
                    ))

        self._train_joint_dist = train_joint_dist
        self._train_labels = train_labels

        super(LogisticRegression, self).__init__(
            default_event_space_bijector=tfb.Identity(),
            event_shape=train_joint_dist.event_shape[0],
            dtype=train_joint_dist.dtype[0],
            name=name,
            pretty_name=pretty_name,
            sample_transformations=sample_transformations,
        )
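`_add_bias` is used above but not shown. A plausible sketch, assuming it appends a constant feature of ones so the final weight acts as an intercept:

def _add_bias(features):
    """Sketch: appends a column of ones as a bias feature."""
    features = tf.convert_to_tensor(features, dtype_hint=tf.float32)
    ones = tf.ones([tf.shape(features)[0], 1], dtype=features.dtype)
    return tf.concat([features, ones], axis=-1)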
Example #5
    def __init__(self,
                 train_features,
                 train_labels,
                 test_features=None,
                 test_labels=None,
                 name='sparse_logistic_regression',
                 pretty_name='Sparse Logistic Regression'):
        """Construct the sparse logistic regression model.

    Args:
      train_features: Floating-point `Tensor` with shape `[num_train_points,
        num_features]`. Training features.
      train_labels: Integer `Tensor` with shape `[num_train_points]`. Training
        labels.
      test_features: Floating-point `Tensor` with shape `[num_test_points,
        num_features]`. Testing features. Can be `None`, in which case
        test-related sample transformations are not computed.
      test_labels: Integer `Tensor` with shape `[num_test_points]`. Testing
        labels. Can be `None`, in which case test-related sample transformations
        are not computed.
      name: Python `str` name prefixed to Ops created by this class.
      pretty_name: A Python `str`. The pretty name of this model.

    Raises:
      ValueError: If `test_features` and `test_labels` are not either both
        `None` or both specified.
    """
        with tf.name_scope(name):
            train_features = _add_bias(train_features)
            train_labels = tf.convert_to_tensor(train_labels)
            num_features = int(train_features.shape[1])

            root = tfd.JointDistributionCoroutine.Root
            zero = tf.zeros(num_features)
            one = tf.ones(num_features)
            half = tf.fill([num_features], 0.5)

            def model_fn(features):
                """Model definition."""
                unscaled_weights = yield root(
                    tfd.Independent(tfd.Normal(zero, one),
                                    1,
                                    name='unscaled_weights'))
                local_scales = yield root(
                    tfd.Independent(tfd.Gamma(half, half),
                                    1,
                                    name='local_scales'))
                global_scale = yield root(
                    tfd.Gamma(0.5, 0.5, name='global_scale'))

                weights = unscaled_weights * local_scales * global_scale[
                    ..., tf.newaxis]

                logits = tf.einsum('nd,...d->...n', features, weights)
                yield tfd.Independent(tfd.Bernoulli(logits=logits),
                                      1,
                                      name='labels')

            train_joint_dist = tfd.JointDistributionCoroutine(
                functools.partial(model_fn, features=train_features))

            sample_transformations = {
                'identity':
                bayesian_model.BayesianModel.SampleTransformation(
                    fn=lambda params: params,
                    pretty_name='Identity',
                )
            }
            if (test_features is not None) != (test_labels is not None):
                raise ValueError(
                    '`test_features` and `test_labels` must either both '
                    'be `None` or both specified. Got: test_features={}, '
                    'test_labels={}'.format(test_features, test_labels))

            if test_features is not None and test_labels is not None:
                test_features = _add_bias(test_features)
                test_labels = tf.convert_to_tensor(test_labels)
                test_joint_dist = tfd.JointDistributionCoroutine(
                    functools.partial(model_fn, features=test_features))

                def _get_label_dist(params):
                    # TODO(b/150897904): The seed does nothing since the model is fully
                    # conditioned.
                    distributions, _ = test_joint_dist.sample_distributions(
                        value=self._dict_to_tuple(params) + (test_labels, ),
                        seed=42)
                    return distributions[-1]

                sample_transformations['test_nll'] = (
                    bayesian_model.BayesianModel.SampleTransformation(
                        fn=lambda params: -(  # pylint: disable=g-long-lambda
                            _get_label_dist(params).log_prob(test_labels)),
                        pretty_name='Test NLL',
                    ))
                sample_transformations['per_example_test_nll'] = (
                    bayesian_model.BayesianModel.SampleTransformation(
                        fn=lambda params: -(  # pylint: disable=g-long-lambda
                            _get_label_dist(params).distribution.log_prob(
                                test_labels)),
                        pretty_name='Per-example Test NLL',
                    ))

        self._train_joint_dist = train_joint_dist
        self._train_labels = train_labels

        super(SparseLogisticRegression, self).__init__(
            default_event_space_bijector=self._tuple_to_dict(
                (tfb.Identity(), tfb.Exp(), tfb.Exp())),
            event_shape=self._tuple_to_dict(train_joint_dist.event_shape[:-1]),
            dtype=self._tuple_to_dict(train_joint_dist.dtype[:-1]),
            name=name,
            pretty_name=pretty_name,
            sample_transformations=sample_transformations,
        )
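The `_tuple_to_dict` and `_dict_to_tuple` helpers are not shown in this listing. A minimal sketch, assuming they simply attach the latent names used by `model_fn` to the coroutine's positional parts and back:

# Methods of SparseLogisticRegression; shown at module level for brevity.
_PARAM_NAMES = ('unscaled_weights', 'local_scales', 'global_scale')

def _tuple_to_dict(self, params):
    """Sketch: names the positional latent parts."""
    return collections.OrderedDict(zip(_PARAM_NAMES, params))

def _dict_to_tuple(self, params):
    """Sketch: restores the coroutine's positional ordering."""
    return tuple(params[name] for name in _PARAM_NAMES)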
Example #6
    def __init__(
        self,
        train_student_ids,
        train_question_ids,
        train_correct,
        test_student_ids=None,
        test_question_ids=None,
        test_correct=None,
        name='item_response_theory',
        pretty_name='Item-Response Theory',
    ):
        """Construct the item-response theory model.

    This models a set of students answering a set of questions, with each
    answer scored as correct or incorrect. Each student is associated
    with a scalar `student_ability`, and each question is associated with a
    scalar `question_difficulty`. Additionally, a scalar `mean_student_ability`
    is shared between all the students. This corresponds to the [1PL
    item-response theory](1) model.

    The data are encoded into three parallel arrays per set. That is,
    `*_correct[i] == 1` means that student `*_student_ids[i]` answered question
    `*_question_ids[i]` correctly; `*_correct[i] == 0` means they didn't.

    Args:
      train_student_ids: Integer `Tensor` with shape `[num_train_points]`.
        Training student ids, ranging from 0 to `num_students`.
      train_question_ids: Integer `Tensor` with shape `[num_train_points]`.
        Training question ids, ranging from 0 to `num_questions`.
      train_correct: Integer `Tensor` with shape `[num_train_points]`. Whether
        the student in the training set answered the question correctly, either
        0 or 1.
      test_student_ids: Integer `Tensor` with shape `[num_test_points]`.
        Testing student ids, ranging from 0 to `num_students`. Can be `None`, in
        which case test-related sample transformations are not computed.
      test_question_ids: Integer `Tensor` with shape `[num_test_points]`.
        Testing question ids, ranging from 0 to `num_questions`. Can be `None`,
        in which case test-related sample transformations are not computed.
      test_correct: Integer `Tensor` with shape `[num_test_points]`.
        Whether the student in the testing set answered the question correctly,
        either 0 or 1. Can be `None`, in which case test-related sample
        transformations are not computed.
      name: Python `str` name prefixed to Ops created by this class.
      pretty_name: A Python `str`. The pretty name of this model.

    Raises:
      ValueError: If `test_student_ids`, `test_question_ids`, and
        `test_correct` are not either all `None` or all specified.
      ValueError: If the parallel arrays are not all of the same size.

    #### References

    1. https://en.wikipedia.org/wiki/Item_response_theory
    """
        with tf.name_scope(name):
            # Use a list, not a generator: `any` below must re-scan the
            # elements after `all` has consumed them.
            test_data_present = [
                e is not None
                for e in [test_student_ids, test_question_ids, test_correct]
            ]
            self._have_test = all(test_data_present)
            if not self._have_test and any(test_data_present):
                raise ValueError(
                    '`test_student_ids`, `test_question_ids` and '
                    '`test_correct` must either all be `None` or '
                    'all be specified. Got: test_student_ids={}, '
                    'test_question_ids={}, test_correct={}'.format(
                        test_student_ids, test_question_ids, test_correct))
            if not (train_student_ids.shape[0] == train_question_ids.shape[0]
                    == train_correct.shape[0]):
                raise ValueError(
                    '`train_student_ids`, `train_question_ids` and '
                    '`train_correct` must all have the same length. '
                    'Got: {} {} {}'.format(train_student_ids.shape[0],
                                           train_question_ids.shape[0],
                                           train_correct.shape[0]))

            max_student_id = train_student_ids.max()
            max_question_id = train_question_ids.max()
            if self._have_test:
                max_student_id = max(max_student_id, test_student_ids.max())
                max_question_id = max(max_question_id, test_question_ids.max())

            self._num_students = max_student_id + 1
            self._num_questions = max_question_id + 1

            # TODO(siege): Make it an option to use a sparse encoding. The dense
            # encoding is only efficient when the dataset is not very sparse to begin
            # with.
            train_dense_y, train_y_mask = self._sparse_to_dense(
                train_student_ids,
                train_question_ids,
                train_correct,
            )

            root = tfd.JointDistributionCoroutine.Root

            def model_fn(dense_y, y_mask):
                """Model definition."""
                mean_student_ability = yield root(
                    tfd.Normal(0.75, 1., name='mean_student_ability'))
                student_ability = yield root(
                    tfd.Sample(
                        tfd.Normal(0., 1.),
                        dense_y.shape[0],
                        name='student_ability',
                    ))
                question_difficulty = yield root(
                    tfd.Sample(
                        tfd.Normal(0., 1.),
                        dense_y.shape[1],
                        name='question_difficulty',
                    ))
                logits = (mean_student_ability[..., tf.newaxis, tf.newaxis] +
                          student_ability[..., tf.newaxis] -
                          question_difficulty[..., tf.newaxis, :])
                # TODO(b/150949917): Use a more dedicated masking functionality.
                masked_logits = logits * y_mask - 1e10 * (1 - y_mask)
                yield tfd.Independent(tfd.Bernoulli(masked_logits),
                                      2,
                                      name='correct')

            train_joint_dist = tfd.JointDistributionCoroutine(
                functools.partial(model_fn, train_dense_y, train_y_mask))
            dtype = self._tuple_to_dict(train_joint_dist.dtype[:-1])

            sample_transformations = {
                'identity':
                bayesian_model.BayesianModel.SampleTransformation(
                    fn=lambda params: params,
                    pretty_name='Identity',
                    dtype=dtype,
                )
            }
            if self._have_test:
                if not (test_student_ids.shape[0] == test_question_ids.shape[0]
                        == test_correct.shape[0]):
                    raise ValueError(
                        '`test_student_ids`, `test_question_ids` and '
                        '`test_correct` must all have the same length. '
                        'Got: {} {} {}'.format(test_student_ids.shape[0],
                                               test_question_ids.shape[0],
                                               test_correct.shape[0]))
                test_dense_y, test_y_mask = self._sparse_to_dense(
                    test_student_ids,
                    test_question_ids,
                    test_correct,
                )
                test_joint_dist = tfd.JointDistributionCoroutine(
                    functools.partial(model_fn, test_dense_y, test_y_mask))

                def _get_label_dist(params):
                    # TODO(b/150897904): The seed does nothing since the model is fully
                    # conditioned.
                    distributions, _ = test_joint_dist.sample_distributions(
                        value=self._dict_to_tuple(params) + (test_dense_y, ),
                        seed=42)
                    return distributions[-1]

                sample_transformations['test_nll'] = (
                    bayesian_model.BayesianModel.SampleTransformation(
                        fn=lambda params: -(  # pylint: disable=g-long-lambda
                            _get_label_dist(params).log_prob(test_dense_y)),
                        pretty_name='Test NLL',
                    ))

                def _per_example_test_nll(params):
                    """Computes per-example test NLL."""
                    dense_nll = _get_label_dist(params).distribution.log_prob(
                        test_dense_y)
                    return self._dense_to_sparse(test_student_ids,
                                                 test_question_ids, dense_nll)

                sample_transformations['per_example_test_nll'] = (
                    bayesian_model.BayesianModel.SampleTransformation(
                        fn=_per_example_test_nll,
                        pretty_name='Per-example Test NLL',
                    ))

        self._train_joint_dist = train_joint_dist
        self._train_correct = train_correct
        self._train_student_ids = train_student_ids
        self._train_question_ids = train_question_ids
        self._test_student_ids = test_student_ids
        self._test_question_ids = test_question_ids
        self._evidence_val = train_dense_y

        super(ItemResponseTheory, self).__init__(
            default_event_space_bijector=self._tuple_to_dict(
                (tfb.Identity(), tfb.Identity(), tfb.Identity())),
            event_shape=self._tuple_to_dict(train_joint_dist.event_shape[:-1]),
            dtype=dtype,
            name=name,
            pretty_name=pretty_name,
            sample_transformations=sample_transformations,
        )
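`_sparse_to_dense` and `_dense_to_sparse` are referenced but not shown. A minimal sketch, assuming `numpy` is imported as `np`, that the parallel id arrays are scattered into a dense `[num_students, num_questions]` grid with a 0/1 observation mask, and that the gather ignores leading sample/batch dimensions:

# Methods of ItemResponseTheory; shown at module level for brevity.
def _sparse_to_dense(self, student_ids, question_ids, correct):
    """Sketch: scatters parallel arrays into a dense response grid + mask."""
    dense_y = np.zeros([self._num_students, self._num_questions], np.float32)
    y_mask = np.zeros_like(dense_y)
    dense_y[student_ids, question_ids] = correct
    y_mask[student_ids, question_ids] = 1.
    return dense_y, y_mask

def _dense_to_sparse(self, student_ids, question_ids, dense_values):
    """Sketch: gathers the observed (student, question) entries back out."""
    # Note: this sketch assumes `dense_values` has no leading batch dims.
    indices = tf.stack([student_ids, question_ids], axis=-1)
    return tf.gather_nd(dense_values, indices)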