def sampling_func(y_pred):
     out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
                                                                      num_mixes * output_dim,
                                                                      num_mixes],
                                          axis=1, name='mdn_coef_split')
     cat = Categorical(logits=out_pi)
     component_splits = [output_dim] * num_mixes
     mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
     sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
     coll = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
             in zip(mus, sigs)]
     mixture = Mixture(cat=cat, components=coll)
     samp = mixture.sample()
     # TODO: temperature adjustment for the sampling function.
     return samp
def loss_func(y_true, y_pred):
     out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
                                                                      num_mixes * output_dim,
                                                                      num_mixes],
                                          axis=1, name='mdn_coef_split')
     cat = Categorical(logits=out_pi)
     component_splits = [output_dim] * num_mixes
     mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
     sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
     coll = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
             in zip(mus, sigs)]
     mixture = Mixture(cat=cat, components=coll)
     loss = mixture.log_prob(y_true)
     loss = tf.negative(loss)
     loss = tf.reduce_mean(loss)
     return loss
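
The two helpers above assume `num_mixes` and `output_dim` in scope and expect the sigma block of the network output to already be positive (it is passed to `scale_diag` unchanged). A minimal, hypothetical Keras head wired to them might look like the following sketch; the layer sizes, `input_dim`, and the `tf.exp` positivity transform are assumptions, not part of the original snippet:

# Hypothetical MDN head; `input_dim`, `num_mixes` and `output_dim` are
# assumed to be defined. `tf.exp` keeps the sigma block positive, since
# loss_func/sampling_func feed it straight into `scale_diag`.
inputs = tf.keras.Input(shape=(input_dim,))
h = tf.keras.layers.Dense(64, activation='relu')(inputs)
mu = tf.keras.layers.Dense(num_mixes * output_dim)(h)
sigma = tf.keras.layers.Dense(num_mixes * output_dim, activation=tf.exp)(h)
pi = tf.keras.layers.Dense(num_mixes)(h)
model = tf.keras.Model(inputs, tf.keras.layers.concatenate([mu, sigma, pi]))
model.compile(optimizer='adam', loss=loss_func)
# After fitting: samples = sampling_func(model(x_batch))  # [batch, output_dim]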
Example #3
def get_gaussian_mixture_log_prob(cat_probs, gauss_mu, gauss_sigma):
  """Get the logrithmic p.d.f. of a Gaussian mixture model.

  Args:
    cat_probs:
      `1-D` tensor summing to one, as the categorical probabilities.

    gauss_mu:
      List of tensors, with length equal to the size of `cat_probs`, as the
      `mu` values of the Gaussian components. All these tensors shall share
      the same shape (that of, e.g., `gauss_mu[0]`).

    gauss_sigma:
      List of tensors, with length equal to the size of `cat_probs`, as the
      `sigma` values of the Gaussian components. These shall all be positive
      and have the same shape as `gauss_mu[0]`.

  Returns:
    Callable, mapping a tensor with the shape of `gauss_mu[0]` to a scalar,
    as the log p.d.f.
  """

  n_cats = cat_probs.shape[0]
  cat = Categorical(probs=cat_probs)
  components = [
      Independent( Normal(gauss_mu[i], gauss_sigma[i]) )
      for i in range(n_cats)
  ]
  distribution = Mixture(cat=cat, components=components)

  return distribution.log_prob
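
A toy usage sketch of the helper above; the values are illustrative, and per the docstring the returned callable maps a tensor shaped like `gauss_mu[0]` to a scalar log-density:

cat_probs = tf.constant([0.3, 0.7])
gauss_mu = [tf.zeros([2]), tf.ones([2])]
gauss_sigma = [tf.ones([2]), 0.5 * tf.ones([2])]
log_pdf = get_gaussian_mixture_log_prob(cat_probs, gauss_mu, gauss_sigma)
value = log_pdf(tf.constant([0.5, 0.5]))  # scalar log-density, per the docstring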
Example #4
def mixture(locs, scales, pi, K):
    cat = Categorical(probs=pi)
    components = [
        MultivariateNormalDiag(loc=locs[:, i, :], scale_diag=scales[:, i, :])
        for i in range(K)]
    # get the mixture distribution
    mix = Mixture(cat=cat, components=components)
    return mix
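
A hedged usage sketch; the batch size, `K`, and the event dimension below are assumptions:

B, K, D = 16, 4, 2
locs = tf.random_normal([B, K, D])
scales = tf.nn.softplus(tf.random_normal([B, K, D]))  # keep scales positive
pi = tf.nn.softmax(tf.random_normal([B, K]))          # per-example weights
mix = mixture(locs, scales, pi, K)
samples = mix.sample()          # shape [B, D]
log_p = mix.log_prob(samples)   # shape [B]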
Example #5
def mog_from_out_params(mog_params, use_log_scales):
    logit_probs, means, std_params = tf.split(mog_params, num_or_size_splits=3, axis=2)
    cat = Categorical(logits=logit_probs)

    nr_mix = mog_params.get_shape().as_list()[2] // 3
    components = []
    for i in range(nr_mix):
        gauss_params = tf.stack([means[:, :, i], std_params[:, :, i]], axis=2)
        mean, std = mean_std_from_out_params(gauss_params, use_log_scales)
        components.append(Normal(loc=mean, scale=std))
    distribution = Mixture(cat=cat, components=components)
    return distribution
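
A hedged usage sketch; `mean_std_from_out_params` is defined elsewhere in the source, and the shapes below are assumptions (the last axis stacks logits, means, and std-parameters for `nr_mix` components):

mog_params = tf.zeros([8, 100, 3 * 5])  # [batch, time, 3 * nr_mix]
dist = mog_from_out_params(mog_params, use_log_scales=True)
log_lik = dist.log_prob(tf.zeros([8, 100]))  # one value per (batch, step)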
Example #6
def get_trained_q(trained_var):
  """Get the trained inference distribution :math:`q` (c.f. section "Notation"
  in the documentation).

  Args:
    trained_var:
      `dict` object with keys containing "a", "mu", and "zeta", and values
      being either NumPy arrays or TensorFlow tensors (`tf.constant`), as
      the trained values of the variables in "nn4post".

  Returns:
    An instance of `Mixture`.
  """

  var_names = ['a', 'mu', 'zeta']
  for name in var_names:
    if name not in trained_var.keys():
      e = (
          '{0} is not in the keys of {1}.'
      ).format(name, trained_var)
      raise Exception(e)

  _trained_var = {
      name: val if isinstance(val, tf.Tensor) else tf.constant(val)
      for name, val in trained_var.items()
  }

  # `a` holds unnormalized weights; softmax them into probabilities.
  cat = Categorical(probs=tf.nn.softmax(_trained_var['a']))
  mu_zetas = list(zip(
      tf.unstack(_trained_var['mu'], axis=0),
      tf.unstack(_trained_var['zeta'], axis=0),
  ))
  components = [
      Independent(
          NormalWithSoftplusScale(mu, zeta)
      ) for mu, zeta in mu_zetas
  ]
  mixture = Mixture(cat, components)

  return mixture
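
A hedged usage sketch with toy trained values (three components over a four-dimensional space; the arrays stand in for real training output):

import numpy as np

trained_var = {
    'a': np.zeros([3], dtype='float32'),
    'mu': np.random.normal(size=[3, 4]).astype('float32'),
    'zeta': np.zeros([3, 4], dtype='float32'),
}
q = get_trained_q(trained_var)
log_q_value = q.log_prob(tf.zeros([4]))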
Example #7
def get_trained_posterior(trained_var, param_shape):
  """
  Args:
    trained_var:
      `dict` object with keys contains "a", "mu", and "zeta", and values being
      either numpy arraies or TensorFlow tensors (`tf.constant`), as the value
      of the trained value of variables in "nn4post".
	
	param_shape:
      `dict` with keys the parameter-names and values the assocated shapes (as
      lists).

  Returns:
	Dictionary with keys the parameter-names and values instances of `Mixture`
    as the distributions that fit the associated posteriors.
  """

  n_c = trained_var['a'].shape[0]
  cat = Categorical(logits=trained_var['a'])

  parse_param = get_parse_param(param_shape)
  mu_list = [parse_param(trained_var['mu'][i]) for i in range(n_c)]
  zeta_list = [parse_param(trained_var['zeta'][i]) for i in range(n_c)]

  trained_posterior = {}

  # Parameter names come from `param_shape`; `trained_var` holds only
  # "a", "mu", and "zeta".
  for param_name in param_shape.keys():

    components = [
        Independent(NormalWithSoftplusScale(
            mu_list[i][param_name], zeta_list[i][param_name]))
        for i in range(n_c)
    ]
    mixture = Mixture(cat, components)
    trained_posterior[param_name] = mixture

  return trained_posterior
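
A hedged usage sketch; `get_parse_param` comes from elsewhere in nn4post, and the parameter shapes below are assumptions:

param_shape = {'w': [10, 2], 'b': [2]}
posteriors = get_trained_posterior(trained_var, param_shape)
w_posterior = posteriors['w']    # `Mixture` over tensors of shape [10, 2]
w_sample = w_posterior.sample()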
# -- Gaussian Mixture Distribution
with tf.name_scope('posterior'):

    target_c = tf.constant([0.05, 0.25, 0.70])
    target_mu = tf.stack(
        [tf.ones([N_D]) * (i - 1) * 3 for i in range(TARGET_N_C)], axis=0)
    target_zeta_val = np.zeros([TARGET_N_C, N_D])
    #target_zeta_val[1] = np.ones([N_D]) * 5.0
    target_zeta = tf.constant(target_zeta_val, dtype='float32')

    cat = Categorical(probs=target_c)
    components = [
        Independent(NormalWithSoftplusScale(target_mu[i], target_zeta[i]))
        for i in range(TARGET_N_C)
    ]
    p = Mixture(cat, components)

    def log_posterior(theta):
        return p.log_prob(theta)
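
A hedged sketch evaluating the target density defined above; note that `NormalWithSoftplusScale` uses `scale = softplus(zeta)`, so the zero-valued `target_zeta` gives a per-dimension stddev of `softplus(0) = log(2)`:

theta = tf.zeros([N_D])
log_p_value = log_posterior(theta)  # log-density of p at theta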


# test!
# test 1
init_var = {
    'a':
        np.zeros([N_C], dtype=DTYPE),
    'mu':
        np.array([np.ones([N_D]) * (i - 1) * 3 for i in range(N_C)],
                 dtype=DTYPE) \
        + np.array(np.random.normal(size=[N_C, N_D]) * 0.5,
                   dtype=DTYPE),
Example #9
    def build_model(self):
        net = tl.layers.InputLayer(self.input, name='input_layer')
        with tf.variable_scope('fc1'):
            net = tl.layers.TimeDistributedLayer(
                net,
                layer_class=tl.layers.DenseLayer,
                args={
                    'n_units': 32,
                    'act': tf.nn.elu,
                    'W_init': tf.contrib.layers.variance_scaling_initializer(),
                    'W_init_args': {
                        'regularizer':
                        tf.contrib.layers.l2_regularizer(
                            self.args.weight_decay)
                    },
                    'name': 'fc1_'
                },
                name='time_dense_fc1')
            # net = tl.layers.DropoutLayer(net, keep=self.args.keep_prob, name='fc1_drop')
        with tf.variable_scope('highway'):
            num_highway = 3
            for idx in xrange(num_highway):
                highway_args = {
                    'n_units': 32,
                    'act': tf.nn.elu,
                    'W_init': tf.contrib.layers.variance_scaling_initializer(),
                    'b_init': tf.constant_initializer(value=0.0),
                    'W_init_args': {
                        'regularizer':
                        tf.contrib.layers.l2_regularizer(
                            self.args.weight_decay)
                    },
                    'name': 'highway_%03d_' % idx
                }
                net = tl.layers.TimeDistributedLayer(
                    net,
                    layer_class=utility.Highway,
                    args=highway_args,
                    name='time_dense_highway_%d' % idx)
        with tf.variable_scope('fc2'):
            net = tl.layers.TimeDistributedLayer(
                net,
                layer_class=tl.layers.DenseLayer,
                args={
                    'n_units': 64,
                    'act': tf.nn.elu,
                    'W_init': tf.contrib.layers.variance_scaling_initializer(),
                    'W_init_args': {
                        'regularizer':
                        tf.contrib.layers.l2_regularizer(
                            self.args.weight_decay)
                    },
                    'name': 'highway_to_fc_'
                },
                name='time_dense_highway_to_fc')
            net = tl.layers.DropoutLayer(net,
                                         keep=self.args.keep_prob,
                                         name='hw_to_fc_drop')
        with tf.variable_scope('RNN'):
            if self.args.rnn_cell == 'lstm':
                rnn_cell_fn = tf.contrib.rnn.BasicLSTMCell
            elif self.args.rnn_cell == 'gru':
                rnn_cell_fn = tf.contrib.rnn.GRUCell
            else:
                raise ValueError(
                    'Unimplemented RNN Cell, should be \'lstm\' or \'gru\'')
            self.rnn_keep_prob = tf.placeholder(tf.float32)
            rnn_layer_name = 'DRNN_layer'
            net = tl.layers.DynamicRNNLayer(layer=net,
                                            cell_fn=rnn_cell_fn,
                                            n_hidden=128,
                                            dropout=(1.0, self.rnn_keep_prob),
                                            n_layer=self.args.num_cells,
                                            return_last=True,
                                            name=rnn_layer_name)
            rnn_weights_params = [
                var for var in net.all_params
                if rnn_layer_name in var.name and 'weights' in var.name
            ]
            self.add_regularization_loss(rnn_weights_params)
        net = tl.layers.DenseLayer(
            net,
            n_units=256,
            act=tf.nn.elu,
            W_init=tf.contrib.layers.variance_scaling_initializer(),
            W_init_args={
                'regularizer':
                tf.contrib.layers.l2_regularizer(self.args.weight_decay)
            },
            name='fc_3')
        net = tl.layers.DenseLayer(
            net,
            n_units=128,
            act=tf.nn.elu,
            W_init=tf.contrib.layers.variance_scaling_initializer(),
            W_init_args={
                'regularizer':
                tf.contrib.layers.l2_regularizer(self.args.weight_decay)
            },
            name='fc_4')
        mus_num = self.args.num_mixtures * self.args.gaussian_dim
        sigmas_num = self.args.num_mixtures * self.args.gaussian_dim
        weights_num = self.args.num_mixtures
        num_output = mus_num + sigmas_num + weights_num
        net = tl.layers.DenseLayer(
            net,
            n_units=num_output * self.args.pred_frames_num,
            act=tf.identity,
            W_init=tf.contrib.layers.variance_scaling_initializer(),
            W_init_args={
                'regularizer':
                tf.contrib.layers.l2_regularizer(self.args.weight_decay)
            },
            name='nn_output')
        self.net = tl.layers.ReshapeLayer(
            net,
            shape=[-1, self.args.pred_frames_num, num_output],
            name='reshape')

        output = self.net.outputs
        with tf.variable_scope('MDN'):
            mus = output[:, :, :mus_num]
            sigmas = tf.exp(output[:, :, mus_num:mus_num + sigmas_num])
            self.weight_logits = output[:, :, mus_num + sigmas_num:]
            self.mus = tf.reshape(
                mus, (-1, self.args.pred_frames_num, self.args.num_mixtures,
                      self.args.gaussian_dim))
            self.sigmas = tf.reshape(
                sigmas, (-1, self.args.pred_frames_num, self.args.num_mixtures,
                         self.args.gaussian_dim))
            self.weights = tf.nn.softmax(self.weight_logits)
            self.y_mix = []
            for time_step in xrange(self.args.pred_frames_num):
                cat = Categorical(logits=self.weight_logits[:, time_step, :])
                components = [
                    MultivariateNormalDiag(mu=mu, diag_stdev=sigma)
                    for mu, sigma in zip(
                        tf.unstack(
                            tf.transpose(self.mus[:, time_step, :, :], (1, 0,
                                                                        2))),
                        tf.unstack(
                            tf.transpose(self.sigmas[:,
                                                     time_step, :, :], (1, 0,
                                                                        2))))
                ]
                self.y_mix.append(Mixture(cat=cat, components=components))
        self.loss = self.get_loss()
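
`get_loss` is not shown in this snippet; with one `Mixture` per predicted frame, a plausible per-frame negative log-likelihood could look like the following sketch (an assumption, not the original implementation; it presumes a `self.target` placeholder of shape `[batch, pred_frames_num, gaussian_dim]`):

    def get_loss(self):
        # Hedged sketch -- average the per-frame mixture NLL over all
        # predicted frames; regularization terms could be added on top.
        with tf.variable_scope('Loss'):
            frame_nlls = [
                -self.y_mix[t].log_prob(self.target[:, t, :])
                for t in xrange(self.args.pred_frames_num)
            ]
            loss = tf.reduce_mean(
                tf.add_n(frame_nlls) / self.args.pred_frames_num)
        return loss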
Example #10
         },
         "mus": {
             "support": [-inf, inf],
             "activation function": identity,
             "initial value": lambda x: tf.random_normal(x, stddev=1)
         },
         "log_sigmas": {
             "support": [-3, 3],
             "activation function": identity,
             "initial value": tf.zeros
         }
     },
     "class":
     lambda theta: Mixture(
         cat=Categorical(logits=theta["logits"]),
         components=[
             MultivariateNormalDiag(loc=m, scale_diag=tf.exp(s))
             for m, s in zip(theta["mus"], theta["log_sigmas"])
         ])
 },
 "categorical": {
     "parameters": {
         "logits": {
             "support": [-inf, inf],
             "activation function": identity
         }
     },
     "class": lambda theta: Categorical(logits=theta["logits"]),
 },
 "bernoulli": {
     "parameters": {
         "logits": {
Example #11
class Model_SF(Model):
    """ Prediction model for single future frame """
    def __init__(self, args):
        super(Model_SF, self).__init__(args)
        self.ckpt_dir = os.path.join(self.args.train_dir, 'sf_ckpt')
        if not args.test and not args.restore_training and not args.train_condition == 'real_time_play':
            if os.path.exists(self.ckpt_dir):
                print("Checkpoint file path :%s already exist..." %
                      self.ckpt_dir)
                print(
                    "Do you want to delete this folder and recreate one? ( \'y\' or \'n\')"
                )
                while True:
                    keyboard_input = raw_input("Enter your choice:\n")
                    if keyboard_input == 'y':
                        shutil.rmtree(self.ckpt_dir)
                        break
                    elif keyboard_input == 'n':
                        break
                    else:
                        print(
                            "Unrecognized response, please enter \'y\' or \'n\'"
                        )
        self.input = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.args.seq_length, self.args.features_dim],
            name='input_data')
        self.target = tf.placeholder(dtype=tf.float32,
                                     shape=[None, self.args.gaussian_dim],
                                     name='target')
        self.build_model()
        self.initialize()

    def build_model(self):
        net = tl.layers.InputLayer(self.input, name='input_layer')
        with tf.variable_scope('fc1'):
            net = tl.layers.TimeDistributedLayer(
                net,
                layer_class=tl.layers.DenseLayer,
                args={
                    'n_units': 64,
                    'act': tf.nn.elu,
                    'W_init': tf.contrib.layers.variance_scaling_initializer(),
                    'W_init_args': {
                        'regularizer':
                        tf.contrib.layers.l2_regularizer(
                            self.args.weight_decay)
                    },
                    'name': 'fc1_'
                },
                name='time_dense_fc1')
            # net = tl.layers.DropoutLayer(net, keep=self.args.keep_prob, name='fc1_drop')
        with tf.variable_scope('highway'):
            num_highway = 3
            for idx in xrange(num_highway):
                highway_args = {
                    'n_units': 64,
                    'act': tf.nn.elu,
                    'W_init': tf.contrib.layers.variance_scaling_initializer(),
                    'b_init': tf.constant_initializer(value=0.0),
                    'W_init_args': {
                        'regularizer':
                        tf.contrib.layers.l2_regularizer(
                            self.args.weight_decay)
                    },
                    'name': 'highway_%03d_' % idx
                }
                net = tl.layers.TimeDistributedLayer(
                    net,
                    layer_class=utility.Highway,
                    args=highway_args,
                    name='time_dense_highway_%d' % idx)
                # if idx % 8 == 0:
                #     net = tl.layers.DropoutLayer(net, keep=self.args.keep_prob, name='highway_drop_%d' % idx)
        # net = tl.layers.DropoutLayer(net, keep=self.args.keep_prob, name='highway_drop')
        with tf.variable_scope('fc2'):
            net = tl.layers.TimeDistributedLayer(
                net,
                layer_class=tl.layers.DenseLayer,
                args={
                    'n_units': 64,
                    'act': tf.nn.elu,
                    'W_init': tf.contrib.layers.variance_scaling_initializer(),
                    'W_init_args': {
                        'regularizer':
                        tf.contrib.layers.l2_regularizer(
                            self.args.weight_decay)
                    },
                    'name': 'highway_to_fc_'
                },
                name='time_dense_highway_to_fc')
            net = tl.layers.DropoutLayer(net,
                                         keep=self.args.keep_prob,
                                         name='hw_to_fc_drop')
        with tf.variable_scope('RNN'):
            if self.args.rnn_cell == 'lstm':
                rnn_cell_fn = tf.contrib.rnn.BasicLSTMCell
            elif self.args.rnn_cell == 'gru':
                rnn_cell_fn = tf.contrib.rnn.GRUCell
            else:
                raise ValueError(
                    'Unimplemented RNN Cell, should be \'lstm\' or \'gru\'')
            self.rnn_keep_prob = tf.placeholder(tf.float32)
            rnn_layer_name = 'DRNN_layer'
            net = tl.layers.DynamicRNNLayer(layer=net,
                                            cell_fn=rnn_cell_fn,
                                            n_hidden=128,
                                            dropout=(1.0, self.rnn_keep_prob),
                                            n_layer=self.args.num_cells,
                                            return_last=True,
                                            name=rnn_layer_name)
            rnn_weights_params = [
                var for var in net.all_params
                if rnn_layer_name in var.name and 'weights' in var.name
            ]
            self.add_regularization_loss(rnn_weights_params)
        # net = tl.layers.DenseLayer(net,
        #                            n_units=50,
        #                            act=tf.nn.elu,
        #                            W_init=tf.contrib.layers.variance_scaling_initializer(),
        #                            name='fc1')

        # with tf.variable_scope('Highway'):
        #     num_highway = 15
        #     for idx in xrange(num_highway - 1):
        #         net = utility.Highway(net,
        #                               n_units=64,
        #                               act=tf.nn.elu,
        #                               W_init=tf.contrib.layers.variance_scaling_initializer(),
        #                               b_init=tf.constant_initializer(value=0.0),
        #                               W_init_args={'regularizer': tf.contrib.layers.l2_regularizer(self.args.weight_decay)},
        #                               reuse=False,
        #                               name='highway_%d'%idx)
        net = tl.layers.DenseLayer(
            net,
            n_units=64,
            act=tf.nn.elu,
            W_init=tf.contrib.layers.variance_scaling_initializer(),
            W_init_args={
                'regularizer':
                tf.contrib.layers.l2_regularizer(self.args.weight_decay)
            },
            name='fc_3')
        mus_num = self.args.num_mixtures * self.args.gaussian_dim
        sigmas_num = self.args.num_mixtures * self.args.gaussian_dim
        weights_num = self.args.num_mixtures
        num_output = mus_num + sigmas_num + weights_num
        self.net = tl.layers.DenseLayer(
            net,
            n_units=num_output,
            act=tf.identity,
            W_init=tf.contrib.layers.variance_scaling_initializer(),
            W_init_args={
                'regularizer':
                tf.contrib.layers.l2_regularizer(self.args.weight_decay)
            },
            name='nn_output')
        output = self.net.outputs
        with tf.variable_scope('MDN'):
            mus = output[:, :mus_num]
            sigmas = tf.exp(output[:, mus_num:mus_num + sigmas_num])
            self.weight_logits = output[:, mus_num + sigmas_num:]
            self.mus = tf.reshape(
                mus, (-1, self.args.num_mixtures, self.args.gaussian_dim))
            self.sigmas = tf.reshape(
                sigmas, (-1, self.args.num_mixtures, self.args.gaussian_dim))
            self.weights = tf.nn.softmax(self.weight_logits)
            cat = Categorical(logits=self.weight_logits)
            components = [
                MultivariateNormalDiag(mu=mu, diag_stdev=sigma)
                for mu, sigma in zip(
                    tf.unstack(tf.transpose(self.mus, (
                        1, 0,
                        2))), tf.unstack(tf.transpose(self.sigmas, (1, 0, 2))))
            ]
            self.y_mix = Mixture(cat=cat, components=components)
        self.loss = self.get_loss()

    def get_loss(self):
        with tf.variable_scope('Loss'):
            loss = -self.y_mix.log_prob(self.target)
            loss = tf.reduce_mean(loss) + tf.losses.get_total_loss()
        return loss

    def print_stats(self,
                    distances,
                    title=None,
                    draw=True,
                    save_to_file=False):
        if len(distances.shape) == 2:
            distances = np.average(distances, axis=1)
        from scipy import stats
        n, min_max, mean, var, skew, kurt = stats.describe(distances)
        median = np.median(distances)
        first_quartile = np.percentile(distances, 25)
        third_quartile = np.percentile(distances, 75)
        print('\nDistance statistics:')
        print("Minimum: {0:9.4f} Maximum: {1:9.4f}".format(
            min_max[0], min_max[1]))
        print("Mean: {0:9.4f}".format(mean))
        print("Variance: {0:9.4f}".format(var))
        print("Median: {0:9.4f}".format(median))
        print("First quartile: {0:9.4f}".format(first_quartile))
        print("Third quartile: {0:9.4f}".format(third_quartile))
        threshold = 0.01
        percentage_thr = (distances <= threshold).sum() / float(
            distances.size) * 100.0
        percentage_double_thr = (distances <= 2 * threshold).sum() / float(
            distances.size) * 100.0
        percentage_triple_thr = (distances <= 3 * threshold).sum() / float(
            distances.size) * 100.0
        print(
            "Percentage of test samples with distance less than {0:.3f} m: {1:4.2f} %"
            .format(threshold, percentage_thr))
        print(
            "Percentage of test samples with distance less than {0:.3f} m: {1:4.2f} %"
            .format(2 * threshold, percentage_double_thr))
        print(
            "Percentage of test samples with distance less than {0:.3f} m: {1:4.2f} %"
            .format(3 * threshold, percentage_triple_thr))
        if draw:
            try:
                import seaborn as sns
                import matplotlib.pyplot as plt
                sns.set_style("whitegrid")
                plt.figure()
                vio_ax = sns.violinplot(x=distances, cut=0)
                vio_ax.set_xlabel('distances_error')
                if title is not None:
                    plt.title(title)
                plt.figure()
                strip_ax = sns.stripplot(x=distances)
                strip_ax.set_xlabel('distances_error')
                if title is not None:
                    plt.title(title)
            except ImportError:
                pass

        if save_to_file:
            import csv
            filename = os.path.join(self.ckpt_dir, 'error_stats.csv')
            with open(filename, 'a+') as f:
                csv_writer = csv.writer(f,
                                        delimiter=',',
                                        quoting=csv.QUOTE_ALL)
                data = [
                    percentage_thr, percentage_double_thr,
                    percentage_triple_thr
                ]
                csv_writer.writerow(data)
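
A hedged end-to-end sketch of driving `Model_SF`; the argument object, feeds, and session handling are assumptions (the snippet does not show `initialize()`), and TensorLayer's `DropoutLayer` additionally expects its keep-probabilities fed via `net.all_drop`:

model = Model_SF(args)  # `args` is assumed to carry the fields used above
train_op = tf.train.AdamOptimizer(1e-3).minimize(model.loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {model.input: x_batch,       # [batch, seq_length, features_dim]
            model.target: y_batch,      # [batch, gaussian_dim]
            model.rnn_keep_prob: 0.9}
    feed.update(model.net.all_drop)     # enable TensorLayer dropout
    _, loss_value = sess.run([train_op, model.loss], feed_dict=feed)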