Example #1
  def _predicted_mean_op(self, activations):
    # Map the final hidden activation through a linear prediction head, then
    # reshape the flat output to [batch, output_window_size, num_features].
    activation, activation_size = activations[-1]
    predicted_mean = model_utils.fully_connected(
        activation,
        activation_size,
        self.output_window_size * self.num_features,
        name="predicted_mean",
        activation=None)
    return array_ops.reshape(predicted_mean,
                             [-1, self.output_window_size, self.num_features])
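Every call on this page goes through model_utils.fully_connected, whose source is not reproduced here. Below is a minimal sketch of what such a dense-layer helper might look like, assuming it is simply a matmul plus bias followed by an optional nonlinearity; the variable names, scope layout, and ReLU default are assumptions, not the library's actual code.

import tensorflow.compat.v1 as tf

def fully_connected(inp, inp_size, layer_size, name, activation=tf.nn.relu):
  # Dense layer: inp @ weight + bias, followed by an optional nonlinearity.
  with tf.variable_scope(name):
    weight = tf.get_variable("weight", shape=[inp_size, layer_size],
                             dtype=inp.dtype)
    bias = tf.get_variable("bias", shape=[layer_size],
                           initializer=tf.zeros_initializer())
    output = tf.matmul(inp, weight) + bias
    if activation is not None:
      output = activation(output)
    return output

With activation=None, as in Example #1, the head stays purely linear: the flat [batch, output_window_size * num_features] output is then reshaped to [batch, output_window_size, num_features].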
Example #2
  def _create_hidden_stack(self, activation, activation_size):
    # Chain fully connected hidden layers, keeping (tensor, width) pairs so
    # downstream ops know the static size of the activation they consume.
    activations = []
    for layer_number, layer_size in enumerate(self.hidden_layer_sizes):
      # TODO(agarwal): Migrate to fully_connected in tf slim
      activation = model_utils.fully_connected(
          activation, activation_size, layer_size,
          name="layer_{}".format(layer_number))
      activation_size = layer_size
      activations.append((activation, activation_size))
    return activations
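The stack builder returns (tensor, width) pairs rather than bare tensors because the low-level fully_connected helper needs the static input width to size its weight matrix. Here is a standalone sketch of the same stacking pattern, reusing the hypothetical fully_connected helper sketched after Example #1; the layer widths and input placeholder are illustrative, not taken from the library.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph mode, matching the TF1-style contrib code

hidden_layer_sizes = [32, 16]  # illustrative widths
inputs = tf.placeholder(tf.float32, shape=[None, 8])

activation, activation_size = inputs, 8
activations = []
for layer_number, layer_size in enumerate(hidden_layer_sizes):
  activation = fully_connected(
      activation, activation_size, layer_size,
      name="layer_{}".format(layer_number))
  activation_size = layer_size
  activations.append((activation, activation_size))

# activations[-1] now holds the last hidden tensor and its width, which is
# exactly what _predicted_mean_op and _predicted_covariance_op consume.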
Example #3
  def _predicted_covariance_op(self, activations, num_values):
    activation, activation_size = activations[-1]
    if self.loss == ARModel.NORMAL_LIKELIHOOD_LOSS:
      # Predict log(sigma^2) with a linear head and exponentiate, which keeps
      # the covariance strictly positive.
      log_sigma_square = model_utils.fully_connected(
          activation,
          activation_size,
          self.output_window_size * num_values,
          name="log_sigma_square",
          activation=None)
      predicted_covariance = gen_math_ops.exp(log_sigma_square)
      predicted_covariance = array_ops.reshape(
          predicted_covariance, [-1, self.output_window_size, num_values])
    else:
      # For losses that do not model uncertainty, fall back to a constant
      # covariance of ones shaped [batch, output_window_size, num_values].
      shape = array_ops.stack([
          array_ops.shape(activation)[0],
          constant_op.constant(self.output_window_size),
          constant_op.constant(num_values)
      ])
      predicted_covariance = array_ops.ones(shape=shape, dtype=activation.dtype)
    return predicted_covariance
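Predicting log(sigma^2) and exponentiating means the variance is always positive without constraining the dense layer's output. As a rough illustration of how such a diagonal covariance would enter a Gaussian negative log-likelihood, here is a hedged sketch; the function name is made up and this is not the library's actual loss code.

import math

import tensorflow.compat.v1 as tf

def diagonal_gaussian_nll(targets, predicted_mean, predicted_covariance):
  # Elementwise negative log-likelihood of a diagonal Gaussian:
  # 0.5 * (log(2 * pi * sigma^2) + (x - mu)^2 / sigma^2).
  squared_error = tf.square(targets - predicted_mean)
  return 0.5 * (tf.log(2. * math.pi * predicted_covariance)
                + squared_error / predicted_covariance)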