Example #1
File: cost.py Project: yuan39/tensorlayer
from tensorflow.python.framework import ops   # TF 1.x module paths
from tensorflow.python.ops import standard_ops

def mn_i(weights, name=None):
    """Applies max-norm regularization to weights."""
    # `scale` is a free variable: in the TensorLayer source, `mn_i` is defined
    # inside the factory `maxnorm_i_regularizer(scale)` and closes over it.
    try:  # TF 0.12+: name_scope(name, default_name, values)
        with ops.name_scope(name, 'maxnorm_i_regularizer', [weights]) as scope:
            my_scale = ops.convert_to_tensor(scale,
                                             dtype=weights.dtype.base_dtype,
                                             name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
                name=scope)
    except Exception:  # TF 0.11 and earlier: op_scope(values, name, default_name)
        with ops.op_scope([weights], name, 'maxnorm_i_regularizer') as scope:
            my_scale = ops.convert_to_tensor(scale,
                                             dtype=weights.dtype.base_dtype,
                                             name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
                name=scope)
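Note that `scale` is unbound in the snippet as shown: in the TensorLayer source these `mn_*` helpers are nested inside factory functions such as `maxnorm_i_regularizer(scale)` and capture `scale` as a closure variable. A minimal sketch of that pattern, written against the plain `tf` API for brevity (the factory body is a simplified stand-in, not the verbatim library code):

import tensorflow as tf

def maxnorm_i_regularizer(scale):
    """Returns a function computing scale * sum over rows of max|W[i, j]|."""
    def mn_i(weights, name='maxnorm_i_regularizer'):
        with tf.name_scope(name) as scope:
            my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype,
                                            name='scale')
            return tf.multiply(my_scale,
                               tf.reduce_sum(tf.reduce_max(tf.abs(weights), 1)),
                               name=scope)
    return mn_i

# Usage: the returned closure is what the examples on this page show.
regularizer = maxnorm_i_regularizer(scale=1e-3)
penalty = regularizer(tf.ones([3, 4]))  # scalar penalty added to the loss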
Example #2
File: cost.py Project: shorxp/tensorlayer
def mn_i(weights, name=None):
    """Applies max-norm regularization to weights."""
    with ops.op_scope([weights], name, 'maxnorm_i_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale,
                                         dtype=weights.dtype.base_dtype,
                                         name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
            name=scope)
Example #3
def mn_o(weights, name=None):
    """Applies max-norm regularization to weights."""
    with ops.op_scope([weights], name, 'maxnorm_o_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale,
                                         dtype=weights.dtype.base_dtype,
                                         name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)),
            name=scope)
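The only difference between `mn_i` and `mn_o` is the reduction axis: axis 1 takes the largest absolute value in each row before summing, axis 0 does so per column. A quick NumPy illustration of the distinction (NumPy used here as a stand-in for the `standard_ops` calls):

import numpy as np

W = np.array([[1.0, 2.0],
              [3.0, 4.0]])

print(np.sum(np.max(np.abs(W), axis=1)))  # rows    (mn_i): 2 + 4 = 6.0
print(np.sum(np.max(np.abs(W), axis=0)))  # columns (mn_o): 3 + 4 = 7.0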
Example #4
import tensorflow as tf  # additional import used by this and the following examples

def mn(weights, name='max_regularizer'):
    """Applies max-norm regularization to weights."""
    with tf.name_scope(name) as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        # if tf.__version__ <= '0.12':
        #     standard_ops_fn = standard_ops.mul
        # else:
        standard_ops_fn = standard_ops.multiply
        return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)
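Since `mn` is just a callable mapping a weight tensor to a scalar, it can be plugged in anywhere TF 1.x expects a regularizer. A sketch of that wiring using the TF1-style `get_variable` API (graph mode assumed; `scale` is fixed here only to make the sketch self-contained):

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops

scale = 1e-3  # stands in for the closure variable of the factory

def mn(weights, name='max_regularizer'):
    with tf.name_scope(name) as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.multiply(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope)

# TF1-style: the penalty is collected under REGULARIZATION_LOSSES.
w = tf.compat.v1.get_variable('w', shape=[4, 4], regularizer=mn)
reg_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)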
Example #5
def mn_i(weights, name='maxnorm_i_regularizer'):
    """Applies max-norm regularization to weights."""
    with tf.name_scope(name) as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        # Caveat: lexicographic string comparison; see the note below this example.
        if tf.__version__ <= '0.12':
            standard_ops_fn = standard_ops.mul
        else:
            standard_ops_fn = standard_ops.multiply
        return standard_ops_fn(
            my_scale,
            standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
            name=scope)
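One caveat with the version check above: `tf.__version__ <= '0.12'` compares strings lexicographically, so for example `'0.9' <= '0.12'` is `False`, and TF 0.9 would be routed to `standard_ops.multiply`, which it does not have. A more robust comparison, sketched with the third-party `packaging` library (one option among several):

from packaging.version import Version  # pip install packaging

import tensorflow as tf
from tensorflow.python.ops import standard_ops

if Version(tf.__version__) <= Version('0.12'):
    standard_ops_fn = standard_ops.mul       # old op name
else:
    standard_ops_fn = standard_ops.multiply  # renamed around TF 0.12/1.0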
Example #6
def _project_log_stochastic_matrix_wrt_kl_divergence(log_matrix):
  """Projects its argument onto the set of log-left-stochastic matrices.

  Args:
    log_matrix: 2d square tensor, the element-wise logarithm of the matrix to
      project.

  Returns:
    The 2d square tensor that results from projecting exp(`log_matrix`) onto
      the set of left-stochastic matrices w.r.t. the KL-divergence applied
      column-wise.
  """

  # For numerical reasons, make sure that the largest matrix element is zero
  # before exponentiating.
  log_matrix -= standard_ops.reduce_max(log_matrix, axis=0, keepdims=True)
  log_matrix -= standard_ops.log(
      standard_ops.reduce_sum(
          standard_ops.exp(log_matrix), axis=0, keepdims=True))
  return log_matrix
Example #7
def _project_log_stochastic_matrix_wrt_kl_divergence(log_matrix):
  """Projects its argument onto the set of log-left-stochastic matrices.

  Args:
    log_matrix: 2d square tensor, the element-wise logarithm of the matrix to
      project.

  Returns:
    The 2d square tensor that results from projecting exp(`log_matrix`) onto
      the set of left-stochastic matrices w.r.t. the KL-divergence applied
      column-wise.
  """

  # For numerical reasons, make sure that the largest matrix element is zero
  # before exponentiating.
  # `keep_dims` is the pre-TF-1.5 spelling of `keepdims`.
  log_matrix -= standard_ops.reduce_max(log_matrix, axis=0, keep_dims=True)
  log_matrix -= standard_ops.log(
      standard_ops.reduce_sum(
          standard_ops.exp(log_matrix), axis=0, keep_dims=True))
  return log_matrix
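Both variants implement the same column-wise log-sum-exp normalization (`keep_dims` is simply the older spelling of `keepdims`), so after the projection each column of `exp(log_matrix)` sums to one. A small NumPy check of that invariant:

import numpy as np

def project(log_matrix):
    # Mirror of the TF code above, in NumPy.
    log_matrix = log_matrix - np.max(log_matrix, axis=0, keepdims=True)
    log_matrix = log_matrix - np.log(
        np.sum(np.exp(log_matrix), axis=0, keepdims=True))
    return log_matrix

log_m = np.random.randn(4, 4)
print(np.exp(project(log_m)).sum(axis=0))  # -> [1. 1. 1. 1.]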