# Imports assumed by the regularizer snippets below (TensorFlow 0.x/1.x internal modules).
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops


def mn_i(weights, name=None):
    """Applies max-norm regularization to the rows of `weights`.

    Note: `scale` is expected to be bound in the enclosing scope,
    e.g. by a regularizer factory (see the sketch below).
    """
    try:  # TF >= 0.12: name_scope(name, default_name, values)
        with ops.name_scope(name, 'maxnorm_i_regularizer', [weights]) as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
                name=scope)
    except Exception:  # TF <= 0.11: op_scope(values, name, default_name)
        with ops.op_scope([weights], name, 'maxnorm_i_regularizer') as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
                name=scope)
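These closures reference a free variable `scale`; in the factory-plus-closure pattern they are usually produced by a function that fixes the penalty strength. A minimal sketch of that pattern follows; the factory name `maxnorm_i_regularizer` and its default scale are illustrative assumptions, not a confirmed API.

# Hedged sketch of the factory that supplies `scale`; names are illustrative.
def maxnorm_i_regularizer(scale=1.0):
    """Returns a closure that applies max-norm row regularization at `scale`."""
    def mn_i(weights, name=None):
        with ops.name_scope(name, 'maxnorm_i_regularizer', [weights]) as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
                name=scope)
    return mn_i

# Usage: reg = maxnorm_i_regularizer(scale=0.001); penalty = reg(W)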
def l2(weights, name=None):
    """Applies l2 regularization to weights."""
    # TF <= 0.11 op_scope variant; `scale` comes from the enclosing factory.
    with ops.op_scope([weights], name, 'l2_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)
def l2(weights, name=None):
    """Applies l2 regularization to weights."""
    # TF >= 0.12 name_scope variant of the op_scope version above.
    with ops.name_scope(name, 'l2_regularizer', [weights]) as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)
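For reference, nn.l2_loss(w) computes sum(w ** 2) / 2, so the penalty returned above is scale * sum(w ** 2) / 2. A self-contained usage sketch with public tf ops follows; the factory name l2_regularizer is an illustrative assumption.

# Hedged, self-contained sketch of the factory pattern with public tf ops
# (tf.mul on TF < 1.0; use tf.multiply on TF >= 1.0).
import tensorflow as tf

def l2_regularizer(scale):
    def l2(weights, name=None):
        with tf.name_scope(name, 'l2_regularizer', [weights]) as scope:
            my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return tf.mul(my_scale, tf.nn.l2_loss(weights), name=scope)
    return l2

W = tf.Variable(tf.random_normal([784, 100]), name='W')
penalty = l2_regularizer(0.004)(W)  # 0.004 * sum(W ** 2) / 2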
def mn_i(weights, name=None):
    """Applies max-norm regularization to the rows of `weights`."""
    with ops.op_scope([weights], name, 'maxnorm_i_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)),
            name=scope)
def mn_o(weights, name=None):
    """Applies max-norm regularization to the columns of `weights`."""
    with ops.op_scope([weights], name, 'maxnorm_o_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)),
            name=scope)
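The only difference between mn_i and mn_o is the reduction axis: axis 1 takes the max over each row (per input), axis 0 over each column (per output unit). A small NumPy stand-in to check the two reductions:

# Hedged NumPy check of the two max-norm reductions.
import numpy as np

W = np.array([[1., -3.],
              [2.,  4.]])
mn_i_penalty = np.sum(np.max(np.abs(W), axis=1))  # max per row: 3 + 4 = 7
mn_o_penalty = np.sum(np.max(np.abs(W), axis=0))  # max per column: 2 + 4 = 6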
def lo(weights, name=None):
    """Applies group column regularization to weights."""
    with ops.op_scope([weights], name, 'lo_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))),
            name=scope)
def li(weights, name=None):
    """Applies group row regularization (li) to weights."""
    with ops.op_scope([weights], name, 'li_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))),
            name=scope)
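li and lo implement a group-lasso style penalty: the sum, over rows (li) or columns (lo), of each group's l2 norm, which pushes entire rows or columns toward zero rather than individual weights. A NumPy stand-in for the quantity computed:

# Hedged NumPy sketch: sum of per-group l2 norms (group lasso).
import numpy as np

W = np.array([[3., 4.],    # row norm 5
              [0., 0.]])   # row norm 0 -- a fully pruned input row
li_penalty = np.sum(np.sqrt(np.sum(np.square(W), axis=1)))  # 5.0
lo_penalty = np.sum(np.sqrt(np.sum(np.square(W), axis=0)))  # 3.0 + 4.0 = 7.0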
def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.name_scope(name, 'l1_regularizer', [weights]) as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.abs(weights)),
            name=scope)
def li(weights, name=None):
    """Applies group row regularization (li) to weights."""
    # tf.op_scope(values, name, default_name) is deprecated;
    # use tf.name_scope(name, default_name, values) instead.
    try:  # TF >= 0.12
        with ops.name_scope(name, 'li_regularizer', [weights]) as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))),
                name=scope)
    except Exception:  # TF <= 0.11
        with ops.op_scope([weights], name, 'li_regularizer') as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return standard_ops.mul(
                my_scale,
                standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))),
                name=scope)
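Probing with try/except on every call repeats the version check each time. One alternative, sketched under the assumption that only the scope API differs between versions, is to resolve the right helper once at import time; _scoped is a hypothetical name introduced here for illustration.

# Hedged alternative to per-call try/except; `_scoped` is illustrative only.
from tensorflow.python.framework import ops

try:
    with ops.name_scope(None, 'probe', []):  # TF >= 0.12 signature
        pass
    def _scoped(name, default_name, values):
        return ops.name_scope(name, default_name, values)
except TypeError:  # TF <= 0.11: only op_scope(values, name, default_name)
    def _scoped(name, default_name, values):
        return ops.op_scope(values, name, default_name)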
def testCondContext(self):
    with self.test_session() as sess:
        x = tf.constant(2)
        y = tf.constant(5)
        control_flow_ops.cond(
            tf.less(x, y), lambda: tf.mul(x, 17), lambda: tf.add(y, 23))
        for op in sess.graph.get_operations():
            c = op._get_control_flow_context()
            if c:
                compare.ProtoEq(
                    c.to_proto(),
                    control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto())
def lo(weights, name=None):
    """Applies group column regularization to weights."""
    with ops.op_scope([weights], name, 'lo_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        # A reduce_mean-based variant (mean of per-column norms) was also
        # sketched here; the sum-based form below is the one in use.
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))),
            name=scope)
def li(weights, name=None):
    """Applies group row regularization (li) to weights."""
    with ops.op_scope([weights], name, 'li_regularizer') as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
        # A reduce_mean-based variant (mean of per-row norms) was also
        # sketched here; the sum-based form below is the one in use.
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))),
            name=scope)
def call(self, inputs):
    rank = len(inputs.shape)
    if rank > 2:
        # Broadcasting is required for the inputs: contract the last axis of
        # `inputs` against the first axis of the element-wise masked kernel.
        outputs = standard_ops.tensordot(
            inputs, gen_math_ops.mul(self.kernel, self.mask), [[rank - 1], [0]])
        # Reshape the output back to the original ndim of the input.
        if not context.executing_eagerly():
            shape = inputs.shape.as_list()
            output_shape = shape[:-1] + [self.units]
            outputs.set_shape(output_shape)
    else:
        inputs = math_ops.cast(inputs, self._compute_dtype)
        outputs = gen_math_ops.mat_mul(
            inputs, gen_math_ops.mul(self.kernel, self.mask))
    if self.use_bias:
        outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
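This call override appears to implement a masked (sparsified) Dense layer: the kernel is multiplied elementwise by a fixed binary mask before the matmul. A minimal self-contained sketch of such a layer follows; the class name MaskedDense and the mask argument are illustrative assumptions, assuming TF 2.x Keras.

# Hedged sketch of a masked Dense layer (names are illustrative, not the
# source's confirmed class). The mask zeroes out individual weights.
import tensorflow as tf

class MaskedDense(tf.keras.layers.Layer):
    def __init__(self, units, mask, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.mask_value = mask  # binary array, shape (in_dim, units)

    def build(self, input_shape):
        in_dim = int(input_shape[-1])
        self.kernel = self.add_weight('kernel', shape=(in_dim, self.units))
        self.mask = tf.constant(self.mask_value, dtype=self.dtype)

    def call(self, inputs):
        # Zeroed mask entries permanently disable the corresponding weights.
        return tf.matmul(inputs, self.kernel * self.mask)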
def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.op_scope([weights], name, "l1_regularizer") as scope:
        my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name="scale")
        return standard_ops.mul(
            my_scale,
            standard_ops.reduce_sum(standard_ops.abs(weights)),
            name=scope)
def call(self, inputs):
    dtype = K.dtype(inputs)
    if dtype != 'int32' and dtype != 'int64':
        inputs = math_ops.cast(inputs, 'int32')
    # Mask the embedding matrix (self.weights[0]) before the lookup so that
    # pruned rows stay zero.
    out = embedding_ops.embedding_lookup(
        math_ops.multiply(self.weights[0], self.mask), inputs)
    return out
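The same masking idea applied to an Embedding layer: zero mask rows disable individual embedding entries. A minimal self-contained sketch, with the illustrative class name MaskedEmbedding, assuming TF 2.x Keras.

# Hedged sketch of a masked Embedding layer (illustrative names), TF 2.x.
import numpy as np
import tensorflow as tf

class MaskedEmbedding(tf.keras.layers.Embedding):
    def __init__(self, input_dim, output_dim, mask, **kwargs):
        super().__init__(input_dim, output_dim, **kwargs)
        # Binary mask, shape (input_dim, output_dim).
        self.mask = tf.constant(mask, dtype=tf.float32)

    def call(self, inputs):
        inputs = tf.cast(inputs, tf.int32)
        # `self.embeddings` is created by Embedding.build on first call.
        return tf.nn.embedding_lookup(self.embeddings * self.mask, inputs)

ids = tf.constant([[1, 2, 3]])
layer = MaskedEmbedding(10, 4, mask=np.ones((10, 4)))
vecs = layer(ids)  # shape (1, 3, 4)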