Example 1
def __init__(self, in_sequence=False, axis=1, name=None, **kwargs):
    super().__init__(name=name)
    self.in_sequence = in_sequence
    if self.in_sequence:
        # presumably: for sequence inputs the filter/channel axis is the last dimension
        self.filter_index = -1
    self.eps = epsilon()
    self.axis = axis
Example 2
def unit_norm(model, axis=0):
    """
    Constrains the weights incident to each hidden unit to have unit norm.

    Args:
        model: the model (or tensor) whose weights the constraint is applied to.
        axis (int): axis along which to calculate weight norms.

    """
    if isinstance(model, Layer):
        for name, param in model.named_parameters():
            if 'bias' not in name and param is not None and param.requires_grad:
                norm = param.data.norm(2, dim=axis, keepdim=True)
                param.data.copy_(param.data / (epsilon() + norm))
    elif is_tensor(model):
        if model is not None and model.requires_grad:
            norm = model.data.norm(2, dim=axis, keepdim=True)
            model.data.copy_(model.data / (epsilon() + norm))
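A minimal self-contained sketch of the same unit-norm step in plain PyTorch; nn.Linear and the 1e-7 constant stand in for Layer and epsilon() and are assumptions, not part of the example above:

import torch
import torch.nn as nn

layer = nn.Linear(4, 3)                      # weight shape: (3, 4)
for name, param in layer.named_parameters():
    if 'bias' not in name and param.requires_grad:
        norm = param.data.norm(2, dim=0, keepdim=True)   # per-column L2 norms
        param.data.copy_(param.data / (1e-7 + norm))

print(layer.weight.data.norm(2, dim=0))      # ~1.0 for every column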
Example 3
def apply_constraint(t: Tensor):
    # take a detached copy of the weight data so the graph is not modified
    if isinstance(t, tf.Variable):
        w_data = t.value().detach()
    else:
        w_data = t.copy().detach()
    # rescale to unit L2 norm along `axis` (a closure variable)
    param_applied = w_data / (epsilon() + sqrt(
        reduce_sum(square(w_data), axis=axis, keepdims=True)))
    return param_applied.detach()
Example 4
def apply_constraint(t: Tensor):
    # take a detached copy of the weight data so the graph is not modified
    if isinstance(t, tf.Variable):
        w_data = t.value().detach()
    else:
        w_data = t.copy().detach()
    # clip each unit's L2 norm to max_value, then rescale the weights
    norms = sqrt(reduce_sum(square(w_data), axis=axis, keepdims=True))
    desired = clip(norms, 0, max_value)
    param_applied = w_data * (desired / (epsilon() + norms))
    return param_applied.detach()
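The clip-and-rescale arithmetic of the second apply_constraint, checked numerically with NumPy standing in for the backend's sqrt/reduce_sum/square/clip (max_value here is an assumed closure variable):

import numpy as np

w = np.array([[3.0, 0.3], [4.0, 0.4]])    # column L2 norms: 5.0 and 0.5
max_value = 2.0
norms = np.sqrt(np.sum(np.square(w), axis=0, keepdims=True))
desired = np.clip(norms, 0, max_value)    # 5.0 -> 2.0, 0.5 unchanged
w_constrained = w * (desired / (1e-7 + norms))
print(np.sqrt(np.sum(np.square(w_constrained), axis=0, keepdims=True)))
# -> [[2.0, 0.5]]: oversized columns are scaled down, small ones left alone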
Example 5
def unit_norm(model, axis=0):
    """
    Constrains the weights incident to each hidden unit to have unit norm.

    Args:
        model: the model whose weights the constraint is applied to.
        axis (int): axis along which to calculate weight norms.

    """
    ws = model.get_weights()
    for i in range(len(ws)):
        w = ws[i]
        # store the rescaled weights; the original bare `w = ...` dropped the result
        ws[i] = w / (epsilon() +
                     sqrt(reduce_sum(square(w), axis=axis, keepdims=True)))
    # write back, assuming the Keras-style set_weights counterpart exists
    model.set_weights(ws)
Example 6
def max_norm(model, max_value=3, axis=0):
    """
    MaxNorm weight constraint.
    Constrains the weights incident to each hidden unit to have a norm less than or equal to a desired value.

    Args:
        model: the model whose weights the constraint is applied to.
        max_value (float): the maximum norm for the incoming weights.
        axis (int): axis along which to calculate weight norms.

    """
    for name, param in model.named_parameters():
        if 'bias' not in name and param is not None and param.requires_grad:
            norm = param.data.norm(2, dim=axis, keepdim=True)
            desired = torch.clamp(norm, 0, max_value)
            param.data.copy_(param.data * (desired / (epsilon() + norm)))
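A hedged usage sketch: the constraint is typically re-imposed after each optimizer step. It assumes max_norm from above is defined in the same script; nn.Linear, the training step, and the epsilon stand-in are illustrative, not from the example:

import torch
import torch.nn as nn

def epsilon():                          # stand-in for the library helper
    return 1e-7

model = nn.Linear(8, 4)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(2, 8)).pow(2).mean()
loss.backward()
opt.step()

max_norm(model, max_value=3, axis=0)    # re-impose the constraint
print(model.weight.data.norm(2, dim=0).max())   # <= 3 (up to epsilon)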
Example 7
def max_norm(model, max_value=3, axis=0):
    """
    MaxNorm weight constraint.
    Constrains the weights incident to each hidden unit to have a norm less than or equal to a desired value.

    Args:
        model: the model whose weights the constraint is applied to.
        max_value (float): the maximum norm for the incoming weights.
        axis (int): axis along which to calculate weight norms.

    """
    ws = model.weights()
    for i in range(len(ws)):
        w = ws[i]
        norms = sqrt(reduce_sum(square(w), axis=axis, keepdims=True))
        desired = clip(norms, 0, max_value)
        # store the rescaled weights; the original bare `w = ...` dropped the
        # result, so the constraint had no effect
        ws[i] = w * (desired / (epsilon() + norms))
Example 8
def min_max_norm(model, min_value=0.0, max_value=1.0, rate=3.0, axis=0):
    """
    MinMaxNorm weight constraint.
    Constrains the weights incident to each hidden unit to have the norm between a lower bound and an upper bound.

    Args:
        model: the model whose weights the constraint is applied to.
        min_value (float): the minimum norm for the incoming weights.
        max_value (float): the maximum norm for the incoming weights.
        rate (float): rate for enforcing the constraint: weights will be rescaled to yield rate * norm.clip(min_value, max_value) + (1 - rate) * norm. Effectively, rate=1.0 stands for strict enforcement of the constraint, while rate<1.0 means that weights are rescaled at each step to move slowly towards a value inside the desired interval.
        axis (int): axis along which to calculate weight norms.
    """

    ws = model.get_weights()
    for i in range(len(ws)):
        w = ws[i]
        norms = sqrt(reduce_sum(square(w), axis=axis, keepdims=True))
        desired = (rate * clip(norms, min_value, max_value) +
                   (1 - rate) * norms)
        # store the rescaled weights; the original bare `w = ...` dropped the result
        ws[i] = w * (desired / (epsilon() + norms))
    # write back, assuming the Keras-style set_weights counterpart exists
    model.set_weights(ws)
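A small numeric check of the rate interpolation, with NumPy standing in for the backend ops and illustrative values:

import numpy as np

norms = np.array([0.2, 1.5])          # one norm below min, one above max
min_value, max_value, rate = 0.5, 1.0, 0.5
desired = rate * np.clip(norms, min_value, max_value) + (1 - rate) * norms
print(desired)  # [0.35 1.25]: each norm moves halfway into [0.5, 1.0];
                # with rate=1.0 it would land exactly on the interval bounds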
Example 9
def min_max_norm(model, min_value=0, max_value=1, rate=2.0, axis=0):
    """
    MinMaxNorm weight constraint.
    Constrains the weights incident to each hidden unit to have the norm between a lower bound and an upper bound.

    Args:
        model: the model whose weights the constraint is applied to.
        min_value (float): the minimum norm for the incoming weights.
        max_value (float): the maximum norm for the incoming weights.
        rate (float): rate for enforcing the constraint: weights will be rescaled to yield rate * norm.clip(min_value, max_value) + (1 - rate) * norm. Effectively, rate=1.0 stands for strict enforcement of the constraint, while rate<1.0 means that weights are rescaled at each step to move slowly towards a value inside the desired interval.
        axis (int): axis along which to calculate weight norms.

    """
    for name, param in model.named_parameters():
        if 'bias' not in name and param is not None and param.requires_grad:
            norm = param.data.norm(2, dim=axis, keepdim=True)
            desired = rate * clip(norm, min_value,
                                  max_value) + (1 - rate) * norm
            param.data.copy_(param.data * (desired / (epsilon() + norm)))
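Usage sketch, same pattern as max_norm: call it after each update step. It assumes the torch min_max_norm above is defined in the same script, with torch.clip bound as the snippet's clip() and an epsilon() stand-in in scope (both assumptions):

import torch
import torch.nn as nn
from torch import clip                  # torch.clip is an alias of torch.clamp

def epsilon():                          # stand-in for the library helper
    return 1e-7

model = nn.Linear(8, 4)
min_max_norm(model, min_value=0.5, max_value=1.0, rate=1.0, axis=0)
# with rate=1.0 the constraint is strict: every column norm lands in [0.5, 1.0]
print(model.weight.data.norm(2, dim=0))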
Example 10
def __init__(self, in_sequence=False, axis=1, name=None, **kwargs):
    super().__init__(in_sequence=in_sequence, name=name)
    self.eps = epsilon()
    self.axis = axis
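What the stored eps and axis are presumably for: assuming this layer L2-normalizes its input along self.axis, a plausible forward would look like the hypothetical sketch below (not the library's actual code):

def forward(self, x):
    # hypothetical: rescale x to unit L2 norm along the configured axis
    norm = x.norm(2, dim=self.axis, keepdim=True)
    return x / (self.eps + norm)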