# Assumed Chainer import paths for the helpers used below
# (c_sum, absolute, sqrt); adjust if the surrounding file differs.
from chainer.functions.math.basic_math import absolute
from chainer.functions.math.sqrt import sqrt
from chainer.functions.math.sum import sum as c_sum


def __init__(self, delta_v=0.5, delta_d=1.5, max_embedding_dim=10,
             norm=1, alpha=1.0, beta=1.0, gamma=0.001):
    self.delta_v = delta_v
    self.delta_d = delta_d
    self.alpha = alpha
    self.beta = beta
    self.gamma = gamma

    self.max_embedding_dim = max_embedding_dim
    if self.max_embedding_dim <= 0:
        raise ValueError("Max number of embeddings has to be positive!")

    # Only the L1 or L2 norm is allowed.
    if norm == 1:
        self.norm = lambda x, axis=None: c_sum(absolute(x), axis=axis)
    elif norm == 2:
        self.norm = lambda x, axis=None: sqrt(c_sum(x ** 2, axis=axis))
    else:
        raise ValueError("For discriminative loss, "
                         "norm can only be 1 or 2. "
                         "Obtained the value: {}".format(norm))
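# A minimal sketch (not from the original source) of what the two norm
# lambdas above compute, using NumPy for illustration; ``demo_norms`` is
# a hypothetical helper name.
def demo_norms():
    import numpy as np
    x = np.array([[3.0, -4.0]], dtype=np.float32)
    l1 = np.sum(np.abs(x), axis=1)        # L1 norm along axis 1 -> [7.0]
    l2 = np.sqrt(np.sum(x ** 2, axis=1))  # L2 norm along axis 1 -> [5.0]
    return l1, l2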
# Assumed Chainer module imports implied by the ``module.function``
# call style below.
from chainer.functions.array import broadcast
from chainer.functions.math import sqrt
from chainer.functions.math import square
from chainer.functions.math import sum


def _normalize(self, x):
    # Standardize each row of ``x`` to zero mean and unit variance,
    # with ``self.eps`` added to the denominator for numerical stability.
    size = x.shape[1]
    mean = broadcast.broadcast_to(
        (sum.sum(x, axis=1) / size)[:, None], x.shape)
    std = broadcast.broadcast_to(sqrt.sqrt(
        sum.sum(square.square(x - mean), axis=1) / size)[:, None],
        x.shape) + self.eps
    return (x - mean) / std
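# A minimal NumPy sketch (assumptions: ``x`` is a 2-D array and
# ``self.eps`` is a small constant such as 1e-5) of the per-row
# standardization performed by ``_normalize``; ``numpy_normalize`` is a
# hypothetical helper name.
def numpy_normalize(x, eps=1e-5):
    import numpy as np
    mean = x.mean(axis=1, keepdims=True)  # per-row mean
    std = np.sqrt(((x - mean) ** 2).mean(axis=1, keepdims=True)) + eps
    return (x - mean) / std  # each row: zero mean, ~unit variance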
# Assumed Chainer import implied by the ``sqrt.sqrt`` call.
from chainer.functions.math import sqrt


def stddev(self):
    # The standard deviation of a distribution is the square root
    # of its variance.
    return sqrt.sqrt(self.variance)
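# A minimal sketch (hypothetical ``NormalLike`` class, not from the
# original source) of the variance/stddev relationship encoded above:
# stddev is the square root of variance.
class NormalLike:
    def __init__(self, variance):
        self.variance = variance

    @property
    def stddev(self):
        return self.variance ** 0.5  # sqrt(variance)

# Example: NormalLike(variance=4.0).stddev -> 2.0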