def arcosh(x):
    max_value = tf.reduce_max(x)
    if x.dtype == tf.float32:
        max_value = tf.cond(max_value < 1.0 + 1e-7, lambda: 1.0 + 1e-7, lambda: max_value)
        result = tf.acosh(tf.clip_by_value(x, clip_value_min=1.0 + 1e-7, clip_value_max=max_value))  # 1e38
    elif x.dtype == tf.float64:
        max_value = tf.cond(max_value < 1.0 + 1e-16, lambda: 1.0 + 1e-16, lambda: max_value)
        result = tf.acosh(tf.clip_by_value(x, clip_value_min=1.0 + 1e-16, clip_value_max=max_value))
    else:
        raise ValueError('invalid dtype!')
    return result

def make_event(xarr):
    """Generate event kinematics"""
    shat, jac, x1, x2 = get_x1x2(xarr)
    mV = tf.sqrt(shat * x1 * x2)
    mV2 = mV * mV
    ecmo2 = mV / 2
    zeros = tf.zeros_like(ecmo2, dtype=DTYPE)
    p0 = tf.stack([ecmo2, zeros, zeros, ecmo2])
    p1 = tf.stack([ecmo2, zeros, zeros, -ecmo2])
    pV = p0 + p1
    YV = 0.5 * tf.math.log(tf.abs((pV[0] + pV[3]) / (pV[0] - pV[3])))
    pVt2 = tf.square(pV[1]) + tf.square(pV[2])
    phi = 2 * np.pi * xarr[:, 3]
    ptmax = 0.5 * mV2 / (tf.sqrt(mV2 + pVt2) - (pV[1] * tf.cos(phi) + pV[2] * tf.sin(phi)))
    pta = ptmax * xarr[:, 2]
    pt = tf.stack([zeros, pta * tf.cos(phi), pta * tf.sin(phi), zeros])
    Delta = (mV2 + 2 * (pV[1] * pt[1] + pV[2] * pt[2])) / 2.0 / pta / tf.sqrt(mV2 + pVt2)
    y = YV - tf.acosh(Delta)
    kallenF = 2.0 * ptmax / tf.sqrt(mV2 + pVt2) / tf.abs(tf.sinh(YV - y))
    p2 = tf.stack([pta * tf.cosh(y), pta * tf.cos(phi), pta * tf.sin(phi), pta * tf.sinh(y)])
    p3 = pV - p2
    psw = 1 / (8 * np.pi) * kallenF  # phase-space weight
    psw *= jac  # jacobian for tau, y
    flux = 1 / (2 * mV2)  # flux
    return psw, flux, p0, p1, p2, p3, x1, x2

def Poincare_dis(a, b):
    L2_a = tf.reduce_sum(tf.square(a), 1)
    L2_b = tf.reduce_sum(tf.square(b), 1)
    theta = 2 * tf.reduce_sum(tf.square(a - b), 1) / ((1 - L2_a) * (1 - L2_b))
    distance = tf.reduce_mean(tf.acosh(1.0 + theta))
    return distance

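A minimal usage sketch for Poincare_dis above (not from the original source): it assumes TF 1.x graph mode, and the random points and the 0.9 rescaling are illustrative choices to keep the inputs strictly inside the unit ball. Note that the function returns the mean distance over the batch, not one distance per row.

import numpy as np
import tensorflow as tf

a_np = np.random.rand(4, 5).astype(np.float32)
b_np = np.random.rand(4, 5).astype(np.float32)
# rescale so every row lies strictly inside the unit ball and 1 - ||x||^2 stays positive
a_np *= 0.9 / np.maximum(1.0, np.linalg.norm(a_np, axis=1, keepdims=True))
b_np *= 0.9 / np.maximum(1.0, np.linalg.norm(b_np, axis=1, keepdims=True))

with tf.Session() as sess:
    # scalar: mean Poincare distance over the 4 row pairs
    print(sess.run(Poincare_dis(tf.constant(a_np), tf.constant(b_np))))
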
def sample_gauss_pd(shape, oversample=5, radius=0.85):
    # rejection sample in Poincare disk
    d = shape[1]
    n_samples = tf.reduce_prod(shape) * oversample
    phi = tf.random_uniform([n_samples]) * np.pi
    p = tf.random_uniform([n_samples])
    r = tf.acosh(1 + p * (tf.cosh(radius - 1e-5) - 1))  # support (-3sigma, 3sigma)
    unif_samples = tf.stack(
        [tf.sinh(r) * tf.cos(phi), tf.sinh(r) * tf.sin(phi)], axis=1)
    # zero mean, unit sigma in half plane
    mean = tf.constant(np.array([[0.0, 0.0]]), tf.float32)
    sigma = 1.0
    p_samples = gauss_prob_pd(unif_samples, mean, sigma)
    # accept in proportion to highest
    max_value = tf.reduce_max(p_samples)
    accepted = tf.squeeze(
        tf.where(tf.random_uniform([n_samples]) < (p_samples / max_value)), 1)
    # select the samples - make sure it's enough
    u = tf.boolean_mask(unif_samples[:, 0], accepted)
    v = tf.boolean_mask(unif_samples[:, 1], accepted)
    # transform samples using cayley mapping z = (w-i)/(w+i) - project to unit disk
    disk_samples = tf.stack([u, v], axis=1)
    idx = tf.cast(tf.range(tf.reduce_prod(shape) / 2), tf.int32)
    return tf.reshape(tf.gather(disk_samples, idx), shape)

def poincare_dist(self, u, v):
    uu, uv, vv = tf.norm(u, axis=-1)**2, tf.norm(u - v, axis=-1)**2, tf.norm(v, axis=-1)**2
    alpha, beta = tf.maximum(1. - uu, self.eps), tf.maximum(1. - vv, self.eps)
    gamma = tf.maximum(1. + 2. * uv / alpha / beta, 1 + self.eps)
    return tf.acosh(gamma)

def loss(y_true, y_pred, r=r, t=t):
    u_emb = y_pred[:, 0]
    samples_emb = y_pred[:, 1:]
    inner_uv = minkowski_dot(u_emb, samples_emb)
    inner_uv = -inner_uv - 1. + 1e-7
    inner_uv = K.maximum(inner_uv, K.epsilon())  # clip to avoid nan
    d_uv = tf.acosh(1. + inner_uv)
    d_uv_sq = K.square(d_uv)
    r_sq = K.square(r)
    # r_sq = K.stop_gradient(K.mean(d_uv_sq))
    out_uv = (r_sq - d_uv_sq) / t
    pos_out_uv = out_uv[:, 0]
    neg_out_uv = out_uv[:, 1:]
    pos_p_uv = tf.nn.sigmoid(pos_out_uv)
    neg_p_uv = 1 - tf.nn.sigmoid(neg_out_uv)
    pos_p_uv = K.clip(pos_p_uv, min_value=1e-7, max_value=1 - 1e-7)
    neg_p_uv = K.clip(neg_p_uv, min_value=1e-7, max_value=1 - 1e-7)
    return -K.mean(K.log(pos_p_uv) + K.sum(K.log(neg_p_uv), axis=-1))

def dists(self, u, v):
    uu, uv, vv = tf.norm(u)**2, tf.matmul(u, tf.transpose(v)), tf.norm(v)**2
    alpha, beta = tf.maximum(1 - uu, self.eps), tf.maximum(1 - vv, self.eps)
    gamma = tf.maximum(1 + 2 * (uu + vv - 2 * uv) / alpha / beta, 1 + self.eps)
    return tf.acosh(gamma)

def distance(self, u, v):
    sq_u_norm = tf.reduce_sum(u * u, axis=-1, keepdims=True)
    sq_v_norm = tf.reduce_sum(v * v, axis=-1, keepdims=True)
    sq_u_norm = tf.clip_by_value(sq_u_norm, clip_value_min=0.0, clip_value_max=self.max_norm)
    sq_v_norm = tf.clip_by_value(sq_v_norm, clip_value_min=0.0, clip_value_max=self.max_norm)
    sq_dist = tf.reduce_sum(tf.pow(u - v, 2), axis=-1, keepdims=True)
    distance = tf.acosh(1 + self.eps + (sq_dist / ((1 - sq_u_norm) * (1 - sq_v_norm)) * 2))
    return distance

def _hyperbolic_distance(self, q, a, eps=1e-16):
    def _square_norm(x):
        return tf.square(tf.norm(x, keepdims=True, axis=1))
    z = _square_norm(q - a)
    q1 = 1.0 - _square_norm(q)
    a1 = 1.0 - _square_norm(a)
    return tf.acosh(1.0 + 2.0 * z / (q1 * a1 + eps))  # eps avoids division by zero

def _sigmoids(x, value_at_1, sigmoid):
    """Returns 1 when `x` == 0, between 0 and 1 otherwise.

    Args:
      x: A scalar or numpy array.
      value_at_1: A float between 0 and 1 specifying the output when `x` == 1.
      sigmoid: String, choice of sigmoid type.

    Returns:
      A numpy array with values between 0.0 and 1.0.

    Raises:
      ValueError: If not 0 < `value_at_1` < 1, except for `linear`, `cosine` and
        `quadratic` sigmoids which allow `value_at_1` == 0.
      ValueError: If `sigmoid` is of an unknown type.
    """
    if sigmoid in ('cosine', 'linear', 'quadratic'):
        if not 0 <= value_at_1 < 1:
            raise ValueError('`value_at_1` must be nonnegative and smaller than 1, '
                             'got {}.'.format(value_at_1))
    else:
        if not 0 < value_at_1 < 1:
            raise ValueError('`value_at_1` must be strictly between 0 and 1, '
                             'got {}.'.format(value_at_1))

    if sigmoid == 'gaussian':
        scale = tf.sqrt(-2 * tf.log(value_at_1))
        return tf.exp(-0.5 * (x * scale) ** 2)
    elif sigmoid == 'hyperbolic':
        scale = tf.acosh(1 / value_at_1)
        return 1 / tf.cosh(x * scale)
    elif sigmoid == 'long_tail':
        scale = tf.sqrt(1 / value_at_1 - 1)
        return 1 / ((x * scale) ** 2 + 1)
    elif sigmoid == 'cosine':
        scale = tf.acos(2 * value_at_1 - 1) / np.pi
        scaled_x = x * scale
        return tf.where(abs(scaled_x) < 1, (1 + tf.cos(np.pi * scaled_x)) / 2, 0.0 * scaled_x)
    elif sigmoid == 'linear':
        scale = 1.0 - value_at_1
        scaled_x = x * scale
        return tf.where(abs(scaled_x) < 1, 1 - scaled_x, 0.0 * scaled_x)
    elif sigmoid == 'quadratic':
        scale = tf.sqrt(1.0 - value_at_1)
        scaled_x = x * scale
        return tf.where(abs(scaled_x) < 1, 1 - scaled_x ** 2, 0.0 * scaled_x)
    elif sigmoid == 'tanh_squared':
        scale = tf.atanh(tf.sqrt(1 - value_at_1))  # tf.atanh is the TF op (tf.arctanh does not exist)
        return 1 - tf.tanh(x * scale) ** 2
    else:
        raise ValueError('Unknown sigmoid type {!r}.'.format(sigmoid))

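A minimal check of the 'hyperbolic' branch of _sigmoids above (not from the original source; assumes TF 1.x and that _sigmoids is in scope). Since scale = arcosh(1 / value_at_1), the output is exactly value_at_1 at x = 1 and decays toward 0 beyond it.

import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
out = _sigmoids(x, value_at_1=0.1, sigmoid='hyperbolic')
with tf.Session() as sess:
    # roughly [1.0, 0.1, 0.005]: 1/cosh(0) = 1 and 1/cosh(acosh(10)) = 0.1
    print(sess.run(out))
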
def logarithmic_map(p, x):
    assert len(p.shape) == len(x.shape)
    alpha = -minkowski_dot(p, x)
    alpha = K.maximum(alpha, 1 + K.epsilon())
    return tf.acosh(alpha) * (x - alpha * p) / \
        K.maximum(K.sqrt(K.maximum(alpha ** 2 - 1., 0.)), K.epsilon())

def tf_my_poincare_list_distance(mat_x, mat_y):
    # input: [nodes, features]
    norm_x_sq = tf.norm(mat_x, axis=1)**2
    norm_y_sq = tf.norm(mat_y, axis=1)**2
    dif_xy = tf.norm(mat_x - mat_y, axis=1)**2
    denominator = tf.multiply(1 - norm_x_sq, 1 - norm_y_sq)
    res = tf.clip_by_value(1 + 2 * dif_xy / denominator,
                           clip_value_min=1.000001, clip_value_max=100)
    return tf.acosh(res)

def tf_logarithm(base, other):
    """Return the logarithm of `other` in the tangent space of `base`."""
    mdp = tf_mink_dot_matrix(base, other)
    dist = tf.acosh(-mdp)
    proj = other + (mdp * base)
    norm = tf.sqrt(tf_mink_dot_matrix(proj, proj))
    proj *= dist / norm
    return proj

def distance(self, u, v):
    sq_u_norm = tf.clip_by_value(
        tf.reduce_sum(u * u, axis=-1),
        clip_value_min=0,
        clip_value_max=self.max_norm,
    )
    sq_v_norm = tf.clip_by_value(
        tf.reduce_sum(v * v, axis=-1),
        clip_value_min=0,
        clip_value_max=self.max_norm,
    )
    sq_dist = tf.reduce_sum(tf.pow(u - v, 2), axis=-1)
    return tf.acosh(1 + (sq_dist / ((1 - sq_u_norm) * (1 - sq_v_norm))) * 2)

def tf_elementwise_hyperbolic_distance(x, y):
    """
    creates a vector of hyperbolic distances d(i) = d_H(x[i, :], y[i, :])
    :param x: first set of vectors of shape (ndata, ndim)
    :param y: second set of vectors of shape (ndata, ndim)
    :return: A tensor of shape (ndata,) of elementwise hyperbolic distances
    """
    ynorm_sq = tf.reduce_sum(tf.square(y), axis=1)
    xnorm_sq = tf.reduce_sum(tf.square(x), axis=1)
    euclidean_dist_sq = tf.reduce_sum(tf.square(x - y), axis=1)
    denom = tf.multiply(1 - xnorm_sq, 1 - ynorm_sq)
    hyp_dist = tf.acosh(1 + 2 * tf.divide(euclidean_dist_sq, denom))
    return hyp_dist

def elementwise_distance(examples, labels):
    """
    creates a vector of hyperbolic distances d(i) = d_H(x[i, :], y[i, :])
    :param examples: first set of vectors of shape (ndata, ndim)
    :param labels: second set of vectors of shape (ndata, ndim)
    :return: A tensor of shape (ndata,) of elementwise hyperbolic distances
    """
    xnorm_sq = tf.square(tf.norm(examples, axis=1))
    ynorm_sq = tf.square(tf.norm(labels, axis=1))
    euclidean_dist_sq = tf.square(tf.norm(examples - labels, axis=1))
    denom = tf.multiply(1 - xnorm_sq, 1 - ynorm_sq)
    hyp_dist = tf.acosh(1 + 2 * tf.divide(euclidean_dist_sq, denom))
    return hyp_dist

def loss(y_true, y_pred, sigma=sigma):
    source_node_embedding = y_pred[:, 0]
    target_nodes_embedding = y_pred[:, 1:]
    inner_uv = minkowski_dot(source_node_embedding, target_nodes_embedding)
    inner_uv = -inner_uv
    inner_uv = K.maximum(inner_uv, 1. + K.epsilon())
    d_uv = tf.acosh(inner_uv)
    minus_d_uv_sq = -0.5 * K.square(d_uv / sigma)
    return K.mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y_true[:, 0, 0], logits=minus_d_uv_sq))

def tf_pairwise_hyperbolic_distance(x, y):
    """
    creates a matrix of hyperbolic distances D(i, j) = d_H(x[i, :], y[j, :])
    :param x: first set of vectors of shape (ndata1, ndim)
    :param y: second set of vectors of shape (ndata2, ndim)
    :return: A tensor of shape (ndata1, ndata2) of pairwise hyperbolic distances
    """
    xnorm_sq = tf.reduce_sum(tf.square(x), axis=1)
    ynorm_sq = tf.reduce_sum(tf.square(y), axis=1)
    # use the expanded form of the l2 norm to simplify broadcasting: ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x y.T
    euclidean_dist_sq = xnorm_sq[:, None] + ynorm_sq[None, :] - 2 * tf.matmul(
        x, y, transpose_b=True)
    denom = (1 - xnorm_sq[:, None]) * (1 - ynorm_sq[None, :])
    hyp_dist = tf.acosh(1 + 2 * tf.divide(euclidean_dist_sq, denom))
    return hyp_dist

def pairwise_distance(examples, samples):
    """
    creates a matrix of hyperbolic distances D(i, j) = d_H(x[i, :], y[j, :])
    :param examples: first set of vectors of shape (ndata1, ndim)
    :param samples: second set of vectors of shape (ndata2, ndim)
    :return: A tensor of shape (ndata1, ndata2) of pairwise hyperbolic distances
    """
    xnorm_sq = tf.square(tf.norm(examples, axis=1))
    ynorm_sq = tf.square(tf.norm(samples, axis=1))
    # use the expanded form of the l2 norm to simplify broadcasting: ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x y.T
    euclidean_dist_sq = xnorm_sq[:, None] + ynorm_sq[None, :] - 2 * tf.matmul(
        examples, samples, transpose_b=True)
    denom = (1 - xnorm_sq[:, None]) * (1 - ynorm_sq[None, :])
    hyp_dist = tf.acosh(1 + 2 * tf.divide(euclidean_dist_sq, denom))
    return hyp_dist

def tf_vec_distance(x, y):
    """
    The hyperbolic distance in the Poincare ball with curvature -1
    :param x: A 1D tensor of shape (1, ndim)
    :param y: A tensor of shape (nsamples, ndim)
    :return: A tensor of shape (1, nsamples)
    """
    if len(y.shape) > 1:
        norm_square = tf.square(tf.norm(x - y, axis=1))
        denom2 = 1 - tf.square(tf.norm(y, axis=1))
    else:
        norm_square = tf.square(tf.norm(x - y, axis=0))
        denom2 = 1 - tf.square(tf.norm(y, axis=0))
    denom1 = 1 - tf.square(tf.norm(x, axis=0))
    arg = 1 + 2 * norm_square / (denom1 * denom2)
    return tf.acosh(arg)

def tf_distance(x, y):
    """
    The hyperbolic distance between two vectors
    :param x: shape (1, ndims)
    :param y: shape (1, ndims)
    :return: a scalar hyperbolic distance
    """
    norm_square = tf.square(tf.norm(x - y, axis=0))
    print(norm_square)
    denom1 = 1 - tf.square(tf.norm(x, axis=0))
    print(denom1)
    denom2 = 1 - tf.square(tf.norm(y, axis=0))
    print(denom2)
    arg = 1 + 2 * norm_square / (denom1 * denom2)
    print(arg)
    return tf.acosh(arg)

def hyperbolic_distance(n1, n2, embeddings, eps):
    """
    Returns:
    ------------
    distance: hyperbolic distance between two nodes
    """
    v1 = embeddings[n1, :]
    v2 = embeddings[n2, :]
    norm1 = tf.norm(v1)
    norm2 = tf.norm(v2)
    v1 = tf.cond(tf.greater_equal(norm1, 1), lambda: v1 / norm1 - eps, lambda: v1)
    v2 = tf.cond(tf.greater_equal(norm2, 1), lambda: v2 / norm2 - eps, lambda: v2)
    distance = tf.acosh(1 + 2 * tf.reduce_sum(tf.square(v1 - v2)) /
                        ((1 - tf.reduce_sum(tf.square(v1))) * (1 - tf.reduce_sum(tf.square(v2)))))
    return distance

def hyperbolic_ball(x, y, eps=1e-8):
    """Poincare Distance Function."""
    z = x - y
    z = tf.norm(z, ord='euclidean', keep_dims=True, axis=1)
    z = tf.square(z)
    x_d = 1 - tf.square(tf.norm(x, ord='euclidean', keep_dims=True, axis=1))
    y_d = 1 - tf.square(tf.norm(y, ord='euclidean', keep_dims=True, axis=1))
    x_d = tf.maximum(x_d, eps)
    y_d = tf.maximum(y_d, eps)
    d = x_d * y_d
    z = z / d
    z = (2 * z) + 1
    z = tf.maximum(z, 1 + eps)
    arcosh = tf.acosh(z)
    arcosh = tf.squeeze(arcosh, axis=1)
    return arcosh

def dist_pd(x, y):
    x_norm = tf.norm(x, 2, axis=1)
    y_norm = tf.norm(y, 2, axis=1)
    d_xy = tf.norm(x - y, 2, axis=1)
    return tf.acosh(1 + 2 * d_xy**2 / ((1 - x_norm**2) * (1 - y_norm**2)))

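A quick sanity check of dist_pd above against the closed form d(x, y) = arcosh(1 + 2 ||x - y||^2 / ((1 - ||x||^2)(1 - ||y||^2))) (not from the original source; assumes TF 1.x, and the sample points are illustrative):

import numpy as np
import tensorflow as tf

x_np = np.array([[0.1, 0.2]], dtype=np.float32)
y_np = np.array([[0.3, -0.4]], dtype=np.float32)
# the same formula evaluated in numpy for comparison
expected = np.arccosh(
    1 + 2 * np.sum((x_np - y_np) ** 2, axis=1) /
    ((1 - np.sum(x_np ** 2, axis=1)) * (1 - np.sum(y_np ** 2, axis=1))))
with tf.Session() as sess:
    # both print roughly 1.385
    print(sess.run(dist_pd(tf.constant(x_np), tf.constant(y_np))), expected)
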
import tensorflow as tf

sess = tf.InteractiveSession()
t = tf.constant([1.8, 2.2])
target = tf.acosh(t).eval()
print(target)

def arccosh(x):
    return tf.acosh(x)

def arccosh(self, x):
    return tf.acosh(x)

def _tf_arccosh(x):
    return tf.acosh(x)

def execute_acosh(self):
    return tf.acosh(self.a, name="acosh" + str(self.node_num))

# The enclosing `def` line of this first statement was lost in extraction; it is the
# numpy form of the arcosh argument used in dist_poincare below:
# return 1 + 2 * np.dot(u - v, u - v) / ((1 - np.dot(u, u)) * (1 - np.dot(v, v)))

import numpy as np
import tensorflow as tf


def dot_product(x, y):
    return tf.reduce_sum(tf.multiply(x, y))


u = tf.placeholder(tf.float32)
v = tf.placeholder(tf.float32)

u_instance = np.random.rand(10)  # np.array([1.0, 2.0, 3643])
v_instance = np.random.rand(10)  # np.array([3.0, 4.0, 5.15])
print('u:', u_instance)
print('v:', v_instance)

dist_poincare = tf.acosh(1 + 2 * dot_product(u - v, u - v) /
                         ((1 - dot_product(u, u)) * (1 - dot_product(v, v))))

numerator = (1 + 2 * dot_product(u, v) + dot_product(v, v)) * u + (1 - dot_product(u, u)) * v
denominator = 1 + 2 * dot_product(u, v) + dot_product(u, u) * dot_product(v, v)
mobius_addition = tf.divide(numerator, denominator)

gradient_poincare_distance_u, gradient_poincare_distance_v = tf.gradients(
    ys=dist_poincare, xs=[u, v])

with tf.Session() as sess:
    print('Gradients:')
    print(sess.run(gradient_poincare_distance_u, feed_dict={u: u_instance, v: v_instance}))
    print(sess.run(gradient_poincare_distance_v, feed_dict={u: u_instance, v: v_instance}))