def to_simplex(x):
  """Transform real vector of length ``(K-1)`` to a simplex of dimension
  ``K`` using a backward stick breaking construction.

  Parameters
  ----------
  x : tf.Tensor
    A 1-D or 2-D tensor.

  Returns
  -------
  tf.Tensor
    A tensor of same shape as input but with last dimension of size ``K``.

  Raises
  ------
  InvalidArgumentError
    If the input has Inf or NaN values.

  Notes
  -----
  x as a 3-D or higher tensor is not guaranteed to be supported.
  """
  x = tf.cast(x, dtype=tf.float32)
  # Guard against Inf/NaN inputs; the dependency forces the check to run
  # before x is used downstream.
  dependencies = [tf.verify_tensor_all_finite(x, msg='')]
  x = control_flow_ops.with_dependencies(dependencies, x)

  # TF objects carry static shape info via get_dims; other array-likes
  # (e.g. np.ndarray) expose .shape directly.
  if isinstance(x, (tf.Tensor, tf.Variable)):
    shape = get_dims(x)
  else:
    shape = x.shape

  # The last dimension holds the K-1 unconstrained values. The stick
  # proportions z are computed identically for the 1-D and 2-D cases
  # (eq broadcasts across rows in the 2-D case), so hoist them here.
  is_1d = len(shape) == 1
  K_minus_one = shape[0] if is_1d else shape[1]
  eq = -tf.log(tf.cast(K_minus_one - tf.range(K_minus_one), dtype=tf.float32))
  z = tf.sigmoid(eq + x)

  if is_1d:
    # Append/prepend the terminal stick of length 1, then take the
    # cumulative product of remaining stick lengths.
    pil = tf.concat([z, tf.constant([1.0])], 0)
    piu = tf.concat([tf.constant([1.0]), 1.0 - z], 0)
    S = tf.cumprod(piu)
  else:
    n_rows = shape[0]
    pil = tf.concat([z, tf.ones([n_rows, 1])], 1)
    piu = tf.concat([tf.ones([n_rows, 1]), 1.0 - z], 1)
    S = tf.cumprod(piu, axis=1)

  return S * pil
def tile(input, multiples, *args, **kwargs):
  """Constructs a tensor by tiling a given tensor.

  This extends ``tf.tile`` to features available in ``np.tile``.
  Namely, ``inputs`` and ``multiples`` can be a 0-D tensor.  Further,
  if 1-D, ``multiples`` can be of any length according to broadcasting
  rules (see documentation of ``np.tile`` or examples below).

  Parameters
  ----------
  input : tf.Tensor
    The input tensor.
  multiples : tf.Tensor
    The number of repetitions of ``input`` along each axis. Has type
    ``tf.int32``. 0-D or 1-D.
  *args :
    Passed into ``tf.tile``.
  **kwargs :
    Passed into ``tf.tile``.

  Returns
  -------
  tf.Tensor
    Has the same type as ``input``.

  Examples
  --------
  >>> a = tf.constant([0, 1, 2])
  >>> sess.run(ed.tile(a, 2))
  array([0, 1, 2, 0, 1, 2], dtype=int32)
  >>> sess.run(ed.tile(a, (2, 2)))
  array([[0, 1, 2, 0, 1, 2],
         [0, 1, 2, 0, 1, 2]], dtype=int32)
  >>> sess.run(ed.tile(a, (2, 1, 2)))
  array([[[0, 1, 2, 0, 1, 2]],
         [[0, 1, 2, 0, 1, 2]]], dtype=int32)
  >>>
  >>> b = tf.constant([[1, 2], [3, 4]])
  >>> sess.run(ed.tile(b, 2))
  array([[1, 2, 1, 2],
         [3, 4, 3, 4]], dtype=int32)
  >>> sess.run(ed.tile(b, (2, 1)))
  array([[1, 2],
         [3, 4],
         [1, 2],
         [3, 4]], dtype=int32)
  >>>
  >>> c = tf.constant([1, 2, 3, 4])
  >>> sess.run(ed.tile(c, (4, 1)))
  array([[1, 2, 3, 4],
         [1, 2, 3, 4],
         [1, 2, 3, 4],
         [1, 2, 3, 4]], dtype=int32)

  Notes
  -----
  Sometimes this can result in an unknown shape. The core reason for
  this is the following behavior:

  >>> n = tf.constant([1])
  >>> tf.tile(tf.constant([[1.0]]),
  ...         tf.concat([n, tf.constant([1.0]).get_shape()], 0))
  <tf.Tensor 'Tile:0' shape=(1, 1) dtype=float32>
  >>> n = tf.reshape(tf.constant(1), [1])
  >>> tf.tile(tf.constant([[1.0]]),
  ...         tf.concat([n, tf.constant([1.0]).get_shape()], 0))
  <tf.Tensor 'Tile_1:0' shape=(?, 1) dtype=float32>

  For this reason, we try to fetch ``multiples`` out of session if
  possible. This can be slow if ``multiples`` has computationally
  intensive dependencies in order to perform this fetch.
  """
  input = tf.convert_to_tensor(input)
  multiples = tf.convert_to_tensor(multiples)

  # Promote 0-d tensors to 1-d so rank arithmetic below is well-defined.
  if len(input.get_shape()) == 0:
    input = tf.expand_dims(input, 0)

  if len(multiples.get_shape()) == 0:
    multiples = tf.expand_dims(multiples, 0)

  try:
    # Best-effort fetch of `multiples` so tf.tile can infer a static
    # output shape (see Notes). Fall back to the symbolic tensor on any
    # failure (e.g. no default session, unfed placeholders).
    get_session()
    multiples = tf.convert_to_tensor(multiples.eval())
  except Exception:
    pass

  # np.tile-style broadcasting: pad the shorter of the two (in rank)
  # with leading singleton entries.
  diff = len(input.get_shape()) - get_dims(multiples)[0]
  if diff < 0:
    # `multiples` is longer: add leading size-1 dimensions to `input`.
    input = tf.reshape(input, [1] * np.abs(diff) + get_dims(input))
  elif diff > 0:
    # `input` has higher rank: pad `multiples` with leading ones.
    # Fix: tf.concat takes (values, axis) in TF >= 1.0, matching the
    # usage elsewhere in this module; the old (axis, values) order
    # fails on modern TF.
    multiples = tf.concat([tf.ones(diff, dtype=tf.int32), multiples], 0)

  return tf.tile(input, multiples, *args, **kwargs)