Example #1
    def testEmptyRnnInput(self):
        observation_values = tf.SparseTensor(
            indices=tf.reshape(tf.constant([], dtype=tf.int64), shape=[0, 3]),
            values=tf.constant([], dtype=tf.float32),
            dense_shape=[2, 0, 1])
        observation_code_ids = tf.SparseTensor(
            indices=observation_values.indices,
            values=tf.constant([], dtype=tf.string),
            dense_shape=observation_values.dense_shape)
        delta_time, obs_values, indicator = osm.construct_input(
            {
                'Observation.code':
                observation_code_ids,
                'Observation.valueQuantity.value':
                observation_values,
                'deltaTime':
                tf.reshape(tf.constant([[], []], dtype=tf.int64), [2, 0, 1])
            }, ['loinc:1', 'loinc:2', 'MISSING'],
            'Observation.code',
            'Observation.valueQuantity.value',
            mode=tf.estimator.ModeKeys.TRAIN,
            normalize=False,
            momentum=0.9,
            min_value=-10000000,
            max_value=10000000,
            input_keep_prob=1.0)

        result = tf.concat([delta_time, indicator, obs_values], axis=2)

        with self.test_session() as sess:
            sess.run(tf.tables_initializer())
            actual_result, = sess.run([tf.shape(result)])
            self.assertAllClose([2, 0, 7], actual_result)
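The empty-input pattern above hinges on giving the SparseTensor zero entries while keeping rank-3 indices, so downstream shape logic still type-checks. A minimal standalone sketch of that pattern (assuming TF 1.x):

import tensorflow as tf

# Zero entries; reshaping the empty index list to [0, 3] keeps the tensor rank 3.
empty = tf.SparseTensor(
    indices=tf.reshape(tf.constant([], dtype=tf.int64), [0, 3]),
    values=tf.constant([], dtype=tf.float32),
    dense_shape=[2, 0, 1])

with tf.Session() as sess:
    # Densifying yields an empty array of shape (2, 0, 1).
    print(sess.run(tf.sparse_tensor_to_dense(empty)).shape)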
Example #2
 def test_intersect_indices_sparse(self):
     obs_code = tf.SparseTensor(indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0],
                                         [1, 1, 0], [1, 2, 0], [1, 3, 0],
                                         [2, 0, 0], [2, 2, 0]],
                                values=[
                                    'loinc:1', 'loinc:1', 'loinc:2',
                                    'loinc:4', 'loinc:4', 'loinc:2',
                                    'loinc:1', 'loinc:4'
                                ],
                                dense_shape=[3, 4, 1])
     obs_harm_code = tf.SparseTensor(indices=[[0, 0, 0], [0, 1,
                                                          0], [1, 1, 0],
                                              [2, 1, 0], [2, 2, 0]],
                                     values=[
                                         'pulse', 'pulse', 'blood_pressure',
                                         'temperature', 'temperature'
                                     ],
                                     dense_shape=[3, 2, 3])
     indices = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [2, 1, 0]]
     values = [b'loinc:1', b'loinc:1', b'loinc:4', b'loinc:4']
     dense_shape = [3, 2, 1]
     new_obs_code = input_fn._intersect_indices(obs_code, obs_harm_code)
     with self.test_session() as sess:
         actual_obs_code = sess.run(new_obs_code)
         self.assertAllEqual(values, actual_obs_code.values)
         self.assertAllEqual(indices, actual_obs_code.indices)
         self.assertAllEqual(dense_shape, actual_obs_code.dense_shape)
Example #3
    def graph_attention_layer(self, A, M, v, layer):

        with tf.variable_scope("layer_%s" % layer):
            # drop
            # M = tf.nn.dropout(M, 1 - self.dropout)
            f1 = tf.matmul(M, v[0])  # (?,1)
            f1 = A * f1  # (?,) element-wise product
            f2 = tf.matmul(M, v[1])  # (?,1)
            f2 = A * tf.transpose(
                f2, [1, 0])  # (?,?) transpose: for the coefficient of h_j
            logits = tf.sparse_add(f1, f2)  # (?,)

            unnormalized_attentions = tf.SparseTensor(
                indices=logits.indices,
                values=tf.nn.sigmoid(logits.values),
                dense_shape=logits.dense_shape)
            attentions = tf.sparse_softmax(unnormalized_attentions)  # (?,)

            attentions = tf.SparseTensor(
                indices=attentions.indices,
                values=attentions.values,
                dense_shape=attentions.dense_shape)  # (?,)

            # attention dropout, each node is exposed to a stochastically sampled neighborhood
            if FLAGS.gat_dropout > 0:
                attentions = tf.SparseTensor(
                    indices=attentions.indices,
                    values=tf.nn.dropout(
                        attentions.values,
                        1.0 - self.placeholders['gat_dropout']),
                    dense_shape=attentions.dense_shape)  # (?,)

            return attentions
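The layer's core pattern, applying an elementwise op to a SparseTensor's values and then renormalizing with tf.sparse_softmax, is independent of the attention machinery. A minimal sketch (assuming TF 1.x):

import tensorflow as tf

logits = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                         values=[2.0, 1.0, 0.5],
                         dense_shape=[2, 3])
# Transform only the stored values; the sparsity pattern is unchanged.
activated = tf.SparseTensor(indices=logits.indices,
                            values=tf.nn.sigmoid(logits.values),
                            dense_shape=logits.dense_shape)
# Softmax is taken over the nonzero entries of each row.
attention = tf.sparse_softmax(activated)

with tf.Session() as sess:
    print(sess.run(tf.sparse_tensor_to_dense(attention)))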
Example #4
 def _make_adjacency(self, sources, dests, num_nodes=None, tensor=True):
     if num_nodes is None:
         num_nodes = len(self.node_encode.classes_)
     if tensor:
         try:
             adj = tf.SparseTensor(
                 [[sources.values[i, 0], dests.values[i, 0]]
                  for i in range(sources.values.shape[0])],
                 [1.0 for i in range(sources.values.shape[0])],
                 dense_shape=(num_nodes, num_nodes))
         except (AttributeError, IndexError):
             # Fall back to plain 1-D index arrays (no DataFrame-style .values).
             adj = tf.SparseTensor([[sources[i], dests[i]]
                                    for i in range(sources.shape[0])],
                                   [1.0 for i in range(sources.shape[0])],
                                   dense_shape=(num_nodes, num_nodes))
     else:
         try:
             adj = csr_matrix(
                 ([1.0 for i in range(sources.values.shape[0])], ([
                     sources.values[i, 0]
                     for i in range(sources.values.shape[0])
                 ], [
                     dests.values[i, 0]
                     for i in range(sources.values.shape[0])
                 ])),
                 shape=(num_nodes, num_nodes))
         except (AttributeError, IndexError):
             # Fall back to plain 1-D index arrays (no DataFrame-style .values).
             adj = csr_matrix(
                 ([1.0 for i in range(sources.shape[0])],
                  ([sources[i] for i in range(sources.shape[0])
                    ], [dests[i] for i in range(sources.shape[0])])),
                 shape=(num_nodes, num_nodes))
     return adj
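When sources and dests are plain 1-D integer arrays, the list comprehensions above reduce to a single stacked index array. A minimal sketch (assuming TF 1.x and NumPy inputs):

import numpy as np
import tensorflow as tf

sources = np.array([0, 1, 2], dtype=np.int64)
dests = np.array([1, 2, 0], dtype=np.int64)
num_nodes = 3
# One entry of weight 1.0 per (source, dest) edge.
adj = tf.SparseTensor(indices=np.stack([sources, dests], axis=1),
                      values=np.ones(len(sources), dtype=np.float32),
                      dense_shape=(num_nodes, num_nodes))

with tf.Session() as sess:
    print(sess.run(tf.sparse_tensor_to_dense(adj)))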
Example #5
def _extend_with_dummy(extend_with, to_extend, dummy_value='n/a'):
  """Extends one SparseTensor with dummy_values at positions of other."""
  dense_shape = tf.to_int64(
      tf.concat([[tf.shape(extend_with)[0]],
                 [tf.maximum(tf.shape(extend_with)[1],
                             tf.shape(to_extend)[1])],
                 [tf.maximum(tf.shape(extend_with)[2],
                             tf.shape(to_extend)[2])]],
                axis=0))
  additional_indices = tf.sets.set_difference(
      tf.SparseTensor(
          indices=extend_with.indices,
          values=tf.zeros_like(extend_with.values, dtype=tf.int32),
          dense_shape=dense_shape),
      tf.SparseTensor(
          indices=to_extend.indices,
          values=tf.zeros([tf.shape(to_extend.indices)[0]], dtype=tf.int32),
          dense_shape=dense_shape)).indices
  # Supply defaults for all other indices.
  default = tf.tile(
      tf.constant([dummy_value]), multiples=[tf.shape(additional_indices)[0]])

  string_value = (
      tf.as_string(to_extend.values)
      if to_extend.values.dtype != tf.string else to_extend.values)
  return tf.sparse_reorder(
      tf.SparseTensor(
          indices=tf.concat([to_extend.indices, additional_indices], axis=0),
          values=tf.concat([string_value, default], axis=0),
          dense_shape=dense_shape))
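The closing tf.sparse_reorder call matters: concatenating two index lists almost always breaks the row-major ordering that most sparse ops assume. A minimal sketch of what reordering does (assuming TF 1.x):

import tensorflow as tf

# Indices deliberately out of row-major order.
unordered = tf.SparseTensor(indices=[[1, 0], [0, 1]],
                            values=['b', 'a'],
                            dense_shape=[2, 2])
ordered = tf.sparse_reorder(unordered)

with tf.Session() as sess:
    print(sess.run(ordered.indices))  # [[0 1] [1 0]]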
Example #6
    def _build_inputs(self):
        self.pos_indices = tf.placeholder(tf.int64,
                                          shape=[None, 2],
                                          name='pos_indices')
        self.pos_values = tf.placeholder(tf.float32,
                                         shape=[None],
                                         name='pos_values')
        self.pos_shape = tf.placeholder(tf.int64, shape=[2], name='pos_shape')

        self.neg_indices = tf.placeholder(tf.int64,
                                          shape=[None, 2],
                                          name='neg_indices')
        self.neg_values = tf.placeholder(tf.float32,
                                         shape=[None],
                                         name='neg_values')
        self.neg_shape = tf.placeholder(tf.int64, shape=[2], name='neg_shape')

        self.mess_dropout = tf.placeholder(tf.float32,
                                           shape=[None],
                                           name='mess_dropout')

        # Input positive features, shape = (batch_size, feature_dim)
        self.sp_pos_feats = tf.SparseTensor(self.pos_indices, self.pos_values,
                                            self.pos_shape)
        # Input negative features, shape = (batch_size, feature_dim)
        self.sp_neg_feats = tf.SparseTensor(self.neg_indices, self.neg_values,
                                            self.neg_shape)
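The three placeholders above are fed the components of one sparse batch at run time. A minimal sketch of the same pattern (assuming TF 1.x; the data is hypothetical):

import tensorflow as tf

indices_ph = tf.placeholder(tf.int64, shape=[None, 2], name='indices')
values_ph = tf.placeholder(tf.float32, shape=[None], name='values')
shape_ph = tf.placeholder(tf.int64, shape=[2], name='shape')
sp_feats = tf.SparseTensor(indices_ph, values_ph, shape_ph)

with tf.Session() as sess:
    dense = sess.run(tf.sparse_tensor_to_dense(sp_feats),
                     feed_dict={indices_ph: [[0, 0], [1, 1]],
                                values_ph: [1.0, 2.0],
                                shape_ph: [2, 2]})
    print(dense)  # [[1. 0.] [0. 2.]]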
Example #7
    def _create_implicit_feedback(self, implicit_feedback, dual=False):
        """Returns the (tuple of) sparse tensor(s) of implicit feedback.
        """
        with tf.variable_scope('implicit_feedback'):
            if not dual:
                N = tf.SparseTensor(**implicit_feedback)

                return N
            else:
                N = tf.SparseTensor(**implicit_feedback[0])
                H = tf.SparseTensor(**implicit_feedback[1])

                return N, H
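The ** unpacking works because tf.SparseTensor's constructor takes exactly the keyword arguments indices, values, and dense_shape, so implicit_feedback here is presumably a dict (or pair of dicts) with those keys. A minimal sketch:

import tensorflow as tf

implicit_feedback = {'indices': [[0, 1], [2, 0]],
                     'values': [1.0, 1.0],
                     'dense_shape': [3, 2]}
N = tf.SparseTensor(**implicit_feedback)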
Example #8
    def testRnnInput(self):
        observation_values = tf.SparseTensor(
            indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [1, 0, 0], [1, 1, 0],
                     [1, 2, 0]],
            values=[100.0, 2.3, 9999999.0, 0.5, 0.0, 4.0],
            dense_shape=[2, 3, 1])
        observation_code_ids = tf.SparseTensor(
            indices=observation_values.indices,
            values=[
                'loinc:2', 'loinc:1', 'loinc:2', 'loinc:1', 'MISSING',
                'loinc:1'
            ],
            dense_shape=observation_values.dense_shape)
        delta_time, obs_values, indicator = osm.construct_input(
            {
                'Observation.code':
                observation_code_ids,
                'Observation.valueQuantity.value':
                observation_values,
                'deltaTime':
                tf.constant([[[2 * 60 * 60], [3 * 60 * 60], [0]],
                             [[1 * 60 * 60], [3 * 60 * 60], [6 * 60 * 60]]],
                            dtype=tf.int64)
            }, ['loinc:1', 'loinc:2', 'MISSING'],
            'Observation.code',
            'Observation.valueQuantity.value',
            mode=tf.estimator.ModeKeys.TRAIN,
            normalize=False,
            momentum=0.9,
            min_value=-10000000,
            max_value=10000000,
            input_keep_prob=1.0)

        result = tf.concat([delta_time, indicator, obs_values], axis=2)

        expected_result = [
            [
                [0, 0, 1, 0, 0, 100, 0],
                [-1, 1, 0, 0, 2.3, 0, 0],
                # value 9999999.0 was filtered.
                [3, 0, 0, 0, 0, 0, 0]
            ],
            [[0, 1, 0, 0, 0.5, 0, 0], [-2, 0, 0, 1, 0, 0, 0],
             [-3, 1, 0, 0, 4.0, 0, 0]]
        ]

        with self.test_session() as sess:
            sess.run(tf.tables_initializer())
            actual_result = sess.run(result)
            print(actual_result)
            self.assertAllClose(expected_result, actual_result, atol=0.01)
Example #9
def _make_sparse_tensor_dict():
    rel_name1 = 'real_stuff'
    # Note, these matrices are transposed.
    sparse_tensor1 = tf.SparseTensor(indices=[[0, 0], [99, 1]],
                                     values=[1., 2.],
                                     dense_shape=[100, 2])
    rel_name2 = 'other_stuff'
    sparse_tensor2 = tf.SparseTensor(indices=[[100, 0]],
                                     values=[3.],
                                     dense_shape=[1000, 2])
    return {
        rel_name1: tf.Session().run(sparse_tensor1),
        rel_name2: tf.Session().run(sparse_tensor2)
    }
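Note that sess.run on a SparseTensor, as above, returns a tf.SparseTensorValue namedtuple of NumPy arrays rather than a graph tensor. A minimal sketch (assuming TF 1.x):

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
with tf.Session() as sess:
    sp_value = sess.run(sp)
# sp_value exposes .indices, .values and .dense_shape as NumPy arrays.
print(type(sp_value).__name__, sp_value.values)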
Example #10
    def get_tf_tensor(self, rel_name, shard_id=-1):
        """Get the Tensor that represents a relation.

    Args:
      rel_name: string naming a declared relation
      shard_id: the i'th shard of the matrix. -1 if the matrix is not sharded.

    Returns:
      tf.SparseTensor

    Raises:
      RuntimeError: If the expression has no initial value.
    """
        if shard_id < 0:
            return super(DistributedNeuralQueryContext,
                         self).get_tf_tensor(rel_name)

        sharded_rel_name = self._sharded_rel_name(rel_name, shard_id)
        # Construct the tensor if it hasn't been created before, and cache it.
        if sharded_rel_name not in self._cached_tensor:
            if sharded_rel_name not in self._np_initval:
                raise RuntimeError(
                    'KG relation named %r has no initial value.' %
                    sharded_rel_name)
            if self.is_dense(rel_name):
                raise TypeError(
                    'DistributedNeuralQueryContext does not support dense relation %s'
                    % rel_name)

            m = self._np_initval[sharded_rel_name]
            n_rows, n_cols = m.shape
            data_m = np.transpose(np.vstack([m.row, m.col]))
            if self.is_trainable(
                    rel_name):  # construct tf variable if trainable
                data_var_name = 'nql/%s_values' % rel_name
                data_var = tf.Variable(m.data,
                                       trainable=True,
                                       name=data_var_name)
                sparse_tensor = tf.SparseTensor(data_m, data_var,
                                                [n_rows, n_cols])
                assert self._declaration[rel_name].underlying_parameter is None
                self._declaration[rel_name].underlying_parameters[
                    shard_id] = data_var
            else:  # if not trainable, construct a constant tensor
                sparse_tensor = tf.SparseTensor(data_m, m.data,
                                                [n_rows, n_cols])

            self._cached_tensor[sharded_rel_name] = sparse_tensor
        return self._cached_tensor[sharded_rel_name]
Example #11
 def compute_sparse_matrix(self, stencils, batch_size, grid_size):
     with tf.device(self.device):
         indexes, values_indices = self.get_indices_compute_A((batch_size, grid_size))
         tau = tf.SparseTensor(indices=indexes,
                               values=tf.gather_nd(params=stencils, indices=values_indices),
                               dense_shape=(batch_size, grid_size ** 2, grid_size ** 2))
         return tau
Example #12
def dense_to_sparse(dense_tensor, out_type, ignore_value=-1):
  indices = tf.where(
      tf.not_equal(dense_tensor,
                   tf.constant(ignore_value, dense_tensor.dtype)))
  values = tf.gather_nd(dense_tensor, indices)
  shape = tf.shape(dense_tensor, out_type=out_type)
  return tf.SparseTensor(indices, values, shape)
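A round trip through this helper drops exactly the ignore_value entries. A usage sketch (assuming TF 1.x):

import tensorflow as tf

dense = tf.constant([[5, -1], [-1, 7]])
sp = dense_to_sparse(dense, out_type=tf.int64)  # drops the -1 entries

with tf.Session() as sess:
    print(sess.run(sp.indices))  # [[0 0] [1 1]]
    print(sess.run(sp.values))   # [5 7]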
Example #13
 def chebyshev5(self, x, L, Fout, K):
     N, M, Fin = x.get_shape()
     N, M, Fin = int(N), int(M), int(Fin)
     # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
     L = scipy.sparse.csr_matrix(L)
     L = graph.rescale_L(L, lmax=2)
     L = L.tocoo()
     indices = np.column_stack((L.row, L.col))
     L = tf.SparseTensor(indices, L.data, L.shape)
     L = tf.sparse_reorder(L)
     # Transform to Chebyshev basis
     x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
     x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
     x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
     print("Test")
     def concat(x, x_):
         x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
         return tf.concat([x, x_], axis=0)  # K x M x Fin*N
     if K > 1:
         x1 = tf.sparse_tensor_dense_matmul(L, x0)
         x = concat(x, x1)
         print(" K = 1")
     for k in range(2, K):
         x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
         x = concat(x, x2)
         x0, x1 = x1, x2
     x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
     x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
     x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
     # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
     W = self._weight_variable([Fin*K, Fout], regularization=False)
     x = tf.matmul(x, W)  # N*M x Fout
     return tf.reshape(x, [N, M, Fout])  # N x M x Fout
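The SciPy-to-TF conversion inside chebyshev5 is a useful pattern on its own. A minimal standalone sketch (assuming TF 1.x) for any SciPy COO matrix:

import numpy as np
import scipy.sparse
import tensorflow as tf

lap = scipy.sparse.csr_matrix(np.eye(3, dtype=np.float32)).tocoo()
indices = np.column_stack((lap.row, lap.col))
lap_tf = tf.sparse_reorder(tf.SparseTensor(indices, lap.data, lap.shape))

x = tf.ones([3, 2], dtype=tf.float32)
y = tf.sparse_tensor_dense_matmul(lap_tf, x)
with tf.Session() as sess:
    print(sess.run(y))  # the identity matrix leaves x unchanged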
Example #14
    def rotation_matrix(self, plane):
        i = plane.first_axis
        j = plane.second_axis
        theta = plane.theta

        # theta stores cos(theta); recover sin(theta) without backprop through acos.
        cos_theta = theta
        sin_theta = tf.stop_gradient(tf.sin(tf.acos(theta)))
        # rotation = np.eye(self.dim).tolist()
        # rotation[i][i] = cos_theta
        # rotation[i][j] = -sin_theta
        # rotation[j][i] = sin_theta
        # rotation[j][j] = cos_theta
        # rotation = tf.stack(rotation)

        idx = []
        values = []
        for k in range(self.dim):
            idx.append([k, k])
            if k == i or k == j:
                values.append(cos_theta)
            else:
                values.append(1)
        idx.append([i, j])
        values.append(-sin_theta)
        idx.append([j, i])
        values.append(sin_theta)

        rotation = tf.SparseTensor(idx,
                                   values=tf.stack(values),
                                   dense_shape=[self.dim, self.dim])

        return rotation
Example #15
    def chebyshev(self, x, L, Fout, K, normalized=False, algo='LB'):
        '''normalized or not, algo='LB' or 'gL' (graph Laplacian)
        will affect the value of "lmax" (maximum eigenvalue)'''
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        lmax = graph.lmax(L, normalized, algo)  # 20200912
#        L = graph.rescale_L(L, lmax=2)
        L = graph.rescale_L(L, lmax)  # 20200912
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(1, K-1):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Example #16
def _slice_with_actions(embeddings, actions):
    """Slice a Tensor.

  Take embeddings of the form [batch_size, num_actions, embed_dim]
  and actions of the form [batch_size, 1], and return the sliced embeddings
  like embeddings[:, actions, :].

  Args:
    embeddings: Tensor of embeddings to index.
    actions: int Tensor to use as index into embeddings

  Returns:
    Tensor of embeddings indexed by actions
  """
    batch_size, num_actions = embeddings.get_shape()[:2]

    # Values are the 'values' in a sparse tensor we will be setting
    act_indx = tf.cast(actions, tf.int64)[:, None]
    values = tf.reshape(tf.cast(tf.ones(tf.shape(actions)), tf.bool), [-1])

    # Create a range for each index into the batch
    act_range = tf.range(0, batch_size, dtype=tf.int64)[:, None]
    # Combine this into coordinates with the action indices
    indices = tf.concat([act_range, act_indx], 1)

    actions_mask = tf.SparseTensor(indices, values, [batch_size, num_actions])
    actions_mask = tf.stop_gradient(
        tf.sparse_tensor_to_dense(actions_mask, default_value=False))
    sliced_emb = tf.boolean_mask(embeddings, actions_mask)
    return sliced_emb
Example #17
def multihot_embedding(sess, slot_id):
    slotx_emb_table = tf.get_variable(
        name='multi_hot_emb_slot_%s' % str(slot_id),
        shape=(g_dict_len, g_emb_size),
        initializer=tf.glorot_uniform_initializer())
    '''
    slotx_emb_table = tf.constant([[6.4, 1.2, 0.5, 3.3],
                                   [0.3, 0.4, 0.5, 0.8],
                                   [1.5, 0.3, 2.2, 1.9],
                                   [0.4, 0.9, 1.1, 4.3]])
    '''

    # Define the sparse matrix: each entry of `indices` is a position in the
    # matrix ([0, 0] means row 0, column 0); `values` holds the corresponding
    # row indices into the embedding table; `dense_shape` is (rows, cols).
    # The sparse matrix looks like the block below: each row is one multi-hot
    # sample, the row count is the batch size, and the column count is the
    # maximum number of hots per sample. N marks positions with no stored entry.
    #[[1, 2, 3, N, N],
    # [N, N, 2, N, N],
    # [N, N, 3, 1, N]]
    slotx_idx = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 2],
                                         [2, 2], [2, 3]],
                                values=[1, 2, 3, 2, 3, 1],
                                dense_shape=(10, 5))
    print("slotx_emb_table.shape=", slotx_emb_table.shape)

    slotx_emb = tf.nn.embedding_lookup_sparse(
        slotx_emb_table, slotx_idx, sp_weights=None,
        combiner="sum")  #combiner=sum表示multihot用sum方式聚合
    sess.run(tf.global_variables_initializer())
    #print("emb_table(slot"+str(slot_id)+")=\n", sess.run(slotx_emb_table))
    print("emb(slot" + str(slot_id) + ")=\n", sess.run(slotx_emb))
    return slotx_emb
Example #18
def get_bag_vectors(model):
    """
    Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
    binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot
    :param model: CANTRIP model
    :type model: modeling.CANTRIPModel
    :return: clinical snapshot encoding
    """
    # 1. Evaluate which entries in model.observations are non-zero
    mask = tf.not_equal(model.observations, 0)
    where = tf.where(mask)

    # 2. Get the vocabulary indices for non-zero observations
    vocab_indices = tf.boolean_mask(model.observations, mask)
    vocab_indices = tf.expand_dims(vocab_indices[:], axis=-1)
    vocab_indices = tf.cast(vocab_indices, dtype=tf.int64)

    # 3. Get batch and sequence indices for non-zero observations
    tensor_indices = where[:, :-1]

    # Concat batch, sequence, and vocabulary indices
    indices = tf.concat([tensor_indices, vocab_indices], axis=-1)

    # Our sparse tensor will be 1 for observed observations and 0 otherwise
    ones = tf.ones_like(indices[:, 0], dtype=tf.float32)

    # The dense shape will be the same as model.observations, but using the entire vocabulary as the final dimension
    dense_shape = model.observations.get_shape().as_list()
    dense_shape[2] = model.vocabulary_size

    # Store as a sparse tensor because they're neat
    st = tf.SparseTensor(indices=indices, values=ones, dense_shape=dense_shape)
    return tf.sparse.reorder(st)
Example #19
def build_rating_sparse_tensor(ratings_df):
  indices = ratings_df[['user_id', 'movie_id']].values
  values = ratings_df['rating'].values
  return tf.SparseTensor(
      indices=indices,
      values=values,
      dense_shape=[users.shape[0], movies.shape[0]])
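build_rating_sparse_tensor closes over global users and movies frames. A self-contained sketch of the same construction (assuming TF 1.x; the DataFrame and the hard-coded shape are hypothetical):

import pandas as pd
import tensorflow as tf

ratings_df = pd.DataFrame({'user_id': [0, 1, 1],
                           'movie_id': [2, 0, 1],
                           'rating': [5.0, 3.0, 4.0]})
sp_ratings = tf.SparseTensor(
    indices=ratings_df[['user_id', 'movie_id']].values,
    values=ratings_df['rating'].values,
    dense_shape=[2, 3])  # [num_users, num_movies]

with tf.Session() as sess:
    print(sess.run(tf.sparse_tensor_to_dense(sp_ratings)))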
Example #20
    def testCombine(self):
        """Low-level test for the results of combine_observation_code_and_values."""

        observation_code_ids = tf.SparseTensor(indices=[[0, 0, 0], [0, 1, 0],
                                                        [1, 0, 0], [1, 1, 0],
                                                        [1, 2, 0]],
                                               values=tf.constant(
                                                   [1, 1, 3, 0, 1],
                                                   dtype=tf.int64),
                                               dense_shape=[2, 3, 1])

        vocab_size = 5

        expected_result = [[[0, 100.0, 0, 0, 0], [0, 2.3, 0, 0, 0],
                            [0, 0, 0, 0, 0]],
                           [[0, 0, 0, 0.5, 0], [0.0, 0, 0, 0, 0],
                            [0, 4.0, 0, 0, 0]]]
        expected_indicator = [[[0, 1.0, 0, 0, 0], [0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 0]],
                              [[0, 0, 0, 1, 0], [1, 0, 0, 0, 0],
                               [0, 1, 0, 0, 0]]]

        actual_result, actual_indicator = osm.combine_observation_code_and_values(
            observation_code_ids=observation_code_ids,
            observation_values=self.observation_values,
            vocab_size=vocab_size,
            mode=tf.estimator.ModeKeys.TRAIN,
            normalize=False,
            momentum=0.9,
            min_value=-10000000,
            max_value=10000000)
        with self.test_session() as sess:
            a_result, a_indicator = sess.run([actual_result, actual_indicator])
            self.assertAllClose(expected_result, a_result, atol=0.01)
            self.assertAllClose(expected_indicator, a_indicator, atol=0.01)
Example #21
def rfunc(triple_list, ent_num, rel_num):
    head = dict()
    tail = dict()
    rel_count = dict()
    r_mat_ind = list()
    r_mat_val = list()
    head_r = np.zeros((ent_num, rel_num))
    tail_r = np.zeros((ent_num, rel_num))
    for triple in triple_list:
        head_r[triple[0]][triple[1]] = 1
        tail_r[triple[2]][triple[1]] = 1
        r_mat_ind.append([triple[0], triple[2]])
        r_mat_val.append(triple[1])
        if triple[1] not in rel_count:
            rel_count[triple[1]] = 1
            head[triple[1]] = set()
            tail[triple[1]] = set()
            head[triple[1]].add(triple[0])
            tail[triple[1]].add(triple[2])
        else:
            rel_count[triple[1]] += 1
            head[triple[1]].add(triple[0])
            tail[triple[1]].add(triple[2])
    r_mat = tf.SparseTensor(indices=r_mat_ind,
                            values=r_mat_val,
                            dense_shape=[ent_num, ent_num])

    return head, tail, head_r, tail_r, r_mat
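A usage sketch for rfunc (assuming TF 1.x), with a toy list of (head, relation, tail) ID triples:

import tensorflow as tf

triples = [(0, 1, 1), (1, 2, 2)]  # (head entity, relation, tail entity)
head, tail, head_r, tail_r, r_mat = rfunc(triples, ent_num=3, rel_num=3)

with tf.Session() as sess:
    # r_mat stores the relation id at each (head, tail) position.
    print(sess.run(tf.sparse_tensor_to_dense(r_mat)))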
Example #22
 def CreateCombinedFeedDict(
         input_feed_dict,
         baseline_feed_dict=None,
         baseline_transform_info=baseline_transform_info):
     """Combine baseline and input feed dicts into a common feed dict."""
     combined_feed_dict = input_feed_dict.copy()
     if baseline_feed_dict is None:
         baseline_feed_dict = input_feed_dict
     for key, feed_value in baseline_feed_dict.items():
         if isinstance(key, tf.Tensor):
             combined_feed_dict[baseline_transform_info.transformed(
                 key)] = (feed_value)
         elif isinstance(key, six.text_type):
             if six.PY2:
                 tensor = graph.get_tensor_by_name(key.decode())
             else:
                 tensor = graph.get_tensor_by_name(key)
             combined_feed_dict[baseline_transform_info.transformed(
                 tensor)] = (feed_value)
         elif isinstance(key, tf.SparseTensor):
             sparse_transformed_tensor = tf.SparseTensor(
                 baseline_transform_info.transformed(key.indices),
                 baseline_transform_info.transformed(key.values),
                 baseline_transform_info.transformed(key.dense_shape))
             combined_feed_dict[sparse_transformed_tensor] = feed_value
         else:
             raise ValueError('Invalid key type %s in Feed Dict.' %
                              type(key))
     return combined_feed_dict
Example #23
  def get_tensor(self, tensor_dict):
    """Construct a SparseTensor representation of the relation from tensors.

    This can either be called with a decoded tf.Example, in which case this
    returns a sparse tensor for an individual instance, or with the result
    of `batching_loop_results`, in which case it returns a sparse tensor for
    the whole batch.

    Args:
      tensor_dict: A dictionary mapping key names to tensors.

    Returns:
      A SparseTensor representation of the shepherd's data.
    """
    dense_shape = tensor_dict[self.dense_shape_key()]
    source_indices = tensor_dict[self.source_indices_key()]
    dest_indices = tensor_dict[self.dest_indices_key()]
    values = tensor_dict[self.values_key()]

    # 'indices' is a [source_indices.shape[0], 2] tensor with each row representing
    # a (row, column) pair where there is a nonzero entry in the SparseTensor.
    indices = tf.concat([tf.expand_dims(source_indices, 1),
                         tf.expand_dims(dest_indices, 1)], axis=1)

    return tf.sparse_reorder(
        tf.SparseTensor(indices=indices,
                        values=values,
                        dense_shape=dense_shape))
Example #24
    def testAddBOWIntegratedGradientsOps(self):
        with tf.Graph().as_default() as graph:
            # pyformat: disable
            embedding_weights = tf.constant([[1., 3., 5.], [4., 6., 8.],
                                             [4., 5., 4.]])
            batch_size = tf.placeholder_with_default(tf.constant(1, tf.int64),
                                                     [])
            sparse_ids = tf.SparseTensor([[0, 0], [0, 1], [0, 2]], [2, 0, 2],
                                         [batch_size, 3])
            # pyformat: enable
            sparse_embedding = contrib_layers.safe_embedding_lookup_sparse(
                embedding_weights,
                sparse_ids,
                combiner='sum',
                partition_strategy='div')

            vector = tf.constant([1., 2., 4.])
            output_tensor = tf.reduce_sum(vector * sparse_embedding, axis=1)
            embedding_lookup = tf.nn.embedding_lookup(
                embedding_weights,
                tf.sparse_tensor_to_dense(sparse_ids),
                partition_strategy='div')
            bow_attribution_hooks = integrated_gradients.AddBOWIntegratedGradientsOps(
                graph, [embedding_lookup], [sparse_embedding], [],
                output_tensor)
            with tf.Session(graph=graph) as sess:
                sess.run(tf.global_variables_initializer())
                result = sess.run(bow_attribution_hooks['bow_attributions'])
                self.assertTupleEqual(result[0].shape, (3, ))
                # Since the output is a sum of dot products, attributions are simply dot
                # products of the embedding with [1., 2., 4.].
                self.assertAlmostEqual(result[0][0], 30.)
                self.assertAlmostEqual(result[0][1], 27.)
                self.assertAlmostEqual(result[0][2], 30.)
Example #25
	def setupCTC(self):
		"create CTC loss and decoder and return them"
		# BxTxC -> TxBxC
		self.ctcIn3dTBC = tf.transpose(self.rnnOut3d, [1, 0, 2])
		# ground truth text as sparse tensor
		self.gtTexts = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]), tf.placeholder(tf.int32, [None]), tf.placeholder(tf.int64, [2]))

		# calc loss for batch
		self.seqLen = tf.placeholder(tf.int32, [None])
		self.loss = tf.reduce_mean(tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, ctc_merge_repeated=True))

		# calc loss for each element to compute label probability
		self.savedCtcInput = tf.placeholder(tf.float32, shape=[Model.maxTextLen, None, len(self.charList) + 1])
		self.lossPerElement = tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.savedCtcInput, sequence_length=self.seqLen, ctc_merge_repeated=True)

		# decoder: either best path decoding or beam search decoding
		if self.decoderType == DecoderType.BestPath:
			self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen)
		elif self.decoderType == DecoderType.BeamSearch:
			self.decoder = tf.nn.ctc_beam_search_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, beam_width=50, merge_repeated=False)
		elif self.decoderType == DecoderType.WordBeamSearch:
			# import compiled word beam search operation (see https://github.com/githubharald/CTCWordBeamSearch)
			word_beam_search_module = tf.load_op_library('TFWordBeamSearch.so')

			# prepare information about language (dictionary, characters in dataset, characters forming words)
			chars = str().join(self.charList)
			wordChars = open('./model/model/wordCharList.txt').read().splitlines()[0]
			corpus = open('./data/corpus.txt').read()

			# decode using the "Words" mode of word beam search
			self.decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(self.ctcIn3dTBC, dim=2), 50, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8'))
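Because gtTexts is assembled from three placeholders, it is fed one tf.SparseTensorValue per batch at run time. A minimal sketch of that feeding pattern (assuming TF 1.x; the label data is hypothetical):

import tensorflow as tf

# Two encoded label sequences, [1, 2] and [3], as a sparse batch of shape [2, 2].
gt_sparse = tf.SparseTensorValue(indices=[[0, 0], [0, 1], [1, 0]],
                                 values=[1, 2, 3],
                                 dense_shape=[2, 2])
# sess.run(model.loss, feed_dict={model.gtTexts: gt_sparse,
#                                 model.seqLen: ..., ...})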
Example #26
def _sparse_intersect_indices(sp_tensor, required_sp_tensor):
  """Filters timestamps in sp_tensor to those present in required_sp_tensor."""
  # We extend both sp_tensor and required_sp_tensor with each others indices
  # so that they have the same indices.
  # E.g. their dense representation of one batch entry could be:
  # [dummy, dummy, 1 ]
  dummy_value = 'n/a'
  dummy_required_sp_tensor = _extend_with_dummy(
      sp_tensor, required_sp_tensor, dummy_value)
  dummy_sp_tensor = _extend_with_dummy(required_sp_tensor, sp_tensor,
                                       dummy_value)
  # We get rid of dummy values both for indices in the required_sp_tensor and
  # the sp_tensor.
  # First get rid of indices with dummy values in dummy_required_sp_tensor.
  in_required = tf.sparse_retain(
      dummy_sp_tensor,
      tf.logical_not(tf.equal(dummy_required_sp_tensor.values, dummy_value)))
  # Remove empty timesteps so that the timesteps align with the original
  # required_sp_tensor.
  # Then remove the indices with dummy values.
  in_required = tf.sparse_retain(
      _remove_empty_timesteps(in_required),
      tf.logical_not(tf.equal(in_required.values, dummy_value)))
  if sp_tensor.values.dtype != tf.string:
    in_required = tf.SparseTensor(
        indices=in_required.indices, dense_shape=in_required.dense_shape,
        values=tf.strings.to_number(
            in_required.values, out_type=sp_tensor.values.dtype))
  return in_required
Example #27
 def __init__(self,
              input_dim,
              output_dim,
              adj,
              nodes_num,
              dropout_rate,
              is_sparse_input=False,
              use_bias=True,
              activation=None,
              name="alinet"):
     self.input_dim = input_dim
     self.output_dim = output_dim
     self.adjs = [
         tf.SparseTensor(indices=adj[0][0],
                         values=adj[0][1],
                         dense_shape=adj[0][2])
     ]
     self.dropout_rate = dropout_rate
     self.is_sparse_input = is_sparse_input
     self.nodes_num = nodes_num
     self.use_bias = use_bias
     self.activation = activation
     self.name = name
     self.data_type = tf.float32
     self._get_variable()
Example #28
def _from_proto_sparse_tensor(sparse_tensor_proto, process_leafs):
    """Deserializes a `tf.SparseTensor` from `sparse_tensor_proto`.

    Args:
      sparse_tensor_proto: A proto representing a `tf.SparseTensor`.
      process_leafs: A function to be applied to the leaf values of the nested
        structure.

    Returns:
      An instance of `tf.SparseTensor`.
    """
    if not sparse_tensor_proto.HasField("named_tuple"):
        raise base_errors.ModuleInfoError(
            "Error while deserializing a SparseTensor: expected proto tuple.")
    if sparse_tensor_proto.named_tuple.name != _SPARSE_TENSOR_NAME:
        raise base_errors.ModuleInfoError(
            "Error while deserializing a SparseTensor: The name of the tuple "
            "should have been {} but was {}.".format(
                _SPARSE_TENSOR_NAME, sparse_tensor_proto.named_tuple.name))
    named_tuple_map = sparse_tensor_proto.named_tuple.map
    return tf.SparseTensor(
        indices=process_leafs(named_tuple_map["indices"].value),
        values=process_leafs(named_tuple_map["values"].value),
        dense_shape=process_leafs(named_tuple_map["dense_shape"].value),
    )
Example #29
 def __init__(self,
              input_dim,
              output_dim,
              adj,
              num_features_nonzero,
              dropout_rate=0.0,
              name='GCN',
              is_sparse_inputs=False,
              activation=tf.tanh,
              use_bias=True):
     self.activation = activation
     self.input_dim = input_dim
     self.output_dim = output_dim
     self.adjs = [
         tf.SparseTensor(indices=am[0], values=am[1], dense_shape=am[2])
         for am in adj
     ]
     self.num_features_nonzero = num_features_nonzero
     self.dropout_rate = dropout_rate
     self.is_sparse_inputs = is_sparse_inputs
     self.use_bias = use_bias
     self.kernels = list()
     self.bias = list()
     self.name = name
     self.data_type = tf.float32
     self._get_variable()
Example #30
 def call(self, inputs):
     inputs = self.batch_normlization(inputs)
     mapped_inputs = tf.matmul(inputs, self.kernel)
     attention_inputs1 = tf.matmul(inputs, self.kernel1)
     attention_inputs2 = tf.matmul(inputs, self.kernel2)
     con_sa_1 = tf.reduce_sum(tf.multiply(attention_inputs1, inputs),
                              1,
                              keepdims=True)
     con_sa_2 = tf.reduce_sum(tf.multiply(attention_inputs2, inputs),
                              1,
                              keepdims=True)
     con_sa_1 = tf.keras.activations.tanh(con_sa_1)
     con_sa_2 = tf.keras.activations.tanh(con_sa_2)
     if self.dropout_rate > 0.0:
         con_sa_1 = tf.nn.dropout(con_sa_1, self.dropout_rate)
         con_sa_2 = tf.nn.dropout(con_sa_2, self.dropout_rate)
     con_sa_1 = tf.cast(self.adjs[0], dtype=tf.float32) * con_sa_1
     con_sa_2 = tf.cast(self.adjs[0], dtype=tf.float32) * tf.transpose(
         con_sa_2, [1, 0])
     weights = tf.sparse_add(con_sa_1, con_sa_2)
     weights = tf.SparseTensor(indices=weights.indices,
                               values=tf.nn.leaky_relu(weights.values),
                               dense_shape=weights.dense_shape)
     attention_adj = tf.sparse_softmax(weights)
     attention_adj = tf.sparse_reshape(
         attention_adj, shape=[self.nodes_num, self.nodes_num])
     value = tf.sparse_tensor_dense_matmul(attention_adj, mapped_inputs)
     return self.activation(value)