Example #1
def add_gcnLayer(adj,features,in_size,out_size,activation_function=None,adj_sparse = True,features_sparse = False):
    if adj_sparse:
        print('adj sparse')
        adj_ordered = tf.sparse_reorder(adj)
        adj = tf.sparse.to_dense(adj_ordered)
    if features_sparse:
        print('features sparse')
        features_ordered = tf.sparse_reorder(features)
        features = tf.sparse.to_dense(features_ordered)
    print(adj)
    print(features)

    with tf.name_scope('GraphConvLayer'):
        with tf.name_scope('GraphConvLayer_W'):
            Weights = tf.Variable(tf.truncated_normal(shape=[in_size, out_size], mean=0.1, stddev=0.1))
        with tf.name_scope('GraphConvLayer_B'):
            biases = tf.Variable(tf.zeros(shape=[1,out_size])+0.01)
        with tf.name_scope('GraphConvLayer_propagate'):
            adjFeatures = tf.matmul(adj,features)
        with tf.name_scope('GraphConvLayer_conv'):
            Wx_plus_b = tf.matmul(adjFeatures,Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs
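A minimal usage sketch for the layer above; the placeholder shapes and the feature size are assumptions, and it targets TF 1.x where tf.sparse_placeholder and tf.sparse_reorder are available:

# Hedged usage sketch: one GCN layer over a sparse, normalized adjacency (shapes assumed).
adj_ph = tf.sparse_placeholder(tf.float32, name='adj')       # N x N, fed as a SparseTensorValue
feat_ph = tf.placeholder(tf.float32, shape=[None, 1433])     # N x in_size (1433 is an assumption)
hidden = add_gcnLayer(adj_ph, feat_ph, in_size=1433, out_size=16,
                      activation_function=tf.nn.relu,
                      adj_sparse=True, features_sparse=False)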
Example #2
    def build_feat(self):
        placeholders = self.placeholders
        self.plhd_inputs = [
            placeholders['features'], placeholders['support'],
            placeholders['graph_sizes']
        ]
        try:
            self.plhd_inputs[0] = tf.sparse.reorder(self.plhd_inputs[0])
            self.plhd_inputs[1] = tf.sparse.reorder(self.plhd_inputs[1])
        except AttributeError:
            self.plhd_inputs[0] = tf.sparse_reorder(self.plhd_inputs[0])
            self.plhd_inputs[1] = tf.sparse_reorder(self.plhd_inputs[1])

        self.plhd_activations = []
        self.plhd_activations.append(self.plhd_inputs)
        self.plhd_graph_embs = []
        # conv layers
        for conv_layer, pool_layer in zip(self.layers[0], self.layers[1]):
            hidden = conv_layer(self.plhd_activations[-1])
            graph_emb = pool_layer(hidden)
            self.plhd_graph_embs.append(graph_emb[0])
            self.plhd_activations.append(hidden)

        final_graph_emb = tf.concat(self.plhd_graph_embs, axis=1)
        self.plhd_activations.append([final_graph_emb, hidden[1], hidden[2]])

        self.plhd_feat = self.plhd_activations[-1][0]
Example #3
    def calculate_features_wrap(self,indices):
        pyOutput = tf.py_func(self.calculate_features, [indices], [tf.float32, tf.int64, tf.float32, tf.int64] +\
                              [tf.int64]*len(self.poolRatios)+[tf.float32]*len(self.poolRatios)+\
                              [tf.int64]*len(self.poolRatios))
        pyOutput[0].set_shape((self.N,self.C))
        pyOutput[0] = tf.reshape(pyOutput[0], [self.N, self.C])
        V = pyOutput[0]
        A = tf.sparse_reorder(tf.SparseTensor(pyOutput[1],pyOutput[2],[self.N, self.L, self.N]))
        if not self.sparse:
            A = tf.sparse_tensor_to_dense(A)

        pyOutput[3] = tf.one_hot(pyOutput[3],(self.numClasses))
        pyOutput[3].set_shape([self.numClasses])
        label = pyOutput[3]
        Pouts = pyOutput[4:]
        NUM_POOLS = len(self.poolRatios)
        Pidxlist = Pouts[0:NUM_POOLS]
        Pvallist = Pouts[NUM_POOLS:(2*NUM_POOLS)]
        #Pshapelist = Pouts[2*NUM_POOLS:(3*NUM_POOLS)]
        Plist = []
        prevSize = self.N
        for pidx in range(NUM_POOLS):
            currentSize = np.floor(prevSize * self.poolRatios[pidx]).astype(np.int64)
            currentPSparse = tf.sparse_reorder(tf.SparseTensor(Pidxlist[pidx],Pvallist[pidx],[1,prevSize,currentSize]))
            #currentPSparse.set_shape([1,prevSize,currentSize])
            if not self.sparse:
                currentPDense = tf.squeeze(tf.sparse_tensor_to_dense(currentPSparse))
                currentPDense.set_shape([prevSize,currentSize])
                Plist.append(currentPDense)
            else:
                Plist.append(currentPSparse)
            prevSize = currentSize
        return [V,A,label] + Plist
Example #4
    def transformation_tensor(self, splat_vector_tensor, weight_vector_tensor,
                              blur_vector_tensor, n_lattice_tensor, d, n):
        """
        Construct the transformation tensors of Gauss filtering using sparse tensor
        :param splat_vector_tensor:
        :param weight_vector_tensor:
        :param blur_vector_tensor:
        :param n_lattice_tensor:
        :return:
        """
        # first construct the indices of splat matrix from splat_vector_tensor
        ind_tmp = np.repeat(np.arange(n), (d + 1))
        ind_splat = tf.stack(
            [splat_vector_tensor,
             tf.constant(ind_tmp, dtype=tf.int64)],
            axis=1)
        splat_shape = tf.stack(
            [n_lattice_tensor + 1,
             tf.constant(n, dtype=tf.int64)])
        splat_sparse_matrix = tf.sparse_reorder(
            tf.SparseTensor(indices=ind_splat,
                            values=weight_vector_tensor,
                            dense_shape=splat_shape))

        # construct the list of blur matrices
        blur_vector_tensor_reshape = tf.reshape(blur_vector_tensor, (-1, 3))
        blur_vector_tensor_split = tf.split(blur_vector_tensor_reshape, d + 1)
        blur_list = []

        # calculate the blur weight
        shape_tmp = tf.stack([n_lattice_tensor, 1], axis=0)
        ones_tmp = tf.ones(shape=tf.to_int32(shape_tmp), dtype=tf.float32)
        blur_weight = tf.matmul(
            ones_tmp,
            tf.constant([[0.5, 0.25, 0.25]], shape=[1, 3], dtype=tf.float32))
        blur_weight = tf.reshape(blur_weight, shape=[-1])

        for i in range(d + 1):
            blur_vector_tensor_dim = blur_vector_tensor_split[i]
            ind_dim_tmp = tf.slice(blur_vector_tensor_dim,
                                   begin=[0, 0],
                                   size=[-1, 1])
            ind_dim_tmp_tile = tf.tile(ind_dim_tmp, [1, 3])
            ind_blur_dim = tf.stack([
                tf.reshape(ind_dim_tmp_tile, [-1]),
                tf.reshape(blur_vector_tensor_dim, [-1])
            ],
                                    axis=1)

            #tmp = tf.stack([n_lattice_tensor])
            #blur_weight = tf.tile(tf.constant([0.5, 0.25, 0.25]), tmp)

            blur_shape = tf.stack([n_lattice_tensor + 1, n_lattice_tensor + 1])
            blur_list.append(
                tf.sparse_reorder(
                    tf.SparseTensor(indices=ind_blur_dim,
                                    values=blur_weight,
                                    dense_shape=blur_shape)))

        return splat_sparse_matrix, blur_list
Example #5
File: main.py Project: RLWH/Kaggle
def eval_iterator_generator(features,
                            labels=None,
                            test_ids=None,
                            batch_size=2048):
    """An input function for evaluation or prediction"""

    if labels is not None:
        # Labels provided: pair the sparse features with the labels.
        inputs = (tf.cast(tf.sparse_reorder(
            convert_sparse_matrix_to_sparse_tensor(features)),
                          dtype=tf.float32),
                  tf.cast(tf.reshape(tf.convert_to_tensor(labels),
                                     shape=[-1, 1]),
                          dtype=tf.float32))
    else:
        inputs = (tf.reshape(tf.convert_to_tensor(test_ids), shape=[-1, 1]),
                  tf.cast(tf.sparse_reorder(
                      convert_sparse_matrix_to_sparse_tensor(features)),
                          dtype=tf.float32))

    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices(inputs)

    # Batch the examples
    assert batch_size is not None, "batch_size must not be None"
    dataset = dataset.batch(batch_size)

    # Return the read end of the pipeline.
    return dataset.make_initializable_iterator()
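A hedged sketch of how the iterator above might be consumed; convert_sparse_matrix_to_sparse_tensor is defined elsewhere in the source project and is assumed to accept a SciPy sparse matrix, and the shapes and values here are made up:

# Hedged usage sketch: build and drain the evaluation iterator.
import numpy as np
import scipy.sparse as sp
features = sp.random(10000, 500, density=0.01, dtype=np.float32)   # assumed input format
labels = np.random.randint(0, 2, size=10000)
iterator = eval_iterator_generator(features, labels=labels, batch_size=2048)
next_batch = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer)
    batch_features, batch_labels = sess.run(next_batch)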
Example #6
def make_sparse_max_graph_pooling_layer(V, A, P, name=None):
    with tf.variable_scope(name, default_name='Graph-Max-Pooling') as scope:
        #Generate shape array for BN
        #oldVShapeNonTensor = V.get_shape()
        #Generate shape tensor for vertex pooling function I know hacky af
        oldVShape = tf.shape(V, out_type=tf.int64)
        newVShape = tf.stack([oldVShape[0], P.dense_shape[3], oldVShape[2]])
        Ptranspose = tf.sparse_transpose(P, perm=[0, 1, 3, 2])

        Vout = _graphcnn_max_vertex_pool_sparse_module.sparse_max_vertex_pool(
            V, Ptranspose.indices, Ptranspose.values, newVShape)
        Vout.set_shape([V.get_shape()[0].value,\
                        P.get_shape()[3].value,\
                        V.get_shape()[2].value])

        #Change indices to pooled mapping
        print(P.indices[:, 3].get_shape())
        Pcols = P.indices[:, 3]
        newRowidx = tf.gather(Pcols, A.indices[:, 1])
        newColidx = tf.gather(Pcols, A.indices[:, 3])
        newIndices = tf.stack(
            (A.indices[:, 0], newRowidx, A.indices[:, 2], newColidx), axis=1)
        newShape = tf.stack((A.dense_shape[0], P.dense_shape[3],
                             A.dense_shape[2], P.dense_shape[3]),
                            axis=0)

        Adupe = tf.sparse_reorder(
            tf.SparseTensor(newIndices, A.values, newShape))

        #Segment sum to merge duplicate indices
        #Worried about this casting, but matmul doesn't support int64 for some dumb reason
        linearized = tf.cast(
            tf.matmul(
                tf.cast(Adupe.indices, tf.float64),
                tf.cast([[newShape[1] * newShape[2] * newShape[3]],
                         [newShape[2] * newShape[3]], [newShape[3]], [1]],
                        tf.float64)), tf.int64)
        print(linearized.get_shape())

        y, idx = tf.unique(tf.squeeze(linearized))

        # Use the positions of the unique values as the segment ids to
        # get the unique values
        print(idx.get_shape())
        print(Adupe.values.get_shape())
        values = tf.segment_sum(Adupe.values, idx)
        delinearized = tf.stack(
            (y // (newShape[1] * newShape[2] * newShape[3]), y //
             (newShape[2] * newShape[3]) % newShape[1],
             y // newShape[3] % newShape[2], y % newShape[3]),
            axis=1)
        Aout = tf.sparse_reorder(
            tf.SparseTensor(delinearized, values, newShape))
        return Vout, Aout
Example #7
def construct_input(data):
    features = tf.SparseTensor(data[0], data[1], data[2])
    laplacians = tf.SparseTensor(data[3], data[4], data[5])
    try:
        features = tf.sparse.reorder(features)
        laplacians = tf.sparse.reorder(laplacians)
    except AttributeError:
        features = tf.sparse_reorder(features)
        laplacians = tf.sparse_reorder(laplacians)

    return features, laplacians, data[6], data[7], data[8]
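A hedged sketch of the expected layout of `data`: two (indices, values, dense_shape) triples followed by three pass-through fields; the concrete values and the names of the trailing fields are assumptions:

# Hedged sketch of the input layout for construct_input.
data = [
    [[0, 0], [1, 2]], [1.0, 2.0], [3, 3],   # features: indices, values, dense_shape (assumed)
    [[0, 1], [2, 2]], [0.5, 0.5], [3, 3],   # laplacians: indices, values, dense_shape (assumed)
    [0, 1, 0], 3, [True, True, True],       # remaining fields are returned unchanged (names assumed)
]
features, laplacians, labels, num_nodes, mask = construct_input(data)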
Example #8
def main(unused_argv):
    tf.logging.info("{} Flags {}".format('*' * 15, '*' * 15))
    for k, v in FLAGS.flag_values_dict().items():
        tf.logging.info("FLAG `{}`: {}".format(k, v))
    tf.logging.info('*' * (2 * 15 + len(' Flags ')))

    np.random.seed(FLAGS.seed)
    tf.set_random_seed(FLAGS.seed)

    rgat_layer = RGAT(units=FLAGS.units, relations=FLAGS.relations)

    features, supports = get_batch_of_features_supports_values()

    # Route 1: Run RGAT on each element in the batch separately and combine the
    # results
    individual_supports = [
        graph_utils.relational_supports_to_support(d) for d in supports
    ]
    individual_supports = [
        graph_utils.triple_from_coo(s) for s in individual_supports
    ]
    individual_supports = [
        tf.SparseTensor(i, v, ds) for i, v, ds in individual_supports
    ]
    individual_supports = [tf.sparse_reorder(s) for s in individual_supports]

    individual_results = [
        rgat_layer(inputs=f, support=s)
        for f, s in zip(features, individual_supports)
    ]
    individual_results = tf.concat(individual_results, axis=0)

    # Route 2: First combine the batch into a single graph and pass everything
    # through in one go
    combined_features = tf.concat(features, axis=0)

    combined_supports = graph_utils.batch_of_relational_supports_to_support(
        supports)
    combined_supports = graph_utils.triple_from_coo(combined_supports)
    combined_supports = tf.SparseTensor(*combined_supports)
    combined_supports = tf.sparse_reorder(combined_supports)

    combined_results = rgat_layer(inputs=combined_features,
                                  support=combined_supports)

    if np.allclose(combined_results, individual_results):
        tf.logging.info("The approaches match!")
    else:
        raise ValueError(
            "Doing each element in a batch independently does not produce the "
            "same results as doing all the batch in one go. Something has "
            "clearly broken. Please contact the author ASAP :).")
Example #9
def batch_laplacian(v, f, return_sparse=True):
    # v: B x N x 3
    # f: M x 3

    num_b = tf.shape(v)[0]
    num_v = tf.shape(v)[1]
    num_f = tf.shape(f)[0]

    v_a = f[:, 0]
    v_b = f[:, 1]
    v_c = f[:, 2]

    a = tf.gather(v, v_a, axis=1)
    b = tf.gather(v, v_b, axis=1)
    c = tf.gather(v, v_c, axis=1)

    ab = a - b
    bc = b - c
    ca = c - a

    cot_a = -1 * tf.reduce_sum(ab * ca, axis=2) / tf.sqrt(tf.reduce_sum(tf.cross(ab, ca) ** 2, axis=-1))
    cot_b = -1 * tf.reduce_sum(bc * ab, axis=2) / tf.sqrt(tf.reduce_sum(tf.cross(bc, ab) ** 2, axis=-1))
    cot_c = -1 * tf.reduce_sum(ca * bc, axis=2) / tf.sqrt(tf.reduce_sum(tf.cross(ca, bc) ** 2, axis=-1))

    I = tf.tile(tf.expand_dims(tf.concat((v_a, v_c, v_a, v_b, v_b, v_c), axis=0), 0), (num_b, 1))
    J = tf.tile(tf.expand_dims(tf.concat((v_c, v_a, v_b, v_a, v_c, v_b), axis=0), 0), (num_b, 1))

    W = 0.5 * tf.concat((cot_b, cot_b, cot_c, cot_c, cot_a, cot_a), axis=1)

    batch_dim = tf.tile(tf.expand_dims(tf.range(num_b), 1), (1, num_f * 6))

    indices = tf.reshape(tf.stack((batch_dim, J, I), axis=2), (num_b, 6, -1, 3))
    W = tf.reshape(W, (num_b, 6, -1))

    l_indices = [tf.cast(tf.reshape(indices[:, i], (-1, 3)), tf.int64) for i in range(6)]
    shape = tf.cast(tf.stack((num_b, num_v, num_v)), tf.int64)
    sp_L_raw = [tf.sparse_reorder(tf.SparseTensor(l_indices[i], tf.reshape(W[:, i], (-1,)), shape)) for i in range(6)]

    L = sp_L_raw[0]
    for i in range(1, 6):
        L = tf.sparse_add(L, sp_L_raw[i])

    dia_values = tf.sparse_reduce_sum(L, axis=-1) * -1

    I = tf.tile(tf.expand_dims(tf.range(num_v), 0), (num_b, 1))
    batch_dim = tf.tile(tf.expand_dims(tf.range(num_b), 1), (1, num_v))
    indices = tf.reshape(tf.stack((batch_dim, I, I), axis=2), (-1, 3))

    dia = tf.sparse_reorder(tf.SparseTensor(tf.cast(indices, tf.int64), tf.reshape(dia_values, (-1,)), shape))

    return tf.sparse_add(L, dia)
Example #10
def dense_to_sparse(value):
    """Convert a dense tensor to sparse tensor
    
    Args:
      value (Tensor): input dense Tensor
    
    Return:
      SparseTensor: converted sparse tensor

    Examples:

    >>> import tensorflow as tf
    >>> x = np.array([[1,0],[0,1]], dtype=np.float32)
    >>> y = dense_to_sparse(x)
    >>> y = tf.sparse.to_dense(y)
    >>> with tf.Session():
    ...     np.testing.assert_array_equal(y.eval(), x)
    
    """
    x = tf.convert_to_tensor(value)
    x.shape.assert_has_rank(2)
    indices = tf.where(tf.not_equal(x, 0))
    res = tf.SparseTensor(indices=indices,
                          values=tf.gather_nd(x, indices),
                          dense_shape=x.get_shape())
    return tf.sparse_reorder(res)
Example #11
def discriminator_dag_supervised(latent,
                                 dag,
                                 dag_bw,
                                 dag_feats,
                                 sequence_length,
                                 params,
                                 idx,
                                 weights_regularizer=None,
                                 is_training=True):
    # latent (N, L, D)
    with tf.variable_scope('discriminator'):
        h = tf.concat([latent, dag_feats], axis=-1)
        with tf.variable_scope("upward"):
            h = message_passing(latent=h,
                                dag_bw=dag_bw,
                                params=params,
                                fully_connected_fn=sn_fully_connected,
                                weights_regularizer=weights_regularizer,
                                hidden_depth=params.discriminator_layers,
                                dim=params.discriminator_dim)
        with tf.variable_scope("downward"):
            h = message_passing(latent=h,
                                dag_bw=dag,
                                params=params,
                                fully_connected_fn=sn_fully_connected,
                                weights_regularizer=weights_regularizer,
                                hidden_depth=params.discriminator_layers,
                                dim=params.discriminator_dim)
        with tf.variable_scope('output_mlp'):
            if params.lstm_output_discriminator:
                h, _ = lstm(x=h,
                            num_units=params.decoder_dim,
                            bidirectional=True,
                            num_layers=params.decoder_layers,
                            sequence_lengths=sequence_length)
            else:
                for i in range(params.discriminator_layers):
                    h = sn_fully_connected(
                        inputs=h,
                        activation_fn=tf.nn.leaky_relu,
                        weights_regularizer=weights_regularizer,
                        num_outputs=params.discriminator_dim,
                        scope='discriminator_output_{}'.format(i))
            logits = sn_fully_connected(
                inputs=h,
                num_outputs=1,
                activation_fn=None,
                scope='discriminator_logits',
                weights_regularizer=weights_regularizer)  # (N,L,1)
            logits = tf.squeeze(logits, axis=-1)  # (N, L)
            logits_values = tf.gather_nd(params=logits, indices=idx)  # (X,)
            sparse_logits = tf.SparseTensor(values=logits_values,
                                            indices=tf.cast(idx, tf.int64),
                                            dense_shape=tf.cast(
                                                tf.shape(logits), tf.int64))
            sparse_logits = tf.sparse_reorder(sparse_logits)
            dis_values = tf.sparse_reduce_sum(
                sp_input=sparse_logits, axis=-1) / tf.cast(
                    sequence_length, tf.float32)  # (n,)
        return dis_values  # (n,)
Example #12
 def to_matrix(sparse_indices, values, dense_shape):
     sparse_tensor = tf.sparse_reorder(tf.SparseTensor(
         indices=sparse_indices,
         values=tf.ones(sparse_indices.get_shape().as_list()[0]),
         #values=tf.reshape(values, [-1]),
         dense_shape=dense_shape))
     return tf.sparse_tensor_to_dense(sparse_tensor)
Example #13
def _extend_with_dummy(extend_with, to_extend, dummy_value='n/a'):
    """Extends one SparseTensor with dummy_values at positions of other."""
    dense_shape = tf.to_int64(
        tf.concat(
            [[tf.shape(extend_with)[0]],
             [tf.maximum(tf.shape(extend_with)[1],
                         tf.shape(to_extend)[1])],
             [tf.maximum(tf.shape(extend_with)[2],
                         tf.shape(to_extend)[2])]],
            axis=0))
    additional_indices = tf.sets.set_difference(
        tf.SparseTensor(indices=extend_with.indices,
                        values=tf.zeros_like(extend_with.values,
                                             dtype=tf.int32),
                        dense_shape=dense_shape),
        tf.SparseTensor(indices=to_extend.indices,
                        values=tf.zeros([tf.shape(to_extend.indices)[0]],
                                        dtype=tf.int32),
                        dense_shape=dense_shape)).indices
    # Supply defaults for all other indices.
    default = tf.tile(tf.constant([dummy_value]),
                      multiples=[tf.shape(additional_indices)[0]])

    string_value = (tf.as_string(to_extend.values) if
                    to_extend.values.dtype != tf.string else to_extend.values)
    return tf.sparse_reorder(
        tf.SparseTensor(indices=tf.concat(
            [to_extend.indices, additional_indices], axis=0),
                        values=tf.concat([string_value, default], axis=0),
                        dense_shape=dense_shape))
Example #14
    def encode_sparse(self, color_channels):
        # sample down to output size
        a = zoom(color_channels[0], 1 / self.in_out_ratio)
        b = zoom(color_channels[1], 1 / self.in_out_ratio)
        color_channels = np.array([a, b])

        height, width = a.shape[0], a.shape[1]
        dense_shape = (height, width, self.vec_size)
        indices, values = [], []
        for y in range(height):
            for x in range(width):
                # get actual a,b values
                ab = np.array(
                    [[color_channels[0][y, x], color_channels[1][y, x]]])
                # find the 5 closest a,b values from the grid
                distances, ab_indices = self.nbrs.kneighbors(ab)
                vector = np.zeros((self.vec_size))
                # find weights for each of the 5 closest points
                weights = [gaussian(d, 3) for d in distances[0]]
                sum_weights = sum(weights)

                indices += [[x, y, i] for i in ab_indices[0]]
                values += [w / sum_weights for w in weights]
        labels = tf.SparseTensor(indices=tf.constant(indices, dtype=tf.int64),
                                 values=tf.constant(values, dtype=self.dtype),
                                 dense_shape=tf.constant(dense_shape,
                                                         dtype=tf.int64))
        return tf.sparse_reorder(labels)
Example #15
File: models.py Project: yhjflower/tgcn
 def chebyshev5(self, x, L, Fout, K):
     N, M, Fin = x.get_shape()
     N, M, Fin = int(N), int(M), int(Fin)
     # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
     L = scipy.sparse.csr_matrix(L)
     L = graph.rescale_L(L, lmax=2)
     L = L.tocoo()
     indices = np.column_stack((L.row, L.col))
     L = tf.SparseTensor(indices, L.data, L.shape)
     L = tf.sparse_reorder(L)
     # Transform to Chebyshev basis
     x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
     x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
     x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
     def concat(x, x_):
         x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
         return tf.concat([x, x_], axis=0)  # filter_order x M x Fin*N
     if K > 1:
         x1 = tf.sparse_tensor_dense_matmul(L, x0)
         x = concat(x, x1)
     for k in range(2, K):
         x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
         x = concat(x, x2)
         x0, x1 = x1, x2
     x = tf.reshape(x, [K, M, Fin, N])  # filter_order x M x Fin x N
     x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x filter_order
     x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*filter_order
     # Filter: Fin*Fout filters of order filter_order, i.e. one filterbank per feature pair.
     W = self._weight_variable([Fin*K, Fout], regularization=False)
     x = tf.matmul(x, W)  # N*M x Fout
     return tf.reshape(x, [N, M, Fout])  # N x M x Fout
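For reference, the loop above implements the standard Chebyshev recurrence on the rescaled Laplacian (written L_tilde below), with the learned weight matrix mixing the K basis terms:

\bar{x}_0 = x, \qquad \bar{x}_1 = \tilde{L}\,x, \qquad
\bar{x}_k = 2\,\tilde{L}\,\bar{x}_{k-1} - \bar{x}_{k-2}, \qquad
y = \sum_{k=0}^{K-1} \bar{x}_k W_k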
Example #16
def get_bag_vectors(model):
    """
    Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
    binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot
    :param model: PRONTO model
    :type model: modeling.PRONTOModel
    :return: clinical snapshot encoding
    """
    # 1. Evaluate which entries in model.observations are non-zero
    mask = tf.not_equal(model.observations, 0)
    where = tf.where(mask)

    # 2. Get the vocabulary indices for non-zero observations
    vocab_indices = tf.boolean_mask(model.observations, mask)
    vocab_indices = tf.expand_dims(vocab_indices[:], axis=-1)
    vocab_indices = tf.to_int64(vocab_indices)

    # 3. Get batch and sequence indices for non-zero observations
    tensor_indices = where[:, :-1]

    # Concat batch, sequence, and vocabulary indices
    indices = tf.concat([tensor_indices, vocab_indices], axis=-1)

    # Our sparse tensor will be 1 for observed observations, 0 otherwise
    ones = tf.ones_like(indices[:, 0], dtype=tf.float32)

    # The dense shape will be the same as model.observations, but using the entire vocabulary as the final dimension
    dense_shape = model.observations.get_shape().as_list()
    dense_shape[2] = model.vocabulary_size

    # Store as a sparse tensor because they're neat
    st = tf.SparseTensor(indices=indices, values=ones, dense_shape=dense_shape)
    return tf.sparse_reorder(st)
Example #17
def unpooling_layer2x2(x, layer_name, raveled_argmax, out_shape):
    with tf.name_scope(layer_name):
        argmax = unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
        output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

        height = tf.shape(output)[0]
        width = tf.shape(output)[1]
        channels = tf.shape(output)[2]

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

        t2 = tf.squeeze(argmax)
        t2 = tf.stack((t2[0], t2[1]), axis=0)
        t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

        t = tf.concat([t2, t1], 3)
        indices = tf.reshape(t, [((height + 1) // 2) *
                                 ((width + 1) // 2) * channels, 3])

        x1 = tf.squeeze(x)
        x1 = tf.reshape(x1, [-1, channels])
        x1 = tf.transpose(x1, perm=[1, 0])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
        return tf.expand_dims(
            tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Example #18
    def unpool_layer2x2(self, x, argmax):
        x_shape = tf.shape(x)
        output = tf.zeros([x_shape[1] * 2, x_shape[2] * 2, x_shape[3]])

        height = tf.shape(output)[0]
        width = tf.shape(output)[1]
        channels = tf.shape(output)[2]

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [(width // 2) * (height // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, height // 2, width // 2, 1])

        t2 = tf.squeeze(argmax)
        t2 = tf.stack((t2[0], t2[1]), axis=0)
        t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

        t = tf.concat([t2, t1], 3)
        indices = tf.reshape(t, [(height // 2) * (width // 2) * channels, 3])

        x1 = tf.squeeze(x)
        x1 = tf.reshape(x1, [-1, channels])
        x1 = tf.transpose(x1, perm=[1, 0])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
        return tf.expand_dims(
            tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Example #19
 def __init__(self, n_concepts, sparse_ancestors, sparse_ancestors_values):
     super(HierarchicalAggregate, self).__init__()
     self.n_concepts = n_concepts
     self.ancestry_sparse_tensor = tf.sparse_reorder(
         tf.SparseTensor(indices=sparse_ancestors,
                         values=sparse_ancestors_values,
                         dense_shape=[self.n_concepts, self.n_concepts]))
Example #20
def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for tensor sketch operation in compact bilinear
    pooling

    Args:
        rand_h: an 1D numpy array containing indices in interval `[0, output_dim)`.
        rand_s: an 1D numpy array of 1 and -1, having the same shape as `rand_h`.
        output_dim: the output dimensions of compact bilinear pooling.

    Returns:
        a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
    """

    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert(rand_h.ndim==1 and rand_s.ndim==1 and len(rand_h)==len(rand_s))
    assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))

    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix
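A hedged usage sketch for the helper above; the dimensions, the random generation of rand_h/rand_s, and the transpose convention follow how count-sketch projections are typically applied, but are assumptions here:

# Hedged usage sketch: count-sketch projection of a (batch, input_dim) tensor.
import numpy as np
input_dim, output_dim = 512, 8000
rand_h = np.random.randint(output_dim, size=input_dim)     # hash bucket for each input dimension
rand_s = 2 * np.random.randint(2, size=input_dim) - 1      # random signs in {-1, +1}
sketch_matrix = _generate_sketch_matrix(rand_h, rand_s, output_dim)
bottom = tf.random_normal([32, input_dim])
# [output_dim, input_dim] x [input_dim, batch] -> [output_dim, batch], then transpose back
sketched = tf.transpose(tf.sparse_tensor_dense_matmul(
    sketch_matrix, bottom, adjoint_a=True, adjoint_b=True))  # (32, output_dim)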
Example #21
    def chebyshev5(self, x, L, Fout, K_coeff):
        N, M, Fin = K.int_shape(x)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = self.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, -1])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K_coeff x M x Fin*N

        if K_coeff > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K_coeff):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K_coeff, M, Fin, -1])  # K_coeff x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K_coeff
        x = tf.reshape(x, [-1, Fin * K_coeff])  # N*M x Fin*K_coeff
        # Filter: Fin*Fout filters of order K_coeff, i.e. one filterbank per feature pair.
        #  W = self._weight_variable([Fin*K_coeff, Fout], regularization=False)
        x = tf.matmul(x, self.kernel)  # N*M x Fout
        x = tf.reshape(x, [-1, M, Fout])  # N x M x Fout
        return x
Example #22
 def chebyshev5(self, x, L, Fout, K, regularization=False):
     N, M, Fin = x.get_shape()
     N, M, Fin = int(N), int(M), int(Fin)
     # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
     L = scipy.sparse.csr_matrix(L)
     L = graph.rescale_L(L, lmax=2)
     L = L.tocoo()
     indices = np.column_stack((L.row, L.col))
     L = tf.SparseTensor(indices, L.data, L.shape)
     L = tf.sparse_reorder(L)
     # Transform to Chebyshev basis
     x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
     x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
     x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
     def concat(x, x_):
         x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
         return tf.concat([x, x_], 0)  # K x M x Fin*N
     if K > 1:
         x1 = tf.sparse_tensor_dense_matmul(L, x0)
         x = concat(x, x1)
     for k in range(2, K):
         x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
         x = concat(x, x2)
         x0, x1 = x1, x2
     x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
     x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
     x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
     # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
     W = self._weight_variable([Fin*K, Fout], regularization=regularization)
     x = tf.matmul(x, W)  # N*M x Fout
     return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Example #23
    def monomials(self, x, L, Fout, K):
        r"""Convolution on graph with monomials."""
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = sparse.csr_matrix(L)
        lmax = 1.02 * sparse.linalg.eigsh(
            L, k=1, which='LM', return_eigenvectors=False)[0]
        L = utils.rescale_L(L, lmax=lmax)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to monomial basis.
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin * N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N

        for k in range(1, K):
            x1 = tf.sparse_tensor_dense_matmul(L, x0)  # M x Fin*N
            x = concat(x, x1)
            x0 = x1
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [N * M, Fin * K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per output feature.
        W = self._weight_variable([Fin * K, Fout], regularization=True)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Example #24
def top_k_masks(inputs, K):
    """Returns a mask for top_k op.

  Args:
      inputs: A `Tensor`
      K: A `int`

  Returns:
      A `Tensor` representing top_k mask
  """
    values, indices = tf.nn.top_k(inputs, k=K, sorted=False)

    temp_indices = tf.meshgrid(*[
        tf.range(d)
        for d in (tf.unstack(tf.shape(inputs)[:(inputs.get_shape().ndims - 1)]) +
                  [K])
    ],
                               indexing='ij')
    temp_indices = tf.stack(temp_indices[:-1] + [indices], axis=-1)

    full_indices = tf.reshape(temp_indices, [-1, inputs.get_shape().ndims])

    values = tf.reshape(values, [-1])

    mask_st = tf.SparseTensor(indices=tf.cast(full_indices, dtype=tf.int64),
                              values=tf.ones_like(values),
                              dense_shape=tf.cast(tf.shape(inputs), tf.int64))

    return tf.sparse_tensor_to_dense(tf.sparse_reorder(mask_st))
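A hedged usage sketch for the mask helper above (the input values are made up):

# Hedged usage sketch: keep only the 2 largest entries of each row.
scores = tf.constant([[0.1, 0.9, 0.3, 0.5],
                      [0.7, 0.2, 0.8, 0.4]])
mask = top_k_masks(scores, K=2)    # 1.0 where an entry is in its row's top-2, else 0.0
top_only = scores * mask           # all other entries zeroed out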
Example #25
def sparse_transpose(sp_input):
    transposed_indices = tf.reverse(tf.cast(sp_input.indices, tf.int32), [1])
    transposed_values = sp_input.values
    transposed_shape = tf.reverse(tf.cast(sp_input.dense_shape, tf.int32), [0])
    sp_output = tf.SparseTensor(tf.cast(transposed_indices, tf.int64), transposed_values, tf.cast(transposed_shape, tf.int64))
    sp_output = tf.sparse_reorder(sp_output)
    return sp_output
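A hedged 2-D sketch of the helper above (note that TF 1.x also ships tf.sparse_transpose, used in several of the other examples):

# Hedged usage sketch: transpose a 2 x 3 sparse tensor into a 3 x 2 one.
sp = tf.SparseTensor(indices=[[0, 2], [1, 0]], values=[1.0, 2.0], dense_shape=[2, 3])
sp_t = sparse_transpose(sp)   # reordered indices [[0, 1], [2, 0]], dense_shape [3, 2]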
Example #26
def get_multi_hop_neighbor(nodes, edge_types):
    """
  Get multi-hop neighbors with adjacent matrix.

  Args:
    nodes: A 1-D `tf.Tensor` of `int64`.
    edge_types: A list of 1-D `tf.Tensor` of `int32`. Specify edge types to
      filter outgoing edges in each hop.

  Return:
    A tuple of lists: (nodes, adjacencies)
      nodes: A list of N + 1 `tf.Tensor` of `int64`, where N is the number of
        hops. Specifies the node set of each hop, including the root.
      adjacencies: A list of N `tf.SparseTensor` of `int64`. Specifies the
        adjacency matrix between consecutive hops.
  """
    nodes = tf.reshape(nodes, [-1])
    nodes_list = [nodes]
    adj_list = []
    for hop_edge_types in edge_types:
        neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
        next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
        next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
        next_values = weight.values
        next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
        next_shape = tf.cast(next_shape, tf.int64)
        next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
        next_adj = tf.sparse_reorder(next_adj)
        nodes_list.append(next_nodes)
        adj_list.append(next_adj)
        nodes = next_nodes
    return nodes_list, adj_list
Example #27
    def _do_test(self, expected_result, config=None):
        # Make sure that expected_result is an np array
        if not type(expected_result).__module__ == np.__name__:
            expected_result = np.array(expected_result)

        with tf.Graph().as_default():
            inputs, indices, _ = dense_to_sparse(self.input)
            sparse_tensor_reordered = tf.sparse_reorder(inputs)
            sparse_tensor_reshaped = tf.sparse_reshape(sparse_tensor_reordered, self.input.shape)

            W = tf.constant(self.W1, dtype=tf.float32)
            b = tf.constant(self.b1, dtype=tf.float32)

            # Sparse layer
            logits = tf.sparse_tensor_dense_matmul(sparse_tensor_reshaped, W) + b

            # Dense layer
            logits = logits @ tf.constant(self.W2, tf.float32) + tf.constant(self.b2, tf.float32)

            explanation = lrp.lrp(inputs, logits, config)

            with tf.Session() as s:
                expl = s.run(explanation)
                self.assertTrue(np.all(np.equal(indices, expl.indices)),
                                "expected indices did not equal actual indices")
                self.assertTrue(np.allclose(expl.values, expected_result.reshape((-1)), rtol=1.e-3, atol=1.e-3),
                                "expected indices did not equal actual indices")
Example #28
    def chebyshev5(self, x, L, Fout, nK):
        L = L.astype(np.float32)
        _, M, Fin = x.get_shape().as_list()
        # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, -1])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N

        if nK > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, nK):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [nK, M, Fin, -1])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [-1, Fin * nK])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable
        x = tf.matmul(x, W)  # N*M x Fout
        out = tf.reshape(x, [-1, M, Fout])  # N x M x Fout
        return out
Example #29
    def unpool_layer2x2_batch(self, bottom, argmax, top_shape):
        bottom_shape = tf.shape(bottom)

        batch_size = top_shape[0]
        height = top_shape[1]
        width = top_shape[2]
        channels = top_shape[3]

        argmax_shape = tf.to_int64([batch_size, height, width, channels])
        argmax = self.unravel_argmax(argmax, argmax_shape)

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [batch_size*(width//1)*(height//height)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, batch_size, height//height, width//1, 1])
        t1 = tf.transpose(t1, perm=[1, 0, 2, 3, 4])

        t2 = tf.to_int64(tf.range(batch_size))
        t2 = tf.tile(t2, [channels*(width//1)*(height//height)])
        t2 = tf.reshape(t2, [-1, batch_size])
        t2 = tf.transpose(t2, perm=[1, 0])
        t2 = tf.reshape(t2, [batch_size, channels, height//height, width//1, 1])

        t3 = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])

        t = tf.concat([t2, t3, t1], 4)
        indices = tf.reshape(t, [(height//height)*(width//1)*channels*batch_size, 4])

        x1 = tf.transpose(bottom, perm=[0, 3, 1, 2])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(top_shape))
        return tf.sparse_add(tf.zeros(tf.to_int32(delta.shape)), tf.sparse_reorder(delta))
Example #30
    def unpool_layer2x2(self, x, raveled_argmax, out_shape):
        argmax = self.unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
        output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

        height = tf.shape(output)[0]
        width = tf.shape(output)[1]
        channels = tf.shape(output)[2]

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

        t2 = tf.squeeze(argmax)
        t2 = tf.stack((t2[0], t2[1]), axis=0)
        t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

        t = tf.concat([t2, t1], 3)
        indices = tf.reshape(t, [((height + 1) // 2) * ((width + 1) // 2) * channels, 3])

        x1 = tf.squeeze(x)
        x1 = tf.reshape(x1, [-1, channels])
        x1 = tf.transpose(x1, perm=[1, 0])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
        return tf.expand_dims(tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Example #31
def apply_neurosat(cfg, params, args):
    n_vars, n_lits, n_clauses = args.n_vars, 2 * args.n_vars, args.n_clauses

    CL = tf.sparse_reorder(tf.SparseTensor(indices=tf.cast(args.CL_idxs, tf.int64),
                                           values=tf.ones(tf.shape(args.CL_idxs)[0]),
                                           dense_shape=[tf.cast(n_clauses, tf.int64), tf.cast(n_lits, tf.int64)]))

    L  = tf.ones(shape=[2 * n_vars, cfg['d']], dtype=tf.float32) * params.L_init_scale
    C  = tf.ones(shape=[n_clauses, cfg['d']], dtype=tf.float32) * params.C_init_scale

    LC = tf.sparse_transpose(CL)

    def flip(lits): return tf.concat([lits[n_vars:, :], lits[0:n_vars, :]], axis=0)

    for t in range(cfg['n_rounds']):
        C_old, L_old = C, L

        LC_msgs = tf.sparse_tensor_dense_matmul(CL, L, adjoint_a=False) * params.LC_scale
        C       = params.C_updates[t].forward(tf.concat([C, LC_msgs], axis=-1))
        C       = tf.check_numerics(C, message="C after update")
        C       = tfutil.normalize(C, axis=cfg['norm_axis'], eps=cfg['norm_eps'])
        C       = tf.check_numerics(C, message="C after norm")
        if cfg['res_layers']: C = C + C_old

        CL_msgs = tf.sparse_tensor_dense_matmul(LC, C, adjoint_a=False) * params.CL_scale
        L       = params.L_updates[t].forward(tf.concat([L, CL_msgs, flip(L)], axis=-1))
        L       = tf.check_numerics(L, message="L after update")
        L       = tfutil.normalize(L, axis=cfg['norm_axis'], eps=cfg['norm_eps'])
        L       = tf.check_numerics(L, message="L after norm")
        if cfg['res_layers']: L = L + L_old

    V         = tf.concat([L[0:n_vars, :], L[n_vars:, :]], axis=1)
    V_scores  = params.V_score.forward(V) # (n_vars, 1)

    return NeuroSATGuesses(pi_core_var_logits=tf.squeeze(V_scores))
Example #32
def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for tensor/count sketch operation,
    which is random feature projection from input_dim-->output_dim.

    Parameters
    ----------
    rand_h: array, shape=(input_dim,)
        Vector containing indices in interval `[0, output_dim)`.
    rand_s: array, shape=(input_dim,)
        Vector containing values of 1 and -1.
    output_dim: int
        The dimensions of the count sketch vector representation.
    Returns
    -------
    sparse_sketch_matrix : SparseTensor
        A sparse matrix of shape [input_dim, output_dim] for count sketch.
    """

    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert(rand_h.ndim==1 and rand_s.ndim==1 and len(rand_h)==len(rand_s))
    assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))

    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix
Example #33
    def get_init_grad(self, name, num_topics, num_words, num_docs, alpha):
        U = tf.placeholder(dtype=tf.float32,
                           shape=[num_words, num_topics],
                           name="U-%s" % (name))
        V = tf.placeholder(dtype=tf.float32,
                           shape=[num_topics, num_docs],
                           name="V-%s" % (name))
        D = tf.sparse_reorder(self.D)

        VVT = tf.matmul(V, V, transpose_b=True)
        UTU = tf.matmul(U, U, transpose_a=True)
        DVT = self.D_mul_U(D, tf.transpose(V), num_topics)
        eye = tf.eye(num_topics, dtype=tf.float32)
        u_grads = [
            # 2.0 * tf.matmul(UV - D, V, transpose_b=True),
            2.0 * (tf.matmul(U, VVT) - DVT),
            4.0 * alpha * tf.matmul(U, UTU - eye)
        ]

        UTD = tf.transpose(self.D_mul_U(tf.sparse_transpose(D), U, num_topics))
        v_grads = [2.0 * (tf.matmul(UTU, V) - UTD)]

        u_grads = tf.reduce_sum(tf.stack(u_grads, axis=0), axis=0)
        v_grads = tf.reduce_sum(tf.stack(v_grads, axis=0), axis=0)

        # loss = tf.reduce_sum([
        #     tf.reduce_sum(tf.norm(D - UV, ord=2)),
        #     tf.reduce_sum(tf.norm(tf.matmul(U, U, transpose_a=True) - eye, ord=2))
        # ])
        UV = tf.matmul(U, V[:, 0:100])
        loss = tf.reduce_mean(tf.norm(self.Ds - UV, ord=2))
        return u_grads, v_grads, loss, [U, V]
Example #34
  def testAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      sp_output = tf.sparse_reorder(input_val)

      output_val = sess.run(sp_output)
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.shape, input_val.shape)
Example #35
  def testFeedAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      sp_output = tf.sparse_reorder(sp_input)

      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
Example #36
  def testOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
      for _ in range(5):  # To test various random permutations
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_output = tf.sparse_reorder(input_val)

        output_val = sess.run(sp_output)
        self.assertAllEqual(output_val.indices, expected_output_val.indices)
        self.assertAllEqual(output_val.values, expected_output_val.values)
        self.assertAllEqual(output_val.shape, expected_output_val.shape)
Example #37
File: models.py Project: hyzcn/cnn_graph
 def __init__(self, L, F, K):
     super().__init__()
     L = graph.rescale_L(L, lmax=2)  # Graph Laplacian, M x M
     L = L.tocoo()
     data = L.data
     indices = np.empty((L.nnz, 2))
     indices[:,0] = L.row
     indices[:,1] = L.col
     L = tf.SparseTensor(indices, data, L.shape)
     self.L = tf.sparse_reorder(L)
     self.F = F  # Number of filters
     self.K = K  # Polynomial order, i.e. filter size (number of hops)
Example #38
  def testGradients(self):
    with self.test_session(use_gpu=False):
      for _ in range(5):  # To test various random permutations
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_input = tf.SparseTensor(
            input_val.indices, input_val.values, input_val.dense_shape)
        sp_output = tf.sparse_reorder(sp_input)

        err = tf.test.compute_gradient_error(
            sp_input.values,
            input_val.values.shape,
            sp_output.values,
            input_val.values.shape,
            x_init_value=input_val.values)
        self.assertLess(err, 1e-11)
Example #39
File: tensor_ops.py Project: PFCM/rnns
def random_sparse_tensor(shape, sparsity, stddev=0.15, name='random-sparse'):
    """Returns a sparse tensor with a set sparsity but
    with random indices and values.

    Values are from a normal with mean 0 and given std deviation.

    Args:
        shape: list of ints, the final shape of the
            tensor.
        sparsity: scalar, If it is an integer > 0 it is assumed to be the
            number of elements in the sparse tensor, otherwise if it is a
            float in [0, 1] it is treated as a fraction of elements to set.
        stddev: the standard deviation of the values.
        name: the name of the tensor
    """
    if int(sparsity) != 0:
        logger.info('assuming sparsity is number of elements')
        num_elements = int(sparsity)
    elif sparsity <= 0 or sparsity >= 1:
        raise ValueError(
            'sparsity {} is out of range (0-1)'.format(sparsity))
    else:
        logger.info('assuming sparsity (%.3f) is fraction', sparsity)

        size = 1
        for dim in shape:
            size *= dim
        # now how many non-zero
        num_elements = int(size * sparsity)
        logger.info('(amounts to %d elements)', num_elements)
    # the first thing we need are random indices
    # it's a bit hard to do this without the possibility of repeats
    idces = tf.stack([tf.cast(
        tf.get_variable(name+'_idcs{}'.format(i),
                        shape=[num_elements],
                        initializer=tf.random_uniform_initializer(
                            maxval=dim,
                            dtype=tf.float32),
                        dtype=tf.float32),
        tf.int64)
                     for i, dim in enumerate(shape)])
    idces = tf.transpose(idces)  # should check for repeats?
    # and now values
    vals = tf.get_variable(name+'values',
                           [num_elements],
                           initializer=tf.random_normal_initializer(
                               stddev=stddev))
    return tf.sparse_reorder(tf.SparseTensor(idces, vals, shape))
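A hedged usage sketch for the helper above (it creates variables internally, so an initializer run is needed, and the module's logger is assumed to be configured):

# Hedged usage sketch: a 100 x 100 sparse tensor with roughly 5% non-zero entries.
sp_weights = random_sparse_tensor([100, 100], sparsity=0.05, name='sparse-W')
dense_input = tf.random_normal([100, 32])
output = tf.sparse_tensor_dense_matmul(sp_weights, dense_input)   # (100, 32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(output).shape)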
Example #40
    def unpool_layer2x2_batch(self, x, argmax):
        '''
        Args:
            x: 4D tensor of shape [batch_size x height x width x channels]
            argmax: A Tensor of type Targmax. 4-D. The flattened indices of the max
            values chosen for each output.
        Return:
            4D output tensor of shape [batch_size x 2*height x 2*width x channels]
        '''
        x_shape = tf.shape(x)
        out_shape = [x_shape[0], x_shape[1]*2, x_shape[2]*2, x_shape[3]]

        batch_size = out_shape[0]
        height = out_shape[1]
        width = out_shape[2]
        channels = out_shape[3]

        argmax_shape = tf.to_int64([batch_size, height, width, channels])
        argmax = unravel_argmax(argmax, argmax_shape)

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [batch_size*(width//2)*(height//2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, batch_size, height//2, width//2, 1])
        t1 = tf.transpose(t1, perm=[1, 0, 2, 3, 4])

        t2 = tf.to_int64(tf.range(batch_size))
        t2 = tf.tile(t2, [channels*(width//2)*(height//2)])
        t2 = tf.reshape(t2, [-1, batch_size])
        t2 = tf.transpose(t2, perm=[1, 0])
        t2 = tf.reshape(t2, [batch_size, channels, height//2, width//2, 1])

        t3 = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])

        t = tf.concat([t2, t3, t1], 4)
        indices = tf.reshape(t, [(height//2)*(width//2)*channels*batch_size, 4])

        x1 = tf.transpose(x, perm=[0, 3, 1, 2])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(out_shape))
        return tf.sparse_tensor_to_dense(tf.sparse_reorder(delta))
Example #41
 def transfer_fn(self, xy):
     # A function that takes the control pulses with a smaller timestep and interpolates between them to generate the simulation weights
     indices=[]
     values=[]
     shape=[self.sys_para.steps,self.sys_para.control_steps]
     dt=self.sys_para.dt
     Dt=self.sys_para.Dt
 
 # Cubic Splines
     for ll in range (self.sys_para.steps):
         jj=self.get_j(ll)
         tao= ll*dt - jj*Dt - 0.5*Dt
         if jj >= 1:
             indices.append([int(ll),int(jj-1)])
             temp= -(tao/(2*Dt))*((tao/Dt)-1)**2
             values.append(temp)
             
         if jj >= 0:
             indices.append([int(ll),int(jj)])
             temp= 1+((3*tao**3)/(2*Dt**3))-((5*tao**2)/(2*Dt**2))
             values.append(temp)
             
         if jj+1 <= self.sys_para.control_steps-1:
             indices.append([int(ll),int(jj+1)])
             temp= ((tao)/(2*Dt))+((4*tao**2)/(2*Dt**2))-((3*tao**3)/(2*Dt**3))
             values.append(temp)
            
         if jj+2 <= self.sys_para.control_steps-1:
             indices.append([int(ll),int(jj+2)])
             temp= ((tao**3)/(2*Dt**3))-((tao**2)/(2*Dt**2))
             values.append(temp)
             
         
     T1=tf.SparseTensor(indices, values, shape)  
     T2=tf.sparse_reorder(T1)
     T=tf.sparse_tensor_to_dense(T2)
     temp1 = tf.matmul(T,tf.reshape(xy[0,:],[self.sys_para.control_steps,1]))
     temp2 = tf.matmul(T,tf.reshape(xy[1,:],[self.sys_para.control_steps,1]))
     xys = tf.concat([temp1, temp2], 1)
     return tf.transpose(xys)
Example #42
 def preview_like(self, inp):
   """Generate a preview image of the states of the particle filter"""
   indicies, onscreen = self.project()
   indicies = tf.to_int64(tf.boolean_mask(indicies, onscreen))
   probs = tf.gather_nd(inp, indicies)
   return tf.sparse_tensor_to_dense(tf.sparse_reorder(tf.SparseTensor(indicies, probs, inp.get_shape())), validate_indices = False)
Example #43
    def __init__(self, config, ont, word_model):
        #print("Creating the model graph")
        tf.reset_default_graph()
        self.ont = ont
        self.word_model = word_model

        ######
        self.raw_data = ([(x,c) for c in self.ont.names for x in self.ont.names[c]])
        self.anchors, self.anchors_len = self.phrase2vec([x[0] for x in self.raw_data], config.max_sequence_length)

        ##
        #config.concepts_size = len(self.ont.concepts) +1
        ##

        self.config = config

        ### Inputs ###
        self.label = tf.placeholder(tf.int32, shape=[None])
        self.class_weights = tf.Variable(tf.ones([config.concepts_size]), False)

        self.seq = tf.placeholder(tf.float32, shape=[None, config.max_sequence_length, word_model.dim])
        self.seq_len = tf.placeholder(tf.int32, shape=[None])
        self.lr = tf.Variable(config.lr, trainable=False)
        self.is_training = tf.placeholder(tf.bool)

        self.ancestry_sparse_tensor = tf.sparse_reorder(tf.SparseTensor(indices = ont.sparse_ancestrs, values = [1.0]*len(ont.sparse_ancestrs), dense_shape=[config.concepts_size, config.concepts_size]))

        #######################
        ## Phrase embeddings ##
        #######################

        '''
        layer1 = tf.layers.conv1d(self.seq, self.config.cl1, 1, activation=tf.nn.elu,\
                kernel_initializer=tf.random_normal_initializer(0.0,0.1),\
                bias_initializer=tf.random_normal_initializer(stddev=0.01), use_bias=True)

        layer2 = tf.layers.dense(tf.reduce_max(layer1, [1]), self.config.cl2, activation=tf.nn.relu,\
        #layer2 = tf.layers.dense(tf.reduce_max(layer1, [1]), self.config.cl2,\
                kernel_initializer=tf.random_normal_initializer(0.0,stddev=0.1),
                bias_initializer=tf.random_normal_initializer(0.0,stddev=0.01), use_bias=True)

        #self.seq_embedding = tf.nn.l2_normalize(layer2  , dim=1)
        '''
        self.seq_embedding = tf.nn.l2_normalize(tf.reduce_sum(self.seq, axis=1), axis=1)

        ########################
        ## Concept embeddings ##
        ########################
        '''
        self.embeddings = tf.get_variable("embeddings", shape = [self.config.concepts_size, self.config.cl2], initializer = tf.random_normal_initializer(stddev=0.1))
        #self.embeddings = tf.nn.l2_normalize(self.embeddings, dim=1)
        self.aggregated_embeddings = tf.sparse_tensor_dense_matmul(self.ancestry_sparse_tensor, self.embeddings) 
        if config.flat:
            aggregated_w = self.embeddings
        else:
            aggregated_w = self.aggregated_embeddings

        last_layer_b = tf.get_variable('last_layer_bias', shape = [self.config.concepts_size], initializer = tf.random_normal_initializer(stddev=0.001))

        self.score_layer = tf.matmul(self.seq_embedding, tf.transpose(aggregated_w)) + last_layer_b

        ########################
        ########################
        ########################

        label_one_hot = tf.one_hot(self.label, config.concepts_size)
        self.loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(self.label, self.score_layer)) 
        #self.loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(tf.one_hot(self.label, config.concepts_size), self.score_layer)) 
        #self.loss =  -tf.reduce_mean(tf.reduce_sum(label_one_hot*tf.log(0.001+ tf.sigmoid(self.score_layer)) + (1-label_one_hot)*tf.log(0.001+1-tf.sigmoid(self.score_layer))/(config.concepts_size-1), axis=-1))


        self.pred = tf.nn.softmax(self.score_layer)
        #self.pred = tf.nn.sigmoid(self.score_layer)
        self.agg_pred, _ =  tf.nn.top_k(tf.transpose(tf.sparse_tensor_dense_matmul(tf.sparse_transpose(self.ancestry_sparse_tensor), tf.transpose(self.pred))), 2)

        self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
        '''

        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        #print("initializing")
        self.sess.run(tf.global_variables_initializer())
Example #44
File: linear.py Project: hycis/TensorGraph
 def _train_fprop(self, state_below):
     idx, val = state_below
     X = tf.SparseTensor(tf.cast(idx, 'int64'), val, dense_shape=[self.batchsize, self.prev_dim])
     X_order = tf.sparse_reorder(X)
     XW = tf.sparse_tensor_dense_matmul(X_order, self.W, adjoint_a=False, adjoint_b=False)
     return tf.add(XW, self.b)