コード例 #1
0
def sparse_dropout(x, keep_prob, noise_shape):
    """Randomly drop nonzeros of a SparseTensor, rescaling survivors by 1/keep_prob."""
    # Entry i survives iff keep_prob + U_i >= 1, i.e. with probability keep_prob.
    survivors = tf.floor(keep_prob + tf.random_uniform(noise_shape))
    kept = tf.sparse_retain(x, tf.cast(survivors, dtype=tf.bool))
    return kept * (1./keep_prob)
コード例 #2
0
ファイル: layers.py プロジェクト: SoheilaMolaei/COOLnorm
    def call(self, x, idx=0):
        """Dropout on the input, then a graph-convolution forward pass.

        Applies manual sparse dropout when the input is a SparseTensor,
        multiplies by each kernel, propagates through each support matrix,
        sums the results, and finishes with optional bias and activation.
        """
        if self.sparse_inputs:
            # Manual inverted dropout over the nonzero values of x.
            noise = 1 - self.dropout + tf.random_uniform(tf.shape(x.values))
            keep_mask = tf.cast(tf.floor(noise), dtype=tf.bool)
            x = tf.sparse_retain(x, keep_mask) * (1. / (1 - self.dropout))
        else:
            x = tf.nn.dropout(x, rate=self.dropout)

        supports = []
        for kernel, support_mat in zip(self.kernel, self.support):
            if self.featureless:
                pre_sup = kernel
            else:
                # Sparse inputs need the sparse-dense matmul variant.
                matmul = (tf.sparse_tensor_dense_matmul
                          if self.sparse_inputs else tf.matmul)
                pre_sup = matmul(x, kernel)

            if self.model not in ('COOL', 'COOLnorm'):
                raise RuntimeError('unknown model')
            supports.append(tf.sparse_tensor_dense_matmul(support_mat, pre_sup))

        output = tf.add_n(supports)

        if self.use_bias:
            output += self.bias

        if self.activation is not None:
            output = self.activation(output)

        return output
コード例 #3
0
ファイル: layers.py プロジェクト: Eilene/gcn
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors."""
    noise = keep_prob + tf.random_uniform(noise_shape)
    mask = tf.cast(tf.floor(noise), dtype=tf.bool)
    return tf.sparse_retain(x, mask) * (1./keep_prob)
コード例 #4
0
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Drop each nonzero of SparseTensor x independently with prob 1-keep_prob."""
    keep_mask = tf.cast(
        tf.floor(tf.random_uniform([num_nonzero_elems]) + keep_prob),
        dtype=tf.bool)
    return tf.sparse_retain(x, keep_mask) * (1. / keep_prob)
コード例 #5
0
ファイル: layer.py プロジェクト: miao3723/gcnMicrobe1
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Sparse-tensor dropout: keep each nonzero with probability keep_prob."""
    # Uniform noise shifted by keep_prob; floor() yields a 0/1 keep indicator.
    mask_vals = tf.floor(keep_prob + tf.random_uniform([num_nonzero_elems]))
    keep_mask = tf.cast(mask_vals, dtype=tf.bool)
    # Rescale survivors so the expected value matches the undropped tensor.
    return tf.sparse_retain(x, keep_mask) * (1. / keep_prob)
コード例 #6
0
ファイル: util.py プロジェクト: burakbayramli/classnotes
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Inverted dropout over the num_nonzero_elems values of a SparseTensor."""
    bern = tf.floor(keep_prob + tf.random_uniform([num_nonzero_elems]))
    retained = tf.sparse_retain(x, tf.cast(bern, dtype=tf.bool))
    return retained * (1./keep_prob)
コード例 #7
0
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors.

    Each of the `noise_shape` nonzero entries of `x` is kept independently
    with probability `keep_prob`; the survivors are scaled by 1/keep_prob so
    the expected value of every entry is unchanged (inverted dropout).
    """
    # keep_prob + U with U ~ Uniform[0, 1): floor(...) is 1 with probability
    # keep_prob and 0 otherwise, giving a Bernoulli keep indicator.
    noise = keep_prob + tf.random_uniform(noise_shape)
    keep_mask = tf.cast(tf.floor(noise), dtype=tf.bool)
    # sparse_retain keeps only the nonzero values whose mask entry is True;
    # the dense shape of the result is the same as the input's.
    retained = tf.sparse_retain(x, keep_mask)
    return retained * (1. / keep_prob)
コード例 #8
0
ファイル: layers.py プロジェクト: ventr1c/GCN-LPA
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for a SparseTensor.

    NOTE(review): noise_shape is wrapped in a list here, so it appears to be
    a scalar nonzero count rather than a shape — confirm against callers.
    """
    keep = tf.cast(
        tf.floor(keep_prob + tf.random_uniform([noise_shape], dtype=tf.float64)),
        dtype=tf.bool)
    res = tf.sparse_retain(x, keep)
    res /= keep_prob
    return res
コード例 #9
0
ファイル: model.py プロジェクト: kiminh/IC-GAR
 def _dropout_sparse(self, X, keep_prob, n_nonzero):
     """Apply inverted dropout to sparse tensor X over its n_nonzero entries."""
     keep_mask = tf.cast(
         tf.floor(keep_prob + tf.random_uniform([n_nonzero])), dtype=tf.bool)
     return tf.sparse_retain(X, keep_mask) * tf.div(1., keep_prob)
コード例 #10
0
 def node_dropout(self, adj_matrix, num_value, keep_prob):
     """Drop nonzero adjacency entries with prob 1-keep_prob, rescale the rest."""
     survived = tf.floor(keep_prob + tf.random_uniform([num_value]))
     mask = tf.cast(survived, dtype=tf.bool)
     return tf.sparse_retain(adj_matrix, mask) * tf.div(1.0, keep_prob)
コード例 #11
0
 def node_dropout_(self, adj_i, adj_i_sp):
     """Node dropout on adj_i_sp with keep probability 1 - self.node_dropout."""
     keep_prob = 1.0 - self.node_dropout
     noise = keep_prob + tf.random_uniform((adj_i.nnz, ))
     keep_mask = tf.cast(tf.floor(noise), dtype=tf.bool)
     return tf.sparse_retain(adj_i_sp, keep_mask) * tf.div(1.0, keep_prob)
コード例 #12
0
ファイル: networkUnit.py プロジェクト: Tinky2013/GraphVAE
def dropoutSparse(x, keepProb, numNonzeroElems):
    """Dropout for sparse tensors """
    keepMask = tf.cast(
        tf.floor(keepProb + tf.random_uniform([numNonzeroElems])),
        dtype=tf.bool)
    return tf.sparse_retain(x, keepMask) * (1. / keepProb)
コード例 #13
0
ファイル: layers.py プロジェクト: SherlockedTxh/DGL_GAT
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors."""
    # floor() of shifted uniform noise acts as a 0/1 keep indicator.
    indicator = tf.floor(keep_prob + tf.random_uniform(noise_shape))
    kept = tf.sparse_retain(x, tf.cast(indicator, dtype=tf.bool))
    return kept * (1. / keep_prob)
コード例 #14
0
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors (keep_prob is a scalar, e.g. 0.9)."""
    scale = 1. / keep_prob
    keep = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)),
                   dtype=tf.bool)
    # sparse_retain keeps the nonzero values at the True positions of the mask.
    return tf.sparse_retain(x, keep) * scale
コード例 #15
0
ファイル: layers.py プロジェクト: harvardchen/UCD
def dropout_sparse(x, rate, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    keep_prob = 1 - rate
    mask = tf.cast(
        tf.floor(keep_prob + tf.random_uniform([num_nonzero_elems])),
        dtype=tf.bool)
    return tf.sparse_retain(x, mask) * (1. / keep_prob)
コード例 #16
0
ファイル: layers.py プロジェクト: habedi/link-prediction
def dropout_sparse(x, keep_prob, num_nonzero_elems, dtype=tf.float32):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    # Cast keep_prob up front so the addition happens in the requested dtype.
    noise = tf.random_uniform([num_nonzero_elems], dtype=dtype)
    keep_mask = tf.cast(tf.floor(tf.cast(keep_prob, dtype=dtype) + noise),
                        dtype=tf.bool)
    kept = tf.sparse_retain(x, keep_mask)
    return tf.cast(kept, dtype) * tf.cast((1./keep_prob), dtype)
コード例 #17
0
ファイル: tool.py プロジェクト: zjfng1733/NeuRec-1
def dropout_sparse(tf_sp_mat, keep_prob, nnz):
    """Dropout for sparse tensors.
    """
    keep_mask = tf.cast(tf.floor(keep_prob + tf.random_uniform([nnz])),
                        dtype=tf.bool)
    return tf.sparse_retain(tf_sp_mat, keep_mask) * (1.0 / keep_prob)
コード例 #18
0
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    # An entry is kept iff keep_prob + uniform() >= 1, i.e. with prob keep_prob.
    rand = keep_prob + tf.random_uniform([num_nonzero_elems])
    mask = tf.cast(tf.floor(rand), dtype=tf.bool)
    return tf.sparse_retain(x, mask) * (1. / keep_prob)
コード例 #19
0
def dropout_sparse(x, keep_prob, num_nonzero_elems, dtype=tf.float32):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    base = tf.cast(keep_prob, dtype=dtype)
    noise = tf.random_uniform([num_nonzero_elems], dtype=dtype)
    mask = tf.cast(tf.floor(base + noise), dtype=tf.bool)
    retained = tf.sparse_retain(x, mask)
    return tf.cast(retained, dtype) * tf.cast((1. / keep_prob), dtype)
コード例 #20
0
ファイル: mixhop_model.py プロジェクト: zbn123/mixhop
def sparse_dropout(x, drop_prob, num_entries, is_training):
    """Dropout for sparse tensors; a no-op mask when is_training is false."""
    keep_prob = 1.0 - drop_prob
    # At test time is_test_float == 1.0, which pushes every mask value past
    # 1 so floor() keeps all entries.
    is_test_float = 1.0 - tf.cast(is_training, tf.float32)
    noise = is_test_float + keep_prob + tf.random_uniform([num_entries])
    kept = tf.sparse_retain(x, tf.cast(tf.floor(noise), dtype=tf.bool))
    # Rescale by 1/keep_prob while training; the denominator is 1 at test time.
    return kept * (1. / tf.maximum(is_test_float, keep_prob))
コード例 #21
0
ファイル: layers.py プロジェクト: puppyapple/gcn
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors."""
    # floor(keep_prob + U) is 1 with probability keep_prob (U ~ Uniform[0,1)).
    bern = tf.floor(keep_prob + tf.random_uniform(noise_shape))
    # x is a SparseTensor; the boolean mask selects which nonzeros survive.
    survivors = tf.sparse_retain(x, tf.cast(bern, dtype=tf.bool))
    return survivors * (1. / keep_prob)
コード例 #22
0
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors.

    Per the original caller note, invoked as
    (x, 1 - self.dropout, self.num_features_nonzero).
    """
    keep_mask = tf.cast(tf.floor(tf.random_uniform(noise_shape) + keep_prob),
                        dtype=tf.bool)
    return tf.sparse_retain(x, keep_mask) * (1./keep_prob)
コード例 #23
0
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors."""
    keep_mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)),
                        dtype=tf.bool)
    kept = tf.sparse_retain(x, keep_mask)
    # Rebuild the SparseTensor explicitly so the 1/keep_prob scaling is
    # applied to the values alone.
    return tf.SparseTensor(indices=kept.indices,
                           values=kept.values / keep_prob,
                           dense_shape=kept.dense_shape)
コード例 #24
0
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors.

    Survivors are multiplied by 1/keep_prob (not keep_prob) so the expected
    value of each entry is unchanged — standard inverted dropout.
    """
    mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)),
                   dtype=tf.bool)
    remaining = tf.sparse_retain(x, mask)
    return remaining * (1. / keep_prob)
コード例 #25
0
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    noise = keep_prob + tf.random_uniform([num_nonzero_elems])
    # Cast the floored noise to bool: True entries are retained.
    retain = tf.cast(tf.floor(noise), dtype=tf.bool)
    return tf.sparse_retain(x, retain) * (1. / keep_prob)
コード例 #26
0
def dropout_sparse(X, keep_prob, num_nonzero_elems):
    """
    Dropout for sparse inputs.
    """
    # Adding keep_prob shifts the uniform noise so that floor(noise) is
    # 1 (True) with probability keep_prob and 0 (False) otherwise.
    noise = keep_prob + tf.random_uniform([num_nonzero_elems])
    keep_mask = tf.cast(tf.floor(noise), dtype=tf.bool)
    return tf.sparse_retain(X, keep_mask) * (1. / keep_prob)
コード例 #27
0
    def _dropout_sparse(self, X, keep_prob, n_nonzero_elems):
        """
        Dropout for sparse tensors.
        """
        # Bernoulli(keep_prob) keep mask from shifted uniform noise.
        keep_mask = tf.cast(
            tf.floor(keep_prob + tf.random_uniform([n_nonzero_elems])),
            dtype=tf.bool)
        # Keep only the masked nonzeros, then rescale to preserve expectation.
        return tf.sparse_retain(X, keep_mask) * tf.div(1., keep_prob)
コード例 #28
0
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """
    Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)

    x: input SparseTensor
    keep_prob: probability of keeping each nonzero element
    num_nonzero_elems: number of nonzero elements in the sparse matrix
    """
    keep = tf.cast(
        tf.floor(keep_prob + tf.random_uniform([num_nonzero_elems])),
        dtype=tf.bool)
    return tf.sparse_retain(x, keep) * (1. / keep_prob)
コード例 #29
0
def sparse_dropout(x, keep_prob, noise_shape):
    """
    Dropout for a sparse matrix.

    keep_prob: scalar keep probability.
    noise_shape: shape of the random mask (per the original note, [N, M]).
    """
    bern = tf.floor(keep_prob + tf.random_uniform(noise_shape))
    # Boolean mask over the nonzeros: True keeps a value, False drops it.
    mask = tf.cast(bern, dtype=tf.bool)
    return tf.sparse_retain(x, mask) * (1. / keep_prob)
コード例 #30
0
def test_sparse():
    """
    Exercise the SparseTensor API: indicator conversion, concatenation,
    reordering, retaining a subset, and empty-row filling.
    :return:
    """
    # A small 2x3x4 sparse tensor.
    indices = [[0, 0, 0], [0, 1, 0], [1, 0, 3], [1, 1, 2], [1, 1, 3], [1, 2, 1]]
    values = [0, 10, 103, 112, 113, 114]
    dense_shape = [2, 3, 4]
    sp = tf.SparseTensor(indices, values, dense_shape)

    # Dense boolean indicator tensor (last dimension expanded to 200).
    si = tf.sparse_to_indicator(sp, 200)
    si_val = si[1, 1, 113]
    test_run_sess("sparse indicator", si)
    test_run_sess("sparse indicator value", si_val)

    # Concatenate along columns (axis 1) and rows (axis 0).
    sp1 = tf.SparseTensor([[0, 2], [1, 0], [1, 1]], ['a', 'b', 'c'], [2, 3])
    sp2 = tf.SparseTensor([[0, 1], [0, 2]], ['d', 'e'], [2, 4])
    sp3 = tf.SparseTensor([[0, 1], [0, 2]], ['d', 'e'], [2, 3])
    con1 = tf.sparse_concat(1, [sp1, sp2], name=None)
    con2 = tf.sparse_concat(0, [sp1, sp3], name=None)
    test_run_sess("sparse concat1", con1)
    test_run_sess("sparse concat2", con2)

    # Reorder into canonical row-major order.
    sp4 = tf.SparseTensor([[0, 3], [0, 1], [3, 1], [2, 0]],
                          ['b', 'a', 'd', 'c'], [4, 5])
    rsp4 = tf.sparse_reorder(sp4)
    # Keep only a subset of the nonzero entries.
    rsp5 = tf.sparse_retain(sp4, [True, False, False, True])
    # Fill rows that have no entries with a default value.
    rsp6 = tf.sparse_fill_empty_rows(sp4, 'zz')

    test_run_sess("rsp4", rsp4)
    test_run_sess("rsp5", rsp5)
    test_run_sess("rsp6", rsp6)
def dropout(x, keep_prob, noise_shape, is_sparse=False):
	"""
	Perform dropout.

	x: dense tensor, or a SparseTensor when is_sparse is True.
	keep_prob: probability of keeping each element; 0 returns x unchanged
	  (guards the 1/keep_prob division below).
	noise_shape: scalar count of elements to mask on the sparse path.
	"""
	if keep_prob == 0:
		return x
	if is_sparse:
		# Keep each nonzero with probability keep_prob, then rescale the
		# survivors by 1/keep_prob so the expectation is preserved.
		random_tensor = tf.add(tf.random_uniform([noise_shape]), keep_prob)
		dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
		pre_out = tf.sparse_retain(x, dropout_mask)
		return pre_out * 1.0 / keep_prob
	# BUG FIX: tf.nn.dropout already scales kept values by 1/keep_prob; the
	# previous code divided by keep_prob a second time, inflating the dense
	# output by a factor of 1/keep_prob.
	return tf.nn.dropout(x, keep_prob)
コード例 #32
0
def sparse_dropout(x, keep_prob, noise_shape):
    '''
    From kipf, GCN
    Do dropout for sparse tensors
    '''
    # Perturb keep_prob with uniform noise; floor() yields a
    # Bernoulli(keep_prob) keep mask.
    mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)),
                   dtype=tf.bool)

    # Drop the masked-out nonzeros and rescale the rest.
    retained = tf.sparse_retain(x, mask)
    return retained * (1./keep_prob)
コード例 #33
0
ファイル: layers.py プロジェクト: Yuming326/gcn
def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors.

    keep_prob is a scalar (1 - dropout_rate); noise_shape is a 1-D integer
    tensor giving the shape of the random mask over x's nonzero entries.
    """
    # keep_prob + U >= 1 with probability keep_prob, so floor() produces the
    # per-entry keep indicator (positions where the sum stays below 1 drop).
    keep_mask = tf.cast(tf.floor(keep_prob + tf.random_uniform(noise_shape)),
                        dtype=tf.bool)
    # sparse_retain returns a SparseTensor of the same dense shape, keeping
    # only the nonzeros at True positions of the mask.
    survivors = tf.sparse_retain(x, keep_mask)
    return survivors * (1. / keep_prob)