Code example #1
    def __init__(self, dim_in, dim_out, dropout=0., act='relu',
                 order=1, aggr='mean', is_train=True, bias='norm', **kwargs):
        super(HighOrderAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = F_ACT[act]
        self.order = order
        self.aggr = aggr
        self.is_train = is_train
        if dim_out > 0:
            with tf.variable_scope(self.name + '_vars'):
                # One weight matrix and one bias vector per aggregation
                # order (order 0 is the node's own representation).
                for o in range(self.order + 1):
                    _k = 'order{}_weights'.format(o)
                    self.vars[_k] = glorot([dim_in, dim_out], name=_k)
                for o in range(self.order + 1):
                    _k = 'order{}_bias'.format(o)
                    self.vars[_k] = zeros([dim_out], name=_k)
                # With bias == 'norm', add per-order offset/scale
                # parameters for layer normalization.
                if self.bias == 'norm':
                    for o in range(self.order + 1):
                        _k1 = 'order{}_offset'.format(o)
                        _k2 = 'order{}_scale'.format(o)
                        self.vars[_k1] = zeros([1, dim_out], name=_k1)
                        self.vars[_k2] = ones([1, dim_out], name=_k2)
        print('>> layer {}, dim: [{},{}]'.format(self.name, dim_in, dim_out))
        if self.logging:
            self._log_vars()

        self.dim_in = dim_in
        self.dim_out = dim_out
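
For context, a minimal usage sketch (hypothetical, not taken from the source): it assumes the Layer base class, the F_ACT activation table, and the glorot/zeros/ones initializer helpers from the GraphSAINT codebase are already in scope, under a TF1 graph.

# Hypothetical usage (assumed, not from the source): a 1-hop aggregator
# mapping 256-d inputs to 128-d outputs with mean aggregation and the
# layer-norm bias variant. order=1 creates variables for orders 0 and 1:
# two weight matrices, two bias vectors, two (offset, scale) pairs.
agg = HighOrderAggregator(256, 128, dropout=0.1, act='relu',
                          order=1, aggr='mean', bias='norm')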
Code example #2
    def __init__(self,
                 dim_in,
                 dim_out,
                 dropout=0.,
                 act='relu',
                 order=1,
                 aggr='mean',
                 is_train=True,
                 bias='norm',
                 **kwargs):
        assert order <= 1, "attention is currently only supported for order 0/1 layers"
        super(AttentionAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = F_ACT[act]
        self.order = order
        self.aggr = aggr
        self.is_train = is_train
        # Number of attention heads for the order-1 term (defaults to 1).
        self.mulhead = int(kwargs.get('mulhead', 1))
        with tf.variable_scope(self.name + '_vars'):
            self.vars['order0_weights'] = glorot([dim_in, dim_out],
                                                 name='order0_weights')
            # Order-1 weights are split across heads: each head maps the
            # input into a dim_out / mulhead slice of the output.
            for k in range(self.mulhead):
                self.vars['order1_weights_h{}'.format(k)] = glorot(
                    [dim_in, int(dim_out / self.mulhead)],
                    name='order1_weights_h{}'.format(k))
            self.vars['order0_bias'] = zeros([dim_out], name='order0_bias')
            for k in range(self.mulhead):
                self.vars['order1_bias_h{}'.format(k)] = zeros(
                    [int(dim_out / self.mulhead)],
                    name='order1_bias_h{}'.format(k))

            if self.bias == 'norm':
                for o in range(self.order + 1):
                    _k1 = 'order{}_offset'.format(o)
                    _k2 = 'order{}_scale'.format(o)
                    self.vars[_k1] = zeros([1, dim_out], name=_k1)
                    self.vars[_k2] = ones([1, dim_out], name=_k2)
            # Per-head attention parameters: a weight vector and a scalar
            # bias for each of the two attention terms (GAT-style).
            for k in range(self.mulhead):
                self.vars['attention_0_h{}'.format(k)] = glorot(
                    [1, int(dim_out / self.mulhead)],
                    name='attention_0_h{}'.format(k))
                self.vars['attention_1_h{}'.format(k)] = glorot(
                    [1, int(dim_out / self.mulhead)],
                    name='attention_1_h{}'.format(k))
                self.vars['att_bias_0_h{}'.format(k)] = zeros(
                    [1], name='att_bias_0_h{}'.format(k))
                self.vars['att_bias_1_h{}'.format(k)] = zeros(
                    [1], name='att_bias_1_h{}'.format(k))
        print('>> layer {}, dim: [{},{}]'.format(self.name, dim_in, dim_out))
        if self.logging:
            self._log_vars()

        self.dim_in = dim_in
        self.dim_out = dim_out
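
A hypothetical instantiation sketch (assumed, not from the source) to illustrate the head split: each of the mulhead heads owns a dim_out / mulhead slice of the order-1 output, so dim_out should be divisible by mulhead. Note that mulhead is read out of **kwargs by the constructor above, so it is passed as a plain keyword argument here.

# Hypothetical usage (assumed): 4 attention heads over 128 output dims,
# so each order-1 head produces a 128 / 4 = 32-dimensional slice.
att = AttentionAggregator(256, 128, dropout=0.1, act='relu',
                          order=1, aggr='concat', bias='norm', mulhead=4)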
Code example #3
File: layers.py  Project: zwytop/GraphSAINT
    def __init__(self, arch_gcn, dim_input_jk, mode=None, **kwargs):
        """
        """
        super(JumpingKnowledge,self).__init__(**kwargs)
        self.mode = mode
        if not mode:
            return
        self.act = F_ACT[arch_gcn['act']]
        self.bias = arch_gcn['bias']
        self.dim_in = dim_input_jk
        self.dim_out = arch_gcn['dim']

        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = glorot([self.dim_in,self.dim_out],name='weights')
            self.vars['bias'] = zeros([self.dim_out],name='bias')
            if self.bias == 'norm':
                self.vars['offset'] = zeros([1,self.dim_out],name='offset')
                self.vars['scale'] = ones([1,self.dim_out],name='scale')
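
And a hypothetical sketch for the jumping-knowledge layer (assumed, not from the source): arch_gcn mirrors the architecture dict the constructor reads from, and dim_input_jk would be the combined width of the representations being jumped over, e.g. three 128-d hidden layers concatenated.

# Hypothetical usage (assumed): a JK layer over three concatenated
# 128-d hidden states, projected back down to 128 dims. The mode value
# only needs to be truthy here for variables to be created.
arch_gcn = {'act': 'relu', 'bias': 'norm', 'dim': 128}
jk = JumpingKnowledge(arch_gcn, dim_input_jk=3 * 128, mode='concat')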