Example #1
0
    def __init__(self,
                 dim_in,
                 dim_out,
                 dropout=0.,
                 act='relu',
                 order=1,
                 aggr='mean',
                 model_pretrain=None,
                 is_train=True,
                 bias='norm',
                 **kwargs):
        assert order <= 1, "attention is currently only supported for order 0/1 layers"
        super(AttentionAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = F_ACT[act]
        self.order = order
        self.aggr = aggr
        self.is_train = is_train
        if 'mulhead' in kwargs:
            self.mulhead = int(kwargs['mulhead'])
        else:
            self.mulhead = 1
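        # Trainable parameters: one full-width order-0 transform plus, per head,
        # an order-1 transform, attention vectors and biases; each head outputs
        # dim_out / mulhead channels.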
        with tf.variable_scope(self.name + '_vars'):
            self.vars['order0_weights'] = glorot([dim_in, dim_out],
                                                 name='order0_weights')
            for k in range(self.mulhead):
                self.vars['order1_weights_h{}'.format(k)] = glorot(
                    [dim_in, int(dim_out / self.mulhead)],
                    name='order1_weights_h{}'.format(k))
            self.vars['order0_bias'] = zeros([dim_out], name='order0_bias')
            for k in range(self.mulhead):
                self.vars['order1_bias_h{}'.format(k)] = zeros(
                    [int(dim_out / self.mulhead)],
                    name='order1_bias_h{}'.format(k))

            # 'norm' bias mode: per-order offset and scale parameters.
            if self.bias == 'norm':
                for o in range(self.order + 1):
                    _k1 = 'order{}_offset'.format(o)
                    _k2 = 'order{}_scale'.format(o)
                    self.vars[_k1] = zeros([1, dim_out], name=_k1)
                    self.vars[_k2] = ones([1, dim_out], name=_k2)
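            # Per-head attention parameters: one weight vector and one scalar
            # bias for each of the two attention terms (index 0 and index 1).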
            for k in range(self.mulhead):
                self.vars['attention_0_h{}'.format(k)] = glorot(
                    [1, int(dim_out / self.mulhead)],
                    name='attention_0_h{}'.format(k))
                self.vars['attention_1_h{}'.format(k)] = glorot(
                    [1, int(dim_out / self.mulhead)],
                    name='attention_1_h{}'.format(k))
                self.vars['att_bias_0_h{}'.format(k)] = zeros(
                    [1], name='att_bias_0_h{}'.format(k))
                self.vars['att_bias_1_h{}'.format(k)] = zeros(
                    [1], name='att_bias_1_h{}'.format(k))
        print('>> layer {}, dim: [{},{}]'.format(self.name, dim_in, dim_out))
        if self.logging:
            self._log_vars()

        self.dim_in = dim_in
        self.dim_out = dim_out
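
A minimal usage sketch (illustrative, not from the source); the dimensions, dropout and head count below are hypothetical, and glorot/zeros/ones/F_ACT and the Layer base class are assumed to be provided by the surrounding repository:

# Hypothetical instantiation; 'mulhead' is read from **kwargs (defaults to 1)
# and is assumed to be accepted by the Layer base class.
attn_layer = AttentionAggregator(dim_in=128, dim_out=128, dropout=0.1,
                                 act='relu', order=1, aggr='mean', mulhead=4)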
Example #2
0
    def __init__(self,
                 dim_in,
                 dim_out,
                 dropout=0.,
                 act='relu',
                 order=1,
                 aggr='mean',
                 model_pretrain=None,
                 is_train=True,
                 bias='norm',
                 **kwargs):
        super(HighOrderAggregator, self).__init__(**kwargs)
        self.dropout = dropout
        self.bias = bias
        self.act = F_ACT[act]
        self.order = order
        self.aggr = aggr
        self.is_train = is_train
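        # Parameters are only created when the layer produces an output (dim_out > 0).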
        if dim_out > 0:
            with tf.variable_scope(self.name + '_vars'):
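                # Order-k weights: fresh Glorot init, or loaded from model_pretrain.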
                if model_pretrain is None:
                    for o in range(self.order + 1):
                        _k = 'order{}_weights'.format(o)
                        self.vars[_k] = glorot([dim_in, dim_out], name=_k)
                else:
                    for o in range(self.order + 1):
                        _k = 'order{}_weights'.format(o)
                        self.vars[_k] = trained(model_pretrain[0], name=_k)
                for o in range(self.order + 1):
                    _k = 'order{}_bias'.format(o)
                    self.vars[_k] = zeros([dim_out], name=_k)
                if self.bias == 'norm':
                    for o in range(self.order + 1):
                        _k1 = 'order{}_offset'.format(o)
                        _k2 = 'order{}_scale'.format(o)
                        self.vars[_k1] = zeros([1, dim_out], name=_k1)
                        self.vars[_k2] = ones([1, dim_out], name=_k2)
        print('>> layer {}, dim: [{},{}]'.format(self.name, dim_in, dim_out))
        if self.logging:
            self._log_vars()

        self.dim_in = dim_in
        self.dim_out = dim_out
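
A minimal usage sketch (illustrative, not from the source); the dimensions are hypothetical, and glorot/zeros/ones/trained/F_ACT and the Layer base class are assumed to be provided by the repository:

# Hypothetical order-1 aggregator; bias='norm' creates the offset/scale
# parameters shown above, and model_pretrain is left unset so the weights
# are freshly Glorot-initialized.
agg = HighOrderAggregator(dim_in=256, dim_out=128, dropout=0.2,
                          act='relu', order=1, aggr='mean', bias='norm')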
Example #3
0
    def __init__(self, arch_gcn, dim_input_jk, mode=None, **kwargs):
        """
        """
        super(JumpingKnowledge, self).__init__(**kwargs)
        self.mode = mode
        if not mode:
            return
        self.act = F_ACT[arch_gcn['act']]
        self.bias = arch_gcn['bias']
        self.dim_in = dim_input_jk
        self.dim_out = arch_gcn['dim']

        # A single linear transform; bias mode 'norm' adds offset/scale parameters.
        with tf.variable_scope(self.name + '_vars'):
            self.vars['weights'] = glorot([self.dim_in, self.dim_out],
                                          name='weights')
            self.vars['bias'] = zeros([self.dim_out], name='bias')
            if self.bias == 'norm':
                self.vars['offset'] = zeros([1, self.dim_out], name='offset')
                self.vars['scale'] = ones([1, self.dim_out], name='scale')
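
A minimal usage sketch (illustrative, not from the source); the arch_gcn values and dimensions are hypothetical. Any truthy mode triggers parameter creation; 'concat' is only an illustrative value here:

arch_gcn = {'act': 'relu', 'bias': 'norm', 'dim': 128}
jk = JumpingKnowledge(arch_gcn, dim_input_jk=384, mode='concat')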
Example #4
0
    def __init__(self,
                 dim_in,
                 dim_out,
                 weight_decay,
                 dropout=0.,
                 act=lambda x: x,
                 bias=True,
                 model_pretrain=None,
                 **kwargs):
        """
        model_pretrain is not None if you want to load the trained model
        model_pretrain[0] is weights
        model_pretrain[1] is bias
        """
        super(Dense, self).__init__(**kwargs)
        self.dropout = dropout
        # `act` may be a key into F_ACT (e.g. 'relu') or a callable, such as
        # the identity default above.
        self.act = F_ACT[act] if isinstance(act, str) else act
        self.bias = bias
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.weight_decay = weight_decay

        # Weights: fresh Xavier init with L2 regularization, or loaded from model_pretrain.
        with tf.variable_scope(self.name + '_vars'):
            if model_pretrain is None:
                self.vars['weights'] = tf.get_variable(
                    'weights',
                    shape=(dim_in, dim_out),
                    dtype=DTYPE,
                    initializer=tf.contrib.layers.xavier_initializer(),
                    regularizer=tf.contrib.layers.l2_regularizer(
                        self.weight_decay))
                if self.bias:
                    self.vars['bias'] = zeros([dim_out], name='bias')
            else:
                self.vars['weights'] = trained(model_pretrain[0],
                                               name='weights')
                if self.bias:
                    self.vars['bias'] = trained(model_pretrain[1], name='bias')
        if self.logging:
            self._log_vars()
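
A minimal usage sketch (illustrative, not from the source); the dimensions and weight decay are hypothetical, and DTYPE, zeros, trained, F_ACT and the Layer base class are assumed to be provided by the repository:

# Hypothetical dense output layer; weights get Xavier init plus L2
# regularization scaled by weight_decay.
dense = Dense(dim_in=128, dim_out=40, weight_decay=5e-4,
              dropout=0.0, act='relu', bias=True)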