Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_heads=1,
                 in_drop=0.0,
                 coef_drop=0.0,
                 activation=nn.ELU(),
                 residual=False,
                 output_transform='concat'):
        super(AttentionAggregator, self).__init__()
        self.num_heads = num_heads
        # build one independent attention head per requested head
        self.attns = []
        for _ in range(num_heads):
            self.attns.append(
                AttentionHead(in_channels,
                              out_channels,
                              in_drop_ratio=in_drop,
                              coef_drop_ratio=coef_drop,
                              activation=activation,
                              residual=residual))
        self.attns = nn.layer.CellList(self.attns)
        # merge the head outputs either by concatenating them along the
        # feature axis or by summing them element-wise
        if output_transform == 'concat':
            self.out_trans = P.Concat(-1)
        elif output_transform == 'sum':
            self.out_trans = P.AddN()
        else:
            raise ValueError("output_transform must be 'concat' or 'sum'")
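A sketch of the forward pass that usually goes with this constructor is shown below. It is only an illustration: the real construct method is not part of this example, and the call signature head(input_feature, bias_mat) for each AttentionHead is an assumption.

    # Sketch only: assumes every AttentionHead cell is callable as
    # head(input_feature, bias_mat) and returns a tensor of per-node features.
    def construct(self, input_feature, bias_mat):
        head_outputs = ()
        for head in self.attns:
            head_outputs += (head(input_feature, bias_mat),)
        # P.Concat(-1) stacks the per-head features along the last axis,
        # P.AddN() sums them element-wise; both accept a tuple of tensors.
        return self.out_trans(head_outputs)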
Example #2
    def __init__(self,
                 features,
                 biases,
                 ftr_dims,
                 num_class,
                 num_nodes,
                 hidden_units,
                 num_heads,
                 attn_drop=0.0,
                 ftr_drop=0.0,
                 activation=nn.ELU(),
                 residual=False):
        super(GAT, self).__init__()
        self.features = Tensor(features)
        self.biases = Tensor(biases)
        self.ftr_dims = check_int_positive(ftr_dims)
        self.num_class = check_int_positive(num_class)
        self.num_nodes = check_int_positive(num_nodes)
        self.hidden_units = hidden_units
        self.num_heads = num_heads
        self.attn_drop = attn_drop
        self.ftr_drop = ftr_drop
        self.activation = activation
        self.residual = check_bool(residual)
        self.layers = []
        # first layer
        self.layers.append(
            AttentionAggregator(self.ftr_dims,
                                self.hidden_units[0],
                                self.num_heads[0],
                                self.ftr_drop,
                                self.attn_drop,
                                self.activation,
                                residual=False))
        # intermediate layer
        for i in range(1, len(self.hidden_units)):
            self.layers.append(
                AttentionAggregator(self.hidden_units[i - 1] *
                                    self.num_heads[i - 1],
                                    self.hidden_units[i],
                                    self.num_heads[i],
                                    self.ftr_drop,
                                    self.attn_drop,
                                    self.activation,
                                    residual=self.residual))
        # output layer
        self.layers.append(
            AttentionAggregator(self.hidden_units[-1] * self.num_heads[-2],
                                self.num_class,
                                self.num_heads[-1],
                                self.ftr_drop,
                                self.attn_drop,
                                activation=None,
                                residual=False,
                                output_transform='sum'))
        self.layers = nn.layer.CellList(self.layers)
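For orientation, a usage sketch follows. The Cora-like sizes (2708 nodes, 1433 input features, 7 classes) are borrowed from the test in Example #5, and the random NumPy arrays merely stand in for the preprocessed feature matrix and the adjacency-derived bias matrix; treat this as an assumption about how the constructor is fed, not as reference code.

import numpy as np
import mindspore.nn as nn

# placeholder inputs shaped like the Cora data used in Example #5
features = np.random.rand(1, 2708, 1433).astype(np.float32)
biases = np.random.rand(1, 2708, 2708).astype(np.float32)

net = GAT(features,
          biases,
          ftr_dims=1433,
          num_class=7,
          num_nodes=2708,
          hidden_units=[8],    # one hidden layer, 8 features per head
          num_heads=[8, 1],    # 8 heads in the hidden layer, 1 in the output layer
          attn_drop=0.6,
          ftr_drop=0.6,
          activation=nn.ELU(),
          residual=False)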
Example #3
    def __init__(self,
                 in_channel,
                 out_channel,
                 in_drop_ratio=0.0,
                 coef_drop_ratio=0.0,
                 residual=False,
                 coef_activation=nn.LeakyReLU(),
                 activation=nn.ELU()):
        super(AttentionHead, self).__init__()
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.in_drop_ratio = in_drop_ratio
        self.in_drop = nn.Dropout(keep_prob=1 - in_drop_ratio)
        self.in_drop_2 = nn.Dropout(keep_prob=1 - in_drop_ratio)
        # shared linear transform W applied to every node feature
        self.feature_transform = GNNFeatureTransform(
            in_channels=self.in_channel,
            out_channels=self.out_channel,
            has_bias=False,
            weight_init='XavierUniform')

        # f_1 and f_2 each map a transformed node feature to a scalar score;
        # together they play the role of the attention vector
        self.f_1_transform = GNNFeatureTransform(
            in_channels=self.out_channel,
            out_channels=1,
            weight_init='XavierUniform')
        self.f_2_transform = GNNFeatureTransform(
            in_channels=self.out_channel,
            out_channels=1,
            weight_init='XavierUniform')
        self.softmax = nn.Softmax()

        self.coef_drop = nn.Dropout(keep_prob=1 - coef_drop_ratio)
        self.matmul = P.MatMul()
        self.bias_add = P.BiasAdd()
        self.bias = Parameter(initializer('zeros', self.out_channel), name='bias')
        self.residual = residual
        if self.residual:
            if in_channel != out_channel:
                self.residual_transform_flag = True
                self.residual_transform = GNNFeatureTransform(
                    in_channels=self.in_channel,
                    out_channels=self.out_channel)
            else:
                self.residual_transform = None
        self.coef_activation = coef_activation
        self.activation = activation
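The sub-cells built above correspond to the attention computation of the GAT paper: feature_transform is the shared weight matrix, while f_1_transform and f_2_transform form the attention vector. A rough sketch of how they are typically combined is given below; the example does not include the real construct method, so the call order, the 2-D (num_nodes, features) shapes and the final bias/activation step are assumptions.

    # Illustrative sketch only, not this example's actual construct():
    def attention_sketch(self, feature, bias_mat):
        # feature: (num_nodes, in_channel); bias_mat: (num_nodes, num_nodes)
        feature = self.in_drop(feature)
        h = self.feature_transform(feature)            # (num_nodes, out_channel)
        f_1 = self.f_1_transform(h)                    # per-node scores, (num_nodes, 1)
        f_2 = self.f_2_transform(h)
        # broadcast to pairwise logits, mask with bias_mat, normalise over neighbours
        logits = self.coef_activation(f_1 + P.Transpose()(f_2, (1, 0))) + bias_mat
        coefs = self.coef_drop(self.softmax(logits))
        out = self.matmul(coefs, self.in_drop_2(h))    # attention-weighted aggregation
        out = self.bias_add(out, self.bias)
        return self.activation(out) if self.activation is not None else out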
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_heads=1,
                 in_drop=0.0,
                 coef_drop=0.0,
                 activation=nn.ELU(),
                 residual=False):
        super(AttentionAggregator, self).__init__()
        self.num_heads = num_heads
        self.attns = []
        for _ in range(num_heads):
            self.attns.append(
                AttentionHead(in_channels,
                              out_channels,
                              in_drop_ratio=in_drop,
                              coef_drop_ratio=coef_drop,
                              activation=activation,
                              residual=residual))
        self.attns = nn.layer.CellList(self.attns)
Example #5
def test_GAT():
    ft_sizes = 1433
    num_class = 7
    num_nodes = 2708
    hid_units = [8]
    n_heads = [8, 1]
    activation = nn.ELU()
    residual = False
    input_data = Tensor(
        np.array(np.random.rand(1, 2708, 1433), dtype=np.float32))
    biases = Tensor(np.array(np.random.rand(1, 2708, 2708), dtype=np.float32))
    net = GAT(ft_sizes,
              num_class,
              num_nodes,
              hidden_units=hid_units,
              num_heads=n_heads,
              attn_drop=0.6,
              ftr_drop=0.6,
              activation=activation,
              residual=residual)
    _executor.compile(net, input_data, biases)
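This test uses a GAT variant whose constructor takes ftr_dims first and receives the feature and bias tensors only at compile time, unlike the constructor in Example #2. The snippet also relies on names it does not import itself; a plausible preamble (the exact import paths are a guess based on typical MindSpore test files, plus the GAT definition from its model module) would be:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _executor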
Example #6
    def __init__(self,
                 in_channel,
                 out_channel,
                 in_drop_ratio=0.0,
                 coef_drop_ratio=0.0,
                 residual=False,
                 coef_activation=nn.LeakyReLU(),
                 activation=nn.ELU()):
        super(AttentionHead, self).__init__()
        self.in_channel = Validator.check_positive_int(in_channel)
        self.out_channel = Validator.check_positive_int(out_channel)
        self.in_drop_ratio = in_drop_ratio
        self.in_drop = nn.Dropout(keep_prob=1 - in_drop_ratio)
        self.in_drop_2 = nn.Dropout(keep_prob=1 - in_drop_ratio)
        self.feature_transform = GNNFeatureTransform(
            in_channels=self.in_channel,
            out_channels=self.out_channel,
            has_bias=False)

        self.f_1_transform = GNNFeatureTransform(in_channels=self.out_channel,
                                                 out_channels=1)
        self.f_2_transform = GNNFeatureTransform(in_channels=self.out_channel,
                                                 out_channels=1)
        self.softmax = nn.Softmax()

        self.coef_drop = nn.Dropout(keep_prob=1 - coef_drop_ratio)
        self.batch_matmul = P.BatchMatMul()
        self.bias_add = P.BiasAdd()
        self.bias = Parameter(initializer('zeros', self.out_channel),
                              name='bias')
        self.residual = Validator.check_bool(residual)
        if self.residual:
            if in_channel != out_channel:
                self.residual_transform_flag = True
                self.residual_transform = GNNFeatureTransform(
                    in_channels=self.in_channel, out_channels=self.out_channel)
            else:
                self.residual_transform = None
        self.coef_activation = coef_activation
        self.activation = activation