    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 edge_pos_emb=False,
                 aggr: str = 'softmax',
                 t: float = 1.0,
                 learn_t: bool = False,
                 p: float = 1.0,
                 learn_p: bool = False,
                 msg_norm: bool = False,
                 learn_msg_scale: bool = False,
                 norm: str = 'batch',
                 num_layers: int = 2,
                 eps: float = 1e-7,
                 **kwargs):

        kwargs.setdefault('aggr', None)
        super(GENConv, self).__init__(**kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggr = aggr
        self.eps = eps

        assert aggr in ['softmax', 'softmax_sg', 'power']

        # MLP head: hidden layers are twice the input width, the final layer
        # projects to `out_channels`.
        channels = [in_channels]
        for i in range(num_layers - 1):
            channels.append(in_channels * 2)
        channels.append(out_channels)
        self.mlp = MLP(channels, norm=norm)

        self.msg_norm = MessageNorm(learn_msg_scale) if msg_norm else None

        self.initial_t = t
        self.initial_p = p

        # Temperature of the softmax aggregator; only made learnable when that
        # aggregator is in use.
        if learn_t and aggr == 'softmax':
            self.t = Parameter(torch.Tensor([t]), requires_grad=True)
        else:
            self.t = t

        # Exponent of the power-mean aggregator.
        if learn_p:
            self.p = Parameter(torch.Tensor([p]), requires_grad=True)
        else:
            self.p = p

        # Optional positional encoding applied to edge features.
        if edge_pos_emb:
            self.edge_enc = TrajPositionalEncoding(d_model=out_channels,
                                                   max_len=110)
        else:
            self.edge_enc = None
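
Usage sketch for the constructor above (assumptions: the surrounding class is a torch_geometric MessagePassing subclass named GENConv with the usual forward(x, edge_index) call, and edge_pos_emb is left at its default):

import torch

x = torch.randn(6, 64)                                  # 6 nodes, 64 features
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
                           [1, 2, 3, 4, 5, 0]])         # directed ring
conv = GENConv(in_channels=64, out_channels=128, aggr='softmax',
               t=1.0, learn_t=True, msg_norm=True, learn_msg_scale=True)
out = conv(x, edge_index)                               # -> [6, 128]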
Example #2
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 aggr: str = 'softmax',
                 t: float = 1.0,
                 learn_t: bool = False,
                 p: float = 1.0,
                 learn_p: bool = False,
                 msg_norm: bool = False,
                 learn_msg_scale: bool = False,
                 norm: str = 'batch',
                 num_layers: int = 2,
                 eps: float = 1e-7,
                 **kwargs):

        super(GENConv, self).__init__(aggr=None, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggr = aggr
        self.eps = eps

        assert aggr in ['softmax', 'softmax_sg', 'power', 'stat']

        channels = [in_channels]
        for i in range(num_layers - 1):
            channels.append(in_channels * 2)
        channels.append(out_channels)
        self.mlp = MLP(channels, norm=norm)

        self.msg_norm = MessageNorm(learn_msg_scale) if msg_norm else None

        self.initial_t = t
        self.initial_p = p

        if learn_t and aggr == 'softmax':
            self.t = Parameter(torch.Tensor([t]), requires_grad=True)
        else:
            self.t = t

        if learn_p:
            self.p = Parameter(torch.Tensor([p]), requires_grad=True)
        else:
            self.p = p

        # 4 -> 1 projection, presumably combining per-node statistics for the
        # extra 'stat' aggregation mode accepted above.
        self.lin_stat = nn.Linear(4, 1)
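
For reference, the softmax aggregation that the t/learn_t arguments above control follows the DeeperGCN formulation. A standalone sketch using torch_scatter (not part of this snippet, which only shows __init__):

import torch
from torch_scatter import scatter_softmax, scatter_sum

def softmax_aggregate(msg, index, t, dim_size):
    # msg: [num_edges, channels] messages, index: [num_edges] target nodes.
    # out_i = sum_j softmax_j(t * msg_ij) * msg_ij over the neighbours j of i.
    weight = scatter_softmax(t * msg, index, dim=0)
    return scatter_sum(weight * msg, index, dim=0, dim_size=dim_size)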
Example #3
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 aggr: str = 'softmax',
                 t: float = 1.0,
                 learn_t: bool = False,
                 p: float = 1.0,
                 learn_p: bool = False,
                 msg_norm: bool = False,
                 learn_msg_scale: bool = False,
                 norm: str = 'batch',
                 num_layers: int = 2,
                 eps: float = 1e-7,
                 **kwargs):

        # Backward compatibility:
        aggr = 'softmax' if aggr == 'softmax_sg' else aggr
        aggr = 'powermean' if aggr == 'power' else aggr

        aggr_kwargs = {}
        if aggr == 'softmax':
            aggr_kwargs = dict(t=t, learn=learn_t)
        elif aggr == 'powermean':
            aggr_kwargs = dict(p=p, learn=learn_p)

        super().__init__(aggr=aggr, aggr_kwargs=aggr_kwargs, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.eps = eps

        channels = [in_channels]
        for i in range(num_layers - 1):
            channels.append(in_channels * 2)
        channels.append(out_channels)
        self.mlp = MLP(channels, norm=norm)

        self.msg_norm = MessageNorm(learn_msg_scale) if msg_norm else None
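
With this PyG 2.x-style constructor the aggregation module is built by the MessagePassing base class from aggr and aggr_kwargs, so the legacy spellings still work. A sketch, assuming the class is torch_geometric's GENConv:

conv_a = GENConv(64, 64, aggr='softmax_sg', t=0.1)            # resolved to 'softmax'
conv_b = GENConv(64, 64, aggr='power', p=2.0, learn_p=True)   # resolved to 'powermean'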
Example #4
    def __init__(self, in_channels, out_channels, edge_channels=1, **kwargs):
        super(GateConv, self).__init__(aggr='add', **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.edge_channels = edge_channels

        self.linear_n = nn.Parameter(torch.Tensor(in_channels, out_channels))
        self.linear_e = nn.Parameter(torch.Tensor(edge_channels, out_channels))

        # self.linear_attn = LinearMultiHeadedAttention(h=)
        self.tfm_encoder = make_transformer_encoder(num_layers=2,
                                                    hidden_size=out_channels * 2,
                                                    ff_size=out_channels * 2,
                                                    num_att_heads=8)

        self.msg_norm = MessageNorm(learn_scale=True)

        self.linear_msg = nn.Linear(out_channels * 2, out_channels)
        self.linear_aggr = nn.Linear(out_channels, out_channels)

        self.reset_parameters()
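
The snippet ends by calling reset_parameters() without showing it. A plausible body for the two raw weight matrices declared above, relying on the module's existing torch import (an assumption, not the original code):

    def reset_parameters(self):
        # Assumed initialization -- the original method is not shown above.
        torch.nn.init.xavier_uniform_(self.linear_n)
        torch.nn.init.xavier_uniform_(self.linear_e)
        self.linear_msg.reset_parameters()
        self.linear_aggr.reset_parameters()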