Esempio n. 1
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_relations,
                 num_bases,
                 after_relu,
                 bias=False,
                 **kwargs):
        """Relational GCN layer with basis decomposition and mean aggregation.

        Args:
            in_channels: size of each input node feature.
            out_channels: size of each output node feature.
            num_relations: number of relation (edge) types.
            num_bases: number of shared basis weight matrices.
            after_relu: flag stored on the module for use elsewhere
                (presumably to pick an initialization — confirm in usage).
            bias: if True, adds a learnable additive bias.
        """
        super(MyRGCNConv, self).__init__(aggr='mean', **kwargs)

        # Record constructor hyper-parameters.
        self.in_channels, self.out_channels = in_channels, out_channels
        self.num_relations, self.num_bases = num_relations, num_bases
        self.after_relu = after_relu

        # Each relation's weight is a learned mixture (att) of shared bases.
        self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))
        self.att = Param(torch.Tensor(num_relations, num_bases))
        # Separate transform for a node's own (root) features.
        self.root = Param(torch.Tensor(in_channels, out_channels))

        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Param(torch.Tensor(out_channels))

        self.reset_parameters()
Esempio n. 2
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_relations,
                 num_bases,
                 root_weight=True,
                 bias=True,
                 **kwargs):
        """Relational GCN layer (basis decomposition, sum aggregation).

        Args:
            in_channels: input feature dimensionality.
            out_channels: output feature dimensionality.
            num_relations: number of relation (edge) types.
            num_bases: number of shared basis weight matrices.
            root_weight: if True, learn a self-connection transform.
            bias: if True, learn an additive bias.
        """
        super(RGCNConv, self).__init__(aggr='add', **kwargs)

        # Keep hyper-parameters around for the rest of the layer.
        self.in_channels, self.out_channels = in_channels, out_channels
        self.num_relations, self.num_bases = num_relations, num_bases

        # Per-relation weights are mixed from shared bases via `att`.
        self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))
        self.att = Param(torch.Tensor(num_relations, num_bases))

        if not root_weight:
            self.register_parameter('root', None)
        else:
            self.root = Param(torch.Tensor(in_channels, out_channels))

        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Param(torch.Tensor(out_channels))

        self.reset_parameters()
Esempio n. 3
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_relations,
                 num_bases,
                 bias=True):
        """Relational GCN layer; ``num_bases == 0`` disables decomposition.

        Args:
            in_channels: input feature size.
            out_channels: output feature size.
            num_relations: number of relation types.
            num_bases: number of basis matrices; 0 means one full weight
                matrix per relation (no basis decomposition, ``att`` unused).
            bias: whether to learn an additive bias.
        """
        super(RGCNConv, self).__init__('add')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_relations = num_relations
        self.num_bases = num_bases

        decomposed = num_bases != 0
        if decomposed:
            # Shared bases plus per-relation mixing coefficients.
            self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))
            self.att = Param(torch.Tensor(num_relations, num_bases))
        else:
            # One independent weight matrix per relation.
            self.basis = Param(torch.Tensor(num_relations, in_channels, out_channels))
            self.att = None
        # Self-loop transform applied to each node's own features.
        self.root = Param(torch.Tensor(in_channels, out_channels))

        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Param(torch.Tensor(out_channels))

        self.reset_parameters()
Esempio n. 4
0
    def __init__(self,
                 in_dim,
                 out_dim,
                 unigue_source_num,
                 unique_target_num,
                 is_after_relu=True,
                 is_bias=False,
                 **kwargs):
        """Hierarchy convolution: mean-aggregates source nodes into targets.

        Args:
            in_dim: input feature dimensionality.
            out_dim: output feature dimensionality.
            unigue_source_num: number of distinct source nodes.
                NOTE(review): parameter name has a typo ("unigue"); it is kept
                because renaming would break keyword-argument callers.
            unique_target_num: number of distinct target nodes.
            is_after_relu: flag stored on the module for use elsewhere.
            is_bias: if True, adds a learnable bias.
        """

        super(MyHierarchyConv, self).__init__(aggr='mean', **kwargs)

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.unique_source_num = unigue_source_num
        self.unique_target_num = unique_target_num
        self.is_after_relu = is_after_relu

        # Single shared linear transform.
        self.weight = Param(torch.Tensor(in_dim, out_dim))

        if not is_bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Param(torch.Tensor(out_dim))

        self.reset_parameters()
Esempio n. 5
0
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: int,
        num_relations: int,
        num_bases: Optional[int] = None,
        num_blocks: Optional[int] = None,
        aggr: str = 'mean',
        root_weight: bool = True,
        bias: bool = True,
        **kwargs,
    ):
        """Relational graph convolution with optional weight decomposition.

        Exactly one of basis decomposition (``num_bases``) or block-diagonal
        decomposition (``num_blocks``) may be enabled.

        Args:
            in_channels: input size, or a (source, target) pair for
                bipartite message passing.
            out_channels: output feature size.
            num_relations: number of relation types.
            num_bases: if set, use basis decomposition with this many bases.
            num_blocks: if set, use block-diagonal decomposition with this
                many blocks.
            aggr: default aggregation scheme (overridable via ``kwargs``).
            root_weight: if True, learn a self-connection transform.
            bias: if True, learn an additive bias.
        """
        kwargs.setdefault('aggr', aggr)
        super().__init__(node_dim=0, **kwargs)

        # The two decomposition schemes are mutually exclusive.
        if num_bases is not None and num_blocks is not None:
            raise ValueError('Can not apply both basis-decomposition and '
                             'block-diagonal-decomposition at the same time.')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_relations = num_relations
        self.num_bases = num_bases
        self.num_blocks = num_blocks

        # Normalize to a (source, target) pair for bipartite support.
        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)
        self.in_channels_l = in_channels[0]

        if num_bases is not None:
            # Basis decomposition: relations mix shared bases via `comp`.
            self.weight = Parameter(
                torch.Tensor(num_bases, in_channels[0], out_channels))
            self.comp = Parameter(torch.Tensor(num_relations, num_bases))
        elif num_blocks is not None:
            # Block-diagonal decomposition: sizes must divide evenly.
            assert (in_channels[0] % num_blocks == 0
                    and out_channels % num_blocks == 0)
            self.weight = Parameter(
                torch.Tensor(num_relations, num_blocks,
                             in_channels[0] // num_blocks,
                             out_channels // num_blocks))
            self.register_parameter('comp', None)
        else:
            # No decomposition: one full weight matrix per relation.
            self.weight = Parameter(
                torch.Tensor(num_relations, in_channels[0], out_channels))
            self.register_parameter('comp', None)

        if not root_weight:
            self.register_parameter('root', None)
        else:
            self.root = Param(torch.Tensor(in_channels[1], out_channels))

        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Param(torch.Tensor(out_channels))

        self.reset_parameters()
Esempio n. 6
0
    def __init__(self, out_channels, num_layers, aggr='add', bias=True):
        """Gated graph conv with separate in/out edge-direction weights.

        Args:
            out_channels: hidden state size of each node.
            num_layers: number of propagation steps.
            aggr: aggregation scheme passed to the message-passing base.
            bias: whether the GRU cell uses bias terms.
        """
        super(InOutGGNN, self).__init__(aggr)

        self.out_channels = out_channels
        self.num_layers = num_layers

        # One (out x out) matrix per layer per direction (incoming/outgoing).
        self.weight = Param(Tensor(num_layers, 2, out_channels, out_channels))
        # The GRU consumes the concatenated in/out messages.
        self.rnn = torch.nn.GRUCell(2 * out_channels, out_channels, bias=bias)
        # Direction-specific biases; always learned regardless of `bias`,
        # which only controls the GRU cell.
        self.bias_in = Param(Tensor(self.out_channels))
        self.bias_out = Param(Tensor(self.out_channels))

        self.reset_parameters()
Esempio n. 7
0
    def __init__(self, g, in_channels, out_channels, num_relations, num_bases):
        """Relational GCN layer bound to a stored graph ``g``.

        Args:
            g: graph object this layer operates on.
            in_channels: input feature size.
            out_channels: output feature size.
            num_relations: number of relation types.
            num_bases: number of shared basis matrices.
        """
        super(RGCNSPMVConv, self).__init__()

        self.g = g
        self.in_channels, self.out_channels = in_channels, out_channels
        self.num_relations, self.num_bases = num_relations, num_bases

        # Basis-decomposed relation weights, self-loop transform, and bias.
        self.basis = Param(torch.Tensor(num_bases, in_channels, out_channels))
        self.att = Param(torch.Tensor(num_relations, num_bases))
        self.root = Param(torch.Tensor(in_channels, out_channels))
        self.bias = Param(torch.Tensor(out_channels))

        self.reset_parameters()
Esempio n. 8
0
    def __init__(self, in_dim, num_et):
        """Inner-product decoder with one learned weight vector per edge type.

        Args:
            in_dim: node-embedding dimensionality.
            num_et: number of edge types to decode.
        """
        super(MultiInnerProductDecoder, self).__init__()
        self.num_et = num_et
        self.in_dim = in_dim
        # One `in_dim`-sized weight vector per edge type.
        self.weight = Param(torch.Tensor(num_et, in_dim))

        self.reset_parameters()
Esempio n. 9
0
    def __init__(self,
                 out_channels: int,
                 num_layers: int,
                 num_rels=1,
                 residual_channels=0,
                 aggr: str = 'add',
                 bias: bool = True,
                 weight_dropout=0.2,
                 **kwargs):
        """Gated graph convolution with multiple relation types.

        Args:
            out_channels: node state size.
            num_layers: number of propagation steps.
            num_rels: number of relation (edge) types.
            residual_channels: extra input channels concatenated into the GRU.
            aggr: aggregation scheme forwarded to the message-passing base.
            bias: whether the GRU cell learns bias terms.
            weight_dropout: dropout rate for the propagation weights; stored
                for use at forward time.
            **kwargs: extra arguments for the message-passing base class.
        """
        super(GatedGraphConv, self).__init__(aggr=aggr, **kwargs)

        self.out_channels = out_channels
        self.num_layers = num_layers
        self.num_rels = num_rels
        # Bug fix: the original called F.dropout(self.weight, ...) here and
        # discarded the return value — a no-op (and applying dropout once at
        # construction time would be wrong anyway). Store the rate instead so
        # forward() can apply dropout per call.
        self.weight_dropout = weight_dropout

        self.weight = Param(
            Tensor(num_layers, num_rels, out_channels, out_channels))

        self.rnn = torch.nn.GRUCell(out_channels + residual_channels,
                                    out_channels,
                                    bias=bias)

        self.reset_parameters()
Esempio n. 10
0
    def __init__(self,
                 device,
                 in_dim_drug,
                 num_dd_et,
                 in_dim_prot,
                 uni_num_prot,
                 uni_num_drug,
                 prot_drug_dim=64,
                 num_base=32,
                 n_embed=64,
                 n_hid1=32,
                 n_hid2=16,
                 mod='add'):
        """Encoder combining a protein-protein net, a protein->drug hierarchy
        and a two-layer relational drug-drug net.

        Args:
            device: torch device for the zero-initialized drug buffer.
            in_dim_drug: drug input feature size.
            num_dd_et: number of drug-drug edge types.
            in_dim_prot: protein input feature size.
            uni_num_prot: number of unique proteins.
            uni_num_drug: number of unique drugs.
            prot_drug_dim: output size of the protein->drug hierarchy conv.
            num_base: number of bases for the relational convolutions.
            n_embed: drug embedding size.
            n_hid1: first hidden size.
            n_hid2: second hidden size (also the encoder output size).
            mod: combination mode ('cat', 'ave').
                NOTE(review): not used anywhere in this constructor.
        """
        super(FMEncoder, self).__init__()
        self.num_et = num_dd_et
        self.out_dim = n_hid2
        self.uni_num_drug = uni_num_drug
        self.uni_num_prot = uni_num_prot

        # Protein-protein network encoder.
        self.pp_encoder = PPEncoder(in_dim_prot)

        # Drug-index embedding table.
        self.embed = Param(torch.Tensor(in_dim_drug, n_embed))

        # Protein-drug hierarchy: pools protein representations per drug.
        self.hgcn = MyHierarchyConv(self.pp_encoder.out_dim, prot_drug_dim,
                                    uni_num_prot, uni_num_drug)
        # Zero placeholder rows for drugs on the hierarchy input.
        self.hdrug = torch.zeros(
            (self.uni_num_drug, self.pp_encoder.out_dim)).to(device)

        # Two relational drug-drug layers; the second follows a ReLU.
        self.rgcn1 = MyRGCNConv2(n_embed,
                                 n_hid1,
                                 num_dd_et,
                                 num_base,
                                 after_relu=False)
        self.rgcn2 = MyRGCNConv2(n_hid1,
                                 n_hid2,
                                 num_dd_et,
                                 num_base,
                                 after_relu=True)

        self.reset_parameters()
Esempio n. 11
0
    def __init__(self, in_dim, num_uni_edge_type, l1_dim=16):
        """Two-layer NN decoder for drug-drug edge-type prediction.

        Args:
            in_dim: feature dimensionality of a drug.
            num_uni_edge_type: number of distinct drug-drug edge types.
            l1_dim: hidden size of the first decoder layer.
        """

        super(NNDecoder, self).__init__()
        self.l1_dim = l1_dim  # hidden size of the decoder's first layer

        # Separate parameter pair per endpoint drug; the *_l2 weights are
        # specific to each edge type.
        self.w1_l1 = Param(torch.Tensor(in_dim, l1_dim))
        self.w1_l2 = Param(torch.Tensor(num_uni_edge_type, l1_dim))
        self.w2_l1 = Param(torch.Tensor(in_dim, l1_dim))
        self.w2_l2 = Param(torch.Tensor(num_uni_edge_type, l1_dim))

        self.reset_parameters()
Esempio n. 12
0
    def __init__(self, source_dim, embed_dim, target_dim, uni_num_source,
                 uni_num_target):
        """Hierarchy encoder: embed source nodes, then pool them into targets.

        Args:
            source_dim: raw source-node feature size.
            embed_dim: embedding size fed to the hierarchy convolution.
            target_dim: output feature size per target node.
            uni_num_source: number of unique source nodes.
            uni_num_target: number of unique target nodes.
        """
        super(HierEncoder, self).__init__()

        # Linear embedding of the source nodes.
        self.embed = Param(torch.Tensor(source_dim, embed_dim))
        # Source -> target aggregation layer.
        self.hgcn = MyHierarchyConv(embed_dim, target_dim, uni_num_source,
                                    uni_num_target)

        self.reset_parameters()
    def __init__(self, out_channels, num_layers, aggr='add', bias=True):
        """Gated graph convolution layer.

        Args:
            out_channels: node state size.
            num_layers: number of propagation steps.
            aggr: message aggregation scheme.
            bias: whether the GRU cell learns bias terms.
        """
        super(GatedGraphConv, self).__init__(aggr)

        self.out_channels = out_channels
        self.num_layers = num_layers

        # One square propagation matrix per layer; a GRU updates node states.
        self.weight = Param(Tensor(num_layers, out_channels, out_channels))
        self.rnn = torch.nn.GRUCell(out_channels, out_channels, bias=bias)

        self.reset_parameters()
Esempio n. 14
0
    def __init__(self,
                 input_dim,
                 num_timesteps,
                 num_edge_types,
                 aggr='add',
                 bias=True,
                 dropout=0):
        """Gated graph conv with per-timestep, per-edge-type weights.

        Args:
            input_dim: node feature size (constant across timesteps).
            num_timesteps: number of propagation steps.
            num_edge_types: number of edge types.
            aggr: aggregation scheme for incoming messages.
            bias: whether the GRU cell learns bias terms.
            dropout: probability for the stored Dropout module.
        """
        super(GatedGraphConv, self).__init__(aggr)

        self._input_dim = input_dim
        self.num_timesteps = num_timesteps
        self.num_edge_types = num_edge_types

        # Distinct linear transform and bias per (timestep, edge type) pair.
        self.weight = Param(
            Tensor(num_timesteps, num_edge_types, input_dim, input_dim))
        self.bias = Param(Tensor(num_timesteps, num_edge_types, input_dim))
        self.rnn = torch.nn.GRUCell(input_dim, input_dim, bias=bias)
        self.dropout = torch.nn.Dropout(dropout)

        self.reset_parameters()
Esempio n. 15
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_relations,
                 num_bases,
                 device,
                 root_weight=True,
                 bias=True,
                 **kwargs):
        """Relational attention GCN layer (basis decomposition, sum aggr).

        Args:
            in_channels: input feature size.
            out_channels: output feature size.
            num_relations: number of relation types.
            num_bases: number of shared basis matrices.
            device: device on which parameter tensors are created.
            root_weight: if True, learn a self-connection transform.
            bias: if True, learn an additive bias.
            **kwargs: extra arguments for the message-passing base class.
        """
        super(RAGCNConv, self).__init__(aggr='add', **kwargs)
        self.device = device
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_relations = num_relations
        self.num_bases = num_bases

        # Bug fix: the original wrote `Param(...).to(device)`, but calling
        # .to() on a Parameter returns a plain (non-leaf) Tensor, so these
        # weights were never registered in the module's parameters and would
        # be invisible to optimizers and state_dict. Move .to() inside
        # Param(...) so a true leaf Parameter lives on `device`.
        self.basis = Param(torch.Tensor(num_bases, in_channels,
                                        out_channels).to(self.device))
        self.att_r = Param(torch.Tensor(num_relations,
                                        num_bases).to(self.device))
        self.heads = 1
        # Attention over concatenated (source, target) features.
        self.att = Param(torch.Tensor(1, self.heads,
                                      2 * out_channels).to(self.device))
        self.gate_layer = nn.Linear(2 * out_channels, 1)
        self.relu = nn.ReLU()
        self.negative_slope = 0.2  # leaky-ReLU slope for attention scores

        if root_weight:
            self.root = Param(torch.Tensor(in_channels,
                                           out_channels).to(self.device))
        else:
            self.register_parameter('root', None)

        if bias:
            self.bias = Param(torch.Tensor(out_channels).to(self.device))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()
        self.dropout = 0
Esempio n. 16
0
    def __init__(self, out_channels, num_layers, edge_network, aggr="add", bias=True):
        """Gated graph conv whose messages are modulated by an edge network.

        Args:
            out_channels: node state size.
            num_layers: number of propagation steps.
            edge_network: network applied to edge attributes.
                TODO: make into a list of neural networks for each edge_attr.
            aggr: message aggregation scheme.
            bias: whether the GRU cell learns bias terms.
        """
        super(GatedGraphConv, self).__init__(aggr)

        self.out_channels = out_channels
        self.num_layers = num_layers
        self.edge_network = edge_network

        # Per-layer propagation matrices; a GRU cell updates node states.
        self.weight = Param(Tensor(num_layers, out_channels, out_channels))
        self.rnn = torch.nn.GRUCell(out_channels, out_channels, bias=bias)
        self.reset_parameters()
Esempio n. 17
0
    def __init__(self,
                 out_channels: int,
                 num_layers: int,
                 aggr: str = 'add',
                 bias: bool = True,
                 **kwargs):
        """Gated graph convolution layer.

        Args:
            out_channels: node state size.
            num_layers: number of propagation steps.
            aggr: aggregation scheme forwarded to the base class.
            bias: whether the GRU cell learns bias terms.
            **kwargs: extra arguments for the message-passing base class.
        """
        super().__init__(aggr=aggr, **kwargs)

        self.out_channels = out_channels
        self.num_layers = num_layers

        # One square propagation matrix per layer; node states evolve via GRU.
        self.weight = Param(Tensor(num_layers, out_channels, out_channels))
        self.rnn = torch.nn.GRUCell(out_channels, out_channels, bias=bias)

        self.reset_parameters()
Esempio n. 18
0
 def __init__(self, p):
     """Linear regression module: p coefficients plus a log noise scale."""
     super(LinReg, self).__init__()
     # Coefficient vector, one entry per predictor, randomly initialized.
     coef_init = torch.randn(p, requires_grad=True)
     self.b = Param(coef_init)
     # Scalar log of the noise standard deviation.
     log_sig_init = torch.randn(1, requires_grad=True)
     self.log_sig = Param(log_sig_init)
Esempio n. 19
0
    def __init__(self, size):
        """Variational-parameter container of shape (2, size).

        Row 0 holds ``log_conc`` and row 1 ``log_rate`` — the names suggest a
        concentration/rate (Gamma-style) parameterization; confirm in usage.

        Args:
            size: number of variational units per row.
        """
        # Bug fix: the parent module was never initialized. On an nn.Module
        # subclass, assigning a Parameter to `self` before Module.__init__()
        # raises AttributeError because `_parameters` does not exist yet.
        super().__init__()

        log_conc = torch.randn(size)
        log_rate = torch.randn(size)

        # Single stacked parameter: vp[0] = log_conc, vp[1] = log_rate.
        self.vp = Param(torch.stack([log_conc, log_rate]), requires_grad=True)
        self.size = size
Esempio n. 20
0
    def __init__(self, size):
        """Variational-parameter container of shape (2, size).

        Row 0 holds ``m`` and row 1 ``log_s`` — the names suggest a
        mean/log-scale (Gaussian-style) parameterization; confirm in usage.

        Args:
            size: number of variational units per row.
        """
        # Bug fix: the parent module was never initialized. On an nn.Module
        # subclass, assigning a Parameter to `self` before Module.__init__()
        # raises AttributeError because `_parameters` does not exist yet.
        super().__init__()

        m = torch.randn(size)
        log_s = torch.randn(size)

        # Single stacked parameter: vp[0] = m, vp[1] = log_s.
        self.vp = Param(torch.stack([m, log_s]), requires_grad=True)
        self.size = size