Code Example #1
 def __init__(self, out_dim=1, hidden_dim=32):
     super(SchNetReadout, self).__init__()
     with self.init_scope():
         self.linear1 = GraphLinear(hidden_dim)
         self.linear2 = GraphLinear(out_dim)
     self.out_dim = out_dim
     self.hidden_dim = hidden_dim
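
Note: with a single constructor argument, as in Example #1, the argument is out_size and in_size is inferred lazily at the first forward pass; Example #2 below passes both sizes explicitly. A minimal usage sketch (the shapes are illustrative assumptions; GraphLinear applies the linear transform along the last axis of a (minibatch, num_atoms, features) array):

import numpy as np
from chainer_chemistry.links import GraphLinear

x = np.zeros((5, 10, 32), dtype=np.float32)  # 5 molecules, 10 atoms, 32 features
layer = GraphLinear(32, 64)  # explicit in_size and out_size
assert layer(x).shape == (5, 10, 64)
lazy = GraphLinear(64)       # out_size only; in_size inferred at first call
assert lazy(x).shape == (5, 10, 64)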
Code Example #2
 def __init__(self, out_dim=1, in_channels=None, hidden_channels=32):
     super(SchNetReadout, self).__init__()
     with self.init_scope():
         self.linear1 = GraphLinear(in_channels, hidden_channels)
         self.linear2 = GraphLinear(hidden_channels, out_dim)
     self.out_dim = out_dim
     self.hidden_channels = hidden_channels
Code Example #3
 def __init__(self, hidden_dim=16, dropout_ratio=0.5):
     super(GINUpdate, self).__init__()
     with self.init_scope():
         # two GraphLinear layers + ReLU
         self.linear_g1 = GraphLinear(hidden_dim, hidden_dim)
         self.linear_g2 = GraphLinear(hidden_dim, hidden_dim)
     self.dropout_ratio = dropout_ratio
Code Example #4
 def __init__(self, in_channels, out_channels, n_edge_types=4, **kwargs):
     super(RelGCNUpdate, self).__init__()
     with self.init_scope():
         self.graph_linear_self = GraphLinear(in_channels, out_channels)
         self.graph_linear_edge = GraphLinear(in_channels,
                                              out_channels * n_edge_types)
     self.n_edge_types = n_edge_types
     self.in_channels = in_channels
     self.out_channels = out_channels
Code Example #5
 def __init__(self, in_channels, out_channels, num_edge_type=4):
     super(RelGCNUpdate, self).__init__()
     with self.init_scope():
         self.graph_linear_self = GraphLinear(in_channels, out_channels)
         self.graph_linear_edge = GraphLinear(in_channels,
                                              out_channels * num_edge_type)
     self.num_edge_type = num_edge_type
     self.in_channels = in_channels
     self.out_channels = out_channels
Code Example #6
 def __init__(self, in_channels=None, hidden_channels=16, out_channels=None,
              dropout_ratio=0.5, **kwargs):
     if out_channels is None:
         out_channels = hidden_channels
     super(GINUpdate, self).__init__()
     with self.init_scope():
         # two GraphLinear layers + ReLU
         self.linear_g1 = GraphLinear(in_channels, hidden_channels)
         self.linear_g2 = GraphLinear(hidden_channels, out_channels)
     self.dropout_ratio = dropout_ratio
Code Example #7
 def __init__(self, out_dim, in_channels=None, nobias=False,
              activation=functions.identity,
              activation_agg=functions.identity):
     super(GGNNReadout, self).__init__()
     with self.init_scope():
         self.i_layer = GraphLinear(in_channels, out_dim, nobias=nobias)
         self.j_layer = GraphLinear(in_channels, out_dim, nobias=nobias)
     self.out_dim = out_dim
     self.in_channels = in_channels
     self.nobias = nobias
     self.activation = activation
     self.activation_agg = activation_agg
Code Example #8
 def __init__(self,
              out_dim,
              hidden_dim=16,
              nobias=False,
              activation=functions.identity,
              activation_agg=functions.identity):
     super(GINReadout, self).__init__()
     with self.init_scope():
         self.i_layer = GraphLinear(None, out_dim, nobias=nobias)
         self.j_layer = GraphLinear(None, out_dim, nobias=nobias)
     self.out_dim = out_dim
     self.hidden_dim = hidden_dim
     self.nobias = nobias
     self.activation = activation
     self.activation_agg = activation_agg
Code Example #9
 def __init__(self, hidden_dim=64):
     super(SchNetUpdate, self).__init__()
     with self.init_scope():
         self.linear = chainer.ChainList(
             *[GraphLinear(hidden_dim) for _ in range(3)])
         self.cfconv = CFConv(hidden_dim=hidden_dim)
     self.hidden_dim = hidden_dim
Code Example #10
 def __init__(self, hidden_dim=16, num_edge_type=4):
     super(GGNNUpdate, self).__init__()
     with self.init_scope():
         self.graph_linear = GraphLinear(hidden_dim,
                                         num_edge_type * hidden_dim)
         self.update_layer = links.GRU(2 * hidden_dim, hidden_dim)
     self.num_edge_type = num_edge_type
Code Example #11
    def __init__(self,
                 out_channels=64,
                 num_edge_type=4,
                 ch_list=None,
                 n_atom_types=MAX_ATOMIC_NUM,
                 input_type='int',
                 scale_adj=None):
        super(RelGCN, self).__init__()
        if ch_list is None:
            ch_list = [16, 128, 64]
        with self.init_scope():
            if input_type == 'int':
                self.embed = EmbedAtomID(out_size=ch_list[0],
                                         in_size=n_atom_types)

            elif input_type == 'float':
                self.embed = GraphLinear(None, ch_list[0])

            else:
                raise ValueError(
                    "[ERROR] Unexpected value input type={}".format(
                        input_type))

            self.rgcn_convs = chainer.ChainList(*[
                RelGCNUpdate(ch_list[i], ch_list[i + 1], num_edge_type)
                for i in range(len(ch_list) - 1)
            ])

            self.rgcn_readout = GGNNReadout(out_dim=out_channels,
                                            hidden_dim=ch_list[-1],
                                            nobias=True,
                                            activation=functions.tanh)

        self.input_type = input_type
        self.scale_adj = scale_adj
Code Example #12
    def __init__(self, in_channels, out_channels, n_heads=3, n_edge_types=4,
                 dropout_ratio=-1., negative_slope=0.2, softmax_mode='across',
                 concat_heads=False):
        super(RelGATUpdate, self).__init__()
        with self.init_scope():
            self.message_layer = GraphLinear(
                in_channels, out_channels * n_edge_types * n_heads)
            self.attention_layer = GraphLinear(out_channels * 2, 1)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.n_edge_types = n_edge_types
        self.dropout_ratio = dropout_ratio
        self.softmax_mode = softmax_mode
        self.concat_heads = concat_heads
        self.negative_slope = negative_slope
Code Example #13
 def __init__(self,
              hidden_channels=16,
              n_edge_types=5,
              activation=functions.relu):
     super(GNNFiLMUpdate, self).__init__()
     self.n_edge_types = n_edge_types
     self.activation = activation
     with self.init_scope():
         self.W_linear = GraphLinear(in_size=None,
                                     out_size=self.n_edge_types *
                                     hidden_channels,
                                     nobias=True)  # W_l in eq. (6)
         self.W_g = GraphLinear(in_size=None,
                                out_size=self.n_edge_types *
                                hidden_channels * 2,
                                nobias=True)  # g in eq. (6)
         self.norm_layer = links.LayerNormalization()  # l in eq. (6)
Code Example #14
 def __init__(self, in_channels, out_channels, **kwargs):
     super(RSGCNUpdate, self).__init__()
     with self.init_scope():
         self.graph_linear = GraphLinear(in_channels,
                                         out_channels,
                                         nobias=True)
     self.in_channels = in_channels
     self.out_channels = out_channels
Code Example #15
 def __init__(self, hidden_dim=64, num_rbf=300, radius_resolution=0.1,
              gamma=10.0):
     super(SchNetUpdate, self).__init__()
     with self.init_scope():
         self.linear = chainer.ChainList(
             *[GraphLinear(hidden_dim) for _ in range(3)])
         self.cfconv = CFConv(
             num_rbf=num_rbf, radius_resolution=radius_resolution,
             gamma=gamma, hidden_dim=hidden_dim)
     self.hidden_dim = hidden_dim
Code Example #16
File: nfp_update.py Project: slryou41/reaction-gcnn
 def __init__(self, in_channels, out_channels, max_degree=6):
     super(NFPUpdate, self).__init__()
     num_degree_type = max_degree + 1
     with self.init_scope():
         self.graph_linears = chainer.ChainList(*[
             GraphLinear(in_channels, out_channels)
             for _ in range(num_degree_type)
         ])
     self.max_degree = max_degree
     self.in_channels = in_channels
     self.out_channels = out_channels
Code Example #17
File: graph_mlp.py Project: zizai/chainer-chemistry
    def __init__(self, channels, in_channels=None, activation=relu):
        super(GraphMLP, self).__init__()
        if not isinstance(channels, (list, numpy.ndarray)):
            raise TypeError('channels {} is expected to be a list, actual {}'
                            .format(channels, type(channels)))

        channels_list = [in_channels] + list(channels)
        layers = [GraphLinear(channels_list[i], channels_list[i+1])
                  for i in range(len(channels_list) - 1)]
        with self.init_scope():
            self.layers = chainer.ChainList(*layers)
        self.activation = activation
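
GraphMLP chains the GraphLinear layers built above, applying the activation between them. A hypothetical construction sketch (the import path is an assumption):

from chainer_chemistry.links import GraphMLP

# Two GraphLinear layers: (..., in) -> (..., 16) -> (..., 8); leaving
# in_channels at its default None lets the first layer infer its input
# size lazily at the first forward pass.
mlp = GraphMLP(channels=[16, 8])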
Code Example #18
File: ggnn_update.py Project: pyli/chainer-chemistry
 def __init__(self,
              hidden_dim=16,
              n_layers=4,
              n_atom_types=MAX_ATOMIC_NUM,
              num_edge_type=4,
              weight_tying=True):
     super(GGNNUpdate, self).__init__()
     n_layer = 1 if weight_tying else n_layers
     with self.init_scope():
         self.graph_linears = chainer.ChainList(*[
             GraphLinear(hidden_dim, num_edge_type * hidden_dim)
             for _ in range(n_layer)
         ])
         self.update_layer = links.GRU(2 * hidden_dim, hidden_dim)
     self.n_layers = n_layers
     self.num_edge_type = num_edge_type
     self.weight_tying = weight_tying
Code Example #19
 def __init__(self, in_channels=None, hidden_channels=16,
              out_channels=None, n_edge_types=4, **kwargs):
     if out_channels is None:
         out_channels = hidden_channels
     super(GGNNUpdate, self).__init__()
     if in_channels is None:
         gru_in_channels = None
     else:
         gru_in_channels = in_channels + hidden_channels
     with self.init_scope():
         self.graph_linear = GraphLinear(
             in_channels, n_edge_types * hidden_channels)
         self.update_layer = links.GRU(gru_in_channels, out_channels)
     self.n_edge_types = n_edge_types
     self.in_channels = in_channels
     self.hidden_channels = hidden_channels
     self.out_channels = out_channels
Code Example #20
    def __init__(self,
                 hidden_channels,
                 out_dim,
                 update_layer,
                 readout_layer,
                 n_update_layers=None,
                 out_channels=None,
                 wle_dim=None,
                 n_atom_types=MAX_ATOMIC_NUM,
                 n_edge_types=4,
                 dropout_ratio=-1.0,
                 with_wle=True,
                 concat_hidden=False,
                 sum_hidden=False,
                 weight_tying=False,
                 scale_adj=False,
                 activation=None,
                 use_batchnorm=False,
                 n_activation=None,
                 update_kwargs=None,
                 readout_kwargs=None,
                 wle_kwargs=None,
                 n_wle_types=MAX_WLE_NUM):
        super(GWLEGraphConvModel, self).__init__()

        # In general, the length of hidden_channels must be n_update_layers + 1
        if isinstance(hidden_channels, int):
            if n_update_layers is None:
                raise ValueError(
                    'n_update_layers must be given when hidden_channels '
                    'is an int')
            else:
                hidden_channels = [
                    hidden_channels for _ in range(n_update_layers + 1)
                ]
        elif isinstance(hidden_channels, list):
            if out_channels is None:
                n_update_layers = len(hidden_channels) - 1
            else:
                n_update_layers = len(hidden_channels)
        else:
            raise TypeError('Unexpected value for hidden_channels {}'.format(
                hidden_channels))

        if readout_layer == GeneralReadout and hidden_channels[-1] != out_dim:
            # With WLE, all hidden channels must match, but GeneralReadout
            # cannot change the dimension, so when GeneralReadout is used
            # together with WLE, the last hidden channel and out_dim must
            # be equal.
            if with_wle:
                raise ValueError('Unsupported combination.')
            else:
                hidden_channels[-1] = out_dim

        # With the with_wle, concat_hidden, sum_hidden, or weight_tying
        # options, all entries of hidden_channels must be the same.
        if with_wle or concat_hidden or sum_hidden or weight_tying:
            if not all(
                [in_dim == hidden_channels[0] for in_dim in hidden_channels]):
                raise ValueError(
                    'all hidden_channels must be the same, but got {}'.format(
                        hidden_channels))

        if with_wle and wle_dim is None:
            print('[WARNING] wle_dim is None, set to {}'.format(
                hidden_channels[0]))
            wle_dim = hidden_channels[0]

        if out_channels is None:
            in_channels_list = hidden_channels[:-1]
            out_channels_list = hidden_channels[1:]
        else:
            # For RelGAT concat_heads option
            in_channels_list = hidden_channels
            out_channels_list = out_channels
        assert len(in_channels_list) == n_update_layers
        assert len(out_channels_list) == n_update_layers

        n_use_update_layers = 1 if weight_tying else n_update_layers
        n_readout_layers = n_use_update_layers if concat_hidden or sum_hidden else 1
        n_activation = n_use_update_layers if n_activation is None else n_activation

        if update_kwargs is None:
            update_kwargs = {}
        if readout_kwargs is None:
            readout_kwargs = {}
        if wle_kwargs is None:
            wle_kwargs = {}

        with self.init_scope():
            self.embed = EmbedAtomID(out_size=hidden_channels[0],
                                     in_size=n_atom_types)  # +1 for label 0
            self.update_layers = chainer.ChainList(*[
                update_layer(in_channels=in_channels_list[i],
                             out_channels=out_channels_list[i],
                             n_edge_types=n_edge_types,
                             **update_kwargs)
                for i in range(n_use_update_layers)
            ])
            # With the weight_tying option all hidden_channels are the same,
            # so the -1 index is safe.
            self.readout_layers = chainer.ChainList(*[
                readout_layer(
                    out_dim=out_dim,
                    # in_channels=hidden_channels[-1],
                    in_channels=None,
                    **readout_kwargs) for _ in range(n_readout_layers)
            ])
            if with_wle:
                self.embed_wle = links.EmbedID(out_size=wle_dim,
                                               in_size=n_wle_types)
                # Gates
                self.gate_W1 = GraphLinear(in_size=hidden_channels[0],
                                           out_size=hidden_channels[0])
                self.gate_W2 = GraphLinear(in_size=wle_dim,
                                           out_size=hidden_channels[0])

            if use_batchnorm:
                self.bnorms = chainer.ChainList(*[
                    GraphBatchNormalization(out_channels_list[i])
                    for i in range(n_use_update_layers)
                ])

        self.readout_layer = readout_layer
        self.update_layer = update_layer
        self.weight_tying = weight_tying
        self.with_wle = with_wle
        self.concat_hidden = concat_hidden
        self.sum_hidden = sum_hidden
        self.scale_adj = scale_adj
        self.activation = activation
        self.dropout_ratio = dropout_ratio
        self.use_batchnorm = use_batchnorm
        self.n_activation = n_activation
        self.n_update_layers = n_update_layers
        self.n_edge_types = n_edge_types
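
Reading the constructor logic above, a hypothetical construction sketch (the classes come from other examples on this page; the integer values are illustrative assumptions):

model = GWLEGraphConvModel(
    hidden_channels=16,         # broadcast to [16, 16, 16, 16]
    out_dim=1,
    update_layer=GGNNUpdate,    # Example #19: accepts in_channels/out_channels/n_edge_types
    readout_layer=GGNNReadout,  # Example #7: accepts out_dim/in_channels
    n_update_layers=3,
)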
Code Example #21
 def __init__(self, in_channels, out_size):
     super(NFPReadout, self).__init__()
     with self.init_scope():
         self.output_weight = GraphLinear(in_channels, out_size)
     self.in_channels = in_channels
     self.out_size = out_size
Code Example #22
def model():
    # in_size and out_size are module-level constants defined elsewhere
    # in the original test file.
    l = GraphLinear(in_size=in_size, out_size=out_size)
    l.cleargrads()
    return l
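
A hypothetical usage of the fixture above, assuming the module-level constants are in_size = 3 and out_size = 8:

import numpy as np

in_size, out_size = 3, 8

l = model()
x = np.zeros((2, 4, in_size), dtype=np.float32)
assert l(x).shape == (2, 4, out_size)  # transform applied per node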