Example #1
def __init__(self, out_dim, hidden_channels=32, n_update_layers=4,
             n_atom_types=MAX_ATOMIC_NUM,
             use_batch_norm=False, readout=None, dropout_ratio=0.5):
    super(RSGCN, self).__init__()
    # All hidden layers share hidden_channels; only the last maps to out_dim.
    in_dims = [hidden_channels for _ in range(n_update_layers)]
    out_dims = [hidden_channels for _ in range(n_update_layers)]
    out_dims[n_update_layers - 1] = out_dim
    if readout is None:
        readout = GeneralReadout()
    with self.init_scope():
        self.embed = chainer_chemistry.links.EmbedAtomID(
            out_size=hidden_channels, in_size=n_atom_types)
        self.gconvs = chainer.ChainList(
            *[RSGCNUpdate(in_dims[i], out_dims[i])
              for i in range(n_update_layers)])
        if use_batch_norm:
            self.bnorms = chainer.ChainList(
                *[chainer_chemistry.links.GraphBatchNormalization(
                    out_dims[i]) for i in range(n_update_layers)])
        else:
            # Same-length list of placeholders, so the forward pass can
            # zip gconvs and bnorms unconditionally.
            self.bnorms = [None for _ in range(n_update_layers)]
        # A readout that is a chainer.Link must be registered inside
        # init_scope() so its parameters are tracked ...
        if isinstance(readout, chainer.Link):
            self.readout = readout
    # ... while a plain callable is assigned as an ordinary attribute.
    if not isinstance(readout, chainer.Link):
        self.readout = readout
    self.out_dim = out_dim
    self.hidden_channels = hidden_channels
    self.n_update_layers = n_update_layers
    self.dropout_ratio = dropout_ratio
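
For orientation, here is a hedged sketch of how the members built above are typically consumed in an RSGCN-style forward pass. This is an illustration of the pattern only, not the library's actual __call__ (the real implementation also handles adjacency normalization and differs in where dropout is applied):

def __call__(self, atom_array, adj):  # illustrative sketch, not library code
    h = self.embed(atom_array)  # (batch, atoms, hidden_channels)
    for gconv, bnorm in zip(self.gconvs, self.bnorms):
        h = gconv(h, adj)  # one graph-convolution update
        if bnorm is not None:  # None placeholders when use_batch_norm=False
            h = bnorm(h)
        if self.dropout_ratio > 0.0:
            h = chainer.functions.dropout(h, ratio=self.dropout_ratio)
    return self.readout(h)  # aggregate atom features into a graph-level vector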
Example #2
def __init__(self, n_channel, n_layer, n_atom, mode='sum'):
    super(PairToAtom, self).__init__()
    with self.init_scope():
        # Stack of n_layer Linear links; in_size=None is inferred lazily
        # from the first input.
        self.linearLayer = chainer.ChainList(
            *[links.Linear(None, n_channel) for _ in range(n_layer)])
        self.readout = GeneralReadout(mode=mode)
    self.n_atom = n_atom
    self.n_channel = n_channel
    self.mode = mode
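
A similar hedged sketch of how PairToAtom might use these members (an assumption based on the member names, not the library's verbatim code): flatten the pairwise feature grid, apply the linear stack, then let GeneralReadout reduce the partner-atom axis back to per-atom features.

import chainer.functions as F

def __call__(self, x):  # assumed x: (batch, n_atom, n_atom, in_channels)
    h = F.reshape(x, (-1, x.shape[-1]))  # 2-D so Linear can infer in_size
    for layer in self.linearLayer:
        h = layer(h)
    h = F.reshape(h, (x.shape[0], self.n_atom, self.n_atom, self.n_channel))
    # axis=2 reduces over partner atoms; assumes GeneralReadout accepts an
    # axis keyword argument.
    return self.readout(h, axis=2)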
Example #3
def __init__(self,
             out_dim,
             hidden_dim=32,
             hidden_dim_super=32,
             n_layers=4,
             n_heads=8,
             n_atom_types=MAX_ATOMIC_NUM,
             n_super_feature=2 + 2 + MAX_ATOMIC_NUM * 2,  # 4 + 2 + 4 + MAX_ATOMIC_NUM*2
             use_batch_norm=False,
             readout=None,
             dropout_ratio=0.5):
    super(RSGCN_GWM, self).__init__()
    in_dims = [hidden_dim for _ in range(n_layers)]
    out_dims = [hidden_dim for _ in range(n_layers)]
    out_dims[n_layers - 1] = out_dim
    if readout is None:
        readout = GeneralReadout()
    with self.init_scope():
        self.embed = chainer_chemistry.links.EmbedAtomID(
            in_size=n_atom_types, out_size=hidden_dim)
        self.gconvs = chainer.ChainList(*[
            RSGCNUpdate(in_dims[i], out_dims[i]) for i in range(n_layers)
        ])

        # GWM: Graph Warp Module coupling the atom graph with a supernode.
        self.embed_super = links.Linear(in_size=n_super_feature,
                                        out_size=hidden_dim_super)
        self.gwm = GWM(hidden_dim=hidden_dim,
                       hidden_dim_super=hidden_dim_super,
                       n_layers=n_layers,
                       n_heads=n_heads,
                       dropout_ratio=dropout_ratio,
                       tying_flag=False,
                       gpu=-1)

        if use_batch_norm:
            self.bnorms = chainer.ChainList(*[
                chainer_chemistry.links.GraphBatchNormalization(
                    out_dims[i]) for i in range(n_layers)
            ])
        else:
            self.bnorms = [None for _ in range(n_layers)]
        # As in Example #1, register the readout inside init_scope() only
        # when it is a chainer.Link.
        if isinstance(readout, chainer.Link):
            self.readout = readout

        self.linear_for_concat_super = links.Linear(in_size=None,
                                                    out_size=out_dim)
    if not isinstance(readout, chainer.Link):
        self.readout = readout
    self.out_dim = out_dim
    self.hidden_dim = hidden_dim
    self.hidden_dim_super = hidden_dim_super
    self.n_layers = n_layers
    self.dropout_ratio = dropout_ratio
Example #4
def __init__(self,
             weave_channels=None,
             hidden_dim=16,
             n_atom=WEAVE_DEFAULT_NUM_MAX_ATOMS,
             n_sub_layer=1,
             n_atom_types=MAX_ATOMIC_NUM,
             readout_mode='sum'):
    weave_channels = weave_channels or WEAVENET_DEFAULT_WEAVE_CHANNELS
    # One WeaveModule stage per entry in weave_channels.
    weave_module = [
        WeaveModule(n_atom, c, n_sub_layer, readout_mode=readout_mode)
        for c in weave_channels
    ]

    super(WeaveNet, self).__init__()
    with self.init_scope():
        self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)
        self.weave_module = chainer.ChainList(*weave_module)
        self.readout = GeneralReadout(mode=readout_mode)
    self.readout_mode = readout_mode
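
Construction is then straightforward. A minimal usage sketch (the explicit channel list here is an illustrative assumption; WEAVENET_DEFAULT_WEAVE_CHANNELS is used when none is given):

# Two weave stages of 50 channels each; readout_mode is shared by every
# WeaveModule and by the final GeneralReadout.
model = WeaveNet(weave_channels=[50, 50], hidden_dim=16, readout_mode='sum')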
Example #5
import pytest
from chainer import cuda
from chainer_chemistry.links.readout.general_readout import GeneralReadout  # path may vary by version

def test_forward_cpu_assert_raises(data):  # 'data' is a pytest fixture defined elsewhere
    atom_data = data[0]
    readout = GeneralReadout(mode='invalid')  # invalid mode; the error surfaces when the readout is called
    with pytest.raises(ValueError):
        cuda.to_cpu(readout(atom_data).data)
Example #6
def readouts():
    # One GeneralReadout per supported aggregation mode. Note this returns
    # a generator, so it can be iterated only once.
    modes = ['sum', 'max', 'summax']
    return (GeneralReadout(mode=mode) for mode in modes)
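
The three modes differ only in how the atom axis is reduced. A minimal shape sketch, assuming GeneralReadout reduces axis 1 of a (batch, atoms, channels) array and that 'summax' concatenates the 'sum' and 'max' results along the channel axis:

import numpy as np
from chainer_chemistry.links.readout.general_readout import GeneralReadout  # path may vary by version

atom_feats = np.random.rand(2, 5, 16).astype(np.float32)  # (batch, atoms, channels)
print(GeneralReadout(mode='sum')(atom_feats).shape)     # (2, 16): sum over atoms
print(GeneralReadout(mode='max')(atom_feats).shape)     # (2, 16): max over atoms
print(GeneralReadout(mode='summax')(atom_feats).shape)  # (2, 32): sum and max concatenated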