def data():
    numpy.random.seed(0)
    atom_data = numpy.random.randint(
        0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)).astype('i')
    adj_data = numpy.random.randint(
        0, high=2, size=(batch_size, n_edge_types, atom_size, atom_size)
    ).astype('f')
    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, atom_size, hidden_dim)).astype('f')

    embed = EmbedAtomID(in_size=MAX_ATOMIC_NUM, out_size=in_channels)
    embed_atom_data = embed(atom_data).data
    return embed_atom_data, adj_data, y_grad
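These look like pytest-style fixtures; names such as batch_size, atom_size, in_channels, hidden_dim and n_edge_types are module-level constants in each test file and are not shown here. A minimal sketch of plausible definitions so the fixture above runs standalone; the import paths are the usual chainer-chemistry ones and the values are illustrative assumptions:

import numpy

from chainer_chemistry.config import MAX_ATOMIC_NUM  # assumed import path
from chainer_chemistry.links import EmbedAtomID      # assumed import path

# Illustrative module-level constants; the actual values used by the tests
# are not shown in the snippets above.
batch_size = 2
atom_size = 5
in_channels = 6
hidden_dim = 6
n_edge_types = 4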
Example #2
def data():
    # type: () -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]  # NOQA
    numpy.random.seed(0)
    atom_data = numpy.random.randint(
        0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)).astype('i')
    adj_data = numpy.random.randint(
        0, high=2, size=(batch_size, num_edge_type, atom_size,
                         atom_size)).astype('f')
    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, atom_size, hidden_channels)).astype('f')
    y_grad_ = numpy.random.uniform(
        -1, 1, (batch_size, atom_size, hidden_channels)).astype('f')
    embed = EmbedAtomID(in_size=MAX_ATOMIC_NUM, out_size=hidden_channels)
    embed_atom_data = embed(atom_data).data
    return embed_atom_data, adj_data, y_grad, y_grad_
Example #3
def data():
    numpy.random.seed(0)
    atom_data = numpy.random.randint(
        0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)
    ).astype('i')
    # symmetric matrix
    dist_data = numpy.random.uniform(
        0, high=30, size=(batch_size, atom_size, atom_size)).astype('f')
    dist_data = (dist_data + dist_data.swapaxes(-1, -2)) / 2.

    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, atom_size, hidden_dim)).astype('f')
    embed = EmbedAtomID(in_size=MAX_ATOMIC_NUM, out_size=hidden_dim)
    embed_atom_data = embed(atom_data).data
    return embed_atom_data, dist_data, y_grad
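This fixture feeds a distance-based update test. A minimal usage sketch, assuming chainer_chemistry exports SchNetUpdate and that it accepts a node-feature tensor plus the pairwise distance matrix (treat the import path and call signature as assumptions):

from chainer_chemistry.links import SchNetUpdate  # assumed import path

embed_atom_data, dist_data, y_grad = data()
update = SchNetUpdate(hidden_dim)         # hidden_dim from the module-level constants
out = update(embed_atom_data, dist_data)  # expected shape: (batch_size, atom_size, hidden_dim)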
Example #4
    def __init__(self,
                 out_dim,
                 hidden_dim=16,
                 n_layers=4,
                 n_atom_types=MAX_ATOMIC_NUM,
                 concat_hidden=False,
                 weight_tying=True,
                 activation=functions.identity,
                 num_edge_type=4,
                 readout=True):
        super(GGNN, self).__init__()
        n_readout_layer = n_layers if concat_hidden else 1
        n_message_layer = 1 if weight_tying else n_layers
        with self.init_scope():
            # Update
            self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)
            self.update_layers = chainer.ChainList(*[
                GGNNUpdate(hidden_dim=hidden_dim, num_edge_type=num_edge_type)
                for _ in range(n_message_layer)
            ])
            # Readout
            if readout:
                if chainer_chemistry.__version__ == '0.7.0':
                    self.readout_layers = chainer.ChainList(*[
                        GGNNReadout(out_dim=out_dim,
                                    in_channels=hidden_dim,
                                    activation=activation,
                                    activation_agg=activation)
                        for _ in range(n_readout_layer)
                    ])
                else:
                    self.readout_layers = chainer.ChainList(*[
                        GGNNReadout(out_dim=out_dim,
                                    hidden_dim=hidden_dim,
                                    activation=activation,
                                    activation_agg=activation)
                        for _ in range(n_readout_layer)
                    ])

        self.out_dim = out_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.num_edge_type = num_edge_type
        self.activation = activation
        self.concat_hidden = concat_hidden
        self.weight_tying = weight_tying
        self.readout = readout
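A brief instantiation sketch to show how the weight_tying and concat_hidden flags drive the layer counts computed above; it assumes the module-level imports used by the class (chainer, chainer_chemistry, EmbedAtomID, GGNNUpdate, GGNNReadout, functions) are available:

# weight_tying=True shares a single update layer; concat_hidden=False keeps one readout.
model = GGNN(out_dim=1, hidden_dim=16, n_layers=4,
             weight_tying=True, concat_hidden=False)
assert len(model.update_layers) == 1
assert len(model.readout_layers) == 1

# Untied weights plus concatenated hidden states build n_layers of each.
model = GGNN(out_dim=1, hidden_dim=16, n_layers=4,
             weight_tying=False, concat_hidden=True)
assert len(model.update_layers) == 4
assert len(model.readout_layers) == 4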
Example #5
    def __init__(self,
                 out_dim=1,
                 hidden_dim=64,
                 n_layers=3,
                 readout_hidden_dim=32,
                 n_atom_types=MAX_ATOMIC_NUM,
                 concat_hidden=False):
        super(SchNet, self).__init__()
        with self.init_scope():
            self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)
            self.update_layers = chainer.ChainList(
                *[SchNetUpdate(hidden_dim) for _ in range(n_layers)])
            self.readout_layer = SchNetReadout(out_dim, readout_hidden_dim)
        self.out_dim = out_dim
        self.hidden_dim = hidden_dim
        self.readout_hidden_dim = readout_hidden_dim
        self.n_layers = n_layers
        self.concat_hidden = concat_hidden
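A small sketch of the constructed chain; the shape comment assumes EmbedAtomID maps a (batch, atoms) integer array to (batch, atoms, hidden_dim) features, as in the data() fixtures above:

import numpy

model = SchNet()                      # defaults: out_dim=1, hidden_dim=64, n_layers=3
assert len(model.update_layers) == 3  # one SchNetUpdate per message-passing step

atom_data = numpy.zeros((2, 5), dtype='i')  # (batch_size, atom_size) atom IDs
h = model.embed(atom_data)                  # Variable with shape (2, 5, 64)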
Example #6
    def __init__(self,
                 weave_channels=None,
                 hidden_dim=16,
                 n_atom=WEAVE_DEFAULT_NUM_MAX_ATOMS,
                 n_sub_layer=1,
                 n_atom_types=MAX_ATOMIC_NUM,
                 readout_mode='sum'):
        weave_channels = weave_channels or WEAVENET_DEFAULT_WEAVE_CHANNELS
        weave_module = [
            WeaveModule(n_atom, c, n_sub_layer, readout_mode=readout_mode)
            for c in weave_channels
        ]

        super(WeaveNet, self).__init__()
        with self.init_scope():
            self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)
            self.weave_module = chainer.ChainList(*weave_module)
            self.readout = GeneralReadout(mode=readout_mode)
        self.readout_mode = readout_mode
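One WeaveModule is built per entry of weave_channels, so the length of that list controls the network depth. A quick check, assuming WEAVENET_DEFAULT_WEAVE_CHANNELS and the other module-level names referenced above are imported:

model = WeaveNet()  # weave_channels falls back to WEAVENET_DEFAULT_WEAVE_CHANNELS
assert len(model.weave_module) == len(WEAVENET_DEFAULT_WEAVE_CHANNELS)

model = WeaveNet(weave_channels=[50, 50, 50])
assert len(model.weave_module) == 3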
Example #7
def data():
    numpy.random.seed(0)
    atom_data = numpy.random.randint(0,
                                     high=MAX_ATOMIC_NUM,
                                     size=(batch_size, atom_size)).astype('i')
    adj_data = numpy.random.randint(0,
                                    high=2,
                                    size=(batch_size, atom_size,
                                          atom_size)).astype('f')
    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, atom_size, hidden_channels)).astype('f')

    embed = EmbedAtomID(in_size=MAX_ATOMIC_NUM, out_size=hidden_channels)
    embed_atom_data = embed(atom_data).data
    degree_mat = numpy.sum(adj_data, axis=1)
    deg_conds = numpy.array([
        numpy.broadcast_to(((degree_mat - degree) == 0)[:, :, None],
                           embed_atom_data.shape)
        for degree in range(1, num_degree_type + 1)
    ])
    return embed_atom_data, adj_data, deg_conds, y_grad
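deg_conds stacks one boolean mask per degree value, broadcast to the embedding shape, so downstream code can pick out the features of atoms with a given degree (for example with numpy.where). A self-contained illustration of the masking with toy values (all names below are hypothetical):

import numpy

adj = numpy.array([[[0., 1., 1.],
                    [1., 0., 0.],
                    [1., 0., 0.]]], dtype='f')   # one graph, three atoms
features = numpy.ones((1, 3, 4), dtype='f')      # (batch, atoms, channels)

degree = adj.sum(axis=1)                          # per-atom degree: [[2., 1., 1.]]
mask_deg1 = numpy.broadcast_to((degree == 1)[:, :, None], features.shape)
deg1_features = numpy.where(mask_deg1, features, 0.)  # keep only degree-1 atoms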
Example #8
    def __init__(self,
                 out_dim,
                 hidden_dim=16,
                 n_layers=4,
                 max_degree=6,
                 n_atom_types=MAX_ATOMIC_NUM,
                 concat_hidden=False):
        super(NFP, self).__init__()
        num_degree_type = max_degree + 1
        with self.init_scope():
            self.embed = EmbedAtomID(in_size=n_atom_types, out_size=hidden_dim)
            self.layers = chainer.ChainList(*[
                NFPUpdate(hidden_dim, hidden_dim, max_degree=max_degree)
                for _ in range(n_layers)
            ])
            self.read_out_layers = chainer.ChainList(
                *[NFPReadout(hidden_dim, out_dim) for _ in range(n_layers)])
        self.out_dim = out_dim
        self.hidden_dim = hidden_dim
        self.max_degree = max_degree
        self.num_degree_type = num_degree_type
        self.n_layers = n_layers
        self.concat_hidden = concat_hidden
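As with GGNN, a quick structural sketch; it assumes NFPUpdate and NFPReadout are importable as used above:

model = NFP(out_dim=1)                  # defaults: hidden_dim=16, n_layers=4, max_degree=6
assert model.num_degree_type == 7       # max_degree + 1
assert len(model.layers) == 4           # one NFPUpdate per layer
assert len(model.read_out_layers) == 4  # one NFPReadout per layer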
Example #9
    def __init__(self,
                 hidden_channels,
                 out_dim,
                 update_layer,
                 readout_layer,
                 n_update_layers=None,
                 out_channels=None,
                 super_node_dim=None,
                 n_atom_types=MAX_ATOMIC_NUM,
                 n_edge_types=4,
                 dropout_ratio=-1.0,
                 with_gwm=True,
                 concat_hidden=False,
                 sum_hidden=False,
                 weight_tying=False,
                 scale_adj=False,
                 activation=None,
                 use_batchnorm=False,
                 n_activation=None,
                 update_kwargs=None,
                 readout_kwargs=None,
                 gwm_kwargs=None):
        super(GWMGraphConvModel, self).__init__()

        # In general, len(hidden_channels) must be n_update_layers + 1
        if isinstance(hidden_channels, int):
            if n_update_layers is None:
                raise ValueError(
                    'n_update_layers must be specified when hidden_channels '
                    'is given as an int')
            else:
                hidden_channels = [
                    hidden_channels for _ in range(n_update_layers + 1)
                ]
        elif isinstance(hidden_channels, list):
            if out_channels is None:
                n_update_layers = len(hidden_channels) - 1
            else:
                n_update_layers = len(hidden_channels)
        else:
            raise TypeError('Unexpected value for hidden_channels {}'.format(
                hidden_channels))

        if readout_layer == GeneralReadout and hidden_channels[-1] != out_dim:
            # When GWM is used, all hidden channels must be equal, but
            # GeneralReadout cannot change the dimension. So when GeneralReadout
            # is combined with GWM, the last hidden channel and out_dim must
            # already match.
            if with_gwm:
                raise ValueError('Unsupported combination.')
            else:
                hidden_channels[-1] = out_dim

        # When with_gwm, concat_hidden, sum_hidden or weight_tying is used,
        # all hidden_channels must be equal.
        if with_gwm or concat_hidden or sum_hidden or weight_tying:
            if not all(
                [in_dim == hidden_channels[0] for in_dim in hidden_channels]):
                raise ValueError(
                    'hidden_channels must all be equal, but got {}'.format(
                        hidden_channels))

        if with_gwm and super_node_dim is None:
            print('[WARNING] super_node_dim is None, set to {}'.format(
                hidden_channels[0]))
            super_node_dim = hidden_channels[0]

        if out_channels is None:
            in_channels_list = hidden_channels[:-1]
            out_channels_list = hidden_channels[1:]
        else:
            # For RelGAT concat_heads option
            in_channels_list = hidden_channels
            out_channels_list = out_channels
        assert len(in_channels_list) == n_update_layers
        assert len(out_channels_list) == n_update_layers

        n_use_update_layers = 1 if weight_tying else n_update_layers
        n_readout_layers = n_use_update_layers if concat_hidden or sum_hidden else 1
        n_activation = n_use_update_layers if n_activation is None else n_activation

        if update_kwargs is None:
            update_kwargs = {}
        if readout_kwargs is None:
            readout_kwargs = {}
        if gwm_kwargs is None:
            gwm_kwargs = {}

        with self.init_scope():
            self.embed = EmbedAtomID(out_size=hidden_channels[0],
                                     in_size=n_atom_types)
            self.update_layers = chainer.ChainList(*[
                update_layer(in_channels=in_channels_list[i],
                             out_channels=out_channels_list[i],
                             n_edge_types=n_edge_types,
                             **update_kwargs)
                for i in range(n_use_update_layers)
            ])
            # When weight_tying is used, all hidden_channels are equal,
            # so indexing with -1 is safe here.
            self.readout_layers = chainer.ChainList(*[
                readout_layer(
                    out_dim=out_dim,
                    # in_channels=hidden_channels[-1],
                    in_channels=None,
                    **readout_kwargs) for _ in range(n_readout_layers)
            ])
            if with_gwm:
                self.gwm = GWM(hidden_dim=hidden_channels[0],
                               hidden_dim_super=super_node_dim,
                               n_layers=n_use_update_layers,
                               **gwm_kwargs)
                self.embed_super = links.Linear(None, out_size=super_node_dim)
                self.linear_for_concat_super = links.Linear(in_size=None,
                                                            out_size=out_dim)
            if use_batchnorm:
                self.bnorms = chainer.ChainList(*[
                    GraphBatchNormalization(out_channels_list[i])
                    for i in range(n_use_update_layers)
                ])

        self.readout_layer = readout_layer
        self.update_layer = update_layer
        self.weight_tying = weight_tying
        self.with_gwm = with_gwm
        self.concat_hidden = concat_hidden
        self.sum_hidden = sum_hidden
        self.scale_adj = scale_adj
        self.activation = activation
        self.dropout_ratio = dropout_ratio
        self.use_batchnorm = use_batchnorm
        self.n_activation = n_activation
        self.n_update_layers = n_update_layers
        self.n_edge_types = n_edge_types
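A sketch of the two accepted forms of hidden_channels, using chainer-chemistry's GGNNUpdate and GGNNReadout, whose keyword arguments match the constructor calls above; the import path is an assumption:

from chainer_chemistry.links import GGNNUpdate, GGNNReadout  # assumed import path

# An int plus n_update_layers expands to a uniform channel list of length n + 1.
model = GWMGraphConvModel(hidden_channels=16, out_dim=4, n_update_layers=3,
                          update_layer=GGNNUpdate, readout_layer=GGNNReadout,
                          with_gwm=False)
assert len(model.update_layers) == 3
assert len(model.readout_layers) == 1

# A list gives per-layer channels; n_update_layers becomes len(hidden_channels) - 1.
model = GWMGraphConvModel(hidden_channels=[16, 32, 64], out_dim=4,
                          update_layer=GGNNUpdate, readout_layer=GGNNReadout,
                          with_gwm=False)
assert model.n_update_layers == 2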