Example #1
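Tests MEGNetLayer in isolation: the layer's three outputs should have the configured last-dimension widths for atoms, bonds and state, a layer rebuilt with from_config(layer.get_config()) should report the same shapes, and a small functional Model wired from seven symbolic inputs should predict atom features of the expected shape.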
    def test_megnet(self):
        units_v = [13, 14, 16]
        units_e = [16, 16, 17]
        units_u = [13, 14, 18]
        layer = MEGNetLayer(units_v, units_e, units_u)
        out = layer(self.x)
        self.assertListEqual([i._keras_shape[-1] for i in out], [units_v[-1], units_e[-1], units_u[-1]])
        new_layer = MEGNetLayer.from_config(layer.get_config())
        out2 = new_layer(self.x)
        self.assertListEqual([i._keras_shape[-1] for i in out2], [units_v[-1], units_e[-1], units_u[-1]])

        int32 = 'int32'
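        # MEGNetLayer takes seven inputs: atom features, bond features,
        # state (global) features, the two bond-endpoint atom-index vectors,
        # and the atom-to-structure / bond-to-structure assignment vectors
        # (labelled in Example #7 as bond index1/index2, atom_ind and bond_ind).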
        x1 = np.random.rand(1, 5, 10)
        x2 = np.random.rand(1, 6, 5)
        x3 = np.random.rand(1, 2, 20)
        x4 = np.array([0, 1, 2, 3, 3, 4]).reshape([1, -1])
        x5 = np.array([1, 0, 3, 2, 4, 3]).reshape([1, -1])
        x6 = np.array([[0, 0, 1, 1, 1]])
        x7 = np.array([[0, 0, 1, 1, 1, 1]])
        x1_ = Input(shape=(None, 10))
        x2_ = Input(shape=(None, 5))
        x3_ = Input(shape=(None, 20))
        x4_ = Input(shape=(None,), dtype=int32)
        x5_ = Input(shape=(None,), dtype=int32)
        x6_ = Input(shape=(None,), dtype=int32)
        x7_ = Input(shape=(None,), dtype=int32)
        out = MEGNetLayer([10, 5], [20, 4], [30, 3])(
            [x1_, x2_, x3_, x4_, x5_, x6_, x7_])
        model = Model(inputs=[x1_, x2_, x3_, x4_, x5_, x6_, x7_], outputs=out)
        model.compile('adam', 'mse')
        ans = model.predict([x1, x2, x3, x4, x5, x6, x7])
        self.assertEqual(ans[0].shape, (1, 5, 5))
Example #2
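A setUpClass fixture: it builds the seven symbolic inputs, stacks a MEGNetLayer with a Dense readout on the state output, compiles the model, and prepares matching random arrays and a Generator for later tests.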
    @classmethod
    def setUpClass(cls):
        cls.n_feature = 5
        cls.n_bond_features = 6
        cls.n_global_features = 2
        cls.inp = [
            Input(shape=(None, cls.n_feature)),
            Input(shape=(None, cls.n_bond_features)),
            Input(shape=(None, cls.n_global_features)),
            Input(shape=(None,), dtype='int32'),
            Input(shape=(None,), dtype='int32'),
            Input(shape=(None,), dtype='int32'),
            Input(shape=(None,), dtype='int32'),
        ]
        units_v = [2, 2]
        units_e = [2, 2]
        units_u = [2]
        layer = MEGNetLayer(units_v, units_e, units_u)
        out = layer(cls.inp)
        cls.out = Dense(1)(out[2])
        cls.model = Model(inputs=cls.inp, outputs=cls.out)
        cls.model.compile(loss='mse', optimizer='adam')
        cls.x = [
            np.random.normal(size=(1, 4, cls.n_feature)),
            np.random.normal(size=(1, 6, cls.n_bond_features)),
            np.random.normal(size=(1, 2, cls.n_global_features)),
            np.array([[0, 0, 1, 1, 2, 3]]),
            np.array([[1, 1, 0, 0, 3, 2]]),
            np.array([[0, 0, 1, 1]]),
            np.array([[0, 0, 0, 0, 1, 1]]),
        ]
        cls.y = np.random.normal(size=(1, 2, 1))
        cls.train_gen = Generator(cls.x, cls.y)
Example #3
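A one_block helper excerpted from a larger model-building function: it optionally passes the atom, bond and state tensors through feed-forward blocks (ff), applies a named MEGNetLayer, and optionally applies Dropout to each of the three outputs. Names such as n1, n2, act, reg, dropout, dropout_training and x4 through x7 come from the enclosing scope.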
    def one_block(a, b, c, has_ff=True, block_index=0):
        if has_ff:
            x1_ = ff(a, name_prefix=f"block_{block_index}_atom_ff")
            x2_ = ff(b, name_prefix=f"block_{block_index}_bond_ff")
            x3_ = ff(c, name_prefix=f"block_{block_index}_state_ff")
        else:
            x1_ = a
            x2_ = b
            x3_ = c
        out = MEGNetLayer(
            [n1, n1, n2],
            [n1, n1, n2],
            [n1, n1, n2],
            pool_method="mean",
            activation=act,
            kernel_regularizer=reg,
            name=f"megnet_{block_index}",
        )([x1_, x2_, x3_, x4, x5, x6, x7])

        x1_temp = out[0]
        x2_temp = out[1]
        x3_temp = out[2]
        if dropout:
            x1_temp = Dropout(dropout, name=f"dropout_atom_{block_index}")(
                x1_temp, training=dropout_training)
            x2_temp = Dropout(dropout, name=f"dropout_bond_{block_index}")(
                x2_temp, training=dropout_training)
            x3_temp = Dropout(dropout, name=f"dropout_state_{block_index}")(
                x3_temp, training=dropout_training)
        return x1_temp, x2_temp, x3_temp
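Example #4
Another copy of the serialization round-trip test from Example #1.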
    def test_megnet(self):
        units_v = [13, 14, 16]
        units_e = [16, 16, 17]
        units_u = [13, 14, 18]
        layer = MEGNetLayer(units_v, units_e, units_u)
        out = layer(self.x)
        self.assertListEqual([i._keras_shape[-1] for i in out],
                             [units_v[-1], units_e[-1], units_u[-1]])
        new_layer = MEGNetLayer.from_config(layer.get_config())
        out2 = new_layer(self.x)
        self.assertListEqual([i._keras_shape[-1] for i in out2],
                             [units_v[-1], units_e[-1], units_u[-1]])
Example #5
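Tests ReduceLRUponNan: one NaN-loss epoch should halve the learning rate from 1e-3 to 0.5e-3, and in the three-stage run below the two NaN epochs halve a fresh model's rate twice, to 0.25e-3, while the model is recovered from the checkpoints written by ModelCheckpointMAE.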
    def test_reduce_lr_upon_nan(self):
        with ScratchDir('.'):
            callbacks = [ReduceLRUponNan(patience=100)]
            self.assertAlmostEqual(float(kb.get_value(self.model.optimizer.lr)), 1e-3)
            gen = Generator(self.x, np.array([1, np.nan]).reshape((1, 2, 1)))
            self.model.fit_generator(gen, steps_per_epoch=1, epochs=1, callbacks=callbacks, verbose=0)
            self.assertAlmostEqual(float(kb.get_value(self.model.optimizer.lr)), 0.5e-3)

            inp = [
                Input(shape=(None, self.n_feature)),
                Input(shape=(None, self.n_bond_features)),
                Input(shape=(None, self.n_global_features)),
                Input(shape=(None,), dtype='int32'),
                Input(shape=(None,), dtype='int32'),
                Input(shape=(None,), dtype='int32'),
                Input(shape=(None,), dtype='int32'),
            ]
            units_v = [2, 2]
            units_e = [2, 2]
            units_u = [2]
            layer = MEGNetLayer(units_v, units_e, units_u)
            out = layer(inp)
            out = Dense(1)(out[2])
            model = Model(inputs=inp, outputs=out)
            model.compile(loss='mse', optimizer='adam')
            x = [
                np.random.normal(size=(1, 4, self.n_feature)),
                np.random.normal(size=(1, 6, self.n_bond_features)),
                np.random.normal(size=(1, 2, self.n_global_features)),
                np.array([[0, 0, 1, 1, 2, 3]]),
                np.array([[1, 1, 0, 0, 3, 2]]),
                np.array([[0, 0, 1, 1]]),
                np.array([[0, 0, 0, 0, 1, 1]]),
            ]
            y = np.random.normal(size=(1, 2, 1))
            train_gen = Generator(x, y)

            callbacks = [
                ReduceLRUponNan(filepath='./val_mae_{epoch:05d}_{val_mae:.6f}.hdf5', patience=100),
                ModelCheckpointMAE(filepath='./val_mae_{epoch:05d}_{val_mae:.6f}.hdf5',
                                   val_gen=train_gen, steps_per_val=1),
            ]
            # 1. normal training; ModelCheckpointMAE saves checkpoints
            model.fit_generator(train_gen, steps_per_epoch=1, epochs=2, callbacks=callbacks, verbose=1)
            # 2. NaN loss triggers ReduceLRUponNan; two epochs halve the learning rate twice
            model.fit_generator(gen, steps_per_epoch=1, epochs=2, callbacks=callbacks, verbose=1)
            # 3. normal training again, recovering the model saved in step 1
            model.fit_generator(train_gen, steps_per_epoch=1, epochs=2, callbacks=callbacks, verbose=1)
            self.assertAlmostEqual(float(kb.get_value(model.optimizer.lr)), 0.25e-3)
Example #6
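An unnamed variant of the one_block helper in Example #3: the same ff / MEGNetLayer / Dropout pipeline, without layer names or a block index.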
        def one_block(a, b, c, has_ff=True):
            if has_ff:
                x1_ = ff(a)
                x2_ = ff(b)
                x3_ = ff(c)
            else:
                x1_ = a
                x2_ = b
                x3_ = c
            out = MEGNetLayer(
                [n1, n1, n2], [n1, n1, n2], [n1, n1, n2],
                pool_method='mean', activation=act, kernel_regularizer=reg)(
                [x1_, x2_, x3_, x4, x5, x6, x7])

            x1_temp = out[0]
            x2_temp = out[1]
            x3_temp = out[2]
            if dropout:
                x1_temp = Dropout(dropout)(x1_temp)
                x2_temp = Dropout(dropout)(x2_temp)
                x3_temp = Dropout(dropout)(x3_temp)
            return x1_temp, x2_temp, x3_temp
Example #7
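Builds a Ga/As energy model: atom and bond features are embedded, refined by two residual MEGNet blocks, gathered per species, passed through separate two-hidden-layer networks, and summed per structure with tf.math.segment_sum; the edge and global branches are zeroed out but kept connected to the output.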
def create_model():
    n_atom_feature = 8
    n_bond_feature = 1
    n_global_feature = 0
    Xavier_init = initializers.VarianceScaling(scale=1.0,
                                               mode='fan_avg',
                                               distribution='normal')

    x1 = Input(shape=(None, n_atom_feature))  # atom feature placeholder
    x2 = Input(shape=(None, n_bond_feature))  # bond feature placeholder
    x3 = Input(shape=(None, n_global_feature))  # global feature placeholder
    x4 = Input(shape=(None, ), dtype='int32')  # bond index1 placeholder
    x5 = Input(shape=(None, ), dtype='int32')  # bond index2 placeholder
    x6 = Input(shape=(None, ), dtype='int32')  # atom_ind placeholder
    x7 = Input(shape=(None, ), dtype='int32')  # bond_ind placeholder
    x8 = Input(shape=(None, ), dtype='int32')   # Ga_index placeholder: gathers Ga nodes into a single tensor
    x9 = Input(shape=(None, ), dtype='int32')   # As_index placeholder: gathers As nodes into a single tensor
    x10 = Input(shape=(None, ), dtype='int32')  # Ga_ind placeholder: sums the atomic energies of Ga atoms per structure
    x11 = Input(shape=(None, ), dtype='int32')  # As_ind placeholder: sums the atomic energies of As atoms per structure

    with kb.name_scope("embedding"):
        embed_atom_fea = layers.Dense(16,
                                      name="embedding_atom_fea",
                                      activation='tanh',
                                      kernel_initializer=Xavier_init)
        embed_bond_fea = layers.Dense(16,
                                      name="embedding_bond_fea",
                                      activation='tanh',
                                      kernel_initializer=Xavier_init)
        GN_1 = MEGNetLayer([16, 16], [16, 16], [1],
                           pool_method='mean',
                           activation=softplus2)
        GN_2 = MEGNetLayer([16, 16], [16, 16], [1],
                           pool_method='mean',
                           activation=softplus2)

        x1_ = embed_atom_fea(x1)
        x2_ = embed_bond_fea(x2)

        out1 = GN_1([x1_, x2_, x3, x4, x5, x6, x7])
        x1__ = layers.Add()([x1_, out1[0]])
        x2__ = layers.Add()([x2_, out1[1]])

        out2 = GN_2([x1__, x2__, out1[2], x4, x5, x6, x7])
        x1___ = layers.Add()([x1__, out2[0]])
        x2___ = layers.Add()([x2__, out2[1]])

        Ga_idx = layers.Lambda(lambda x: tf.reshape(x, (-1, )),
                               name="Ga_idx")(x8)
        As_idx = layers.Lambda(lambda x: tf.reshape(x, (-1, )),
                               name="As_idx")(x9)
        Ga = layers.Lambda(lambda x: tf.gather(x, Ga_idx, axis=1),
                           name="Ga")(x1___)
        As = layers.Lambda(lambda x: tf.gather(x, As_idx, axis=1),
                           name="As")(x1___)

        Ga_grp = layers.Lambda(lambda x: tf.reshape(x, (-1, )),
                               name="Ga_grp")(x10)
        As_grp = layers.Lambda(lambda x: tf.reshape(x, (-1, )),
                               name="As_grp")(x11)

        #node = Set2Set(T=3, n_hidden=3)([x1___, x6])
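        # The edge and global outputs are zero-multiplied below so they add
        # nothing to the energy yet keep every input connected to the graph.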
        edge = Set2Set(T=3, n_hidden=3)([x2___, x7])
        edge = layers.Lambda(lambda x: kb.sum(x, axis=2, keepdims=True),
                             name="sum_edge")(edge)
        zero = layers.Lambda(lambda x: tf.zeros_like(x),
                             name="zero_like_edge")(edge)
        zero_edge = layers.Multiply(name="zero_edge")([edge, zero])
        zero_glob = layers.Multiply(name="zero_glob")([out2[2], zero])
        #final = layers.Concatenate(axis=-1)([node, edge, out2[2]])

    with kb.name_scope("Ga"):
        hidden_Ga1 = layers.Dense(10,
                                  name="hidden_Ga1",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        hidden_Ga2 = layers.Dense(10,
                                  name="hidden_Ga2",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        output_Ga = layers.Dense(1,
                                 name="output_Ga",
                                 activation=None,
                                 kernel_initializer=Xavier_init)

        E_Ga = hidden_Ga1(Ga)
        E_Ga = hidden_Ga2(E_Ga)
        E_Ga = output_Ga(E_Ga)

        E_Ga = layers.Lambda(lambda x: tf.reshape(x, (-1, 1)),
                             name="reshape_E_Ga")(E_Ga)
        sum_Ga = layers.Lambda(lambda x: tf.math.segment_sum(x, Ga_grp),
                               name="sum_Ga")(E_Ga)
        #sum_Ga = layers.Lambda(lambda x: tf.reshape(x, glob.shape), name="reshape_sum_Ga")(sum_Ga)
        sum_Ga = layers.Lambda(lambda x: tf.expand_dims(x, axis=0),
                               name="reshape_sum_Ga")(sum_Ga)

    with kb.name_scope("As"):
        hidden_As1 = layers.Dense(10,
                                  name="hidden_As1",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        hidden_As2 = layers.Dense(10,
                                  name="hidden_As2",
                                  activation='tanh',
                                  kernel_initializer=Xavier_init)
        output_As = layers.Dense(1,
                                 name="output_As",
                                 activation=None,
                                 kernel_initializer=Xavier_init)

        E_As = hidden_As1(As)
        E_As = hidden_As2(E_As)
        E_As = output_As(E_As)

        E_As = layers.Lambda(lambda x: tf.reshape(x, (-1, 1)),
                             name="reshape_E_As")(E_As)
        sum_As = layers.Lambda(lambda x: tf.math.segment_sum(x, As_grp),
                               name="sum_As")(E_As)
        #sum_As = layers.Lambda(lambda x: tf.reshape(x, glob.shape), name="reshape_sum_As")(sum_As)
        sum_As = layers.Lambda(lambda x: tf.expand_dims(x, axis=0),
                               name="reshape_sum_As")(sum_As)

    total_E = layers.Add(name="total_E")([sum_Ga, sum_As])
    final_E = layers.Add(name="final_E")([total_E, zero_edge, zero_glob])
    #	total_E = layers.Lambda(lambda x: tf.expand_dims(x, axis=0))(total_E)
    return Model(inputs=[x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11],
                 outputs=final_E)