def build_basic_model(self):
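        """Build the basic encoder-decoder (seq2seq) LSTM model, train it on
        batched data from the generator helpers, and save its weights."""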
        # Encoder
        encoder_inputs = Input(shape=(None, ), name='encoder_inputs')
        encoder_embedding = Embedding(self.num_encoder_tokens,
                                      200,
                                      name='encoder_embedding')(encoder_inputs)
        encoder_lstm = LSTM(self.latent_dim,
                            return_state=True,
                            name='encoder_lstm')
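        # Note: supports_masking only takes effect when an upstream layer actually
        # emits a mask (e.g. an Embedding built with mask_zero=True).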
        encoder_lstm.supports_masking = True
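        # Discard the LSTM output; keep the final hidden and cell states [h, c]
        # to initialise the decoder.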
        _, *encoder_states = encoder_lstm(encoder_embedding)

        # Decoder
        decoder_inputs = Input(shape=(None, ), name='decoder_inputs')
        decoder_embedding = Embedding(self.num_decoder_tokens,
                                      200,
                                      name='decoder_embedding')(decoder_inputs)
        decoder_lstm = LSTM(self.latent_dim,
                            return_state=True,
                            return_sequences=True,
                            name='decoder_lstm')
        rnn_outputs, *decoder_states = decoder_lstm(
            decoder_embedding, initial_state=encoder_states)
        decoder_dense = Dense(self.num_decoder_tokens,
                              activation='softmax',
                              name='decoder_dense')
        decoder_dense.supports_masking = True
        decoder_outputs = decoder_dense(
            Dropout(rate=0.4, name='dropout1')(rnn_outputs))

        basic_model = Model([encoder_inputs, decoder_inputs],
                            [decoder_outputs])
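        # sparse_categorical_crossentropy takes integer token ids as targets,
        # so target_data does not need to be one-hot encoded.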
        basic_model.compile(optimizer='Adam',
                            loss='sparse_categorical_crossentropy',
                            metrics=['accuracy'])

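        # Split encoder inputs, decoder (teacher-forcing) inputs and targets
        # into training and test subsets.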
        train_data, teach_data, train_data_y, test_data, test_teach_data, test_y = self.train_test_split(
            self.encoder_input_data, self.teach_data, self.target_data)

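        # generate_batch_data_random is assumed to yield
        # ([encoder_batch, decoder_batch], target_batch) tuples indefinitely,
        # one batch of BATCH_SIZE samples per call.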
        basic_model.fit_generator(
            self.generate_batch_data_random(train_data, teach_data,
                                            train_data_y, self.BATCH_SIZE),
            validation_data=self.generate_batch_data_random(
                test_data, test_teach_data, test_y, self.BATCH_SIZE),
            steps_per_epoch=train_data.shape[0] // self.BATCH_SIZE,
            validation_steps=test_data.shape[0] // self.BATCH_SIZE,
            epochs=10,
            workers=5)

        #        # Alternative in-memory training path, kept for reference:
        #        # Callbacks
        #        callback_list = [callbacks.ModelCheckpoint('C:/Users/user/Desktop/chan-si/QA_Generate/web/chan-si_API_1/basic_model_best_V2.h', save_best_only=True)]
        #        # Training
        #        basic_model_hist = basic_model.fit([self.encoder_input_data, self.decoder_input_data], self.decoder_target_data,
        #                        batch_size=32, epochs=15,
        #                        validation_split=0.2, callbacks=callback_list)

        basic_model.save_weights("P1_old.h5")
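
        #        # Sketch (not part of the original flow): the saved weights can later
        #        # be restored into an identically built model; `restored_model` below
        #        # is just an illustrative name.
        #        restored_model = self.creat_model()
        #        restored_model.load_weights("P1_old.h5")
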
    def creat_model(self):
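        """Rebuild and compile the same encoder-decoder architecture as
        build_basic_model and return it (e.g. so previously saved weights can be
        loaded into it)."""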

        # Encoder
        encoder_inputs = Input(shape=(None, ), name='encoder_inputs')
        encoder_embedding = Embedding(self.num_encoder_tokens,
                                      200,
                                      name='encoder_embedding')(encoder_inputs)
        encoder_lstm = LSTM(self.latent_dim,
                            return_state=True,
                            name='encoder_lstm')
        encoder_lstm.supports_masking = True
        _, *encoder_states = encoder_lstm(encoder_embedding)

        # Decoder
        decoder_inputs = Input(shape=(None, ), name='decoder_inputs')
        decoder_embedding = Embedding(self.num_decoder_tokens,
                                      200,
                                      name='decoder_embedding')(decoder_inputs)
        decoder_lstm = LSTM(self.latent_dim,
                            return_state=True,
                            return_sequences=True,
                            name='decoder_lstm')
        rnn_outputs, *decoder_states = decoder_lstm(
            decoder_embedding, initial_state=encoder_states)
        decoder_dense = Dense(self.num_decoder_tokens,
                              activation='softmax',
                              name='decoder_dense')
        decoder_dense.supports_masking = True
        decoder_outputs = decoder_dense(
            Dropout(rate=0.4, name='dropout1')(rnn_outputs))

        basic_model = Model([encoder_inputs, decoder_inputs],
                            [decoder_outputs])
        basic_model.compile(optimizer='Adam',
                            loss='sparse_categorical_crossentropy',
                            metrics=['accuracy'])

        # Debug check: print which graph the model's ops were registered in.
        # (TF1-style API; tf.get_default_graph() lives under tf.compat.v1 in TF2.)
        for op in tf.get_default_graph().get_operations():
            print(op.graph)
            break

        return basic_model