import os
from collections import OrderedDict

from keras import backend as K
from keras import activations
from keras.models import Sequential, load_model
from keras.engine import InputSpec, Model
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import Wrapper
from keras.layers import Input, Embedding, Flatten, Dropout, Lambda, concatenate, Dense

if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # Hyperparameters are stored as attributes on an OrderedDict instance
    # (unlike a plain dict, an OrderedDict accepts arbitrary attributes).
    config = OrderedDict()
    config.MAX_WINDOW_SIZE = 10
    config.MAX_MENTION_LENGTH = 10
    config.EMBEDDING_TRAINABLE = False
    config.WORD_EMBEDDING_DIM = 300      # first
    #config.ENTITY_EMBEDDING_DIM = 300   # second
    config.MAX_ENTITY_DESC_LENGTH = 150  # no
    config.MENTION_CONTEXT_LATENT_SIZE = 50
    config.LSTM_SIZE = 300
    config.DROPOUT = 0.3
    config.ACTIVATION_FUNCTION = 'tanh'

    config.batch_size = 1024
    config.num_of_neg = 1     # number of negative samples per sentence
    config.start_epochs = 0   # epoch to start from
    config.epochs = 5         # number of training epochs
    config.batch_epochs = 1   # number of batches between evaluations

    # reinforcement learning config
    config.updaterate = 1
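
    # A minimal sketch (an assumption, not the author's actual model) of how
    # these hyperparameters could wire up a mention-context encoder using the
    # layers imported above: embed the context window, encode it with an LSTM,
    # and project it into the latent space. VOCAB_SIZE and the topology are
    # hypothetical placeholders.
    VOCAB_SIZE = 100000  # hypothetical vocabulary size
    context_input = Input(shape=(config.MAX_WINDOW_SIZE,), dtype='int32')
    embedded = Embedding(input_dim=VOCAB_SIZE,
                         output_dim=config.WORD_EMBEDDING_DIM,
                         trainable=config.EMBEDDING_TRAINABLE)(context_input)
    encoded = LSTM(config.LSTM_SIZE)(embedded)
    encoded = Dropout(config.DROPOUT)(encoded)
    latent = Dense(config.MENTION_CONTEXT_LATENT_SIZE,
                   activation=config.ACTIVATION_FUNCTION)(encoded)
    context_encoder = Model(inputs=context_input, outputs=latent)
    context_encoder.summary()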