import pickle
import random

import numpy as np
from scipy.stats import rankdata

# Vocabulary, QAModel and QAData are project-local helpers; import them
# from wherever they are defined in this repository.


def main(mode='test', question=None, answers=None):
    """Train, evaluate or test the BiLSTM QA model.

    Args:
        mode (str): 'train', 'predict' or 'test'
        question (str): the question (used in 'test' mode)
        answers (list): candidate answers as strings (used in 'test' mode)

    Returns:
        int: index of the most likely answer (in 'test' mode)
    """
    # build the train and predict models
    vocabulary = Vocabulary("./data/vocab_all.txt")
    embedding_file = "./data/word2vec_100_dim.embeddings"
    qa_model = QAModel()
    train_model, predict_model = qa_model.get_bilstm_model(
        embedding_file, len(vocabulary))

    epochs = 1
    if mode == 'train':
        for i in range(epochs):
            print('Training epoch', i)

            # load training data
            qa_data = QAData()
            questions, good_answers, bad_answers = qa_data.get_training_data()

            # train the model; the loss is computed from the inputs
            # themselves, so the target is a dummy zero vector
            Y = np.zeros(shape=(questions.shape[0],))
            train_model.fit([questions, good_answers, bad_answers], Y,
                            epochs=1, batch_size=64,
                            validation_split=0.1, verbose=1)

            # save the weights after each epoch, indexed by the epoch
            # just completed (not by the constant epoch count)
            train_model.save_weights(
                'model/train_weights_epoch_' + str(i + 1) + '.h5',
                overwrite=True)
            predict_model.save_weights(
                'model/predict_weights_epoch_' + str(i + 1) + '.h5',
                overwrite=True)

    elif mode == 'predict':
        # load the evaluation data
        data = pickle.load(open("./data/dev.pkl", 'rb'))
        random.shuffle(data)

        # load the trained weights; the file name must match what the
        # 'train' branch saved above
        qa_data = QAData()
        predict_model.load_weights('model/predict_weights_epoch_1.h5')

        c = 0
        c1 = 0
        for i, d in enumerate(data):
            print(i, len(data))

            # pad the data and get it into the desired format
            indices, answers, question = qa_data.process_data(d)

            # get the similarity scores
            sims = predict_model.predict([question, answers])

            n_good = len(d['good'])
            max_r = np.argmax(sims)
            max_n = np.argmax(sims[:n_good])
            r = rankdata(sims, method='max')
            c += 1 if max_r == max_n else 0
            c1 += 1 / float(r[max_r] - r[max_n] + 1)

        precision = c / float(len(data))
        mrr = c1 / float(len(data))
        print("Precision", precision)
        print("MRR", mrr)

    elif mode == 'test':
        # question and answers come from the parameters
        qa_data = QAData()
        answers, question = qa_data.process_test_data(question, answers)

        # load the trained weights
        predict_model.load_weights('model/predict_weights_epoch_1.h5')

        # get similarity scores and return the index of the best answer
        sims = predict_model.predict([question, answers])
        max_r = np.argmax(sims)
        return max_r
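# Usage sketch (hypothetical, not part of the original script): exercises
# the three modes of the entry point above. It assumes ./data and ./model
# contain the files referenced in the function body; the example
# question/answers are made up for illustration.
def demo_bilstm_main():
    # train for one epoch; this writes model/predict_weights_epoch_1.h5
    main(mode='train')

    # print Precision and MRR over ./data/dev.pkl
    main(mode='predict')

    # rank hand-written candidate answers against a question
    best = main(mode='test',
                question='What is the capital of France?',
                answers=['Paris is the capital of France.',
                         'Berlin is in Germany.'])
    print('Most likely answer index:', best)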
import json
import logging

from keras.models import model_from_json

logger = logging.getLogger(__name__)

# get_baseline_model, get_small_model, get_larger_model and train are
# project-local helpers; import them from this repository's model module.


def main(mode='train', question=None, answers=None, epochs=2, batch_size=32,
         validation_split=0.1, model_name='baseline'):
    """Train, evaluate or test the QA models.

    Args:
        mode (str): 'train', 'predict' or 'test'
        question (str): the question (used in 'test' mode)
        answers (list): candidate answers as strings (used in 'test' mode)
        epochs (int): number of training epochs
        batch_size (int): training batch size
        validation_split (float): fraction of training data held out
        model_name (str): which variant to load: 'baseline', 'small' or 'larger'

    Returns:
        int: index of the most likely answer (in 'test' mode)
    """
    if mode == 'train':
        # baseline model
        train_model, prediction_model = get_baseline_model()
        train_model.summary()
        train(train_model, prediction_model, epochs=epochs,
              batch_size=batch_size, validation_split=validation_split)

        # small model
        small_train_model, small_prediction_model = get_small_model()
        small_train_model.summary()
        train(small_train_model, small_prediction_model, model_name='small',
              epochs=epochs, batch_size=batch_size,
              validation_split=validation_split)

        # larger model
        larger_train_model, larger_prediction_model = get_larger_model()
        larger_train_model.summary()
        train(larger_train_model, larger_prediction_model, model_name='larger',
              epochs=epochs, batch_size=batch_size,
              validation_split=validation_split)

    elif mode == 'predict':
        # load the evaluation data
        with open('data/test.json') as read_file:
            data = json.load(read_file)
        random.shuffle(data)
        qa_data = QAData()

        # rebuild the prediction model that matches the requested variant
        logger.info(f'Creating predict model: {model_name}')
        if model_name == 'small':
            _, predict_model = get_small_model()
        elif model_name == 'larger':
            _, predict_model = get_larger_model()
        else:
            _, predict_model = get_baseline_model()

        # load weights
        logger.info(
            f'Loading model weights: model/train_weights_{model_name}.h5')
        predict_model.load_weights(f'model/train_weights_{model_name}.h5')

        c = 0
        c1 = 0
        for i, d in enumerate(data):
            print(i, len(data))

            # pad the data and get it into the desired format
            answers, question = qa_data.process_data(d)

            # get the similarity scores
            sims = predict_model.predict([question, answers])

            n_good = len(d['good'])
            max_r = np.argmax(sims)
            max_n = np.argmax(sims[:n_good])
            r = rankdata(sims, method='max')
            c += 1 if max_r == max_n else 0
            c1 += 1 / float(r[max_r] - r[max_n] + 1)

        precision = c / float(len(data))
        mrr = c1 / float(len(data))
        print("Precision", precision)
        print("MRR", mrr)

    elif mode == 'test':
        # question and answers come from the parameters
        qa_data = QAData()
        answers, question = qa_data.process_test_data(question, answers)

        # rebuild the model from its saved JSON architecture
        logger.info(
            f'Loading model architecture: '
            f'model/model_architecture_{model_name}.json')
        with open(f'model/model_architecture_{model_name}.json', 'r') as read_file:
            json_string = read_file.read()
        predict_model = model_from_json(json_string)

        # load weights
        logger.info(
            f'Loading model weights: model/train_weights_{model_name}.h5')
        predict_model.load_weights(f'model/train_weights_{model_name}.h5')

        # get similarity scores and return the index of the best answer
        sims = predict_model.predict([question, answers])
        max_r = np.argmax(sims)
        return max_r
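# Usage sketch (hypothetical, not part of the original script): drives the
# revised entry point above. It assumes 'train' has run first so that
# model/train_weights_<name>.h5 and model/model_architecture_<name>.json
# exist; the example question/answers are made up for illustration.
def demo_multi_model_main():
    # train the baseline, small and larger variants in one pass
    main(mode='train', epochs=2, batch_size=32)

    # report Precision and MRR for each trained variant on data/test.json
    for name in ('baseline', 'small', 'larger'):
        main(mode='predict', model_name=name)

    # pick the most likely answer with the baseline variant
    best = main(mode='test', model_name='baseline',
                question='What is the capital of France?',
                answers=['Paris is the capital of France.',
                         'Berlin is in Germany.'])
    print('Most likely answer index:', best)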