# NOTE(review): orphaned fragment — this line begins mid-expression (the tail
# of an os.path.join(...) call whose opening is not visible here) and ends
# mid-call (`padding_sentences(` with no arguments or closing paren). Its
# content duplicates the fuller chunk on the next line, so this looks like a
# corrupted/duplicated paste — TODO confirm against version control and delete
# if redundant. Code left byte-identical below.
"trained_word2vec.model") if not os.path.exists(trained_word2vec_model_file): print("Word2vec model file \'{}\' doesn't exist!".format( trained_word2vec_model_file)) print("Using word2vec model file : {}".format(trained_word2vec_model_file)) # validate training params file training_params_file = os.path.join(FLAGS.checkpoint_dir, "..", "training_params.pickle") if not os.path.exists(training_params_file): print("Training params file \'{}\' is missing!".format( training_params_file)) print("Using training params file : {}".format(training_params_file)) # Load params params = data_helpers.loadDict(training_params_file) num_labels = int(params['num_labels']) max_document_length = int(params['max_document_length']) # Load data if FLAGS.eval_train: x_raw, y_test = data_helpers.load_data_and_labels( FLAGS.input_text_file, FLAGS.input_label_file, num_labels) else: x_raw = [ "a masterpiece four years in the making", "everything is off." ] y_test = [1, 0] # Get Embedding vector x_test sentences, max_document_length = data_helpers.padding_sentences(
# --- Evaluation setup: locate training artifacts, load saved params, and ---
# --- build the embedded test matrix x_test for the CNN evaluation run.    ---
# NOTE(review): this chunk was collapsed onto a single line in SOURCE;
# reformatted to conventional Python. It relies on module-level names defined
# elsewhere in the file: FLAGS, os, np, data_helpers, word2vec_helpers, and an
# earlier-computed `checkpoint_file`.
print("Using checkpoint file : {}".format(checkpoint_file))

# Validate word2vec model file (stored one directory above the checkpoints).
trained_word2vec_model_file = os.path.join(FLAGS.checkpoint_dir, "..",
                                           "trained_word2vec.model")
if not os.path.exists(trained_word2vec_model_file):
    print("Word2vec model file \'{}\' doesn't exist!".format(
        trained_word2vec_model_file))
    # BUG FIX: the original printed the error and carried on, deferring the
    # failure to a confusing crash when the model is loaded later. Fail fast.
    raise FileNotFoundError(trained_word2vec_model_file)
print("Using word2vec model file : {}".format(trained_word2vec_model_file))

# Validate training params file (pickled dict written at training time).
training_params_file = os.path.join(FLAGS.checkpoint_dir, "..",
                                    "training_params.pickle")
if not os.path.exists(training_params_file):
    print("Training params file \'{}\' is missing!".format(
        training_params_file))
    # BUG FIX: same fail-fast treatment instead of crashing inside loadDict.
    raise FileNotFoundError(training_params_file)
print("Using training params file : {}".format(training_params_file))

# Load params saved during training so evaluation matches the trained model.
params = data_helpers.loadDict(training_params_file)
num_labels = int(params['num_labels'])
max_document_length = int(params['max_document_length'])

# Load data: either the full labeled dataset, or a tiny hard-coded smoke test.
if FLAGS.eval_train:
    x_raw, y_test = data_helpers.load_data_and_labels(
        FLAGS.input_text_file, FLAGS.input_label_file, num_labels)
else:
    x_raw = ["a masterpiece four years in the making", "everything is off."]
    y_test = [1, 0]

# Get embedding-vector x_test: pad every sentence to the training-time length,
# then map tokens to vectors using the word2vec model validated above.
sentences, max_document_length = data_helpers.padding_sentences(
    x_raw, '<PADDING>', padding_sentence_length=max_document_length)
x_test = np.array(word2vec_helpers.embedding_sentences(
    sentences, file_to_load=trained_word2vec_model_file))
print("x_test.shape = {}".format(x_test.shape))