        return results

    def sort_results_by(self, results, key: str, order: str = 'asc'):
        """Return an OrderedDict of results sorted by the field `key`.

        Pass order='desc' to sort in descending order.
        """
        return OrderedDict(
            sorted(results.items(),
                   key=lambda item: item[1][key],
                   reverse=(order == 'desc')))


if __name__ == '__main__':
    # Load GloVe vectors
    print('Loading GloVe vectors...')
    start_time = time.time()
    GLOVE_FILENAME = 'data/glove.6B.100d.txt'
    word2index, index2word, index2embedding = load_embedding(GLOVE_FILENAME)
    print('Loaded %s word vectors in %f seconds' %
          (len(word2index), time.time() - start_time))
    embedding = Embedding(word2index, index2word, index2embedding)

    # Load model
    imdb_model = IMDBModel('models/model.h5', embedding)

    # Create Verifier
    verifier = DeepGoTextVerifier(imdb_model, embedding)

    # The text to verify
    text = 'great movie, highly recommended.'

    results = verifier.verify_text(text,
                                   normalize=False)
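
    # Sketch (not in the original source, which is truncated here; the field
    # name 'bound' is hypothetical): assuming verify_text returns a dict keyed
    # by word whose values carry a numeric robustness field, rank the words
    # with the sort_results_by helper defined above.
    ranked = verifier.sort_results_by(results, key='bound', order='desc')
    for word, info in ranked.items():
        print(word, info['bound'])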
# Example #2
#model_input_shape = (1, ksize, ksize, emb_dims)
input_bounds = [[-eps, eps] for _ in range(input_len)]
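# Sketch (not in the original source): input_bounds is an L-infinity box of
# radius eps around each of the input_len embedding dimensions; a perturbation
# is admissible iff every component stays inside its interval. Toy check with
# assumed values eps = 0.05, input_len = 3:
#   delta = [0.01, -0.04, 0.0]
#   assert all(lo <= d <= hi for d, (lo, hi) in zip(delta, [[-0.05, 0.05]] * 3))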
#tf_model_path = KERAS_REL_PATH + '{}cnn2d-{}inp-16hu-keras-IMDB-{}d'.format(prefix, num_words, emb_dims)
frozen_graph_prefix, frozen_graph_path = FROZEN_REL_PATH, 'tf_model_IMDB.pb'
log_results, logs_path = True, "MSA_NEW/MSA_NEW_IMDB_fc_{}_inp_{}d_knn_linf/MSA_NEW_results_smallest_expl_IMDB_{}fc_{}_inp_{}d_".format(
    num_words, emb_dims, prefix, num_words,
    emb_dims) + "alternate_cost"  # write results to a log file

# Load the model and test it on the input review
model = load_model(tf_model_path, compile=False)

# Load embedding
EMBEDDING_FILENAME = EMBEDDINGS_REL_PATH + 'custom-embedding-IMDB.{}d.txt'.format(
    emb_dims)
word2index, index2word, index2embedding = load_embedding(EMBEDDING_FILENAME)
def embedding(W):
    """Look up each word's vector and reshape to the model's input shape."""
    return np.array([index2embedding[word2index[w]]
                     for w in W]).reshape(model_input_shape)
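# Sketch (assumed usage, shape values hypothetical): mapping a padded review of
# num_words tokens yields one model-ready tensor, e.g. for the commented-out
# shape above, embedding(tokens).shape == (1, ksize, ksize, emb_dims). Note
# that an out-of-vocabulary word would raise a KeyError in word2index here.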

# Test accuracy of the model
#acc = test_IMDB(load_model(tf_model_path, compile=True), index2embedding, word2index, [num_words*emb_dims], num_words, emb_dims, 90000)
#logger("Loss/Accuracy of the model on test set is {}".format(acc), True, "[logger]")

# Truncate or pad the review to exactly num_words tokens
input_without_padding = input_without_padding.lower().split(' ')
input_ = input_without_padding[:num_words] + ['<PAD>'] * (
    num_words - len(input_without_padding))
x = embedding(input_)
inpforcost = input_  # keep the padded tokens for the cost computation
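
# Sketch (assumed toy value num_words = 8): a 3-token review
# 'great movie overall' becomes
#   ['great', 'movie', 'overall'] + ['<PAD>'] * 5
# while a review longer than num_words is truncated to its first num_words
# tokens, since the slice shortens it and the negative repeat count makes the
# padding list empty.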

# Extract the k-nearest-neighbors
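# Sketch (the original code is cut off here; this is one plausible
# implementation, not the source's): find the k vocabulary words nearest to a
# given word under the L-infinity metric, which the 'knn_linf' fragment of
# logs_path suggests is the distance in use. Assumes index2embedding is a
# NumPy array of shape (vocab_size, emb_dims).
def knn_words(word, k=5):
    v = index2embedding[word2index[word]]
    # L-infinity distance from every embedding row to v
    dists = np.linalg.norm(index2embedding - v, ord=np.inf, axis=1)
    nearest = np.argsort(dists)[1:k + 1]  # index 0 is the word itself
    return [index2word[i] for i in nearest]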