Example #1
0
    def __init__(self, alphabet_encoding):
        """Build a fixed test-word minibatch provider.

        Parameters
        ----------
        alphabet_encoding : dict
            Character-to-code mapping; must contain a 'PADDING' entry,
            which is used as the pad symbol for the provider.
        """
        self.alphabet_encoding = alphabet_encoding

        # Small fixed vocabulary used for eyeballing model behavior.
        self.test_words = ['pokemon', 'bigger', 'better', 'faster', 'stronger']

        # Materialize as a list: in Python 3, map() returns a one-shot
        # iterator, so passing it as X and then calling len() on it would
        # raise TypeError (and the iterator would already be exhausted).
        _encoded_test_words = list(map(self._encode, self.test_words))
        self.data_provider = PaddedSequenceMinibatchProvider(
            X=_encoded_test_words,
            padding=self.alphabet_encoding['PADDING'],
            # One batch containing every test word, in fixed order.
            batch_size=len(_encoded_test_words),
            shuffle=False)
Example #2
0

def load_json(file_name):
    """Read the file at *file_name* and return its parsed JSON content.

    Parameters
    ----------
    file_name : str or path-like
        Path to a UTF-8 JSON file.

    Returns
    -------
    The deserialized object (dict, list, str, int, float, bool, or None).

    Raises
    ------
    OSError if the file cannot be opened; json.JSONDecodeError on
    malformed JSON.
    """
    with open(file_name) as f:
        # json.load streams from the file object directly — no need to
        # read the whole file into a string first.
        return json.load(f)


if __name__ == "__main__":
    np.set_printoptions(linewidth=100)
    data = load_json(
        os.path.join(os.environ['DATA'], "words", "words.encoded.json"))
    alphabet = load_json(
        os.path.join(os.environ['DATA'], "words",
                     "words.alphabet.encoding.json"))

    train_data_provider = PaddedSequenceMinibatchProvider(
        X=data, padding=alphabet['PADDING'], batch_size=100)

    embedding_dimension = 8
    vocabulary_size = len(alphabet)
    n_feature_maps = 8
    kernel_width = 5
    pooling_size = 2

    n_epochs = 1

    model = CSM(layers=[
        WordEmbedding(dimension=embedding_dimension,
                      vocabulary_size=len(alphabet)),
        SentenceConvolution(n_feature_maps=n_feature_maps,
                            kernel_width=kernel_width,
                            n_channels=1,