Example 1
def get_model(encoding):

    return CSM(
        layers=[
            DictionaryEncoding(vocabulary=encoding),

            WordEmbedding(
                dimension={{embedding_dimension}},
                vocabulary_size=len(encoding),
                padding=encoding['PADDING']),

            {% for layer in word_layers %}
            {% set layer_index = loop.index0 %}

            SentenceConvolution(
                n_feature_maps={{layer.n_feature_maps}},
                kernel_width={{layer.kernel_width}},
                n_channels={{layer.n_channels}},
                n_input_dimensions=1),

            Bias(
                n_input_dims=1,
                n_feature_maps={{layer.n_feature_maps}}),

            KMaxPooling(
                k={{layer.k_pooling}},
                k_dynamic={{layer.k_dynamic}} if {{layer.k_dynamic}} > 0 else None),

            {{layer.nonlinearity}}(),

            {% endfor %}

            ReshapeForDocuments(),

            {% for layer in sentence_layers %}
            {% set layer_index = loop.index0 %}

            SentenceConvolution(
                n_feature_maps={{layer.n_feature_maps}},
                kernel_width={{layer.kernel_width}},
                n_channels={{layer.n_channels}},
                n_input_dimensions=1),

            Bias(
                n_input_dims=1,
                n_feature_maps={{layer.n_feature_maps}}),

            KMaxPooling(
                k={{layer.k_pooling}},
                k_dynamic={{layer.k_dynamic}} if {{layer.k_dynamic}} > 0 else None),

            {{layer.nonlinearity}}(),

            {% endfor %}

            {% if dropout %}
            Dropout(('b', 'd', 'f', 'w'), 0.5),
            {% endif %}

            Softmax(
                n_classes={{n_classes}},
                n_input_dimensions={{softmax_input_dimensions}}),
            ])
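
Example 1 is a Jinja2 template rather than directly runnable Python: the {{ ... }} placeholders and {% ... %} blocks have to be rendered before the module can be imported. A minimal rendering sketch follows; it assumes jinja2 is installed, the template path is hypothetical, and the layer settings are illustrative rather than values from the original project.

# Sketch: render the template above into Python source.
# 'model_template.py.jinja' is a hypothetical location for the template text.
from jinja2 import Template

with open('model_template.py.jinja') as f:
    template = Template(f.read())

source = template.render(
    embedding_dimension=20,
    word_layers=[dict(n_feature_maps=10, kernel_width=15, n_channels=20,
                      k_pooling=7, k_dynamic=0.5, nonlinearity='Tanh')],
    sentence_layers=[dict(n_feature_maps=20, kernel_width=11, n_channels=50,
                          k_pooling=5, k_dynamic=-1, nonlinearity='Tanh')],
    dropout=True,
    n_classes=2,
    softmax_input_dimensions=700,  # must match the pooled output size
)
print(source)  # rendered module text defining get_model(encoding)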
Example 2
def _sentence_convolution(conv_layer, ratio):
    new_conv = SentenceConvolution(
        n_feature_maps=conv_layer.n_feature_maps,
        kernel_width=conv_layer.kernel_width,
        n_channels=conv_layer.n_channels,
        n_input_dimensions=conv_layer.n_input_dimensions)

    new_conv.W = conv_layer.W.copy() * (1 - ratio)
    return new_conv
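
This helper builds a SentenceConvolution with the same shape as the original layer and rescales the copied weights by (1 - ratio). That pattern usually means a dropout ratio is being folded into a test-time copy of the layer's weights, though the calling code that would confirm this is not shown here.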
Example 3
def model_two_layer_small_embedding(alphabet):
    return CSM(layers=[
        DictionaryEncoding(vocabulary=alphabet),
        WordEmbedding(dimension=32, vocabulary_size=len(alphabet)),
        SentenceConvolution(n_feature_maps=5,
                            kernel_width=10,
                            n_channels=1,
                            n_input_dimensions=32),
        SumFolding(),
        KMaxPooling(k=7),
        Bias(n_input_dims=16, n_feature_maps=5),
        Tanh(),
        SentenceConvolution(n_feature_maps=5,
                            kernel_width=5,
                            n_channels=5,
                            n_input_dimensions=16),
        KMaxPooling(k=4),
        Bias(n_input_dims=16, n_feature_maps=5),
        Tanh(),
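        # 320 = 5 feature maps * k=4 pooled positions * 16 rows (SumFolding halves 32 -> 16).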
        Softmax(n_classes=2, n_input_dimensions=320),
    ])
def model_one_layer_variant_2(alphabet):
    return CSM(layers=[
        DictionaryEncoding(vocabulary=alphabet),
        WordEmbedding(dimension=42, vocabulary_size=len(alphabet)),
        SentenceConvolution(n_feature_maps=5,
                            kernel_width=6,
                            n_channels=1,
                            n_input_dimensions=42),
        SumFolding(),
        KMaxPooling(k=4),
        Bias(n_input_dims=21, n_feature_maps=5),
        Tanh(),
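        # 420 = 5 feature maps * k=4 pooled positions * 21 rows (SumFolding halves 42 -> 21).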
        Softmax(n_classes=2, n_input_dimensions=420),
    ])
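
These factories only need an alphabet that supports len() and key lookup (Example 1 additionally indexes encoding['PADDING']). A minimal usage sketch, with the token-to-index mapping below being an assumption:

# Hypothetical alphabet: a character -> index dict, with a PADDING entry for
# the providers used elsewhere in these examples.
alphabet = {ch: i for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz ")}
alphabet['PADDING'] = len(alphabet)

model = model_one_layer_variant_2(alphabet)
print(model)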
Example 6
    train_data_provider = LabelledSequenceMinibatchProvider(X=X[:-500],
                                                            Y=Y[:-500],
                                                            batch_size=100)

    print(train_data_provider.batches_per_epoch)

    validation_data_provider = LabelledSequenceMinibatchProvider(
        X=X[-500:], Y=Y[-500:], batch_size=500)

    word_embedding_model = CSM(layers=[
        WordEmbedding(  # really a character embedding
            dimension=16,
            vocabulary_size=len(alphabet)),
        SentenceConvolution(n_feature_maps=10,
                            kernel_width=5,
                            n_channels=1,
                            n_input_dimensions=16),
        SumFolding(),
        KMaxPooling(k=2),
        MaxFolding(),
        Tanh(),
    ])

    word_embedding = WordFromCharacterEmbedding(
        embedding_model=word_embedding_model, alphabet_encoding=alphabet)
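    # word_embedding wraps the character-level model above so it can act as
    # the embedding layer of the tweet-level model defined below.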

    # print word_embedding.fprop(X, meta)

    tweet_model = CSM(layers=[
        word_embedding,
        SentenceConvolution(n_feature_maps=5,
Example 7
    train_data_provider = PaddedSequenceMinibatchProvider(
        X=data, padding=alphabet['PADDING'], batch_size=100)

    embedding_dimension = 8
    vocabulary_size = len(alphabet)
    n_feature_maps = 8
    kernel_width = 5
    pooling_size = 2

    n_epochs = 1

    model = CSM(layers=[
        WordEmbedding(dimension=embedding_dimension,
                      vocabulary_size=len(alphabet)),
        SentenceConvolution(n_feature_maps=n_feature_maps,
                            kernel_width=kernel_width,
                            n_channels=1,
                            n_input_dimensions=embedding_dimension),
        SumFolding(),
        KMaxPooling(k=pooling_size),

        # Bias(
        #     n_input_dims=embedding_dimension / 2,
        #     n_feature_maps=n_feature_maps),
        # SumFolding halves the embedding dimension, so the flattened input to
        # this layer is n_feature_maps * pooling_size * (embedding_dimension // 2)
        # = 8 * 2 * 4 = 64 (integer division keeps this exact under Python 3).
        Linear(n_input=n_feature_maps * pooling_size * (embedding_dimension // 2),
               n_output=64),
        Tanh(),
        Linear(n_output=1, n_input=64),
    ])

    print(model)
    #         Softmax(
    #             n_classes=2,
    #             n_input_dimensions=480),
    #         ]
    # )

    tweet_model = CSM(layers=[
        # cpu.model.encoding.
        DictionaryEncoding(vocabulary=alphabet),

        # cpu.model.embedding.
        WordEmbedding(dimension=28, vocabulary_size=len(alphabet)),

        # HostToDevice(),
        SentenceConvolution(n_feature_maps=6,
                            kernel_width=7,
                            n_channels=1,
                            n_input_dimensions=28),
        Bias(n_input_dims=28, n_feature_maps=6),
        SumFolding(),
        KMaxPooling(k=4, k_dynamic=0.5),
        Tanh(),
        SentenceConvolution(n_feature_maps=14,
                            kernel_width=5,
                            n_channels=6,
                            n_input_dimensions=14),
        Bias(n_input_dims=14, n_feature_maps=14),
        SumFolding(),
        KMaxPooling(k=4),
        Tanh(),
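        # 392 = 14 feature maps * k=4 pooled positions * 7 rows (two SumFoldings: 28 -> 14 -> 7).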
        Softmax(n_classes=2, n_input_dimensions=392),
    ])
Example 9
    validation_data_provider = LabelledDocumentMinibatchProvider(
        X=X[-n_validation:],
        Y=Y[-n_validation:],
        batch_size=batch_size,
        padding='PADDING',
        fixed_n_sentences=15,
        fixed_n_words=50)

    model = CSM(layers=[
        DictionaryEncoding(vocabulary=encoding),
        WordEmbedding(dimension=20,
                      vocabulary_size=len(encoding),
                      padding=encoding['PADDING']),
        Dropout(('b', 'w', 'f'), 0.2),
        SentenceConvolution(n_feature_maps=10,
                            kernel_width=15,
                            n_channels=20,
                            n_input_dimensions=1),
        Bias(n_input_dims=1, n_feature_maps=10),
        KMaxPooling(k=7, k_dynamic=0.5),
        Tanh(),
        SentenceConvolution(n_feature_maps=30,
                            kernel_width=9,
                            n_channels=10,
                            n_input_dimensions=1),
        Bias(n_input_dims=1, n_feature_maps=30),
        KMaxPooling(k=5),
        Tanh(),
        ReshapeForDocuments(),
        SentenceConvolution(n_feature_maps=20,
                            kernel_width=11,
                            n_channels=30 * 5,