import mxnet as mx
import pytest
from numpy.testing import assert_almost_equal
from gluonnlp.model import ConvolutionalEncoder


def test_conv_encoder_highway_default_forward():
    encoder = ConvolutionalEncoder()
    encoder.initialize(init='One')
    print(encoder)
    inputs = mx.nd.random.uniform(shape=(10, 20, 15))
    output = encoder(inputs, None)
    assert output.shape == (20, 525), output.shape  # 525 == sum of the default num_filters (25 + 50 + 75 + 100 + 125 + 150)


def test_conv_encoder_nohighway_forward_largeinputs():
    encoder = ConvolutionalEncoder(embed_size=7,
                                   num_filters=(1, 1, 2, 3),
                                   ngram_filter_sizes=(1, 2, 3, 4),
                                   output_size=30)
    print(encoder)
    encoder.initialize()
    inputs = mx.nd.random.uniform(shape=(4, 8, 7))
    output = encoder(inputs, None)
    assert output.shape == (8, 30), output.shape


def test_conv_encoder_highway_forward():
    encoder = ConvolutionalEncoder(embed_size=2,
                                   num_filters=(2, 1),
                                   ngram_filter_sizes=(1, 2),
                                   num_highway=2,
                                   output_size=1)
    print(encoder)
    encoder.initialize()
    inputs = mx.nd.array([[[.7, .8], [.1, 1.5], [.7, .8]], [[.7, .8], [.1, 1.5], [.7, .8]]])
    output = encoder(inputs, None)
    print(output)
    assert output.shape == (3, 1), output.shape


def test_conv_encoder_nonhighway_forward():
    encoder = ConvolutionalEncoder(embed_size=2, num_filters=(1, 1), ngram_filter_sizes=(1, 2))
    print(encoder)
    encoder.initialize(init='One')
    inputs = mx.nd.array([[[.7, .8], [.1, 1.5], [.2, .3]], [[.5, .6], [.2, 2.5], [.4, 4]]])
    output = encoder(inputs, None)
    assert output.shape == (3, 2), output.shape
    assert_almost_equal(output.asnumpy(),
                        mx.nd.array([[1.37, 1.42],
                                     [1.49, 1.49],
                                     [1.5, 1.5]]).asnumpy(),
                        decimal=2)
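The tests above rely on ConvolutionalEncoder's shape convention: inputs are (seq_len, batch_size, embed_size) and the output is (batch_size, output_size), where the output width defaults to sum(num_filters) when no output projection is configured. A minimal standalone sketch (not part of the test file, reusing the imports above) that makes this explicit:

    encoder = ConvolutionalEncoder(embed_size=2, num_filters=(1, 1), ngram_filter_sizes=(1, 2))
    encoder.initialize()
    inputs = mx.nd.random.uniform(shape=(5, 3, 2))   # seq_len=5, batch=3, embed=2
    output = encoder(inputs, None)                   # no mask
    assert output.shape == (3, 2)                    # (batch, sum(num_filters))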


@pytest.mark.parametrize('hybridize', [True, False])
@pytest.mark.parametrize('mask', [True, False])
def test_conv_encoder_highway_default_forward(hybridize, mask):
    encoder = ConvolutionalEncoder()
    encoder.initialize(init='One')
    if hybridize:
        encoder.hybridize()
    print(encoder)
    inputs = mx.nd.random.uniform(shape=(10, 20, 15))
    if mask:
        output = encoder(inputs, mx.nd.ones(inputs.shape[:-1]))  # all-ones mask over (seq_len, batch)
    else:
        output = encoder(inputs)
    assert output.shape == (20, 525), output.shape
Example #6
    def __init__(self, word_vocab, embedding_size=300, sentiments=5,
                 prefix=None, params=None):
        super(SentimentNet, self).__init__(prefix=prefix, params=params)
        with self.name_scope():
            self.word_embedding = gluon.nn.Embedding(input_dim=len(word_vocab),
                                                     output_dim=embedding_size)
            self.dropout = gluon.nn.Dropout(0.1)
            # CNN encoder over the embedded tokens: 100 unigram + 100 bigram filters,
            # no highway layers and no output projection (embed_size must match the
            # embedding output_dim above)
            self.word_encoder = ConvolutionalEncoder(embed_size=300,
                                                     num_filters=(100, 100, ),
                                                     ngram_filter_sizes=(1, 2, ),
                                                     conv_layer_activation='relu',
                                                     num_highway=None,
                                                     output_size=None)

            self.output = gluon.nn.HybridSequential()

            with self.output.name_scope():
                self.output.add(gluon.nn.Dense(sentiments, activation='sigmoid'))
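The example shows only the constructor: embedding, dropout, ConvolutionalEncoder, then a sigmoid dense output. A hedged sketch of the forward pass this wiring implies, assuming SentimentNet is a gluon.HybridBlock and that the encoder expects (seq_len, batch, embed) inputs; the method below is illustrative and not from the original source:

    def hybrid_forward(self, F, data):
        # illustrative only; data: (batch, seq_len) token ids
        embedded = self.dropout(self.word_embedding(data))   # (batch, seq_len, 300)
        embedded = F.transpose(embedded, axes=(1, 0, 2))     # (seq_len, batch, 300)
        encoded = self.word_encoder(embedded)                 # (batch, 200): 100 + 100 filters
        return self.output(encoded)                           # (batch, sentiments)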
Example #7
    def __init__(self, elmo, crf, intent_cnt, slots_cnt,
                 embedding_size=1024, prefix=None, params=None):
        super(OneNet, self).__init__(prefix, params)
        self._embedding_size = embedding_size
        self.elmo_container = [elmo]  # kept in a list so the pre-trained ELMo block is not registered as a child of OneNet

        with self.name_scope():
            self.crf = crf
            # ELMo-style scalar mix: learnable weights over the three biLM layers (elmo_s)
            # and a global scaling factor (gamma)
            self.elmo_s = self.params.get('elmo_s', shape=(3, 1, 1, 1))
            self.gamma = self.params.get('gamma', shape=(1, 1, 1))
            self.highway = Highway(input_size=embedding_size, num_layers=2)

            self.encoder = ConvolutionalEncoder(embed_size=embedding_size,
                                                num_filters=(100, 100, 100, ),
                                                ngram_filter_sizes=(2, 3, 4, ),
                                                conv_layer_activation='relu',
                                                num_highway=None,
                                                output_size=None)
            self.intent_dense = nn.Dense(units=intent_cnt)
            self.slot_dense = nn.Dense(units=slots_cnt, flatten=False)
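elmo_s and gamma are only declared here; they are typically consumed by an ELMo-style scalar mix in the forward pass. A hedged sketch of that computation (the helper below is hypothetical, not from the original source), assuming the ELMo model in elmo_container returns its three layer outputs stacked on axis 0:

    def _scalar_mix(self, F, elmo_layers, elmo_s, gamma):
        # elmo_layers: (3, batch, seq_len, embedding_size); elmo_s: (3, 1, 1, 1); gamma: (1, 1, 1)
        weights = F.softmax(elmo_s, axis=0)                           # normalise the per-layer weights
        mixed = F.sum(F.broadcast_mul(weights, elmo_layers), axis=0)  # weighted sum over the 3 layers
        return F.broadcast_mul(gamma, mixed)                          # (batch, seq_len, embedding_size)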


@pytest.mark.parametrize('hybridize', [True, False])
@pytest.mark.parametrize('mask', [True, False])
def test_conv_encoder_nohighway_forward_largeinputs(hybridize, mask):
    encoder = ConvolutionalEncoder(embed_size=7,
                                   num_filters=(1, 1, 2, 3),
                                   ngram_filter_sizes=(1, 2, 3, 4),
                                   output_size=30)
    print(encoder)
    encoder.initialize()
    if hybridize:
        encoder.hybridize()
    inputs = mx.nd.random.uniform(shape=(4, 8, 7))
    if mask:
        output = encoder(inputs, mx.nd.ones(inputs.shape[:-1]))
    else:
        output = encoder(inputs)
    assert output.shape == (8, 30), output.shape


@pytest.mark.parametrize('hybridize', [True, False])
@pytest.mark.parametrize('mask', [True, False])
def test_conv_encoder_highway_forward(hybridize, mask):
    encoder = ConvolutionalEncoder(embed_size=2,
                                   num_filters=(2, 1),
                                   ngram_filter_sizes=(1, 2),
                                   num_highway=2,
                                   output_size=1)
    print(encoder)
    encoder.initialize()
    if hybridize:
        encoder.hybridize()
    inputs = mx.nd.array([[[.7, .8], [.1, 1.5], [.7, .8]], [[.7, .8], [.1, 1.5], [.7, .8]]])
    if mask:
        output = encoder(inputs, mx.nd.ones(inputs.shape[:-1]))
    else:
        output = encoder(inputs)
    print(output)
    assert output.shape == (3, 1), output.shape
Example #10
    def __init__(self, **kwargs):
        super(QANet, self).__init__(**kwargs)
        with self.name_scope():
            self.flatten = gluon.nn.Flatten()
            self.dropout = gluon.nn.Dropout(opt.layers_dropout)
            # character-level CNN encoder; gating is handled by the separate Highway stack defined below
            self.char_conv = ConvolutionalEncoder(
                embed_size=opt.char_emb_dim,
                num_filters=opt.char_conv_filters,
                ngram_filter_sizes=opt.char_conv_ngrams,
                conv_layer_activation=None,
                num_highway=0)

        self.highway = gluon.nn.HybridSequential()
        with self.highway.name_scope():
            self.highway.add(
                gluon.nn.Dense(units=opt.emb_encoder_conv_channels,
                               flatten=False,
                               use_bias=False,
                               weight_initializer=Xavier()))
            self.highway.add(
                Highway(input_size=opt.emb_encoder_conv_channels,
                        num_layers=opt.highway_layers,
                        activation='relu',
                        highway_bias=HighwayBias(nonlinear_transform_bias=0.0,
                                                 transform_gate_bias=0.0)))

        self.word_emb = gluon.nn.HybridSequential()
        with self.word_emb.name_scope():
            self.word_emb.add(
                gluon.nn.Embedding(input_dim=opt.word_corpus,
                                   output_dim=opt.word_emb_dim))
            self.word_emb.add(gluon.nn.Dropout(rate=opt.word_emb_dropout))
        self.char_emb = gluon.nn.HybridSequential()
        with self.char_emb.name_scope():
            self.char_emb.add(
                gluon.nn.Embedding(input_dim=opt.character_corpus,
                                   output_dim=opt.char_emb_dim,
                                   weight_initializer=Normal(sigma=0.1)))
            self.char_emb.add(gluon.nn.Dropout(rate=opt.char_emb_dropout))

        with self.name_scope():
            # QANet-style embedding encoder: a stack of convolution + self-attention blocks
            self.emb_encoder = Encoder(
                kernel_size=opt.emb_encoder_conv_kernerl_size,
                num_filters=opt.emb_encoder_conv_channels,
                conv_layers=opt.emb_encoder_num_conv_layers,
                num_heads=opt.emb_encoder_num_head,
                num_blocks=opt.emb_encoder_num_block)

            self.project = gluon.nn.Dense(units=opt.emb_encoder_conv_channels,
                                          flatten=False,
                                          use_bias=False,
                                          weight_initializer=Xavier())

        with self.name_scope():
            self.co_attention = CoAttention()

        with self.name_scope():
            self.model_encoder = Encoder(
                kernel_size=opt.model_encoder_conv_kernel_size,
                num_filters=opt.model_encoder_conv_channels,
                conv_layers=opt.model_encoder_conv_layers,
                num_heads=opt.model_encoder_num_head,
                num_blocks=opt.model_encoder_num_block)

        with self.name_scope():
            # linear scorers producing the answer-span start and end logits
            self.predict_begin = gluon.nn.Dense(
                units=1,
                use_bias=True,
                flatten=False,
                weight_initializer=Xavier(rnd_type='uniform',
                                          factor_type='in',
                                          magnitude=1),
                bias_initializer=Uniform(1.0 /
                                         opt.model_encoder_conv_channels))
            self.predict_end = gluon.nn.Dense(
                units=1,
                use_bias=True,
                flatten=False,
                weight_initializer=Xavier(rnd_type='uniform',
                                          factor_type='in',
                                          magnitude=1),
                bias_initializer=Uniform(1.0 /
                                         opt.model_encoder_conv_channels))
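The char_conv block above is a per-token character CNN, and ConvolutionalEncoder treats its first input axis as the sequence (here: character) axis. Feeding it therefore requires collapsing the (batch, seq_len) token axes before the encoder and restoring them afterwards. A standalone sketch of that reshaping (sizes and names are illustrative, not taken from the repository):

    import mxnet as mx
    from gluonnlp.model import ConvolutionalEncoder

    batch, seq_len, char_len, char_emb_dim = 2, 5, 16, 64
    char_conv = ConvolutionalEncoder(embed_size=char_emb_dim, num_filters=(100,),
                                     ngram_filter_sizes=(5,), num_highway=0)
    char_conv.initialize()

    chars = mx.nd.random.uniform(shape=(batch, seq_len, char_len, char_emb_dim))
    flat = chars.reshape((batch * seq_len, char_len, char_emb_dim))  # one char sequence per token
    flat = mx.nd.transpose(flat, axes=(1, 0, 2))                     # (char_len, batch*seq_len, emb)
    token_repr = char_conv(flat, None)                               # (batch*seq_len, 100)
    token_repr = token_repr.reshape((batch, seq_len, -1))            # (batch, seq_len, 100)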