Example 1
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(
            Param(name='kernel_count',
                  value=[32],
                  desc="The kernel count of the 2D convolution "
                  "of each block."))
        params.add(
            Param(name='kernel_size',
                  value=[[3, 3]],
                  desc="The kernel size of the 2D convolution "
                  "of each block."))
        params.add(
            Param(name='activation',
                  value='relu',
                  desc="The activation function."))
        params.add(
            Param(name='dpool_size',
                  value=[3, 10],
                  desc="The max-pooling size of each block."))

        params.add(
            Param('dropout_rate',
                  0.0,
                  hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
                  desc="The dropout rate."))
        return params
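Each snippet on this page follows the same pattern: a classmethod builds a ParamTable, registers each Param with a name, a default value, an optional hyper_space, and a description, then returns the table. A minimal consumption sketch under that assumption (`MyModel` is a hypothetical stand-in for any of the model classes; dict-style access mirrors the `params['optimizer'] = 'adam'` assignments seen in later examples):

# Hedged sketch, not part of the examples: `MyModel` is a placeholder
# for any class that defines get_default_params() as above.
params = MyModel.get_default_params()
print(params['kernel_count'])   # -> [32], the registered default
params['kernel_count'] = [64]   # dict-style override before building
params['dpool_size'] = [3, 5]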
Example 2
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     # set :attr:`with_multi_layer_perceptron` to True to append the
     # shared multi-layer perceptron on top of the convolution output
     params = super().get_default_params(with_multi_layer_perceptron=True)
     params.add(
         Param(name='vocab_size', value=419, desc="Size of vocabulary."))
     params.add(
         Param(name='filters',
               value=3,
               desc="Number of filters in the 1D convolution "
               "layer."))
     params.add(
         Param(name='kernel_size',
               value=3,
               desc="Number of kernel size in the 1D "
               "convolution layer."))
     params.add(
         Param(name='conv_activation_func',
               value='relu',
               desc="Activation function in the convolution"
               " layer."))
     params.add(
         Param(name='dropout_rate', value=0.3, desc="The dropout rate."))
     return params
Example 3
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(
         Param(name='mode',
               value='bert-base-uncased',
               desc="Pretrained Bert model."))
     params.add(
         Param('dropout_rate',
               0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     params.add(
         Param(name='kernel_num',
               value=11,
               hyper_space=hyper_spaces.quniform(low=5, high=20),
               desc="The number of RBF kernels."))
     params.add(
         Param(name='sigma',
               value=0.1,
               hyper_space=hyper_spaces.quniform(low=0.01, high=0.2,
                                                 q=0.01),
               desc="The `sigma` defines the kernel width."))
     params.add(
         Param(name='exact_sigma',
               value=0.001,
               desc="The `exact_sigma` denotes the `sigma` "
               "for exact match."))
     return params
Example 4
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=False,
         with_multi_layer_perceptron=True
     )
     params.add(Param(name='mask_value', value=0,
                      desc="The value to be masked from inputs."))
     params.add(Param(name='left_length', value=10,
                      desc='Length of left input.'))
     params.add(Param(name='right_length', value=40,
                      desc='Length of right input.'))
     params.add(Param(name='lm_filters', value=300,
                      desc="Filter size of 1D convolution layer in "
                           "the local model."))
     params.add(Param(name='vocab_size', value=419,
                      desc="Vocabulary size of the tri-letters used in "
                           "the distributed model."))
     params.add(Param(name='dm_filters', value=300,
                      desc="Filter size of 1D convolution layer in "
                           "the distributed model."))
     params.add(Param(name='dm_kernel_size', value=3,
                      desc="Kernel size of 1D convolution layer in "
                           "the distributed model."))
     params.add(Param(name='dm_conv_activation_func', value='relu',
                      desc="Activation functions of the convolution layer "
                           "in the distributed model."))
     params.add(Param(name='dm_right_pool_size', value=8,
                      desc="Kernel size of 1D convolution layer in "
                           "the distributed model."))
     params.add(Param(
         name='dropout_rate', value=0.5,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.02),
         desc="The dropout rate."))
     return params
Example 5
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params['optimizer'] = 'adam'
     params.add(
         Param(name='alpha',
               value=0.1,
               desc="Negative slope coefficient of LeakyReLU "
               "function."))
     params.add(
         Param(name='mlp_num_layers',
               value=3,
               desc="The number of layers of mlp."))
     params.add(
         Param(name='mlp_num_units',
               value=[10, 10],
               desc="The hidden size of the FC layers, but not "
               "include the final layer."))
     params.add(
         Param(name='lstm_num_units',
               value=5,
               desc="The hidden size of the LSTM layer."))
     params.add(
         Param(name='dropout_rate',
               value=0.1,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
Example 6
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params['optimizer'] = 'adam'

        # params.add(Param('dim_word_embedding', 50))
        # TODO(tjf): remove unused params in the final version
        # params.add(Param('dim_char_embedding', 50))
        # params.add(Param('word_embedding_mat'))
        # params.add(Param('char_embedding_mat'))
        # params.add(Param('embedding_random_scale', 0.2))
        # params.add(Param('activation_embedding', 'softmax'))

        # BiMPM Setting
        params.add(
            Param(
                'perspective', {
                    'full': True,
                    'max-pooling': True,
                    'attentive': True,
                    'max-attentive': True
                }))
        params.add(Param('mp_dim', 3))
        params.add(Param('att_dim', 3))
        params.add(Param('hidden_size', 4))
        params.add(Param('dropout_rate', 0.0))
        params.add(Param('w_initializer', 'glorot_uniform'))
        params.add(Param('b_initializer', 'zeros'))
        params.add(Param('activation_hidden', 'linear'))

        params.add(Param('with_match_highway', False))
        params.add(Param('with_aggregation_highway', False))

        return params
Example 7
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     params['optimizer'] = 'adam'
     params.add(
         Param(name='num_blocks',
               value=1,
               desc="Number of convolution blocks."))
     params.add(
         Param(name='left_filters',
               value=[32],
               desc="The filter size of each convolution "
               "blocks for the left input."))
     params.add(
         Param(name='left_kernel_sizes',
               value=[3],
               desc="The kernel size of each convolution "
               "blocks for the left input."))
     params.add(
         Param(name='right_filters',
               value=[32],
               desc="The filter size of each convolution "
               "blocks for the right input."))
     params.add(
         Param(name='right_kernel_sizes',
               value=[3],
               desc="The kernel size of each convolution "
               "blocks for the right input."))
     params.add(
         Param(name='conv_activation_func',
               value='relu',
               desc="The activation function in the "
               "convolution layer."))
     params.add(
         Param(name='left_pool_sizes',
               value=[2],
               desc="The pooling size of each convolution "
               "blocks for the left input."))
     params.add(
         Param(name='right_pool_sizes',
               value=[2],
               desc="The pooling size of each convolution "
               "blocks for the right input."))
     params.add(
         Param(name='padding',
               value='same',
               hyper_space=hyper_spaces.choice(['same', 'valid', 'causal']),
               desc="The padding mode in the convolution layer. "
               "It should be one of `same`, `valid`, and `causal`."))
     params.add(
         Param('dropout_rate',
               0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
Example 8
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(
         Param(name='lm_filters',
               value=32,
               desc="Filter size of 1D convolution layer in "
               "the local model."))
     params.add(
         Param(name='lm_hidden_sizes',
               value=[32],
               desc="A list of hidden size of the MLP layer "
               "in the local model."))
     params.add(
         Param(name='dm_filters',
               value=32,
               desc="Filter size of 1D convolution layer in "
               "the distributed model."))
     params.add(
         Param(name='dm_kernel_size',
               value=3,
               desc="Kernel size of 1D convolution layer in "
               "the distributed model."))
     params.add(
         Param(name='dm_q_hidden_size',
               value=32,
               desc="Hidden size of the MLP layer for the "
               "left text in the distributed model."))
     params.add(
         Param(name='dm_d_mpool',
               value=3,
               desc="Max pooling size for the right text in "
               "the distributed model."))
     params.add(
         Param(name='dm_hidden_sizes',
               value=[32],
               desc="A list of hidden size of the MLP layer "
               "in the distributed model."))
     params.add(
         Param(name='padding',
               value='same',
               desc="The padding mode in the convolution "
               "layer. It should be one of `same`, "
               "`valid`, "
               "and `causal`."))
     params.add(
         Param(name='activation_func',
               value='relu',
               desc="Activation function in the convolution"
               " layer."))
     params.add(
         Param(name='dropout_rate',
               value=0.5,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.02),
               desc="The dropout rate."))
     return params
Example 9
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True,
         with_multi_layer_perceptron=True
     )
     params.add(Param(name='mask_value', value=0,
                      desc="The value to be masked from inputs."))
     params.add(Param(name='hist_bin_size', value=30,
                      desc="The number of bin size of the histogram."))
     params['mlp_num_fan_out'] = 1
     return params
Example 10
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params()
     params.add(
         Param(name='mode',
               value='bert-base-uncased',
               desc="Pretrained Bert model."))
     params.add(
         Param('dropout_rate',
               0.2,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
Example 11
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     params.add(
         Param(name='left_length', value=10, desc='Length of left input.'))
     params.add(
         Param(name='right_length',
               value=100,
               desc='Length of right input.'))
     params.add(
         Param(name='conv_activation_func',
               value='relu',
               desc="The activation function in the "
               "convolution layer."))
     params.add(
         Param(name='left_filters',
               value=[32],
               desc="The filter size of each convolution "
               "blocks for the left input."))
     params.add(
         Param(name='left_kernel_sizes',
               value=[3],
               desc="The kernel size of each convolution "
               "blocks for the left input."))
     params.add(
         Param(name='left_pool_sizes',
               value=[2],
               desc="The pooling size of each convolution "
               "blocks for the left input."))
     params.add(
         Param(name='right_filters',
               value=[32],
               desc="The filter size of each convolution "
               "blocks for the right input."))
     params.add(
         Param(name='right_kernel_sizes',
               value=[3],
               desc="The kernel size of each convolution "
               "blocks for the right input."))
     params.add(
         Param(name='right_pool_sizes',
               value=[2],
               desc="The pooling size of each convolution "
               "blocks for the right input."))
     params.add(
         Param('dropout_rate',
               0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
Example 12
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True,
         with_multi_layer_perceptron=True
     )
     params.add(Param(name='mask_value', value=-1,
                      desc="The value to be masked from inputs."))
     params['input_shapes'] = [(5,), (300,)]
     params.add(Param(
         'top_k', value=10,
         hyper_space=hyper_spaces.quniform(low=2, high=100),
         desc="Size of top-k pooling layer."
     ))
     return params
Example 13
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params()
     params.add(Param(name='filters', value=128,
                      desc="The filter size in the convolution"
                           " layer."))
     params.add(Param(name='conv_activation_func', value='relu',
                      desc="The activation function in the "
                           "convolution layer."))
     params.add(Param(name='max_ngram', value=3,
                      desc="The maximum length of n-grams for the "
                           "convolution layer."))
     params.add(Param(name='use_crossmatch', value=True,
                      desc="Whether to match left n-grams and right "
                           "n-grams of different lengths"))
     return params
Example 14
def test_hyper_space(param_table):
    new_param = Param(
        name='my_param',
        value=1,
        hyper_space=quniform(low=1, high=5)
    )
    param_table.add(new_param)
    hyper_space = param_table.hyper_space
    assert hyper_space
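Once at least one Param carries a hyper_space, the table aggregates the per-param spaces into one searchable space, which is what the assertion above checks. A hedged sampling sketch (it assumes, as the test suggests, that param_table.hyper_space yields a hyperopt-compatible space):

from hyperopt.pyll.stochastic import sample

# Draw one random configuration from the aggregated space; keys follow
# the Param names, and values respect each quniform/choice definition.
config = sample(param_table.hyper_space)
print(config)  # e.g. {'my_param': 3.0}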
Example 15
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True, with_multi_layer_perceptron=True)
     params.add(Param(name='lstm_hidden_size', value=5,
                      desc="Integer, the hidden size of the "
                           "bi-directional LSTM layer."))
     params.add(Param(name='lstm_num', value=3,
                      desc="Integer, number of LSTM units"))
     params.add(Param(name='num_layers', value=1,
                      desc="Integer, number of LSTM layers."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(
             low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."
     ))
     return params
Example 16
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(
         Param('lstm_num_units',
               256,
               hyper_space=hyper_spaces.quniform(low=128, high=384, q=32),
               desc="The hidden size in the LSTM layer."))
     params.add(
         Param('fc_num_units',
               200,
               hyper_space=hyper_spaces.quniform(low=100, high=300, q=20),
               desc="The hidden size in the full connection layer."))
     params.add(
         Param('dropout_rate',
               0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.9, q=0.01),
               desc="The dropout rate."))
     return params
Example 17
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(Param(
         name='dropout_rate', value=0.1,
         desc="The dropout rate.",
         hyper_space=hyper_spaces.quniform(0, 1, 0.05)
     ))
     params.add(Param(
         name='num_layers', value=2,
         desc="Number of hidden layers in the MLP "
              "layer."
     ))
     params.add(Param(
         name='hidden_sizes', value=[30, 30],
         desc="Number of hidden size for each hidden"
              " layer"
     ))
     return params
Example 18
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(
         Param(name='channels',
               value=4,
               desc="Number of word interaction tensor channels"))
     params.add(
         Param(name='units', value=10, desc="Number of SpatialGRU units."))
     params.add(
         Param(name='direction',
               value='lt',
               desc="Direction of SpatialGRU scanning"))
     params.add(
         Param(name='dropout_rate',
               value=0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
Example 19
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(name='mask_value', value=0,
                         desc="The value to be masked from inputs."))
        params.add(Param(name='num_bins', value=200,
                         desc="Integer, number of bins."))
        params.add(Param(name='hidden_sizes', value=[100],
                         desc="Number of hidden size for each hidden layer"))
        params.add(Param(name='activation', value='relu',
                         desc="The activation function."))

        params.add(Param(
            'dropout_rate', 0.0,
            hyper_space=hyper_spaces.quniform(
                low=0.0, high=0.8, q=0.01),
            desc="The dropout rate."
        ))
        return params
Example 20
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     params.add(
         Param(name='lstm_units',
               value=32,
               desc="Integer, the hidden size in the "
               "bi-directional LSTM layer."))
     params.add(
         Param(name='dropout_rate',
               value=0.0,
               desc="Float, the dropout rate."))
     params.add(
         Param('top_k',
               value=10,
               hyper_space=hyper_spaces.quniform(low=2, high=100),
               desc="Integer, the size of top-k pooling layer."))
     params['optimizer'] = 'adam'
     return params
Example 21
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(
         Param(name='kernel_num',
               value=11,
               hyper_space=hyper_spaces.quniform(low=5, high=20),
               desc="The number of RBF kernels."))
     params.add(
         Param(name='sigma',
               value=0.1,
               hyper_space=hyper_spaces.quniform(low=0.01, high=0.2,
                                                 q=0.01),
               desc="The `sigma` defines the kernel width."))
     params.add(
         Param(name='exact_sigma',
               value=0.001,
               desc="The `exact_sigma` denotes the `sigma` "
               "for exact match."))
     return params
Example 22
    def get_default_params(cls) -> ParamTable:
        """Get default parameters."""
        params = super().get_default_params(with_embedding=True,
                                            with_multi_layer_perceptron=True)

        params.add(
            Param(name='dropout_rate',
                  value=0.5,
                  desc="The dropout rate for all fully-connected layer"))

        params.add(
            Param(name='lstm_dim',
                  value=8,
                  desc="The dimension of LSTM layer."))

        params.add(
            Param(name='mask_value',
                  value=0,
                  desc="The value would be regarded as pad"))

        return params
Example 23
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     params.add(
         Param(name='mask_value',
               value=-1,
               desc="The value to be masked from inputs."))
     params['optimizer'] = 'adam'
     params['input_shapes'] = [(5,), (5, 30)]
     return params
Example 24
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True,
         with_multi_layer_perceptron=True
     )
     params.add(Param(name='hidden_size', value=32,
                      desc="Integer, the hidden size in the "
                           "bi-directional LSTM layer."))
     params.add(Param(name='num_layers', value=1,
                      desc="Integer, number of recurrent layers."))
     params.add(Param(
         'top_k', value=10,
         hyper_space=hyper_spaces.quniform(low=2, high=100),
         desc="Size of top-k pooling layer."
     ))
     params.add(Param(
         'dropout_rate', 0.0,
         hyper_space=hyper_spaces.quniform(
             low=0.0, high=0.8, q=0.01),
         desc="Float, the dropout rate."
     ))
     return params
Example 25
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True,
                                            with_multi_layer_perceptron=False)
        params.add(
            Param(name='mask_value',
                  value=0,
                  desc="The value to be masked from inputs."))
        params.add(Param(name='dropout', value=0.2, desc="Dropout rate."))
        params.add(
            Param(name='hidden_size',
                  value=100,
                  hyper_space=hyper_spaces.quniform(low=100, high=300, q=100),
                  desc="Hidden size."))

        # BiMPM parameters
        params.add(
            Param(name='num_perspective',
                  value=20,
                  hyper_space=hyper_spaces.quniform(low=20, high=100, q=20),
                  desc="Number of matching perspectives."))

        return params
Example 26
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(
         Param(name='left_length', value=10, desc='Length of left input.'))
     params.add(
         Param(name='right_length',
               value=100,
               desc='Length of right input.'))
     params.add(
         Param(name='kernel_1d_count',
               value=32,
               desc="Kernel count of 1D convolution layer."))
     params.add(
         Param(name='kernel_1d_size',
               value=3,
               desc="Kernel size of 1D convolution layer."))
     params.add(
         Param(name='kernel_2d_count',
               value=[32],
               desc="Kernel count of 2D convolution layer in"
               "each block"))
     params.add(
         Param(name='kernel_2d_size',
               value=[(3, 3)],
               desc="Kernel size of 2D convolution layer in"
               " each block."))
     params.add(
         Param(name='activation', value='relu',
               desc="Activation function."))
     params.add(
         Param(name='pool_2d_size',
               value=[(2, 2)],
               desc="Size of pooling layer in each block."))
     params.add(
         Param('dropout_rate',
               0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
Example 27
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(
            Param(name='filters',
                  value=128,
                  desc="The filter size in the convolution layer."))
        params.add(
            Param(name='conv_activation_func',
                  value='relu',
                  desc="The activation function in the convolution layer."))
        params.add(
            Param(name='max_ngram',
                  value=3,
                  desc="The maximum length of n-grams for the convolution "
                  "layer."))
        params.add(
            Param(name='use_crossmatch',
                  value=True,
                  desc="Whether to match left n-grams and right n-grams of "
                  "different lengths"))
        params.add(
            Param(name='kernel_num',
                  value=11,
                  hyper_space=hyper_spaces.quniform(low=5, high=20),
                  desc="The number of RBF kernels."))
        params.add(
            Param(name='sigma',
                  value=0.1,
                  hyper_space=hyper_spaces.quniform(low=0.01, high=0.2,
                                                    q=0.01),
                  desc="The `sigma` defines the kernel width."))
        params.add(
            Param(name='exact_sigma',
                  value=0.001,
                  desc="The `exact_sigma` denotes the `sigma` "
                  "for exact match."))

        params.add(
            Param('dropout_rate',
                  0,
                  hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
                  desc="The dropout rate."))
        return params
Example 28
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params['optimizer'] = 'adam'
     opt_space = hyper_spaces.choice(['adam', 'rmsprop', 'adagrad'])
     params.get('optimizer').hyper_space = opt_space
     params.add(Param(name='num_blocks', value=1,
                      desc="Number of 2D convolution blocks."))
     params.add(Param(name='kernel_1d_count', value=32,
                      desc="Kernel count of 1D convolution layer."))
     params.add(Param(name='kernel_1d_size', value=3,
                      desc="Kernel size of 1D convolution layer."))
     params.add(Param(name='kernel_2d_count', value=[32],
                      desc="Kernel count of 2D convolution layer in"
                           "each block"))
     params.add(Param(name='kernel_2d_size', value=[[3, 3]],
                      desc="Kernel size of 2D convolution layer in"
                           " each block."))
     params.add(Param(name='activation', value='relu',
                      desc="Activation function."))
     params.add(Param(name='pool_2d_size', value=[[2, 2]],
                      desc="Size of pooling layer in each block."))
     params.add(Param(
         name='padding', value='same',
         hyper_space=hyper_spaces.choice(
             ['same', 'valid']),
         desc="The padding mode in the convolution layer. It should be one"
              "of `same`, `valid`."
     ))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8,
                                           q=0.01),
         desc="The dropout rate."
     ))
     return params
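This example also shows that a search space can be attached after registration by mutating the Param object returned by params.get(...). Building on that, a hedged random-search loop (it assumes hyperopt is available and that sampled values can be written back dict-style, as the examples themselves do; `MyModel` is again a placeholder):

from hyperopt.pyll.stochastic import sample

params = MyModel.get_default_params()
for trial in range(10):
    # Sample one configuration and write each value back into the
    # table before constructing and evaluating a model.
    for name, value in sample(params.hyper_space).items():
        params[name] = value
    # ... build a model from `params`, train it, and record the score ...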
Example 29
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     # set :attr:`with_multi_layer_perceptron` to True to append the
     # shared multi-layer perceptron on top of the convolution output
     params = super().get_default_params(with_multi_layer_perceptron=True)
     params.add(
         Param(name='filters',
               value=32,
               desc="Number of filters in the 1D convolution "
               "layer."))
     params.add(
         Param(name='kernel_size',
               value=3,
               desc="Number of kernel size in the 1D "
               "convolution layer."))
     params.add(
         Param(name='strides',
               value=1,
               desc="Strides in the 1D convolution layer."))
     params.add(
         Param(name='padding',
               value='same',
               desc="The padding mode in the convolution "
               "layer. It should be one of `same`, "
               "`valid`, "
               "and `causal`."))
     params.add(
         Param(name='conv_activation_func',
               value='relu',
               desc="Activation function in the convolution"
               " layer."))
     params.add(Param(name='w_initializer', value='glorot_normal'))
     params.add(Param(name='b_initializer', value='zeros'))
     params.add(
         Param(name='dropout_rate', value=0.3, desc="The dropout rate."))
     return params
Example 30
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True,
         with_multi_layer_perceptron=False
     )
     params.add(Param(name='mask_value', value=0,
                      desc="The value to be masked from inputs."))
     params.add(Param(name='dropout', value=0.2,
                      desc="Dropout rate."))
     params.add(Param(name='hidden_size', value=200,
                      desc="Hidden size."))
     params.add(Param(name='lstm_layer', value=1,
                      desc="Number of LSTM layers"))
     params.add(Param(name='drop_lstm', value=False,
                      desc="Whether dropout LSTM."))
     params.add(Param(name='concat_lstm', value=True,
                      desc="Whether concat intermediate outputs."))
     params.add(Param(name='rnn_type', value='lstm',
                      desc="Choose rnn type, lstm or gru."))
     return params