示例#1
0
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        # Pretrained-BERT variant and regularisation.
        params.add(Param(
            name='mode', value='bert-base-uncased',
            desc="Pretrained Bert model."))
        params.add(Param(
            name='dropout_rate', value=0.0,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
            desc="The dropout rate."))
        # RBF-kernel configuration for the matching layer.
        params.add(Param(
            name='kernel_num', value=11,
            hyper_space=hyper_spaces.quniform(low=5, high=20),
            desc="The number of RBF kernels."))
        params.add(Param(
            name='sigma', value=0.1,
            hyper_space=hyper_spaces.quniform(low=0.01, high=0.2, q=0.01),
            desc="The `sigma` defines the kernel width."))
        params.add(Param(
            name='exact_sigma', value=0.001,
            desc="The `exact_sigma` denotes the `sigma` for exact match."))
        params.add(Param(
            name='token_dim', value=512,
            desc="The maximum number of tokens for BERT."))
        return params
示例#2
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_multi_layer_perceptron=True)
     # Raise the MLP width default and attach tuning spaces to the
     # inherited MLP parameters.
     params['mlp_num_units'] = 256
     params.get('mlp_num_units').hyper_space = hyper_spaces.quniform(16, 512)
     params.get('mlp_num_layers').hyper_space = hyper_spaces.quniform(1, 5)
     return params
示例#3
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     # Convolution-block topology.
     params.add(Param(
         name='num_blocks', value=1,
         desc="Number of convolution blocks."))
     params.add(Param(
         name='kernel_count', value=[32],
         desc="The kernel count of the 2D convolution of each block."))
     params.add(Param(
         name='kernel_size', value=[[3, 3]],
         desc="The kernel size of the 2D convolution of each block."))
     params.add(Param(
         name='activation', value='relu',
         desc="The activation function."))
     params.add(Param(
         name='dpool_size', value=[3, 10],
         desc="The max-pooling size of each block."))
     params.add(Param(
         name='padding', value='same',
         desc="The padding mode in the convolution layer."))
     # Regularisation.
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#4
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=False,
         with_multi_layer_perceptron=True
     )
     # Input masking and fixed text lengths.
     params.add(Param(
         name='mask_value', value=0,
         desc="The value to be masked from inputs."))
     params.add(Param(
         name='left_length', value=10,
         desc='Length of left input.'))
     params.add(Param(
         name='right_length', value=40,
         desc='Length of right input.'))
     # Local-model branch.
     params.add(Param(
         name='lm_filters', value=300,
         desc="Filter size of 1D convolution layer in the local model."))
     # Distributed-model branch.
     params.add(Param(
         name='vocab_size', value=419,
         desc="Vocabulary size of the tri-letters used in "
              "the distributed model."))
     params.add(Param(
         name='dm_filters', value=300,
         desc="Filter size of 1D convolution layer in "
              "the distributed model."))
     params.add(Param(
         name='dm_kernel_size', value=3,
         desc="Kernel size of 1D convolution layer in "
              "the distributed model."))
     params.add(Param(
         name='dm_conv_activation_func', value='relu',
         desc="Activation functions of the convolution layer "
              "in the distributed model."))
     params.add(Param(
         name='dm_right_pool_size', value=8,
         desc="Kernel size of 1D convolution layer in "
              "the distributed model."))
     params.add(Param(
         name='dropout_rate', value=0.5,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.02),
         desc="The dropout rate."))
     return params
示例#5
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params['optimizer'] = 'adam'
     params.add(Param(
         name='alpha', value=0.1,
         desc="Negative slope coefficient of LeakyReLU function."))
     # MLP head configuration.
     params.add(Param(
         name='mlp_num_layers', value=3,
         desc="The number of layers of mlp."))
     params.add(Param(
         name='mlp_num_units', value=[10, 10],
         desc="The hidden size of the FC layers, but not "
              "include the final layer."))
     params.add(Param(
         name='lstm_num_units', value=5,
         desc="The hidden size of the LSTM layer."))
     params.add(Param(
         name='dropout_rate', value=0.1,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#6
0
def test_hyper_space(param_table):
    """A table containing a param with a hyper space exposes a truthy one."""
    param = Param(name='my_param', value=1,
                  hyper_space=quniform(low=1, high=5))
    param_table.add(param)
    assert param_table.hyper_space
示例#7
0
File: diin.py — Project: zmskye/MatchZoo
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params['optimizer'] = 'adam'
     # Decaying-dropout schedule.
     params.add(Param(
         name='dropout_decay_interval', value=10000,
         desc="The decay interval of decaying_dropout."))
     # Character-level embedding and convolution.
     params.add(Param(
         name='char_embedding_input_dim', value=100,
         desc="The input dimension of character embedding layer."))
     params.add(Param(
         name='char_embedding_output_dim', value=2,
         desc="The output dimension of character embedding layer."))
     params.add(Param(
         name='char_conv_filters', value=8,
         desc="The filter size of character convolution layer."))
     params.add(Param(
         name='char_conv_kernel_size', value=2,
         desc="The kernel size of character convolution layer."))
     # DenseNet topology.
     params.add(Param(
         name='first_scale_down_ratio', value=0.3,
         desc="The channel scale down ratio of the "
              "convolution layer before densenet."))
     params.add(Param(
         name='nb_dense_blocks', value=1,
         desc="The number of blocks in densenet."))
     params.add(Param(
         name='layers_per_dense_block', value=2,
         desc="The number of convolution layers in dense block."))
     params.add(Param(
         name='growth_rate', value=2,
         desc="The filter size of each convolution layer in dense block."))
     params.add(Param(
         name='transition_scale_down_ratio', value=0.5,
         desc="The channel scale down ratio of the "
              "convolution layer in transition block."))
     # Decaying-dropout rates.
     params.add(Param(
         name='dropout_initial_keep_rate', value=1.0,
         hyper_space=hyper_spaces.quniform(low=0.8, high=1.0, q=0.02),
         desc="The initial keep rate of decaying_dropout."))
     params.add(Param(
         name='dropout_decay_rate', value=0.97,
         hyper_space=hyper_spaces.quniform(low=0.90, high=0.99, q=0.01),
         desc="The decay rate of decaying_dropout."))
     return params
示例#8
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     params['optimizer'] = 'adam'
     params.add(Param(
         name='num_blocks', value=1,
         desc="Number of convolution blocks."))
     # Left- and right-side convolution stacks are configured
     # independently.
     params.add(Param(
         name='left_filters', value=[32],
         desc="The filter size of each convolution "
              "blocks for the left input."))
     params.add(Param(
         name='left_kernel_sizes', value=[3],
         desc="The kernel size of each convolution "
              "blocks for the left input."))
     params.add(Param(
         name='right_filters', value=[32],
         desc="The filter size of each convolution "
              "blocks for the right input."))
     params.add(Param(
         name='right_kernel_sizes', value=[3],
         desc="The kernel size of each convolution "
              "blocks for the right input."))
     params.add(Param(
         name='conv_activation_func', value='relu',
         desc="The activation function in the convolution layer."))
     params.add(Param(
         name='left_pool_sizes', value=[2],
         desc="The pooling size of each convolution "
              "blocks for the left input."))
     params.add(Param(
         name='right_pool_sizes', value=[2],
         desc="The pooling size of each convolution "
              "blocks for the right input."))
     params.add(Param(
         name='padding', value='same',
         hyper_space=hyper_spaces.choice(['same', 'valid', 'causal']),
         # BUG FIX: the fragments previously concatenated to
         # "...should be oneof `same`..." (missing space).
         desc="The padding mode in the convolution layer. It should be "
              "one of `same`, `valid`, and `causal`."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#9
0
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        # N-gram convolution settings.
        params.add(Param(
            name='filters', value=128,
            desc="The filter size in the convolution layer."))
        params.add(Param(
            name='conv_activation_func', value='relu',
            desc="The activation function in the convolution layer."))
        params.add(Param(
            name='max_ngram', value=3,
            desc="The maximum length of n-grams for the convolution layer."))
        params.add(Param(
            name='use_crossmatch', value=True,
            desc="Whether to match left n-grams and right n-grams of "
                 "different lengths"))
        # RBF-kernel configuration.
        params.add(Param(
            name='kernel_num', value=11,
            hyper_space=hyper_spaces.quniform(low=5, high=20),
            desc="The number of RBF kernels."))
        params.add(Param(
            name='sigma', value=0.1,
            hyper_space=hyper_spaces.quniform(low=0.01, high=0.2, q=0.01),
            desc="The `sigma` defines the kernel width."))
        params.add(Param(
            name='exact_sigma', value=0.001,
            desc="The `exact_sigma` denotes the `sigma` for exact match."))
        params.add(Param(
            name='dropout_rate', value=0,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
            desc="The dropout rate."))
        return params
示例#10
0
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params(with_embedding=True)
     # Local-model branch.
     params.add(Param(
         name='lm_filters', value=32,
         desc="Filter size of 1D convolution layer in the local model."))
     params.add(Param(
         name='lm_hidden_sizes', value=[32],
         desc="A list of hidden size of the MLP layer in the local model."))
     # Distributed-model branch.
     params.add(Param(
         name='dm_filters', value=32,
         desc="Filter size of 1D convolution layer in "
              "the distributed model."))
     params.add(Param(
         name='dm_kernel_size', value=3,
         desc="Kernel size of 1D convolution layer in "
              "the distributed model."))
     params.add(Param(
         name='dm_q_hidden_size', value=32,
         desc="Hidden size of the MLP layer for the "
              "left text in the distributed model."))
     params.add(Param(
         name='dm_d_mpool', value=3,
         desc="Max pooling size for the right text in "
              "the distributed model."))
     params.add(Param(
         name='dm_hidden_sizes', value=[32],
         desc="A list of hidden size of the MLP layer "
              "in the distributed model."))
     # Shared convolution settings.
     params.add(Param(
         name='padding', value='same',
         desc="The padding mode in the convolution layer. It should be "
              "one of `same`, `valid`, and `causal`."))
     params.add(Param(
         name='activation_func', value='relu',
         desc="Activation function in the convolution layer."))
     params.add(Param(
         name='dropout_rate', value=0.5,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.02),
         desc="The dropout rate."))
     return params
示例#11
0
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params(with_embedding=True)
     # All three parameters carry tuning spaces.
     params.add(Param(
         name='lstm_num_units', value=256,
         hyper_space=hyper_spaces.quniform(low=128, high=384, q=32),
         desc="The hidden size in the LSTM layer."))
     params.add(Param(
         name='fc_num_units', value=200,
         hyper_space=hyper_spaces.quniform(low=100, high=300, q=20),
         desc="The hidden size in the full connection layer."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.9, q=0.01),
         desc="The dropout rate."))
     return params
示例#12
0
 def get_default_params(cls):
     """Get default parameters."""
     params = super().get_default_params(with_embedding=True)
     # RBF-kernel configuration.
     params.add(Param(
         name='kernel_num', value=11,
         hyper_space=hyper_spaces.quniform(low=5, high=20),
         desc="The number of RBF kernels."))
     params.add(Param(
         name='sigma', value=0.1,
         hyper_space=hyper_spaces.quniform(low=0.01, high=0.2, q=0.01),
         desc="The `sigma` defines the kernel width."))
     params.add(Param(
         name='exact_sigma', value=0.001,
         desc="The `exact_sigma` denotes the `sigma` for exact match."))
     return params
示例#13
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params()
     params.add(Param(
         name='mode', value='bert-base-uncased',
         desc="Pretrained Bert model."))
     params.add(Param(
         name='dropout_rate', value=0.2,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#14
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     # Fixed input lengths.
     params.add(Param(
         name='left_length', value=10,
         desc='Length of left input.'))
     params.add(Param(
         name='right_length', value=100,
         desc='Length of right input.'))
     params.add(Param(
         name='conv_activation_func', value='relu',
         desc="The activation function in the convolution layer."))
     # Left-side convolution stack.
     params.add(Param(
         name='left_filters', value=[32],
         desc="The filter size of each convolution "
              "blocks for the left input."))
     params.add(Param(
         name='left_kernel_sizes', value=[3],
         desc="The kernel size of each convolution "
              "blocks for the left input."))
     params.add(Param(
         name='left_pool_sizes', value=[2],
         desc="The pooling size of each convolution "
              "blocks for the left input."))
     # Right-side convolution stack.
     params.add(Param(
         name='right_filters', value=[32],
         desc="The filter size of each convolution "
              "blocks for the right input."))
     params.add(Param(
         name='right_kernel_sizes', value=[3],
         desc="The kernel size of each convolution "
              "blocks for the right input."))
     params.add(Param(
         name='right_pool_sizes', value=[2],
         desc="The pooling size of each convolution "
              "blocks for the right input."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#15
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True,
         with_multi_layer_perceptron=True
     )
     params.add(Param(
         name='hidden_size', value=32,
         desc="Integer, the hidden size in the "
              "bi-directional LSTM layer."))
     params.add(Param(
         name='num_layers', value=1,
         desc="Integer, number of recurrent layers."))
     params.add(Param(
         name='top_k', value=10,
         hyper_space=hyper_spaces.quniform(low=2, high=100),
         desc="Size of top-k pooling layer."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="Float, the dropout rate."))
     return params
示例#16
0
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True,
                                            with_multi_layer_perceptron=False)
        params.add(Param(
            name='mask_value', value=0,
            desc="The value to be masked from inputs."))
        params.add(Param(
            name='dropout', value=0.2,
            desc="Dropout rate."))
        params.add(Param(
            name='hidden_size', value=100,
            hyper_space=hyper_spaces.quniform(low=100, high=300, q=100),
            desc="Hidden size."))
        # BiMPM-specific parameter.
        params.add(Param(
            name='num_perspective', value=20,
            hyper_space=hyper_spaces.quniform(low=20, high=100, q=20),
            desc='num_perspective'))
        return params
示例#17
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True,
         with_multi_layer_perceptron=True
     )
     params.add(Param(
         name='mask_value', value=-1,
         desc="The value to be masked from inputs."))
     # Override the inherited default input shapes.
     params['input_shapes'] = [(5,), (300,)]
     params.add(Param(
         name='top_k', value=10,
         hyper_space=hyper_spaces.quniform(low=2, high=100),
         desc="Size of top-k pooling layer."))
     return params
示例#18
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(
         with_embedding=True, with_multi_layer_perceptron=True)
     params.add(Param(
         name='lstm_hidden_size', value=5,
         desc="Integer, the hidden size of the "
              "bi-directional LSTM layer."))
     params.add(Param(
         name='lstm_num', value=3,
         desc="Integer, number of LSTM units"))
     params.add(Param(
         name='num_layers', value=1,
         desc="Integer, number of LSTM layers."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#19
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     # Interaction-tensor and SpatialGRU settings.
     params.add(Param(
         name='channels', value=4,
         desc="Number of word interaction tensor channels"))
     params.add(Param(
         name='units', value=10,
         desc="Number of SpatialGRU units"))
     params.add(Param(
         name='direction', value='lt',
         desc="Direction of SpatialGRU scanning"))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#20
0
    def get_default_params(cls) -> ParamTable:
        """:return: model default parameters."""
        params = super().get_default_params(with_embedding=True)
        params.add(Param(
            name='mask_value', value=0,
            desc="The value to be masked from inputs."))
        params.add(Param(
            name='num_bins', value=200,
            desc="Integer, number of bins."))
        params.add(Param(
            name='hidden_sizes', value=[100],
            desc="Number of hidden size for each hidden layer"))
        params.add(Param(
            name='activation', value='relu',
            desc="The activation function."))
        params.add(Param(
            name='dropout_rate', value=0.0,
            hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
            desc="The dropout rate."))
        return params
示例#21
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     params.add(Param(
         name='dropout_rate', value=0.1,
         hyper_space=hyper_spaces.quniform(0, 1, 0.05),
         desc="The dropout rate."))
     # MLP head configuration.
     params.add(Param(
         name='num_layers', value=2,
         desc="Number of hidden layers in the MLP layer."))
     params.add(Param(
         name='hidden_sizes', value=[30, 30],
         desc="Number of hidden size for each hidden layer"))
     return params
示例#22
0
File: mvlstm.py — Project: zmskye/MatchZoo
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True,
                                         with_multi_layer_perceptron=True)
     params.add(Param(
         name='lstm_units', value=32,
         desc="Integer, the hidden size in the "
              "bi-directional LSTM layer."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         desc="Float, the dropout rate."))
     params.add(Param(
         name='top_k', value=10,
         hyper_space=hyper_spaces.quniform(low=2, high=100),
         desc="Integer, the size of top-k pooling layer."))
     params['optimizer'] = 'adam'
     return params
示例#23
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     # Fixed input lengths.
     params.add(Param(
         name='left_length', value=10,
         desc='Length of left input.'))
     params.add(Param(
         name='right_length', value=100,
         desc='Length of right input.'))
     # 1D then 2D convolution stages.
     params.add(Param(
         name='kernel_1d_count', value=32,
         desc="Kernel count of 1D convolution layer."))
     params.add(Param(
         name='kernel_1d_size', value=3,
         desc="Kernel size of 1D convolution layer."))
     params.add(Param(
         name='kernel_2d_count', value=[32],
         # BUG FIX: the fragments previously joined to
         # "layer ineach block" (missing space).
         desc="Kernel count of 2D convolution layer in "
              "each block"))
     params.add(Param(
         name='kernel_2d_size', value=[(3, 3)],
         desc="Kernel size of 2D convolution layer in"
              " each block."))
     params.add(Param(
         name='activation', value='relu',
         desc="Activation function."))
     params.add(Param(
         name='pool_2d_size', value=[(2, 2)],
         desc="Size of pooling layer in each block."))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
         desc="The dropout rate."))
     return params
示例#24
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     # The optimizer itself is tunable.
     params['optimizer'] = 'adam'
     opt_space = hyper_spaces.choice(['adam', 'rmsprop', 'adagrad'])
     params.get('optimizer').hyper_space = opt_space
     params.add(Param(name='num_blocks', value=1,
                      desc="Number of 2D convolution blocks."))
     params.add(Param(name='kernel_1d_count', value=32,
                      desc="Kernel count of 1D convolution layer."))
     params.add(Param(name='kernel_1d_size', value=3,
                      desc="Kernel size of 1D convolution layer."))
     # BUG FIX: the fragments below previously joined to
     # "layer ineach block" (missing space).
     params.add(Param(name='kernel_2d_count', value=[32],
                      desc="Kernel count of 2D convolution layer in "
                           "each block"))
     params.add(Param(name='kernel_2d_size', value=[[3, 3]],
                      desc="Kernel size of 2D convolution layer in"
                           " each block."))
     params.add(Param(name='activation', value='relu',
                      desc="Activation function."))
     params.add(Param(name='pool_2d_size', value=[[2, 2]],
                      desc="Size of pooling layer in each block."))
     params.add(Param(
         name='padding', value='same',
         hyper_space=hyper_spaces.choice(
             ['same', 'valid']),
         # BUG FIX: previously concatenated to "...should be oneof
         # `same`..." (missing space).
         desc="The padding mode in the convolution layer. It should be "
              "one of `same`, `valid`."
     ))
     params.add(Param(
         name='dropout_rate', value=0.0,
         hyper_space=hyper_spaces.quniform(low=0.0, high=0.8,
                                           q=0.01),
         desc="The dropout rate."
     ))
     return params
示例#25
0
    def get_default_params(cls,
                           with_embedding=False,
                           with_multi_layer_perceptron=False) -> ParamTable:
        """
        Model default parameters.

        The common usage is to instantiate :class:`matchzoo.engine.ModelParams`
            first, then set the model specific parameters.

        Examples:
            >>> class MyModel(BaseModel):
            ...     def build(self):
            ...         print(self._params['num_eggs'], 'eggs')
            ...         print('and', self._params['ham_type'])
            ...     def forward(self, greeting):
            ...         print(greeting)
            ...
            ...     @classmethod
            ...     def get_default_params(cls):
            ...         params = ParamTable()
            ...         params.add(Param('num_eggs', 512))
            ...         params.add(Param('ham_type', 'Parma Ham'))
            ...         return params
            >>> my_model = MyModel()
            >>> my_model.build()
            512 eggs
            and Parma Ham
            >>> my_model('Hello MatchZoo!')
            Hello MatchZoo!

        Notice that all parameters must be serialisable for the entire model
        to be serialisable. Therefore, it's strongly recommended to use python
        native data types to store parameters.

        :param with_embedding: when True, add embedding-layer parameters.
        :param with_multi_layer_perceptron: when True, add MLP parameters.
        :return: model parameters

        """
        params = ParamTable()
        # Parameters shared by every model.
        params.add(
            Param(name='model_class',
                  value=cls,
                  desc="Model class. Used internally for save/load. "
                  "Changing this may cause unexpected behaviors."))
        params.add(
            Param(name='task',
                  desc="Decides model output shape, loss, and metrics."))
        params.add(
            Param(name='out_activation_func',
                  value=None,
                  desc="Activation function used in output layer."))
        # Embedding-layer parameters, added only when a subclass asks for
        # them via `with_embedding=True`.
        if with_embedding:
            params.add(
                Param(
                    name='with_embedding',
                    value=True,
                    desc="A flag used help `auto` module. Shouldn't be changed."
                ))
            params.add(
                Param(name='embedding',
                      desc='FloatTensor containing weights for the Embedding.',
                      validator=lambda x: isinstance(x, np.ndarray)))
            params.add(
                Param(
                    name='embedding_input_dim',
                    desc=
                    'Usually equals vocab size + 1. Should be set manually.'))
            params.add(
                Param(name='embedding_output_dim',
                      desc='Should be set manually.'))
            params.add(
                Param(
                    name='padding_idx',
                    value=0,
                    desc='If given, pads the output with the embedding vector at'
                    'padding_idx (initialized to zeros) whenever it encounters'
                    'the index.'))
            params.add(
                Param(name='embedding_freeze',
                      value=False,
                      desc='`True` to freeze embedding layer training, '
                      '`False` to enable embedding parameters.'))
        # Multi-layer-perceptron parameters, added only when requested via
        # `with_multi_layer_perceptron=True`.
        if with_multi_layer_perceptron:
            params.add(
                Param(name='with_multi_layer_perceptron',
                      value=True,
                      desc=
                      "A flag of whether a multiple layer perceptron is used. "
                      "Shouldn't be changed."))
            params.add(
                Param(name='mlp_num_units',
                      value=128,
                      desc="Number of units in first `mlp_num_layers` layers.",
                      hyper_space=hyper_spaces.quniform(8, 256, 8)))
            params.add(
                Param(name='mlp_num_layers',
                      value=3,
                      desc="Number of layers of the multiple layer percetron.",
                      hyper_space=hyper_spaces.quniform(1, 6)))
            params.add(
                Param(
                    name='mlp_num_fan_out',
                    value=64,
                    desc=
                    "Number of units of the layer that connects the multiple "
                    "layer percetron and the output.",
                    hyper_space=hyper_spaces.quniform(4, 128, 4)))
            params.add(
                Param(name='mlp_activation_func',
                      value='relu',
                      desc='Activation function used in the multiple '
                      'layer perceptron.'))
        return params
示例#26
0
 def get_default_params(cls) -> ParamTable:
     """:return: model default parameters."""
     params = super().get_default_params(with_embedding=True)
     # Plain (name, value, description) parameters; registered below in
     # exactly the same order as individual `params.add` calls would be.
     plain_specs = [
         ('mask_value', 0,
          "The value to be masked from inputs."),
         ('char_embedding_input_dim', 100,
          "The input dimension of character embedding layer."),
         ('char_embedding_output_dim', 8,
          "The output dimension of character embedding layer."),
         ('char_conv_filters', 100,
          "The filter size of character convolution layer."),
         ('char_conv_kernel_size', 5,
          "The kernel size of character convolution layer."),
         ('first_scale_down_ratio', 0.3,
          "The channel scale down ratio of the convolution layer "
          "before densenet."),
         ('nb_dense_blocks', 3,
          "The number of blocks in densenet."),
         ('layers_per_dense_block', 8,
          "The number of convolution layers in dense block."),
         ('growth_rate', 20,
          "The filter size of each convolution layer in dense "
          "block."),
         ('transition_scale_down_ratio', 0.5,
          "The channel scale down ratio of the convolution layer "
          "in transition block."),
         ('conv_kernel_size', (3, 3),
          "The kernel size of convolution layer in dense block."),
         ('pool_kernel_size', (2, 2),
          "The kernel size of pooling layer in transition block."),
     ]
     for name, value, desc in plain_specs:
         params.add(Param(name=name, value=value, desc=desc))
     # `dropout_rate` carries a hyper-space, so it is registered separately.
     params.add(
         Param('dropout_rate',
               0.0,
               hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.01),
               desc="The dropout rate."))
     return params
示例#27
0
    def get_default_params(
            cls,
            with_embedding: bool = False,
            with_multi_layer_perceptron: bool = False) -> engine.ParamTable:
        """
        Model default parameters.

        The common usage is to instantiate :class:`matchzoo.engine.ModelParams`
            first, then set the model specific parameters.

        :param with_embedding: `True` to also register the embedding-related
            parameters (`with_embedding`, `embedding_input_dim`,
            `embedding_output_dim`, `embedding_trainable`).
        :param with_multi_layer_perceptron: `True` to also register the
            MLP-related parameters (`with_multi_layer_perceptron`,
            `mlp_num_units`, `mlp_num_layers`, `mlp_num_fan_out`,
            `mlp_activation_func`).

        Examples:
            >>> class MyModel(BaseModel):
            ...     def build(self):
            ...         print(self._params['num_eggs'], 'eggs')
            ...         print('and', self._params['ham_type'])
            ...
            ...     @classmethod
            ...     def get_default_params(cls):
            ...         params = engine.ParamTable()
            ...         params.add(engine.Param('num_eggs', 512))
            ...         params.add(engine.Param('ham_type', 'Parma Ham'))
            ...         return params
            >>> my_model = MyModel()
            >>> my_model.build()
            512 eggs
            and Parma Ham

        Notice that all parameters must be serialisable for the entire model
        to be serialisable. Therefore, it's strongly recommended to use python
        native data types to store parameters.

        :return: model parameters

        """
        # Parameters common to every model.
        params = engine.ParamTable()
        params.add(
            engine.Param(name='model_class',
                         value=cls,
                         desc="Model class. Used internally for save/load. "
                         "Changing this may cause unexpected behaviors."))
        params.add(
            engine.Param(
                name='input_shapes',
                desc="Dependent on the model and data. Should be set manually."
            ))
        params.add(
            engine.Param(
                name='task',
                desc="Decides model output shape, loss, and metrics."))
        params.add(engine.Param(
            name='optimizer',
            value='adam',
        ))
        # Optional group: embedding-layer parameters.
        if with_embedding:
            params.add(
                engine.Param(
                    name='with_embedding',
                    value=True,
                    desc="A flag used help `auto` module. Shouldn't be changed."
                ))
            params.add(
                engine.Param(
                    name='embedding_input_dim',
                    desc=
                    'Usually equals vocab size + 1. Should be set manually.'))
            params.add(
                engine.Param(name='embedding_output_dim',
                             desc='Should be set manually.'))
            params.add(
                engine.Param(name='embedding_trainable',
                             value=True,
                             desc='`True` to enable embedding layer training, '
                             '`False` to freeze embedding parameters.'))
        # Optional group: multi-layer perceptron parameters.
        if with_multi_layer_perceptron:
            params.add(
                engine.Param(
                    name='with_multi_layer_perceptron',
                    value=True,
                    desc=
                    "A flag of whether a multiple layer perceptron is used. "
                    "Shouldn't be changed."))
            params.add(
                engine.Param(
                    name='mlp_num_units',
                    value=64,
                    desc="Number of units in first `mlp_num_layers` layers.",
                    hyper_space=hyper_spaces.quniform(8, 256, 8)))
            params.add(
                engine.Param(
                    name='mlp_num_layers',
                    value=3,
                    desc="Number of layers of the multiple layer percetron.",
                    hyper_space=hyper_spaces.quniform(1, 6)))
            params.add(
                engine.Param(
                    name='mlp_num_fan_out',
                    value=32,
                    desc=
                    "Number of units of the layer that connects the multiple "
                    "layer percetron and the output.",
                    hyper_space=hyper_spaces.quniform(4, 128, 4)))
            params.add(
                engine.Param(name='mlp_activation_func',
                             value='relu',
                             desc='Activation function used in the multiple '
                             'layer perceptron.'))
        return params
示例#28
0
                params=[
                    lambda x: x + 2, lambda x: x - 2, lambda x: x * 2,
                    lambda x: x / 2, lambda x: x // 2, lambda x: x**2,
                    lambda x: 2 + x, lambda x: 2 - x, lambda x: 2 * x,
                    lambda x: 2 / x, lambda x: 2 // x, lambda x: 2**x,
                    lambda x: -x
                ])
def op(request):
    return request.param


# One representative instance of each hyper-space kind.
_SPACE_SAMPLES = [
    hyper_spaces.choice(options=[0, 1]),
    hyper_spaces.uniform(low=0, high=10),
    hyper_spaces.quniform(low=0, high=10, q=2),
]


@pytest.fixture(scope='module', params=_SPACE_SAMPLES)
def proxy(request):
    """Parametrized fixture yielding one hyper-space instance per run."""
    return request.param


def test_init(proxy):
    """Converting a hyper-space should yield a hyperopt `Apply` node."""
    converted = proxy.convert('label')
    assert isinstance(converted, hyperopt.pyll.base.Apply)


def test_op(proxy, op):
    """Arithmetic on a hyper-space should still convert to an `Apply` node."""
    combined = op(proxy)
    assert isinstance(combined.convert('label'), hyperopt.pyll.base.Apply)


def test_str(proxy):
    """A hyper-space's string representation should be a plain `str`."""
    text = str(proxy)
    assert isinstance(text, str)