Example no. 1
0
def encoder(emb_sentence, params, name='encoded'):
    """Encode an embedded sentence with parallel conv branches plus an MLP head.

	@param emb_sentence: embedded input sentence tensor.
	@param params: configuration dictionary; conv and MLP settings are read from it.
	@param name: name given to the output identity op.
	@return: ([output tensor], {}).
	"""
    # Small closure so each config key is fetched the same way.
    def _cfg(*args):
        return utils.get_dict_value(params, *args)

    num_features = _cfg('conv_num_features')
    widths = _cfg('conv_widths')
    conv_keep = _cfg('conv_keep_probs')
    fc_config = _cfg('mlp_config')
    skip_conv = _cfg('bipass_conv')
    fc_activations = _cfg('mlp_activations')
    fc_keep = _cfg('mlp_keep_probs')
    identity_path = _cfg('use_no_conv_path')
    w_wd = _cfg('weight_wd_regularization', 0.0)
    b_wd = _cfg('bias_wd_regularization', 0.0)

    # The raw embedding joins the concat either when conv is bypassed entirely
    # or when an extra no-conv path is requested alongside the conv branches.
    branches = [emb_sentence] if (skip_conv or identity_path) else []
    if not skip_conv:
        for idx, (n_feat, width) in enumerate(zip(num_features, widths)):
            branches.append(
                nlp.conv1d_array(emb_sentence,
                                 n_feat,
                                 width,
                                 name='conv%s' % (str(idx)),
                                 w_wds=w_wd,
                                 b_wds=b_wd,
                                 keep_probs=conv_keep))

    merged, _ = misc.concat(branches)
    net_out, _ = mlp.fully_connected_network(
        merged,
        fc_config,
        layer_activations=fc_activations,
        dropout_keep_probs=fc_keep)
    return [tf.identity(net_out[0], name=name)], {}
Example no. 2
0
def sentence_encoder(emb_sentence, params, name='encoded_sentence'):
    """Encode an embedded sentence with parallel conv branches plus an MLP head.

	@param emb_sentence: embedded input sentence tensor.
	@param params: configuration dictionary; conv and MLP settings are read
		from it, with defaults applied for any missing key.
	@param name: name given to the output identity op.
	@return: ([output tensor], {}).
	"""
    conv_num_features = utils.get_dict_value(params, 'conv_num_features',
                                             [[100, 100, 100], [100]])
    conv_widths = utils.get_dict_value(params, 'conv_widths', [[2, 3, 4], [3]])
    conv_keep_probs = utils.get_dict_value(params, 'conv_keep_probs', 0.5)
    mlp_config = utils.get_dict_value(params, 'mlp_config', [512])
    bipass_conv = utils.get_dict_value(params, 'bipass_conv', False)
    mlp_activations = utils.get_dict_value(params, 'mlp_activations',
                                           'sigmoid')
    mlp_dropout_keep_probs = utils.get_dict_value(params, 'mlp_keep_probs',
                                                  0.9)
    use_no_conv_path = utils.get_dict_value(params, 'use_no_conv_path', False)
    # Consistency with encoder(): read weight-decay strengths from params
    # instead of hard-coding 0.000. The 0.0 defaults preserve prior behavior.
    weight_wd_regularization = utils.get_dict_value(
        params, 'weight_wd_regularization', 0.0)
    bias_wd_regularization = utils.get_dict_value(params,
                                                  'bias_wd_regularization',
                                                  0.0)

    if bipass_conv:
        # Bypass the conv stack entirely; feed the raw embedding to the MLP.
        conv_group = [emb_sentence]
    else:
        # Optionally include the raw embedding as an extra no-conv branch.
        if use_no_conv_path:
            conv_group = [emb_sentence]
        else:
            conv_group = []
        for i, (conv_num_feature,
                conv_width) in enumerate(zip(conv_num_features, conv_widths)):
            conv_out = nlp.conv1d_array(emb_sentence,
                                        conv_num_feature,
                                        conv_width,
                                        name='conv%s' % (str(i)),
                                        w_wds=weight_wd_regularization,
                                        b_wds=bias_wd_regularization,
                                        keep_probs=conv_keep_probs)
            conv_group.append(conv_out)
    conv_out, _ = misc.concat(conv_group)
    mlp_out, _ = mlp.fully_connected_network(
        conv_out,
        mlp_config,
        layer_activations=mlp_activations,
        dropout_keep_probs=mlp_dropout_keep_probs)
    return [tf.identity(mlp_out[0], name=name)], {}