Example #1
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        # First-order (linear) term over the raw features.
        linear_logit = linear_layer(features, **params['training'])
        # Shared embeddings feed the FM second-order interaction term.
        embedding_outputs = embedding_layer(features, **params['training'])
        fm_logit = fm_layer(embedding_outputs, **params['training'])

        with tf.variable_scope("FM_out"):
            logit = linear_logit + fm_logit

        return get_estimator_spec(logit, labels, mode, params)
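linear_layer, embedding_layer, fm_layer, and get_estimator_spec are helpers defined elsewhere in this codebase. As a rough guide to what the FM term computes, here is a minimal sketch, assuming the embeddings arrive stacked as a (batch, field_size, embedding_size) tensor; the function name and shapes are illustrative, not the repository's actual API:

    import tensorflow as tf

    def fm_second_order(embeddings):
        # embeddings: (batch, field_size, embedding_size), an assumed shape.
        # FM pairwise interactions via the classic identity:
        #   sum_{i<j} <v_i, v_j> = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2)
        sum_then_square = tf.square(tf.reduce_sum(embeddings, axis=1))
        square_then_sum = tf.reduce_sum(tf.square(embeddings), axis=1)
        # Reduce over the embedding dimension to a (batch, 1) logit.
        return 0.5 * tf.reduce_sum(sum_then_square - square_then_sum,
                                   axis=1, keepdims=True)

The identity replaces the O(field_size^2) pairwise sum with two O(field_size) reductions.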
Example #2
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        # First-order (linear) term over the raw features.
        linear_logit = linear_layer(features, **params["training"])
        embedding_outputs = embedding_layer(features, **params["training"])
        # Bi-interaction pooling turns pairwise embedding interactions into
        # a single dense vector, which the DNN then refines into a logit.
        deep_inputs = bilinear_layer(embedding_outputs, **params["training"])
        deep_logit = dnn_layer(deep_inputs, mode, **params["training"])

        with tf.variable_scope("NFM_out"):
            logit = linear_logit + deep_logit

        return get_estimator_spec(logit, labels, mode, params)
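bilinear_layer here plays the role of Bi-Interaction pooling from the NFM paper: the same square-of-sum minus sum-of-squares trick as FM, except the result stays a vector for the DNN instead of being summed to a scalar. A minimal sketch under the same shape assumption as above:

    import tensorflow as tf

    def bi_interaction_pooling(embeddings):
        # embeddings: (batch, field_size, embedding_size), an assumed shape.
        sum_then_square = tf.square(tf.reduce_sum(embeddings, axis=1))
        square_then_sum = tf.reduce_sum(tf.square(embeddings), axis=1)
        # Keep the (batch, embedding_size) interaction vector; the DNN
        # downstream models higher-order structure on top of it.
        return 0.5 * (sum_then_square - square_then_sum)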
Example #3
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        if len(params['training']['conv_kernel_width']) != len(
                params['training']['conv_filters']):
            raise ValueError(
                "conv_kernel_width must have the same length as conv_filters")

        linear_logit = linear_layer(features, **params['training'])
        embedding_outputs = embedding_layer(features, **params['training'])
        conv_filters = params['training']['conv_filters']
        conv_kernel_width = params['training']['conv_kernel_width']

        n = params['training']['embedding_size']
        conv_filters_len = len(conv_filters)
        # Concatenate the per-field embeddings along axis 1, then add a
        # trailing channel axis so the result can be fed to Conv2D.
        conv_input = tf.concat(embedding_outputs, axis=1)

        pooling_result = tf.keras.layers.Lambda(
            lambda x: tf.expand_dims(x, axis=3))(conv_input)

        for i in range(1, conv_filters_len + 1):
            filters = conv_filters[i - 1]
            width = conv_kernel_width[i - 1]
            # CCPM's dynamic pooling schedule: p = (i / L)^(L - i) shrinks
            # k layer by layer, and the final layer pools to a fixed k = 3.
            p = pow(i / conv_filters_len, conv_filters_len - i)
            k = max(1, int((1 - p) * n)) if i < conv_filters_len else 3

            conv_result = tf.keras.layers.Conv2D(
                filters=filters,
                kernel_size=(width, 1),
                strides=(1, 1),
                padding='same',
                activation='tanh',
                use_bias=True,
            )(pooling_result)

            pooling_result = KMaxPooling(k=min(k, int(conv_result.shape[1])),
                                         axis=1)(conv_result)

        flatten_result = tf.keras.layers.Flatten()(pooling_result)
        deep_logit = dnn_layer(flatten_result, mode, **params['training'])

        with tf.variable_scope("CCPM_out"):
            logit = linear_logit + deep_logit

        return get_estimator_spec(logit, labels, mode, params)
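KMaxPooling is not a stock Keras layer. A common implementation (e.g., in DeepCTR) keeps the k largest values along one axis via tf.nn.top_k; a minimal sketch, not the repository's actual class:

    import tensorflow as tf

    class KMaxPooling(tf.keras.layers.Layer):
        def __init__(self, k, axis, **kwargs):
            super().__init__(**kwargs)
            self.k = k
            self.axis = axis

        def call(self, inputs):
            # Swap the pooling axis to the last position, take top-k
            # there, then swap back. Note top_k returns values in
            # descending order, not in their original positions.
            perm = list(range(len(inputs.shape)))
            perm[self.axis], perm[-1] = perm[-1], perm[self.axis]
            shifted = tf.transpose(inputs, perm)
            top_k = tf.nn.top_k(shifted, k=self.k, sorted=True).values
            return tf.transpose(top_k, perm)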
Example #4
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        # First-order (linear) term over the raw features.
        linear_logit = linear_layer(features, **params["training"])

        # The FM and deep components share the same embeddings, which is
        # the defining trait of DeepFM.
        embedding_outputs = embedding_layer(features, **params["training"])
        fm_logit = fm_layer(embedding_outputs, **params["training"])

        # Flatten the stacked embeddings into one vector for the DNN.
        field_size = params["training"]["field_size"]
        embedding_size = params["training"]["embedding_size"]
        deep_inputs = tf.reshape(embedding_outputs,
                                 shape=[-1, field_size * embedding_size])
        deep_logit = dnn_layer(deep_inputs, mode, **params["training"])

        with tf.variable_scope("DeepFM_out"):
            logit = linear_logit + fm_logit + deep_logit

        return get_estimator_spec(logit, labels, mode, params)
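dnn_layer, used by the NFM, CCPM, and DeepFM examples above, is likewise defined elsewhere. A typical shape for such a helper is a stack of dense layers with dropout gated on the estimator mode, ending in a single-unit logit; hidden_units and dropout below are assumed parameter names, not the repository's:

    import tensorflow as tf

    def dnn_layer_sketch(inputs, mode, hidden_units=(256, 128, 64),
                         dropout=0.5, **unused_params):
        # Dropout should only be active while training.
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        net = inputs
        for units in hidden_units:
            net = tf.keras.layers.Dense(units, activation='relu')(net)
            net = tf.keras.layers.Dropout(dropout)(net, training=is_training)
        # Single-unit output so it can be summed with the other logits.
        return tf.keras.layers.Dense(1)(net)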