Example #1
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        linear_logit = linear_layer(features, **params['training'])
        embedding_outputs = embedding_layer(features, **params['training'])
        fm_logit = fm_layer(embedding_outputs, **params['training'])

        with tf.variable_scope("FM_out"):
            logit = linear_logit + fm_logit

        return get_estimator_spec(logit, labels, mode, params)
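The helpers called above (linear_layer, embedding_layer, fm_layer, get_estimator_spec) are defined elsewhere in the repository and are not reproduced in these snippets. For orientation only, a minimal sketch of a first-order (linear) term over the same feat_ids/feat_vals layout used in Examples #5 and #6 might look like the following; the function name, signature, and **kwargs pass-through are assumptions, not the repository's actual API:

import tensorflow as tf

def linear_layer(features, feature_size, field_size, **kwargs):
    # Hypothetical sketch: first-order (wide) term w.x + b over sparse ids/values.
    weights = tf.get_variable('linear_weights', shape=[feature_size],
                              initializer=tf.glorot_normal_initializer())
    bias = tf.get_variable('linear_bias', shape=[1],
                           initializer=tf.constant_initializer(0.0))
    feat_ids = tf.reshape(features['feat_ids'], shape=[-1, field_size])
    feat_vals = tf.reshape(features['feat_vals'], shape=[-1, field_size])
    feat_weights = tf.nn.embedding_lookup(weights, feat_ids)
    # Weighted sum over all fields yields one scalar logit per example.
    return tf.reduce_sum(tf.multiply(feat_weights, feat_vals), 1) + bias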
Example #2
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        linear_logit = linear_layer(features, **params['training'])
        deep_inputs = bilinear_layer(features, **params['training'])
        deep_logit = dnn_layer(deep_inputs, mode, **params['training'])

        with tf.variable_scope("NFM_out"):
            logit = linear_logit + deep_logit

        return get_estimator_spec(logit, labels, mode, params)
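bilinear_layer above presumably implements NFM's Bi-Interaction pooling, which compresses the field embeddings into a single embedding_size-wide vector before the DNN. A minimal sketch of that pooling step, assuming an input of shape [batch, field_size, embedding_size] (the helper's real signature is not shown in this snippet):

import tensorflow as tf

def bi_interaction_pooling(embeddings):
    # Hypothetical sketch: 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) equals the element-wise
    # sum over all field pairs i < j of e_i * e_j.
    sum_square = tf.square(tf.reduce_sum(embeddings, axis=1))  # [batch, embedding_size]
    square_sum = tf.reduce_sum(tf.square(embeddings), axis=1)  # [batch, embedding_size]
    return 0.5 * (sum_square - square_sum)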
Example #3
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        if len(params['training']['conv_kernel_width']) != len(
                params['training']['conv_filters']):
            raise ValueError(
                "conv_kernel_width must have the same length as conv_filters")

        linear_logit = linear_layer(features, **params['training'])
        embedding_outputs = embedding_layer(features, **params['training'])
        conv_filters = params['training']['conv_filters']
        conv_kernel_width = params['training']['conv_kernel_width']

        n = params['training']['embedding_size']
        conv_filters_len = len(conv_filters)
        conv_input = tf.concat(embedding_outputs, axis=1)

        pooling_result = tf.keras.layers.Lambda(
            lambda x: tf.expand_dims(x, axis=3))(conv_input)

        # Flexible p-max pooling (CCPM): keep fewer positions in earlier layers,
        # and a fixed k = 3 in the final convolution layer.
        for i in range(1, conv_filters_len + 1):
            filters = conv_filters[i - 1]
            width = conv_kernel_width[i - 1]
            p = pow(i / conv_filters_len, conv_filters_len - i)
            k = max(1, int((1 - p) * n)) if i < conv_filters_len else 3

            conv_result = tf.keras.layers.Conv2D(
                filters=filters,
                kernel_size=(width, 1),
                strides=(1, 1),
                padding='same',
                activation='tanh',
                use_bias=True,
            )(pooling_result)

            pooling_result = KMaxPooling(k=min(k, int(conv_result.shape[1])),
                                         axis=1)(conv_result)

        flatten_result = tf.keras.layers.Flatten()(pooling_result)
        deep_logit = dnn_layer(flatten_result, mode, **params['training'])

        with tf.variable_scope("CCPM_out"):
            logit = linear_logit + deep_logit

        return get_estimator_spec(logit, labels, mode, params)
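The per-layer k passed to KMaxPooling above follows CCPM's flexible pooling schedule: p = (i / L)^(L - i) and k_i = max(1, int((1 - p) * n)), with the last layer fixed at k = 3 and k further capped by the current length via min(k, conv_result.shape[1]). A quick standalone recomputation with illustrative numbers (L = 3 layers, n = 8; these values are examples, not the model's configuration):

conv_filters_len = 3  # assumed number of convolution layers
n = 8                 # assumed embedding_size

for i in range(1, conv_filters_len + 1):
    p = pow(i / conv_filters_len, conv_filters_len - i)
    k = max(1, int((1 - p) * n)) if i < conv_filters_len else 3
    print(i, round(p, 3), k)
# prints: 1 0.111 7 / 2 0.667 2 / 3 1.0 3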
Example #4
    def model_fn(self, features, labels, mode, params):
        super().model_fn(features, labels, mode, params)

        linear_logit = linear_layer(features, **params["training"])

        embedding_outputs = embedding_layer(features, **params["training"])
        fm_logit = fm_layer(embedding_outputs, **params["training"])

        field_size = params["training"]["field_size"]
        embedding_size = params["training"]["embedding_size"]
        deep_inputs = tf.reshape(embedding_outputs,
                                 shape=[-1, field_size * embedding_size])
        deep_logit = dnn_layer(deep_inputs, mode, **params["training"])

        with tf.variable_scope("DeepFM_out"):
            logit = linear_logit + fm_logit + deep_logit

        return get_estimator_spec(logit, labels, mode, params)
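dnn_layer is another repository helper not reproduced in these snippets; Examples #2, #3, and #4 all feed it a flattened 2-D tensor plus the estimator mode. A plausible sketch, assuming a stack of dense layers with dropout active only during training (the layer sizes, parameter names, and signature here are illustrative, not the real API):

import tensorflow as tf

def dnn_layer(deep_inputs, mode, deep_layers=(256, 128), dropout=0.5, **kwargs):
    # Hypothetical sketch: MLP over flattened embeddings, returning one logit per example.
    net = deep_inputs
    for i, units in enumerate(deep_layers):
        net = tf.layers.dense(net, units, activation=tf.nn.relu, name='mlp_%d' % i)
        # Dropout only fires in TRAIN mode; it is a no-op at eval/predict time.
        net = tf.layers.dropout(net, rate=dropout,
                                training=(mode == tf.estimator.ModeKeys.TRAIN))
    logit = tf.layers.dense(net, 1, activation=None, name='deep_out')
    return tf.reshape(logit, [-1])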
Example #5
    def model_fn(self, features, labels, mode, params):
        field_size = params["training"]["field_size"]
        feature_size = params["training"]["feature_size"]
        embedding_size = params["training"]["embedding_size"]
        seed = params["training"]["seed"]

        np.random.seed(seed)
        tf.set_random_seed(seed)

        fm_bias = tf.get_variable(name='fm_bias',
                                  shape=[1],
                                  initializer=tf.constant_initializer(0.0))
        fm_weight = tf.get_variable(name='fm_weight',
                                    shape=[feature_size],
                                    initializer=tf.glorot_normal_initializer())
        fm_vector = tf.get_variable(name='fm_vector',
                                    shape=[feature_size, embedding_size],
                                    initializer=tf.glorot_normal_initializer())

        with tf.variable_scope("Feature"):
            feat_ids = features['feat_ids']
            feat_ids = tf.reshape(feat_ids, shape=[-1, field_size])
            feat_vals = features['feat_vals']
            feat_vals = tf.reshape(feat_vals, shape=[-1, field_size])

        with tf.variable_scope("First_order"):
            feat_weights = tf.nn.embedding_lookup(fm_weight, feat_ids)
            y_w = tf.reduce_sum(tf.multiply(feat_weights, feat_vals), 1)

        with tf.variable_scope("Second_order"):
            embeddings = tf.nn.embedding_lookup(fm_vector, feat_ids)
            feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1])
            embeddings = tf.multiply(embeddings, feat_vals)
            sum_square = tf.square(tf.reduce_sum(embeddings, 1))
            square_sum = tf.reduce_sum(tf.square(embeddings), 1)
            y_v = 0.5 * tf.reduce_sum(tf.subtract(sum_square, square_sum), 1)

        y = fm_bias + y_w + y_v

        return get_estimator_spec(y, labels, mode, params)
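The "Second_order" block relies on the standard FM identity sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * (||sum_i v_i x_i||^2 - sum_i ||v_i x_i||^2), which reduces the pairwise sum from O(k * n^2) to O(k * n) for embedding size k and n active features. A quick NumPy check of the identity on random data (independent of the model code above):

import numpy as np

rng = np.random.default_rng(0)
n, k = 5, 4                   # n features, embedding size k
v = rng.normal(size=(n, k))   # per-feature embedding vectors v_i
x = rng.normal(size=n)        # feature values x_i

# Direct pairwise sum: sum_{i<j} <v_i, v_j> * x_i * x_j
pairwise = sum(np.dot(v[i], v[j]) * x[i] * x[j]
               for i in range(n) for j in range(i + 1, n))

# FM shortcut: 0.5 * (square-of-sum - sum-of-squares), summed over the embedding axis
vx = v * x[:, None]
shortcut = 0.5 * np.sum(np.square(vx.sum(axis=0)) - np.square(vx).sum(axis=0))

assert np.isclose(pairwise, shortcut)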
Example #6
    def model_fn(self, features, labels, mode, params):
        field_size = params["training"]["field_size"]
        feature_size = params["training"]["feature_size"]
        embedding_size = params["training"]["embedding_size"]
        l2_reg = params["training"]["l2_reg"]
        batch_norm = params["training"]["batch_norm"]
        batch_norm_decay = params["training"]["batch_norm_decay"]
        seed = params["training"]["seed"]
        layers = params["training"]["deep_layers"]
        dropout = params["training"]["dropout"]

        np.random.seed(seed)
        tf.set_random_seed(seed)

        fm_bias = tf.get_variable(name='fm_bias', shape=[1],
                                  initializer=tf.constant_initializer(0.0))
        fm_weight = tf.get_variable(name='fm_weight', shape=[feature_size],
                                    initializer=tf.glorot_normal_initializer())
        fm_vector = tf.get_variable(name='fm_vector', shape=[feature_size, embedding_size],
                                    initializer=tf.glorot_normal_initializer())

        with tf.variable_scope("Feature"):
            feat_ids = features['feat_ids']
            feat_ids = tf.reshape(feat_ids, shape=[-1, field_size])
            feat_vals = features['feat_vals']
            feat_vals = tf.reshape(feat_vals, shape=[-1, field_size])

        with tf.variable_scope("First_order"):
            feat_weights = tf.nn.embedding_lookup(fm_weight, feat_ids)
            y_w = tf.reduce_sum(tf.multiply(feat_weights, feat_vals), 1)

        with tf.variable_scope("Second_order"):
            embeddings = tf.nn.embedding_lookup(fm_vector, feat_ids)
            feat_vals = tf.reshape(feat_vals, shape=[-1, field_size, 1])
            embeddings = tf.multiply(embeddings, feat_vals)
            sum_square = tf.square(tf.reduce_sum(embeddings, 1))
            square_sum = tf.reduce_sum(tf.square(embeddings), 1)
            y_v = 0.5 * tf.reduce_sum(tf.subtract(sum_square, square_sum), 1)

        with tf.variable_scope("Deep-part"):
            if batch_norm:
                # Batch norm must distinguish training from inference time.
                train_phase = mode == tf.estimator.ModeKeys.TRAIN

            deep_inputs = tf.reshape(embeddings, shape=[-1, field_size * embedding_size])
            for i in range(len(layers)):
                deep_inputs = tf.contrib.layers.fully_connected(
                    inputs=deep_inputs, num_outputs=layers[i],
                    weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg),
                    scope='mlp%d' % i)
                if batch_norm:
                    deep_inputs = batch_norm_layer(
                        deep_inputs, train_phase=train_phase,
                        scope_bn='bn_%d' % i, batch_norm_decay=batch_norm_decay)
                if mode == tf.estimator.ModeKeys.TRAIN:
                    deep_inputs = tf.nn.dropout(deep_inputs, keep_prob=dropout[i])

            y_deep = tf.contrib.layers.fully_connected(
                inputs=deep_inputs, num_outputs=1, activation_fn=tf.identity,
                weights_regularizer=tf.contrib.layers.l2_regularizer(l2_reg),
                scope='deep_out')
            y_d = tf.reshape(y_deep, shape=[-1])

        with tf.variable_scope("DeepFM-out"):
            y_bias = fm_bias * tf.ones_like(y_d, dtype=tf.float32)
            logit = y_bias + y_w + y_v + y_d

        return get_estimator_spec(logit, labels, mode, params)
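batch_norm_layer is not defined in this snippet. A minimal sketch of such a helper, assuming it simply wraps tf.contrib.layers.batch_norm and switches on the train_phase flag (an assumption about the helper, not its actual implementation):

import tensorflow as tf

def batch_norm_layer(x, train_phase, scope_bn, batch_norm_decay):
    # Hypothetical sketch: moving averages update only while train_phase is True.
    return tf.contrib.layers.batch_norm(
        x, decay=batch_norm_decay, center=True, scale=True,
        is_training=train_phase, updates_collections=None, scope=scope_bn)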