Example #1
    def model_fn(self, features, labels, mode, params, config):

        blocks = tf.layers.flatten(features["blocks"])

        incoming = tf.layers.flatten(features["incoming"])

        concat = tf.concat([blocks, incoming], axis=1)

        unnormed = parse_layers(inputs=concat,
                                layers=params["layers"],
                                mode=mode,
                                default_summaries=params["default_summaries"])

        normed = tf.nn.l2_normalize(unnormed, dim=1)

        # ================================================================
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                predictions={"directions": normed},
                export_outputs={
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    PredictOutput({"directions": normed})
                })
        # ================================================================
        loss = -tf.multiply(normed, labels)
        loss = tf.reduce_sum(loss, axis=1)
        loss = tf.reduce_mean(loss)

        optimizer = params["optimizer_class"](
            learning_rate=params["learning_rate"])

        gradients, variables = zip(*optimizer.compute_gradients(loss))

        gradient_global_norm = tf.global_norm(gradients, name="global_norm")

        if "gradient_max_norm" in params:
            gradients, _ = tf.clip_by_global_norm(
                gradients,
                params["gradient_max_norm"],
                use_norm=gradient_global_norm)

        train_op = optimizer.apply_gradients(
            grads_and_vars=zip(gradients, variables),
            global_step=tf.train.get_global_step())
        # ================================================================

        training_hooks = parse_hooks(params, locals(), self.save_path)
        # ================================================================
        if (mode == tf.estimator.ModeKeys.TRAIN
                or mode == tf.estimator.ModeKeys.EVAL):  # noqa: E129
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op,
                                              training_hooks=training_hooks)
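
The compute_gradients / clip_by_global_norm / apply_gradients split above is the standard TF1 pattern for gradient clipping. A minimal self-contained sketch of the same pattern on a toy loss (the variable, loss, and threshold here are illustrative, not part of the example above):

    import tensorflow as tf

    x = tf.Variable([3.0, 4.0])
    loss = tf.reduce_sum(tf.square(x))  # toy loss; gradient is 2*x = [6, 8]

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    gradients, variables = zip(*optimizer.compute_gradients(loss))

    # Passing the precomputed norm via use_norm avoids computing it twice.
    global_norm = tf.global_norm(gradients, name="global_norm")
    clipped, _ = tf.clip_by_global_norm(gradients, 5.0, use_norm=global_norm)

    train_op = optimizer.apply_gradients(grads_and_vars=zip(clipped, variables))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(global_norm))  # 10.0 > 5.0, so gradients get halved
        sess.run(train_op)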
Example #2
    def model_fn(self, features, labels, mode, params, config):

        blocks = tf.layers.flatten(features["blocks"])

        incoming = tf.layers.flatten(features["incoming"])

        concat = tf.concat([blocks, incoming], axis=1)

        unnormed = parse_layers(inputs=concat,
                                layers=params["layers"],
                                mode=mode,
                                default_summaries=params["default_summaries"])

        normed = tf.nn.l2_normalize(unnormed, dim=1)

        predictions = normed
        # ================================================================
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                predictions={"predictions": predictions},
                export_outputs={
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    PredictOutput({"predictions": predictions})
                })
        # ================================================================
        loss = -tf.multiply(normed, labels)
        loss = tf.reduce_sum(loss, axis=1)
        loss = tf.reduce_mean(loss)

        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=params["learning_rate"])

        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        # ================================================================
        if "hooks" in params:
            training_hooks = parse_hooks(params["hooks"], locals(),
                                         self.save_path)
        else:
            training_hooks = []
        # ================================================================
        if (mode == tf.estimator.ModeKeys.TRAIN
                or mode == tf.estimator.ModeKeys.EVAL):  # noqa: E129
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op,
                                              training_hooks=training_hooks)
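
The training objective in Examples #1 and #2 is just the mean negative cosine similarity between the normalized network outputs and the label directions (assuming the labels are themselves unit vectors). A standalone NumPy sketch of the same computation:

    import numpy as np

    def neg_cosine_loss(unnormed, labels, eps=1e-12):
        # L2-normalize each row, mirroring tf.nn.l2_normalize(..., dim=1).
        normed = unnormed / np.maximum(
            np.linalg.norm(unnormed, axis=1, keepdims=True), eps)
        # Negative dot product per sample, then mean over the batch.
        return np.mean(-np.sum(normed * labels, axis=1))

    outputs = np.array([[2.0, 0.0], [0.0, 3.0]])
    targets = np.array([[1.0, 0.0], [0.0, 1.0]])  # unit label directions
    print(neg_cosine_loss(outputs, targets))      # -1.0: perfectly aligned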
Example #3
    def model_fn(self, features, labels, mode, params, config):

        blocks = tf.layers.flatten(features["blocks"])

        incoming = tf.layers.flatten(features["incoming"])

        concat = tf.concat([blocks, incoming], axis=1)

        last_layer = parse_layers(
            inputs=concat,
            layers=params["layers"],
            mode=mode,
            default_summaries=params["default_summaries"])

        # After the last configured layer, add one dense layer producing
        # the mean vectors and one producing the concentration values.

        # TODO: Make this modular. Parameters should be passed in the config
        # file.
        key = "dense"
        mu_params = {'activation': tf.nn.relu, 'units': 512}
        with var_scope("last_mean", values=(unnormed, )) as scope:
            mu_out = getattr(tf.layers, key)(inputs, **mu_params, name=scope)

        k_params = {'activation': tf.nn.relu, 'units': 512}
        with var_scope("last_k", values=(unnormed, )) as scope:
            k_out = getattr(tf.layers, key)(inputs, **k_params, name=scope)

        if default_summaries is not None:
            for summary in default_summaries:
                summary["sum_op"](name, inputs)

        # Normalize the mean vectors
        mu_normed = tf.nn.l2_normalize(mu_out, dim=1)

        # base.ProbabilisticTracker already expects a dictionary with
        # 'mean' and 'concentration' keys, so multiple predictions are
        # returned as a dict of tensors. Sticking to that for the moment.
        predictions = {
            'mean': mu_normed,  # Complying with base.ProbabilisticTracker
            'concentration': k_out  # Complying with base.ProbabilisticTracker
        }
        # ================================================================
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                # Pass the dict of tensors directly; EstimatorSpec and
                # PredictOutput reject nested dictionaries.
                predictions=predictions,
                export_outputs={
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    PredictOutput(predictions)
                })
        # ================================================================

        # TODO: Introduce temperature parameter T in the config file.
        cur_T = 1
        loss = self.max_entropy_loss(y=labels, mu=mu_normed, k=k_out, T=cur_T)

        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=params["learning_rate"])

        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        # ================================================================
        if "hooks" in params:
            training_hooks = parse_hooks(params["hooks"], locals(),
                                         self.save_path)
        else:
            training_hooks = []
        # ================================================================
        if (mode == tf.estimator.ModeKeys.TRAIN
                or mode == tf.estimator.ModeKeys.EVAL):  # noqa: E129
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op,
                                              training_hooks=training_hooks)
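
The 'mean'/'concentration' pair suggests a von Mises-Fisher-style distribution over unit directions; in 3-D its density is f(y; mu, kappa) = kappa / (4*pi*sinh(kappa)) * exp(kappa * mu . y). The project's max_entropy_loss is not shown here, so as a point of reference only, a standalone NumPy sketch of the plain vMF negative log-likelihood:

    import numpy as np

    def vmf_neg_log_likelihood(y, mu, kappa):
        # 3-D von Mises-Fisher NLL; y and mu are unit row vectors and
        # kappa > 0 is the concentration. A reference formula, not the
        # project's actual loss.
        dot = np.sum(mu * y, axis=1)
        log_norm = np.log(kappa) - np.log(4 * np.pi) - np.log(np.sinh(kappa))
        return -(log_norm + kappa * dot)

    y = np.array([[0.0, 0.0, 1.0]])
    mu = np.array([[0.0, 0.0, 1.0]])
    print(vmf_neg_log_likelihood(y, mu, np.array([5.0])))  # ~0.23, aligned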
Example #4
    def model_fn(self, features, labels, mode, params, config):

        blocks = tf.layers.flatten(features["blocks"])

        incoming = tf.layers.flatten(features["incoming"])

        concat = tf.concat([blocks, incoming], axis=1)

        shared_layers = parse_layers(
            inputs=concat,
            layers=params["shared_layers"],
            mode=mode,
            default_summaries=params["default_summaries"])

        mu_out = parse_layers(inputs=shared_layers,
                              layers=params["mu_head"],
                              mode=mode,
                              default_summaries=params["default_summaries"])

        k_out = parse_layers(inputs=shared_layers,
                             layers=params["k_head"],
                             mode=mode,
                             default_summaries=params["default_summaries"])

        # Normalize the mean vectors
        mu_normed = tf.nn.l2_normalize(mu_out, dim=1)

        predictions = {'mean': mu_normed, 'concentration': k_out}
        # ================================================================
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions,
                export_outputs={
                    DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    PredictOutput(predictions)
                })
        # ================================================================

        dot_products = tf.reduce_sum(tf.multiply(mu_normed, labels), axis=1)

        W = self.W_stabilized(k_out, 10**-12)

        H = self.H_stabilized(k_out, 10**-12)

        cost = -tf.multiply(W, dot_products)

        T_H = -get_rate(params["temp"]) * H

        loss = cost + T_H

        loss = tf.reduce_mean(loss)

        optimizer = params["optimizer_class"](
            learning_rate=get_rate(params["learning_rate"]))

        gradients, variables = zip(*optimizer.compute_gradients(loss))

        gradient_global_norm = tf.global_norm(gradients, name="global_norm")

        if "gradient_max_norm" in params:
            gradients, _ = tf.clip_by_global_norm(
                gradients,
                params["gradient_max_norm"],
                use_norm=gradient_global_norm)

        train_op = optimizer.apply_gradients(
            grads_and_vars=zip(gradients, variables),
            global_step=tf.train.get_global_step())

        # ================================================================

        training_hooks = parse_hooks(params, locals(), self.save_path)

        # ================================================================
        if (mode == tf.estimator.ModeKeys.TRAIN
                or mode == tf.estimator.ModeKeys.EVAL):  # noqa: E129
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op,
                                              training_hooks=training_hooks)
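
get_rate is a project helper that Example #4 applies to both the temperature and the learning rate; its implementation is not shown here. A plausible minimal sketch (an assumption, not the actual helper) that passes constants through and expands a dict into a decay schedule:

    import tensorflow as tf

    def get_rate(spec):
        # Hypothetical stand-in: plain floats pass through unchanged,
        # dicts become an exponential-decay schedule tied to the
        # global step.
        if isinstance(spec, dict):
            return tf.train.exponential_decay(
                learning_rate=spec["initial"],
                global_step=tf.train.get_global_step(),
                decay_steps=spec["decay_steps"],
                decay_rate=spec["decay_rate"])
        return spec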