Example No. 1
    def model_fn(features_map, targets, mode):

        if mode == tf.contrib.learn.ModeKeys.TRAIN:
            predictions, loss = model_impl(hparams, mode, features_map,
                                           targets)

            if hparams.l1_reg > 0. or hparams.l2_reg > 0.:
                # apply regularization
                with tf.variable_scope("reg") as vs:
                    all_regularize = []
                    if hparams.l1_reg > 0.:
                        all_regularize.append(
                            tf.contrib.layers.l1_regularizer(hparams.l1_reg))
                    if hparams.l2_reg > 0.:
                        all_regularize.append(
                            tf.contrib.layers.l2_regularizer(hparams.l2_reg))

                    regularizer = fu.sum_regularizer(all_regularize, scope=vs)
                    regularization_penalty = tf.contrib.layers.apply_regularization(
                        regularizer)

                    loss += regularization_penalty

            train_op = create_train_op(loss, hparams)

            return model_fn_lib.ModelFnOps(mode=mode,
                                           predictions={
                                               'predictions': predictions,
                                               'features':
                                               features_map['features'],
                                               'targets': targets
                                           },
                                           loss=loss,
                                           train_op=train_op)

        if mode == tf.contrib.learn.ModeKeys.INFER:
            predictions, loss = model_impl(hparams, mode, features_map, None)

            return model_fn_lib.ModelFnOps(mode=mode,
                                           predictions={
                                               'predictions': predictions,
                                               'features':
                                               features_map['features'],
                                               'targets':
                                               features_map['targets']
                                           })

        if mode == tf.contrib.learn.ModeKeys.EVAL:
            predictions, loss = model_impl(hparams, mode, features_map,
                                           targets)

            return model_fn_lib.ModelFnOps(mode=mode,
                                           predictions={
                                               'predictions': predictions,
                                               'features':
                                               features_map['features'],
                                               'targets': targets
                                           },
                                           loss=loss)
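
A minimal standalone sketch of the regularization block above, assuming `fu.sum_regularizer` wraps tf.contrib.layers.sum_regularizer and that the model's weight variables live in the WEIGHTS collection (both are assumptions, not confirmed by the source):

import tensorflow as tf

# A variable registered in the WEIGHTS collection, which apply_regularization
# falls back to when no explicit weights_list is given.
w = tf.get_variable("w", shape=[10],
                    collections=[tf.GraphKeys.WEIGHTS,
                                 tf.GraphKeys.GLOBAL_VARIABLES])
# Combine an L1 and an L2 penalty into one regularizer, then apply it.
regularizer = tf.contrib.layers.sum_regularizer(
    [tf.contrib.layers.l1_regularizer(0.01),
     tf.contrib.layers.l2_regularizer(0.01)])
penalty = tf.contrib.layers.apply_regularization(regularizer, weights_list=[w])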
Example No. 2
def model_fn(features, targets, mode, params):
    # Features are board[81] + liberty[81] + group[81] + valid_move[81] + last_move[1] + ko[2].
    # Note that turn and ko info are set to valid_move map.
    board_features, _, _ = tf.split(features, [81 * 4, 1, 2], axis=1)
    board = tf.reshape(board_features, [-1, 9, 9, 4])
    # No relu, input includes negative. 4x25x64 = 6400
    conv_in = tf.layers.conv2d(inputs=board,
                               filters=64,
                               kernel_size=[5, 5],
                               padding="same")
    conv1 = tf.layers.conv2d(inputs=conv_in,
                             filters=16,
                             kernel_size=[3, 3],
                             padding="same")  # 64*9*16 = 9216
    conv2 = tf.layers.conv2d(inputs=conv1,
                             filters=16,
                             kernel_size=[3, 3],
                             padding="same")  # 16*9*16 = 2304
    conv_out = tf.layers.conv2d(inputs=conv2,
                                filters=1,
                                kernel_size=[1, 1],
                                padding="same")  # To reduce size
    # Flattens conv2d output and attaches last_move info.
    conv_flat = tf.reshape(conv_out, [-1, 9 * 9 * 1])

    # Dense layer and output.
    # 81*64 = 5184
    dense = tf.layers.dense(inputs=conv_flat, units=64)
    output_layer = tf.contrib.layers.linear(dense, 1)
    predictions = tf.reshape(output_layer, [-1])

    # For predict mode.
    if targets is None:
        return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions)

    # For modes with targets.
    loss = tf.losses.mean_squared_error(targets, predictions)

    eval_metric_ops = {
        "rmse":
        tf.metrics.root_mean_squared_error(tf.cast(targets, tf.float32),
                                           predictions)
    }

    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="SGD")

    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=loss,
                                   train_op=train_op,
                                   eval_metric_ops=eval_metric_ops)
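
As a quick, illustrative shape check of the tf.split at the top of this example (the batch size of 32 is an arbitrary assumption):

import tensorflow as tf

features = tf.zeros([32, 81 * 4 + 1 + 2])  # board/liberty/group/valid_move + last_move + ko
board_features, last_move, ko = tf.split(features, [81 * 4, 1, 2], axis=1)
board = tf.reshape(board_features, [-1, 9, 9, 4])
print(board.shape)  # (32, 9, 9, 4): one 9x9 plane per feature map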
Example No. 3
        def model_fn(features, labels, params, mode):
            #get inputs, targets from features, labels
            source_tokens = tf.placeholder_with_default(
                features["source_tokens"],
                shape=[None, None],
                name='sentence_inputs')
            source_tokens_length = tf.placeholder_with_default(
                features["source_len"],
                shape=[None],
                name='source_tokens_length')

            if labels is not None:
                target_tokens = tf.placeholder_with_default(
                    labels["target_tokens"],
                    shape=[None, None],
                    name='targets')

                target_tokens_length = tf.placeholder_with_default(
                    labels["target_len"],
                    shape=[None],
                    name='targets_token_length')
            else:
                target_tokens = None
                target_tokens_length = None

            # add placeholders
            self._add_placeholders(source_tokens, source_tokens_length,
                                   target_tokens, target_tokens_length, mode)

            # set input_ids, target_ids, max_target_len
            self._set_variables(mode)

            # add an encoder to computation graph
            self._add_encoder()

            # add a decoder
            self._add_decoder(mode)

            if mode == tf.contrib.learn.ModeKeys.TRAIN:  # train
                return model_fn_lib.ModelFnOps(
                    mode=mode,
                    predictions={'predictions': self.predictions},
                    loss=self.loss,
                    train_op=self.train_op)
            else:
                return model_fn_lib.ModelFnOps(
                    mode=mode,
                    predictions={
                        'predictions':
                        self.target_id_to_vocab.lookup(
                            tf.to_int64(self.predictions)),
                        'source_tokens':
                        self.source_tokens
                    },
                    loss=self.loss)
Example No. 4
    def _model_fn(features, labels, mode):
        """Creates the prediction and its loss.

    Args:
      features: A dictionary of tensors keyed by the feature name.
      labels: A tensor representing the labels.
      mode: The execution mode, as defined in tf.contrib.learn.ModeKeys.

    Returns:
      A tuple consisting of the prediction, loss, and train_op.
    """
        # Generate one embedding per sparse feature column and concatenate them.
        concat_embeddings = tf.contrib.layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=_get_feature_columns(include_target_column=False))

        # Add one hidden layer.
        hidden_layer_0 = tf.contrib.layers.relu(concat_embeddings,
                                                FLAGS.hidden_units)

        # Output and logistic loss.
        logits = tf.contrib.layers.linear(hidden_layer_0, FLAGS.num_classes)

        predictions = tf.contrib.layers.softmax(logits)
        if mode == tf.contrib.learn.ModeKeys.INFER:
            predictions = {
                tf.contrib.learn.PredictionKey.PROBABILITIES: predictions,
                PREDICTION_KEY: features[PREDICTION_KEY]
            }
            output_alternatives = {
                DEFAULT_OUTPUT_ALTERNATIVE:
                (tf.contrib.learn.ProblemType.UNSPECIFIED, predictions)
            }
            return model_fn.ModelFnOps(mode=mode,
                                       predictions=predictions,
                                       output_alternatives=output_alternatives)

        target_one_hot = tf.one_hot(labels, FLAGS.num_classes)
        target_one_hot = tf.reduce_sum(input_tensor=target_one_hot, axis=[1])
        loss = tf.losses.softmax_cross_entropy(target_one_hot, logits)
        if mode == tf.contrib.learn.ModeKeys.EVAL:
            return predictions, loss, None

        opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=FLAGS.learning_rate,
            optimizer=opt)
        return model_fn.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=loss,
                                   train_op=train_op)
Example No. 5
 def _model_fn(self, features, labels, mode):
     x_reshaped = tf.reshape(features, [-1, 28, 28, 1])
     h1 = tf.layers.conv2d(x_reshaped,
                           32, [5, 5],
                           padding="same",
                           activation=tf.nn.relu)
     h2 = tf.layers.max_pooling2d(h1, 2, 2, padding="same")
     h3 = tf.layers.conv2d(h2,
                           64, [5, 5],
                           padding="same",
                           activation=tf.nn.relu)
     h4 = tf.layers.max_pooling2d(h3, 2, 2, padding="same")
     h5 = tf.layers.dense(tf.reshape(h4, [-1, 7 * 7 * 64]),
                          1024,
                          activation=tf.nn.relu)
     h6 = tf.layers.dropout(h5,
                            rate=0.3,
                            training=(mode == learn.ModeKeys.TRAIN))
     ypred = tf.layers.dense(h6, 10)
     predictions = {
         "classes": tf.argmax(ypred, axis=1),
         "probabilities": tf.nn.softmax(ypred, dim=1),
         "logits": ypred
     }
     loss = tf.reduce_mean(
         tf.nn.softmax_cross_entropy_with_logits(logits=ypred,
                                                 labels=tf.one_hot(
                                                     labels, depth=10)))
     train_op = None
     if mode == learn.ModeKeys.TRAIN:
         # Pass the global step so contrib.learn's step counting advances.
         train_op = tf.train.AdamOptimizer(1e-4).minimize(
             loss, global_step=tf.contrib.framework.get_global_step())
     return model_fn_lib.ModelFnOps(predictions=predictions,
                                    loss=loss,
                                    train_op=train_op,
                                    mode=mode)
Example No. 6
def gmm_cluster_model_fn(features, labels, mode, params, config=None):
    """Model function."""
    assert labels is None, labels

    update_params = ''
    for i in ["w", "m", 'c']:
        if i in params["update_params"]:
            update_params += i
    (all_scores,
     model_predictions,
     losses, training_op) = gmm_ops.gmm(parse_tensor_or_dict(features),
                                        "random",
                                        params["num_clusters"],
                                        params["random_seed"],
                                        params["covariance_type"],
                                        update_params,
                                        )
    incr_step = state_ops.assign_add(variables.get_global_step(), 1)
    loss = math_ops.reduce_sum(losses)
    training_op = with_dependencies([training_op, incr_step], loss)
    predictions = {
        'all_scores': all_scores[0],
        'assignments': model_predictions[0][0],
    }
    eval_metric_ops = {
        'scores': streaming_sum(loss),
    }
    return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                   eval_metric_ops=eval_metric_ops,
                                   loss=loss, train_op=training_op)
Example No. 7
 def _model_fn(features, labels, mode, config):
   """Model function."""
   assert labels is None, labels
   (loss,
    scores,
    model_predictions,
    training_op,
    init_op,
    is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
                                  self._training_initial_clusters,
                                  self._num_clusters, self._random_seed,
                                  self._covariance_type,
                                  self._params)
   incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
   training_op = with_dependencies([training_op, incr_step], loss)
   training_hooks = [_InitializeClustersHook(
       init_op, is_initialized, config.is_chief)]
   predictions = {
       GMM.ASSIGNMENTS: model_predictions[0][0],
   }
   eval_metric_ops = {
       GMM.SCORES: scores,
       GMM.LOG_LIKELIHOOD: _streaming_sum(loss),
   }
   return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                  eval_metric_ops=eval_metric_ops,
                                  loss=loss, train_op=training_op,
                                  training_hooks=training_hooks)
Example No. 8
def model_fn(features, targets, mode, params):
    """Model function for Estimator."""

    first_hidden_layer = tf.contrib.layers.relu(features, 10)
    second_hidden_layer = tf.contrib.layers.relu(first_hidden_layer, 10)
    output_layer = tf.contrib.layers.linear(second_hidden_layer, 1)

    predictions = tf.reshape(output_layer, [-1])
    predictions_dict = {"ages": predictions}

    loss = tf.losses.mean_squared_error(targets, predictions)

    eval_metric_ops = {
        "rmse":
        tf.metrics.root_mean_squared_error(tf.cast(targets, tf.float64),
                                           predictions)
    }

    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="SGD")

    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions_dict,
                                   loss=loss,
                                   train_op=train_op,
                                   eval_metric_ops=eval_metric_ops)
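
A hedged usage sketch (not from the original source): wiring a model_fn like the one above into tf.contrib.learn.Estimator with toy arrays. The feature width, sample count, and learning rate here are arbitrary assumptions.

import numpy as np
import tensorflow as tf

# Build the estimator around the custom model_fn defined above.
estimator = tf.contrib.learn.Estimator(model_fn=model_fn,
                                       params={"learning_rate": 0.01})

x_train = np.random.rand(100, 7)  # hypothetical feature matrix
y_train = np.random.rand(100)     # hypothetical age targets
estimator.fit(x=x_train, y=y_train, steps=100)
print(estimator.evaluate(x=x_train, y=y_train, steps=1))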
Example No. 9
  def test_build_all_signature_defs_legacy_input_fn_not_supported(self):
    """Tests that legacy input_fn returning (features, labels) raises error.

    serving_input_fn must return InputFnOps including a default input
    alternative.
    """
    input_features = constant_op.constant(["10"])
    input_ops = ({"features": input_features}, None)
    input_alternatives, _ = (
        saved_model_export_utils.get_input_alternatives(input_ops))
    output_1 = constant_op.constant(["1"])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "some_output_1": output_1
        }),
        "head-2": (constants.ProblemType.CLASSIFICATION, {
            "some_output_2": output_2
        }),
        "head-3": (constants.ProblemType.UNSPECIFIED, {
            "some_output_3": output_3
        }),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": constant_op.constant(["4"])},
        output_alternatives=provided_output_alternatives)
    output_alternatives, _ = (saved_model_export_utils.get_output_alternatives(
        model_fn_ops, "head-1"))

    with self.assertRaisesRegexp(
        ValueError, "A default input_alternative must be provided"):
      saved_model_export_utils.build_all_signature_defs(
          input_alternatives, output_alternatives, "head-1")
Example No. 10
def fnn_model_fn(features, labels, mode):
    print(features)
    print(labels)
    # output_labels = tf.reshape(labels,[-1,1])
    dense = tf.layers.dense(features,
                            units=nhidden,
                            activation=tf.nn.relu,
                            use_bias=True)
    print(dense)
    logits = tf.layers.dense(dense, units=1, use_bias=True)
    print(logits)
    loss = None
    train_op = None
    # Compute the loss for TRAIN and EVAL; labels are unavailable in INFER.
    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=1)
        # loss = tf.losses.sigmoid_cross_entropy(output_labels, logits)
        # loss = tf.losses.mean_squared_error(labels=output_labels, predictions=logits)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                               logits=logits)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=learning_rate,
            optimizer="SGD")
    predictions = {
        "classes": tf.round(logits),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    return model_fn.ModelFnOps(mode=mode,
                               predictions=predictions,
                               loss=loss,
                               train_op=train_op)
Example No. 11
def model_fn(features, targets, mode, params):
    learning_rate = 0.001
    # NOTE: a plain tf.placeholder would go unfed when run under an Estimator;
    # a default keeps it runnable (1.0, i.e. no dropout, is an assumed value).
    keep_prob = tf.placeholder_with_default(1.0, shape=[])  # dropout (keep probability)

    # Construct model
    pred = cnn_model_tfp(features, params, keep_prob)

    pred_dict = {
        "classes": tf.argmax(input=pred, axis=1),
        "probabilities": tf.nn.softmax(pred, name="softmax_tensor")
    }

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=targets))
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(targets, 1))
    eval_metric_ops = {
        "accuracy": tf.reduce_mean(tf.cast(correct_pred, tf.float32)),
        "model_loss": loss
    }

    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=learning_rate,
        optimizer="Adam")

    print("Got model fn fxn. params:", params)
    return model_fn_lib.ModelFnOps(mode=mode,
                                   loss=loss,
                                   train_op=train_op,
                                   predictions=pred_dict,
                                   eval_metric_ops=eval_metric_ops)
Example No. 12
def _cnn_model_fn(features, labels, mode):
    x = features['data']
    layer1 = tf.layers.dense(x, 10)
    layer2 = tf.layers.dense(layer1, 10)
    layer3 = tf.layers.dense(layer2, 10)
    logits = tf.layers.dense(layer3, 1)

    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        loss = tf.losses.sigmoid_cross_entropy(labels, logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="Adam")

    # Generate Predictions
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.sigmoid(logits)
    }

    # Return a ModelFnOps object
    return model_fn_lib.ModelFnOps(mode=mode,
                                   loss=loss,
                                   train_op=train_op,
                                   predictions=predictions)
Example No. 13
def model_fn(features, targets, mode, params):

    logits = Dense(10, input_dim=784)(features["x"])
    
    loss = tf.losses.softmax_cross_entropy(
            onehot_labels=targets, logits=logits)
    
    train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="SGD")
    
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits)
    }
    
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
                     tf.argmax(input=logits, axis=1),
                     tf.argmax(input=targets, axis=1))
    }
     
    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
Example No. 14
    def head_ops(self,
                 features,
                 labels,
                 mode,
                 train_op_fn,
                 logits=None,
                 logits_input=None):
        """See `_Head`."""
        _check_mode_valid(mode)
        _check_logits_input_not_supported(logits, logits_input)
        predictions = self._predictions(logits)
        if (mode == model_fn.ModeKeys.INFER) or (labels is None):
            loss = None
            train_op = None
            eval_metric_ops = None
        else:
            loss = self._training_loss(features, labels, logits)
            train_op = (None if train_op_fn is None
                        or mode == model_fn.ModeKeys.EVAL else self._train_op(
                            features, labels, train_op_fn, logits))
            eval_metric_ops = self._eval_metric_ops(features, labels, logits)
        signature_fn = self._signature_fn()

        return model_fn.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=loss,
                                   train_op=train_op,
                                   eval_metric_ops=eval_metric_ops,
                                   signature_fn=signature_fn)
Example No. 15
def model(features, labels, mode):
    input_layer = tf.reshape(features, [-1, 999])
    input_layer = tf.one_hot(input_layer, 1000)
    layer_1 = layers.fully_connected(inputs=input_layer, num_outputs=4)
    output_layer = layers.fully_connected(inputs=layer_1,
                                          num_outputs=1,
                                          activation_fn=None)
    cross_entropy = None
    optimizer = None
    if mode != learn.ModeKeys.INFER:
        cross_entropy = tf.losses.sigmoid_cross_entropy(
            multi_class_labels=tf.reshape(labels, [-1]),
            logits=tf.reshape(output_layer, [-1]))
    if mode == learn.ModeKeys.TRAIN:
        optimizer = tf.contrib.layers.optimize_loss(
            loss=cross_entropy,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="SGD")
    activated = tf.sigmoid(output_layer, name='sigmoid_tensor')
    predictions = {"classes": tf.round(activated), "probabilities": activated}
    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=cross_entropy,
                                   train_op=optimizer)
Example No. 16
    def _lstm_model(features, targets):

        cell = tf.contrib.rnn.DropoutWrapper(
            tf.contrib.rnn.BasicLSTMCell(5, state_is_tuple=True), 0.9)
        stacked_lstm = tf.contrib.rnn.MultiRNNCell([cell for _ in range(1)],
                                                   state_is_tuple=True)
        features = tf.unstack(features, axis=1, num=10)

        output, layers = tf.contrib.rnn.static_rnn(stacked_lstm,
                                                   features,
                                                   dtype=dtypes.float32)
        output = dnn_layers(output[-1], [10, 10])

        prediction, loss = tflearn.models.linear_regression(output, targets)

        train_op = tf.contrib.layers.optimize_loss(
            loss,
            tf.contrib.framework.get_global_step(),
            optimizer='Adagrad',
            learning_rate=params["learning_rate"])
        eval_metric_ops = {
            "rmse":
            tf.metrics.root_mean_squared_error(tf.cast(targets, tf.float32),
                                               prediction)
        }
        predictions_dict = {"classes": prediction}

        return model_fn_lib.ModelFnOps(mode=mode,
                                       predictions=predictions_dict,
                                       loss=loss,
                                       train_op=train_op,
                                       eval_metric_ops=eval_metric_ops)
Example No. 17
    def test_build_all_signature_defs(self):
        input_features = constant_op.constant(["10"])
        input_example = constant_op.constant(["11"])
        input_ops = input_fn_utils.InputFnOps({"features": input_features},
                                              None,
                                              {"default input": input_example})
        input_alternatives, _ = (
            saved_model_export_utils.get_input_alternatives(input_ops))
        output_1 = constant_op.constant(["1"])
        output_2 = constant_op.constant(["2"])
        output_3 = constant_op.constant(["3"])
        provided_output_alternatives = {
            "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
                "some_output_1": output_1
            }),
            "head-2": (constants.ProblemType.CLASSIFICATION, {
                "some_output_2": output_2
            }),
            "head-3": (constants.ProblemType.UNSPECIFIED, {
                "some_output_3": output_3
            }),
        }
        model_fn_ops = model_fn.ModelFnOps(
            model_fn.ModeKeys.INFER,
            predictions={"some_output": constant_op.constant(["4"])},
            output_alternatives=provided_output_alternatives)
        output_alternatives, _ = (
            saved_model_export_utils.get_output_alternatives(
                model_fn_ops, "head-1"))

        signature_defs = saved_model_export_utils.build_all_signature_defs(
            input_alternatives, output_alternatives, "head-1")

        expected_signature_defs = {
            "serving_default":
            signature_def_utils.regression_signature_def(
                input_example, output_1),
            "default_input_alternative:head-1":
            signature_def_utils.regression_signature_def(
                input_example, output_1),
            "default_input_alternative:head-2":
            signature_def_utils.classification_signature_def(
                input_example, output_2, None),
            "default_input_alternative:head-3":
            signature_def_utils.predict_signature_def({"input": input_example},
                                                      {"output": output_3}),
            # "features_input_alternative:head-1":
            #     signature_def_utils.regression_signature_def(input_features,
            #                                                  output_1),
            # "features_input_alternative:head-2":
            #     signature_def_utils.classification_signature_def(input_features,
            #                                                      output_2, None),
            # "features_input_alternative:head-3":
            #     signature_def_utils.predict_signature_def({
            #         "input": input_features
            #     }, {"output": output_3}),
        }

        self.assertDictEqual(expected_signature_defs, signature_defs)
Example No. 18
 def _model_fn_scaffold(features, labels, mode):
     _, _ = features, labels
     return model_fn.ModelFnOps(
         mode=mode,
         predictions=tf.constant(0.),
         loss=tf.constant(0.),
         train_op=tf.constant(0.),
         training_scaffold=tf.train.Scaffold(init_fn=_init_fn))
Example No. 19
 def _model_fn_scaffold(features, labels, mode):
     _, _ = features, labels
     return model_fn.ModelFnOps(
         mode=mode,
         predictions=constant_op.constant(0.),
         loss=constant_op.constant(0.),
         train_op=constant_op.constant(0.),
         scaffold=monitored_session.Scaffold(init_fn=_init_fn))
Example No. 20
def cnn_model_fn(features, labels, mode):
    input_layer = tf.reshape(features, [-1, 28, 28, 1])

    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=20,
                             kernel_size=[5, 5],
                             padding='valid',
                             activation=tf.nn.relu)

    print(conv1.get_shape())
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    print(pool1.get_shape())

    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=40,
                             kernel_size=[5, 5],
                             padding='valid',
                             activation=tf.nn.relu)
    print(conv2.get_shape())
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    print(pool2.get_shape())

    # Dense Layer
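    # With 'valid' padding: 28 -> 24 (5x5 conv) -> 12 (pool) -> 8 (5x5 conv) -> 4 (pool), hence 4 * 4 * 40.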
    pool2_flat = tf.reshape(pool2, [-1, 4 * 4 * 40])
    dense = tf.layers.dense(inputs=pool2_flat,
                            units=1024,
                            activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.5,
                                training=mode == learn.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    loss = None
    train_op = None

    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                               logits=logits)

    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.005,
            optimizer='SGD')

    # Generate Predictions
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=loss,
                                   train_op=train_op)
Example No. 21
def lenet_model_fn(features, labels, mode):
    # input layer
    input_layer = tf.reshape(features,
                             [-1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])

    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=32,
                             kernel_size=[5, 5],
                             padding='same',
                             activation=tf.nn.relu)

    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             padding='same',
                             activation=tf.nn.relu)

    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
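    # Assuming IMAGE_SIZE == 28: 'same' padding preserves 28x28, and the two 2x2 pools give 14 -> 7, hence 7 * 7 * 64.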

    dense = tf.layers.dense(inputs=pool2_flat,
                            units=1024,
                            activation=tf.nn.relu)

    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.4,
                                training=(mode == learn.ModeKeys.TRAIN))

    logits = tf.layers.dense(inputs=dropout, units=10)

    loss = None
    train_op = None

    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)

        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                               logits=logits)

    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer='SGD')

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=loss,
                                   train_op=train_op)
Example No. 22
def cnn_model_fn(features, labels, mode, params):
    """Model function for CNN."""

    # Input Layer
    input_layer = tf.reshape(features, [-1, IMG_SIZE, IMG_SIZE, COL_CHANNELS])

    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=32,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu,
                             strides=2)

    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu,
                             strides=2)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Dense Layer
    height = int(pool2.get_shape()[1])
    width = int(pool2.get_shape()[2])
    pool2_flat = tf.reshape(pool2, [-1, width * height * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense, rate=params["dropout"], training=mode == learn.ModeKeys.TRAIN)

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learnrate"],
            optimizer=params["optimizer"])

    # Generate Predictions
    predictions = {"classes": tf.argmax(input=logits, axis=1),
                   "probabilities": tf.nn.softmax(logits, name="softmax_tensor")}

    # Return a ModelFnOps object
    return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions, loss=loss, train_op=train_op)
Example No. 23
def tof_net_func(x, y, mode):
    # It reverse-engineers the Kinect.
    x_shape = [-1, 384, 512, 9]
    y_shape = [-1, 384, 512, 18]
    l = 2
    lr = 1e-5

    # convert to the default data type
    msks = tf.tile(x[:, :, :, 9::], [1, 1, 1, 2])
    msks = tf.cast(msks, dtype)
    x = tf.cast(x[:, :, :, 0:9], dtype)
    v = dnnOpticalFlow(x)
    # v = v * msks

    #
    loss = None
    train_op = None

    # compute loss (for TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        y = tf.cast(y, dtype)
        v_true = y
        # An L^l-norm-style loss; 1.0 / l guards against integer division under Python 2.
        loss = tf.reduce_mean(tf.abs(v - v_true) ** l) ** (1.0 / l)
        # loss1 = tf.reduce_sum((inten0 * inten1))/tf.reduce_sum(tf.sqrt(inten0**2*inten1**2))
        # loss2 = tf.reduce_sum((inten2 * inten1))/tf.reduce_sum(tf.sqrt(inten2**2*inten1**2))
        loss = tf.identity(loss, name="loss")

        # configure the training op (for TRAIN mode)
        if mode == learn.ModeKeys.TRAIN:
            train_op = tf.contrib.layers.optimize_loss(
                loss=loss,
                global_step=tf.contrib.framework.get_global_step(),
                learning_rate=lr,
                optimizer="Adam")

    # generate predictions
    predictions = {
        "v": v,
    }
    # output intermediate things
    # ms = tf.identity(ms, name='ms')
    # x_tilde = tf.identity(x_tilde, name='x_tilde')
    tensors = []
    for tensor in tensors:
        predictions[tensor.name] = tensor

    # return a ModelFnOps object
    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
    )
Example No. 24
def cnn_model_fn(features, labels, mode, params):
    nn = cnn(features, labels, mode, params['rank'], params['is_riemannian'])
    # Return a ModelFnOps object
    model = model_fn_lib.ModelFnOps(mode=mode,
                                    predictions=nn['predictions'],
                                    loss=nn['loss'],
                                    train_op=nn['train_op'])

    return model
Example No. 25
def model_fn(features, targets, mode, params):
    """Model function for Estimator."""

    # 1. Configure the model via TensorFlow operations
    # First, build all the model, a good idea is using Keras or tf.layers
    # since these are high-level API's
    conv1 = Conv2D(32, (5, 5), activation='relu',
                   input_shape=(28, 28, 1))(features)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (5, 5), activation='relu')(pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    flat = Flatten()(pool2)
    dense = Dense(1024, activation='relu')(flat)

    # Produce raw logits here: softmax_cross_entropy below expects logits,
    # and the predictions dict already applies the softmax itself.
    preds = Dense(10)(dense)

    # 2. Define the loss function for training/evaluation
    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        loss = tf.losses.softmax_cross_entropy(onehot_labels=targets,
                                               logits=preds)

    # 3. Define the training operation/optimizer

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="Adam",
        )

    # 4. Generate predictions
    predictions_dict = {
        "classes": tf.argmax(input=preds, axis=1),
        "probabilities": tf.nn.softmax(preds, name="softmax_tensor")
    }

    # 5. Define how you want to evaluate the model
    metrics = {
        "accuracy":
        tf.metrics.accuracy(tf.argmax(input=preds, axis=1),
                            tf.argmax(input=targets, axis=1))
    }

    # 6. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions_dict,
                                   loss=loss,
                                   train_op=train_op,
                                   eval_metric_ops=metrics)
Example No. 26
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
Example No. 27
    def _model_fn(features, labels, mode):
        """Function that returns predictions, training loss, and training op."""
        weights = None
        if weights_name and weights_name in features:
            weights = features.pop(weights_name)

        graph_builder = graph_builder_class(params,
                                            device_assigner=device_assigner)
        inference = {}
        if (mode == model_fn_lib.ModeKeys.EVAL
                or mode == model_fn_lib.ModeKeys.INFER):
            inference[eval_metrics.INFERENCE_PROB_NAME] = (
                graph_builder.inference_graph(features))

            if not params.regression:
                inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
                    inference[eval_metrics.INFERENCE_PROB_NAME], 1)

        # labels might be None if we're doing prediction (which brings up the
        # question of why we force everything to adhere to a single model_fn).
        loss_deps = []
        training_graph = None
        if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
            training_graph = control_flow_ops.group(
                graph_builder.training_graph(features,
                                             labels,
                                             input_weights=weights,
                                             num_trainers=num_trainers,
                                             trainer_id=trainer_id),
                state_ops.assign_add(contrib_framework.get_global_step(), 1))
            loss_deps.append(training_graph)

        training_loss = None
        if (mode == model_fn_lib.ModeKeys.EVAL
                or mode == model_fn_lib.ModeKeys.TRAIN):
            with ops.control_dependencies(loss_deps):
                training_loss = graph_builder.training_loss(features,
                                                            labels,
                                                            name=LOSS_NAME)
            if report_feature_importances and mode == model_fn_lib.ModeKeys.EVAL:
                training_loss = logging_ops.Print(
                    training_loss, [graph_builder.feature_importances()],
                    summarize=1000)
        # Put weights back in
        if weights is not None:
            features[weights_name] = weights

        training_hooks = []
        if early_stopping_rounds:
            training_hooks.append(TensorForestLossHook(early_stopping_rounds))

        return model_fn_lib.ModelFnOps(mode=mode,
                                       predictions=inference,
                                       loss=training_loss,
                                       train_op=training_graph,
                                       training_hooks=training_hooks)
Example No. 28
def estimator_spec_to_model_fn_ops(estimator_spec):
    alternatives = _export_outputs_to_output_alternatives(
        estimator_spec.export_outputs)

    return model_fn.ModelFnOps(mode=_core_mode_to_contrib_mode(
        estimator_spec.mode),
                               predictions=estimator_spec.predictions,
                               loss=estimator_spec.loss,
                               train_op=estimator_spec.train_op,
                               eval_metric_ops=estimator_spec.eval_metric_ops,
                               output_alternatives=alternatives)
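
An illustrative sketch of the direction of this helper: build a core tf.estimator.EstimatorSpec and adapt it to the contrib-style ModelFnOps used throughout this page (the constant tensors are placeholders):

import tensorflow as tf

loss = tf.constant(1.0)
spec = tf.estimator.EstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    predictions={"out": tf.constant([0.0])},
    loss=loss,
    eval_metric_ops={"mean_loss": tf.metrics.mean(loss)})

ops = estimator_spec_to_model_fn_ops(spec)  # contrib-style ModelFnOps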
Example No. 29
def cnn_model_fn(features, labels, mode):
    # features = features.astype(dtype=np.float32)

    # Input Layer
    input_layer = tf.reshape(features, [batch_size, 121, 195, 1])
    input_layer = tf.to_float(input_layer)
    # Simplified model with a single CNN layer.

    # Conv Layer
    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=16,
                             strides=(10, 10),
                             kernel_size=[10, 10],
                             padding="same",
                             activation=tf.nn.relu)

    # Dense Layer
    conv1_flat = tf.reshape(conv1, [batch_size, 4160])
    dense = tf.layers.dense(inputs=conv1_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.4,
                                training=mode == learn.ModeKeys.TRAIN)

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=5)

    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=5)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                               logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="SGD")

    # Generate Predictions
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    # Return a ModelFnOps object
    return model_fn_lib.ModelFnOps(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)
Example No. 30
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""

    input_one_hot = tf.one_hot(indices=tf.cast(features, tf.int32), depth=3000)
    input_one_hot = tf.reshape(input_one_hot, [-1, 128, 1, 3000])
    conv1a = tf.layers.conv2d(inputs=input_one_hot,
                              filters=1000,
                              kernel_size=[3, 1],
                              padding="same",
                              activation=tf.nn.relu)
    conv1b = tf.layers.conv2d(inputs=input_one_hot,
                              filters=1000,
                              kernel_size=[2, 1],
                              padding="same",
                              activation=tf.nn.relu)
    conv1 = tf.concat([conv1a, conv1b], 1)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 1], strides=2)
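    # Concatenating on axis 1 doubles the height to 256; the 2x1 pool (stride 2) halves it back to 128.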
    pool1_flat = tf.reshape(pool1, [-1, 128 * 1 * 1000])
    dense = tf.layers.dense(inputs=pool1_flat,
                            units=1024,
                            activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense,
                                rate=0.4,
                                training=mode == learn.ModeKeys.TRAIN)
    ratings = tf.layers.dense(inputs=dropout, units=1, activation=tf.nn.relu6)

    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        labels = tf.reshape(labels, [-1, 1])
        loss = tf.losses.mean_squared_error(labels=labels, predictions=ratings)

    def f(lr, gs):
        return tf.train.exponential_decay(lr, gs, 100, 0.85)

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            learning_rate_decay_fn=f,
            optimizer="SGD")

    # Generate Predictions
    predictions = {"ratings": ratings}

    # Return a ModelFnOps object
    return model_fn_lib.ModelFnOps(mode=mode,
                                   predictions=predictions,
                                   loss=loss,
                                   train_op=train_op)