Code Example #1
def simple_csv_linear_classifier(export_path, eval_export_path):
    """Trains and exports a simple linear classifier."""
    def parse_csv(rows_string_tensor):
        """Takes the string input tensor and returns a dict of rank-2 tensors."""

        csv_columns = ['age', 'language', 'label']
        csv_column_defaults = [[0.0], ['unknown'], [0.0]]

        # Takes a rank-1 tensor and converts it into a rank-2 tensor.
        # For example, ['csv,line,1', 'csv,line,2', ...] becomes
        # [['csv,line,1'], ['csv,line,2']], which after parsing yields a
        # tuple of tensors: [['csv'], ['csv']], [['line'], ['line']], [[1], [2]]
        row_columns = tf.expand_dims(rows_string_tensor, -1)
        columns = tf.io.decode_csv(records=row_columns,
                                   record_defaults=csv_column_defaults)
        features = dict(zip(csv_columns, columns))
        return features

    def eval_input_receiver_fn():
        """Eval input receiver function."""
        csv_row = tf.compat.v1.placeholder(dtype=tf.string,
                                           shape=[None],
                                           name='input_csv_row')
        features = parse_csv(csv_row)
        receiver_tensors = {'examples': csv_row}

        return export.EvalInputReceiver(features=features,
                                        labels=features['label'],
                                        receiver_tensors=receiver_tensors)

    def input_fn():
        """Train input function."""
        return {
            'age':
            tf.constant([[1], [2], [3], [4]]),
            'language':
            tf.SparseTensor(
                values=['english', 'english', 'chinese', 'chinese'],
                indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
                dense_shape=[4, 1])
        }, tf.constant([[1], [1], [0], [0]])

    language = tf.feature_column.categorical_column_with_vocabulary_list(
        'language', ['english', 'chinese'])
    age = tf.feature_column.numeric_column('age')
    all_features = [age, language]
    feature_spec = tf.feature_column.make_parse_example_spec(all_features)

    classifier = tf_estimator.LinearClassifier(
        feature_columns=all_features, loss_reduction=tf.losses.Reduction.SUM)
    classifier.train(input_fn=input_fn, steps=1000)

    return util.export_model_and_eval_model(
        estimator=classifier,
        serving_input_receiver_fn=(
            tf_estimator.export.build_parsing_serving_input_receiver_fn(
                feature_spec)),
        eval_input_receiver_fn=eval_input_receiver_fn,
        export_path=export_path,
        eval_export_path=eval_export_path)
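
This example relies on several module-level names that are not shown: tf, tf_estimator, export, and util. The pattern matches the TensorFlow Model Analysis example trainers, so a plausible import block would look like the sketch below; the exact module paths are an assumption and may differ between versions.

# Plausible imports for the snippet above (assumption: it comes from the
# TensorFlow Model Analysis example trainers; paths may vary by version).
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import util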
Code Example #2
File: model.py  Project: HoldemGK/gcp-notes
def linear_model(output_dir):
    real, sparse = get_features()
    all = {}
    all.update(real)
    all.update(sparse)
    estimator = tflearn.LinearClassifier(model_dir=output_dir,
                                         feature_columns=all.values())
    return estimator
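
get_features() is defined elsewhere in the project and is not shown. As a rough sketch of the expected contract, it returns two dicts mapping feature names to real-valued and sparse (categorical) feature columns; the column names below are invented for illustration. Depending on how tflearn is aliased (tf.estimator versus the older tf.contrib.learn), the project may use the older tf.contrib.layers column constructors instead.

# Hypothetical sketch of get_features(); the real implementation and its
# column names live elsewhere in the HoldemGK/gcp-notes project.
import tensorflow as tf

def get_features():
    real = {
        'dep_delay': tf.feature_column.numeric_column('dep_delay'),
        'distance': tf.feature_column.numeric_column('distance'),
    }
    sparse = {
        'carrier': tf.feature_column.categorical_column_with_hash_bucket(
            'carrier', hash_bucket_size=100),
    }
    return real, sparse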
Code Example #3
def linear_model(output_dir):
    real, sparse = get_features()
    all = {}
    all.update(real)
    all.update(sparse)
    estimator = tflearn.LinearClassifier(model_dir=output_dir,
                                         feature_columns=all.values())
    estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
    return estimator
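
The difference from the previous example is the my_rmse metric function wrapped in via tf.contrib.estimator.add_metrics, which expects a metric_fn taking (a subset of) labels, predictions, features, and config and returning a dict of metric ops. A minimal sketch is shown below; using the classifier's 'logistic' prediction key is an assumption.

# Minimal sketch of a metric_fn for tf.contrib.estimator.add_metrics.
# The 'logistic' prediction key is an assumption; a regressor would expose
# 'predictions' instead.
import tensorflow as tf

def my_rmse(labels, predictions):
    pred_values = predictions['logistic']
    return {
        'rmse': tf.metrics.root_mean_squared_error(
            tf.cast(labels, tf.float32), pred_values)
    }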
Code Example #4
def build_estimator(model_dir, model_type):
    """build an estimator"""
    if model_type == 'wide':
        m = estimator.LinearClassifier(model_dir=model_dir,
                                       feature_columns=base_columns +
                                       crossed_columns)
    elif model_type == 'deep':
        m = estimator.DNNClassifier(model_dir=model_dir,
                                    feature_columns=deep_columns,
                                    hidden_units=[100, 50])
    else:
        m = estimator.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=crossed_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[100, 50])
    return m
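
base_columns, crossed_columns, and deep_columns are module-level globals (and estimator is presumably tf.estimator imported under that name). This is the classic wide-and-deep layout; a hedged sketch of what those lists typically contain, with invented column names, follows.

# Hypothetical column definitions for the wide-and-deep estimator above;
# the actual columns depend on the dataset this module models.
import tensorflow as tf

education = tf.feature_column.categorical_column_with_hash_bucket(
    'education', hash_bucket_size=1000)
occupation = tf.feature_column.categorical_column_with_hash_bucket(
    'occupation', hash_bucket_size=1000)
age = tf.feature_column.numeric_column('age')

base_columns = [education, occupation]
crossed_columns = base_columns + [
    tf.feature_column.crossed_column(
        ['education', 'occupation'], hash_bucket_size=10000),
]
deep_columns = [
    age,
    tf.feature_column.indicator_column(education),
    tf.feature_column.indicator_column(occupation),
]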
Code Example #5
def train_and_evaluate(working_dir,
                       num_train_instances=common.NUM_TRAIN_INSTANCES,
                       num_test_instances=common.NUM_TEST_INSTANCES):
    """Train the model on training data and evaluate on test data.

  Args:
    working_dir: Directory to read transformed data and metadata from and to
        write exported model to.
    num_train_instances: Number of instances in train set
    num_test_instances: Number of instances in test set

  Returns:
    The results from the estimator's 'evaluate' method
  """
    tf_transform_output = tft.TFTransformOutput(working_dir)

    run_config = tf_estimator.RunConfig()

    estimator = tf_estimator.LinearClassifier(
        feature_columns=get_feature_columns(tf_transform_output),
        config=run_config,
        loss_reduction=tf.losses.Reduction.SUM)

    # Fit the model using the default optimizer.
    train_input_fn = _make_training_input_fn(
        tf_transform_output,
        os.path.join(working_dir,
                     common.TRANSFORMED_TRAIN_DATA_FILEBASE + '*'),
        batch_size=common.TRAIN_BATCH_SIZE)
    estimator.train(input_fn=train_input_fn,
                    max_steps=common.TRAIN_NUM_EPOCHS * num_train_instances /
                    common.TRAIN_BATCH_SIZE)

    # Evaluate model on test dataset.
    eval_input_fn = _make_training_input_fn(
        tf_transform_output,
        os.path.join(working_dir, common.TRANSFORMED_TEST_DATA_FILEBASE + '*'),
        batch_size=1)

    # Export the model.
    serving_input_fn = _make_serving_input_fn(tf_transform_output)
    exported_model_dir = os.path.join(working_dir, common.EXPORTED_MODEL_DIR)
    estimator.export_saved_model(exported_model_dir, serving_input_fn)

    return estimator.evaluate(input_fn=eval_input_fn, steps=num_test_instances)
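
The helpers _make_training_input_fn and _make_serving_input_fn come from the surrounding module and are not shown. In the tf.Transform examples this kind of serving function usually parses raw tf.Examples and applies the saved transform graph before handing features to the model; a hedged sketch is below, where common.RAW_DATA_FEATURE_SPEC and common.LABEL_KEY are assumed names.

# Rough sketch only; RAW_DATA_FEATURE_SPEC and LABEL_KEY are assumptions
# about what the surrounding `common` module provides.
def _make_serving_input_fn(tf_transform_output):
    raw_feature_spec = dict(common.RAW_DATA_FEATURE_SPEC)
    raw_feature_spec.pop(common.LABEL_KEY)

    def serving_input_fn():
        raw_input_fn = tf_estimator.export.build_parsing_serving_input_receiver_fn(
            raw_feature_spec, default_batch_size=None)
        serving_input_receiver = raw_input_fn()
        # Apply the tf.Transform graph written during preprocessing.
        transformed_features = tf_transform_output.transform_raw_features(
            serving_input_receiver.features)
        return tf_estimator.export.ServingInputReceiver(
            transformed_features, serving_input_receiver.receiver_tensors)

    return serving_input_fn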
Code Example #6
def simple_linear_classifier_multivalent(export_path, eval_export_path):
  """Trains and exports a simple linear classifier with multivalent features."""

  def input_fn():
    """Train input function."""
    return {
        'animals':
            tf.SparseTensor(
                values=[
                    'cat', 'dog', 'bird', 'cat', 'dog', 'cat', 'bird', 'dog',
                    'bird', 'cat', 'dog', 'bird'
                ],
                indices=[[0, 0], [1, 0], [2, 0], [4, 0], [4, 1], [5, 0], [5, 1],
                         [6, 0], [6, 1], [7, 0], [7, 1], [7, 2]],
                dense_shape=[8, 3])
    }, tf.constant([[0], [0], [0], [0], [1], [0], [0], [1]])

  animals = tf.feature_column.categorical_column_with_vocabulary_list(
      'animals', ['bird', 'cat', 'dog'])
  label = tf.feature_column.numeric_column('label')

  all_features = [animals]
  feature_spec = tf.feature_column.make_parse_example_spec(all_features)
  eval_feature_spec = tf.feature_column.make_parse_example_spec(all_features +
                                                                [label])

  classifier = tf_estimator.LinearClassifier(
      feature_columns=all_features, loss_reduction=tf.losses.Reduction.SUM)
  classifier.train(input_fn=input_fn, steps=5000)

  return util.export_model_and_eval_model(
      estimator=classifier,
      serving_input_receiver_fn=(
          tf_estimator.export.build_parsing_serving_input_receiver_fn(
              feature_spec)),
      eval_input_receiver_fn=export.build_parsing_eval_input_receiver_fn(
          eval_feature_spec, label_key='label'),
      export_path=export_path,
      eval_export_path=eval_export_path)
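
Like Code Example #1, this function follows the TensorFlow Model Analysis example-trainer pattern and hands the trained estimator to util.export_model_and_eval_model. A minimal usage sketch (the directory layout here is arbitrary):

# Minimal usage sketch; the directory layout is arbitrary.
import os
import tempfile

base_dir = tempfile.mkdtemp()
result = simple_linear_classifier_multivalent(
    os.path.join(base_dir, 'export'),
    os.path.join(base_dir, 'eval_export'))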