示例#1
0
    def default_transforming_serving_input_fn():
        """Serving input_fn that runs the saved transform over raw CSV input."""

        # One default per raw column so decode_csv knows both the dtype and
        # the fallback for missing fields.  0 and '' are legal defaults,
        # hence the explicit `is not None` test.
        record_defaults = []
        for key in raw_keys:
            representation = column_schemas[key].representation
            dtype = column_schemas[key].domain.dtype
            if representation.default_value is not None:
                record_defaults.append(
                    tf.constant([representation.default_value], dtype=dtype))
            else:
                record_defaults.append(tf.constant([], dtype=dtype))

        placeholder = tf.placeholder(dtype=tf.string,
                                     shape=(None, ),
                                     name="csv_input_placeholder")
        parsed_tensors = tf.decode_csv(placeholder,
                                       record_defaults,
                                       field_delim=field_delim)

        raw_serving_features = dict(zip(raw_keys, parsed_tensors))

        # Apply the saved tf.Transform graph to the parsed raw features.
        _, transformed_features = (
            saved_transform_io.partially_apply_saved_transform(
                transform_savedmodel_dir, raw_serving_features))

        if convert_scalars_to_vectors:
            transformed_features = _convert_scalars_to_vectors(
                transformed_features)

        return input_fn_utils.InputFnOps(transformed_features, None,
                                         {"csv_example": placeholder})
示例#2
0
def build_csv_serving_tensors_for_transform_step(analysis_path, features,
                                                 schema, stats, keep_target):
    """Builds a serving function starting from raw csv.

  This should only be used by transform.py (the transform step).

  For image columns, the image should be a base64 string encoding the image.
  The output of this function will transform that image to a 2048 long vector
  using the inception model.
  """

    csv_header, record_defaults = csv_header_and_defaults(
        features, schema, stats, keep_target)

    placeholder = tf.placeholder(dtype=tf.string,
                                 shape=(None, ),
                                 name='csv_input_placeholder')
    raw_features = dict(
        zip(csv_header, tf.decode_csv(placeholder, record_defaults)))

    transform_fn = make_preprocessing_fn(analysis_path, features, keep_target)
    transformed_tensors = transform_fn(raw_features)

    # Dense rank-1 tensors get a trailing dimension so downstream consumers
    # see batched columns; sparse tensors pass through untouched.
    transformed_features = {}
    for name, tensor in six.iteritems(transformed_tensors):
        is_flat_dense = (isinstance(tensor, tf.Tensor)
                         and tensor.get_shape().ndims == 1)
        transformed_features[name] = (
            tf.expand_dims(tensor, -1) if is_flat_dense else tensor)

    return input_fn_utils.InputFnOps(transformed_features, None,
                                     {"csv_example": placeholder})
def build_csv_serving_tensors(analysis_path, features, schema, stats,
                              keep_target):
    """Returns a placeholder tensor and transformed tensors."""

    csv_header, record_defaults = csv_header_and_defaults(
        features, schema, stats, keep_target)

    placeholder = tf.placeholder(dtype=tf.string,
                                 shape=(None, ),
                                 name='csv_input_placeholder')
    raw_features = dict(
        zip(csv_header, tf.decode_csv(placeholder, record_defaults)))

    transform_fn = make_preprocessing_fn(analysis_path, features, keep_target)

    # Expand dense rank-1 tensors to rank 2; leave everything else alone.
    transformed_features = {}
    for name, tensor in six.iteritems(transform_fn(raw_features)):
        needs_expand = (isinstance(tensor, tf.Tensor)
                        and tensor.get_shape().ndims == 1)
        transformed_features[name] = (
            tf.expand_dims(tensor, -1) if needs_expand else tensor)

    return input_fn_utils.InputFnOps(transformed_features, None,
                                     {"csv_example": placeholder})
示例#4
0
def _predict_input_fn():
    """Supplies the input to the model.

    Returns:
      A tuple consisting of 1) a dictionary of tensors whose keys are
      the feature names, and 2) a tensor of target labels if the mode
      is not INFER (and None, otherwise).
    """
    # Placeholder for the serialized tf.Example proto input.
    examples = tf.placeholder(tf.string, shape=(None, ), name="examples")

    # Parsing spec derived from the model's feature columns, plus the id
    # and string-valued target fields that ride along for prediction output.
    feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
        feature_columns=_get_feature_columns(include_target_column=False))
    feature_spec[FLAGS.id_field] = tf.FixedLenFeature([], dtype=tf.string)
    feature_spec[FLAGS.target_field + "_string"] = tf.FixedLenFeature(
        [], dtype=tf.string)

    features = tf.parse_example(examples, feature_spec)
    # Surface the example id under the prediction key.
    features[PREDICTION_KEY] = features[FLAGS.id_field]

    return input_fn_utils.InputFnOps(
        features=features,
        labels=None,
        default_inputs={PREDICTION_EXAMPLES: examples})
示例#5
0
def serving_input_fn():
    """Builds the input subgraph for prediction.

    This serving_input_fn accepts raw Tensor inputs fed to the server as
    JSON dictionaries; each value is converted to a Tensor of the column's
    dtype.

    Returns:
       tf.contrib.learn.input_fn_utils.InputFnOps, a named tuple
       (features, labels, inputs) where features is a dict of features to
       be passed to the Estimator, labels is always None for prediction,
       and inputs is a dictionary of inputs that the prediction server
       should expect from the user.
    """
    feature_placeholders = {}
    for column in INPUT_COLUMNS:
        feature_placeholders[column.name] = tf.placeholder(column.dtype,
                                                           [None])
    # DNNCombinedLinearClassifier expects rank 2 Tensors, but inputs should
    # be rank 1, so that we can provide scalars to the server.
    features = {}
    for name, placeholder in feature_placeholders.items():
        features[name] = tf.expand_dims(placeholder, -1)
    return input_fn_utils.InputFnOps(features, None, feature_placeholders)
示例#6
0
def serving_input_fn():
    """Serving input_fn: rank-1 placeholders expanded into rank-2 features."""
    placeholders = feature_columns_to_placeholders(INPUT_COLUMNS)
    expanded = {}
    for name, tensor in placeholders.items():
        expanded[name] = tf.expand_dims(tensor, -1)
    return input_fn_utils.InputFnOps(expanded, None, placeholders)
示例#7
0
 def serving_input_fn():
     """Pairs input_fn features/labels with a serialized-Example input."""
     serialized_tf_example = array_ops.placeholder(
         dtype=dtypes.string, shape=[None], name='input_example_tensor')
     features, labels = input_fn()
     return input_fn_utils.InputFnOps(features, labels,
                                      {'examples': serialized_tf_example})
示例#8
0
    def default_transforming_serving_input_fn():
        """Serving input_fn that applies transforms to raw data in Tensors.

        Raises:
          ValueError: if any raw serving placeholder is a SparseTensor,
            since sparse tensors cannot be fed directly at serving time.
        """

        raw_serving_features = {
            k: v
            for k, v in six.iteritems(
                raw_metadata.schema.as_batched_placeholders())
            if k in raw_feature_keys
        }
        # BUG FIX: iterating the dict directly yields its *keys* (strings),
        # so the original isinstance check could never match a SparseTensor
        # and the guard below was dead code.  Check the placeholder tensors
        # (the values) instead.
        sparse_serving_features = [
            t for t in raw_serving_features.values()
            if isinstance(t, tf.SparseTensor)
        ]
        if sparse_serving_features:
            raise ValueError(
                "Feeding sparse tensors directly at serving time is not "
                "supported.")
        _, transformed_features = (
            saved_transform_io.partially_apply_saved_transform(
                transform_savedmodel_dir, raw_serving_features))

        if convert_scalars_to_vectors:
            transformed_features = _convert_scalars_to_vectors(
                transformed_features)

        return input_fn_utils.InputFnOps(transformed_features, None,
                                         raw_serving_features)
示例#9
0
    def test_build_all_signature_defs(self):
        """build_all_signature_defs yields one signature per registered
        (input alternative, output head) pair plus a serving default."""
        # Two candidate inputs: a pre-parsed features tensor and a
        # serialized-example tensor registered via the default-input slot.
        input_features = constant_op.constant(["10"])
        input_example = constant_op.constant(["11"])
        input_ops = input_fn_utils.InputFnOps({"features": input_features},
                                              None,
                                              {"default input": input_example})
        input_alternatives, _ = (
            saved_model_export_utils.get_input_alternatives(input_ops))
        # Three output heads, one per problem type.
        output_1 = constant_op.constant(["1"])
        output_2 = constant_op.constant(["2"])
        output_3 = constant_op.constant(["3"])
        provided_output_alternatives = {
            "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
                "some_output_1": output_1
            }),
            "head-2": (constants.ProblemType.CLASSIFICATION, {
                "some_output_2": output_2
            }),
            "head-3": (constants.ProblemType.UNSPECIFIED, {
                "some_output_3": output_3
            }),
        }
        model_fn_ops = model_fn.ModelFnOps(
            model_fn.ModeKeys.INFER,
            predictions={"some_output": constant_op.constant(["4"])},
            output_alternatives=provided_output_alternatives)
        # "head-1" is passed as the default head name.
        output_alternatives, _ = (
            saved_model_export_utils.get_output_alternatives(
                model_fn_ops, "head-1"))

        signature_defs = saved_model_export_utils.build_all_signature_defs(
            input_alternatives, output_alternatives, "head-1")

        # The default head "head-1" also appears as "serving_default"; each
        # head's problem type selects the signature flavor
        # (regression / classification / predict).
        expected_signature_defs = {
            "serving_default":
            signature_def_utils.regression_signature_def(
                input_example, output_1),
            "default_input_alternative:head-1":
            signature_def_utils.regression_signature_def(
                input_example, output_1),
            "default_input_alternative:head-2":
            signature_def_utils.classification_signature_def(
                input_example, output_2, None),
            "default_input_alternative:head-3":
            signature_def_utils.predict_signature_def({"input": input_example},
                                                      {"output": output_3}),
            # "features_input_alternative:head-1":
            #     signature_def_utils.regression_signature_def(input_features,
            #                                                  output_1),
            # "features_input_alternative:head-2":
            #     signature_def_utils.classification_signature_def(input_features,
            #                                                      output_2, None),
            # "features_input_alternative:head-3":
            #     signature_def_utils.predict_signature_def({
            #         "input": input_features
            #     }, {"output": output_3}),
        }

        self.assertDictEqual(expected_signature_defs, signature_defs)
示例#10
0
  def test_get_input_alternatives(self):
    """Plain strings stand in for the real dicts; only plumbing is tested."""
    fn_ops = input_fn_utils.InputFnOps("bogus features dict", None,
                                       "bogus default input dict")
    alternatives, _ = saved_model_export_utils.get_input_alternatives(fn_ops)
    default_key = saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY
    self.assertEqual(alternatives[default_key], "bogus default input dict")
示例#11
0
 def parsing_transforming_serving_input_fn():
     """Serving input_fn that applies transforms to raw data in tf.Examples."""
     # Start from the stock parsing input_fn, then run the saved transform
     # graph over the parsed raw features.
     raw_input_fn = input_fn_utils.build_parsing_serving_input_fn(
         raw_serving_feature_spec, default_batch_size=None)
     raw_features, _, inputs = raw_input_fn()
     transformed_features = (
         saved_transform_io.partially_apply_saved_transform(
             transform_savedmodel_dir, raw_features))[1]
     return input_fn_utils.InputFnOps(transformed_features, None, inputs)
示例#12
0
def serving_input_fn():
    """Rank-1 placeholders per input column, expanded to rank 2."""
    feature_placeholders = {}
    for column in INPUT_COLUMNS:
        feature_placeholders[column.name] = tf.placeholder(
            column_to_dtype(column), [None])
    # Add a trailing dimension so the model receives rank-2 tensors while
    # the server can still be fed scalars.
    features = {}
    for name, placeholder in feature_placeholders.items():
        features[name] = tf.expand_dims(placeholder, -1)
    return input_fn_utils.InputFnOps(features, None, feature_placeholders)
示例#13
0
def serving_input_fn():
    """Serve the input_fn."""
    # One rank-1 placeholder per feature column, keyed by column name.
    feature_placeholders = {
        name: tf.placeholder(column_to_dtype(name), [None])
        for name in FEATURE_COLUMNS
    }

    # Expand each placeholder with a trailing dimension for the model.
    features = {}
    for name, tensor in feature_placeholders.items():
        features[name] = tf.expand_dims(tensor, -1)

    return input_fn_utils.InputFnOps(features, None, feature_placeholders)
    def serving_input_fn_with_asset():
        """serving_input_fn variant adding a vocab-file-backed lookup."""
        features, labels, inputs = serving_input_fn()

        # Write the vocab file asset, then wire a hash-table lookup over
        # the 'feature' column into the served features.
        vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
        with gfile.GFile(vocab_file_name, mode='w') as vocab_file:
            vocab_file.write(VOCAB_FILE_CONTENT)
        hashtable = lookup.HashTable(
            lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
        features['bogus_lookup'] = hashtable.lookup(
            math_ops.to_int64(features['feature']))

        return input_fn_utils.InputFnOps(features, labels, inputs)
示例#15
0
  def serving_input_fn():
    """Input function for serving: parse Examples, apply tf.Transform."""
    # Build the stock parsing serving input_fn (expects a parsed Example
    # proto at serving time) and call it for raw features plus the
    # serialized-Example placeholder inputs.  See also
    # input_fn_utils.build_default_serving_input_fn.
    raw_input_fn = input_fn_utils.build_parsing_serving_input_fn(
        raw_feature_spec)
    raw_features, _, default_inputs = raw_input_fn()

    # Replay the transform that produced the materialized training data.
    transformed_features = tf_transform_output.transform_raw_features(
        raw_features)

    return input_fn_utils.InputFnOps(transformed_features, None,
                                     default_inputs)
示例#16
0
def serving_input_fn():
  """Serving input_fn exposing a 'flower_features' float placeholder."""
  feature_placeholders = {
      "flower_features": tf.placeholder(tf.float32, shape=[None, 4])
  }
  # DNNClassifier expects rank 2 Tensors, but inputs should be rank 1, so
  # that we can provide scalars to the server.
  features = {}
  for key, tensor in feature_placeholders.items():
    features[key] = tf.expand_dims(tensor, -1)

  return input_fn_utils.InputFnOps(
      features=features,  # input into graph
      labels=None,
      default_inputs=feature_placeholders)  # tensor input from request
示例#17
0
def serving_input_receiver_fn():
    """Build the serving inputs.

    Returns:
      tf.estimator.export.ServingInputReceiver that feeds the raw 'X'
      placeholder straight through to the model.
    """
    # Cleanup: the original body carried several abandoned experiments
    # (an unused parse-example feature_spec, an unused
    # build_parsing_serving_input_receiver_fn, a discarded InputFnOps,
    # and debug prints).  Only the final returned receiver mattered; the
    # dead code and prints are removed here.

    # The outer dimension (None) usually allows batching; here the shape is
    # fixed at (19, 1) with no batch dimension, so each request carries
    # exactly one instance.  NOTE(review): consider shape=(None, 19, 1) if
    # batched serving is wanted — confirm against the exported model.
    inputs = {'X': tf.placeholder(dtype=tf.float32, shape=(19, 1))}

    return tf.estimator.export.ServingInputReceiver(inputs, inputs)
示例#18
0
    def serving_input_fn():
        """Input function for serving: parse Examples, apply saved transform."""
        # Generate the stock parsing input_fn (expects a parsed Example proto
        # at serving time) and call it for the raw features and inputs.  See
        # also input_fn_utils.build_default_serving_input_fn.
        raw_input_fn = input_fn_utils.build_parsing_serving_input_fn(
            raw_feature_spec)
        raw_features, _, default_inputs = raw_input_fn()

        # Re-apply the transform that generated the materialized data.
        transformed_features = (tft.partially_apply_saved_transform(
            os.path.join(working_dir, tft.TRANSFORM_FN_DIR), raw_features))[1]

        return input_fn_utils.InputFnOps(transformed_features, None,
                                         default_inputs)
def serving_input_fn():
    """Rank-1 placeholders per feature column, expanded to rank 2."""
    feature_placeholders = {}
    for column in FEATURE_COLUMNS:
        feature_placeholders[column] = tf.placeholder(
            column_to_dtype(column), [None])
    # DNNCombinedLinearClassifier expects rank 2 Tensors, but inputs should
    # be rank 1, so that we can provide scalars to the server.
    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }

    return input_fn_utils.InputFnOps(
        features,  # input into graph
        None,
        feature_placeholders)  # tensor input converted from request
示例#20
0
 def _input_fn():
     """Returns (features, label) when training, a serving bundle otherwise."""
     with ops.name_scope('inputs'):
         x = array_ops.placeholder_with_default(0.0, shape=[], name='x')
         y = array_ops.placeholder_with_default(0.0, shape=[], name='y')
     features = {'x': x, 'y': y}
     label = constant_op.constant(0.0)
     # Training mode returns the same pair regardless of the API flavor.
     if train:
         return features, label
     # Serving mode: core estimator API vs. contrib input_fn_utils API.
     if core:
         return export_lib.ServingInputReceiver(features=features,
                                                receiver_tensors=features)
     return input_fn_utils.InputFnOps(features=features,
                                      labels={},
                                      default_inputs=features)
示例#21
0
    def _serving_input_fn():
        """Applies transforms to raw data in json-example strings."""

        json_example_placeholder = tf.placeholder(tf.string, shape=[None])
        # Decode the JSON payload into serialized Examples, then parse them
        # against the raw serving feature spec.
        raw_features = tf.parse_example(
            tf.decode_json_example(json_example_placeholder),
            raw_serving_feature_spec)

        _, transformed_features = (
            saved_transform_io.partially_apply_saved_transform(
                transform_savedmodel_dir, raw_features))

        if convert_scalars_to_vectors:
            transformed_features = _convert_scalars_to_vectors(
                transformed_features)

        return input_fn_utils.InputFnOps(
            transformed_features, None,
            {"json_example": json_example_placeholder})
def _predict_input_fn():
    """Supplies the input to the model.

    Returns:
      A tuple consisting of 1) a dictionary of tensors whose keys are
      the feature names, and 2) a tensor of target labels which for
      clustering must be 'None'.
    """
    # Placeholder for the serialized tf.Example proto input.
    examples = tf.placeholder(tf.string, shape=(None, ), name="examples")

    # Parse and densify in one pass.
    dense = _raw_features_to_dense_tensor(
        tf.parse_example(examples, _get_feature_columns()))

    return input_fn_utils.InputFnOps(features={DENSE_KEY: dense},
                                     labels=None,
                                     default_inputs={EXAMPLE_KEY: examples})
示例#23
0
File: util.py  Project: zhengr/pydatalab
def serving_from_csv_input(train_config, args, keep_target):
    """Read the input features from a placeholder csv string tensor."""
    examples = tf.placeholder(dtype=tf.string,
                              shape=(None, ),
                              name='csv_input_string')

    features = parse_example_tensor(examples=examples,
                                    train_config=train_config,
                                    keep_target=keep_target)

    # The target column is only present when the caller asked to keep it.
    target = (features.pop(train_config['target_column'])
              if keep_target else None)

    features, target = preprocess_input(
        features=features,
        target=target,
        train_config=train_config,
        preprocess_output_dir=args.preprocess_output_dir,
        model_type=args.model_type)

    return input_fn_utils.InputFnOps(features, target, {'csv_line': examples})
示例#24
0
def build_csv_serving_tensors_for_training_step(analysis_path, features,
                                                schema, stats, keep_target):
    """Builds a serving function starting from raw csv, used at model export time.

  For image columns, the image should be a base64 string encoding the image.
  The output of this function will transform that image to a 2048 long vector
  using the inception model and then a fully connected net is attached to
  the 2048 long image embedding.
  """

    # Reuse the transform-step serving graph, then bolt the image feature
    # engineering onto its transformed features.
    serving_ops = build_csv_serving_tensors_for_transform_step(
        analysis_path=analysis_path,
        features=features,
        schema=schema,
        stats=stats,
        keep_target=keep_target)

    transformed_features = image_feature_engineering(
        features=features, feature_tensors_dict=serving_ops.features)

    return input_fn_utils.InputFnOps(transformed_features, None,
                                     serving_ops.default_inputs)
示例#25
0
def serving_input_fn():
    """Accepts raw uint8 image batches and casts them to float32 features."""
    image_placeholder = tf.placeholder(tf.uint8, [None, 28, 28])
    inputs = {'image': image_placeholder}
    # Transform the data received from the API call here.
    features = [tf.cast(image_placeholder, tf.float32)]
    return input_fn_utils.InputFnOps(features, None, inputs)
示例#26
0
 def serving_input_fn():
     """Adapts the ServingInputReceiver from receiver_fn into InputFnOps."""
     r = receiver_fn()
     return input_fn_utils.InputFnOps(r.features, None, r.receiver_tensors)
示例#27
0
def serving_input_fn():
    """Serving input_fn: uint8 image batches cast to float32."""
    placeholder_dict = {'image': tf.placeholder(tf.uint8, [None, 28, 28])}
    cast_features = [tf.cast(placeholder_dict['image'], tf.float32)]
    return input_fn_utils.InputFnOps(cast_features, None, placeholder_dict)