Example #1
def CastToFloat(tensor):
  if tensor.dtype == dtypes.string:
    return tensor_forest_ops.reinterpret_string_to_float(tensor)
  elif tensor.dtype.is_integer:
    return math_ops.to_float(tensor)
  else:
    return tensor
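
CastToFloat normalizes the dtype of a single feature column before the columns are concatenated: string tensors are reinterpreted as floats by a tensor_forest-specific op, integer tensors are cast to float, and float tensors pass through unchanged. Below is a minimal standalone sketch of the same dtype dispatch using only the public TensorFlow API; the helper name cast_to_float_public is made up for illustration, and the string branch is left as a stub because reinterpret_string_to_float has no public-API equivalent.

import tensorflow as tf

def cast_to_float_public(tensor):
  # Same dtype dispatch as CastToFloat above, but with public tf.* calls.
  if tensor.dtype == tf.string:
    # tensor_forest handles this branch with its custom
    # reinterpret_string_to_float op; there is no drop-in public equivalent.
    raise NotImplementedError("string columns need the tensor_forest op")
  elif tensor.dtype.is_integer:
    return tf.cast(tensor, tf.float32)  # math_ops.to_float is a cast to float32
  else:
    return tensor  # already floating point, pass through unchanged

# An int64 column becomes float32; a float column is returned as-is.
print(cast_to_float_public(tf.constant([[1], [2]], dtype=tf.int64)).dtype)
print(cast_to_float_public(tf.constant([[0.5], [1.5]])).dtype)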
Example #2
def CastToFloat(tensor):
  if tensor.dtype == dtypes.string:
    return tensor_forest_ops.reinterpret_string_to_float(tensor)
  elif tensor.dtype.is_integer:
    return math_ops.to_float(tensor)
  else:
    return tensor
def ParseDataTensorOrDict(data):
  """Return a tensor to use for input data.

  The incoming features can be a dict where keys are the string names of the
  columns, which we turn into a single 2-D tensor.

  Args:
    data: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A 2-D tensor for input to tensor_forest and a list of the type of each
    column (e.g. continuous float, categorical).
  """
  if isinstance(data, dict):
    # If there's at least one sparse tensor, everything has to be sparse.
    is_sparse = False
    for v in data.values():
      if isinstance(v, sparse_tensor.SparseTensor):
        is_sparse = True
        break

    categorical_types = (dtypes.string, dtypes.int32, dtypes.int64)
    data_spec = [constants.DATA_CATEGORICAL if
                 data[k].dtype in categorical_types else
                 constants.DATA_FLOAT for k in sorted(data.keys())]
    data_spec = [constants.DATA_FLOAT] + data_spec
    features = []
    for k in sorted(data.keys()):
      if data[k].dtype == dtypes.string:
        features.append(tensor_forest_ops.reinterpret_string_to_float(data[k]))
      elif data[k].dtype.is_integer:
        features.append(math_ops.to_float(data[k]))
      else:
        features.append(data[k])

    if is_sparse:
      return sparse_ops.sparse_concat(1, features), data_spec
    else:
      return array_ops.concat_v2(features, 1), data_spec
  else:
    return (data, [constants.DATA_FLOAT])
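
In the dict branch, ParseDataTensorOrDict walks the columns in sorted key order twice: once to build the per-column type spec (with an extra constants.DATA_FLOAT prepended), and once to cast each column with the same logic as CastToFloat before concatenating along axis 1 (densely with array_ops.concat_v2, or with sparse_ops.sparse_concat when any input is sparse). The sketch below reproduces only the dense path with the public TensorFlow API; the 'float'/'categorical' spec strings and the parse_dense_dict name are stand-ins for illustration, not tensor_forest's real constants or API.

import tensorflow as tf

def parse_dense_dict(data):
  # data: dict mapping column name -> dense 2-D tensor (one column each).
  categorical_types = (tf.string, tf.int32, tf.int64)
  # Per-column type spec in sorted key order, plus a leading float entry,
  # mirroring the extra constants.DATA_FLOAT prepended in the code above.
  data_spec = ['float'] + [
      'categorical' if data[k].dtype in categorical_types else 'float'
      for k in sorted(data)]
  # Cast each numeric column to float32 and stack the columns side by side.
  # (String columns would need the tensor_forest reinterpret op and are not
  # handled in this sketch.)
  features = [tf.cast(data[k], tf.float32) for k in sorted(data)]
  return tf.concat(features, 1), data_spec

columns = {
    'age': tf.constant([[31], [42]], dtype=tf.int64),
    'height': tf.constant([[1.70], [1.82]]),
}
dense, spec = parse_dense_dict(columns)
print(dense.shape, spec)  # (2, 2) ['float', 'categorical', 'float']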