Example #1
    def __init__(self, args):
        super(SkipThoughtsModel, self).__init__(args)
        self.uniform_initializer = tf.random_uniform_initializer(
            minval=-self.uniform_init_scale, maxval=self.uniform_init_scale)

        with open(args.vocab, 'r') as f:
            self.vocab = [l.strip() for l in f]
        # Set up input parsing via tf.contrib.layers feature columns...
        self.vocab_size = len(self.vocab)
        encode = layers.sparse_column_with_integerized_feature(
            'encode', bucket_size=self.vocab_size)
        decode_pre = layers.sparse_column_with_integerized_feature(
            'decode_pre', bucket_size=self.vocab_size)
        decode_post = layers.sparse_column_with_integerized_feature(
            'decode_post', bucket_size=self.vocab_size)
        self.features = {
            'encode': layers.embedding_column(encode, dimension=100),
            'decode_pre': layers.embedding_column(decode_pre, dimension=100),
            'decode_post': layers.embedding_column(decode_post, dimension=100),
        }
        self.feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
            self.features)
        # ... or do it the easy way:
        self.features = {
            'encode': tf.VarLenFeature(dtype=tf.int64),
            'decode_pre': tf.VarLenFeature(dtype=tf.int64),
            'decode_post': tf.VarLenFeature(dtype=tf.int64),
        }
        self.feature_spec = self.features
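The feature spec built either way is what the input pipeline hands to the example parser. A minimal sketch of that step under the TF 1.x API, assuming a batch of serialized tf.Example protos (the placeholder and variable names below are illustrative, not part of the snippet above):

import tensorflow as tf

# Hypothetical usage of the VarLenFeature spec defined above: parse serialized
# tf.Example protos into SparseTensors of token ids.
feature_spec = {
    'encode': tf.VarLenFeature(dtype=tf.int64),
    'decode_pre': tf.VarLenFeature(dtype=tf.int64),
    'decode_post': tf.VarLenFeature(dtype=tf.int64),
}
serialized_examples = tf.placeholder(tf.string, shape=[None], name='examples')
parsed = tf.parse_example(serialized_examples, feature_spec)
encode_ids = parsed['encode']  # SparseTensor with one row of token ids per example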
Example #2
def build_estimator(model_dir, nbuckets, hidden_units):
  """
     Build an estimator starting from INPUT COLUMNS.
     These include feature transformations and synthetic features.
     The model is a wide-and-deep model.
  """

  # input columns
  (dayofweek, hourofday, latdiff, londiff, euclidean, plon, plat, dlon, dlat, pcount) = INPUT_COLUMNS 

  # bucketize the lats & lons
  latbuckets = np.linspace(38.0, 42.0, nbuckets).tolist()
  lonbuckets = np.linspace(-76.0, -72.0, nbuckets).tolist()
  b_plat = layers.bucketized_column(plat, latbuckets)
  b_dlat = layers.bucketized_column(dlat, latbuckets)
  b_plon = layers.bucketized_column(plon, lonbuckets)
  b_dlon = layers.bucketized_column(dlon, lonbuckets)

  # feature cross
  ploc = layers.crossed_column([b_plat, b_plon], nbuckets*nbuckets)
  dloc = layers.crossed_column([b_dlat, b_dlon], nbuckets*nbuckets)
  pd_pair = layers.crossed_column([ploc, dloc], nbuckets ** 4 )
  day_hr =  layers.crossed_column([dayofweek, hourofday], 24*7)

  # Wide columns and deep columns.
  wide_columns = [
      # feature crosses
      dloc, ploc, pd_pair,
      day_hr,

      # sparse columns
      dayofweek, hourofday,

      # anything with a linear relationship
      pcount 
  ]

  deep_columns = [
      # embedding_column to "group" together ...
      layers.embedding_column(pd_pair, 10),
      layers.embedding_column(day_hr, 10),

      # real_valued_column
      plat, plon, dlat, dlon,
      latdiff, londiff, euclidean
  ]

  return tf.contrib.learn.DNNLinearCombinedRegressor(
      model_dir=model_dir,
      linear_feature_columns=wide_columns,
      dnn_feature_columns=deep_columns,
      dnn_hidden_units=hidden_units or [128, 32, 4])
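build_estimator unpacks a module-level INPUT_COLUMNS tuple defined elsewhere. A hedged sketch of columns consistent with that unpacking (key names, vocabularies, and bucket sizes are assumptions):

from tensorflow.contrib import layers

# Hypothetical INPUT_COLUMNS matching the unpacking in build_estimator above.
INPUT_COLUMNS = (
    layers.sparse_column_with_keys(
        'dayofweek', keys=['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']),
    layers.sparse_column_with_integerized_feature('hourofday', bucket_size=24),
    layers.real_valued_column('latdiff'),
    layers.real_valued_column('londiff'),
    layers.real_valued_column('euclidean'),
    layers.real_valued_column('plon'),
    layers.real_valued_column('plat'),
    layers.real_valued_column('dlon'),
    layers.real_valued_column('dlat'),
    layers.real_valued_column('pcount'),
)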
Example #3
def get_wide_deep():
  # define column types
  races = ['White', 'Black', 'American Indian', 'Chinese', 
           'Japanese', 'Hawaiian', 'Filipino', 'Unknown',
           'Asian Indian', 'Korean', 'Samaon', 'Vietnamese']
  is_male,mother_age,mother_race,plurality,gestation_weeks,mother_married,cigarette_use,alcohol_use = \
   [ \
    tflayers.sparse_column_with_keys('is_male', keys=['True', 'False']),
    tflayers.real_valued_column('mother_age'),
    tflayers.sparse_column_with_keys('mother_race', keys=races),
    tflayers.real_valued_column('plurality'),
    tflayers.real_valued_column('gestation_weeks'),
    tflayers.sparse_column_with_keys('mother_married', keys=['True', 'False']),
    tflayers.sparse_column_with_keys('cigarette_use', keys=['True', 'False', 'None']),
    tflayers.sparse_column_with_keys('alcohol_use', keys=['True', 'False', 'None'])
    ]

  # which columns are wide (sparse, linear relationship to output) and which are deep (complex relationship to output)?
  wide = [is_male, mother_race, plurality, mother_married, cigarette_use, alcohol_use]
  deep = [\
                mother_age,
                gestation_weeks,
                tflayers.embedding_column(mother_race, 3)
               ]
  return wide, deep
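A short sketch of how the two lists returned by get_wide_deep are typically handed to a combined estimator; the estimator type, model directory, and layer sizes are illustrative assumptions (a regressor is shown, but a classifier is wired the same way):

import tensorflow as tf

# Hypothetical wiring of the wide/deep split returned by get_wide_deep().
wide, deep = get_wide_deep()
estimator = tf.contrib.learn.DNNLinearCombinedRegressor(
    model_dir='/tmp/wide_deep_model',  # illustrative path
    linear_feature_columns=wide,
    dnn_feature_columns=deep,
    dnn_hidden_units=[64, 32])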
Example #4
def create_embed(sparse_col):
    dim = 10 # default
    if hasattr(sparse_col, 'bucket_size'):
       nbins = sparse_col.bucket_size
       if nbins is not None:
          dim = 1 + int(round(np.log2(nbins)))
    return tflayers.embedding_column(sparse_col, dimension=dim)
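A quick usage sketch for create_embed; the column name and bucket size are made up. With 1000 hash buckets the heuristic gives dimension 1 + round(log2(1000)) = 11:

import tensorflow.contrib.layers as tflayers

# Hypothetical usage: the embedding dimension is derived from the bucket size.
city = tflayers.sparse_column_with_hash_bucket('city', hash_bucket_size=1000)
city_embed = create_embed(city)  # embedding_column with dimension 11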
Example #5
def train_and_eval(train_steps, log_dir, training_set, validation_set, testing_set, ):
    sparse_columns = [
        layers.sparse_column_with_keys(attribute, training_set[attribute].unique()) for attribute in FEATURE_ATTRIBUTES
    ]
    embedding_columns = [
        layers.embedding_column(column, dimension=8) for column in sparse_columns
    ]
    m = learn.DNNClassifier(
        hidden_units=[10, 50, ],
        feature_columns=embedding_columns,
        model_dir=log_dir,
        config=learn.RunConfig(save_checkpoints_secs=1, ),
    )
    validation_metrics = {
        "accuracy": learn.MetricSpec(metric_fn=metrics.streaming_accuracy, prediction_key="classes"),
        "precision": learn.MetricSpec(metric_fn=metrics.streaming_precision, prediction_key="classes"),
        "recall": learn.MetricSpec(metric_fn=metrics.streaming_recall, prediction_key="classes"),
    }
    monitors = [
        learn.monitors.ValidationMonitor(
            input_fn=lambda: input_fn(validation_set),
            every_n_steps=1000,
            metrics=validation_metrics,
            early_stopping_rounds=1,
        ),
    ]
    m.fit(
        input_fn=lambda: input_fn(training_set),
        steps=train_steps,
        monitors=monitors,
    )
    results = m.evaluate(input_fn=lambda: input_fn(testing_set), steps=1)
    for key in sorted(results):
        print("%s: %s" % (key, results[key]))
Example #6
def get_wide_deep():
    # define column types
    
    StyleName, quantity, demand, org_ret_price, sell_price, margin, off_orig_retail, total_ots = [
        tflayers.sparse_column_with_hash_bucket('Style_Name', hash_bucket_size=1000),
        tflayers.real_valued_column('Quantity'),
        tflayers.real_valued_column('Demand'),
        tflayers.real_valued_column('Original_Retail_Price'),
        tflayers.real_valued_column('Selling_Price'),
        tflayers.real_valued_column('Margin'),
        tflayers.real_valued_column('off_Orig_Retail'),
        tflayers.real_valued_column('Total_OTS'),
    ]
    # which columns are wide (sparse, linear relationship to output)
    # and which are deep (complex relationship to output)?
    wide = [StyleName, quantity, demand]
    deep = [
        org_ret_price,
        sell_price,
        margin,
        off_orig_retail,
        total_ots,
        tflayers.embedding_column(StyleName, 3),
    ]
    return wide, deep
Example #7
def get_wide_deep():
  # define column types
  races = ['White', 'Black', 'American Indian', 'Chinese', 
           'Japanese', 'Hawaiian', 'Filipino', 'Unknown',
           'Asian Indian', 'Korean', 'Samaon', 'Vietnamese']
  is_male,mother_age,mother_race,plurality,gestation_weeks,mother_married,cigarette_use,alcohol_use = \
   [ \
    tflayers.sparse_column_with_keys('is_male', keys=['True', 'False']),
    tflayers.real_valued_column('mother_age'),
    tflayers.sparse_column_with_keys('mother_race', keys=races),
    tflayers.real_valued_column('plurality'),
    tflayers.real_valued_column('gestation_weeks'),
    tflayers.sparse_column_with_keys('mother_married', keys=['True', 'False']),
    tflayers.sparse_column_with_keys('cigarette_use', keys=['True', 'False', 'None']),
    tflayers.sparse_column_with_keys('alcohol_use', keys=['True', 'False', 'None'])
    ]

  # which columns are wide (sparse, linear relationship to output) and which are deep (complex relationship to output)?
  wide = [is_male, mother_race, plurality, mother_married, cigarette_use, alcohol_use]
  deep = [\
                mother_age,
                gestation_weeks,
                tflayers.embedding_column(mother_race, 3)
               ]
  return wide, deep
Example #8
def build_estimator(model_dir, embedding_size=8, hidden_units=None):
    """Build an estimator."""
    (gender, race, education, marital_status, relationship, workclass,
     occupation, native_country, age, education_num, capital_gain,
     capital_loss, hours_per_week) = INPUT_COLUMNS
    # Sparse base columns.
    # Reused Transformations.
    age_buckets = layers.bucketized_column(
        age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

    # Wide columns and deep columns.
    wide_columns = [
        layers.crossed_column([education, occupation],
                              hash_bucket_size=int(1e4)),
        layers.crossed_column([age_buckets, race, occupation],
                              hash_bucket_size=int(1e6)),
        layers.crossed_column([native_country, occupation],
                              hash_bucket_size=int(1e4)),
        gender,
        native_country,
        education,
        occupation,
        workclass,
        marital_status,
        relationship,
        age_buckets,
    ]

    deep_columns = [
        layers.embedding_column(workclass, dimension=embedding_size),
        layers.embedding_column(education, dimension=embedding_size),
        layers.embedding_column(marital_status, dimension=embedding_size),
        layers.embedding_column(gender, dimension=embedding_size),
        layers.embedding_column(relationship, dimension=embedding_size),
        layers.embedding_column(race, dimension=embedding_size),
        layers.embedding_column(native_country, dimension=embedding_size),
        layers.embedding_column(occupation, dimension=embedding_size),
        age,
        education_num,
        capital_gain,
        capital_loss,
        hours_per_week,
    ]

    return tf.contrib.learn.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=hidden_units or [100, 70, 50, 25])
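Here too INPUT_COLUMNS comes from elsewhere in the module. A hedged sketch of columns consistent with the unpacking above (the hash bucket sizes are assumptions; Example #15 further down builds essentially the same columns one by one):

from tensorflow.contrib import layers

# Hypothetical INPUT_COLUMNS matching the unpacking in build_estimator above.
INPUT_COLUMNS = (
    layers.sparse_column_with_keys('gender', keys=['female', 'male']),
    layers.sparse_column_with_keys('race', keys=['Amer-Indian-Eskimo', 'Asian-Pac-Islander',
                                                 'Black', 'Other', 'White']),
    layers.sparse_column_with_hash_bucket('education', hash_bucket_size=1000),
    layers.sparse_column_with_hash_bucket('marital_status', hash_bucket_size=100),
    layers.sparse_column_with_hash_bucket('relationship', hash_bucket_size=100),
    layers.sparse_column_with_hash_bucket('workclass', hash_bucket_size=100),
    layers.sparse_column_with_hash_bucket('occupation', hash_bucket_size=1000),
    layers.sparse_column_with_hash_bucket('native_country', hash_bucket_size=1000),
    layers.real_valued_column('age'),
    layers.real_valued_column('education_num'),
    layers.real_valued_column('capital_gain'),
    layers.real_valued_column('capital_loss'),
    layers.real_valued_column('hours_per_week'),
)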
Example #9
def generate_tf_columns():
    columns = OrderedDict()
    for fc in args['fc']:
        id_feature = layers.sparse_column_with_hash_bucket(
            column_name=fc['feature_name'],
            hash_bucket_size=fc['hash_bucket_size'])

        embedding = layers.embedding_column(
                        id_feature,
                        dimension=fc["embedding_dimension"])
        columns[fc['feature_name']] = embedding
    return columns
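generate_tf_columns reads its configuration from a global args dict. A hypothetical example of the 'fc' structure it expects (feature names and sizes invented):

# Hypothetical args consumed by generate_tf_columns above.
args = {
    'fc': [
        {'feature_name': 'user_id', 'hash_bucket_size': 100000, 'embedding_dimension': 16},
        {'feature_name': 'item_id', 'hash_bucket_size': 50000, 'embedding_dimension': 16},
    ]
}
columns = generate_tf_columns()  # OrderedDict of embedding columns keyed by feature name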
Example #10
def contrib_learn_classifier_test():
    """Test tf.contrib.learn.DNN_classifier."""
    language_column = layers.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=20)

    feature_columns = [
        layers.embedding_column(language_column, dimension=3),
        layers.real_valued_column("age", dtype=tf.int64)
    ]

    classifier = learn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[100, 100],
        config=learn.RunConfig(tf_random_seed=1,
                               model_dir="../model_saver/estimators/"
                               "DNN_classifier_01"),
        # optimizer=optimizer_exp_decay
    )
    classifier.fit(input_fn=_input_fn, steps=10000)
    print("variables_names:\n", str(classifier.get_variable_names()))
    # scores = classifier.evaluate(input_fn=_input_fn,
    #                              steps=100)
    # print("scores:\n", str(scores))

    scores = classifier.evaluate(
        input_fn=_input_fn,
        steps=100,
        metrics={
            'my_accuracy':
            MetricSpec(metric_fn=metrics.streaming_accuracy,
                       prediction_key="classes"),
            'my_precision':
            MetricSpec(metric_fn=metrics.streaming_precision,
                       prediction_key="classes"),
            'my_recall':
            MetricSpec(metric_fn=metrics.streaming_recall,
                       prediction_key="classes"),
            'my_metric':
            MetricSpec(metric_fn=my_metric_op, prediction_key="classes")
        })
    print("scores:\n", str(scores))

    predictions = classifier.predict(input_fn=_input_fn,
                                     outputs=["classes", "probabilities"])
    print("predictions")
    for prediction in predictions:
        print(prediction)
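The test assumes an _input_fn that yields a sparse string 'language' feature, an integer 'age' feature, and integer class labels. A minimal hand-written sketch (the concrete values are made up):

import tensorflow as tf

# Hypothetical _input_fn matching the feature columns above (3 classes).
def _input_fn():
    features = {
        'language': tf.SparseTensor(indices=[[0, 0], [1, 0], [2, 0]],
                                    values=['en', 'fr', 'zh'],
                                    dense_shape=[3, 1]),
        'age': tf.constant([[18], [20], [25]], dtype=tf.int64),
    }
    labels = tf.constant([[1], [0], [2]], dtype=tf.int64)
    return features, labels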
Example #11
def train_and_eval(model_dir, training_set, testing_set, ):
    sparse_columns = [
        layers.sparse_column_with_keys(
            attribute['name'], pandas.read_csv(attribute['path'], sep='\t')['id'].apply(str),
        ) for attribute in FEATURE_ATTRIBUTES
    ]
    embedding_columns = [layers.embedding_column(column, dimension=3) for column in sparse_columns]
    model = learn.DNNRegressor(
        hidden_units=[3, ],
        feature_columns=embedding_columns,
        model_dir=model_dir,
        config=learn.RunConfig(save_checkpoints_secs=100, ),
    )
    model.fit(input_fn=lambda: input_fn(training_set), steps=20000, )
    results = model.evaluate(input_fn=lambda: input_fn(testing_set), steps=1)
    for key in sorted(results):
        print('%s: %s' % (key, results[key]))
def build_estimator(model_dir, model_type):
    """build an estimator"""

    # base sparse feature process
    gender = layers.sparse_column_with_keys(column_name='gender', keys=['female', 'male'])
    education = layers.sparse_column_with_hash_bucket(column_name='education', hash_bucket_size=1000)
    relationship = layers.sparse_column_with_hash_bucket(column_name='relationship', hash_bucket_size=100)
    workclass = layers.sparse_column_with_hash_bucket(column_name='workclass', hash_bucket_size=100)
    occupation = layers.sparse_column_with_hash_bucket(column_name='occupation', hash_bucket_size=1000)
    native_country = layers.sparse_column_with_hash_bucket(column_name='native_country', hash_bucket_size=1000)

    # base continuous feature
    age = layers.real_valued_column(column_name='age')
    education_num = layers.real_valued_column(column_name='education_num')
    capital_gain = layers.real_valued_column(column_name='capital_gain')
    capital_loss = layers.real_valued_column(column_name='capital_loss')
    hours_per_week = layers.real_valued_column(column_name='hours_per_week')

    # Transformation: bucketization turns a continuous variable into
    # categorical labels, which can improve accuracy.
    age_bucket = layers.bucketized_column(source_column=age,
                                          boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

    # wide columns and deep columns
    # Features used by the deep model and by the wide model.
    # The wide model only uses categorical features.
    wide_columns = [gender, native_country, education, relationship, workclass, occupation, age_bucket,
                    layers.crossed_column(columns=[education, occupation], hash_bucket_size=int(1e4)),
                    layers.crossed_column(columns=[age_bucket, education, occupation], hash_bucket_size=int(1e6)),
                    layers.crossed_column(columns=[native_country, occupation], hash_bucket_size=int(1e4))]

    deep_columns = [layers.embedding_column(workclass, dimension=8),
                    layers.embedding_column(education, dimension=8),
                    layers.embedding_column(gender, dimension=8),
                    layers.embedding_column(relationship, dimension=8),
                    layers.embedding_column(native_country, dimension=8),
                    layers.embedding_column(occupation, dimension=8),
                    age, education_num, capital_gain, capital_loss, hours_per_week]

    if model_type == "wide":
        m=learn.LinearClassifier(feature_columns=wide_columns, model_dir=model_dir)
    elif model_type == "deep":
        m=learn.DNNClassifier(feature_columns=deep_columns, model_dir=model_dir, hidden_units=[100, 50])
    else:
        m=learn.DNNLinearCombinedClassifier(model_dir=model_dir,
                                            linear_feature_columns=wide_columns,
                                            dnn_feature_columns=deep_columns,
                                            dnn_hidden_units=[256, 128, 64],
                                            dnn_activation_fn=tf.nn.relu)
    return m
def build_estimator(model_dir, embedding_size=8, hidden_units=None):
  """Build a wide and deep model for predicting income category.

  Wide and deep models use deep neural nets to learn high level abstractions
  about complex features or interactions between such features.
  These models then combine the outputs from the DNN with a linear regression
  performed on simpler features. This provides a balance between power and
  speed that is effective on many structured data problems.

  You can read more about wide and deep models here:
  https://research.googleblog.com/2016/06/wide-deep-learning-better-together-with.html

  To define the model we can use the prebuilt DNNLinearCombinedClassifier class,
  and need only define the data transformations particular to our dataset, then
  assign these (potentially) transformed features to either the DNN or the
  linear portion of the model.

  Args:
    model_dir: str, the model directory used by the Classifier for checkpoints
      summaries and exports.
    embedding_size: int, the number of dimensions used to represent categorical
      features when providing them as inputs to the DNN.
    hidden_units: [int], the layer sizes of the DNN (input layer first)
  Returns:
    A DNNLinearCombinedClassifier
  """
  (gender, race, education, marital_status, relationship,
   workclass, occupation, native_country, age,
   education_num, capital_gain, capital_loss, hours_per_week) = INPUT_COLUMNS

  # Reused Transformations.
  # Continuous columns can be converted to categorical via bucketization
  age_buckets = layers.bucketized_column(
      age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

  # Wide columns and deep columns.
  wide_columns = [
      # Interactions between different categorical features can also
      # be added as new virtual features.
      layers.crossed_column(
          [education, occupation], hash_bucket_size=int(1e4)),
      layers.crossed_column(
          [age_buckets, race, occupation], hash_bucket_size=int(1e6)),
      layers.crossed_column(
          [native_country, occupation], hash_bucket_size=int(1e4)),
      gender,
      native_country,
      education,
      occupation,
      workclass,
      marital_status,
      relationship,
      age_buckets,
  ]

  deep_columns = [
      layers.embedding_column(workclass, dimension=embedding_size),
      layers.embedding_column(education, dimension=embedding_size),
      layers.embedding_column(marital_status, dimension=embedding_size),
      layers.embedding_column(gender, dimension=embedding_size),
      layers.embedding_column(relationship, dimension=embedding_size),
      layers.embedding_column(race, dimension=embedding_size),
      layers.embedding_column(native_country, dimension=embedding_size),
      layers.embedding_column(occupation, dimension=embedding_size),
      age,
      education_num,
      capital_gain,
      capital_loss,
      hours_per_week,
  ]

  return tf.contrib.learn.DNNLinearCombinedClassifier(
      model_dir=model_dir,
      linear_feature_columns=wide_columns,
      dnn_feature_columns=deep_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25])
Example #14
def build_estimator(model_dir, nbuckets, hidden_units):
    """
     Build an estimator starting from INPUT COLUMNS.
     These include feature transformations and synthetic features.
     The model is a wide-and-deep model.
  """

    # input columns
    (dayofweek, hourofday, latdiff, londiff, euclidean, plon, plat, dlon, dlat,
     pcount) = INPUT_COLUMNS

    # bucketize the lats & lons
    latbuckets = np.linspace(38.0, 42.0, nbuckets).tolist()
    lonbuckets = np.linspace(-76.0, -72.0, nbuckets).tolist()
    b_plat = layers.bucketized_column(plat, latbuckets)
    b_dlat = layers.bucketized_column(dlat, latbuckets)
    b_plon = layers.bucketized_column(plon, lonbuckets)
    b_dlon = layers.bucketized_column(dlon, lonbuckets)

    # feature cross
    ploc = layers.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
    dloc = layers.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
    pd_pair = layers.crossed_column([ploc, dloc], nbuckets**4)
    day_hr = layers.crossed_column([dayofweek, hourofday], 24 * 7)

    # Wide columns and deep columns.
    wide_columns = [
        # feature crosses
        dloc,
        ploc,
        pd_pair,
        day_hr,

        # sparse columns
        dayofweek,
        hourofday,

        # anything with a linear relationship
        pcount
    ]

    deep_columns = [
        # embedding_column to "group" together ...
        layers.embedding_column(pd_pair, 10),
        layers.embedding_column(day_hr, 10),

        # real_valued_column
        plat,
        plon,
        dlat,
        dlon,
        latdiff,
        londiff,
        euclidean
    ]

    return tf.contrib.learn.DNNLinearCombinedRegressor(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=hidden_units or [128, 32, 4])
Example #15
def build_feature_cols():
    # Sparse base columns.
    gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
                                                       keys=["female", "male"])
    race = tf.contrib.layers.sparse_column_with_keys(column_name="race",
                                                     keys=[
                                                         "Amer-Indian-Eskimo",
                                                         "Asian-Pac-Islander",
                                                         "Black", "Other",
                                                         "White"
                                                     ])

    education = tf.contrib.layers.sparse_column_with_hash_bucket(
        "education", hash_bucket_size=1000)
    marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
        "marital_status", hash_bucket_size=100)
    relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
        "relationship", hash_bucket_size=100)
    workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
        "workclass", hash_bucket_size=100)
    occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
        "occupation", hash_bucket_size=1000)
    native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "native_country", hash_bucket_size=1000)

    # Continuous base columns.
    age = real_valued_column("age")
    education_num = real_valued_column("education_num")
    capital_gain = real_valued_column("capital_gain")
    capital_loss = real_valued_column("capital_loss")
    hours_per_week = real_valued_column("hours_per_week")

    # Transformations.
    age_buckets = tf.contrib.layers.bucketized_column(
        age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    education_occupation = tf.contrib.layers.crossed_column(
        [education, occupation], hash_bucket_size=int(1e4))
    age_race_occupation = tf.contrib.layers.crossed_column(
        [age_buckets, race, occupation], hash_bucket_size=int(1e6))
    country_occupation = tf.contrib.layers.crossed_column(
        [native_country, occupation], hash_bucket_size=int(1e4))

    # Wide columns and deep columns.
    wide_columns = [
        gender, native_country, education, occupation, workclass, race,
        marital_status, relationship, age_buckets, education_occupation,
        age_race_occupation, country_occupation
    ]

    deep_columns = [
        embedding_column(gender, dimension=8),
        embedding_column(native_country, dimension=8),
        embedding_column(education, dimension=8),
        embedding_column(occupation, dimension=8),
        embedding_column(workclass, dimension=8),
        embedding_column(race, dimension=8),
        embedding_column(marital_status, dimension=8),
        embedding_column(relationship, dimension=8),
        embedding_column(age_buckets, dimension=8),
        embedding_column(education_occupation, dimension=8),
        embedding_column(age_race_occupation, dimension=8),
        embedding_column(country_occupation, dimension=8),
        age,
        education_num,
        capital_gain,
        capital_loss,
        hours_per_week,
    ]

    return wide_columns, deep_columns
Example #16
def build_estimator(model_dir=MODEL_DIR):
    """
    Build an estimator using
    CONTINUOUS_COLUMNS, BINARY_COLUMNS and MULTI_CATEGORY_COLUMNS.
    """
    bucketized_columns = \
        [sparse_column_with_hash_bucket(col, 1000)
         for col in MULTI_CATEGORY_COLUMNS] + \
        [sparse_column_with_integerized_feature(col, bucket_size=2)
         for col in BINARY_COLUMNS]

    real_valued_columns = \
        [real_valued_column(col) for col in CONTINUOUS_COLUMNS]

    crossed_columns = \
        []

    # Wide columns and deep columns.
    wide_columns = \
        bucketized_columns + \
        real_valued_columns + \
        crossed_columns

    # embedding columns for hash_bucket columns
    deep_columns = \
        [embedding_column(col, dimension=EMBEDDING_DIMENSION)
         for col in bucketized_columns] + \
        real_valued_columns + \
        crossed_columns

    if MODEL_TYPE == "wide":
        print('Creating wide LinearClassifier model...\n')
        model = tf.contrib.learn.LinearClassifier(
            model_dir=model_dir,
            n_classes=2,
            feature_columns=wide_columns,
            # optimizer=tf.train.GradientDescentOptimizer(
            #     learning_rate=FLAGS.learn_rate)
            # optimizer=tf.train.FtrlOptimizer(
            #     learning_rate=LEARN_RATE,
            #     l1_regularization_strength=0.0,
            #     l2_regularization_strength=0.0),
        )

    elif MODEL_TYPE == "deep":
        print('Creating deep DNNClassifier model...\n')
        model = tf.contrib.learn.DNNClassifier(
            model_dir=model_dir,
            n_classes=2,
            feature_columns=deep_columns,
            hidden_units=HIDDEN_UNITS,
            # optimizer=tf.train.FtrlOptimizer(
            #     learning_rate=LEARN_RATE,
            #     l1_regularization_strength=0.0,
            #     l2_regularization_strength=0.0),
        )
    else:
        print('Creating deepNwide DNNLinearCombinedClassifier model...\n')
        model = tf.contrib.learn.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            n_classes=2,
            linear_feature_columns=wide_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=HIDDEN_UNITS,
            # optimizer=tf.train.FtrlOptimizer(
            #     learning_rate=LEARN_RATE,
            #     l1_regularization_strength=0.0,
            #     l2_regularization_strength=0.0),
        )

    return model
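build_estimator here is driven entirely by module-level constants. A hypothetical set of values, purely to show the expected types and shapes:

# Hypothetical module-level configuration assumed by build_estimator above.
MODEL_DIR = '/tmp/wide_deep_model'
MODEL_TYPE = 'deepNwide'  # 'wide', 'deep', or anything else for the combined model
HIDDEN_UNITS = [128, 64, 32]
EMBEDDING_DIMENSION = 8
CONTINUOUS_COLUMNS = ['age', 'hours_per_week']
BINARY_COLUMNS = ['is_married']
MULTI_CATEGORY_COLUMNS = ['occupation', 'native_country']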
for var in categorical_vars:
  le = LabelEncoder().fit(X_train[var])
  X_train[var + '_ids'] = le.transform(X_train[var])
  X_test[var + '_ids'] = le.transform(X_test[var])
  X_train.pop(var)
  X_test.pop(var)
  categorical_var_encoders[var] = le

### Note: Feature Columns currently (2016/10/22) not working, update is coming.
# Setup feature columns.
CATEGORICAL_EMBED_SIZE = 10 # Note, you can customize this per variable.
feature_columns = [
  layers.real_valued_column(var) for var in continues_vars
] + [
  layers.embedding_column(
     layers.sparse_column_with_integerized_feature(
       var + '_ids', len(categorical_var_encoders[var].classes_)), 
     CATEGORICAL_EMBED_SIZE) for var in
  categorical_vars
]


# Linear classifier.
'''
random.seed(42)
tflr = learn.LinearClassifier(n_classes=2,
    feature_columns=feature_columns,
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05))
tflr.fit(input_fn=train_input_fn, steps=500)
print(list(tflr.predict(input_fn=test_input_fn, as_iterable=True)), y_test)
print(accuracy_score(y_test, list(tflr.predict(input_fn=test_input_fn, as_iterable=True))))
'''
def get_feature_column():
    feature_name = 'gender'
    sparse_id_column = layers.sparse_column_with_hash_bucket(
        column_name=feature_name, hash_bucket_size=100)
    feature_column = layers.embedding_column(sparse_id_column, dimension=10)
    return feature_column
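A short sketch of turning the embedding column above into a dense input tensor; the toy features dict is an assumption:

import tensorflow as tf
from tensorflow.contrib import layers

# Hypothetical usage: build the dense input layer from the gender embedding column.
gender_column = get_feature_column()
features = {'gender': tf.SparseTensor(indices=[[0, 0], [1, 0]],
                                      values=['male', 'female'],
                                      dense_shape=[2, 1])}
net = layers.input_from_feature_columns(features, [gender_column])  # shape [2, 10]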
Example #19
def build_estimator(model_dir, embedding_size=8, hidden_units=None):
    """Build a wide and deep model for predicting income category.

  Wide and deep models use deep neural nets to learn high level abstractions
  about complex features or interactions between such features.
  These models then combine the outputs from the DNN with a linear regression
  performed on simpler features. This provides a balance between power and
  speed that is effective on many structured data problems.

  You can read more about wide and deep models here:
  https://research.googleblog.com/2016/06/wide-deep-learning-better-together-with.html

  To define the model we can use the prebuilt DNNLinearCombinedClassifier class,
  and need only define the data transformations particular to our dataset, then
  assign these (potentially) transformed features to either the DNN or the
  linear portion of the model.

  Args:
    model_dir: str, the model directory used by the Classifier for checkpoints
      summaries and exports.
    embedding_size: int, the number of dimensions used to represent categorical
      features when providing them as inputs to the DNN.
    hidden_units: [int], the layer sizes of the DNN (input layer first)
  Returns:
    A DNNLinearCombinedClassifier
  """
    (actividad, anio, bueno, dia, lugar, mes, pais) = INPUT_COLUMNS

    # Reused Transformations.
    # Continuous columns can be converted to categorical via bucketization
    mes_bucket = layers.bucketized_column(
        mes, boundaries=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])

    # Wide columns and deep columns.
    wide_columns = [
        # Interactions between different categorical features can also
        # be added as new virtual features.
        layers.crossed_column([actividad, lugar], hash_bucket_size=int(1e4)),
        layers.crossed_column([actividad, mes_bucket],
                              hash_bucket_size=int(1e4)),
        layers.crossed_column([actividad, dia], hash_bucket_size=int(1e4)),
        layers.crossed_column([actividad, pais], hash_bucket_size=int(1e4)),
        actividad,
        dia,
        lugar,
        mes_bucket,
        pais,
    ]

    deep_columns = [
        layers.embedding_column(actividad, dimension=embedding_size),
        layers.embedding_column(lugar, dimension=embedding_size),
        layers.embedding_column(dia, dimension=embedding_size),
        layers.embedding_column(pais, dimension=embedding_size),
        anio,
        mes,
        bueno,
    ]

    return tf.contrib.learn.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=hidden_units or [100, 70, 50, 25])
Example #20
def parse_feature_columns_from_examples_test():
    """Construct examples by tf.train.Example.
     Then, parse feature columns from examples.
     Finally, get input from feature columns.

    Returns:
        The input tensor transformed from examples in defined feature columns
         format.
    """
    language_column = layers.sparse_column_with_hash_bucket(
        "language", hash_bucket_size=20)

    feature_columns = [
        layers.embedding_column(language_column, dimension=3),
        layers.real_valued_column("age", dtype=tf.int64)
    ]
    example1 = tf.train.Example(features=tf.train.Features(
        feature={
            "age":
            tf.train.Feature(int64_list=tf.train.Int64List(value=[18])),
            "language":
            tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"en"]))
        }))
    example2 = tf.train.Example(features=tf.train.Features(
        feature={
            "age":
            tf.train.Feature(int64_list=tf.train.Int64List(value=[20])),
            "language":
            tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"fr"]))
        }))
    example3 = tf.train.Example(features=tf.train.Features(
        feature={
            "age":
            tf.train.Feature(int64_list=tf.train.Int64List(value=[25])),
            "language":
            tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"en"]))
        }))
    examples = [
        example1.SerializeToString(),
        example2.SerializeToString(),
        example3.SerializeToString()
    ]
    print(examples)
    # feature_lists = tf.train.FeatureLists(
    #     feature_list={
    #         "age": tf.train.FeatureList(
    #             feature=[
    #                 tf.train.Feature(int64_list=tf.train.Int64List(value=[18])),
    #                 tf.train.Feature(int64_list=tf.train.Int64List(value=[20])),
    #                 tf.train.Feature(int64_list=tf.train.Int64List(value=[25])),
    #             ]
    #         ),
    #         "language": tf.train.FeatureList(
    #             feature=[
    #                 tf.train.Feature(bytes_list=tf.train.BytesList(value=[
    #                     b"en"])),
    #                 tf.train.Feature(bytes_list=tf.train.BytesList(value=[
    #                     b"fr"])),
    #                 tf.train.Feature(bytes_list=tf.train.BytesList(value=[
    #                     b"zh"]))
    #             ]
    #         )
    #     }
    # )
    # print(feature_lists)
    # serialized = feature_lists.SerializeToString()

    columns_to_tensor = layers.parse_feature_columns_from_examples(
        serialized=examples, feature_columns=feature_columns)
    input_layer = layers.input_from_feature_columns(
        columns_to_tensors=columns_to_tensor, feature_columns=feature_columns)
    print("input_layer:\n", str(input_layer))
    sess = tf.InteractiveSession()
    tf.initialize_all_variables().run(session=sess)
    print(input_layer.eval(session=sess))
Example #21
	return input_fn(data)	
	
def testing_fn():
	return input_fn(data_test)		

data["tmp"]= data["class"].apply(help)
data_test["tmp"]= data_test["class"].apply(help)

buying = layers.sparse_column_with_keys(column_name="buying", keys=["low", "med", "high", "vhigh"])
maint = layers.sparse_column_with_keys(column_name="maint", keys=["low", "med", "high", "vhigh"])
doors = layers.sparse_column_with_keys(column_name="doors", keys=["2", "3", "4", "5more"])
persons = layers.sparse_column_with_keys(column_name="persons", keys=["2", "4", "more"])
lug_boot = layers.sparse_column_with_keys(column_name="lug_boot", keys=["small", "med", "big"])
safety = layers.sparse_column_with_keys(column_name="safety", keys=["low", "med", "high"])

buying_emb = layers.embedding_column(buying, dimension=4)
maint_emb = layers.embedding_column(maint, dimension=4)
doors_emb = layers.embedding_column(doors, dimension=4)
persons_emb = layers.embedding_column(persons, dimension=3)
lug_boot_emb = layers.embedding_column(lug_boot, dimension=3)
safety_emb = layers.embedding_column(safety, dimension=3)

dnn_classifier = learn.DNNClassifier(
    feature_columns=[buying_emb, maint_emb, doors_emb, persons_emb, lug_boot_emb, safety_emb],
    hidden_units=[10], n_classes=4)
#dnn_classifier.fit(X_train, y_train, steps = 1000)
dnn_classifier.fit(input_fn=train_input_fn,steps=1000)

y_test = dnn_classifier.evaluate(input_fn=testing_fn,steps=1)

print("Precision=")
print(y_test['accuracy'])
Example #22
    def build_network_my(self,
                         num_factor=10,
                         num_factor_mlp=64,
                         hidden_dimension=10,
                         num_neg_sample=30):
        print("my network")
        self.num_neg_sample = num_neg_sample
        self.user_id = tf.placeholder(dtype=tf.string,
                                      shape=[None],
                                      name='user_id')
        self.item_id = tf.placeholder(dtype=tf.string,
                                      shape=[None],
                                      name='item_id')
        ##########################################################################
        self.target_item_id = tf.placeholder(dtype=tf.string,
                                             shape=[None],
                                             name='target_item_id')
        self.hot_item_id = tf.placeholder(dtype=tf.string,
                                          shape=[None],
                                          name='hot_item_id')
        self.long_item_id = tf.placeholder(dtype=tf.string,
                                           shape=[None],
                                           name='long_item_id')
        ###########################################################################
        self.y = tf.placeholder(dtype=tf.float32, shape=[None], name='y')
        self.par = tf.placeholder(dtype=tf.float32)
        ###################################################################################

        ##################################################################################
        a = {'user': self.user_id}
        b = {'item': self.item_id}
        c = {'item': self.target_item_id}
        d = {'user_low': self.user_id}
        e = {'item_low': self.item_id}
        f = {'item_low': self.target_item_id}
        h = {'item': self.hot_item_id}
        l = {'item': self.long_item_id}
        with tf.variable_scope(name_or_scope='embedding',
                               reuse=tf.AUTO_REUSE) as scope:
            id_feature1 = layers.sparse_column_with_hash_bucket(
                column_name='user',
                hash_bucket_size=190000
                # use_hashmap=use_hashmap
            )

            id_feature2 = layers.sparse_column_with_hash_bucket(
                column_name='item',
                hash_bucket_size=120000
                # use_hashmap=use_hashmap
            )

            shared_embedding_columns1 = layers.embedding_column(
                id_feature1, dimension=64, combiner="mean")
            #
            #
            shared_embedding_columns2 = layers.embedding_column(
                id_feature2, dimension=64, combiner="mean")
            a1 = []
            a1.append(shared_embedding_columns1)
            b1 = []
            b1.append(shared_embedding_columns2)
            #
            mlp_user_latent_factor = layers.input_from_feature_columns(
                a, a1, scope='user')
            mlp_item_latent_factor = layers.input_from_feature_columns(
                b, b1, scope='item')
            mlp_target_item_latent_factor = layers.input_from_feature_columns(
                c, b1, scope='item')
            #########################################################################################
            mlp_hot_item_latent_factor = layers.input_from_feature_columns(
                h, b1, scope='item')
            mlp_long_item_latent_factor = layers.input_from_feature_columns(
                l, b1, scope='item')
            #########################################################################################

            id_feature3 = layers.sparse_column_with_hash_bucket(
                column_name='user_low',
                hash_bucket_size=190000
                # use_hashmap=use_hashmap
            )

            id_feature4 = layers.sparse_column_with_hash_bucket(
                column_name='item_low',
                hash_bucket_size=120000
                # use_hashmap=use_hashmap
            )

            shared_embedding_columns3 = layers.embedding_column(
                id_feature3, dimension=10, combiner="mean")
            #
            #
            shared_embedding_columns4 = layers.embedding_column(
                id_feature4, dimension=10, combiner="mean")
            d1 = []
            d1.append(shared_embedding_columns3)
            e1 = []
            e1.append(shared_embedding_columns4)
            #
            user_latent_factor = layers.input_from_feature_columns(
                d, d1, scope='user_low')
            item_latent_factor = layers.input_from_feature_columns(
                e, e1, scope='item_low')
            target_item_latent_factor = layers.input_from_feature_columns(
                f, e1, scope='item_low')
        ###################################################################################

###################################################################################################

###################################################################################################
        GMF = tf.multiply(user_latent_factor, item_latent_factor)
        #####################################################################
        GMF_target = tf.multiply(user_latent_factor, target_item_latent_factor)
        #####################################################################
        user_feature = self.user_side(mlp_user_latent_factor)
        item_feature = self.item_side(mlp_item_latent_factor)
        #########################################################
        target_item_feature = self.item_side(mlp_target_item_latent_factor,
                                             reuse=True)

        hot_item_feature = self.item_side(mlp_hot_item_latent_factor,
                                          reuse=True)
        long_item_feature = self.item_side(mlp_long_item_latent_factor,
                                           reuse=True)
        #########################################################
        self.pair_loss = 0
        self.resort_item = []
        self.resort_label = []
        for i in range(0, self.batch_size):
            temp1 = []
            temp2 = []

            temp1.append(item_feature[i * self.batch_size:(i + 1) *
                                      self.batch_size, :])
            temp2.append(self.y[i * self.batch_size:(i + 1) * self.batch_size])
            self.resort_item.append(temp1)
            self.resort_label.append(temp2)
        discriminative_loss = []

        for i in range(0, self.batch_size):
            discriminative_loss.append(
                get_center_loss(tf.reshape(self.resort_item[i], (-1, 128)),
                                tf.reshape(self.resort_label[i], (-1, 1)), 2))

        for i in range(0, self.batch_size):
            self.pair_loss = self.pair_loss + discriminative_loss[
                i] / self.batch_size
        # #########################################################
        # self.userF=user_feature
        # self.itemF=item_feature
        # #########################################
        # # self.pred_y = tf.nn.sigmoid(
        # #     tf.reduce_sum( 5 * tf.multiply(user_feature, item_feature),1))
        # ########################################
        self.pred_y = tf.nn.sigmoid(
            tf.reduce_sum(
                tf.concat([GMF, 5 * tf.multiply(user_feature, item_feature)],
                          axis=1), 1))
        # self.pred_long=tf.nn.sigmoid(tf.reduce_sum(tf.concat([GMF,5*tf.multiply(user_feature, target_item_feature)], axis=1), 1))
        avg_GMF = tf.reduce_mean(GMF)
        # avg_GMF=tf.stop_gradient(tf.identity(tf.reduce_mean(GMF)))
        self.pred_long = tf.nn.sigmoid(avg_GMF + tf.reduce_sum(
            5 * tf.multiply(user_feature, target_item_feature), 1))
        # self.pred_y = tf.layers.dense(inputs=tf.concat([GMF, MLP], axis=1), units=1, activation=tf.sigmoid, kernel_initializer=tf.random_normal_initializer, kernel_regularizer= tf.contrib.layers.l2_regularizer(scale=self.reg_rate))

        #Pseudo label
        self.p1 = tf.reshape(
            tf.gather(
                self.pred_long,
                tf.reshape(tf.where(tf.less(self.pred_long, 0.2)), [
                    -1,
                ])), [-1, 1])
        self.p2 = tf.reshape(
            tf.gather(
                self.pred_long,
                tf.reshape(tf.where(tf.greater(self.pred_long, 0.8)), [
                    -1,
                ])), [-1, 1])
        self.tar1 = tf.maximum(
            0.0,
            tf.reduce_mean(-self.p1 * tf.log(
                tf.clip_by_value(self.p1, 0.005, 1))))  #/ self.batch_size
        self.tar2 = tf.maximum(
            0.0,
            tf.reduce_mean(-self.p2 * tf.log(
                tf.clip_by_value(self.p2, 0.005, 1))))  #/ self.batch_size
        self.pseudo_loss = self.tar1 + self.tar2
        # self.loss = - tf.reduce_sum(
        #     self.y * tf.log(self.pred_y + 1e-10) + (1 - self.y) * tf.log(1 - self.pred_y + 1e-10))

        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.pred_y,
                                                    labels=self.y))
        self.weight_loss = 0.01 * tf.losses.get_regularization_loss(
        )  #+ self.reg_rate * (
        # tf.nn.l2_loss(self.P) + tf.nn.l2_loss(self.Q) + tf.nn.l2_loss(self.mlp_P) + tf.nn.l2_loss(self.mlp_Q))
        # self.DAloss=tf.maximum(0.0001,KMMD(hot_item_feature,long_item_feature))
        self.DAloss = self.coral_loss(hot_item_feature, long_item_feature)
        # self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss)
        # self.total_loss=self.loss+self.weight_loss+100*self.DAloss
        # self.total_loss=self.loss+self.weight_loss+100*self.DAloss
        # self.total_loss = self.loss + self.weight_loss
        # self.total_loss=self.loss+self.weight_loss+100*self.DAloss+0.001*self.par*self.pseudo_loss+0.001*self.par*self.pair_loss
        self.total_loss = self.loss + self.weight_loss + self.A2C_weight * self.DAloss + self.pseudo_weight * self.par * self.pseudo_loss + self.center_weight * self.par * self.pair_loss
        self.optimizer = tf.train.AdamOptimizer(0.0001).minimize(
            self.total_loss)

        return self
Example #23
def build_estimator(model_dir, model_type):
    """Build an estimator."""
    # Sparse base columns.
    userID = layers.sparse_column_with_integerized_feature('userID', 2805118)
    creativeID = layers.sparse_column_with_integerized_feature(
        'creativeID', 6582)
    positionID = layers.sparse_column_with_integerized_feature(
        'positionID', 7645)
    adID = layers.sparse_column_with_integerized_feature('adID', 3616)
    camgaignID = layers.sparse_column_with_integerized_feature(
        'camgaignID', 720)
    advertiserID = layers.sparse_column_with_integerized_feature(
        'advertiserID', 91)
    appID = layers.sparse_column_with_integerized_feature('appID', 50)
    sitesetID = layers.sparse_column_with_integerized_feature('sitesetID', 3)
    appCategory = layers.sparse_column_with_integerized_feature(
        'appCategory', 14)
    appPlatform = layers.sparse_column_with_integerized_feature(
        'appPlatform', 2)
    education = layers.sparse_column_with_integerized_feature('education', 8)
    gender = layers.sparse_column_with_integerized_feature('gender', 3)
    haveBaby = layers.sparse_column_with_integerized_feature('haveBaby', 7)
    marriageStatus = layers.sparse_column_with_integerized_feature(
        'marriageStatus', 4)
    positionType = layers.sparse_column_with_integerized_feature(
        'positionType', 6)
    hometown_c = layers.sparse_column_with_integerized_feature(
        'hometown_c', 22)
    hometown_p = layers.sparse_column_with_integerized_feature(
        'hometown_p', 35)
    residence_c = layers.sparse_column_with_integerized_feature(
        'residence_c', 22)
    residence_p = layers.sparse_column_with_integerized_feature(
        'residence_p', 35)
    telecomsOperator = layers.sparse_column_with_integerized_feature(
        'telecomsOperator', 4)
    connectionType = layers.sparse_column_with_integerized_feature(
        'connectionType', 5)
    clickTime_week = layers.sparse_column_with_integerized_feature(
        'clickTime_week', 7)

    # Continuous base columns.
    age = layers.real_valued_column("age")
    inst_app_installed = layers.real_valued_column('inst_app_installed')
    inst_cate_percent = layers.real_valued_column('inst_cate_percent')
    inst_cnt_appcate = layers.real_valued_column('inst_cnt_appcate')
    inst_cnt_installed = layers.real_valued_column('inst_cnt_installed')
    inst_is_installed = layers.real_valued_column('inst_is_installed')
    action_cate = layers.real_valued_column('action_cate')
    action_cate_recent = layers.real_valued_column('action_cate_recent')
    action_installed = layers.real_valued_column('action_installed')
    tt_cnt_appcate = layers.real_valued_column('tt_cnt_appcate')
    tt_is_installed = layers.real_valued_column('tt_is_installed')
    clickTime_day = layers.real_valued_column('clickTime_day')
    clickTime_hour = layers.real_valued_column('clickTime_hour')
    clickTime_minute = layers.real_valued_column('clickTime_minute')

    # Transformations.
    age_buckets = layers.bucketized_column(
        age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    inst_app_installed_buckets = layers.bucketized_column(
        inst_app_installed,
        boundaries=[1000, 5000, 10000, 50000, 100000, 500000])
    clickTime_hour_buckets = layers.bucketized_column(
        clickTime_hour, boundaries=[8, 11, 14, 17, 19, 22])

    # Wide columns and deep columns.
    wide_columns = [
        userID,
        creativeID,
        positionID,
        adID,
        camgaignID,
        advertiserID,
        appID,
        sitesetID,
        appCategory,
        appPlatform,
        education,
        gender,
        haveBaby,
        marriageStatus,
        positionType,
        hometown_c,
        hometown_p,
        residence_c,
        residence_p,
        telecomsOperator,
        connectionType,
        clickTime_week,

        # layers.embedding_column(userID, dimension=8),
        # layers.embedding_column(creativeID, dimension=8),
        # layers.embedding_column(positionID, dimension=8),
        # layers.embedding_column(adID, dimension=8),
        # layers.embedding_column(camgaignID, dimension=8),
        # layers.embedding_column(advertiserID, dimension=8),
        # layers.embedding_column(appID, dimension=8),
        # layers.embedding_column(sitesetID, dimension=8),
        # layers.embedding_column(appCategory, dimension=8),
        # layers.embedding_column(appPlatform, dimension=8),
        # layers.embedding_column(education, dimension=8),
        # layers.embedding_column(gender, dimension=8),
        # layers.embedding_column(haveBaby, dimension=8),
        # layers.embedding_column(marriageStatus, dimension=8),
        # layers.embedding_column(positionType, dimension=8),
        # layers.embedding_column(hometown_c, dimension=8),
        # layers.embedding_column(hometown_p, dimension=8),
        # layers.embedding_column(residence_c, dimension=8),
        # layers.embedding_column(residence_p, dimension=8),
        # layers.embedding_column(telecomsOperator, dimension=8),
        # layers.embedding_column(connectionType, dimension=8),
        # layers.embedding_column(clickTime_week, dimension=8),
        # layers.one_hot_column(userID),
        # layers.one_hot_column(creativeID),
        # layers.one_hot_column(positionID),
        # layers.one_hot_column(adID),
        # layers.one_hot_column(camgaignID),
        # layers.one_hot_column(advertiserID),
        # layers.one_hot_column(appID),
        # layers.one_hot_column(sitesetID),
        # layers.one_hot_column(appCategory),
        # layers.one_hot_column(appPlatform),
        # layers.one_hot_column(education),
        # layers.one_hot_column(gender),
        # layers.one_hot_column(haveBaby),
        # layers.one_hot_column(marriageStatus),
        # layers.one_hot_column(positionType),
        # layers.one_hot_column(hometown_c),
        # layers.one_hot_column(hometown_p),
        # layers.one_hot_column(residence_c),
        # layers.one_hot_column(residence_p),
        # layers.one_hot_column(telecomsOperator),
        # layers.one_hot_column(connectionType),
        # layers.one_hot_column(clickTime_week),
        age_buckets,
        clickTime_hour_buckets,
        inst_app_installed_buckets,
    ]

    deep_columns = [
        layers.embedding_column(userID, dimension=8),
        layers.embedding_column(creativeID, dimension=8),
        layers.embedding_column(positionID, dimension=8),
        layers.embedding_column(adID, dimension=8),
        layers.embedding_column(camgaignID, dimension=8),
        layers.embedding_column(advertiserID, dimension=8),
        layers.embedding_column(appID, dimension=8),
        layers.embedding_column(sitesetID, dimension=8),
        layers.embedding_column(appCategory, dimension=8),
        layers.embedding_column(appPlatform, dimension=8),
        layers.embedding_column(education, dimension=8),
        layers.embedding_column(gender, dimension=8),
        layers.embedding_column(haveBaby, dimension=8),
        layers.embedding_column(marriageStatus, dimension=8),
        layers.embedding_column(positionType, dimension=8),
        layers.embedding_column(hometown_c, dimension=8),
        layers.embedding_column(hometown_p, dimension=8),
        layers.embedding_column(residence_c, dimension=8),
        layers.embedding_column(residence_p, dimension=8),
        layers.embedding_column(telecomsOperator, dimension=8),
        layers.embedding_column(connectionType, dimension=8),
        layers.embedding_column(clickTime_week, dimension=8),
        age,
        action_cate,
        action_cate_recent,
        action_installed,
        inst_app_installed,
        inst_cate_percent,
        inst_cnt_appcate,
        inst_cnt_installed,
        inst_is_installed,
        tt_cnt_appcate,
        tt_is_installed,
        clickTime_day,
        clickTime_hour,
        clickTime_minute,
    ]

    if model_type == "wide":
        m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
                                              feature_columns=wide_columns)
    elif model_type == "deep":
        m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
                                           feature_columns=deep_columns,
                                           hidden_units=[100, 50])
    else:
        m = tf.contrib.learn.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=wide_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[100, 50, 1],
            fix_global_step_increment_bug=True)
    return m
Example #24
median_distance_buckets = bucketized_column(median_distance, boundaries=[10,50,100,200,300])
population = real_valued_column("population")
population_buckets = bucketized_column(population, boundaries=[0, 1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000])
popularity = real_valued_column("popularity")
admin_level_x_median_distance = crossed_column([admin_level, median_distance_buckets], hash_bucket_size=int(1e4))
admin_level_x_cluster_frequency = crossed_column([admin_level, cluster_frequency_buckets], hash_bucket_size=int(1e4))
admin_level_x_country_code = crossed_column([admin_level, country_code], hash_bucket_size=int(1e4))
        
#number_of_other_points_in_country = sparse_column_with_hash_bucket("number_of_other_points_in_country", hash_bucket_size=1000)
#number_of_other_points_in_admin1 = sparse_column_with_hash_bucket("number_of_other_points_in_admin1", hash_bucket_size=1000)
#number_of_other_points_in_admin2 = sparse_column_with_hash_bucket("number_of_other_points_in_admin2", hash_bucket_size=1000)
#feature_columns = [admin_level, cluster_frequency_buckets, country_code, country_rank, edit_distance, is_lowest_admin_level, has_mpoly, has_pcode, matches_topic, median_distance, median_distance_buckets, population_buckets, popularity, admin_level_x_cluster_frequency, admin_level_x_country_code, admin_level_x_median_distance]
#print "feature_columns:", feature_columns
wide_columns = [admin_level, cluster_frequency_buckets, country_code, country_rank, edit_distance, is_country, is_highest_population, is_lowest_admin_level, has_mpoly, has_pcode, matches_topic, median_distance, median_distance_buckets, population_buckets, popularity, admin_level_x_cluster_frequency, admin_level_x_country_code, admin_level_x_median_distance]
deep_columns = [
    embedding_column(admin_level, dimension=8),
    cluster_frequency,
    cluster_frequency_buckets,
    embedding_column(country_code, dimension=8),
    country_rank,
    embedding_column(has_mpoly, dimension=8),
    embedding_column(has_pcode, dimension=8),
    embedding_column(is_country, dimension=8),
    embedding_column(is_lowest_admin_level, dimension=8),
    embedding_column(is_highest_population, dimension=8),
    median_distance_buckets,
    population_buckets,
    popularity
]

Example #25
def build_feature_cols():
  # Sparse base columns.
  gender = tf.contrib.layers.sparse_column_with_keys(
            column_name="gender",
            keys=["female", "male"])
  race = tf.contrib.layers.sparse_column_with_keys(
            column_name="race",
            keys=["Amer-Indian-Eskimo",
                  "Asian-Pac-Islander",
                  "Black", "Other",
                  "White"])

  education = tf.contrib.layers.sparse_column_with_hash_bucket(
      "education", hash_bucket_size=1000)
  marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
      "marital_status", hash_bucket_size=100)
  relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
      "relationship", hash_bucket_size=100)
  workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
      "workclass", hash_bucket_size=100)
  occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
      "occupation", hash_bucket_size=1000)
  native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
      "native_country", hash_bucket_size=1000)

  # Continuous base columns.
  age = real_valued_column("age")
  education_num = real_valued_column("education_num")
  capital_gain = real_valued_column("capital_gain")
  capital_loss = real_valued_column("capital_loss")
  hours_per_week = real_valued_column("hours_per_week")

  # Transformations.
  age_buckets = tf.contrib.layers.bucketized_column(
      age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
  education_occupation = tf.contrib.layers.crossed_column(
      [education, occupation], hash_bucket_size=int(1e4))
  age_race_occupation = tf.contrib.layers.crossed_column(
      [age_buckets, race, occupation], hash_bucket_size=int(1e6))
  country_occupation = tf.contrib.layers.crossed_column(
      [native_country, occupation], hash_bucket_size=int(1e4))

  # Wide columns and deep columns.
  wide_columns = [gender, native_country, education, 
                  occupation, workclass, race, 
                  marital_status, relationship, 
                  age_buckets,
                  education_occupation, 
                  age_race_occupation,
                  country_occupation]

  deep_columns = [
      embedding_column(gender, dimension=8),
      embedding_column(native_country, dimension=8),
      embedding_column(education, dimension=8),
      embedding_column(occupation, dimension=8),
      embedding_column(workclass, dimension=8),
      embedding_column(race, dimension=8),
      embedding_column(marital_status, dimension=8),
      embedding_column(relationship, dimension=8),
      embedding_column(age_buckets, dimension=8),
      embedding_column(education_occupation, dimension=8),
      embedding_column(age_race_occupation, dimension=8),
      embedding_column(country_occupation, dimension=8),
      age,
      education_num,
      capital_gain,
      capital_loss,
      hours_per_week,
  ]

  return wide_columns, deep_columns
for var in categorical_vars:
    le = LabelEncoder().fit(X_train[var])
    X_train[var + '_ids'] = le.transform(X_train[var])
    X_test[var + '_ids'] = le.transform(X_test[var])
    X_train.pop(var)
    X_test.pop(var)
    categorical_var_encoders[var] = le

### Note: Feature Columns currently (2016/10/22) not working, update is coming.
# Setup feature columns.
CATEGORICAL_EMBED_SIZE = 10  # Note, you can customize this per variable.
feature_columns = [layers.real_valued_column(var)
                   for var in continues_vars] + [
                       layers.embedding_column(
                           layers.sparse_column_with_integerized_feature(
                               var + '_ids',
                               len(categorical_var_encoders[var].classes_)),
                           CATEGORICAL_EMBED_SIZE) for var in categorical_vars
                   ]

# Linear classifier.
'''
random.seed(42)
tflr = learn.LinearClassifier(n_classes=2,
    feature_columns=feature_columns,
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05))
tflr.fit(input_fn=train_input_fn, steps=500)
print(list(tflr.predict(input_fn=test_input_fn, as_iterable=True)), y_test)
print(accuracy_score(y_test, list(tflr.predict(input_fn=test_input_fn, as_iterable=True))))
'''
    return examples


vocab = Vocab(base_dir + '/vocab.txt')
vocab_size = len(vocab)

if tf_layers:
    encode = layers.sparse_column_with_integerized_feature(
        'encode', bucket_size=vocab_size)
    decode_pre = layers.sparse_column_with_integerized_feature(
        'decode_pre', bucket_size=vocab_size)
    decode_post = layers.sparse_column_with_integerized_feature(
        'decode_post', bucket_size=vocab_size)
    features = {
        'encode': encode,
        'encode_emb': layers.embedding_column(encode, dimension=100),
        'decode_pre': layers.embedding_column(decode_pre, dimension=100),
        'decode_post': layers.embedding_column(decode_post, dimension=100),
    }
    features = tf.contrib.layers.create_feature_spec_for_parsing(features)
else:
    # This little dict seems equivalent to the way more verbose
    # tf.contrib.layers approach. But apparently the latter helps,
    # especially when it comes to TF Serving. Still waiting to see the benefit...
    features = {
        "encode": tf.VarLenFeature(dtype=tf.int64),
        "decode_pre": tf.VarLenFeature(dtype=tf.int64),
        "decode_post": tf.VarLenFeature(dtype=tf.int64),
    }
i = get_input(base_dir + '/validation-00000-of-00001', features)