Example #1
def addf_wide_and_deep_Regression(inputs, addfScore_feature_columns, dnn_feature_columns, dnn_hidden_units):
    deep = layers.DenseFeatures(
        dnn_feature_columns, name='deep_inputs')(inputs)
    for layerno, numnodes in enumerate(dnn_hidden_units):
        deep = layers.Dense(numnodes, activation='relu',
                            name='dnn_{}'.format(layerno+1))(deep)
    addf = layers.DenseFeatures(
        addfScore_feature_columns, name='addf_inputs')(inputs)
    addf_deep = layers.concatenate([deep, addf], name='addf_deep')
    output_addf = layers.Dense(
        1, activation='relu', name='pred_addf')(addf_deep)
    model = Model(inputs, outputs=[output_addf])
    model.compile(optimizer='rmsprop', loss='mse', metrics=[
                  tf.keras.metrics.RootMeanSquaredError(), 'mae', custom_mse])
    return model
def create_keras_model(learning_rate=0.001):
    wide, deep, inputs = get_wide_deep()
    feature_layer_wide = layers.DenseFeatures(wide, name='wide_features')
    feature_layer_deep = layers.DenseFeatures(deep, name='deep_features')

    wide_model = feature_layer_wide(inputs)

    deep_model = layers.Dense(64, activation='relu', name='DNN_layer1')(feature_layer_deep(inputs))
    deep_model = layers.Dense(32, activation='relu', name='DNN_layer2')(deep_model)

    wide_deep_model = layers.Dense(1, name='weight')(layers.concatenate([wide_model, deep_model]))
    model = models.Model(inputs=inputs, outputs=wide_deep_model)

    # Compile Keras model
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate))
    return model
def run_algorithm(run_params):

    # train_df, train_df_norm, test_df, test_df_norm = data_prep.prepare_data(run_params.training_years, run_params.testing_years, run_params.hour_resolution, True)

    data_df, train_mean, train_std = data_prep.prepare_linear_timeseries_data(
        run_params)

    n = len(data_df)
    train_df = data_df[int(n * 0.05):int(n * 0.8)]
    test_df = data_df[int(n * 0.8):]

    feature_columns = data_prep.add_feature_columns(train_df)

    feature_layer = layers.DenseFeatures(feature_columns)

    out = DataFrame()

    train_df.pop('datetime')
    out['datetime'] = test_df.pop('datetime')
    out[run_params.predicted_label] = test_df[
        run_params.predicted_label] * train_std + train_mean
    # out[run_params.predicted_label] = test_df[run_params.predicted_label]
    my_model = get_trained_model(train_df, test_df, feature_layer,
                                 run_params.predicted_label, run_params)
    print(my_model.trainable_variables)
    print("Train mean: {}".format(train_mean))
    print("Train std: {}".format(train_std))
    test_df.sort_values("index", inplace=True)
    test_features = {name: np.array(value) for name, value in test_df.items()}
    out[run_params.predicted_label + '_prediction'] = my_model.predict(
        test_features) * train_std + train_mean
    # out[run_params.predicted_label + '_prediction'] = my_model.predict(test_features)

    return out
Example #4
def build_dnn_model(nbuckets, nnsize, lr):
    # input layer is all float except for pickup_datetime which is a string
    STRING_COLS = ['pickup_datetime']
    NUMERIC_COLS = (set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) -
                    set(STRING_COLS))
    inputs = {
        colname: layers.Input(name=colname, shape=(), dtype='float32')
        for colname in NUMERIC_COLS
    }
    inputs.update({
        colname: layers.Input(name=colname, shape=(), dtype='string')
        for colname in STRING_COLS
    })

    # transforms
    transformed, feature_columns = transform(inputs,
                                             NUMERIC_COLS,
                                             STRING_COLS,
                                             nbuckets=nbuckets)
    dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)

    x = dnn_inputs
    for layer, nodes in enumerate(nnsize):
        x = layers.Dense(nodes, activation='relu', name='h{}'.format(layer))(x)
    output = layers.Dense(1, name='fare')(x)

    model = models.Model(inputs, output)
    lr_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    model.compile(optimizer=lr_optimizer, loss='mse', metrics=[rmse, 'mse'])

    return model
Example #5
def TrainTestNet(batch_size, train_size, train_batch, valid_size, valid_batch,
                 test_size, test_batch):
    Initialise()
    train_df, validation_df, test_df = ProcessData(train_size, train_batch,
                                                   valid_size, valid_batch,
                                                   test_size, test_batch)

    feature_columns = []

    # Create a numeric feature column for each of the matrix-element input features.
    for j in ['A_00R','A_00I','A_01R','A_01I','A_02R','A_02I','A_03R','A_03I',\
              'A_10R','A_10I','A_11R','A_11I','A_12R','A_12I','A_13R','A_13I',\
              'A_20R','A_20I','A_21R','A_21I','A_22R','A_22I','A_23R','A_23I',\
              'A_30R','A_30I','A_31R','A_31I','A_32R','A_32I','A_33R','A_33I',\
              'B_00R','B_00I','B_01R','B_01I','B_02R','B_02I','B_03R','B_03I',\
              'B_10R','B_10I','B_11R','B_11I','B_12R','B_12I','B_13R','B_13I',\
              'B_20R','B_20I','B_21R','B_21I','B_22R','B_22I','B_23R','B_23I',\
              'B_30R','B_30I','B_31R','B_31I','B_32R','B_32I','B_33R','B_33I']:
        a = tf.feature_column.numeric_column(j)
        feature_columns.append(a)

    # Convert the list of feature columns into a layer that will later be fed into
    # the model.
    feature_layer = layers.DenseFeatures(feature_columns)

    # The following variables are the hyperparameters.
    learning_rate = 0.00045
    epochs = 70

    # Specify the label
    label_name = "Fidelity"

    # Establish the model's topography.
    my_model = create_model(learning_rate, feature_layer)

    # Train the model on the normalized training set. We're passing the entire
    # normalized training set, but the model will only use the features
    # defined by the feature_layer.
    epochs, rmse, history = train_model(my_model, train_df, validation_df,
                                        epochs, label_name, batch_size)
    #plot_the_loss_curve(epochs, mse)

    # After building a model against the training set, test that model
    # against the test set.
    test_features = {name: np.array(value) for name, value in test_df.items()}
    test_label = np.array(test_features.pop(label_name))  # isolate the label

    print("\n Evaluate the new model against the test set:")

    #his = my_model.evaluate(x = test_features, y = test_label, batch_size=1)

    #my_model.save("modelStorage/alpha")

    test_features = test_df.drop(["Fidelity"], axis=1)

    mae_history = GetTestLosses(test_label, test_features, my_model,
                                validation_df)

    return np.mean(mae_history)
Example #6
    def data_preprocessing(self):
        """
        batch_size = 5  # Use a small batch size for this example.
        train_ds = self.df_to_dataset(self.train, batch_size=batch_size)
        val_ds = self.df_to_dataset(self.val, shuffle=False, batch_size=batch_size)
        test_ds = self.df_to_dataset(self.test, shuffle=False, batch_size=batch_size)

        for feature_batch, label_batch in train_ds.take(1):
            print('All features:', list(feature_batch.keys()))
            print('A batch of "age" values:', feature_batch['age'])
            print('A batch of targets:', label_batch)

        # Create a sample batch to try out the feature columns.
        self.example_batch = next(iter(train_ds))[0]

        age = feature_column.numeric_column("age")
        self.demo(age)
        """
        feature_columns = []

        # Numeric columns
        for header in [
                'age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca'
        ]:
            feature_columns.append(feature_column.numeric_column(header))

        # Bucketized columns
        age = feature_column.numeric_column("age")
        age_buckets = feature_column.bucketized_column(
            age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
        feature_columns.append(age_buckets)

        # Categorical columns
        thal = feature_column.categorical_column_with_vocabulary_list(
            'thal', ['fixed', 'normal', 'reversible'])
        thal_one_hot = feature_column.indicator_column(thal)
        feature_columns.append(thal_one_hot)

        # Embedding columns
        thal_embedding = feature_column.embedding_column(thal, dimension=8)
        feature_columns.append(thal_embedding)

        # Crossed feature columns
        crossed_feature = feature_column.crossed_column([age_buckets, thal],
                                                        hash_bucket_size=1000)
        crossed_feature = feature_column.indicator_column(crossed_feature)
        feature_columns.append(crossed_feature)

        self.feature_layer = layers.DenseFeatures(feature_columns)

        batch_size = 32
        self.train_ds = self.df_to_dataset(self.train, batch_size=batch_size)
        self.val_ds = self.df_to_dataset(self.val,
                                         shuffle=False,
                                         batch_size=batch_size)
        self.test_ds = self.df_to_dataset(self.test,
                                          shuffle=False,
                                          batch_size=batch_size)
def create_feature_layer(feature_columns):
    # Convert the list of feature columns into a layer that will later be fed into
    # the model.
    feature_layer = layers.DenseFeatures(feature_columns)

    # Print the first 3 and last 3 rows of the feature_layer's output when applied
    # to train_df_norm:
    #print(feature_layer(dict(train_df_norm)))

    return feature_layer
Example #8
        def _get_dense_feature(inputs, feature, shape=(1, )):
            """
            Convert an input into a dense numeric feature

            NOTE: Can remove this in the future and
                  pass inputs[feature] directly
            """
            feature_col = feature_column.numeric_column(feature, shape=shape)
            dense_feature = layers.DenseFeatures(feature_col)(inputs)
            return dense_feature
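A minimal standalone sketch of the same pattern, assuming a functional-style Keras inputs dict; the "fare" input name is made up for illustration:

import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers

# Hypothetical scalar float input, keyed by the same name as the feature column.
inputs = {"fare": layers.Input(name="fare", shape=(), dtype="float32")}

# The same two steps as _get_dense_feature: a numeric column, then DenseFeatures.
fare_col = feature_column.numeric_column("fare", shape=(1,))
dense_fare = layers.DenseFeatures(fare_col)(inputs)  # tensor of shape (batch, 1)

model = tf.keras.Model(inputs, layers.Dense(1)(dense_fare))
model.summary()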
Example #9
    def __init__(self, feature_cols):
        super(KerasModel, self).__init__()
        self.feature_layer = layers.DenseFeatures(feature_cols)
        self.dense_1 = layers.Dense(128, activation='relu')
        self.dense_2 = layers.Dense(128, activation='relu')
        self.dropout = layers.Dropout(.1)
        self.final = layers.Dense(1)

        self.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
                     loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                     metrics=['accuracy'])
Example #10
def create_crossed_feature(feature_columns):
    latitude = feature_columns[0]
    longitude = feature_columns[1]

    # Create a feature cross of latitude and longitude.
    latitude_x_longitude = tf.feature_column.crossed_column([latitude, longitude], hash_bucket_size=100)
    crossed_feature = tf.feature_column.indicator_column(latitude_x_longitude)
    feature_columns.append(crossed_feature)

    # Convert the list of feature columns into a layer that will later be fed into
    # the model.
    feature_cross_feature_layer = layers.DenseFeatures(feature_columns)

    return feature_cross_feature_layer
Example #11
def categorical_embedding_with_indices(feature_tensor, feature_info, file_io: FileIO):
    """
    Converts input integer tensor into categorical embedding.
    Works by converting the categorical indices in the input feature_tensor,
    represented as integer values, into categorical embeddings based on the feature_info.

    Parameters
    ----------
    feature_tensor : Tensor object
        int feature tensor
    feature_info : dict
        Dictionary representing the configuration parameters for the specific feature from the FeatureConfig
    file_io : FileIO object
        FileIO handler object for reading and writing

    Returns
    -------
    Tensor object
        categorical embedding for the input feature_tensor

    Notes
    -----
    Args under feature_layer_info:
        num_buckets : int
            Maximum number of categorical values
        default_value : int
            default value to be assigned to indices out of the num_buckets range
        embedding_size : int
            dimension size of the categorical embedding

    String based categorical features should already be converted into numeric indices
    """
    feature_layer_info = feature_info.get("feature_layer_info")

    categorical_fc = feature_column.categorical_column_with_identity(
        CATEGORICAL_VARIABLE,
        num_buckets=feature_layer_info["args"]["num_buckets"],
        default_value=feature_layer_info["args"].get("default_value", None),
    )
    embedding_fc = feature_column.embedding_column(
        categorical_fc, dimension=feature_layer_info["args"]["embedding_size"], trainable=True
    )

    embedding = layers.DenseFeatures(
        embedding_fc,
        name="{}_embedding".format(feature_info.get("node_name", feature_info["name"])),
    )({CATEGORICAL_VARIABLE: feature_tensor})
    embedding = tf.expand_dims(embedding, axis=1)

    return embedding
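A minimal sketch of calling this helper; the feature_info dict below is a hypothetical configuration that simply follows the num_buckets / default_value / embedding_size args described in the docstring above (file_io is not used on this code path, so None is passed):

import tensorflow as tf

# Hypothetical FeatureConfig entry for an integer-indexed categorical feature.
feature_info = {
    "name": "category_id",
    "feature_layer_info": {
        "args": {
            "num_buckets": 100,    # valid indices are 0..99
            "default_value": 0,    # out-of-range indices fall back to index 0
            "embedding_size": 16,  # dimension of the learned embedding
        }
    },
}

# Batch of 4 examples, one categorical index each (already numeric, as required).
feature_tensor = tf.constant([[3], [17], [42], [250]], dtype=tf.int64)

embedding = categorical_embedding_with_indices(feature_tensor, feature_info, file_io=None)
print(embedding.shape)  # (4, 1, 16) after the expand_dims above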
Example #12
File: dssm.py  Project: shboy/game_contest
def build_tower(features, group, model, params, training):
    #     group_features = {}
    #     for feature_name, feature in params["feature_table"].items():
    #         if group == feature.group:
    #             group_features[feature_name] = features[feature_name]

    columns = params[
        "user_context_columns"] if group == "USER_CONTEXT" else params[
            "item_columns"]
    feature_layer = layers.DenseFeatures(columns)
    emb_input = feature_layer(features)
    logging.info("%s input dim: %d", group, emb_input.shape[-1])
    output = model(emb_input, training)
    return output
def _create_model_for_dict_mapping():
    model = tf.keras.Sequential()
    model.add(
        layers.DenseFeatures([
            tf.feature_column.numeric_column("a"),
            tf.feature_column.numeric_column("b"),
            tf.feature_column.numeric_column("c"),
            tf.feature_column.numeric_column("d"),
        ]))
    model.add(layers.Dense(16, activation="relu", input_shape=(4, )))
    model.add(layers.Dense(3, activation="softmax"))

    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
Example #14
def create_model(_feature_columns):
    _feature_layer = layers.DenseFeatures(_feature_columns)

    _model = tf.keras.models.Sequential([
        _feature_layer,
        layers.Dense(100, activation='relu'),
        layers.Dense(100, activation='relu'),
        layers.Dense(40, activation='softmax')
    ])

    _model.compile(
        optimizer='adam',
        # The final layer applies softmax, so the outputs are probabilities, not logits.
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
        metrics=[tf.metrics.CategoricalCrossentropy()])

    return _model
Example #15
    def __init__(self, features):
        """Constructor for classifier.

    Args:
        features: a list of tf.feature_columns

    Returns:
        None
    """
        super(FairClassifier, self).__init__()
        self.d1 = layers.Dense(128, activation='relu', name='dense_1')
        self.d2 = layers.Dense(64, activation='relu')
        self.d3 = layers.Dense(32, activation='relu')
        self.dropout = layers.Dropout(0.2)
        self.o = layers.Dense(1, activation='sigmoid')
        self.feature_layer = layers.DenseFeatures(features)
Example #16
def build_dnn_model():

    NUM_COLS = [
        'pickuplon',
        'pickuplat',
        'dropofflon',
        'dropofflat',
    ]

    CAT_COLS = [
        'hourofday',
        'dayofweek',
    ]

    inputs = {
        colname: layers.Input(name=colname, shape=(), dtype='float32')
        for colname in NUM_COLS
    }
    inputs.update({
        colname: layers.Input(name=colname, shape=(), dtype='int32')
        for colname in CAT_COLS
    })

    # transforms
    transformed, feature_columns = transform(inputs,
                                             num_cols=NUM_COLS,
                                             cat_cols=CAT_COLS)

    dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)

    # two hidden layers of [32, 8], just like the BQML DNN
    h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
    h2 = layers.Dense(8, activation='relu', name='h2')(h1)

    # final output is a linear activation because this is regression
    output = layers.Dense(1, activation='linear', name='fare')(h2)
    model = models.Model(inputs, output)

    # Compile model
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['RootMeanSquaredError'])
    return model
Example #17
def create_bucket_features(train_df, resolution_in_degrees):

    '''
    Each bin represents all the neighborhoods within a single degree.
    For example, neighborhoods at latitude 35.4 and 35.8 are in the same bucket,
    but neighborhoods at latitudes 35.4 and 36.2 are in different buckets.

    The model will learn a separate weight for each bucket.
    For example, the model will learn one weight for all the neighborhoods in the "35" bin,
    a different weight for neighborhoods in the "36" bin, and so on.
    This representation will create approximately 20 buckets:

        10 buckets for latitude.
        10 buckets for longitude.
    '''

    # Create a new empty list that will eventually hold the generated feature columns.
    feature_columns = []

    # Create a bucket feature column for latitude.
    latitude_as_a_numeric_column = tf.feature_column.numeric_column("latitude")
    latitude_boundaries = list(np.arange(int(min(train_df['latitude'])),
                                         int(max(train_df['latitude'])),
                                         resolution_in_degrees))
    latitude = tf.feature_column.bucketized_column(latitude_as_a_numeric_column,
                                                   latitude_boundaries)
    feature_columns.append(latitude)

    # Create a bucket feature column for longitude.
    longitude_as_a_numeric_column = tf.feature_column.numeric_column("longitude")
    longitude_boundaries = list(np.arange(int(min(train_df['longitude'])),
                                          int(max(train_df['longitude'])),
                                          resolution_in_degrees))
    longitude = tf.feature_column.bucketized_column(longitude_as_a_numeric_column,
                                                    longitude_boundaries)
    feature_columns.append(longitude)

    # Convert the list of feature columns into a layer that will ultimately become
    # part of the model. Understanding layers is not important right now.
    buckets_feature_layer = layers.DenseFeatures(feature_columns)

    return feature_columns, buckets_feature_layer
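A small worked example of the bucketing described above, assuming a toy train_df with latitudes roughly in [32, 42] and longitudes roughly in [-124, -114]; all values are illustrative:

import numpy as np
import pandas as pd

# Toy data standing in for the real training DataFrame.
train_df = pd.DataFrame({
    "latitude": np.random.uniform(32.0, 42.0, size=100),
    "longitude": np.random.uniform(-124.0, -114.0, size=100),
})

# With a 1-degree resolution this yields roughly 10 latitude buckets and
# 10 longitude buckets; e.g. latitudes 35.4 and 35.8 share a bucket, 35.4 and 36.2 do not.
feature_columns, buckets_feature_layer = create_bucket_features(
    train_df, resolution_in_degrees=1.0)

features = {name: np.array(train_df[name]) for name in train_df.columns}
print(buckets_feature_layer(features).shape)  # (100, ~20); the one-hot widths depend on the boundaries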
Example #18
def create_model(my_learning_rate,
                 feature_column_names,
                 neural_network_structure,
                 dropout_rate=0.2):
    feature_columns = []
    for feature_column_name in feature_column_names:
        feature = tf.feature_column.numeric_column(feature_column_name)
        feature_columns.append(feature)

    feature_layer = layers.DenseFeatures(feature_columns)

    model = tf.keras.models.Sequential()
    model.add(feature_layer)
    model.add(layers.BatchNormalization())

    index = 0
    for number_of_nodes in neural_network_structure:
        index = index + 1
        if index == 2 and dropout_rate > 0:
            print('adding dropout layer', dropout_rate)
            model.add(tf.keras.layers.Dropout(rate=dropout_rate))
        layer_name = 'Hidden' + str(index)
        print('adding hidden layer', layer_name, 'with', number_of_nodes,
              'nodes')
        model.add(
            tf.keras.layers.Dense(
                units=number_of_nodes,
                activation='relu',
                kernel_regularizer=tf.keras.regularizers.l2(l=0.01),
                name=layer_name))

    model.add(tf.keras.layers.Dense(units=1, name='Output'))

    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=my_learning_rate),
                  loss="mean_squared_error",
                  metrics=[tf.keras.metrics.RootMeanSquaredError()])

    return model
def do_research(run_params):

    output_file_path = 'C:\\Users\\sliwk\\Downloads\\Dane\\Output\\' + run_params.hour_resolution + 'h out.csv'
    file = open(output_file_path, "a")
    file.close()

    train_df, train_df_norm, test_df, test_df_norm = data_prep.prepare_data(
        run_params.training_years, run_params.testing_years,
        run_params.hour_resolution, True)

    feature_columns = data_prep.add_feature_columns(train_df)

    feature_layer = layers.DenseFeatures(feature_columns)

    my_model = get_trained_model(train_df, test_df, feature_layer,
                                 run_params.predicted_label, run_params)

    research_df = data_prep.prepare_research_data(test_df)

    counter = 0

    for record in research_df.index:
        if counter < 2:
            counter = counter + 1
            continue
        data_prep.add_past_one_record(counter, research_df,
                                      run_params.hour_resolution)
        lol = research_df.loc[counter:counter]
        test_features = {name: np.array(value) for name, value in lol.items()}
        prediction = my_model.predict(test_features)
        value = prediction.item(0)
        research_df.loc[counter, run_params.predicted_label] = value
        counter = counter + 1

    research_df.sort_values("index", inplace=True)

    return research_df
Example #20
def model(train, val, test, columns):
    "Building, compiling, fitting, and evaluating model"
    feature_columns = []
    for header in columns[0:len(columns) - 1]:
        feature_columns.append(feature_column.numeric_column(header))
    feature_layer = layers.DenseFeatures(feature_columns)

    model = tf.keras.Sequential([
        feature_layer,
        layers.Dense(64, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(64, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(1, activation='sigmoid')
    ])

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(train, validation_data=val, epochs=7)

    loss, accuracy = model.evaluate(test)
    print("Accuracy", accuracy)
Example #21
 def __init__(self,
              cross_layer_num,
              deep_layer_num,
              deep_layer_dim,
              batch_size,
              feature_columns,
              embed_dim,
              num_classes=10,
              **kwargs):
     super(CrossDeep, self).__init__(name='CrossDeep')
     self.cross_layer_num = cross_layer_num
     self.dense_feature = layers.DenseFeatures(feature_columns)
     self.deep_layer_num = deep_layer_num
     self.batch_size = batch_size
     self.num_classes = num_classes
     self.deep_layer_dim = deep_layer_dim
     self.embed_dim = embed_dim
     self.inputs_shape = 5 * self.embed_dim + 1
     self.W = []
     self.bias = []
     self.dense_list = []
     #         self.dense = layers.Dense(self.num_classes,activation='sigmoid')
     self.softmax = layers.Softmax(input_shape=(449, ))
     for i in range(self.deep_layer_num):
         self.dense_list.append(
             layers.Dense(self.deep_layer_dim, activation='relu'))
     b_init = tf.zeros_initializer()
     w_init = tf.random_normal_initializer()
     for i in range(self.cross_layer_num):
         self.W.append(
             tf.Variable(initial_value=w_init(shape=(self.batch_size,
                                                     self.inputs_shape)),
                         trainable=True))
         self.bias.append(
             tf.Variable(initial_value=b_init(shape=(self.batch_size, 1)),
                         trainable=True))
Example #22
def categorical_indicator_with_vocabulary_file(feature_tensor, feature_info,
                                               file_io: FileIO):
    """
    Converts a string tensor into a categorical one-hot representation.
    Works by using a vocabulary file to convert the string tensor into categorical indices
    and then converting the categories into one-hot representation.

    Args:
        feature_tensor: String feature tensor
        feature_info: Dictionary representing the configuration parameters for the specific feature from the FeatureConfig

    Returns:
        Categorical one-hot representation of input feature_tensor

    Args under feature_layer_info:
        vocabulary_file: string; path to vocabulary CSV file for the input tensor containing the vocabulary to look-up.
                        uses the "key" named column as vocabulary of the 1st column if no "key" column present.
        max_length: int; max number of rows to consider from the vocabulary file.
                        if null, considers the entire file vocabulary.
        num_oov_buckets: int - optional; number of out of vocabulary buckets/slots to be used to
                         encode strings into categorical indices. If not specified, the default is 1.

    NOTE:
    The vocabulary CSV file must contain two columns - key, id -
    where each key is mapped to one id, resulting in a
    many-to-one vocabulary mapping.
    If the id field is absent, a unique whole-number id is assigned by default,
    resulting in a one-to-one mapping.
    """
    #
    ##########################################################################
    #
    # NOTE:
    # There is currently a bug[1] when saving a Keras model that uses
    # feature_column.categorical_column_with_vocabulary_list.
    # We are tracking the issue and should be able to upgrade to the
    # latest stable release (2.2.0) to test.
    #
    # Can not use TF2.1.0 due to issue[2] regarding saving Keras models with
    # custom loss, metric layers
    #
    # Can not use TF2.2.0 due to issues[3, 4] regarding incompatibility of
    # Keras Functional API models and Tensorflow
    #
    # References:
    # [1] https://github.com/tensorflow/tensorflow/issues/31686
    # [2] https://github.com/tensorflow/tensorflow/issues/36954
    # [3] https://github.com/tensorflow/probability/issues/519
    # [4] https://github.com/tensorflow/tensorflow/issues/35138
    #
    # CATEGORICAL_VARIABLE = "categorical_variable"
    # categorical_fc = feature_column.categorical_column_with_vocabulary_list(
    #     CATEGORICAL_VARIABLE,
    #     vocabulary_list=vocabulary_list,
    #     default_value=feature_layer_info["args"].get("default_value", -1),
    #     num_oov_buckets=feature_layer_info["args"].get("num_oov_buckets", 0),
    # )
    #
    # indicator_fc = feature_column.indicator_column(categorical_fc)
    #
    # categorical_one_hot = layers.DenseFeatures(
    #     indicator_fc,
    #     name="{}_one_hot".format(feature_info.get("node_name", feature_info["name"])),
    # )({CATEGORICAL_VARIABLE: feature_tensor})
    # categorical_one_hot = tf.expand_dims(categorical_one_hot, axis=1)
    #
    ##########################################################################
    #
    feature_tensor_indices, vocabulary_keys, num_oov_buckets = categorical_indices_from_vocabulary_file(
        feature_info, feature_tensor, file_io)

    vocabulary_size = len(set(vocabulary_keys))

    categorical_identity_fc = feature_column.categorical_column_with_identity(
        CATEGORICAL_VARIABLE, num_buckets=vocabulary_size + num_oov_buckets)
    indicator_fc = feature_column.indicator_column(categorical_identity_fc)

    categorical_one_hot = layers.DenseFeatures(
        indicator_fc,
        name="{}_one_hot".format(
            feature_info.get("node_name", feature_info["name"])),
    )({
        CATEGORICAL_VARIABLE: feature_tensor_indices
    })
    categorical_one_hot = tf.expand_dims(categorical_one_hot, axis=1)

    return categorical_one_hot
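A sketch of the vocabulary CSV format described in the notes above; the file name, keys, and ids are made up for illustration:

# vocabulary.csv (hypothetical): each "key" is mapped to an "id"; domain1 and
# domain2 share id 0, giving a many-to-one vocabulary mapping. Without the id
# column, each key would get its own sequential id (one-to-one mapping).
#
#   key,id
#   domain1,0
#   domain2,0
#   domain3,1
#   domain4,2

# Hypothetical FeatureConfig entry wiring that file into this feature layer.
feature_info = {
    "name": "domain_name",
    "feature_layer_info": {
        "args": {
            "vocabulary_file": "vocabulary.csv",  # illustrative path
            "max_length": None,                   # consider the whole file
            "num_oov_buckets": 1,                 # the default if unspecified
        }
    },
}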
Example #23
 def transform_output(self, featureColumn):
     feature_layer = layers.DenseFeatures(featureColumn)
     example = next(iter(self.exampleData))[0]
     log(feature_layer(example).numpy())
Example #24
    input_fn=lambda: input_fun(test_data, label_column, training=False))

print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

# Build a Keras model to compare computation time
feature_column_list = []
for feature in feature_column:
    feature_column_list.append(tf.feature_column.numeric_column(key=feature))

#The output of tf.data.experimental.make_csv_dataset has two parts:
#the first is a dict keyed by each input column name,
#the second is an eager_tensor containing the labels for the batch of input records.
#DenseFeatures is needed to pull the feature data out of that dict.
#
model = Sequential([
    layers.DenseFeatures(feature_column_list),
    layers.Dense(5, 'relu'),
    layers.Dense(3, 'relu'),
    layers.Dense(3, 'softmax')
])

input_layer_dict = {}
for feature in feature_column:
    input_layer_dict[feature] = Input(shape=(1, ), name=feature)

input_layer = layers.DenseFeatures(feature_column_list)(input_layer_dict)
layer = layers.Dense(5, 'relu')(input_layer)
layer = layers.Dense(3, 'relu')(layer)
output_layer = layers.Dense(3, 'softmax')(layer)

model = Model(input_layer_dict, output_layer)
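A minimal sketch of the structure described in the comments above, assuming a dataset built with tf.data.experimental.make_csv_dataset; the file path and label column name are placeholders:

import tensorflow as tf

# Each element of the dataset is a (features_dict, labels) pair.
dataset = tf.data.experimental.make_csv_dataset(
    "data.csv",          # placeholder path
    batch_size=32,
    label_name="label",  # placeholder label column
    num_epochs=1,
)

for features, labels in dataset.take(1):
    print(list(features.keys()))  # dict keyed by each input column name
    print(labels.shape)           # eager tensor of labels for the batch

# Because the features arrive as a dict, the DenseFeatures layer in the models
# above is what maps them into a single dense tensor for the Dense layers.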
Example #25
# Create a feature cross of latitude and longitude.
latitude_x_longitude = tf.feature_column.crossed_column([latitude, longitude],
                                                        hash_bucket_size=200)
crossed_feature = tf.feature_column.indicator_column(latitude_x_longitude)
feature_columns.append(crossed_feature)

# Turn the rest of the useful columns into features and append them to feature_columns
day_diff_numeric_column = tf.feature_column.numeric_column("DAY_DIFF")
feature_columns.append(day_diff_numeric_column)

fire_size_numeric_column = tf.feature_column.numeric_column("FIRE_SIZE")
feature_columns.append(fire_size_numeric_column)

# Convert the list of feature columns into a layer that will later be fed into
# the model.
features_layer = layers.DenseFeatures(feature_columns)


def create_model(my_learning_rate, my_feature_layer):
    """Create and compile a deep neural net."""
    model = tf.keras.models.Sequential()
    #    model.add(my_feature_layer)
    model.add(
        tf.keras.layers.Dense(units=100, activation='relu', input_shape=(4, )))
    model.add(tf.keras.layers.Dropout(rate=0.2))
    model.add(tf.keras.layers.Dense(units=50, activation='relu'))
    model.add(tf.keras.layers.Dropout(rate=0.1))
    model.add(tf.keras.layers.Dense(units=13, activation='softmax'))

    model.compile(loss=loss_function,
                  optimizer=optimizer,
Example #26
def demo_numeric(feature_column):
    global a
    feature_layer = layers.DenseFeatures(feature_column)
    print(feature_layer(a).numpy())
Example #27
def demo_emb(feature_column):
    global c
    feature_layer = layers.DenseFeatures(feature_column)
    print(feature_layer(c).numpy())
Example #28
longitude_boundaries = list(
    np.arange(int(min(train_df['longitude'])), int(max(train_df['longitude'])),
              resolution_in_degrees))
longitude = tf.feature_column.bucketized_column(longitude_as_a_numeric_column,
                                                longitude_boundaries)
feature_columns.append(longitude)

# Create a feature cross of latitude and longitude.
latitude_x_longitude = tf.feature_column.crossed_column([latitude, longitude],
                                                        hash_bucket_size=100)
crossed_feature = tf.feature_column.indicator_column(latitude_x_longitude)
feature_columns.append(crossed_feature)

# Convert the list of feature columns into a layer that will ultimately become
# part of the model. Understanding layers is not important right now.
feature_cross_feature_layer = layers.DenseFeatures(feature_columns)

# The following variables are the hyperparameters.
learning_rate = 0.04
epochs = 35
batch_size = 100
label_name = 'median_house_value'

# Build the model, this time passing in the feature_cross_feature_layer.
my_model = create_model(learning_rate, feature_cross_feature_layer)

# Train the model on the training set.
epochs, rmse = train_model(my_model, train_df, epochs, batch_size, label_name)

plot_the_loss_curve(epochs, rmse)
Example #29
def demo(feature_column):
    feature_layer = layers.DenseFeatures(feature_column)
    print(feature_layer(example_batch).numpy())
Example #30
def demo_cat(feature_column):
    global c
    feature_layer = layers.DenseFeatures(
        [tf.feature_column.indicator_column(feature_column)])
    print(feature_layer(c).numpy())