def train_model(self, *, caller_file: str):
        """
        Perform model training on data from input_file.

        Pass __file__ variable of the caller script as caller_file
        parameter. It will be used as output file prefix.
        """

        # Make numpy printouts easier to read
        np.set_printoptions(precision=3, suppress=True)

        # Set random seeds for both NumPy and TF
        # to make the results reproducible
        seed = 0
        np.random.seed(seed)
        tf.random.set_seed(seed)

        # Input file has the same name as the caller script
        # and csv extension, unless specified otherwise.
        if self.input_file is None:
            input_file = f"{FileUtil.get_caller_name(caller_file=caller_file)}.csv"
        else:
            input_file = self.input_file

        # Read the dataset
        self.__input_dataset = pd.read_csv(input_file)

        # Skip the specified number of samples from the start of the dataset
        if self.skip_samples is not None:
            self.__input_dataset = self.__input_dataset.iloc[self.skip_samples:]

        # Then take the specified number of samples
        if self.take_samples is not None:
            self.__input_dataset = self.__input_dataset.head(self.take_samples)

        # Work on a copy so the original input dataset is left unmodified
        self.__train_dataset = self.__input_dataset.copy()

        # Split features from labels

        # Remove target series from train dataset and save it to a separate variable
        target_series = self.__train_dataset.pop(self.__lag_short_rate_feature)

        # Create a normalizer layer and adapt it to data
        short_rate = np.array(self.__train_dataset[self.__short_rate_feature])
        normalizer = preprocessing.Normalization(input_shape=[
            1,
        ])
        normalizer.adapt(short_rate)

        # Create DNN model (not yet deep in this example)
        self.__model = keras.Sequential([
            normalizer,
            layers.Dense(64, activation='sigmoid'),
            layers.Dense(1)
        ])

        # Compile the model
        self.__model.compile(loss='mean_squared_error',
                             optimizer=tf.keras.optimizers.Adam(
                                 self.learning_rate))

        # Print model summary
        print(self.__model.summary())

        # Perform training and save training history in a variable.
        # The last validation_split fraction of the data is held out for
        # validation; the remaining data is used for training.
        training_history = self.__model.fit(
            self.__train_dataset[self.__short_rate_feature],
            target_series,
            validation_split=0.5,
            verbose=0,
            epochs=100)
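
A minimal usage sketch (the module and class names are assumptions, not from the source): the caller passes its own __file__ so train_model can derive the default input CSV name and the output file prefix from the calling script.

# trainer_usage.py -- hypothetical caller script
from model_trainer import ModelTrainer  # hypothetical module/class exposing train_model

trainer = ModelTrainer()
# Passing __file__ makes the input default to "trainer_usage.csv" and
# uses this script's name as the output file prefix.
trainer.train_model(caller_file=__file__)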
Code example #2
def build(self, hp, inputs=None):
    input_node = nest.flatten(inputs)[0]
    return preprocessing.Normalization(axis=self.axis)(input_node)
Code example #3
# The below code is the equivalent of...
# horsepower_model.compile(
#     optimizer=DummyOptimizer(),
#     loss='mean_absolute_error')

# history = horsepower_model.fit(
#     train_features['Horsepower'][:10], train_labels[:10],
#     epochs=100, # Number of epochs really doesn't matter
#     # suppress logging
#     verbose=0)

# Which yields a mean absolute error of 26.834576

# See horsepower.py for construction of this code
horsepower_normalizer = preprocessing.Normalization()
horsepower_normalizer.adapt(horsepower)
kernel = initializer(shape=(1, 1))
input_value = horsepower[:10]
input_value = tf.reshape(input_value, [10, 1])
input_value = tf.cast(input_value, tf.float32)
normalized_value = horsepower_normalizer(horsepower[:10])
matrix_product = tf.tensordot(normalized_value, kernel, 1)
# tf.print(matrix_product)

horsepower_matrix = np.array(matrix_product)
# print(array)

# def calculate_mae(dataset1, dataset2, weight = 1):
#   mae_results = []
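
A hedged sketch of the commented-out helper above: only the signature and the mae_results name come from the snippet; the body is an assumption about how a weighted mean absolute error could be computed.

import numpy as np

def calculate_mae(dataset1, dataset2, weight=1):
    # Weighted mean absolute error between two equal-length sequences.
    mae_results = []
    for predicted, actual in zip(np.ravel(dataset1), np.ravel(dataset2)):
        mae_results.append(abs(predicted - actual) * weight)
    return sum(mae_results) / len(mae_results)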
Code example #4
#     if i == 0:
#         a = tf.zeros(result.shape)
#         train_data = tf.raw_ops.Add(x=a, y=result)
#     else:
#         train_data = tf.concat([train_data, result], axis=0)

#     print("\r{}th file is done...".format(i+1), end='')

result = np.expand_dims(train_data, -1)
# result = tf.raw_ops.ExpandDims(train_data, -1)

print(result.shape)

x = preprocessing.Resizing(32, 32)(result)
print(x.shape)
x = preprocessing.Normalization()(x)
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D()(x)
x = layers.Dropout(0.25)(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.5)(x)
answer = layers.Dense(7, activation='softmax')(x)

model = keras.Model(inputs=input_sigss, outputs=answer)

model.summary()

keras.utils.plot_model(model,
                       "proto_model_with_shape_info.png",
Code example #5
import numpy as np
import pandas as pd

# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing

titanic = pd.read_csv(
    "https://storage.googleapis.com/tf-datasets/titanic/train.csv")
titanic.head()

titanic_features = titanic.copy()
titanic_labels = titanic_features.pop('survived')
normalize = preprocessing.Normalization()

# Create a symbolic input
input = tf.keras.Input(shape=(), dtype=tf.float32)

# Do a calculation using it
result = 2 * input + 1

# the result doesn't have a value
result

calc = tf.keras.Model(inputs=input, outputs=result)

print(calc(1).numpy())
print(calc(2).numpy())
Code example #6
def get_normalization_layer(name, dataset):
    normalizer = preprocessing.Normalization()
    feature_ds = dataset.map(lambda x, y: x[name])
    normalizer.adapt(feature_ds)
    return normalizer
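
A hedged usage sketch for the helper above (the dataset and feature name are illustrative, not from the source): adapt a normalizer to one numeric feature of a batched tf.data dataset of (features, label) pairs and apply it to a symbolic input, as in the Keras structured-data workflow.

import tensorflow as tf

# Illustrative batched dataset of (feature_dict, label) pairs.
ds = tf.data.Dataset.from_tensor_slices(
    ({'age': [20.0, 35.0, 50.0, 65.0]}, [0, 1, 1, 0])).batch(2)

age_normalizer = get_normalization_layer('age', ds)
age_input = tf.keras.Input(shape=(1,), name='age', dtype=tf.float32)
encoded_age = age_normalizer(age_input)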
Code example #7
test_dataset = dataset.drop(train_dataset.index)

# Diagnostics Plot
#sns.pairplot(train_dataset[['estimated_power', 'actual_position', 'position_error', 'output_force']], diag_kind='kde')

print("\n\nTransposing Dataset - Overall Statistics:")
print(train_dataset.describe().transpose())

train_features = train_dataset.copy()
test_features = test_dataset.copy()

train_labels = train_features.pop('weight')
test_labels = test_features.pop('weight')

# Normalize the data - Sample of how it looks
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))

# Print demo for normalized values
#first = np.array(train_features[:1])
#with np.printoptions(precision=2, suppress=True):
#  print('\nFirst example:', first)
#  print()
#  print('\nNormalized:', normalizer(first).numpy())

# Normalize Output Force
outputforce = np.array(train_features['output_force'])
outputforce_normalizer = preprocessing.Normalization(input_shape=[
    1,
])
outputforce_normalizer.adapt(outputforce)
Code example #8
test_ds = preprocess_dataset(test_files)

batch_size = 64
train_ds = train_ds.batch(batch_size)
val_ds = val_ds.batch(batch_size)

train_ds = train_ds.cache().prefetch(AUTOTUNE)
val_ds = val_ds.cache().prefetch(AUTOTUNE)


for spectrogram, _ in spectrogram_ds.take(1):
  input_shape = spectrogram.shape
print('Input shape:', input_shape)
num_labels = len(commands)

norm_layer = preprocessing.Normalization()
norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))

model = models.Sequential([
    layers.Input(shape=input_shape),
    #preprocessing.Resizing(32, 32), 
    norm_layer,
    layers.Conv2D(32, 3, activation='relu'),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(num_labels),
])
Code example #9
def baseline():
    showPlot = False
    np.set_printoptions(precision=3, suppress=True)

    mobility_dataframe = pd.read_csv('google_baseline_test.csv',
                                     infer_datetime_format=True,
                                     parse_dates=True)
    cdc_dataframe = pd.read_csv('cdc_baseline_test_movingAvg.csv',
                                infer_datetime_format=True,
                                parse_dates=True)

    full_dataframe = pd.concat([mobility_dataframe, cdc_dataframe], axis=1)

    #sns.pairplot(full_dataframe[['newAndPnew', 'retail_and_recreation_percent_change_from_baseline', 'workplaces_percent_change_from_baseline', 'residential_percent_change_from_baseline']], diag_kind='kde')
    #plt.show()

    bestLinearCorr = 0
    bestLogCorr = 0
    bestLinearOffset = -1
    bestLogOffset = -1
    bestLinearData = 0
    bestLogData = 0

    correlationScores = []
    correlationLogScores = []

    for offset in range(100):
        #Shift CDC data by offset value
        cdc_dataframe_truc = cdc_dataframe.shift(periods=offset, fill_value=0)

        #Build new full data array
        mobility_dataframe_truc = mobility_dataframe.drop(columns=['date'])
        full_dataframe = pd.concat(
            [cdc_dataframe_truc, mobility_dataframe_truc], axis=1)
        full_dataframe['originalCases'] = cdc_dataframe[
            'newAndPnew']  #preserve original case values as additional feature
        full_dataframe_noDate = full_dataframe.drop(
            columns=['submission_date'])
        full_dataframe_noDate = full_dataframe_noDate.loc[(
            full_dataframe_noDate['newAndPnew'] !=
            0)]  #remove rows with zero cases

        #Compute linear and logarithmic correlations
        linearCorr = full_dataframe_noDate.corr()
        linearCorr = linearCorr.to_numpy()[
            0, 1:]  #Take only correlations between 'cases' and mobility data

        logData = np.log(full_dataframe_noDate + 1 -
                         np.min(full_dataframe_noDate.to_numpy()))
        logCorr = logData.corr()
        logCorr = logCorr.to_numpy()[
            0, 1:]  #Take only correlations between 'cases' and mobility data

        print("Offset:", offset, "Correlation:    ", linearCorr)
        print("           Log Correlation:", logCorr)

        #Save best values
        if np.linalg.norm(linearCorr) > np.linalg.norm(bestLinearCorr):
            bestLinearCorr = linearCorr
            bestLinearOffset = offset
            bestLinearData = full_dataframe_noDate

        if np.linalg.norm(logCorr) > np.linalg.norm(bestLogCorr):
            bestLogCorr = logCorr
            bestLogOffset = offset
            bestLogData = logData

        correlationScores.append(np.linalg.norm(linearCorr))
        correlationLogScores.append(np.linalg.norm(logCorr))

    if showPlot:
        plt.plot(correlationScores)
        plt.xlabel("Cases offset (days)")
        plt.ylabel("Norm of correlation vector")
        plt.title("Linear correlation vs. data offset")
        plt.show()
        plt.plot(correlationLogScores)
        plt.xlabel("Cases offset (days)")
        plt.ylabel("Norm of correlation vector")
        plt.title("Logarithmic correlation vs. data offset")
        plt.show()

        sns.pairplot(bestLinearData[[
            'newAndPnew', 'retail_and_recreation_percent_change_from_baseline',
            'grocery_and_pharmacy_percent_change_from_baseline',
            'parks_percent_change_from_baseline',
            'workplaces_percent_change_from_baseline',
            'residential_percent_change_from_baseline', 'originalCases'
        ]],
                     diag_kind='kde')
        plt.show()

        sns.pairplot(bestLogData[[
            'newAndPnew', 'retail_and_recreation_percent_change_from_baseline',
            'grocery_and_pharmacy_percent_change_from_baseline',
            'parks_percent_change_from_baseline',
            'workplaces_percent_change_from_baseline',
            'residential_percent_change_from_baseline', 'originalCases'
        ]],
                     diag_kind='kde')
        plt.show()

    print("Best Full Correlation:", bestLinearCorr)
    print("Best Full Correlation Norm:", np.linalg.norm(bestLinearCorr))
    print("Best Full Offset:", bestLinearOffset)

    print("Best Log Correlation:", bestLogCorr)
    print("Best Log Correlation Norm:", np.linalg.norm(bestLogCorr))
    print("Best Log Offset:", bestLogOffset)

    #Define models

    normalizer = preprocessing.Normalization()
    caseNormalizer = preprocessing.Normalization()

    linear_model = tf.keras.Sequential([normalizer, layers.Dense(units=1)])

    linear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                         loss='mean_absolute_error')

    dnn_model = keras.Sequential([
        normalizer,
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(1)
    ])

    dnn_model.compile(loss='mean_absolute_error',
                      optimizer=tf.keras.optimizers.Adam(0.001))

    cases_model = tf.keras.Sequential([
        normalizer,
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(1)
    ])

    cases_model.compile(loss='mean_absolute_error',
                        optimizer=tf.keras.optimizers.Adam(0.001))

    linearMSE = []
    logMSEAdj = []
    linearDNNMSE = []
    logDNNMSEAdj = []
    linearCasesMSE = []
    logCasesMSE = []

    #Convert data to numpy
    linearCasesOnly = bestLinearData['originalCases'].to_numpy()
    logCasesOnly = np.log(linearCasesOnly)
    bestLinearData = bestLinearData.to_numpy()
    bestLogData = bestLogData.to_numpy()

    stride = 10  #trains a new model every {stride} days
    maxEpoch = 100

    for t in range(
        (min(bestLinearData.shape[0], bestLogData.shape[0]) - 60) // stride):
        print("Training model:", t)
        linearTrainX = bestLinearData[t * stride:t * stride + 30, 1:]
        linearTrainy = bestLinearData[t * stride:t * stride + 30, :1]
        logTrainy2 = np.log(linearTrainy + 1)
        logTrainX = bestLogData[t * stride:t * stride + 30, 1:]
        logTrainy = bestLogData[t * stride:t * stride + 30, :1]
        linearCasesTrainX = linearCasesOnly[t * stride:t * stride + 30]
        logCasesTrainX = logCasesOnly[t * stride:t * stride + 30]

        linearTestX = bestLinearData[t * stride + 30:t * stride + 60, 1:]
        linearTesty = bestLinearData[t * stride + 30:t * stride + 60, :1]
        logTestX = bestLogData[t * stride + 30:t * stride + 60, 1:]
        logTesty = bestLogData[t * stride + 30:t * stride + 60, :1]
        linearCasesTestX = linearCasesOnly[t * stride + 30:t * stride + 60]
        logCasesTestX = logCasesOnly[t * stride + 30:t * stride + 60]

        #fit linear model
        linHistory = linear_model.fit(linearTrainX,
                                      linearTrainy,
                                      epochs=maxEpoch,
                                      verbose=0)

        evaluate = linear_model.evaluate(linearTestX, linearTesty, verbose=0)
        predict = linear_model.predict(linearTestX, verbose=0)
        linearMSE.append(np.abs(predict - linearTesty) / linearTesty)

        reset_weights(linear_model)

        #fit log model
        logHistory = linear_model.fit(logTrainX,
                                      logTrainy,
                                      epochs=maxEpoch,
                                      verbose=0)

        evaluate = linear_model.evaluate(logTestX, logTesty, verbose=0)
        predict = linear_model.predict(logTestX, verbose=0)
        predictAdj = np.exp(predict) - 1 + np.min(
            full_dataframe_noDate.to_numpy(
            ))  #convert from log back to raw case number
        logMSEAdj.append(np.abs(predictAdj - linearTesty) / linearTesty)

        reset_weights(linear_model)

        #fit linear DNN model
        linHistory = dnn_model.fit(linearTrainX,
                                   linearTrainy,
                                   epochs=maxEpoch,
                                   verbose=0)

        evaluate = dnn_model.evaluate(linearTestX, linearTesty, verbose=0)
        predict = dnn_model.predict(linearTestX, verbose=0)
        linearDNNMSE.append(np.abs(predict - linearTesty) / linearTesty)

        reset_weights(dnn_model)

        #fit log DNN model
        logHistory = dnn_model.fit(logTrainX,
                                   logTrainy,
                                   epochs=maxEpoch,
                                   verbose=0)

        evaluate = dnn_model.evaluate(logTestX, logTesty, verbose=0)
        predict = dnn_model.predict(logTestX, verbose=0)
        predictAdj = np.exp(predict) - 1 + np.min(
            full_dataframe_noDate.to_numpy(
            ))  #convert from log back to raw case number
        #print(predictAdj-linearTesty)
        logDNNMSEAdj.append(np.abs(predictAdj - linearTesty) / linearTesty)

        reset_weights(dnn_model)

        #fit linear cases only model
        linHistory = cases_model.fit(linearCasesTrainX,
                                     linearTrainy,
                                     epochs=maxEpoch,
                                     verbose=0)

        evaluate = cases_model.evaluate(linearCasesTestX,
                                        linearTesty,
                                        verbose=0)
        predict = cases_model.predict(linearCasesTestX, verbose=0)
        linearCasesMSE.append(np.abs(predict - linearTesty) / linearTesty)
        if showPlot:
            visualize_cases(cases_model, linearCasesTrainX, linearTrainy,
                            linearCasesTestX, linearTesty)

        reset_weights(cases_model)

        #fit log cases only model
        linHistory = cases_model.fit(logCasesTrainX,
                                     logTrainy2,
                                     epochs=maxEpoch,
                                     verbose=0)

        evaluate = cases_model.evaluate(logCasesTestX, logTesty, verbose=0)
        predict = cases_model.predict(logCasesTestX, verbose=0)
        predictAdj = np.exp(
            predict) - 1  #convert from log back to raw case number
        logCasesMSE.append(np.abs(predictAdj - linearTesty) / linearTesty)
        if showPlot:
            visualize_cases(cases_model, logCasesTrainX, logTrainy2,
                            logCasesTestX, logTesty)

        reset_weights(cases_model)

    plt.plot(np.array(linearMSE).mean(axis=0), label='Linear')
    plt.plot(np.array(logMSEAdj).mean(axis=0), label='Log Adjusted')
    plt.plot(np.array(linearDNNMSE).mean(axis=0), label='Linear DNN')
    plt.plot(np.array(logDNNMSEAdj).mean(axis=0), label='Log DNN Adjusted')
    plt.plot(np.array(linearCasesMSE).mean(axis=0), label='Linear Cases')
    plt.plot(np.array(logCasesMSE).mean(axis=0), label='Log Cases')
    plt.legend(loc="upper left")
    plt.show()
Code example #10
    def build_train(self):
        AUTOTUNE = self.AUTOTUNE
        spectrogram_ds = self.spectrogram_ds
        commands = self.commands

        # repeat the training set preprocessing on the validation and test sets.
        train_ds = spectrogram_ds
        val_ds = self.preprocess_dataset(self.val_files)
        test_ds = self.preprocess_dataset(self.test_files)

        # Batch the training and validation sets for model training.
        batch_size = 64
        train_ds = train_ds.batch(batch_size)
        val_ds = val_ds.batch(batch_size)

        # Add dataset cache() and prefetch() operations to reduce read latency while training the model.
        train_ds = train_ds.cache().prefetch(AUTOTUNE)
        val_ds = val_ds.cache().prefetch(AUTOTUNE)

        self.test_ds = test_ds

        if not os.path.exists("speech.model"):
            for spectrogram, _ in spectrogram_ds.take(1):
                input_shape = spectrogram.shape
            print('Input shape:', input_shape)
            num_labels = len(commands)
            norm_layer = preprocessing.Normalization()
            norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))
            model = models.Sequential([
                layers.Input(shape=input_shape),
                preprocessing.Resizing(32, 32),
                norm_layer,
                layers.Conv2D(32, 3, activation='relu'),
                layers.Conv2D(64, 3, activation='relu'),
                layers.MaxPooling2D(),
                layers.Dropout(0.25),
                layers.Flatten(),
                layers.Dense(128, activation='relu'),
                layers.Dropout(0.5),
                layers.Dense(num_labels),
            ])

            model.summary()
            model.compile(
                optimizer=tf.keras.optimizers.Adam(),
                loss=tf.keras.losses.SparseCategoricalCrossentropy(
                    from_logits=True),
                metrics=['accuracy'],
            )

            EPOCHS = 10
            history = model.fit(
                train_ds,
                validation_data=val_ds,
                epochs=EPOCHS,
                callbacks=tf.keras.callbacks.EarlyStopping(verbose=1,
                                                           patience=2),
            )

            metrics = history.history
            fig4 = plt.figure()
            plt.plot(history.epoch, metrics['loss'], metrics['val_loss'])
            plt.legend(['loss', 'val_loss'])

            model.save('speech.model')
Code example #11
File: main.py  Project: long1710/Song-Extraction
def normalization(spectrogram_ds):
    norm_layer = preprocessing.Normalization()
    norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))
    return norm_layer
Code example #12
def train_model(directory):
    file_names, accel, gyro, barometer, pedometer, data = read_data(directory)
    accelMeanList = list()
    gyroMeanList = list()
    speedList = list()
    timestampsList = list()
    df = pd.DataFrame(columns=["speed", "accel", "gyro", "time"])
    # df = pd.DataFrame(
    #     {
    #         "speed":[0],
    #         "accel":[0],
    #         "gyro":[0]
    #     }
    # )
    for i in range(len(accel)):
        accelMean = pd.DataFrame()
        accelMean.insert(loc=0, column=0, value=accel[i].iloc[:, 0])
        accelMean.insert(
            loc=1,
            column=1,
            value=np.sqrt(accel[i].iloc[:, 1]**2 + accel[i].iloc[:, 2]**2 +
                          accel[i].iloc[:, 3]**2))
        accelMean = (accelMean.iloc[:, 1].groupby(accelMean.iloc[:, 0]).mean())
        accelMean = (accelMean.reindex(range(accelMean.index.max() + 1)))
        accelMean = (accelMean.fillna(0)).values

        gyroMean = pd.DataFrame()
        gyroMean.insert(loc=0, column=0, value=gyro[i].iloc[:, 0])
        gyroMean.insert(
            loc=1,
            column=1,
            value=np.sqrt(gyro[i].iloc[:, 1]**2 + gyro[i].iloc[:, 2]**2 +
                          gyro[i].iloc[:, 3]**2))
        gyroMean = (gyroMean.iloc[:, 1].groupby(gyroMean.iloc[:, 0]).mean())
        gyroMean = (gyroMean.reindex(range(gyroMean.index.max() + 1)))
        gyroMean = (gyroMean.fillna(0)).values

        steps = pd.DataFrame(pedometer[i].iloc[:, 1].groupby(
            pedometer[i].iloc[:, 0]).sum())
        steps = steps.reindex(range(steps.index.max() + 1))
        steps = (steps.fillna(0))
        stride = float(data[i].iloc[0, 0])
        speed = steps.iloc[:, 0].values * stride
        speedList.append(speed)
        # timestamps = range(len(accelMean) + 1)
        timestampsList.append(range(len(accelMean) + 1))

        dfi = pd.DataFrame({
            "speed": speed,
            "accel": accelMean,
            "gyro": gyroMean
        })
        dfi = dfi.iloc[1:]
        dfi["time"] = len(dfi.index)
        df = pd.concat([df, dfi])
    df = df.sample(frac=1).reset_index(drop=True)

    train_size = round((len(df) / 100) * 80)
    train_dataset = df.sample(frac=0.8, random_state=0)
    test_dataset = df.drop(train_dataset.index)
    x_train = train_dataset.copy()
    x_test = test_dataset.copy()
    y_train = x_train.pop("time")
    y_test = x_test.pop("time")
    x_train = np.asarray(x_train).astype('float32')
    x_test = np.asarray(x_test).astype('float32')
    y_train = np.asarray(y_train).astype('float32')
    y_test = np.asarray(y_test).astype('float32')
    # print(train_dataset.describe().transpose()[['mean', 'std']])
    normalizer = preprocessing.Normalization()
    normalizer.adapt(np.array(x_train))
    linear_model = tf.keras.Sequential([normalizer, layers.Dense(units=1)])
    linear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                         loss='mean_absolute_error')
    linear_model.fit(
        x_train,
        y_train,
        epochs=100,
        verbose=0,
        validation_data=(x_test, y_test),
    )
    print(x_test[:1])
    print(linear_model.predict(x_test[:1]))
    # tf.saved_model.save(linear_model, "")
    # linear_model.save("mnist.h5")
    converter = tf.compat.v2.lite.TFLiteConverter.from_keras_model(
        linear_model)
    tflite_model = converter.convert()
    open('linear.tflite', 'wb').write(tflite_model)
Code example #13
def main():
    # In memory data
    url = 'https://storage.googleapis.com/download.tensorflow.org/data/abalone_train.csv'
    abalone_train = pd.read_csv(url,
                                names=[
                                    'Length', 'Diameter', 'Height',
                                    'Whole weight', 'Shucked weight',
                                    'Viscera weight', 'Shell weight', 'Age'
                                ])

    print(abalone_train.head())

    abalone_features = abalone_train.copy()
    abalone_labels = abalone_features.pop('Age')

    abalone_features = np.array(abalone_features)
    print(f'Features: {abalone_features}')

    abalone_model = tf.keras.Sequential([layers.Dense(64), layers.Dense(1)])

    abalone_model.compile(loss=tf.losses.MeanSquaredError(),
                          optimizer=tf.optimizers.Adam())

    # Basic preprocessing
    normalize = preprocessing.Normalization()

    normalize.adapt(abalone_features)

    norm_abalone_model = tf.keras.Sequential(
        [normalize, layers.Dense(64),
         layers.Dense(1)])

    norm_abalone_model.compile(loss=tf.losses.MeanSquaredError(),
                               optimizer=tf.optimizers.Adam())
    norm_abalone_model.fit(abalone_features, abalone_labels, epochs=10)

    # Mixed data types
    url = 'https://storage.googleapis.com/tf-datasets/titanic/train.csv'
    titanic = pd.read_csv(url)
    print(titanic.head())

    titanic_features = titanic.copy()
    titanic_labels = titanic_features.pop('survived')

    # Create a symbolic input
    input = tf.keras.Input(shape=(), dtype=tf.float32)

    # Do a calculation using it
    result = 2 * input + 1

    # The result doesn't have a value
    print(f'Result: {result}')

    calc = tf.keras.Model(inputs=input, outputs=result)

    print(f'calc(1) = {calc(1).numpy()}')
    print(f'calc(2) = {calc(2).numpy()}')

    inputs = {}
    for name, column in titanic_features.items():
        dtype = column.dtype
        if dtype == object:
            dtype = tf.string
        else:
            dtype = tf.float32

        inputs[name] = tf.keras.Input(shape=(1, ), name=name, dtype=dtype)

    inputs

    numeric_inputs = {
        name: input
        for name, input in inputs.items() if input.dtype == tf.float32
    }

    x = layers.Concatenate()(list(numeric_inputs.values()))
    norm = preprocessing.Normalization()
    norm.adapt(np.array(titanic[numeric_inputs.keys()]))
    all_numeric_inputs = norm(x)

    all_numeric_inputs

    preprocessed_inputs = [all_numeric_inputs]

    for name, input in inputs.items():
        if input.dtype == tf.float32:
            continue

        lookup = preprocessing.StringLookup(
            vocabulary=np.unique(titanic_features[name]))
        one_hot = preprocessing.CategoryEncoding(
            max_tokens=lookup.vocab_size())

        x = lookup(input)
        x = one_hot(x)
        preprocessed_inputs.append(x)

    preprocessed_inputs_cat = layers.Concatenate()(preprocessed_inputs)

    titanic_preprocessing = tf.keras.Model(inputs, preprocessed_inputs_cat)

    tf.keras.utils.plot_model(model=titanic_preprocessing,
                              rankdir='LR',
                              dpi=72,
                              show_shapes=True)

    titanic_features_dict = {
        name: np.array(value)
        for name, value in titanic_features.items()
    }

    features_dict = {
        name: values[:1]
        for name, values in titanic_features_dict.items()
    }
    titanic_preprocessing(features_dict)

    titanic_model = get_titanic_model(titanic_preprocessing, inputs)

    titanic_model.fit(x=titanic_features_dict, y=titanic_labels, epochs=10)

    titanic_model.save('test')
    reloaded = tf.keras.models.load_model('test')

    features_dict = {
        name: values[:1]
        for name, values in titanic_features_dict.items()
    }

    before = titanic_model(features_dict)
    after = reloaded(features_dict)
    assert (before - after) < 1e-3
    print(f'Before: {before}')
    print(f'After: {after}')

    # Using tf.data
    # On in memory datasets
    for example in slices(titanic_features_dict):
        for name, value in example.items():
            print(f'{name:19s}: {value}')
        break

    titanic_ds = tf.data.Dataset.from_tensor_slices(
        (titanic_features_dict, titanic_labels))

    titanic_batches = titanic_ds.shuffle(len(titanic_labels)).batch(32)

    titanic_model.fit(titanic_batches, epochs=5)

    # From a single file
    url = 'https://storage.googleapis.com/tf-datasets/titanic/train.csv'
    titanic_file_path = tf.keras.utils.get_file('train.csv', url)

    titanic_csv_ds = tf.data.experimental.make_csv_dataset(
        titanic_file_path,
        batch_size=5,  # Artificially small to make examples easier to show.
        label_name='survived',
        num_epochs=1,
        ignore_errors=True,
    )

    for batch, label in titanic_csv_ds.take(1):
        for key, value in batch.items():
            print(f'{key:20s}: {value}')
        print()
        print(f'{"label":20s}: {label}')

    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz'
    traffic_volume_csv_gz = tf.keras.utils.get_file(
        'Metro_Interstate_Traffic_Volume.csv.gz',
        url,
        cache_dir='.',
        cache_subdir='traffic')

    traffic_volume_csv_gz_ds = tf.data.experimental.make_csv_dataset(
        traffic_volume_csv_gz,
        batch_size=256,
        label_name='traffic_volume',
        num_epochs=1,
        compression_type='GZIP')

    for batch, label in traffic_volume_csv_gz_ds.take(1):
        for key, value in batch.items():
            print(f'{key:20s}: {value[:5]}')
        print()
        print(f'{"label":20s}: {label[:5]}')

    #Caching
    start = time.time()
    for i, (batch, label) in enumerate(traffic_volume_csv_gz_ds.repeat(20)):
        if i % 40 == 0:
            print('.', end='')
    print(f'Total time: {time.time() - start:.3f}')

    caching = traffic_volume_csv_gz_ds.cache().shuffle(1000)

    start = time.time()
    for i, (batch, label) in enumerate(caching.shuffle(1000).repeat(20)):
        if i % 40 == 0:
            print('.', end='')
    print(f'Total time: {time.time() - start:.3f}')

    start = time.time()
    snapshot = tf.data.experimental.snapshot('titanic.tfsnap')
    snapshotting = traffic_volume_csv_gz_ds.apply(snapshot).shuffle(1000)

    for i, (batch, label) in enumerate(snapshotting.shuffle(1000).repeat(20)):
        if i % 40 == 0:
            print('.', end='')
    print(f'Total time: {time.time() - start:.3f}')

    # Multiple files
    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00417/fonts.zip'
    _ = tf.keras.utils.get_file('fonts.zip',
                                url,
                                cache_dir='.',
                                cache_subdir='fonts',
                                extract=True)

    fonts_csvs = sorted(str(p) for p in pathlib.Path('fonts').glob('*.csv'))

    print(f'Fonts: {fonts_csvs[:10]}')
    print(f'Fonts len: {len(fonts_csvs)}')

    fonts_ds = tf.data.experimental.make_csv_dataset(
        file_pattern='fonts/*.csv',
        batch_size=10,
        num_epochs=1,
        num_parallel_reads=20,
        shuffle_buffer_size=10000)

    for features in fonts_ds.take(1):
        for i, (name, value) in enumerate(features.items()):
            if i > 15:
                break
            print(f'{name:20s}: {value}')
    print('...')
    print(f'[total: {len(features)} features]')

    # Optional: Packing fields
    fonts_image_ds = fonts_ds.map(make_images)

    for features in fonts_image_ds.take(1):
        break

    plt.figure(figsize=(6, 6), dpi=120)

    for n in range(9):
        plt.subplot(3, 3, n + 1)
        plt.imshow(features['image'][..., n])
        plt.title(chr(features['m_label'][n]))
        plt.axis('off')

    plt.show()

    # Lower level functions
    # `tf.io.decode_csv`
    text = pathlib.Path(titanic_file_path).read_text()
    lines = text.split('\n')[1:-1]

    all_strings = [str()] * 10
    print(f'{all_strings}')

    features = tf.io.decode_csv(lines, record_defaults=all_strings)

    for f in features:
        print(f'type: {f.dtype.name}, shape: {f.shape}')

    print(f'Sample record: {lines[0]}')

    titanic_types = [
        int(),
        str(),
        float(),
        int(),
        int(),
        float(),
        str(),
        str(),
        str(),
        str()
    ]
    print(f'Data types: {titanic_types}')

    features = tf.io.decode_csv(lines, record_defaults=titanic_types)

    for f in features:
        print(f'type: {f.dtype.name}, shape: {f.shape}')

    # `tf.data.experimental.CsvDataset`
    simple_titanic = tf.data.experimental.CsvDataset(
        titanic_file_path, record_defaults=titanic_types, header=True)

    for example in simple_titanic.take(1):
        print(f'Sample record: {[e.numpy() for e in example]}')

    def decode_titanic_line(line):
        return tf.io.decode_csv(line, titanic_types)

    manual_titanic = (
        # Load the lines of text
        tf.data.TextLineDataset(titanic_file_path)
        # Skip the header row
        .skip(1)
        # Decode the line
        .map(decode_titanic_line))

    for example in manual_titanic.take(1):
        print(f'Sample record: {[e.numpy() for e in example]}')

    # Multiple files
    font_line = pathlib.Path(fonts_csvs[0]).read_text().splitlines()[1]
    print(f'Sample: {font_line}')

    num_font_features = font_line.count(',') + 1
    font_column_types = [str(), str()] + [float()] * (num_font_features - 2)

    print(f'Fonts[0]: {fonts_csvs[0]}')

    simple_font_ds = tf.data.experimental.CsvDataset(
        fonts_csvs, record_defaults=font_column_types, header=True)

    for row in simple_font_ds.take(10):
        print(f'CSV first column: {row[0].numpy()}')

    font_files = tf.data.Dataset.list_files('fonts/*.csv')

    print('Epoch 1:')
    for f in list(font_files)[:5]:
        print(f'    {f.numpy()}')
    print('    ...')
    print()
    print('Epoch 2:')
    for f in list(font_files)[:5]:
        print(f'    {f.numpy()}')
    print('    ...')

    def make_font_csv_ds(path):
        return tf.data.experimental.CsvDataset(
            path, record_defaults=font_column_types, header=True)

    font_rows = font_files.interleave(make_font_csv_ds, cycle_length=3)

    fonts_dict = {'font_name': [], 'character': []}

    for row in font_rows.take(10):
        fonts_dict['font_name'].append(row[0].numpy().decode())
        fonts_dict['character'].append(chr(row[2].numpy()))

    print(pd.DataFrame(fonts_dict))

    # Performance
    BATCH_SIZE = 2048
    font_ds = tf.data.experimental.make_csv_dataset(file_pattern='fonts/*.csv',
                                                    batch_size=BATCH_SIZE,
                                                    num_epochs=1,
                                                    num_parallel_reads=100)

    start = time.time()
    for i, batch in enumerate(font_ds.take(20)):
        print('.', end='')
    print(f'Total time: {time.time() - start:.3f}')
Code example #14
sns.pairplot(train_dataset[[
    'Energia', 'Corrente', 'Voltaggio', 'ActivePower', 'Response', 'Waste'
]],
             diag_kind='kde').savefig("Response.png")

print(train_dataset.describe().transpose())

train_features = train_dataset.copy()
test_features = test_dataset.copy()

print(train_dataset.describe().transpose()[['mean', 'std']])
train_labels = train_features.pop('Corrente')
test_labels = test_features.pop('Corrente')

normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))

print(normalizer.mean.numpy())

first = np.array(train_features[:1])

with np.printoptions(precision=2, suppress=True):
    print('First example:', first)
    print()
    print('Normalized:', normalizer(first).numpy())

horsepower = np.array(train_features['Waste'])

horsepower_normalizer = preprocessing.Normalization(input_shape=[
    1,
Code example #15
File: Model.py  Project: gsaha009/HHbbWWAnalysis
def NeuralNetModel(x_train, y_train, x_val, y_val, params):
    """
    Keras model for the Neural Network, used to scan the hyperparameter space by Talos
    Uses the data provided as inputs
    """
    # Split y = [target, weight]; Talos does not leave room for the weight, so it had to be included in one of the arrays
    w_train = y_train[:, -1]
    w_val = y_val[:, -1]
    y_train = y_train[:, :-1]
    y_val = y_val[:, :-1]

    x_train_lbn = x_train[:, -len(parameters.LBN_inputs):].reshape(
        -1, 4,
        len(parameters.LBN_inputs) // 4)
    x_train = x_train[:, :-len(parameters.LBN_inputs)]

    x_val_lbn = x_val[:, -len(parameters.LBN_inputs):].reshape(
        -1, 4,
        len(parameters.LBN_inputs) // 4)
    x_val = x_val[:, :-len(parameters.LBN_inputs)]

    # Scaler #
    with open(parameters.scaler_path,
              'rb') as handle:  # Import scaler that was created before
        scaler = pickle.load(handle)

    # Design network #

    # Left branch : classic inputs -> Preprocess -> onehot
    inputs_numeric = []
    means = []
    variances = []
    inputs_all = []
    encoded_all = []
    for idx in range(x_train.shape[1]):
        inpName = parameters.inputs[idx].replace('$', '')
        input_layer = tf.keras.Input(shape=(1, ), name=inpName)
        # Categorical inputs #
        if parameters.mask_op[idx]:
            operation = getattr(Operations, parameters.operations[idx])()
            encoded_all.append(operation(input_layer))
        # Numerical inputs #
        else:
            inputs_numeric.append(input_layer)
            means.append(scaler.mean_[idx])
            variances.append(scaler.var_[idx])
        inputs_all.append(input_layer)

    # Concatenate all numerical inputs #
    if int(tf_version[1]) < 4:
        normalizer = preprocessing.Normalization(name='Normalization')
        x_dummy = np.ones((10, len(means)))
        # Needs a dummy to call the adapt method before setting the weights
        normalizer.adapt(x_dummy)
        normalizer.set_weights([np.array(means), np.array(variances)])
    else:
        normalizer = preprocessing.Normalization(mean=means,
                                                 variance=variances,
                                                 name='Normalization')
    encoded_all.append(
        normalizer(tf.keras.layers.concatenate(inputs_numeric,
                                               name='Numerics')))

    if len(encoded_all) > 1:
        all_features = tf.keras.layers.concatenate(encoded_all,
                                                   axis=-1,
                                                   name="Features")
    else:
        all_features = encoded_all[0]

    # Right branch : LBN
    input_lbn_Layer = Input(shape=x_train_lbn.shape[1:], name='LBN_inputs')
    lbn_layer = LBNLayer(
        x_train_lbn.shape[1:],
        n_particles=max(params['n_particles'],
                        1),  # Hack so that 0 does not trigger error
        boost_mode=LBN.PAIRS,
        features=["E", "px", "py", "pz", "pt", "p", "m", "pair_cos"],
        name='LBN')(input_lbn_Layer)
    batchnorm = tf.keras.layers.BatchNormalization(name='batchnorm')(lbn_layer)

    # Concatenation of left and right #
    concatenate = tf.keras.layers.Concatenate(axis=-1)(
        [all_features, batchnorm])
    L1 = Dense(params['first_neuron'],
               activation=params['activation'],
               kernel_regularizer=l2(params['l2']))(
                   concatenate if params['n_particles'] > 0 else all_features)
    hidden = hidden_layers(params, 1, batch_normalization=True).API(L1)
    out = Dense(y_train.shape[1],
                activation=params['output_activation'],
                name='out')(hidden)

    # Check preprocessing #
    preprocess = Model(inputs=inputs_numeric, outputs=encoded_all[-1])
    x_numeric = x_train[:, [not m for m in parameters.mask_op]]
    out_preprocess = preprocess.predict(np.hsplit(x_numeric,
                                                  x_numeric.shape[1]),
                                        batch_size=params['batch_size'])
    mean_scale = np.mean(out_preprocess)
    std_scale = np.std(out_preprocess)
    if abs(mean_scale) > 0.01 or abs(
        (std_scale - 1) /
            std_scale) > 0.1:  # Check that scaling is correct to 1%
        logging.warning(
            "Something is wrong with the preprocessing layer (mean = %0.6f, std = %0.6f), maybe you loaded an incorrect scaler"
            % (mean_scale, std_scale))

    # Tensorboard logs #
    #path_board = os.path.join(parameters.main_path,"TensorBoard")
    #suffix = 0
    #while(os.path.exists(os.path.join(path_board,"Run_"+str(suffix)))):
    #    suffix += 1
    #path_board = os.path.join(path_board,"Run_"+str(suffix))
    #os.makedirs(path_board)
    #logging.info("TensorBoard log dir is at %s"%path_board)

    # Callbacks #
    # Early stopping to stop learning if val_loss plateau for too long #
    early_stopping = EarlyStopping(**parameters.early_stopping_params)
    # Reduce learning rate in case of plateau #
    reduceLR = ReduceLROnPlateau(**parameters.reduceLR_params)
    # Custom loss function plot for debugging #
    loss_history = LossHistory()
    # Tensorboard for checking live the loss curve #
    #board = TensorBoard(log_dir=path_board,
    #                    histogram_freq=1,
    #                    batch_size=params['batch_size'],
    #                    write_graph=True,
    #                    write_grads=True,
    #                    write_images=True)
    Callback_list = [loss_history, early_stopping, reduceLR]

    # Compile #
    if 'resume' not in params:  # Normal learning
        # Define model #
        model_inputs = [inputs_all]
        if params['n_particles'] > 0:
            model_inputs.append(input_lbn_Layer)
        model = Model(inputs=model_inputs, outputs=[out])
        initial_epoch = 0
    else:  # a model has to be imported and resumes training
        #custom_objects =  {'PreprocessLayer': PreprocessLayer,'OneHot': OneHot.OneHot}
        logging.info("Loaded model %s" % params['resume'])
        a = Restore(params['resume'],
                    custom_objects=custom_objects,
                    method='h5')
        model = a.model
        initial_epoch = params['initial_epoch']

    model.compile(optimizer=Adam(lr=params['lr']),
                  loss=params['loss_function'],
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(),
                      tf.keras.metrics.AUC(multi_label=True),
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall()
                  ])
    model.summary()
    fit_inputs = np.hsplit(x_train, x_train.shape[1])
    fit_val = (np.hsplit(x_val, x_val.shape[1]), y_val, w_val)
    if params['n_particles'] > 0:
        fit_inputs.append(x_train_lbn)
        fit_val[0].append(x_val_lbn)
    # Fit #
    history = model.fit(x=fit_inputs,
                        y=y_train,
                        sample_weight=w_train,
                        epochs=params['epochs'],
                        batch_size=params['batch_size'],
                        verbose=1,
                        validation_data=fit_val,
                        callbacks=Callback_list)

    # Plot history #
    PlotHistory(loss_history, params)

    return history, model
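
A hedged sketch, not from the source, of how this builder might be handed to Talos for a hyperparameter scan. The grid keys mirror the params[...] lookups above (plus whatever keys the hidden_layers helper expects); the values, the experiment name, and the exact Scan keyword names are assumptions and may differ between Talos versions.

import talos

p = {
    'lr': [1e-3, 1e-2],
    'first_neuron': [64, 128],
    'activation': ['relu'],
    'output_activation': ['softmax'],
    'l2': [1e-4],
    'n_particles': [4],
    'epochs': [50],
    'batch_size': [512],
    'loss_function': ['categorical_crossentropy'],
    # ...plus any keys used inside hidden_layers(params, ...)
}

scan = talos.Scan(x=x_train, y=y_train,
                  x_val=x_val, y_val=y_val,
                  params=p,
                  model=NeuralNetModel,
                  experiment_name='hhbbww_scan')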
Code example #16
true_k = 1.3  # slope
true_d = 0.3  # intercept

NUM_EXAMPLES = 100
X = tf.random.normal(shape=(NUM_EXAMPLES, ))
noise = tf.random.normal(shape=(NUM_EXAMPLES, ))
y = X * true_k + true_d + noise

train_x = np.array(X)

# Register TensorBoard tracing callback
tensorboard_callback = keras.callbacks.TensorBoard(log_dir='./logs')

# We use single-variable linear regression to predict the y values from the given X values.
reg_normalizer = preprocessing.Normalization(input_shape=[
    1,
])
reg_normalizer.adapt(train_x)

regression_model = tf.keras.Sequential([reg_normalizer, layers.Dense(units=1)])

regression_model.summary()

print(regression_model.predict(X[:10]))

regression_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                         loss='mean_absolute_error')

history = regression_model.fit(
    X,
    y,
Code example #17
File: Model.py  Project: gsaha009/HHbbWWAnalysis
def NeuralNetGeneratorModel(x_train, y_train, x_val, y_val, params):
    """
    Keras model for the Neural Network, used to scan the hyperparameter space by Talos
    Uses the generator rather than the input data (which are dummies)
    """
    # Scaler #
    with open(parameters.scaler_path,
              'rb') as handle:  # Import scaler that was created before
        scaler = pickle.load(handle)

    # Design network #

    # Left branch : classic inputs -> Preprocess -> onehot
    inputs_numeric = []
    means = []
    variances = []
    inputs_all = []
    encoded_all = []
    for idx in range(x_train.shape[1]):
        inpName = parameters.inputs[idx].replace('$', '').replace(' ',
                                                                  '').replace(
                                                                      '_', '')
        input_layer = tf.keras.Input(shape=(1, ), name=inpName)
        # Categorical inputs #
        if parameters.mask_op[idx]:
            operation = getattr(Operations, parameters.operations[idx])()
            encoded_all.append(operation(input_layer))
        # Numerical inputs #
        else:
            inputs_numeric.append(input_layer)
            means.append(scaler.mean_[idx])
            variances.append(scaler.var_[idx])
        inputs_all.append(input_layer)

    # Concatenate all numerical inputs #
    if int(tf_version[1]) < 4:
        normalizer = preprocessing.Normalization(name='Normalization')
        x_dummy = np.ones((10, len(means)))
        # Needs a dummy to call the adapt method before setting the weights
        normalizer.adapt(x_dummy)
        normalizer.set_weights([np.array(means), np.array(variances)])
    else:
        normalizer = preprocessing.Normalization(mean=means,
                                                 variance=variances,
                                                 name='Normalization')
    encoded_all.append(
        normalizer(tf.keras.layers.concatenate(inputs_numeric,
                                               name='Numerics')))

    if len(encoded_all) > 1:
        all_features = tf.keras.layers.concatenate(encoded_all,
                                                   axis=-1,
                                                   name="Features")
    else:
        all_features = encoded_all[0]

    # Right branch : LBN
    lbn_input_shape = (len(parameters.LBN_inputs) // 4, 4)
    input_lbn_Layer = Input(shape=lbn_input_shape, name='LBN_inputs')
    lbn_layer = LBNLayer(
        lbn_input_shape,
        n_particles=max(params['n_particles'],
                        1),  # Hack so that 0 does not trigger error
        boost_mode=LBN.PAIRS,
        features=["E", "px", "py", "pz", "pt", "p", "m", "pair_cos"],
        name='LBN')(input_lbn_Layer)
    batchnorm = tf.keras.layers.BatchNormalization(name='batchnorm')(lbn_layer)

    # Concatenation of left and right #
    concatenate = tf.keras.layers.Concatenate(axis=-1)(
        [all_features, batchnorm])
    L1 = Dense(params['first_neuron'],
               activation=params['activation'],
               kernel_regularizer=l2(params['l2']))(
                   concatenate if params['n_particles'] > 0 else all_features)
    hidden = hidden_layers(params, 1, batch_normalization=True).API(L1)
    out = Dense(y_train.shape[1],
                activation=params['output_activation'],
                name='out')(hidden)

    # Tensorboard logs #
    #    path_board = os.path.join(parameters.main_path,"TensorBoard")
    #    suffix = 0
    #    while(os.path.exists(os.path.join(path_board,"Run_"+str(suffix)))):
    #        suffix += 1
    #    path_board = os.path.join(path_board,"Run_"+str(suffix))
    #    os.makedirs(path_board)
    #    logging.info("TensorBoard log dir is at %s"%path_board)

    # Callbacks #
    # Early stopping to stop learning if val_loss plateau for too long #
    early_stopping = EarlyStopping(**parameters.early_stopping_params)
    # Reduce learning rate in case of plateau #
    reduceLR = ReduceLROnPlateau(**parameters.reduceLR_params)
    # Custom loss function plot for debugging #
    loss_history = LossHistory()
    # Tensorboard for checking live the loss curve #
    #    board = TensorBoard(log_dir=path_board,
    #                        histogram_freq=1,
    #                        batch_size=params['batch_size'],
    #                        write_graph=True,
    #                        write_grads=True,
    #                        write_images=True)
    #    Callback_list = [loss_history,early_stopping,reduceLR,board]
    Callback_list = [loss_history, early_stopping, reduceLR]

    # Compile #
    if 'resume' not in params:  # Normal learning
        # Define model #
        model_inputs = [inputs_all]
        if params['n_particles'] > 0:
            model_inputs.append(input_lbn_Layer)
        model = Model(inputs=model_inputs, outputs=[out])
        initial_epoch = 0
    else:  # a model has to be imported and resumes training
        #custom_objects =  {'PreprocessLayer': PreprocessLayer,'OneHot': OneHot.OneHot}
        logging.info("Loaded model %s" % params['resume'])
        a = Restore(params['resume'],
                    custom_objects=custom_objects,
                    method='h5')
        model = a.model
        initial_epoch = params['initial_epoch']

    model.compile(optimizer=Adam(lr=params['lr']),
                  loss=params['loss_function'],
                  metrics=[
                      tf.keras.metrics.CategoricalAccuracy(),
                      tf.keras.metrics.AUC(multi_label=True),
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall()
                  ])
    model.summary()

    # Generator #
    training_generator = DataGenerator(
        path=parameters.config,
        inputs=parameters.inputs,
        outputs=parameters.outputs,
        inputsLBN=parameters.LBN_inputs if params['n_particles'] > 0 else None,
        cut=parameters.cut,
        weight=parameters.weight,
        batch_size=params['batch_size'],
        state_set='training',
        model_idx=params['model_idx'] if parameters.crossvalidation else None)
    validation_generator = DataGenerator(
        path=parameters.config,
        inputs=parameters.inputs,
        outputs=parameters.outputs,
        inputsLBN=parameters.LBN_inputs if params['n_particles'] > 0 else None,
        cut=parameters.cut,
        weight=parameters.weight,
        batch_size=params['batch_size'],
        state_set='validation',
        model_idx=params['model_idx'] if parameters.crossvalidation else None)

    # Some verbose logging #
    logging.info("Will use %d workers" % parameters.workers)
    logging.warning("Tensorflow location " + tf.__file__)
    if len(tf.config.experimental.list_physical_devices('XLA_GPU')) > 0:
        logging.info("GPU detected")
    #logging.warning(K.tensorflow_backend._get_available_gpus())
    # Fit #
    history = model.fit_generator(
        generator=training_generator,  # Training data from generator instance
        validation_data=
        validation_generator,  # Validation data from generator instance
        epochs=params['epochs'],  # Number of epochs
        verbose=1,
        max_queue_size=parameters.workers * 2,  # Length of batch queue
        callbacks=Callback_list,  # Callbacks
        initial_epoch=
        initial_epoch,  # In case of resumed training will be different from 0
        workers=parameters.
        workers,  # Number of threads for batch generation (0 : all in same)
        shuffle=True,  # Shuffle order at each epoch
        use_multiprocessing=True)  # Needs to be turned on for queuing batches

    # Plot history #
    PlotHistory(loss_history)

    return history, model
Code example #18
                     compression='gzip')
    f.create_dataset('spectr_valid',data=spectr_valid,
                     compression='gzip')
    f.create_dataset('label_valid',data=label_valid,
                     compression='gzip')
    f.create_dataset('spectr_test',data=spectr_test,
                     compression='gzip')
    f.create_dataset('label_test',data=label_test,
                     compression='gzip')
    f.close()
print('file size: %s'%list(os.stat(h5f))[6])

for spectrogram,_ in train_ds.take(1):
  input_shape=spectrogram.shape
num_labels=len(names)
norm_layer=tkp.Normalization()
norm_layer.adapt(train_ds.map(lambda x,_:x))
model=tkm.Sequential([
    tkl.InputLayer(input_shape=input_shape),
    tkp.Resizing(32,32), 
    norm_layer,
    tkl.Conv2D(32,3,activation='relu'),
    tkl.Conv2D(96,3,activation='relu'),
    tkl.MaxPooling2D(),
    tkl.Dropout(.25),
    tkl.Flatten(),
    tkl.Dense(256,activation='relu'),
    tkl.Dropout(.5),
    tkl.Dense(num_labels),
])
model.summary()
Code example #19
def run_whole_thing(out_dir):
    os.makedirs(out_dir, exist_ok=True)

    # Set seed for experiment reproducibility
    seed = 55
    tf.random.set_seed(seed)
    np.random.seed(seed)

    data_dir = pathlib.Path("data/mini_speech_commands")

    if not data_dir.exists():
        # Get the files from external source and put them in an accessible directory
        tf.keras.utils.get_file(
            'mini_speech_commands.zip',
            origin=
            "http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
            extract=True)

    # Convert the binary audio file to a tensor
    def decode_audio(audio_binary):
        audio, _ = tf.audio.decode_wav(audio_binary)

        return tf.squeeze(audio, axis=-1)

    # Get the label (yes, no, up, down, etc) for an audio file.
    def get_label(file_path):
        parts = tf.strings.split(file_path, os.path.sep)

        # Note: You'll use indexing here instead of tuple unpacking to enable this to work in a TensorFlow graph.
        return parts[-2]

    # Create a tuple that has the labeled audio files
    def get_waveform_and_label(file_path):
        label = get_label(file_path)
        audio_binary = tf.io.read_file(file_path)
        waveform = decode_audio(audio_binary)

        return waveform, label

    # Convert audio files to images
    def get_spectrogram(waveform):
        # Padding for files with less than 16000 samples
        zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)

        # Concatenate audio with padding so that all audio clips will be of the
        # same length
        waveform = tf.cast(waveform, tf.float32)
        equal_length = tf.concat([waveform, zero_padding], 0)
        spectrogram = tf.signal.stft(equal_length,
                                     frame_length=255,
                                     frame_step=128)

        spectrogram = tf.abs(spectrogram)

        return spectrogram

    # Label the images created from the audio files and return a tuple
    def get_spectrogram_and_label_id(audio, label):
        spectrogram = get_spectrogram(audio)
        spectrogram = tf.expand_dims(spectrogram, -1)
        label_id = tf.argmax(label == commands)
        return spectrogram, label_id

    # Preprocess any audio files
    def preprocess_dataset(files, autotune, commands):
        # Creates the dataset
        files_ds = tf.data.Dataset.from_tensor_slices(files)

        # Matches audio files with correct labels
        output_ds = files_ds.map(get_waveform_and_label,
                                 num_parallel_calls=autotune)

        # Matches audio file images to the correct labels
        output_ds = output_ds.map(get_spectrogram_and_label_id,
                                  num_parallel_calls=autotune)

        return output_ds

    # Get all of the commands for the audio files
    commands = np.array(tf.io.gfile.listdir(str(data_dir)))
    commands = commands[commands != 'README.md']

    # Get a list of all the files in the directory
    filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')

    # Shuffle the file names so that random bunches can be used as the training, testing, and validation sets
    filenames = tf.random.shuffle(filenames)

    # Create the list of files for training data
    train_files = filenames[:6400]
    # Create the list of files for validation data
    validation_files = filenames[6400:6400 + 800]
    # Create the list of files for test data
    test_files = filenames[-800:]

    autotune = tf.data.AUTOTUNE

    # Get the converted audio files for training the model
    files_ds = tf.data.Dataset.from_tensor_slices(train_files)
    waveform_ds = files_ds.map(get_waveform_and_label,
                               num_parallel_calls=autotune)
    spectrogram_ds = waveform_ds.map(get_spectrogram_and_label_id,
                                     num_parallel_calls=autotune)

    # Preprocess the training, test, and validation datasets
    train_ds = preprocess_dataset(train_files, autotune, commands)
    validation_ds = preprocess_dataset(validation_files, autotune, commands)
    test_ds = preprocess_dataset(test_files, autotune, commands)

    # Batch datasets for training and validation
    batch_size = 64
    train_ds = train_ds.batch(batch_size)
    validation_ds = validation_ds.batch(batch_size)

    # Reduce latency while training
    train_ds = train_ds.cache().prefetch(autotune)
    validation_ds = validation_ds.cache().prefetch(autotune)

    # Build model
    for spectrogram, _ in spectrogram_ds.take(1):
        input_shape = spectrogram.shape

    num_labels = len(commands)

    norm_layer = preprocessing.Normalization()
    norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))

    model = models.Sequential([
        layers.Input(shape=input_shape),
        preprocessing.Resizing(32, 32),
        norm_layer,
        layers.Conv2D(32, 3, activation='relu'),
        layers.Conv2D(64, 3, activation='relu'),
        layers.MaxPooling2D(),
        layers.Dropout(0.25),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(num_labels),
    ])

    model.summary()

    # Configure built model with losses and metrics
    model.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )

    # Finally train the model and return info about each epoch
    EPOCHS = 10
    model.fit(
        train_ds,
        validation_data=validation_ds,
        epochs=EPOCHS,
        callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2),
    )

    # Test the model
    test_audio = []
    test_labels = []

    for audio, label in test_ds:
        test_audio.append(audio.numpy())
        test_labels.append(label.numpy())

    test_audio = np.array(test_audio)
    test_labels = np.array(test_labels)

    # See how accurate the model is when making predictions on the test dataset
    y_pred = np.argmax(model.predict(test_audio), axis=1)
    y_true = test_labels

    test_acc = sum(y_pred == y_true) / len(y_true)

    print(f'Test set accuracy: {test_acc:.0%}')
Code example #20
print(y_train)
"""## Training the model

### Linear regression model
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.layers import InputLayer

print(tf.__version__)
"""Normalisation layer"""

normalizer = preprocessing.Normalization()

normalizer.adapt(np.array(X_train))

print(normalizer.mean.numpy())


def plot_loss(history):
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim([0, 3500])
    plt.xlabel('Epoch')
    plt.ylabel('Error [Shares]')
    plt.legend()
    plt.grid(True)
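
A hedged usage sketch (not in the source): fit a linear model on top of the normalizer adapted above and plot its loss curves with plot_loss. X_train, y_train, normalizer, keras, layers, and tf come from the surrounding snippet; the validation split and epoch count are placeholders.

linear_model = keras.Sequential([normalizer, layers.Dense(units=1)])
linear_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                     loss='mean_absolute_error')
history = linear_model.fit(X_train, y_train,
                           validation_split=0.2,
                           epochs=100,
                           verbose=0)
plot_loss(history)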
Code example #21
import collections
import inspect

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.python.util import nest

CombinerPreprocessingLayer = inspect.getmro(preprocessing.Normalization)[1]
Combiner = inspect.getmro(preprocessing.Normalization()._combiner.__class__)[1]

INT = 'int'
NONE = 'none'
ONE_HOT = 'one-hot'


class CategoricalEncoding(CombinerPreprocessingLayer):
    """Encode the categorical features to numerical features.

    # Arguments
        encoding: A list of strings, which has the same number of elements as the
            columns in the structured data. Each of the strings specifies the
            encoding method used for the corresponding column. Use 'int' for
            categorical columns and 'none' for numerical columns.
    """

    # TODO: Support one-hot encoding.

    def __init__(self, encoding, **kwargs):
        super().__init__(combiner=CategoricalEncodingCombiner(encoding),