def multi_hot_sequences(sequences, dimension):
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1.0
    return results


train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)

plt.plot(train_data[0])
plt.show()

# Regularize by reducing capacity: use fewer units in each layer to lower the model's memorization capacity; with too little capacity, the model will struggle to fit the training data (a reduced-capacity sketch follows the baseline fit below)

baseline_model = keras.Sequential()
baseline_model.add(
    keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS, )))
baseline_model.add(keras.layers.Dense(16, activation=tf.nn.relu))
baseline_model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))

baseline_model.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy', 'binary_crossentropy'])
baseline_model.summary()
baseline_history = baseline_model.fit(train_data,
                                      train_labels,
                                      epochs=20,
                                      batch_size=512,
                                      validation_data=(test_data, test_labels),
                                      verbose=2)
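
# A minimal sketch (an assumption, not part of the original snippet) of the reduced-capacity
# model described in the comment above: the same architecture with far fewer units per layer.
smaller_model = keras.Sequential([
    keras.layers.Dense(4, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(4, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
smaller_model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy', 'binary_crossentropy'])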
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

train_images = train_images / 255.0

test_images = test_images / 255.0

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),  # input data
    keras.layers.Dense(128, activation='relu'),  # hidden layer
    keras.layers.Dense(10, activation='softmax') # output layer
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=10)  # run through the training data 10 times

#evaluating the model

test_loss, test_acc = model.evaluate(test_images,  test_labels, verbose=1)

print('Test accuracy:', test_acc)

# prediction
Example n. 3
strip_chars = "!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"
strip_chars = strip_chars.replace("<", "")
strip_chars = strip_chars.replace(">", "")

vectorization = TextVectorization(
    max_tokens=VOCAB_SIZE,
    output_mode="int",
    output_sequence_length=SEQ_LENGTH,
    standardize=custom_standardization,
)
vectorization.adapt(text_data)

# Data augmentation for image data
image_augmentation = keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.2),
    layers.RandomContrast(0.3),
])
"""
## Building a `tf.data.Dataset` pipeline for training

We will generate pairs of images and corresponding captions using a `tf.data.Dataset` object.
The pipeline consists of two steps:

1. Read the image from the disk
2. Tokenize all the five captions corresponding to the image
"""


def decode_and_resize(img_path):
    img = tf.io.read_file(img_path)
    img = tf.image.decode_jpeg(img, channels=3)
    return tf.image.resize(img, IMAGE_SIZE)  # IMAGE_SIZE is assumed to be defined earlier in the example
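

# A minimal sketch (an assumption, not part of the original snippet) of the two-step pipeline
# described above: read/resize each image and tokenize its five captions. BATCH_SIZE is assumed
# to be defined elsewhere in the example.
def process_input(img_path, captions):
    return decode_and_resize(img_path), vectorization(captions)


def make_dataset(images, captions):
    dataset = tf.data.Dataset.from_tensor_slices((images, captions))
    dataset = dataset.map(process_input, num_parallel_calls=tf.data.AUTOTUNE)
    return dataset.shuffle(256).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)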
Example n. 4
plt.show()

# Building the neural network requires configuring the layers of the model, then compiling it

# The basic building block of a neural network is the layer
# Layers extract representations from the data fed into them
# Most of deep learning consists of chaining together simple layers
# Most layers have parameters that are learned during training

# Keras is a high-level API for building and training deep learning models
# Layers can be composed and extended with custom functionality

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),       # Flattens the 2d image array into a 1d array
    keras.layers.Dense(128, activation=tf.nn.relu),    # Densely connected layer with 128 nodes (neurons)
    keras.layers.Dense(10, activation=tf.nn.softmax)   # 10-node softmax layer: returns an array of 10 probability scores
])

# Input shape has to be defined on the first layer in sequential model

# Compile step below:
# Optimizer: how the model is updated based on the data it sees and the loss function

# Loss function: measures how accurate the model is during training.
# Goal is to minimize this function to steer the model in the right direction

# Metrics: here we track how many images are labeled correctly
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',  # completed here assuming integer labels, as in the other Fashion-MNIST snippets
              metrics=['accuracy'])
Example n. 5
# In this example, we build a simple model to learn to do xor operation.
#%%
import tensorflow as tf
from tensorflow import keras
import numpy as np
from random import uniform

#%%
# Definition of the model.
model = keras.Sequential([
    keras.layers.Dense(4, activation='relu', input_shape=(2, )),
    keras.layers.Dense(4, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

#%%
model.compile(optimizer='sgd',
              loss='binary_crossentropy',
              metrics=['accuracy'])

#%%
data = []
for i in range(0, 200):
    data.append([uniform(-10, 10), uniform(-10, 10)])

data = np.array(data)


def xor(pair):
    x1, x2 = pair
    if (x1 >= 0 and x2 < 0) or (x1 < 0 and x2 >= 0):
        return 1
    return 0
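

# A minimal sketch (an assumption, not part of the original snippet): label the random
# points with xor() and train the model defined above on them.
labels = np.array([xor(pair) for pair in data])
model.fit(data, labels, epochs=100, batch_size=16, verbose=0)
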
        rotation_range=2, horizontal_flip=True, zoom_range=.1)

    train_generator.fit(x_train)
    test_generator.fit(x_test)

    lrr = K.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                        factor=.01,
                                        patience=3,
                                        min_lr=1e-5)

    base_model_1 = K.applications.VGG19(include_top=False,
                                        weights='imagenet',
                                        input_shape=(32, 32, 3),
                                        classes=y_train.shape[1])

    model_1 = K.Sequential()
    model_1.add(base_model_1)
    model_1.add(K.layers.Flatten())
    model_1.add(K.layers.Dense(1024, activation=('relu'), input_dim=512))
    model_1.add(K.layers.Dense(512, activation=('relu')))
    model_1.add(K.layers.Dense(256, activation=('relu')))
    model_1.add(K.layers.Dropout(.3))
    model_1.add(K.layers.Dense(128, activation=('relu')))
    model_1.add(K.layers.Dropout(.2))
    model_1.add(K.layers.Dense(10, activation=('softmax')))

    model_1.summary()

    batch = 100
    epochs = 1
    learn_rate = .001
Example n. 7
def create_model(X_train, Y_train, X_test, Y_test):
    initial_size = {{choice([12, 32, 64, 128])}}
    model = keras.Sequential([
        keras.layers.Conv2D(filters=initial_size,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu',
                            input_shape=(192, 192, 1)),
        keras.layers.Conv2D(filters=initial_size,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.MaxPooling2D(pool_size=(2, 2), padding='valid'),
        keras.layers.Conv2D(filters=initial_size * 2,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size * 2,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size * 2,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.MaxPooling2D(pool_size=(2, 2), padding='valid'),
        keras.layers.Conv2D(filters=initial_size * 4,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size * 4,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size * 4,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.MaxPooling2D(pool_size=(2, 2), padding='valid'),
        keras.layers.Conv2D(filters=initial_size * 8,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size * 8,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.Conv2D(filters=initial_size * 8,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            activation='relu'),
        keras.layers.MaxPooling2D(pool_size=(2, 2),
                                  strides=(2, 2),
                                  padding='valid'),

        # first flatten
        keras.layers.GlobalAveragePooling2D(),
        keras.layers.Flatten(),
        keras.layers.Dense(8, activation=tf.nn.softmax)
    ])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    result = model.fit(X_train,
                       Y_train,
                       validation_split=0.1,
                       epochs=4,
                       verbose=1,
                       batch_size=32,
                       shuffle=True)

    print(result.history)
    validation_acc = np.amax(result.history['val_accuracy'])
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
Example n. 8
train_stats = train_stats.transpose()

train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')


def norm(x):
    return (x - train_stats['mean']) / train_stats['std']


normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)

model = keras.Sequential([
    keras.layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
    keras.layers.Dense(64, activation=tf.nn.relu),
    keras.layers.Dense(1)
])

optimizer = tf.keras.optimizers.RMSprop(0.001)

model.compile(loss='mean_squared_error',
              optimizer=optimizer,
              metrics=['mean_absolute_error', 'mean_squared_error'])


# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0:
            print('')
        print('.', end='')
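

# A minimal sketch (an assumption, not part of the original snippet) of how the callback
# might be used with the model above; EPOCHS is a hypothetical constant.
EPOCHS = 1000
history = model.fit(normed_train_data, train_labels,
                    epochs=EPOCHS, validation_split=0.2,
                    verbose=0, callbacks=[PrintDot()])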
Example n. 9
    def run(data_dir, **kwargs):

        # Load configuration file for the FootDownNetwork
        config_path = os.path.join(
            os.path.dirname(__file__),
            os.path.pardir,
            "module",
            "platform",
            "darwin",
            "SensorFilter",
            "data",
            "config",
            "FootDownNetwork.yaml",
        )
        with open(config_path, "r") as f:
            config = yaml.safe_load(f)

            lr_duplicate = config["train"]["lr_duplicate"]
            foot_delta = config["train"]["foot_delta"]
            servos = config["network"]["input"]["servos"]
            fields = config["network"]["input"]["fields"]
            use_accel = config["network"]["input"]["accelerometer"]
            use_gyro = config["network"]["input"]["gyroscope"]

        print("Loading data from NBS files")
        group_xs = []
        group_ys = []
        group_desc = []
        for state in tqdm(os.listdir(data_dir), dynamic_ncols=True,
                          unit="dir"):
            state_dir = os.path.join(data_dir, state)
            if os.path.isdir(state_dir):
                state_info = re.match(
                    "left_(up|down|mixed)_right_(up|down|mixed)", state)
                if state_info is not None:
                    l_state = state_info.group(1)
                    r_state = state_info.group(2)

                    for group in tqdm(os.listdir(state_dir),
                                      dynamic_ncols=True,
                                      unit="group"):
                        group_dir = os.path.join(state_dir, group)
                        if os.path.isdir(group_dir):

                            # Gather all groups into a single dataset for this specific type
                            xs = []
                            ys = []

                            for nbs in tqdm(os.listdir(group_dir),
                                            dynamic_ncols=True,
                                            unit="file"):
                                nbs_path = os.path.join(group_dir, nbs)
                                if nbs_path.endswith(
                                    (".nbs.gz", ".nbs", ".nbz")):

                                    # Load the file
                                    x, y = dataset(
                                        nbs_path,
                                        r_state,
                                        l_state,
                                        servos,
                                        fields,
                                        lr_duplicate,
                                        foot_delta,
                                        use_accel,
                                        use_gyro,
                                    )

                                    # Cut off the first and last 10% to account for nonsense setup and teardown data
                                    x = x[len(x) // 10:-len(x) // 10]
                                    y = y[len(y) // 10:-len(y) // 10]

                                    xs.append(x)
                                    ys.append(y)

                            group_desc.append(group)
                            group_xs.append(np.concatenate(xs, axis=0))
                            group_ys.append(np.concatenate(ys, axis=0))

        # Find the largest group size and replicate random elements in the other groups to match it
        mx = max([x.shape[0] for x in group_xs])

        # Oversample our categories so they are the same size
        xs = []
        ys = []
        for x, y in zip(group_xs, group_ys):
            idx = np.random.randint(0, len(x), mx)
            xs.append(x[idx])
            ys.append(y[idx])

        # Join into single dataset
        xs = np.concatenate(xs)
        ys = np.concatenate(ys)

        # Random shuffle the data
        idx = np.arange(len(xs))
        np.random.shuffle(idx)
        xs = xs[idx]
        ys = ys[idx]

        # Split into training and validation
        split = int(len(xs) * 0.8)
        train_x = xs[:split]
        train_y = ys[:split]
        valid_x = xs[split:]
        valid_y = ys[split:]

        # Build our model
        model = keras.Sequential([
            keras.layers.Dense(8, activation=tf.nn.relu),
            keras.layers.Dense(2, activation=tf.nn.sigmoid)
        ])

        model.compile(
            optimizer=keras.optimizers.Adam(),
            loss=keras.losses.BinaryCrossentropy(),
            metrics=[keras.metrics.BinaryAccuracy()],
        )

        history = model.fit(
            train_x,
            train_y,
            batch_size=4096,
            epochs=1000,
            validation_data=(valid_x, valid_y),
            callbacks=[keras.callbacks.EarlyStopping(patience=5)],
        )

        print("Final Accuracy", history.history["val_binary_accuracy"][-1])

        for desc, x, y in zip(group_desc, group_xs, group_ys):
            print("Evaluating", desc)
            model.evaluate(x, y)

        config["network"] = {
            "input": {
                "servos": servos,
                "fields": fields,
                "accelerometer": use_accel,
                "gyroscope": use_gyro
            },
            "layers": [],
        }

        for layer in model.layers:
            h = layer.get_weights()

            weights = np.array(h[0]).tolist()
            biases = np.array(h[1]).tolist()

            config["network"]["layers"].append({
                "weights": weights,
                "biases": biases
            })

        with open(config_path, "w") as f:
            f.write(yaml.dump(config, width=120))
(train_images, train_labels), (test_images, test_labels) = data.load_data()
print("Number train images:\t"+str(len(train_images)))
print("Number test images:\t"+str(len(test_images)))

# Train the model
if(not os.path.exists(model_file_name)):
    # Scale pixel values to [0, 1] to make them easier for the model to work with; matplotlib will still show the same image
    train_images = train_images/255.0
    test_images = test_images/255.0

    # Since we are working with 28x28 images we are going to have 784 input nodes
    # and since we have 10 labels we are going to have 10 output nodes.
    # We are going to have a hidden layer with 128 nodes.
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28,28)),
        keras.layers.Dense(128, activation="relu"), # rectifier linear unit
        keras.layers.Dense(10, activation="softmax") # probability for each given class.
        ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    model.fit(train_images, train_labels, epochs=10)  # how many times the model will see the same images
    # Save model in a file
    model.save(model_file_name)
    # Evaluate accuracy
    test_loss, test_acc = model.evaluate(test_images, test_labels)
    print("Tested Acc: ", test_acc)
# Load the model
else:
    model = keras.models.load_model(model_file_name)

# Show the user what the model predicts for the test inputs
prediction = model.predict(test_images)
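
# A minimal sketch (an assumption, not part of the original snippet) of reporting one
# prediction, reusing the Fashion-MNIST class_names list from the earlier snippets
# (assuming numpy is imported as np).
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print("Predicted:", class_names[np.argmax(prediction[0])],
      "| Actual:", class_names[test_labels[0]])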
 def __create_base_model(self, **kwargs):
     base_model = keras.Sequential([])
     return base_model
print("x_train_shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Build the model 

model = keras.Sequential(
	[
	keras.Input(shape=input_shape),
	layers.Conv2D(32, kernel_size=(3,3), activation="relu"),
	layers.MaxPooling2D(pool_size=(2,2)),
	layers.Conv2D(64, kernel_size=(3,3), activation="relu"),
	layers.MaxPooling2D(pool_size=(2,2)),
	layers.Flatten(),
	layers.Dropout(0.5),
	layers.Dense(num_classes, activation="softmax"),
	]
)

model.summary()

# train the model
batch_size = 128
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer = "adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size = batch_size, epochs=epochs, validation_split=0.1)

# Evaluation 
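# A minimal sketch (an assumption, not part of the original snippet) of the evaluation
# step for the model above, using the x_test / y_test arrays prepared earlier.
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])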
Example n. 13
    def __init__(self,
                 width,
                 height,
                 dim_action,
                 gamma=0.9,
                 load_name=None,
                 use_prior=False,
                 use_image=False):
        # tf.enable_eager_execution()

        self.width = width
        self.height = height
        # tf.logging.set_verbosity(tf.logging.ERROR)
        self.priority = use_prior
        if use_image:
            self.state_space = (self.height, self.width, 1)
        else:
            self.state_space = (5, )
        self.action_space = dim_action
        self.use_image = use_image
        self.gamma = gamma
        self.memory = ReplayMemory(10000)

        # self.priority_memory = PrioritizedReplayBuffer(10000,0.5)
        self.epsilon = EPS_START
        # self.pesos = np.ones(BATCH_SIZE, dtype=np.float32)

        # self.global_step = tfe.Variable(0)
        # self.loss_avg = tfe.metrics.Mean()
        self.mapeo = {
            "%": 10,
            "<": 30,
            ">": 30,
            "v": 30,
            "^": 30,
            ".": 150,
            "G": 90,
            " ": 1,
            "o": 10
        }
        self.escala = 255
        if self.use_image:
            self.model = keras.Sequential([
                keras.layers.Conv2D(32, (3, 3), input_shape=self.state_space),
                keras.layers.BatchNormalization(),
                keras.layers.Activation("relu"),
                keras.layers.Conv2D(64, (3, 3), strides=[2, 2],
                                    use_bias=False),
                keras.layers.BatchNormalization(),
                keras.layers.Activation("relu"),
                keras.layers.Conv2D(64, (3, 3), use_bias=False),
                keras.layers.BatchNormalization(),
                keras.layers.Activation("relu"),
                keras.layers.Flatten(),
                keras.layers.Dense(7 * 7 * 64,
                                   activation=tf.nn.tanh,
                                   use_bias=False),
                keras.layers.Dense(512, activation=tf.nn.tanh, use_bias=False),
                # keras.layers.Dropout(rate=0.6),
                keras.layers.Dense(self.action_space, activation="linear")
            ])
            if not use_prior:
                self.model.compile(loss=tf.compat.v1.losses.huber_loss,
                                   optimizer=keras.optimizers.RMSprop(
                                       learning_rate=0.0002, momentum=0.01))

        else:
            self.model = keras.Sequential([
                # keras.layers.Dense(128, activation=tf.nn.tanh, use_bias=False, input_shape=(self.height * self.width,)),
                keras.layers.Dense(32,
                                   activation=tf.nn.tanh,
                                   use_bias=False,
                                   input_shape=self.state_space),
                # keras.layers.Dropout(rate=0.6),
                keras.layers.Dense(self.action_space, activation="linear")
            ])
            self.model.compile(
                loss=lambda y_t, y_pred: self.func(y_pred=y_pred, y_true=y_t),
                optimizer=tf.train.RMSPropOptimizer(0.01))
            if not use_prior:
                self.model.compile(loss="mse",
                                   optimizer=tf.train.RMSPropOptimizer(0.01))

        if load_name is not None:
            self.model = keras.models.load_model(load_name)
Example n. 14
## Inference

An important feature of the
[preprocessing layers provided by Keras](https://keras.io/guides/preprocessing_layers/)
is that they can be included inside a `tf.keras.Model`. We will export an inference model
by including the `text_vectorizer` layer on top of `shallow_mlp_model`. This will
allow our inference model to directly operate on raw strings.

**Note** that during training it is always preferable to use these preprocessing
layers as a part of the data input pipeline rather than the model to avoid
surfacing bottlenecks for the hardware accelerators. This also allows for
asynchronous data processing.
"""

# Create a model for inference.
model_for_inference = keras.Sequential([text_vectorizer, shallow_mlp_model])

# Create a small dataset just for demoing inference.
inference_dataset = make_dataset(test_df.sample(100), is_train=False)
text_batch, label_batch = next(iter(inference_dataset))
predicted_probabilities = model_for_inference.predict(text_batch)

# Perform inference.
for i, text in enumerate(text_batch[:5]):
    label = label_batch[i].numpy()[None, ...]
    print(f"Abstract: {text}")
    print(f"Label(s): {invert_multi_hot(label[0])}")
    predicted_proba = [proba for proba in predicted_probabilities[i]]
    top_3_labels = [
        x for _, x in sorted(
            zip(predicted_probabilities[i], lookup.get_vocabulary()),
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np

def print_title(title_sent):
    print('\n', title_sent)
    return

print_title('Example: when to use a Sequential model')
# Define Sequential model with 3 layers
model = keras.Sequential(
    [
        layers.Dense(2, activation="relu", name="layer1"),
        layers.Dense(3, activation="relu", name="layer2"),
        layers.Dense(4, name="layer3"),
    ]
)
# Call model on a test input
x = tf.ones((3, 3))
y = model(x)

# Equivalent to the following:
# Create 3 layers
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")

# Call layers on a test input
x = tf.ones((3, 3))
y = layer3(layer2(layer1(x)))
Example n. 16
 def _createSimpleSequentialModel(self):
     model = keras.Sequential()
     model.add(keras.layers.Reshape([2, 3], input_shape=[6]))
     model.add(keras.layers.LSTM(10))
     model.add(keras.layers.Dense(1, activation='sigmoid'))
     return model
Example n. 17
    projection_dim * 2,
    projection_dim,
]  # Size of the transformer layers
transformer_layers = 8
mlp_head_units = [2048,
                  1024]  # Size of the dense layers of the final classifier
"""
## Use data augmentation
"""

data_augmentation = keras.Sequential(
    [
        layers.experimental.preprocessing.Normalization(),
        layers.experimental.preprocessing.Resizing(image_size, image_size),
        layers.experimental.preprocessing.RandomFlip("horizontal"),
        layers.experimental.preprocessing.RandomRotation(factor=0.02),
        layers.experimental.preprocessing.RandomZoom(height_factor=0.2,
                                                     width_factor=0.2),
    ],
    name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
"""
## Implement multilayer perceptron (MLP)
"""


def mlp(x, hidden_units, dropout_rate):
    for units in hidden_units:
        x = layers.Dense(units, activation=tf.nn.gelu)(x)
Example n. 18
 def _createNestedSequentialModel(self):
     model = keras.Sequential()
     model.add(keras.layers.Dense(6, input_shape=[10], activation='relu'))
     model.add(self._createSimpleSequentialModel())
     return model
Example n. 19
test_images, test_labels = data.train.next_batch(10000)

train_images = np.array([i.reshape(28, 28) for i in train_images])
test_images = np.array([i.reshape(28, 28) for i in test_images])

#For each clothing object, the name is going to be associated with its index
#I.e. if the model returns 3 we know it's a dress
class_names = [
    'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt',
    'Sneaker', 'Bag', 'Ankle boot'
]

#Creates the model, first flattens all inputs from matrices to arrays, then adds two layers to the NN
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

print("shape of the training data", train_images.shape)

#Compiles the model so it can be used; the loss is the function minimized during training
#Metrics are what is monitored during training
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

#Train the model with the given images and their respective labels
#Epochs: number of times the data is passed through the neural network
model.fit(train_images, train_labels, epochs=5, verbose=0)
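
# A minimal sketch (an assumption, not part of the original snippet) of using class_names
# as described in the comment above: map the highest-probability output back to a name.
predictions = model.predict(test_images)
print("Predicted:", class_names[np.argmax(predictions[0])],
      "| Actual:", class_names[test_labels[0]])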
plt.show()

cpu_utilization_values = cpu_utilization.value.to_list()
# print(cpu_utilization_values)

normalized_cpu_utilization_values, training_mean, training_std = normalize(cpu_utilization_values)

x_train = create_sequences(normalized_cpu_utilization_values, time_steps=32)
print("Training data shape: ", x_train.shape)

# Conv1D based auto-encoder model.
model = keras.Sequential([layers.Input(shape=(x_train.shape[1], x_train.shape[2])),
                          layers.Conv1D(filters=18, kernel_size=7, padding="same", strides=2, activation="relu"),
                          layers.Dropout(rate=0.2),
                          layers.Conv1D(filters=9, kernel_size=7, padding="same", strides=2, activation="relu"),
                          layers.Conv1DTranspose(filters=9, kernel_size=7, padding="same", strides=2,
                                                 activation="relu"),
                          layers.Dropout(rate=0.2),
                          layers.Conv1DTranspose(filters=18, kernel_size=7, padding="same", strides=2,
                                                 activation="relu"),
                          layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same")])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
model.summary()

history = model.fit(x_train, x_train, epochs=50, batch_size=128, validation_split=0.1,
                    callbacks=[keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min")])

# Plotting the training and validation losses.
plt.figure(figsize=(15, 7))
plt.plot(history.history["loss"], label="Training Loss")
plt.plot(history.history["val_loss"], label="Validation Loss")
plt.legend()
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# Data Augmentation
data_augmentation = keras.Sequential([
    layers.experimental.preprocessing.RandomFlip("horizontal",
                                                 input_shape=(img_height,
                                                              img_width, 3)),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
])
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
    for i in range(9):
        augmented_images = data_augmentation(images)
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(augmented_images[0].numpy().astype("uint8"))
        plt.axis("off")
plt.show()

sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)
Example n. 22
# indices = [3, 40, 600, 800]
# class_names = ['Tshirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Boot']

# for i in indices:
#     image = train_images[i]
#     image = np.array(image, dtype='float')
#     pixels = image.reshape((28, 28))
#     im = Image.fromarray(pixels).convert("L")
#     im.save("images/" + str(class_names[train_labels[i]]) + str(i) + ".jpeg")

train_labels = tf.one_hot(train_labels, 10)
test_labels = tf.one_hot(test_labels, 10)

model = keras.Sequential([
    keras.layers.Reshape((28, 28, 1), input_shape=(28, 28)),
    keras.layers.Conv2D(8, (3, 3), activation=tf.nn.relu),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=20, steps_per_epoch=10)

test_loss, test_acc = model.evaluate(test_images, test_labels, steps=10)

print('Test accuracy:', test_acc)
Example n. 23
from tensorflow.keras import layers

# Download https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz and extract it to C:\Users\codewj\.keras\datasets\cifar-10-batches-py
cifar = keras.datasets.cifar10  # 10-class dataset
(train_image, train_label), (test_image, test_label) = cifar.load_data()
print(
    train_image.shape,
    test_image.shape,
)  # (50000, 32, 32, 3) (10000, 32, 32, 3)
print(train_label)

# Normalize to the [0, 1] range
train_image = train_image / 255
test_image = test_image / 255

model = keras.Sequential()
model.add(layers.Conv2D(64, (3, 3), activation='relu',
                        input_shape=(32, 32, 3)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D())
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
"""
BatchNormalization:
    将该层特征值分布重新拉回标准正态分布,特征值将落在激活函数对于输入较为敏感的区间,输入的小变化可导致损失函数较大的变化,使得梯度变大,避免梯度消失,同时也可加快收敛。
    需要计算均值与方差,不适合动态网络或者RNN。计算均值方差依赖每批次,因此数据最好足够打乱
"""

model.add(layers.BatchNormalization())
# train_images = train_images[:1000].reshape(-1, 28, 28, 1) / 255.0 #.reshape(-1, 28 * 28, 1) / 255.0
# test_images = test_images[:1000].reshape(-1, 28, 28, 1) / 255.0 #.reshape(-1, 28 * 28, 1) / 255.0

print('...creating model...')

model = keras.Sequential([
    keras.layers.Conv2D(32,
                        kernel_size=(5, 5),
                        strides=(1, 1),
                        activation='relu',
                        input_shape=(TestImageGen.size_th,
                                     TestImageGen.size_th, 1),
                        name='Conv2D_first'),
    keras.layers.MaxPooling2D(pool_size=(2, 2),
                              strides=(2, 2),
                              name='Pooler_first'),
    keras.layers.Dropout(0.1, noise_shape=None, seed=None, name='Dropper_one'),
    keras.layers.Conv2D(64, (5, 5), activation='relu', name='Conv2D_second'),
    keras.layers.MaxPooling2D(pool_size=(2, 2), name='Pooler_second'),
    keras.layers.Dropout(0.1, noise_shape=None, seed=None, name='Dropper_two'),
    keras.layers.Flatten(name='Flattener'),
    keras.layers.Dense(1000, activation='relu', name='Dense_Relu'),
    keras.layers.Dense(TestImageGen.num_classes,
                       activation='softmax',
                       name='Dense_Softmax')
])

model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
from tensorflow import keras
import numpy as np
'''
This NN learns simple relationships between two series of numbers. 
In this case, the second series of numbers is always three times the first. 
Once this relationship is learned, outputs for new inputs (to_predict) can be predicted by the model.
'''

model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')

xs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ys = [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]

model.fit(xs, ys, epochs=4000)

to_predict = 50
result = model.predict([to_predict])
rounded_result = np.round(result[0][0], 2)

print(50 * '-')

print(rounded_result)
Example n. 26
test_images = test_images / 255.0

plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=10)

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)

print('\nTest accuracy:', test_acc)

predictions = model.predict(test_images)
Example n. 27
print("encoder.decode(train_example)", encoder.decode(train_example))

BUFFER_SIZE = 1000

train_batches = (train_data.shuffle(BUFFER_SIZE).padded_batch(
    32, train_data.output_shapes))

test_batches = (test_data.padded_batch(32, train_data.output_shapes))

for example_batch, label_batch in train_batches.take(2):
    print("Batch shape:", example_batch.shape)
    print("label shape:", label_batch.shape)

model = keras.Sequential([
    keras.layers.Embedding(encoder.vocab_size, 16),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(1, activation='sigmoid')
])

model.summary()

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(train_batches,
                    epochs=10,
                    validation_data=test_batches,
                    validation_steps=30)

loss, accuracy = model.evaluate(test_batches)
Example n. 28
            batch_graph = tfg.BatchGraph.from_graphs(batch_graph_list)
            # print("num_nodes: ", batch_graph.num_nodes)
            yield batch_graph

        if not infinite:
            break


batch_size = 100

drop_rate = 0.2
gin0 = tfg.layers.GIN(100, activation=tf.nn.relu)
gin1 = tfg.layers.GIN(100, activation=tf.nn.relu)
mlp = keras.Sequential([
    keras.layers.Dense(50),
    keras.layers.Dropout(drop_rate),
    keras.layers.Dense(num_classes)
])
# dense = keras.layers.Dense(num_classes)


def forward(batch_graph, training=False, pooling="sum"):
    # GCN Encoder
    h = gin0([batch_graph.x, batch_graph.edge_index, batch_graph.edge_weight])
    h = gin1([h, batch_graph.edge_index, batch_graph.edge_weight])

    # Pooling
    if pooling == "mean":
        h = tfg.nn.mean_pool(h, batch_graph.node_graph_index)
    elif pooling == "sum":
        h = tfg.nn.sum_pool(h, batch_graph.node_graph_index)


def multi_hot_sequences(sequences, dimension):
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1
    return results


train_data = multi_hot_sequences(train_data, NUM_WORDS)
test_data = multi_hot_sequences(test_data, NUM_WORDS)

plt.plot(train_data[0])

# %%

baseline_model = keras.Sequential([
    # `input_shape` is only required here so that `.summary` works.
    keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])

baseline_model.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy', 'binary_crossentropy'])

baseline_model.summary()

baseline_history = baseline_model.fit(train_data,
                                      train_labels,
                                      epochs=20,
                                      batch_size=512,
                                      validation_data=(test_data, test_labels),
                                      verbose=2)
Example n. 30
#   keras.layers.Conv2D(64, (3,3), activation='relu'),
#   keras.layers.MaxPooling2D(2,2),
#   keras.layers.Flatten(),
#   keras.layers.Dropout(0.5),
#   keras.layers.Dense(2048, activation='relu'),
#   keras.layers.Dense(2, activation="softmax")
# ])

model = keras.Sequential([
  keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=shape),
  keras.layers.MaxPooling2D(),
  keras.layers.Conv2D(128, (3,3), activation='relu'),
  keras.layers.MaxPooling2D(),
  keras.layers.Conv2D(192, (3,3), activation='relu'),
  keras.layers.MaxPooling2D(),
  keras.layers.Conv2D(256, (3,3), activation='relu'),
  keras.layers.MaxPooling2D(),
  keras.layers.Flatten(),
  keras.layers.Dropout(0.5),
  keras.layers.Dense(1024, activation='relu'),
  keras.layers.Dense(1024, activation='relu'),
  keras.layers.Dense(2, activation="softmax")
])
filepath = "saved-model.h5"

#model.summary()
#model.compile(optimizer='adam', loss="sparse_categorical_crossentropy", metrics=['accuracy'])
#model.fit_generator(datagen.flow(train_images,train_labels,shuffle=True),epochs=30,validation_data=(test_images,test_labels),callbacks = [keras.callbacks.EarlyStopping(patience=8,verbose=1,restore_best_weights=True),keras.callbacks.ReduceLROnPlateau(factor=0.5,patience=3,verbose=1),keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)])

chara_model = keras.models.load_model('saved-model.h5')
chara_model.compile(optimizer='adam', loss="sparse_categorical_crossentropy", metrics=['accuracy'])