Example #1
# imports and W&B setup (assumed preamble, following Example #2, so the snippet runs standalone)
import wandb
from wandb.wandb_keras import WandbKerasCallback
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Flatten, Dense
from keras.utils import np_utils

run = wandb.init()
config = run.config
config.hidden_nodes = 100
config.optimizer = 'adam'  # assumed default; the source sets this via the W&B config
config.epochs = 10         # assumed default

# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_width = X_train.shape[1]
img_height = X_train.shape[2]

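# scale pixel values to the [0, 1] range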
X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')
X_test /= 255.

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
num_classes = y_train.shape[1]

y_test = np_utils.to_categorical(y_test)

# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(config.hidden_nodes, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=config.optimizer,
              metrics=['accuracy'])

# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          callbacks=[WandbKerasCallback()], epochs=config.epochs)
Example #2
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Flatten, Dense
from keras.utils import np_utils
import wandb
from wandb.wandb_keras import WandbKerasCallback

# logging code
run = wandb.init()
config = run.config

# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()

img_width = X_train.shape[1]
img_height = X_train.shape[2]

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

num_classes = y_train.shape[1]
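# sanity-check the one-hot encoding of a single label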
print(y_train[1])
# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(num_classes, activation='softmax', kernel_initializer='zeros'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

# Fit the model
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test),
          batch_size=200, callbacks=[WandbKerasCallback()])
Example #3
            sentence = text[start_index:start_index + maxlen]
            generated += sentence
            print('----- Generating with seed: "' + sentence + '"')
            sys.stdout.write(generated)

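            # generate 50 characters, one prediction at a time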
            for i in range(50):
                x_pred = np.zeros((1, maxlen, len(chars)))
                for t, char in enumerate(sentence):
                    x_pred[0, t, char_indices[char]] = 1.

                preds = model.predict(x_pred, verbose=0)[0]
                next_index = sample(preds, diversity)
                next_char = indices_char[next_index]

                generated += next_char
                sentence = sentence[1:] + next_char

                sys.stdout.write(next_char)
                sys.stdout.flush()
            print()


# train the model, output generated text after each iteration
filepath = str(run.dir) + "/model-{epoch:02d}-{loss:.4f}.hdf5"

model.fit(x,
          y,
          batch_size=config.hidden_nodes,  # the source reuses hidden_nodes as the batch size
          epochs=1000,
          callbacks=[SampleText(), WandbKerasCallback()])
Example #4
import cv2
import numpy as np
import pandas as pd
import wandb
from wandb.wandb_keras import WandbKerasCallback

run = wandb.init()
config = run.config
# parameters
config.batch_size = 32
config.num_epochs = 10
config.dense_layer_size = 100
config.img_width = 48
config.img_height = 48
config.first_layer_conv_width = 3
config.first_layer_conv_height = 3
input_shape = (48, 48, 1)

wandb_callback = WandbKerasCallback(save_model=False)


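# FER2013 stores each 48x48 face as a space-separated pixel string in the 'pixels' column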
def load_fer2013():
    data = pd.read_csv("fer2013/fer2013.csv")
    pixels = data['pixels'].tolist()
    width, height = 48, 48
    faces = []
    for pixel_sequence in pixels:
        face = [int(pixel) for pixel in pixel_sequence.split(' ')]
        face = np.asarray(face).reshape(width, height)
        face = cv2.resize(face.astype('uint8'), (width, height))
        faces.append(face.astype('float32'))

    faces = np.asarray(faces)
Example #5
X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')
X_test /= 255.

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

num_classes = y_train.shape[1]

# TensorBoard logging
tensorboard = TensorBoard(log_dir="logs")

# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()

# Fit the model
history = model.fit(X_train, y_train, epochs=config.epochs,
                    batch_size=config.batch_size, validation_data=(X_test, y_test),
                    callbacks=[tensorboard, WandbKerasCallback()])

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)

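# persist the final evaluation scores to disk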
with open('metrics.json', 'w') as outfile:
    json.dump(scores, outfile)
Example #6
import wandb
from wandb.wandb_keras import WandbKerasCallback
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import Flatten, Dense
from keras.utils import np_utils

run = wandb.init()
config = run.config
config.epochs = 10

# load data
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

img_width = X_train.shape[1]
img_height = X_train.shape[2]
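# human-readable Fashion-MNIST class names, indexed by label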
labels = ["T-shirt/top", "Trouser", "Pullover", "Dress",
          "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

num_classes = y_train.shape[1]

# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(num_classes))
model.compile(loss='mse', optimizer='adam',
              metrics=['accuracy'])

# Fit the model
model.fit(X_train, y_train, epochs=config.epochs, validation_data=(X_test, y_test),
          callbacks=[WandbKerasCallback(data_type="image", labels=labels)])



Example #7
opt = keras.optimizers.SGD(lr=config.learn_rate)

# Let's train the model using SGD
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255


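# augment training images with random horizontal shifts of up to 10%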
datagen = ImageDataGenerator(width_shift_range=0.1)


datagen.fit(x_train)

# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=config.batch_size),
                    steps_per_epoch=x_train.shape[0] // config.batch_size,
                    epochs=config.epochs,
                    validation_data=(x_test, y_test),
                    workers=4,
                    callbacks=[WandbKerasCallback(data_type="image", labels=class_names)])


Example #8
model.add(Flatten())
model.add(Dense(config.dense_layer_nodes, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

opt = keras.optimizers.SGD(lr=config.learn_rate)

# Let's train the model using SGD
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

datagen = ImageDataGenerator(width_shift_range=0.1)

datagen.fit(x_train)

# Fit the model on the batches generated by datagen.flow().
model.fit_generator(
    datagen.flow(x_train, y_train, batch_size=config.batch_size),
    steps_per_epoch=x_train.shape[0] // config.batch_size,
    epochs=config.epochs,
    validation_data=(x_test, y_test),
    workers=4,
    callbacks=[WandbKerasCallback(validation_data=x_test, labels=class_names)])
Example #9
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D((2, 2)))
model.add(Conv2D(1, (3, 3), activation='relu', padding='same'))
model.add(Reshape((28, 28)))

model.compile(optimizer='adam', loss='mse')

model.summary()


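# callback that logs side-by-side (input | reconstruction) image pairs to W&B after each epoch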
class Images(Callback):
    def on_epoch_end(self, epoch, logs):
        indices = np.random.randint(self.validation_data[0].shape[0], size=8)
        test_data = self.validation_data[0][indices]
        pred_data = self.model.predict(test_data)
        run.history.row.update({
            "examples": [
                wandb.Image(np.hstack([data, pred_data[i]]), caption=str(i))
                for i, data in enumerate(test_data)
            ]
        })


model.fit(x_train,
          x_train,
          epochs=config.epochs,
          validation_data=(x_test, x_test),
          callbacks=[Images(), WandbKerasCallback()])

model.save('auto-cnn.h5')
Example #10
# imports and W&B setup (assumed preamble, following Example #2, so the snippet runs standalone)
import pandas as pd
import wandb
from wandb.wandb_keras import WandbKerasCallback
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

run = wandb.init()
config = run.config
config.max_words = 1000
config.max_length = 300
# Puts tweets into a data frame
df = pd.read_csv('tweets.csv')

target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text'].astype(str)

category_to_num = {"I can't tell": 0, "Negative emotion": 1, "Positive emotion": 2, "No emotion toward brand or product": 3}
target_num = [category_to_num[t] for t in target]
target_one_hot = np_utils.to_categorical(target_num)

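# tokenize the tweets and pad every sequence to a fixed length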
tokenizer = Tokenizer(num_words=config.max_words)
tokenizer.fit_on_texts(list(text))
sequences = tokenizer.texts_to_sequences(list(text))
data = pad_sequences(sequences, maxlen=config.max_length)

train_data = data[:6000]
test_data = data[6000:]
train_target = target_one_hot[:6000]
test_target = target_one_hot[6000:]

model = Sequential()
model.add(Embedding(config.max_words, 128, input_length=config.max_length))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(4, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(train_data, train_target, callbacks=[WandbKerasCallback()], validation_data=(test_data, test_target))
Example #11
# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(config.hidden_nodes, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=config.optimizer,
              metrics=['accuracy'])

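# callback that logs a handful of test images with their one-hot labels to W&B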
class Images(Callback):
    def on_epoch_end(self, epoch, logs=None):
        test_data = X_test[:10]
        val_data = y_test[:10]

        pred_data = self.model.predict(test_data)
        run.history.row.update({
            "examples": [
                wandb.Image(test_data[i],
                            caption=str(val_data[i]) + str(np.argmax(val_data[i])))
                for i in range(8)
            ]
        })



# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          callbacks=[Images(), WandbKerasCallback()], epochs=config.epochs)
Example #12
    # callbacks
    log_file_path = base_path + dataset_name + '_emotion_training.log'
    csv_logger = CSVLogger(log_file_path, append=False)
    early_stop = EarlyStopping('val_loss', patience=config.patience)
    reduce_lr = ReduceLROnPlateau('val_loss',
                                  factor=0.1,
                                  patience=int(config.patience / 4),
                                  verbose=1)
    trained_models_path = base_path + dataset_name + '_mini_XCEPTION'
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       'val_loss',
                                       verbose=1,
                                       save_best_only=True)
    wandb_callback = WandbKerasCallback()
    callbacks = [
        model_checkpoint, csv_logger, early_stop, reduce_lr, wandb_callback
    ]

    # loading dataset
    data_loader = DataManager(dataset_name, image_size=input_shape[:2])
    faces, emotions = data_loader.get_data()
    faces = preprocess_input(faces)
    num_samples, num_classes = emotions.shape
    train_data, val_data = split_data(faces, emotions, validation_split)
    train_faces, train_emotions = train_data
    model.fit_generator(data_generator.flow(train_faces, train_emotions,
                                            config.batch_size),
                        steps_per_epoch=len(train_faces) // config.batch_size,
                        epochs=config.num_epochs,
                        validation_data=val_data,
                        callbacks=callbacks)
Example #13
# create model
model = Sequential()
model.add(
    Reshape((img_width, img_height, 1), input_shape=(img_width, img_height)))
model.add(Dropout(0.4))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Dropout(0.4))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# Fit the model
model.fit(
    X_train,
    y_train,
    epochs=config.epochs,
    validation_data=(X_test, y_test),
    callbacks=[WandbKerasCallback(validation_data=X_test, labels=labels)])

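# spot-check predictions against the ground truth on the first 50 training samples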
print("Predictions", model.predict(X_train[:50]))
print("Truth", y_train[:50])
Example #14
                x_pred = np.zeros((1, maxlen, len(chars)))
                for t, char in enumerate(sentence):
                    x_pred[0, t, char_indices[char]] = 1.

                preds = model.predict(x_pred, verbose=0)[0]
                next_index = sample(preds, diversity)
                next_char = indices_char[next_index]

                generated += next_char
                sentence = sentence[1:] + next_char

                sys.stdout.write(next_char)
                sys.stdout.flush()
            print()


# train the model, output generated text after each iteration
filepath = str(run.dir) + "/model-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')

model.fit(x,
          y,
          batch_size=config.hidden_nodes,  # the source reuses hidden_nodes as the batch size
          epochs=1000,
          callbacks=[SampleText(),
                     WandbKerasCallback(), checkpoint])
Example #15
            result = int_to_char[index]
            seq_in = [int_to_char[value] for value in pattern]
            sys.stdout.write(result)
            sys.stdout.flush()
            pattern.append(index)
            pattern = pattern[1:]


# define the LSTM model
model = Sequential()
model.add(
    LSTM(config.nodes,
         input_shape=(X.shape[1], X.shape[2]),
         return_sequences=True))
model.add(LSTM(config.nodes))
model.add(Dropout(config.dropout))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

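# checkpoint the weights whenever the training loss improves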
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
callbacks_list = [checkpoint, WandbKerasCallback(), SampleText()]
print("Ready to go")

model.fit(X, y, epochs=config.epochs, batch_size=128, callbacks=callbacks_list)
model.save("book-lstm.h5")
Example #16
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

num_classes = y_train.shape[1]

# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(num_classes))
model.compile(loss='mse', optimizer='adam',
              metrics=['accuracy'])

# Fit the model
model.fit(X_train, y_train, epochs=config.epochs, validation_data=(X_test, y_test),
          callbacks=[WandbKerasCallback()])


# Output some predictions

from PIL import Image, ImageDraw
import numpy as np

model_output = model.predict(X_test)

labels = ["T-shirt/top", "Trouser", "Pullover", "Dress",
          "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

for i in range(10):
    prediction = np.argmax(model_output[i])

Example #17
run = wandb.init()
config = run.config
config.encoding_dim = 32
config.epochs = 1

(x_train, _), (x_test, _) = mnist.load_data()
(x_train_noisy, x_test_noisy) = add_noise(x_train, x_test)


x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.


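# dense autoencoder: flatten the image, compress to config.encoding_dim units, reconstruct 784 pixels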
model = Sequential()
model.add(Flatten(input_shape=(28,28)))
model.add(Dense(config.encoding_dim, activation='relu'))
model.add(Dense(784, activation='sigmoid'))
model.add(Reshape((28,28)))
model.compile(optimizer='adam', loss='mse')


model.fit(x_train_noisy, x_train,
          epochs=config.epochs,
          validation_data=(x_test_noisy, x_test),
          callbacks=[WandbKerasCallback()])


model.save("auto-denoise.h5")