Code example #1
def main():
    x_train, y_train, x_test, y_test = load_data()

    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(11, 11),
               strides=4,
               padding="same",
               activation='relu',
               input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(5, 5),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(Flatten())  # flatten the conv feature maps before the dense head
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(7, activation='softmax'))

    # 7-way softmax output, so use categorical (not binary) cross-entropy
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=5,
              verbose=1,
              validation_data=(x_test, y_test))

    model.save(expanduser("~/emotion/alex_net.h5"))

    accuracy, fbeta = test_model(model, x_test, y_test)
    print("Accuracy: %s" % accuracy)
    print("F-Beta: %s" % fbeta)
Code example #2
def fit_lstm(train, batch_size, nb_epoch, neurons):
    # split into inputs and target, reshape to [samples, timesteps, features]
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # manual epoch loop so the LSTM state can be reset between epochs
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    return model
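A usage sketch for fit_lstm above: train is assumed to be a 2-D supervised array whose last column is the target. With batch_size=1 the stateful model can then forecast one step at a time:

# Sketch only: train is shaped (samples, features + 1), last column the target.
model = fit_lstm(train, batch_size=1, nb_epoch=50, neurons=4)

# one-step forecasts must use the same fixed batch size the model was built with
for row in train:
    X = row[0:-1].reshape(1, 1, len(row) - 1)
    yhat = model.predict(X, batch_size=1)[0, 0]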
Code example #3
class WindPuller(object):
    def __init__(self, input_shape, lr=0.01, n_layers=2, n_hidden=8, rate_dropout=0.2, loss=risk_estimation):
        print("initializing..., learing rate %s, n_layers %s, n_hidden %s, dropout rate %s." % (
        lr, n_layers, n_hidden, rate_dropout))
        self.model = Sequential()
        self.model.add(Dropout(rate=rate_dropout, input_shape=(input_shape[0], input_shape[1])))
        for i in range(0, n_layers - 1):
            self.model.add(LSTM(n_hidden * 4, return_sequences=True, activation='tanh',
                                recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                                recurrent_initializer='orthogonal', bias_initializer='zeros',
                                dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(LSTM(n_hidden, return_sequences=False, activation='tanh',
                            recurrent_activation='hard_sigmoid', kernel_initializer='glorot_uniform',
                            recurrent_initializer='orthogonal', bias_initializer='zeros',
                            dropout=rate_dropout, recurrent_dropout=rate_dropout))
        self.model.add(Dense(1, kernel_initializer=initializers.glorot_uniform()))
        # self.model.add(BatchNormalization(axis=-1, moving_mean_initializer=Constant(value=0.5),
        #               moving_variance_initializer=Constant(value=0.25)))
        self.model.add(BatchNormalization(axis=-1))
        self.model.add(Activation("relu(alpha=0., max_value=1.0)"))
        opt = RMSprop(lr=lr)
        self.model.compile(loss=loss,
                           optimizer=opt,
                           metrics=['accuracy'])

    def fit(self, x, y, batch_size=32, nb_epoch=100, verbose=1, callbacks=None,
            validation_split=0., validation_data=None, shuffle=True,
            class_weight=None, sample_weight=None, initial_epoch=0):
        self.model.fit(x, y, batch_size=batch_size, epochs=nb_epoch, verbose=verbose,
                       callbacks=callbacks, validation_split=validation_split,
                       validation_data=validation_data, shuffle=shuffle,
                       class_weight=class_weight, sample_weight=sample_weight,
                       initial_epoch=initial_epoch)

    def save(self, path):
        self.model.save(path)

    def load_model(self, path):
        self.model = load_model(path)
        return self

    def evaluate(self, x, y, batch_size=32, verbose=1,
                 sample_weight=None, **kwargs):
        return self.model.evaluate(x, y, batch_size, verbose,
                                   sample_weight)

    def predict(self, x, batch_size=32, verbose=0):
        return self.model.predict(x, batch_size, verbose)
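risk_estimation, used as the default loss above, is defined elsewhere in the project. For illustration only (not the author's actual definition), a custom Keras loss is just a function of (y_true, y_pred) tensors that returns a tensor:

from keras import backend as K

# Illustrative sketch of a custom loss: negative mean of position * return,
# i.e. reward the model for positions aligned with price moves.
def risk_estimation(y_true, y_pred):
    return -100. * K.mean(y_true * y_pred)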
Code example #4
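X, y, sgd, model, and model1 are not defined in this fragment. A hypothetical XOR setup consistent with the layer weights inspected below (a 2-4-1 network, so the first kernel is (2, 4)):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
sgd = SGD(lr=0.1)

def build_xor_net():
    net = Sequential()
    net.add(Dense(4, input_dim=2, activation='sigmoid'))
    net.add(Dense(1, activation='sigmoid'))
    return net

model = build_xor_net()
model1 = build_xor_net()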
# set loss function to mse, report accuracy
model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])
# set loss function to binary_crossentropy, report accuracy
model1.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

#######################################################
# batch_size = 1: update weights after every sample;
# batch_size = 100: update weights after every 100 samples;
# without validation_split, the entire dataset is used for training
# with validation_split=0.25, 25% of the dataset is held out for validation and
#   the rest used for training; training then reports loss, val_loss, acc, val_acc
# shuffle = True: the training set is reshuffled before each epoch
# epochs = 10: iterate over the entire dataset 10 times
# hist = fit() records the loss for each epoch
#######################################################

hist1 = model.fit(X, y, batch_size=1, validation_split=0.25,
                  epochs=10)  # accuracy 0.75
hist2 = model.fit(X, y, batch_size=1, epochs=1000)  # accuracy 0.75

# See how the weights change to bring the function closer to XOR
epochs = 5
for epoch in range(epochs):
    print("epoch:", epoch)
    model.fit(X, y, batch_size=1, epochs=1)
    print("Layer1 weights shape:")
    print(model.layers[0].weights)
    print("Layer1 kernel:")
    # each epoch, the network steps closer to the XOR function
    print(model.layers[0].get_weights()[0])
    print("Layer1 bias:")
    print(model.layers[0].get_weights()[1])
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels1 = np.random.randint(2, size=(1000, 1))

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model, iterating on the data in batches of 32 samples
print("\nNo validation_set split")
model.fit(data, labels1, epochs=1, batch_size=32)

#######################################
# For a single-input model with 10 classes (categorical classification):
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels2 = np.random.randint(10, size=(1000, 1))
from tensorflow.contrib.keras.python.keras.utils import to_categorical
# Convert labels to categorical one-hot encoding
one_hot_labels = to_categorical(labels2, num_classes=10)

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model, iterating on the data in batches of 32 samples
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
Code example #6
                shifted_movies[i, t, x_shift - w:x_shift + w,
                               y_shift - w:y_shift + w, 0] += 1

    # Cut to a 40x40 window
    noisy_movies = noisy_movies[::, ::, 20:60, 20:60, ::]
    shifted_movies = shifted_movies[::, ::, 20:60, 20:60, ::]
    noisy_movies[noisy_movies >= 1] = 1
    shifted_movies[shifted_movies >= 1] = 1
    return noisy_movies, shifted_movies


# Train the network
noisy_movies, shifted_movies = generate_movies(n_samples=1200)
seq.fit(noisy_movies[:1000],
        shifted_movies[:1000],
        batch_size=10,
        epochs=300,
        validation_split=0.05)

# Testing the network on one movie
# feed it with the first 7 positions and then
# predict the new positions
which = 1004
track = noisy_movies[which][:7, ::, ::, ::]

for j in range(16):
    new_pos = seq.predict(track[np.newaxis, ::, ::, ::, ::])
    new = new_pos[::, -1, ::, ::, ::]
    track = np.concatenate((track, new), axis=0)

# And then compare the predictions
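A sketch of that comparison, assuming matplotlib is available: show the predicted frame next to the ground-truth frame for the extrapolated time steps.

import matplotlib.pyplot as plt

for t in range(7, 15):
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(121)
    ax.set_title('Prediction, t=%i' % (t + 1))
    ax.imshow(track[t, ::, ::, 0])
    ax = fig.add_subplot(122)
    ax.set_title('Ground truth, t=%i' % (t + 1))
    ax.imshow(shifted_movies[which][t, ::, ::, 0])
    plt.show()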
Code example #7
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

# fit network
history = model.fit(train_X,
                    train_y,
                    epochs=50,
                    batch_size=72,
                    validation_data=(test_X, test_y),
                    verbose=2,
                    shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='Training Loss')
pyplot.plot(history.history['val_loss'], label='Validation Loss')
pyplot.legend()
pyplot.show()

# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
# invert scaling for forecast to revert data into original form
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
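Applying the same inverse transform to the ground truth lets the forecast error be reported in original units; a sketch following the pattern above (test_y and scaler come from the surrounding script):

from math import sqrt
from sklearn.metrics import mean_squared_error

# invert scaling for the actual values, then report RMSE in original units
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
rmse = sqrt(mean_squared_error(inv_y[:, 0], inv_yhat[:, 0]))
print('Test RMSE: %.3f' % rmse)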
Code example #8
# with validation_split=0.25, 25% of the dataset is held out for validation and
#   the rest used for training; training then reports loss, val_loss, acc, val_acc
# shuffle = True: the training set is reshuffled before each epoch
# epochs = 10: iterate over the entire dataset 10 times
# hist = fit() records the loss for each epoch
#######################################################

# hist1 = model.fit(X, y, batch_size=1, validation_split=0.25, epochs=10) # accuracy 0.75
# hist2 = model.fit(X, y, batch_size=1, epochs=1000) # accuracy 100%, loss keeps dropping, down to 0.0016

#######################################################
# save all weights, loss, accuracy changes
#######################################################
epochs = 1000
for epoch in range(epochs):
    print("epoch:", epoch)
    hist = model.fit(X, y, batch_size=1, epochs=1)
    print("save each and every weight element into list")
    w_1_1_1.append(model.get_weights()[0][0, 0])
    w_1_1_2.append(model.get_weights()[0][1, 0])
    w_1_2_1.append(model.get_weights()[0][0, 1])
    w_1_2_2.append(model.get_weights()[0][1, 1])
    w_1_3_1.append(model.get_weights()[0][0, 2])
    w_1_3_2.append(model.get_weights()[0][1, 2])
    w_1_4_1.append(model.get_weights()[0][0, 3])
    w_1_4_2.append(model.get_weights()[0][1, 3])
    w_2_1.append(model.get_weights()[2][0, 0])
    w_2_2.append(model.get_weights()[2][1, 0])
    w_2_3.append(model.get_weights()[2][2, 0])
    w_2_4.append(model.get_weights()[2][3, 0])
    print("save each and every loss and accuracy")
    losses.append(hist.history['loss'][0])
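The lists filled above can then be plotted to watch the weights converge; a minimal sketch, assuming matplotlib:

import matplotlib.pyplot as plt

# visualize how a couple of the recorded weights and the loss evolve
plt.subplot(2, 1, 1)
plt.plot(w_1_1_1, label='w_1_1_1')
plt.plot(w_2_1, label='w_2_1')
plt.legend()

plt.subplot(2, 1, 2)
plt.plot(losses, label='loss')
plt.legend()
plt.show()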
Code example #9
model.add(Dropout(0.6))
model.add(Dense(2))

model.add(Activation("softmax"))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

csv_logger = CSVLogger('training.log')
es_cb = EarlyStopping(monitor='val_loss', mode='auto', patience=30, verbose=1)
tb_cb = TensorBoard(log_dir='./logs', write_graph=True)
hist = model.fit(train,
                 train_label,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_split=1 / 7,
                 shuffle=True,
                 verbose=1,
                 callbacks=[es_cb, tb_cb, csv_logger])

score = model.evaluate(test, test_label, verbose=1)
print('test loss:', score[0])
print('test acc:', score[1])

# plot results
plt.subplot(3, 1, 1)
loss = hist.history['loss']
val_loss = hist.history['val_loss']

epochs = len(loss)
plt.plot(range(epochs), loss, marker='.', label='loss')
Code example #10
# create train and test lists: X - patterns, Y - intents
train_x = list(training[:, 0])
train_y = list(training[:, 1])
#print("Training data created")

# create a TensorFlow (Keras) model

model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]), ), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

# compile the model; stochastic gradient descent with Nesterov accelerated gradient gives good results

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.5, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

#fitting and saving model
hist = model.fit(np.array(train_x),
                 np.array(train_y),
                 epochs=200,
                 batch_size=5,
                 verbose=1)

model.save('chatbot_model.h5')  # save() takes no history argument; hist is not a valid parameter

print("model created")
Code example #11
if os.path.isfile(savefile):
    model = load_model(savefile)
else:
    model = Sequential()
    model.add(Dense(128, input_dim=input_dimension, kernel_initializer=hidden_initializer, activation='relu'))
    #model.add(Dense(128, input_dim=input_dimension,  activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(64, kernel_initializer=hidden_initializer, activation='relu'))
    model.add(Dense(2, kernel_initializer=hidden_initializer, activation='softmax'))
    #model.add(Dense(64, activation='relu'))
    #model.add(Dense(2, activation='softmax'))

    sgd = SGD(lr=learning_rate, momentum=momentum)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['acc'])

model.fit(X_train, y_train, epochs=50, batch_size=128)
predictions = model.predict_proba(X_test)

ans = pd.DataFrame(predictions,columns=['target','dmy'])
print(ans)
outdf = pd.concat([outdf,ans['target']],axis=1)
outdf.to_csv('./submit_keras.csv',index=False)


# save the model architecture as JSON (optional)
#model_json = model.to_json()
#with open("./ans.json", "w") as json_file:
#    json_file.write(model_json)

model.save(savefile)
Code example #12
model = Sequential()

# if return_sequences=False, only the last LSTM cell emits its output
model.add(LSTM(32, input_shape=(look_back, data_dim), return_sequences=True))

model.add(LSTM(2))
#model.add(Dense(10))

model.add(Dropout(0.2))

model.add(Dense(1, activation='sigmoid'))  # sigmoid, not softmax: a 1-unit softmax always outputs 1.0

# try using different optimizers and different optimizer configs

model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])

time_train_X, time_train_y = create_dataset(w2v_tfIdf_train_X,
                                            train_y,
                                            look_back=look_back)

time_test_X, time_test_y = create_dataset(w2v_tf_Idf_test_X,
                                          test_y,
                                          look_back=look_back)

model.fit(time_train_X,
          time_train_y,
          verbose=2,
          epochs=10,
          batch_size=100,
          validation_data=[time_test_X, time_test_y])
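create_dataset is not shown in this fragment. A common sliding-window implementation consistent with the look_back usage and input_shape above (a sketch, not the author's exact helper):

import numpy as np

def create_dataset(X, y, look_back=1):
    data_X, data_y = [], []
    for i in range(len(X) - look_back):
        data_X.append(X[i:i + look_back])   # window of look_back feature rows
        data_y.append(y[i + look_back])     # label that follows the window
    return np.array(data_X), np.array(data_y)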
Code example #13
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Generate dummy training data
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.randint(10, size=(1000, 1))
y_train = to_categorical(y_train, num_classes=10)

# Generate dummy validation data
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.randint(10, size=(100, 1))
y_val = to_categorical(y_val, num_classes=10)

model.fit(x_train,
          y_train,
          batch_size=64,
          epochs=1,
          validation_data=(x_val, y_val))
"""
LSTM

  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
Code example #14
# First convolutional layer with max pooling
model.add(
    Conv2D(20, (5, 5),
           padding="same",
           input_shape=(20, 20, 1),
           activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))

# Output layer with 32 nodes (one for each possible letter/number we predict)
model.add(Dense(32, activation="softmax"))

# Ask Keras to build the TensorFlow model behind the scenes
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])

# Train the neural network
model.fit(X_train,
          Y_train,
          validation_data=(X_test, Y_test),
          batch_size=32,
          epochs=10,
          verbose=1)

# Save the trained model to disk
model.save(MODEL_FILENAME)
Code example #15
def main():

    start = time.time()

    # generate multiple time-series sequences
    dataframe = generate_sine_data()

    dataset = dataframe.values.astype('float32')
    # put dataset of multiple time-series sequences
    dataset = Normalize(dataset)

    # create dataset
    length = len(dataset)
    train_size = int(length * 0.67)
    test_size = length - train_size
    train, test = dataset[:train_size], dataset[train_size:]

    trainX, trainY = create_dataset(train)
    testX, testY = create_dataset(test)

    trainX = trainX[len(trainX) % BATCH_SIZE:]
    trainY = trainY[len(trainY) % BATCH_SIZE:]
    length_test = len(testX)
    testX = testX[len(testX) % BATCH_SIZE:]
    testY = testY[len(testY) % BATCH_SIZE:]

    trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 3))
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 3))

    # construct the DNN model (LSTM + fully_connected_layer)
    model = Sequential()
    model.add(LSTM(HIDDEN_SIZE, batch_input_shape=(BATCH_SIZE, Tau, 3)))
    model.add(Dense(3))
    model.summary()
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])

    # learn the DNN model on training dataset
    hist = model.fit(trainX,
                     trainY,
                     batch_size=BATCH_SIZE,
                     epochs=EPOCHS,
                     verbose=0,
                     shuffle=True)

    # plot the learning curve
    if PLT:
        epochs = range(1, EPOCHS + 1)
        plt.figure()
        plt.plot(epochs, hist.history['loss'], label='loss/training')
        plt.plot(epochs, hist.history['acc'], label='acc/training')
        plt.xlabel('epoch')
        plt.ylabel('acc / loss')
        plt.legend()
        plt.show()
        plt.close()

    # evaluate the DNN model on test dataset
    score = model.evaluate(testX, testY, batch_size=BATCH_SIZE, verbose=0)
    print('loss: {0[0]}, acc: {0[1]} on test dataset'.format(score))

    # forecast Ls-steps-ahead value on the test dataset
    predicted = model.predict(testX, batch_size=BATCH_SIZE)

    # plot testY and predictedY
    if PLT:
        df_out = pd.DataFrame(predicted[:200])
        df_out.columns = [
            "predicted_sine", 'predicted_sine_rand', 'predicted_sine_int'
        ]
        df_out = pd.concat([
            df_out,
            pd.DataFrame(
                testY[:200],
                columns=["input_sine", "input_sine_rand", "input_sine_int"])
        ])
        plt.figure()
        df_out.plot()
        plt.show()
        plt.close()

    # plot the forecasting results on test dataset
    if PLT:
        plt.ion()
        i = 0
        while i < 20:
            K = 3  # the number of sensors
            fig = plt.figure(figsize=(8, K * 4))
            plt.subplots_adjust(hspace=0.2)
            for j in range(K):
                plt.subplot(K, 1, j + 1)
                plt.plot(range(i, i + Tau + Ls + 1),
                         test[length_test % BATCH_SIZE:][i:i + Tau + Ls + 1,
                                                         j],
                         color='silver',
                         label='original')
                plt.plot(range(i, i + Tau),
                         testX[i, :, j],
                         color='dodgerblue',
                         label='input')
                plt.scatter(i + Tau + Ls,
                            predicted[i, j],
                            s=15,
                            color='orange',
                            label='forecast')
                plt.legend()
            plt.draw()
            plt.pause(1.2)
            plt.clf()
            i += 1
        plt.close()

    end = time.time()
    print('elapsed_time: {}[s]'.format(end - start))
Code example #16
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Dense(2, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer='Nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

csv_logger = CSVLogger('training.log')
hist = model.fit(train,
                 train_label,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_split=2 / 7,
                 callbacks=[csv_logger],
                 shuffle=True,
                 verbose=1)

score = model.evaluate(test, test_label, verbose=1)
print('test loss:', score[0])
print('test acc:', score[1])

# plot results
plt.subplot(3, 1, 1)
loss = hist.history['loss']
val_loss = hist.history['val_loss']

epochs = len(loss)
plt.plot(range(epochs), loss, marker='.', label='loss')
Code example #17
File: RNN.py  Project: willytell/Deep-Learning-models
max_features = 20000
maxlen = 100
batch_size = 32


(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)

X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)


model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))

#model.add(SimpleRNN(128))
#model.add(GRU(128))
model.add(LSTM(128))

model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))


model.compile(loss='binary_crossentropy', optimizer='adam')


model.fit(X_train, y_train, batch_size=batch_size, epochs=1,
          validation_data=(X_test, y_test))
Code example #18
model.add(
    LSTM(
        32,
        return_sequences=True,
        stateful=True,  #input(32,8,16)
        batch_input_shape=(batch_size, timesteps, data_dim)))  #output(32,?,32)
model.add(LSTM(32, return_sequences=True, stateful=True))  # output(32,?,32)
model.add(LSTM(32, stateful=True))  # output (32,32)
model.add(Dense(10, activation='softmax'))  # output (32, 10)

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Generate dummy training data
x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))

# Generate dummy validation data
x_val = np.random.random((batch_size * 3, timesteps, data_dim))
y_val = np.random.random(
    (batch_size * 3, num_classes))  # random floats as dummy targets; proper one-hot labels shown in the sketch below

model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=5,
    shuffle=False,  # order is important
    validation_data=(x_val, y_val))
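To answer the question in the dummy-label comment above: random floats work as placeholder softmax targets, but real labels should be one-hot. A sketch using to_categorical (import path per your Keras install):

from keras.utils import to_categorical

# proper one-hot dummy labels instead of random floats
y_train = to_categorical(
    np.random.randint(num_classes, size=(batch_size * 10, 1)), num_classes)
y_val = to_categorical(
    np.random.randint(num_classes, size=(batch_size * 3, 1)), num_classes)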
Code example #19
File: cnn_lstm.py  Project: sd12037/tensorflow
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Dense(2, kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer='Nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

csv_logger = CSVLogger('training.log')
print('num of training_data {}'.format(train.shape[0]))
print('num of test_data {}'.format(test.shape[0]))
hist = model.fit(train,
                 train_label,
                 epochs=epochs,
                 batch_size=batch_size,
                 validation_data=(test, test_label),
                 callbacks=[csv_logger])

score = model.evaluate(test, test_label, verbose=0)
print('test loss:', score[0])
print('test acc:', score[1])

# plot results
plt.subplot(3, 1, 1)
loss = hist.history['loss']
val_loss = hist.history['val_loss']

epochs = len(loss)
plt.plot(range(epochs), loss, marker='.', label='loss')
plt.plot(range(epochs), val_loss, marker='.', label='val_loss')
Code example #20
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.optimizers import SGD
from tensorflow.contrib.keras.python.keras.utils import to_categorical
import numpy as np

# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))

model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy', # binary classification
              optimizer='rmsprop',
              metrics=['accuracy'])

hist = model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=1,
          batch_size=128)

hist.history
score = model.evaluate(x_test, y_test, batch_size=128)
Code example #21
### Both BatchNormalization and Dropout perform some basic operations before normalizing/dropping; dive into the source if you want the details

#### Access a middle layer's output when Dropout comes first, then BatchNormalization

model_seq = Sequential()
model_seq.add(Dropout(0.3, input_shape=(10, )))
model_seq.add(BatchNormalization())
model_seq.add(Dense(1))
# check out weights before training
# model_seq.get_weights()

# compile and train
model_seq.compile(optimizer='SGD', loss='mse')

model_seq.fit(input_array_small, target_small, epochs=10)
model_seq.get_weights()
model_seq.save("to_delete.h5")
model_best = load_model("to_delete.h5")

###### compare weights from two different training runs
# model_seq.get_weights()

###### check output
batchNorm_test = K.function(
    [model_best.input, K.learning_phase()],
    [model_best.layers[-2].output])([input_array_small, 0])[0]
batchNorm_train = K.function(
    [model_best.input, K.learning_phase()],
    [model_best.layers[-2].output])([input_array_small, 1])[0]
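The two extractions differ because Dropout (and BatchNormalization's batch statistics) are only active at learning_phase=1; a quick check:

import numpy as np

# the train-phase output should generally differ from the test-phase output
print(np.allclose(batchNorm_test, batchNorm_train))  # typically False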
Code example #22
def one_hot_encode_object_array(arr):
    uniques, ids = np.unique(
        arr, return_inverse=True)  # convert 3 words into 0, 1, 2
    return to_categorical(ids, len(uniques))  # convert 0, 1, 2 to one-hot


train_y_ohe = one_hot_encode_object_array(train_y)
test_y_ohe = one_hot_encode_object_array(test_y)

model = Sequential()

model.add(Dense(16, input_shape=(4, )))  # each sample has 4 features
model.add(Activation('sigmoid'))  # add non-linearity to hidden layer 1

model.add(Dense(3))  # add another 3 neuron final layer
model.add(Activation('softmax'))  # give it non-linearity as output
model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=["accuracy"])

model.fit(train_X,
          train_y_ohe,
          validation_split=0.2,
          epochs=10,
          batch_size=1,
          verbose=1)

loss, accuracy = model.evaluate(test_X, test_y_ohe, batch_size=32, verbose=1)
print("Accuracy = {:.2f}".format(accuracy))
Code example #23
class MLP_keras:
    def __init__(self, learning_rate, layers, functions, optimizer_name,
                 beta=0.0, dropout=0.0):  # Keras Dropout() takes the drop fraction; 1.0 would drop every unit
        
        self.n_input = layers[0]
        self.n_hidden = layers[1:-1]
        self.n_output = layers[-1]
        
        self.model = Sequential()
        
        if len(self.n_hidden) == 0:
            # single layer
            self.model.add(Dense(self.n_output, activation=functions[0],
                             kernel_regularizer=regularizers.l2(beta),
                             input_shape=(self.n_input,)))
            
        elif len(self.n_hidden) == 1:
            # hidden layer
            self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            self.model.add(Dropout(dropout))
            # output layer
            self.model.add(Dense(self.n_output, activation=functions[1],
                                 kernel_regularizer=regularizers.l2(beta)))
            
        else:
            # the first hidden layer
            self.model.add(Dense(self.n_hidden[0], activation=functions[0],
                                 kernel_regularizer=regularizers.l2(beta),
                                 input_shape=(self.n_input,)))
            self.model.add(Dropout(dropout))
            # the second hidden layer
            self.model.add(Dense(self.n_hidden[1], activation=functions[1],
                                 kernel_regularizer=regularizers.l2(beta)))
            self.model.add(Dropout(dropout))
            # the output layer
            self.model.add(Dense(self.n_output, activation=functions[2],
                                 kernel_regularizer=regularizers.l2(beta)))
        
        self.model.summary()
        
        if optimizer_name == 'Adam':
            optimizer = Adam(learning_rate)
        else:
            raise ValueError('unsupported optimizer: %s' % optimizer_name)
        
        #self.model.compile(loss='mean_squared_error',
        #                   optimizer=optimizer,
        #                   metrics=['accuracy'])
        
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
    
    def train(self, epochs, trn, vld=None, batch_size=32, es=0):
        if vld is not None:
            validation_data = (vld.x, vld.y)
            if es > 0:
                callbacks = [EarlyStopping(monitor='val_loss', patience=es)]
            else:
                callbacks = None
        else:
            validation_data = None
            callbacks = None
        
        self.model.fit(trn.x, trn.y,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=2,
                       callbacks=callbacks,
                       validation_data=validation_data)
        
        loss, trn_acc = self.model.evaluate(trn.x, trn.y, verbose=0)  # accuracy on the training set
        if vld is not None:
            _, vld_acc = self.model.evaluate(vld.x, vld.y, verbose=0)
        else:
            vld_acc = 0
        return ['', loss, trn_acc, vld_acc, 0]
    
    def evaluate(self, x_test, y_test):
        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
    
    def score(self, tst):
        return self.model.evaluate(tst.x, tst.y, verbose=0)[1]
    
    def predict_proba(self, x):
        if x is None: return None
        return self.model.predict_proba(x, verbose=0)
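A hypothetical construction of the class above, with the layer sizes, activation names, and optimizer name the constructor expects (values are illustrative only):

# 784-128-10 classifier: relu hidden layer, softmax output, Adam optimizer
mlp = MLP_keras(learning_rate=0.001,
                layers=[784, 128, 10],
                functions=['relu', 'softmax'],
                optimizer_name='Adam',
                beta=0.001,
                dropout=0.2)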
Code example #24
model.summary()

model = Sequential()
model.add(Dense(32, input_shape=(784, 10)))  # return tensor (?, 784, 32)
model.add(Dense(1))  # return tensor shape (?, 784, 1)
model.summary()

import numpy as np
x = np.random.random((100, 784, 10))
y = np.random.random((100, 784, 1))
"""
## Compilation
- optimizer: 'rmsprop', 'SGD', ...
- loss: 'mse', 'categorical_crossentropy', 'binary_crossentropy', ...
- metrics: 'accuracy', ... or user_made_func
"""
# For custom metrics
from tensorflow.contrib.keras.python.keras import backend as K


def mean_pred(y_true, y_pred):  # a custom metric must take the two args (y_true, y_pred)
    return K.mean(y_pred)


# For a binary classification problem
model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy', mean_pred])

hist = model.fit(x, y, validation_split=0.3, epochs=2)

print(hist.history)
Code example #25
File: CNN.py  Project: willytell/Deep-Learning-models
# Model
model = Sequential()

# default data format: (samples, rows, cols, channels)
model.add(
    Convolution2D(32,
                  kernel_size=(3, 3),
                  strides=(1, 1),
                  activation='relu',
                  input_shape=(28, 28, 1)))
model.add(
    Convolution2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)

score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Code example #26
print('x_train shape :', x_train.shape)
print('x_test shape :', x_test.shape)
print('y_train shape :', y_train.shape)
print('y_test shape :', y_test.shape)

batch_size = 32
epochs = 5

model = Sequential()
model.add(Dense(512, input_shape=(max_words, )))
model.add(Activation('relu'))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_split=0.1)

score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])

Code example #27
model.compile(loss="categorical_crossentropy",
              optimizer="SGD",
              metrics=["accuracy"])
#optimizer="SGD" stohastic gradient decend, method of learning
#loss="categorical_crossentropy" - error by category
#metrics=["accuracy"] - optimization metric is a accuracy
#metrics=["mae"] - percent of right answers of data set

#mse - mean squared error
#mae - mean absolute error

print(model.summary())

# train the network
model.fit(X_train, y_train, batch_size=200, epochs=100, verbose=1)
# batch_size - number of samples per stochastic gradient update
# verbose - print diagnostic info while the model trains
# validation_split=0.2 - hold out 20% of the data as a validation set

predictions = model.predict(X_train)

# convert output probabilities to a single class index per sample
predictions = np.argmax(predictions, axis=1)  # Keras 1's np_utils.categorical_probas_to_classes did the same

score = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', score[1] * 100)

##############
model = Sequential()
model.add(Dense(800, input_dim=784, kernel_initializer="normal", activation='relu'))  # init= was the Keras 1 name