Example #1
def test_tf_keras_mnist_cnn():
    """ This is the basic mnist cnn example from keras.
    """

    try:
        import tensorflow as tf
        from tensorflow.python import keras
        from tensorflow.python.keras.models import Sequential
        from tensorflow.python.keras.layers import Dense, Dropout, Flatten, Activation
        from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.python.keras import backend as K
    except Exception as e:
        print("Skipping test_tf_keras_mnist_cnn!")
        return
    import shap
    import numpy as np

    batch_size = 128
    num_classes = 10
    epochs = 1

    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))  # 128
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    model.fit(x_train[:1000, :],
              y_train[:1000, :],
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test[:1000, :], y_test[:1000, :]))

    # explain by passing the tensorflow inputs and outputs
    np.random.seed(0)
    inds = np.random.choice(x_train.shape[0], 20, replace=False)
    e = shap.GradientExplainer((model.layers[0].input, model.layers[-1].input),
                               x_train[inds, :, :])
    shap_values = e.shap_values(x_test[:1], nsamples=1000)

    sess = tf.keras.backend.get_session()
    diff = sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_test[:1]}) - \
           sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_train[inds, :, :]}).mean(0)

    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    d = np.abs(sums - diff).sum()
    assert d / np.abs(diff).sum() < 0.05, \
        "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
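Beyond this numeric check, the attributions can be rendered with shap's built-in image plot; a minimal sketch (not part of the original test, and assuming a plotting backend is available):

    shap.image_plot(shap_values, x_test[:1])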
Example #2
# (the snippet begins mid-loop; the standard alphabet set-up it relies on is
#  reconstructed here so the example runs)
import numpy
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.utils import np_utils

# define the alphabet and the char <-> int lookup tables used below
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))

# build input/output pairs: each seq_length-character window predicts the next character
seq_length = 1
dataX = []
dataY = []
for i in range(0, len(alphabet) - seq_length, 1):
    seq_in = alphabet[i:i + seq_length]
    seq_out = alphabet[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
    print(seq_in, '->', seq_out)

# reshape X to be [samples, time steps, features]
X = numpy.reshape(dataX, (len(dataX), seq_length, 1))
# normalize
X = X / float(len(alphabet))
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# create and fit the model
model = Sequential()
model.add(LSTM(32, input_shape=(X.shape[1], X.shape[2])))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, epochs=1, batch_size=1, verbose=2)
# summarize performance of the model
scores = model.evaluate(X, y, verbose=0)
print("Model Accuracy: %.2f%%" % (scores[1] * 100))
# demonstrate some model predictions
for pattern in dataX:
    x = numpy.reshape(pattern, (1, len(pattern), 1))
    x = x / float(len(alphabet))
    prediction = model.predict(x, verbose=0)
    index = numpy.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    print(seq_in, "->", result)
Example #3
model = Sequential()
model.add(
    layers.Embedding(input_dim=vocab_size,
                     output_dim=embedding_dim,
                     input_length=maxlen))
model.add(layers.LSTM(units=50, return_sequences=True))
model.add(layers.LSTM(units=10))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(8))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=['accuracy'])
model.summary()

model.fit(xtrain, y_train, epochs=20, batch_size=16, verbose=True)
model.save('my_model')
print('model saved!')

loss, acc = model.evaluate(xtrain, y_train, verbose=True)
print("Training Accuracy: ", acc)

loss, acc = model.evaluate(xtest, y_test, verbose=True)
print("Test Accuracy: ", acc)

ypred = model.predict(xtest)

result = zip(xtest[:10], y_test[:10], ypred[:10])
for i in result:
    print(i)
Example #4
yscale = scaler.transform(y)

X_train, X_test, y_train, y_test = train_test_split(x, y)

model = Sequential()
model.add(
    Dense(12, input_dim=5, kernel_initializer='normal', activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='linear'))
model.summary()

model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])

history = model.fit(X_train,
                    y_train,
                    epochs=150,
                    batch_size=50,
                    verbose=1,
                    validation_split=0.2)

# print(history.history.keys())
# # "Loss"
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'validation'], loc='upper left')
# plt.show()

Xnew = np.array([[40, 0, 26, 9000, 8000]])
ynew = model.predict(Xnew)
Example #5
# (the snippet begins mid-call; reconstructed here as a report of the test-label shape)
print("test_y shape: {}".format(
    test_y.shape,
))

# Build the model
model = Sequential()
#model.add(Dense(32, activation='relu', input_dim=num_words))
#model.add(Dense(1, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid', input_dim=train_x.shape[1]))
model.compile(optimizer='adagrad',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
training = model.fit(train_x,
                     train_y,
                     epochs=20,
                     batch_size=32,
                     validation_split=0.2)

# Compute the model's score
score = model.evaluate(test_x, test_y)
print("Metrics: {}".format(model.metrics_names))
print("Score: {}".format(score))
print(classification(model, test_x, test_y))

print("(Precision, Recall, F1) = ({}, {}, {})".format(
    precision(model, test_x, test_y), recall(model, test_x, test_y),
    F1_score(model, test_x, test_y)))

# F1scores = list(enumerate(  <- the snippet is truncated here
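The classification, precision, recall, and F1_score helpers are defined elsewhere in the original project. A minimal sketch of what they might look like, built on sklearn.metrics (the names, 0.5 threshold, and signatures are assumptions inferred from the calls above):

from sklearn.metrics import classification_report, precision_score, recall_score, f1_score

def _labels(model, x):
    # threshold the sigmoid output at 0.5 (assumed convention)
    return (model.predict(x) > 0.5).astype(int).ravel()

def classification(model, x, y):
    return classification_report(y, _labels(model, x))

def precision(model, x, y):
    return precision_score(y, _labels(model, x))

def recall(model, x, y):
    return recall_score(y, _labels(model, x))

def F1_score(model, x, y):
    return f1_score(y, _labels(model, x))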
Example #6
model = Sequential()

# Perceptron: a network unit whose output is 0 or 1, like a light bulb (off/on)
# To turn the perceptron into linear regression, logistic regression, or another
# machine-learning model, a function has to be specified
# The function used for this is called the activation function
# e.g. linear regression: 'linear', logistic regression: 'sigmoid'
model.add(Dense(units=y_column, input_dim=x_column, activation='sigmoid'))

# This is binary classification, so use 'binary_crossentropy' as the loss function
# The optimizer can be given as a string, but it can also be created as an object
learning_rate = 0.1  # learning rate

sgd = SGD(lr=learning_rate)  # create the optimizer object; lr: learning rate
model.compile(loss='binary_crossentropy', optimizer=sgd)

model.fit(x=x_train, y=y_train, epochs=2000, verbose=0)

# Predict probabilities on the training data with the fitted model
H2 = model.predict(x_train)
print(H2)
print('-' * 30)

for item in x_test:
    H = model.predict(np.array([item]))
    print(H)
    print('-' * 30)

    # predict_classes: outputs the predicted class value
    # pred = model.predict_classes(np.array([item]))  # deprecated
    pred = (model.predict(np.array([item])) > 0.5).astype("int32")
    print('test data:', np.array([item]))
Example #7
# (snippet assumes earlier imports: tensorflow as tf, RMSprop, Sequential, Dense, Dropout)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

assert score[1] > 0.95
Example #8
def build_model():

    # CREATING THE RECURRENT NEURAL NETWORK
    model = Sequential()
    # Add a Gated Recurrent Unit (GRU) layer with 512 outputs per time-step.
    # input_shape takes the shape of the input; None means sequences of arbitrary length
    model.add(
        GRU(units=512,
            return_sequences=True,
            input_shape=(None, num_x_signals)))
    # The output signals are limited to [0, 1], so constrain the network output the same way
    model.add(Dense(num_y_signals, activation='sigmoid'))

    # LOSS FUNCTION

    global warmup_steps
    warmup_steps = 50

    # Define the initial learning rate
    optimizer = tf.keras.optimizers.RMSprop(lr=1e-10)
    # Compiles the model:
    model.compile(loss=loss_mse_warmup, optimizer=optimizer)
    model.summary()

    # Callback for writing checkpoints during training
    path_checkpoint = 'checkpoint.keras'
    callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                          monitor='val_loss',
                                          verbose=1,
                                          save_weights_only=True,
                                          save_best_only=True)

    # Callback for stopping optimization when performance worsens on the validation set
    callback_early_stopping = EarlyStopping(monitor='val_loss',
                                            patience=5,
                                            verbose=1)

    # Callback for writing the TensorBoard log during training
    callback_tensorboard = TensorBoard(log_dir='./23_logs/',
                                       histogram_freq=0,
                                       write_graph=False)
    # Callback reduces learning rate for the optimizer if the validation-loss has not improved since the last epoch
    callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                           factor=0.1,
                                           min_lr=1e-4,
                                           patience=0,
                                           verbose=1)

    callbacks = [
        callback_early_stopping, callback_checkpoint, callback_tensorboard,
        callback_reduce_lr
    ]

    model.fit(x=generator,
              epochs=20,
              steps_per_epoch=100,
              validation_data=validation_data,
              callbacks=callbacks)

    try:
        model.load_weights(path_checkpoint)
    except Exception as error:
        print("Error trying to load checkpoint.")
        print(error)

    result = model.evaluate(x=np.expand_dims(x_test_scaled, axis=0),
                            y=np.expand_dims(y_test_scaled, axis=0))
    print("loss (test-set):", result)
Example #9
    model2.add(MaxPool2D(pool_size=(2, 2)))
    # model2.add(tf.keras.layers.BatchNormalization())
    model2.add(Conv2D(64, (3, 3), activation='relu'))
    model2.add(MaxPool2D(pool_size=(2, 2)))
    model2.add(Flatten())
    model2.add(Dropout(0.5))
    model2.add(Dense(64, activation='relu'))
    model2.add(Dense(1, activation='sigmoid'))
    model2.compile(optimizer='RMSprop',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

    # Train the model
    train_result2 = model2.fit(reshaped_x_train,
                               train_y,
                               epochs=15,
                               validation_data=(reshaped_x_validation,
                                                validation_y))

    # Save the model
    model2.save('cat_and_dogs_a_20_5.h5')
    # cat_and_dogs_a_20 was trained for 10 epochs: ~80% accuracy, fit looked good
    # model2.save('cat_and_dogs_a.h5') reached 85% val_acc

    test_loss, test_acc = model2.evaluate(reshaped_x_test, test_y)
    print(test_acc)
    # Check the model's accuracy
    acc = train_result2.history['acc']
    val_acc = train_result2.history['val_acc']
    plt.figure()
    plt.plot(acc)
Example #10
import pandas as pd  # (assumed imports; the snippet uses pd and plt without importing them)
import matplotlib.pyplot as plt

print(pd.DataFrame(X_train).head())
# create the model 
embedding_vector_length = 128

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import LSTM,Dense, Dropout
from tensorflow.python.keras.layers import SpatialDropout1D
from tensorflow.python.keras.layers import Embedding
model = Sequential()
model.add(Embedding(15001, embedding_vector_length, input_length=250))
model.add(LSTM(100))
model.add(Dense(2, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
r=model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=64) 
# Final evaluation of the model 
import tensorflow as tf 
filename = "my_model.h5"
model.save(filename)
model=tf.keras.models.load_model(filename)
scores = model.evaluate(X_test, y_test, verbose=0) 
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
if "binary_accuracy" in r.history.keys():
	plt.plot(r.history['binary_accuracy'], label='acc')
plt.plot(r.history['accuracy'], label='acc')
 
plt.plot(r.history['val_accuracy'], label='val_acc')
Example #11
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense

mod_q = Sequential()
mod_q.add(Dense(20, activation='sigmoid', input_dim=2))
mod_q.add(Dense(30, activation='sigmoid'))
mod_q.add(Dense(10, activation='sigmoid'))
mod_q.add(Dense(1, activation='sigmoid'))

mod_q.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['mse'])

def XOR(x):
    if (x[0] or x[1]) and not (x[0] and x[1]):
        return 1
    else:
        return 0

X = np.random.randint(2, size=(2))
Y = XOR(X)

mod_q.predict(X.reshape(1, 2))
mod_q.fit(X.reshape(1, 2), np.array([Y]))
for i in range(10000):
    X = np.random.randint(2, size=(2))
    Y = XOR(X)
    mod_q.fit(X.reshape(1, 2), np.array([Y]), verbose=False)
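Fitting one random row at a time like this converges slowly; for reference, the same network can be trained on the full XOR truth table in a single call (a sketch, not part of the original):

X_all = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_all = np.array([XOR(x) for x in X_all])
mod_q.fit(X_all, y_all, epochs=2000, verbose=0)
print(mod_q.predict(X_all).round(3))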
Example #12
train_size = 30000
train_file = "D:/Darse ha/kaggle/Digit Recognizer/train.csv"
raw_data = pd.read_csv(train_file)

x, y = data_prep(raw_data)

model = Sequential()
model.add(
    Conv2D(30,
           kernel_size=(3, 3),
           strides=2,
           activation='relu',
           input_shape=(img_rows, img_cols, 1)))
model.add(Dropout(0.5))
model.add(Conv2D(30, kernel_size=(3, 3), strides=2, activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x, y, batch_size=128, epochs=2, validation_split=0.2)

test_file = "D:/Darse ha/kaggle/Digit Recognizer/test.csv"
raw_test = pd.read_csv(test_file)
raw_test = raw_test.values
test_shaped_array = raw_test.reshape(28000, img_rows, img_cols, 1)
preds = np.argmax(model.predict(test_shaped_array), axis=1)  # predict_classes is deprecated
Example #13
trainX = inputX[:80000]
trainY = inputY[:80000]
valX = inputX[80000:]
valY = inputY[80000:]

es = EarlyStopping(monitor='val_mae', mode='min', verbose=1, patience=50)

# design network
model = Sequential()
model.add(Dense(64))
model.add(Dense(32))
model.add(Dense(16))
model.add(Dense(valY.shape[1]))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])

# fit network
history = model.fit(trainX,
                    trainY,
                    epochs=5000,
                    batch_size=5000,
                    verbose=2,
                    validation_data=(valX, valY),
                    shuffle=False,
                    callbacks=[es])

model.save('Pend_Action_Dense_Network1.keras')
print(model.summary())

np.save("history_Pend_Action_Dense_Network1.npy",
        history.history,
        allow_pickle=True)
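A history dict saved with np.save comes back as a 0-d object array, so it has to be unpacked with .item(); a small sketch of reloading it:

import numpy as np
hist = np.load("history_Pend_Action_Dense_Network1.npy", allow_pickle=True).item()
print(hist.keys())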
Example #14
#split training set and test set
x_train, x_test, y_train, y_test = train_test_split(x,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

#set up neural network. Structure is 21->12->1
model = Sequential()
model.add(Dense(12, input_dim=21, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))

#Compile and train neural network
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
output = model.fit(x_train, y_train, epochs=500, batch_size=x_train.shape[0])
scores = model.evaluate(x_test, y_test)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

#plot accuracy-epoch figure
plt.plot(output.history['acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('Accuracy.png', dpi=100)
plt.show()

#plot loss-epoch figure
plt.plot(output.history['loss'])
plt.title('model loss')
Example #15
# Initialise the ANN :)
classifier = Sequential()

# Add the input layer and first hidden layer: 6 units (len(arr) // 2) with softmax activation, fed by the 11 dataset variables
classifier.add(Dense(6, activation='softmax', input_dim=11))

# Add the second hidden layer, also with softmax activation
classifier.add(Dense(6, activation='softmax'))

# Add the output layer: a single sigmoid unit giving the likelihood a customer leaves the bank
classifier.add(Dense(1, activation='sigmoid'))

# Compile the ANN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Fit the ANN to the training set for 100 epochs
classifier.fit(dataset_variables_train,
               dataset_results_train,
               batch_size=10,
               epochs=100)

# Predicting the Test set results
dataset_results_pred = classifier.predict(dataset_variables_test)

# Presenting the predicted variables for the test results
dataset_results_pred = [
    True if (i > .5) else False for i in dataset_results_pred
]
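The list comprehension above can also be written as a single vectorized threshold; an equivalent sketch:

dataset_results_pred = (classifier.predict(dataset_variables_test) > 0.5).ravel()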
Example #16
# (snippet begins mid-model; the earlier layers are not shown)
model.add(Activation('relu'))
model.add(Dense(100))
model.add(Dropout(0.4))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer=Adam(lr=0.01),
              loss="binary_crossentropy",
              metrics=["accuracy"])
annealer = LearningRateScheduler(lambda x: 1e-2 * 0.95**x)

# TRAIN MODEL
model.fit(X_train,
          Y_train,
          batch_size=32,
          epochs=8,
          callbacks=[annealer, printAUC(X_train, Y_train)],
          validation_data=(X_val, Y_val),
          verbose=2)

del df_train
del X_train, X_val, Y_train, Y_val
x = gc.collect()

# LOAD BEST SAVED NET
from keras.models import load_model
model = load_model('bestNet.h5')

pred = np.zeros((7853253, 1))
id = 1
chunksize = 2000000
Example #17
# (snippet begins mid-statement; `model = Sequential()` and the opening
#  `model.add(` are reconstructed)
model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           activation='relu',
           input_shape=(32, 32, 3),
           padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
#print(model.summary())

# Step 3: Compile the Model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Step 4: Train the Model
model.fit(X_train,
          y_train,
          epochs=training_epochs,
          validation_data=(X_test, y_test))

# Step 5: Evaluate the Model
loss, acc = model.evaluate(X_test, y_test)
print('Test accuracy:', acc)

# Step 6: Save the Model
model.save("./models/cifar_cnn.h5")
Example #18
# k = 10
# step = math.floor(len(labels)/k)
# for i in range(0,k-1):
#     train_images= general_images[:i*step-1]
#     train_labels= general_labels[:i*step-1]
#     validation_images= general_images[i*step:(i+1)*step-1]
#     validation_labels= general_labels[i*step:(i+1)*step-1]
#     train_images= np.concatenate((train_images, general_images[(i+1)*step:]), axis=0)
#     train_labels= np.concatenate((train_labels, general_labels[(i+1)*step:]), axis=0)
#     model.fit(x=train_images,
#           y=train_labels,
#           epochs=5, batch_size=100,verbose=2) #,validation_split=0.2

model.fit(images_train,
          labels_train,
          batch_size=128,
          epochs=1,
          verbose=1,
          validation_split=0.1)

# Evaluate the model
# result = model.evaluate(x=test_images,
#                     y=test_labels)

score = model.evaluate(images_test, labels_test, verbose=0)

print('Testing set accuracy:', score[1])

# Print loss and accuracy
# for name, value in zip(model.metrics_names, result):
#     print(name, value)
Example #19
# (snippet begins mid-call; the opening line is reconstructed, and the
#  ImageDataGenerator instance name `data_generator` is an assumption)
validation_generator = data_generator.flow_from_directory(
    'EarthonCanvas/Aerialsketch',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_VALIDATION,
    class_mode='categorical',
    subset='validation')

cb_early_stopper = EarlyStopping(monitor='val_loss',
                                 patience=EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath='../best.hdf5',
                                  monitor='val_loss',
                                  save_best_only=True,
                                  mode='auto')
print('hi')
fit_history = model.fit(train_generator,
                        steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
                        epochs=NUM_EPOCHS,
                        validation_data=validation_generator,
                        validation_steps=STEPS_PER_EPOCH_VALIDATION,
                        callbacks=[cb_checkpointer, cb_early_stopper])
print('hi2')
model.load_weights("../best.hdf5")

print(fit_history.history.keys())

plt.figure(1, figsize=(15, 8))

plt.subplot(221)
plt.plot(fit_history.history['acc'])
plt.plot(fit_history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
Example #20
from keras import backend as K  # (assumed import; these metrics use the Keras backend)

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))

model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['acc', f1_m, precision_m, recall_m])

model.fit(np.array(x_train_pad), np.array(y_train),
          validation_split=0.2, epochs=25, batch_size=64)


loss, accuracy, f1_score, precision, recall = model.evaluate(np.array(x_train_pad), np.array(y_train))


print(loss, f1_score)
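Keras averages these custom metrics over batches, so they can differ slightly from corpus-level scores; a quick cross-check with scikit-learn on the same data might look like this (a sketch, assuming binary 0/1 labels and a sigmoid output):

import numpy as np
from sklearn.metrics import f1_score as sk_f1

y_hat = (model.predict(np.array(x_train_pad)) > 0.5).astype(int).ravel()
print(sk_f1(np.array(y_train), y_hat))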

Example #21
# prepare output data
y_train_enc, y_test_enc = prepare_targets(y_train, y_test)
print('Finished preparing outputs.')
# define the  model
model = Sequential()
model.add(Dense(187, input_dim=X_train_enc.shape[1], activation="tanh", kernel_initializer='he_normal'))
model.add(Dropout(0.2))
model.add(Dense(64, input_dim=X_train_enc.shape[1], activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, input_dim=X_train_enc.shape[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# compile the keras model
opt = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
# fit the keras model on the dataset
model.fit(X_train_enc, y_train_enc, epochs=20, batch_size=128, verbose=1, use_multiprocessing=True)
# evaluate the keras model
_, accuracy = model.evaluate(X_test_enc, y_test_enc, verbose=0)
print('Accuracy: %.2f' % (accuracy * 100))



'''
Accuracy: 49.95
model = Sequential()
model.add(Dense(32, input_dim=X_train_enc.shape[1], activation="tanh", kernel_initializer='he_normal'))
model.add(Dropout(0.4))
model.add(Dense(16, input_dim=X_train_enc.shape[1], activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(8, input_dim=X_train_enc.shape[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
Example #22
earlystopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)

# Automatically reduce the learning rate
lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1,
                                 min_lr=1e-8,
                                 patience=0,
                                 verbose=1)

# Define the callbacks
callbacks = [earlystopping, checkpoint, lr_reduction]

# Start training
model.fit(X_train,
          y_train,
          validation_split=0.1,
          epochs=20,
          batch_size=128,
          callbacks=callbacks)

result = model.evaluate(X_test, y_test)
print('Accuracy:{0:.2%}'.format(result[1]))


def predict_sentiment(text):
    print(text)
    # Remove punctuation
    text = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+", "", text)
    # Segment into words
    cut = jieba.cut(text)
    cut_list = [i for i in cut]
    # tokenize
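The function is truncated here; the original presumably tokenizes, pads, and scores the text. A hedged sketch of such a continuation (tokenizer, max_tokens, and pad_sequences are assumed to come from the training code):

    # plausible continuation (assumed, not from the original)
    tokens = tokenizer.texts_to_sequences([' '.join(cut_list)])
    tokens_pad = pad_sequences(tokens, maxlen=max_tokens, padding='post')
    prob = model.predict(tokens_pad)[0][0]
    print('positive' if prob >= 0.5 else 'negative', prob)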
Example #23
ax1.plot(x_train, y_train, label='Ground Truth')
ax1.set_xlabel("x")
ax1.set_ylabel("f(x)")

batch_size = 1
epochs = 100

model = Sequential()
model.add(Dense(5, activation='tanh', input_shape=(1,)))
model.add(Dense(5, activation='tanh'))
model.add(Dense(1, activation='linear'))

model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])

line = ax1.plot([], [], linestyle="-.", label="Predict")
plt.legend()

for ep in range(epochs):
    history = model.fit(x_train, y_train, batch_size=batch_size,
                        epochs=1, verbose=1)
    y_predict = model.predict(x_train)
    line[0].set_data([x_train, y_predict])
    if ep > 0:
        ax2.plot([ep - 1, ep], [prev_loss, history.history["loss"][0]], c="C0")
    prev_loss = history.history["loss"][0]
    ax1.set_title("Epochs: %d" % ep)
    plt.pause(0.1)
    plt.draw()
plt.show()
Example #24
import os  # (assumed import for the environment variable below)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

callbacks_list = [
    EarlyStopping(monitor='acc', patience=1),
    ModelCheckpoint(
        filepath='files/chkpt.h5',
        monitor='val_loss',
        save_best_only=True,
    )
]

# Generate fake data
x = np.random.random((1000, 2))
y = np.random.random((1000))
val_x = np.random.random((100, 2))
val_y = np.random.random((100))

# Model
model = Sequential()
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.fit(x,
          y,
          epochs=10,
          batch_size=8,
          callbacks=callbacks_list,
          validation_data=(val_x, val_y))
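A sketch of restoring the best checkpoint written by the ModelCheckpoint callback above (the path is taken from callbacks_list):

from keras.models import load_model
best_model = load_model('files/chkpt.h5')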
Example #25
    def run(self, epochs, units, batch_size):
        d = self.data
        X_train, X_test, y_train, y_test = d.X_train, d.X_test, d.y_train, d.y_test
        X_train_t = d.X_train_t
        X_test_t = d.X_test_t

        K.clear_session()
        model = Sequential()  # Sequential model  # 190709 : 20
        model.add(LSTM(units, input_shape=(177, 1)))  # (timestep, feature)
        model.add(Dense(3))  # 3 outputs: temperature, humidity, CO2
        # this first compile would be overridden by the one below, so it is kept as a comment
        # model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
        # self.loss = 'mean_squared_error'
        # self.optimizer = 'adam'

        model.compile(loss=d.loss, optimizer=d.optimizer, metrics=['accuracy'])
        model.summary()

        self.epochs = epochs
        self.units = units
        self.batch_size = batch_size
        # self.model = model


        early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)

        hist = model.fit(d.X_train_t, d.y_train, epochs=epochs,
                         batch_size=batch_size, verbose=1)
        # , callbacks=[early_stop]))
        print("result : \n" + str(hist))

        from keras.models import load_model
        model.save('190711_200_unit256_batch128.h5')

        target_names = ['inner_temperature', 'inner_humidity', 'inner_co2']

        y_pred = model.predict(X_test_t)

        # %matplotlib inline

        fig, loss_ax = plt.subplots()

        acc_ax = loss_ax.twinx()

        loss_ax.plot(hist.history['loss'], 'y', label='train loss')
        acc_ax.plot(hist.history['acc'], 'b', label='train acc')

        loss_ax.set_xlabel('epoch')
        loss_ax.set_ylabel('loss')
        acc_ax.set_ylabel('accuracy')

        loss_ax.legend(loc='upper left')
        acc_ax.legend(loc='lower left')

        plt.show()

        y_pred = model.predict(X_test_t)
        plt.figure(figsize=(10, 5))
        plt.ylabel(target_names[1])
        plt.plot(y_test[:, 1])
        plt.plot(y_pred[:, 1])
        plt.show()


        y_pred = model.predict(X_test_t)
        plt.figure(figsize=(10, 5))
        plt.ylabel(target_names[0])
        plt.plot(y_test[:, 0])
        plt.plot(y_pred[:, 0])
        plt.show()


        y_pred = model.predict(X_test_t)
        plt.figure(figsize=(10, 5))

        plt.ylabel(target_names[2])
        plt.plot(y_test[:, 2])
        plt.plot(y_pred[:, 2])
        plt.show()


        print("\n Acc : %.4f" % (model.evaluate(X_train_t, y_train)[1]))

        print("\n Loss : %.4f" % (model.evaluate(X_train_t, y_train)[0]))
Example #26
def train(x_train, y_train, x_test, y_test, epochs):

    #  calculate classes
    if np.unique(y_train).shape[0] == np.unique(y_test).shape[0]:
        #
        num_classes = np.unique(y_train).shape[0]
    else:
        print('Error in class data...')
        return -2

    # set validation data
    '''val_size = int(0.1 * x_train.shape[0])
    r = np.random.randint(0, x_train.shape[0], size=val_size)
    x_val = x_train[r, :, :]
    y_val = y_train[r]
    x_train = np.delete(x_train, r, axis=0)
    y_train = np.delete(y_train, r, axis=0)'''
    step = int(x_train.shape[0] * 0.005)
    length = int(x_train.shape[0] * 0.1 * 0.005)
    r = []
    for i in range(0, x_train.shape[0] - length, step):
        r.extend(range(i, i + length))
    x_val = x_train[r, :, :]
    y_val = y_train[r]
    x_train = np.delete(x_train, r, axis=0)
    y_train = np.delete(y_train, r, axis=0)
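    # e.g. with 100,000 training rows: step = int(100000 * 0.005) = 500 and
    # length = int(100000 * 0.1 * 0.005) = 50, so every 500-row stride donates
    # 50 consecutive rows (~10%) to the validation split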

    print('\nInitializing CNN2D...')
    print('\nclasses:', num_classes)
    print('x train shape:', x_train.shape)
    print('x val shape:', x_val.shape)
    print('x test shape:', x_test.shape)
    print('y train shape:', y_train.shape)
    print('y val shape:', y_val.shape)
    print('y test shape:', y_test.shape)
    print("\nTrain split with mean|std {:.2f}|{:.2f}".format(
        np.mean(x_train), np.std(x_train)))
    print("Test split with mean|std {:.2f}|{:.2f}".format(
        np.mean(x_test), np.std(x_test)))

    # shape data
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1],
                              x_train.shape[2], 1)
    x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], x_val.shape[2], 1)
    x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                            1)
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_val = tf.keras.utils.to_categorical(y_val, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)

    # define the model
    activation = 'elu'
    regularizer = 0.0000
    dropout = 0.25

    # preprocessing
    '''
    offset = 1.0 * np.std(x_train)
    dc0 = (x)
    dc1 = GaussianNoise(offset*0.1)(x)
    dc2 = GaussianDropout(dropout)(x)
    dc3 = Lambda(lambda r: r + __import__('keras').backend.random_uniform((1,), -offset, offset))(x)
    dc4 = Lambda(lambda r: r + __import__('keras').backend.random_uniform((1,), -offset, offset))(x)
    m = Concatenate()([dc0, dc1, dc2, dc3, dc4])
    m = Lambda(lambda r: r - __import__('keras').backend.mean(r))(x)
    '''

    # sequential

    model = Sequential()
    model.add(
        Conv2D(16,
               kernel_size=(3, 3),
               strides=(2, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer),
               input_shape=(x_train.shape[1], x_train.shape[2], 1)))
    model.add(EntropyPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    # model.add(Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='elu', kernel_regularizer=regularizers.l2(regularizer)))
    # model.add(MaxPooling2D(pool_size=(1, 2)))
    # model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(
        Dense(64,
              activation='elu',
              kernel_regularizer=regularizers.l2(regularizer)))
    model.add(Dropout(dropout))
    model.add(Dense(num_classes, activation='softmax'))

    # functional
    '''
    x = Input((x_train.shape[1], x_train.shape[2], x_train.shape[3]))
    m = Conv2D(16, 3, activation=activation , kernel_regularizer=regularizers.l2(regularizer))(x)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    m = Conv2D(32, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    m = Conv2D(64, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    if x_train.shape[1] < 50:
        #
        m = Flatten()(m)
    else:
        m = Conv2D(128, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
        m = GlobalAveragePooling2D()(m)
        m = Dropout(dropout)(m)
    m = (Dense(64, activation=activation, kernel_regularizer=regularizers.l2(regularizer)))(m)
    m = Dropout(dropout)(m)
    y = Dense(num_classes, activation='softmax')(m)
    model = Model(inputs=[x], outputs=[y])
    '''

    # summarize model
    for i in range(0, len(model.layers)):
        if i == 0:
            plot_model(model, to_file='Models\\model_cnn2d.png')
            # f = open('Models\\model_cnn2d.txt', 'w')
            # print(' ')
        # print('{}. Layer {} with input / output shapes: {} / {}'.format(i, model.layers[i].name, model.layers[i].input_shape, model.layers[i].output_shape))
        # f.write('{}. Layer {} with input / output shapes: {} / {} \n'.format(i, model.layers[i].name, model.layers[i].input_shape, model.layers[i].output_shape))
        if i == len(model.layers) - 1:
            # f.close()
            print(' ')
            model.summary()

    # compile, fit evaluate
    callback = [
        callbacks.EarlyStopping(monitor='val_acc',
                                min_delta=0.01,
                                patience=10,
                                restore_best_weights=True)
    ]
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size=256,
              epochs=epochs,
              verbose=2,
              validation_data=(x_val, y_val),
              callbacks=callback)
    score = model.evaluate(x_test, y_test, verbose=2)

    # evaluate on larger frames
    aggr_size = 5
    for i in range(0, y_test.shape[0] - aggr_size, aggr_size):
        if i == 0:
            y_pred = model.predict(x_test)
            y_pred = np.argmax(y_pred, axis=1)
            y_test = np.argmax(y_test, axis=1)
            y_aggr_test = []
            y_aggr_pred = []
        if np.unique(y_test[i:i + aggr_size]).shape[0] == 1:
            y_aggr_test.append(stats.mode(y_test[i:i + aggr_size])[0][0])
            y_aggr_pred.append(stats.mode(y_pred[i:i + aggr_size])[0][0])
    # print(confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1)))
    scipy_score = classification_report(y_aggr_test,
                                        y_aggr_pred,
                                        output_dict=True)['accuracy']
    print('short {:.2f} and aggr {:.2f}'.format(score[1], scipy_score))

    # save model
    open("Models\\model_cnn2d.json", "w").write(model.to_json())
    pickle.dump(model.get_config(), open("Models\\model_cnn2d.pickle", "wb"))
    model.save_weights("Models\\model_cnn2d.h5")

    # results
    return score[1]
Example #27
def _train(mutated, module_name):
    mutated = mutated[mutated['mod_keys_found_string'] == module_name]
    train_set, val_set, test_set = np.split(
        mutated.sample(frac=1),
        [int(.6 * len(mutated)),
         int(.8 * len(mutated))])
    tasks_sent_train = [row for row in train_set['task_complete']]
    model_tasks3 = Word2Vec(tasks_sent_train,
                            sg=0,
                            size=100,
                            window=6,
                            min_count=1,
                            workers=4,
                            iter=1000)

    train_set['task_complete_one_string'] = train_set['task_complete'].apply(
        lambda x: list_to_string(x))
    test_set['task_complete_one_string'] = test_set['task_complete'].apply(
        lambda x: list_to_string(x))
    val_set['task_complete_one_string'] = val_set['task_complete'].apply(
        lambda x: list_to_string(x))

    y_train = train_set['consistent'].astype(int)
    print(y_train.value_counts(), y_train.shape)

    y_test = test_set['consistent'].astype(int)
    print(y_test.value_counts(), y_test.shape)

    y_val = val_set['consistent'].astype(int)

    tokenizer_train = Tokenizer(lower=False)
    tokenizer_train.fit_on_texts(train_set['task_complete'])
    print(tokenizer_train)

    tokenizer_test = Tokenizer(lower=False)
    tokenizer_test.fit_on_texts(test_set['task_complete'])
    print(tokenizer_test)

    tokenizer_val = Tokenizer(lower=False)
    tokenizer_val.fit_on_texts(val_set['task_complete'])

    tasks_train_tokens = tokenizer_train.texts_to_sequences(
        train_set['task_complete_one_string'])
    tasks_test_tokens = tokenizer_test.texts_to_sequences(
        test_set['task_complete_one_string'])
    tasks_val_tokens = tokenizer_val.texts_to_sequences(
        val_set['task_complete_one_string'])

    num_tokens = [len(tokens) for tokens in tasks_train_tokens]
    num_tokens = np.array(num_tokens)
    np.max(num_tokens)
    np.argmax(num_tokens)
    max_tokens = np.mean(num_tokens) + 2 * np.std(num_tokens)
    max_tokens = int(max_tokens)
    tasks_train_pad = pad_sequences(tasks_train_tokens,
                                    maxlen=max_tokens,
                                    padding='post')
    tasks_test_pad = pad_sequences(tasks_test_tokens,
                                   maxlen=max_tokens,
                                   padding='post')
    tasks_val_pad = pad_sequences(tasks_val_tokens,
                                  maxlen=max_tokens,
                                  padding='post')

    embedding_size = 100
    num_words = len(list(tokenizer_train.word_index)) + 1

    embedding_matrix = np.random.uniform(-1, 1, (num_words, embedding_size))
    for word, i in tokenizer_train.word_index.items():
        if i < num_words:
            embedding_vector = model_tasks3[word]
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector

    sequence_length = max_tokens
    batch_size = 256

    tensorflow.compat.v1.disable_eager_execution()

    # CNN architecture

    num_classes = 2

    # Training params
    num_epochs = 20

    # Model parameters
    num_filters = 64
    weight_decay = 1e-4

    print("training CNN ...")
    model = Sequential()

    # Model add word2vec embedding

    model.add(
        Embedding(
            input_dim=num_words,
            output_dim=embedding_size,
            weights=[embedding_matrix],
            input_length=max_tokens,
            trainable=True,  # the layer is trained
            name='embedding_layer'))
    model.add(
        layers.Conv1D(filters=num_filters,
                      kernel_size=max_tokens,
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.MaxPooling1D(2))
    model.add(Dropout(0.25))

    model.add(
        layers.Conv1D(filters=num_filters + num_filters,
                      kernel_size=max_tokens,
                      activation='relu',
                      padding='same',
                      kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.GlobalMaxPooling1D())
    model.add(Dropout(0.25))

    model.add(layers.Flatten())
    model.add(
        layers.Dense(128,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(layers.Dense(num_classes, activation='softmax'))

    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss=tensorflow.keras.losses.MeanAbsoluteError(),
                  optimizer=sgd,
                  metrics=['accuracy'])
    model.summary()

    model.fit(tasks_train_pad,
              to_categorical(y_train),
              batch_size=batch_size,
              epochs=num_epochs,
              validation_data=(tasks_test_pad, to_categorical(y_test)),
              shuffle=True,
              verbose=2)

    score = model.evaluate(tasks_val_pad, to_categorical(y_val), verbose=0)
    print('loss:', score[0])
    print('Validation accuracy:', score[1])
    y_pred = np.argmax(model.predict(tasks_val_pad), axis=1)  # predict_classes is deprecated

    cm = confusion_matrix(y_val, y_pred)
    tp = cm[1][1]
    fp = cm[0][1]
    fn = cm[1][0]
    tn = cm[0][0]
    precision = round(tp / (tp + fp), 2)
    print('Consistent: precision=%.3f' % (precision))
    recall = round(tp / (tp + fn), 2)
    print('Consistent: recall=%.3f' % (recall))
    f1_score = (2 * precision * recall) / (precision + recall)
    print('Consistent: f1_score=%.3f' % (f1_score))
    precision_neg = round(tn / (tn + fn), 2)
    print('Inconsistent: precision=%.3f' % (precision_neg))
    recall_neg = round(tn / (tn + fp), 2)
    print('Inconsistent: recall=%.3f' % (recall_neg))
    f1_score_neg = (2 * precision_neg * recall_neg) / (precision_neg +
                                                       recall_neg)
    print('Inconsistent: f1_score=%.3f' % (f1_score_neg))
    ns_probs = [0 for _ in range(len(y_val))]
    ns_auc = roc_auc_score(y_val, ns_probs)
    lr_auc = roc_auc_score(y_val, y_pred)
    mcc = matthews_corrcoef(y_val, y_pred)
    print(precision)
    print('No Skill: ROC AUC=%.3f' % (ns_auc))
    print('Our model: ROC AUC=%.3f' % (lr_auc))
    print('Our model: MCC=%.3f' % (mcc))

    json_out = {"module": module_name, "MCC": mcc, "AUC": lr_auc}
    model.save('models/' + module_name)
    return json_out
Example #28
model.add(Flatten())
# Dense layer
model.add(Dense(128,activation='relu'))


# Output layer
model.add(Dense(num_clases,activation='softmax'))

# Compile the model
optimizador=Adam(lr=1e-3)
model.compile(optimizer=optimizador,
              loss='categorical_crossentropy',
              metrics=['accuracy']
)

# Train the model
print(imagenes.shape)
print(probabilidades.shape)
model.fit(x=imagenes,y=probabilidades,epochs=12,batch_size=100)

limiteImagenesPrueba=33
imagenesPrueba,etiquetasPrueba,probabilidadesPrueba=cargarDatos("test/",num_clases,limiteImagenesPrueba)
resultados=model.evaluate(x=imagenesPrueba,y=probabilidadesPrueba)
print("Resultados pruebas:")
print("{0}: {1:.2%}".format(model.metrics_names[1], resultados[1]))
# Folder and file name under which the model will be saved
nombreArchivo='models/modeloReconocimientoComida.keras'
model.save(nombreArchivo)
model.summary()

Example #29
from tensorflow.python.keras import regularizers
import matplotlib.pyplot as plt
import numpy as np
model = Sequential()
model.add(
    Dense(8,
          activation='relu',
          kernel_regularizer=regularizers.l2(0.001),
          input_shape=(1, )))
model.add(
    Dense(8, activation='relu', kernel_regularizer=regularizers.l2(0.001)))
model.add(Dense(1))

model.compile(optimizer=Adam(), loss='mse')

# generate 10,000 random numbers in [-50, 50], along with their squares
x = np.random.random((10000, 1)) * 100 - 50
y = x**2

# fit the model, keeping 2,000 samples as validation set
hist = model.fit(x, y, validation_split=0.2, epochs=15000, batch_size=256)

# check some predictions:
print(model.predict(np.array([[4], [-4], [11], [20], [8], [-5]])))
# result:
# [[ 16.633354]
#  [ 15.031291]
#  [121.26833 ]
#  [397.78638 ]
#  [ 65.70035 ]
#  [ 27.040245]]
Example #30
            # (snippet from a hyper-parameter search; the outer loops over
            #  dense_layer, layer_size, and conv_layer are not shown)
            for l in range(conv_layer-1):
                model.add(Conv2D(layer_size, (3,3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2,2)))

            model.add(Flatten())
            for l in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))

            model.add(Dense(1))
            model.add(Activation('sigmoid'))

            model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])

            history = model.fit(x, y, batch_size=32, epochs=epochs, validation_split=0.1)

            acc = history.history['acc']
            val_acc = history.history['val_acc']

            loss = history.history['loss']
            val_loss = history.history['val_loss']

            epochs_range = range(epochs)

            plt.figure(figsize=(8,8))
            plt.subplot(1,2,1)
            plt.plot(epochs_range, acc, label='training accuracy')
            plt.plot(epochs_range, val_acc, label='validation accuracy')
            plt.legend(loc='lower right')
            plt.title('training and validation accuracy')
Example #31
class KerasCNN(object):
    """Support vector machine class."""

    # Convolutional Layer 1.
    filter_size1 = 5  # Convolution filters are 5 x 5 pixels.
    num_filters1 = 16  # There are 16 of these filters.

    # Convolutional Layer 2.
    filter_size2 = 5  # Convolution filters are 5 x 5 pixels.
    num_filters2 = 36  # There are 36 of these filters.

    # Fully-connected layer.
    fc_size = 128  # Number of neurons in fully-connected layer

    # Get data from files
    data = MNIST(data_dir='/tmp/data/MNIST/')

    # The number of pixels in each dimension of an image.
    img_size = data.img_size

    # The images are stored in one-dimensional arrays of this length.
    img_size_flat = data.img_size_flat

    # Tuple with height and width of images used to reshape arrays.
    img_shape = data.img_shape

    # Tuple with height, width and depth used to reshape arrays.
    # This is used for reshaping in Keras.
    img_shape_full = data.img_shape_full

    # Number of classes, one class for each of 10 digits.
    num_classes = data.num_classes

    # Number of colour channels for the images: 1 channel for gray-scale.
    num_channels = data.num_channels

    def __init__(self):
        """Instantiate the class.

        Args:
            train_batch_size: Training batch size

        Returns:
            None

        """
        # Initialize variables
        epochs = 2
        """
        print('{0: <{1}} {2}'.format('Encoded X image:', fill, self.x_image))
        """

        # Start construction of the Keras Sequential model.
        self.model = Sequential()

        # Add an input layer which is similar to a feed_dict in TensorFlow.
        # Note that the input-shape must be a tuple containing the image-size.
        self.model.add(InputLayer(input_shape=(self.img_size_flat, )))

        # The input is a flattened array with 784 elements,
        # but the convolutional layers expect images with shape (28, 28, 1)
        self.model.add(Reshape(self.img_shape_full))

        # First convolutional layer with ReLU-activation and max-pooling.
        self.model.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=16,
                   padding='same',
                   activation='relu',
                   name='layer_conv1'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))

        # Second convolutional layer with ReLU-activation and max-pooling.
        self.model.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=36,
                   padding='same',
                   activation='relu',
                   name='layer_conv2'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))

        # Flatten the 4-rank output of the convolutional layers
        # to 2-rank that can be input to a fully-connected / dense layer.
        self.model.add(Flatten())

        # First fully-connected / dense layer with ReLU-activation.
        self.model.add(Dense(128, activation='relu'))

        # Last fully-connected / dense layer with softmax-activation
        # for use in classification.
        self.model.add(Dense(self.num_classes, activation='softmax'))

        # Model Compilation
        '''
        The Neural Network has now been defined and must be finalized by adding
        a loss-function, optimizer and performance metrics. This is called
        model "compilation" in Keras.

        We can either define the optimizer using a string, or if we want more
        control of its parameters then we need to instantiate an object. For
        example, we can set the learning-rate.
        '''
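        # As the note above says, a plain string also works when the default
        # hyperparameters are acceptable (an equivalent sketch):
        # self.model.compile(optimizer='adam',
        #                    loss='categorical_crossentropy',
        #                    metrics=['accuracy'])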

        optimizer = Adam(lr=1e-3)
        '''
        For a classification-problem such as MNIST which has 10 possible
        classes, we need to use the loss-function called
        categorical_crossentropy. The performance metric we are interested in
        is the classification accuracy.
        '''

        self.model.compile(optimizer=optimizer,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        # Training
        '''
        Now that the model has been fully defined with loss-function and
        optimizer, we can train it. This function takes numpy-arrays and
        performs the given number of training epochs using the given
        batch-size. An epoch is one full use of the entire training-set. So for
        10 epochs we would iterate randomly over the entire training-set 10
        times.
        '''

        self.model.fit(x=self.data.x_train,
                       y=self.data.y_train,
                       epochs=epochs,
                       batch_size=128)

        # Evaluation
        '''
        Now that the model has been trained we can test its performance on the
        test-set. This also uses numpy-arrays as input.
        '''

        result = self.model.evaluate(x=self.data.x_test, y=self.data.y_test)
        '''
        Print actual versus predicted values
        '''

        print('\nActual vs Predicted X values')
        start = 0
        stop = 300
        predictions = self.model.predict(self.data.x_test[start:stop])
        for pointer in range(start, stop):
            predicted = np.argmax(predictions[pointer])
            actual = np.argmax(self.data.y_test[pointer])
            print('{}: Actual: {}\tPredicted: {}\tMatch: {}'.format(
                str(pointer).zfill(3), actual, predicted, predicted == actual))
        '''
        We can print all the performance metrics for the test-set.
        '''
        print('\nPerformance metrics')
        for name, value in zip(self.model.metrics_names, result):
            print('{} {}'.format(name, value))
        '''
        Print the model summary
        '''

        print('\n\nModel Summary\n')
        self.model.summary()

    def plot_example_errors(self, cls_pred):
        """Plot 9 images in a 3x3 grid.

        Function used to plot 9 images in a 3x3 grid, and writing the true and
        predicted classes below each image.

        Args:
            cls_pred: Array of the predicted class-number for all images in the
                test-set.

        Returns:
            None

        """
        # Boolean array whether the predicted class is incorrect.
        incorrect = (cls_pred != self.data.y_test_cls)

        # Get the images from the test-set that have been
        # incorrectly classified.
        images = self.data.x_test[incorrect]

        # Get the predicted classes for those images.
        cls_pred = cls_pred[incorrect]

        # Get the true classes for those images.
        cls_true = self.data.y_test_cls[incorrect]

        # Plot the first 9 images.
        plot_images(images[0:9],
                    self.img_shape,
                    cls_true[0:9],
                    cls_pred=cls_pred[0:9])
Example #32
class KerasCNN(object):
    """Support vector machine class."""

    # Convolutional Layer 1.
    filter_size1 = 5          # Convolution filters are 5 x 5 pixels.
    num_filters1 = 16         # There are 16 of these filters.

    # Convolutional Layer 2.
    filter_size2 = 5          # Convolution filters are 5 x 5 pixels.
    num_filters2 = 36         # There are 36 of these filters.

    # Fully-connected layer.
    fc_size = 128             # Number of neurons in fully-connected layer

    # Get data from files
    data = MNIST(data_dir='/tmp/data/MNIST/')

    # The number of pixels in each dimension of an image.
    img_size = data.img_size

    # The images are stored in one-dimensional arrays of this length.
    img_size_flat = data.img_size_flat

    # Tuple with height and width of images used to reshape arrays.
    img_shape = data.img_shape

    # Tuple with height, width and depth used to reshape arrays.
    # This is used for reshaping in Keras.
    img_shape_full = data.img_shape_full

    # Number of classes, one class for each of 10 digits.
    num_classes = data.num_classes

    # Number of colour channels for the images: 1 channel for gray-scale.
    num_channels = data.num_channels

    def __init__(self):
        """Instantiate the class.

        Args:
            train_batch_size: Training batch size

        Returns:
            None

        """
        # Initialize variables
        epochs = 2

        """
        print('{0: <{1}} {2}'.format('Encoded X image:', fill, self.x_image))
        """

        # Start construction of the Keras Sequential model.
        self.model = Sequential()

        # Add an input layer which is similar to a feed_dict in TensorFlow.
        # Note that the input-shape must be a tuple containing the image-size.
        self.model.add(InputLayer(input_shape=(self.img_size_flat,)))

        # The input is a flattened array with 784 elements,
        # but the convolutional layers expect images with shape (28, 28, 1)
        self.model.add(Reshape(self.img_shape_full))

        # First convolutional layer with ReLU-activation and max-pooling.
        self.model.add(
            Conv2D(kernel_size=5, strides=1, filters=16, padding='same',
                   activation='relu', name='layer_conv1'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))

        # Second convolutional layer with ReLU-activation and max-pooling.
        self.model.add(
            Conv2D(kernel_size=5, strides=1, filters=36, padding='same',
                   activation='relu', name='layer_conv2'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))

        # Flatten the 4-rank output of the convolutional layers
        # to 2-rank that can be input to a fully-connected / dense layer.
        self.model.add(Flatten())

        # First fully-connected / dense layer with ReLU-activation.
        self.model.add(Dense(128, activation='relu'))

        # Last fully-connected / dense layer with softmax-activation
        # for use in classification.
        self.model.add(Dense(self.num_classes, activation='softmax'))

        # Model Compilation

        '''
        The Neural Network has now been defined and must be finalized by adding
        a loss-function, optimizer and performance metrics. This is called
        model "compilation" in Keras.

        We can either define the optimizer using a string, or if we want more
        control of its parameters then we need to instantiate an object. For
        example, we can set the learning-rate.
        '''

        optimizer = Adam(lr=1e-3)

        '''
        For a classification-problem such as MNIST which has 10 possible
        classes, we need to use the loss-function called
        categorical_crossentropy. The performance metric we are interested in
        is the classification accuracy.
        '''

        self.model.compile(
            optimizer=optimizer,
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        # Training

        '''
        Now that the model has been fully defined with loss-function and
        optimizer, we can train it. This function takes numpy-arrays and
        performs the given number of training epochs using the given
        batch-size. An epoch is one full use of the entire training-set. So for
        10 epochs we would iterate randomly over the entire training-set 10
        times.
        '''

        self.model.fit(x=self.data.x_train,
                       y=self.data.y_train,
                       epochs=epochs, batch_size=128)

        # Evaluation

        '''
        Now that the model has been trained we can test its performance on the
        test-set. This also uses numpy-arrays as input.
        '''

        result = self.model.evaluate(x=self.data.x_test, y=self.data.y_test)

        '''
        Print actual versus predicted values
        '''

        print('\nActual vs Predicted X values')
        start = 0
        stop = 300
        predictions = self.model.predict(self.data.x_test[start:stop])
        for pointer in range(start, stop):
            predicted = np.argmax(predictions[pointer])
            actual = np.argmax(self.data.y_test[pointer])
            print(
                '{}: Actual: {}\tPredicted: {}\tMatch: {}'.format(
                    str(pointer).zfill(3),
                    actual, predicted, predicted == actual))

        '''
        We can print all the performance metrics for the test-set.
        '''
        print('\nPerformance metrics')
        for name, value in zip(self.model.metrics_names, result):
            print('{} {}'.format(name, value))

        '''
        Print the model summary
        '''

        print('\n\nModel Summary\n')
        self.model.summary()

    def plot_example_errors(self, cls_pred):
        """Plot 9 images in a 3x3 grid.

        Function used to plot 9 images in a 3x3 grid, and writing the true and
        predicted classes below each image.

        Args:
            cls_pred: Array of the predicted class-number for all images in the
                test-set.

        Returns:
            None

        """
        # Boolean array whether the predicted class is incorrect.
        incorrect = (cls_pred != self.data.y_test_cls)

        # Get the images from the test-set that have been
        # incorrectly classified.
        images = self.data.x_test[incorrect]

        # Get the predicted classes for those images.
        cls_pred = cls_pred[incorrect]

        # Get the true classes for those images.
        cls_true = self.data.y_test_cls[incorrect]

        # Plot the first 9 images.
        plot_images(
            images[0:9], self.img_shape, cls_true[0:9], cls_pred=cls_pred[0:9])