Example #1
                    validation_data=valid_batches, validation_steps=4, epochs=9, verbose=2)

# Observe the training metrics and compare with the 3x64 CNN built previously; keep the better model
# Also check for overfitting (validation metrics diverging from training metrics)

"""# Predictions"""

# Pull one batch of test images and display them with their labels
test_imgs, test_labels = next(test_batches)
plots(test_imgs, titles=test_labels)

# Keep only column 0 so each one-hot label collapses to a single 0/1 value (empty vs. occupied)
test_labels = test_labels[:,0]

# Set steps from the test-set image count (e.g. 10) and batch size (e.g. 10), so one step covers the whole batch of images
predictions = model.predict_generator(test_batches, steps=1, verbose=0)
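# A more general way to size `steps` is to derive it from the generator
# instead of hard-coding it -- a minimal sketch, assuming `test_batches` is a
# Keras DirectoryIterator exposing `n` and `batch_size`:
import math
steps = math.ceil(test_batches.n / test_batches.batch_size)  # e.g. ceil(10 / 10) = 1
# predictions = model.predict_generator(test_batches, steps=steps, verbose=0)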

# Build the confusion matrix from the true labels and the rounded predictions
cm = confusion_matrix(test_labels, np.round(predictions[:,0]))

cm_plot_labels = ['empty', 'occupied']
plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix') # plots predicted vs. true labels on the x/y axes; inspect the values
# Accuracy may depend on the amount of training data, the number of epochs (a library/tool "..." can help find an optimal value), and other tweaks
# Compare this confusion matrix with the previously built 3x64 CNN model and use the better one
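# `plot_confusion_matrix` used above is assumed to be defined earlier in the
# notebook. A minimal stand-in built on matplotlib (a sketch, not the original
# helper):
import itertools
import numpy as np
import matplotlib.pyplot as plt

def plot_confusion_matrix_sketch(cm, classes, title='Confusion Matrix', cmap=None):
    cmap = cmap or plt.cm.Blues
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], 'd'), horizontalalignment='center')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()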

"""# Confusion Matrix
-Plot Predicted labels vs True labels
"""

# insert code (included above)
#.
Example #2
print("acc : ", acc)

print('(ง˙∇˙)ว {Gonna finish this today!!!}')

x_pred = np.load('../data/vision2/predict_data.npy')

x_pred = x_pred.reshape(-1, 128, 128, 1)
print(x_pred.shape)

x_pred = x_pred / 255.0  # scale pixel values to [0, 1]

pred_generator = idg2.flow(x_pred, shuffle=False)  # shuffle=False keeps output order aligned with x_pred

y_predict = model.predict_generator(pred_generator, verbose=True)

print("결과!!!!!!!!!!!!")
print(y_predict)

sub = pd.read_csv('../data/vision2/sample_submission.csv')

sub['a'] = np.where(y_predict > 0.5, 1, 0)  # threshold the sigmoid outputs at 0.5

sub.to_csv('../data/vision2/file/submission2.csv', index=False)
Example #3
                                              validation_data=valid_generator,
                                              callbacks=[es, mc2, lr])
    learning_history_3 = model3.fit_generator(train_generator3,
                                              epochs=1000,
                                              validation_data=valid_generator,
                                              callbacks=[es, mc3, lr])

    # predict
    model1.load_weights(
        'c:/data/test/mnist/checkpoint/mnist_checkpoint9_1.hdf5')
    model2.load_weights(
        'c:/data/test/mnist/checkpoint/mnist_checkpoint9_2.hdf5')
    model3.load_weights(
        'c:/data/test/mnist/checkpoint/mnist_checkpoint9_3.hdf5')

    # Ensemble averaging: each of the 40 loop iterations contributes 1/40 of the final prediction
    result1 += model1.predict_generator(test_generator, verbose=True) / 40
    result2 += model2.predict_generator(test_generator, verbose=True) / 40
    result3 += model3.predict_generator(test_generator, verbose=True) / 40

    # save val_loss
    hist1 = pd.DataFrame(learning_history_1.history)
    hist2 = pd.DataFrame(learning_history_2.history)
    hist3 = pd.DataFrame(learning_history_3.history)

    val_loss_min1.append(hist1['val_loss'].min())
    val_loss_min2.append(hist2['val_loss'].min())
    val_loss_min3.append(hist3['val_loss'].min())

    nth += 1
    print(f'Finished training run {nth}.')
Example #4
callbacks = [
    TensorBoard(log_dir="logs/{}".format(NAME)),
    EarlyStopping(monitor='val_loss',
                  min_delta=0,
                  patience=2,
                  verbose=0,
                  mode='auto',
                  baseline=None,
                  restore_best_weights=False)
]

model.fit_generator(train_generator,
                    callbacks=callbacks,
                    steps_per_epoch=TRAIN_STEP,
                    epochs=EPOCHS,
                    validation_data=validation_generator,
                    validation_steps=VALIDATION_STEP)

score = model.evaluate_generator(validation_generator,
                                 VALIDATION_STEP // BATCH_SIZE,  # steps must be an integer batch count
                                 workers=12)
scores = model.predict_generator(validation_generator,
                                 VALIDATION_STEP // BATCH_SIZE,
                                 workers=12)
model.save(FILE_NAME)

with open("logs/mylog.txt", "a") as f:
    f.write("\n\n--------" + NAME + "----------\n")
    f.write(str(score) + "\n")
Example #5
    model.add(Dense(10, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=Adam(lr=0.002, epsilon=None),
                  metrics=['acc'])
    # sparse_categorical_crossentropy: multi-class loss, identical to
    # categorical_crossentropy except the labels need not be one-hot encoded.
    # epsilon: guards against division by zero.

    learning_history = model.fit_generator(train_generator,
                                           epochs=1000,
                                           validation_data=valid_generator,
                                           callbacks=[es, mc, reLR])

    # predict
    model.load_weights('../dacon7/check/best_cvision.h5')
    result += model.predict_generator(test_generator, verbose=True) / 80  # average the predictions over the 80 loop iterations

    # save val_loss
    hist = pd.DataFrame(learning_history.history)
    val_loss_min.append(hist['val_loss'].min())

    nth += 1
    print(f'Finished training run {nth}.')

print(val_loss_min, np.mean(val_loss_min))

model.summary()

# Submission

sub['digit'] = result.argmax(1)
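# As the loss-function comment above notes, sparse_categorical_crossentropy
# consumes integer class labels directly, while categorical_crossentropy needs
# one-hot vectors. A standalone toy check (hypothetical arrays; assumes tf.keras):
import numpy as np
from tensorflow.keras.losses import sparse_categorical_crossentropy, categorical_crossentropy
from tensorflow.keras.utils import to_categorical

y_int = np.array([0, 2, 1])                   # integer labels
y_hot = to_categorical(y_int, num_classes=3)  # the same labels, one-hot encoded
probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.2, 0.7],
                  [0.2, 0.6, 0.2]])
print(sparse_categorical_crossentropy(y_int, probs))  # identical values...
print(categorical_crossentropy(y_hot, probs))         # ...to this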
Example #6
    model.add(Dense(128,activation='relu'))
    model.add(BatchNormalization())
 
    model.add(Dense(64,activation='relu'))
    model.add(BatchNormalization())

    model.add(Dense(10,activation='softmax'))
    

    model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=0.002, epsilon=None), metrics=['acc'])

    learning_history = model.fit_generator(train_generator, epochs=2000, validation_data=valid_generator, callbacks=[es, mc, reLR])

    #4. Evaluate, Predict
    loss, acc = model.evaluate(test_generator)
    print("loss : ", loss)
    print("acc : ", acc)
    
    # predict
    result += model.predict_generator(pred_generator, verbose=True) / n_splits_num  # average over the CV folds
    
    # save val_loss
    hist = pd.DataFrame(learning_history.history)
    val_loss_min.append(hist['val_loss'].min())
    
    nth += 1
    print(f'Finished training run {nth}.')
    

sub['digit'] = result.argmax(1)
sub.to_csv('C:/data/dacon/mnist1/submit/210204_2.csv', index=False)
Example #7
              metrics=['accuracy'])

nb_epochs = 2
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    epochs=nb_epochs)

# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')

name = 'jmatt_arch_sketches_orig_tune'
model.save(f'../output/{name}.h5')
plt.savefig(f'../output/{name}.png')

acc = model.evaluate_generator(validation_generator,
                               steps=validation_generator.n // batch_size,  # integer batch count
                               verbose=1)

print(acc)

probs = model.predict_generator(validation_generator)
Example #8
                                      batch_size=batch_size,
                                      shuffle=False)
test_generator = TimeseriesGenerator(X_test,
                                     y_test,
                                     length=window_size,
                                     batch_size=1,
                                     shuffle=False)

model = Sequential()
model.add(CuDNNGRU(128, input_shape=(
    window_size,
    X_train.shape[1],
)))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(y_train.shape[1], activation='softmax'))

# Run training
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(train_generator, epochs=epochs)
print(model.evaluate_generator(test_generator))
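# Why y_test is sliced with [window_size:] below: TimeseriesGenerator only
# emits its first target after a full window, so it yields
# len(X_test) - window_size samples. A standalone toy check (hypothetical array):
# import numpy as np
# from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
# toy = np.arange(10).reshape(10, 1)
# gen = TimeseriesGenerator(toy, toy, length=3, batch_size=1)
# len(gen)  # 7 == 10 - 3
# gen[0]    # window [[0], [1], [2]] -> target [3]; the first 3 targets are skipped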

y_true = np.argmax(y_test[window_size:], axis=1)
y_pred = np.argmax(model.predict_generator(test_generator), axis=1)

print('Confusion matrix')
print(confusion_matrix(y_true, y_pred))
Example #9
# fit model
history = model.fit_generator(generator=training_generator,
                              epochs=100,
                              verbose=2,
                              validation_data=validation_generator,
                              callbacks=[checkpoint, early_stopping_callback],
                              use_multiprocessing=False)

# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()

# make a prediction
test_pred = model.predict_generator(validation_generator)
print(test_pred.shape)
test_pred = test_pred.reshape(test_pred.shape[0] // 10, 10, test_pred.shape[1])

test_y_list = []
test_pred_list = []
x_labels = []

for k in range(10):
    x_labels.append('Day' + str(k))

for i in range(test_pred.shape[0]):
    for j in range(test_pred.shape[1]):
        temp = np.load('data/data_rows_target/' + 'shop_' + str(i) +
                       '_target' + '.npy')
        sum_per_day_y = 0
Example #10
def build_model():

    model = Sequential()
    model.add(
        Conv2D(filters=32,
               kernel_size=(3, 3),
               activation="relu",
               padding="same",
               input_shape=(256, 256, 1)))
    model.add(
        Conv2D(filters=32,
               kernel_size=(3, 3),
               activation="relu",
               padding="same"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.25))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation="relu",
               padding="same"))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation="relu",
               padding="same"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.25))
    model.add(Flatten())
    model.add(Dense(1024, activation="relu"))
    model.add(BatchNormalization())
    model.add(Dropout(rate=0.4))
    model.add(Dense(6, activation="softmax"))

    # NB: this augmenting generator is reused for the validation and test sets
    # below; typically only the rescale would be applied there.
    gen = ImageDataGenerator(rescale=1. / 255,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True)

    train_batches = gen.flow_from_directory("runes/mutated",
                                            model.input_shape[1:3],
                                            color_mode="grayscale",
                                            shuffle=True,
                                            seed=1,
                                            batch_size=16)
    valid_batches = gen.flow_from_directory("runes/validation",
                                            model.input_shape[1:3],
                                            color_mode="grayscale",
                                            shuffle=True,
                                            seed=1,
                                            batch_size=16)
    test_batches = gen.flow_from_directory("runes/testing",
                                           model.input_shape[1:3],
                                           shuffle=False,
                                           color_mode="grayscale",
                                           batch_size=8)

    model.compile(Adam(lr=0.001),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    history1 = model.fit_generator(train_batches,
                                   steps_per_epoch=163,
                                   epochs=5,
                                   validation_data=valid_batches,
                                   validation_steps=624)

    p = model.predict_generator(test_batches, verbose=True)

    # recall_score(pre["label"], pre["pre"])

    #roc_auc_score(pre["label"], pre[1])

    #true_positive_rate, false_positive_rate, threshold = roc_curve(pre["label"], pre[1])
    # roc = DataFrame([true_positive_rate, false_positive_rate]).T
    # roc.plot(x=0,y=1)

    plt.plot(history1.history['accuracy'])
    plt.plot(history1.history['val_accuracy'])
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training set', 'Validation set'], loc='upper left')
    plt.show()

    plt.plot(history1.history['val_loss'])
    plt.plot(history1.history['loss'])
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Validation set', 'Training set'], loc='upper left')
    plt.show()

    model_json = model.to_json()

    with open("model.json", "w") as json_file:
        json_file.write(model_json)

    # save_weights_only is a ModelCheckpoint argument, not a model attribute;
    # assigning it to the model has no effect, so just save the weights directly.
    model.save_weights("keras_model.h5")
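# Reloading the architecture and weights saved above follows the same split --
# a sketch using the standard Keras API (adjust the import if plain Keras
# rather than tf.keras is in use):
from tensorflow.keras.models import model_from_json

with open("model.json") as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights("keras_model.h5")
restored.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])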
Example #11
class Model:
    def __init__(self, dataset, model_path):
        self.dataset = dataset
        self.model = Sequential()
        self.model_path = model_path

    def create(self, tile_size):  # tile_size: (height, width) tuple
        input_shape = (*tile_size, 3)
        model = Sequential()
        model.add(Conv2D(filters=32, kernel_size=3, activation='relu',
                         padding='same', input_shape=input_shape))
        # model.add(Conv2D(filters=32, kernel_size=3,
        #                  padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPool2D())
        model.add(Conv2D(filters=64, kernel_size=3,
                         padding='same', activation='relu'))
        # model.add(Conv2D(filters=64, kernel_size=3,
        #                  padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPool2D())
        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.3))
        model.add(Dense(16, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(
            loss='binary_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])
        model.summary()

        self.model = model

    def train(self, epochs, verbose=2):
        train_datagen = self._get_scaling_generator(self.dataset.X_train,
                                                    self.dataset.Y_train)
        val_datagen = self._get_scaling_generator(self.dataset.X_test,
                                                  self.dataset.Y_test)

        train_history = self.model.fit_generator(train_datagen,
                                                 epochs=epochs,
                                                 validation_data=val_datagen,
                                                 verbose=verbose)

        fig, axes = plt.subplots(1, 2, sharex='all')
        axes[0].plot(train_history.history['loss'])
        axes[0].plot(train_history.history['val_loss'])
        axes[0].set_ylabel('loss')
        axes[1].plot(train_history.history['acc'])
        axes[1].plot(train_history.history['val_acc'])
        axes[1].set_ylabel('accuracy')
        axes[0].legend(['train loss', 'validation loss'], loc='best')
        axes[1].legend(['train accuracy', 'validation accuracy'], loc='best')
        fig.text(0.5, 0.02, "Number of epochs", horizontalalignment='center')

        plt.show()

    def evaluate(self):
        test_datagen = self._get_scaling_generator(self.dataset.X_test,
                                                   self.dataset.Y_test)
        outputs = self.model.evaluate_generator(test_datagen)
        logger.info("Results: Loss: %.3f, Accuracy: %.3f", *outputs)

    def predict(self, verbose=1):
        test_datagen = self._get_scaling_generator(self.dataset.X_test, shuffle=False)  # keep order aligned with Y_test
        results = self.model.predict_generator(test_datagen, verbose=verbose)
        return results

    def save(self):
        model_path = self.model_path
        self.model.save(model_path)

    def load(self):
        model_path = self.model_path
        self.model = load_model(model_path)

    @staticmethod
    def _get_scaling_generator(x, y=None, shuffle=True):
        datagen = ImageDataGenerator(rescale=1. / 255)
        return datagen.flow(x, y, shuffle=shuffle)
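# A sketch of how this class might be driven end to end; `dataset` is a
# hypothetical object exposing X_train/Y_train/X_test/Y_test arrays:
# m = Model(dataset, model_path='tile_classifier.h5')
# m.create(tile_size=(64, 64))
# m.train(epochs=10)
# m.evaluate()
# results = m.predict()
# m.save()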
Example #12
model.add(Dense(n_classes, activation='sigmoid'))
#
# compile the model using binary cross-entropy rather than
# categorical cross-entropy -- this may seem counterintuitive for
# multi-label classification, but keep in mind that the goal here
# is to treat each output label as an independent Bernoulli
# distribution
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=METRICS)

## Fit model for multiple labels and print accuracy
history = model.fit_generator(generator=MyBatchGenerator(X_train,
                                                         Y_train,
                                                         batch_size=1),
                              epochs=5)
pred = model.predict_generator(generator=MyBatchGenerator(X_test,
                                                          Y_test,
                                                          batch_size=1),
                               verbose=2)
print(pred)
# pred_proba = model.predict_proba(X_test)
# preds = model.predict(X_test)
# preds[preds>=0.5] = 1
# preds[preds<0.5] = 0
# # acc = history.history['accuracy']
# print(Y_test)
# print(preds)
#
#
# n_classes = len(ExtraSensoryFeaturesLabels.labels)
# # Compute ROC curve and ROC area for each class
# fpr = dict()
# tpr = dict()
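# Completing the ROC sketch started in the comments above -- per-class curves
# via scikit-learn (assumes Y_test is a multi-hot array with shape
# (n_samples, n_classes) matching pred):
from sklearn.metrics import roc_curve, auc

fpr, tpr, roc_auc = {}, {}, {}
for c in range(n_classes):
    fpr[c], tpr[c], _ = roc_curve(Y_test[:, c], pred[:, c])
    roc_auc[c] = auc(fpr[c], tpr[c])
print(roc_auc)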
Example #13
# ===== Load the model

classifier = load_model("Smiling_recognition_classifier.h5")

# %%

# ===== Print the confusion matrix

test_set_no_shuffle = test_data_gen.flow_from_directory(
    testing_folder,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='binary',
    shuffle=False)

Y_pred = classifier.predict_generator(test_set_no_shuffle)
y_pred = np.array(list(map(round, Y_pred.reshape(Y_pred.size))))  # round each sigmoid output to 0/1
print('Confusion Matrix')
print(confusion_matrix(test_set_no_shuffle.classes, y_pred))

# %%

# ===== Function to test on a single image


def test_single_image(path):
    X_test_input = []
    img = image.load_img(path, target_size=(IMG_HEIGHT, IMG_WIDTH))
    img = image.img_to_array(img).astype('float32') / 255
    img = np.expand_dims(img, axis=0)
    X_test_input.append(img)
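    # The snippet is truncated here; a plausible completion runs the classifier
    # on the prepared image (a sketch -- what the sigmoid output means depends
    # on the generator's class_indices):
    # prob = classifier.predict(img)[0][0]
    # return prob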
Example #14
                    validation_steps=64,
                    callbacks=[tb_callback, cp_callback])

print('TRAINING COMPLETE')
model.save(MODEL_STRUCT_PATH)

for dp in glob(os.path.join(TEST_DATA_PATH, '*')):
    for fp in glob(os.path.join(dp, '*')):  # dp already contains TEST_DATA_PATH
        (fn, _) = os.path.splitext(fp)
        arr = numpy.array(
            load_img(fp,
                     target_size=(SIGN_IMG_HEIGHT, SIGN_IMG_WIDTH),
                     color_mode='rgb',
                     interpolation='nearest'))
        a = get_activations(model, [[arr]], auto_compile=True)
        rp = os.path.join(KERACT_PATH, relative_path(fn, TEST_DATA_PATH))
        display_activations(a, directory=rp, save=True)
        print(f'VISUALIZATION SAVED: {rp}')
        break  # visualize only the first image in each directory

print('DONE')

yp = model.predict_generator(test_generator)
yp = numpy.argmax(yp, axis=1)

print('CONFUSION MATRIX:')
print(confusion_matrix(test_generator.classes, yp))
print('Classification Report')
print(classification_report(test_generator.classes, yp, target_names=TYPES))
Example #15
print("evaluation time")
evaluation = model.evaluate_generator(test_generator,
                                      steps=test_generator.n //
                                      test_generator.batch_size,
                                      verbose=1)

print(evaluation)
with open(output_dir + '/evaluation.txt', 'w') as f:
    f.write(str(evaluation[0]) + "\n")
    f.write(str(evaluation[1]))

print("prediction time")
test_generator.reset()

pred = model.predict_generator(test_generator,
                               steps=test_generator.n //
                               test_generator.batch_size,
                               verbose=1)

predicted_class_indices = np.argmax(pred, axis=1)

labels = train_generator.class_indices
labels = dict((v, k) for k, v in labels.items())  # invert to index -> class name
predictions = [labels[k] for k in predicted_class_indices]

filenames = test_generator.filenames
results = pd.DataFrame({"Filename": filenames, "Predictions": predictions})
results.to_csv(output_dir + "/predictions.csv", index=False)

np.save(output_dir + '/class_indices', train_generator.class_indices)
model.save(output_dir + '/model.h5')
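
# The class-index mapping saved above can be restored for later inference --
# a sketch (np.save pickles the dict, hence allow_pickle on load):
mapping = np.load(output_dir + '/class_indices.npy', allow_pickle=True).item()
idx_to_label = {v: k for k, v in mapping.items()}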
Example #16
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=Adam(lr=0.002, epsilon=None),
                  metrics=['acc'])
    # epsilon: guards against division by zero
    learning_hist = model.fit_generator(train_generator,
                                        epochs=1000,
                                        validation_data=valid_generator,
                                        callbacks=[es, mc, reLR])
    # model.load_weights('../data/DACON_vision1/cp/0203_4_cp.hdf5')

    #4. Evaluate, Predict
    loss, acc = model.evaluate(test_generator)
    print("loss : ", loss)
    print("acc : ", acc)

    result += model.predict_generator(pred_generator, verbose=True) / 40  # average the predictions over the 40 loop iterations

    # save val_loss
    hist = pd.DataFrame(learning_hist.history)
    val_loss_min.append(hist['val_loss'].min())
    val_acc_max.append(hist['val_acc'].max())

    nth += 1
    print(f"Finished training run {nth}.")

    print("val_loss_min :",
          np.mean(val_loss_min))  # val_loss_mean : 0.1835539501160383
    print("val_acc_max :",
          np.mean(val_acc_max))  # val_acc_max : 0.9512500002980232
    # model.summary()
Example #17
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)

train_data_gen = TimeseriesGenerator(X_train, y_train, length=window_size, batch_size=batch_size, shuffle=False)
test_data_gen = TimeseriesGenerator(X_test, y_test, length=window_size, batch_size=batch_size, shuffle=False)

model = Sequential()
model.add(CuDNNGRU(4, input_shape=(window_size, 1,)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history = model.fit_generator(train_data_gen, epochs=epochs).history

index = [df['Open'][0]]  # rebuild the price level from the differenced series
for i, d in enumerate(scaler.inverse_transform(data)):
    index.append(index[i] + d)

index_train = [df['Open'][0]]
for i, d in enumerate(scaler.inverse_transform(model.predict_generator(train_data_gen))):
    index_train.append(index_train[i] + d)

index_test = [index_train[-1]]
for i, d in enumerate(scaler.inverse_transform(model.predict_generator(test_data_gen))):
    index_test.append(index_test[i] + d)

begin = window_size
join = begin + len(index_train)
end = join + len(index_test)
plt.plot(index)
plt.plot(list(range(begin, join)), index_train)
plt.plot(list(range(join, end)), index_test)
plt.show()
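# The three loops above rebuild price levels by cumulatively summing the
# inverse-transformed differences; the same reconstruction can be written with
# np.cumsum (a sketch over the variables defined above):
# diffs = scaler.inverse_transform(model.predict_generator(train_data_gen)).ravel()
# index_train = df['Open'][0] + np.concatenate([[0.0], np.cumsum(diffs)])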