Example #1
if config.model == "Unet":  # assumed first branch (head of the snippet is truncated in the source)
    model = Unet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
elif config.model == "Nestnet":
    model = Nestnet(backbone_name=config.backbone,
                    encoder_weights=config.weights,
                    decoder_block_type=config.decoder_block_type,
                    classes=config.nb_class,
                    activation=config.activation)
elif config.model == "Xnet":
    model = Xnet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
else:
    raise ValueError("Unsupported model: {}".format(config.model))
model.load_weights(os.path.join(model_path, config.exp_name + ".h5"))
model.compile(optimizer="Adam",
              loss=dice_coef_loss,
              metrics=["binary_crossentropy", mean_iou, dice_coef])
p_test = model.predict(x_test,
                       batch_size=config.batch_size,
                       verbose=config.verbose)
eva = model.evaluate(x_test,
                     y_test,
                     batch_size=config.batch_size,
                     verbose=config.verbose)
IoU = compute_iou(y_test, p_test)
print("\nSetup: {}".format(config.exp_name))
print(">> Testing dataset mIoU  = {:.2f}%".format(np.mean(IoU)))
print(">> Testing dataset mDice = {:.2f}%".format(eva[3] * 100.0))
Example #2
        plt.show()
    if False:  # set to True to visualize a few random test predictions
        n = 5
        ids = np.random.choice(np.arange(len(test)), size=n)
        for i in ids:
            print(x_test[i])
            image, gt_mask = test[i]
            image = np.expand_dims(image, axis=0)
            pr_mask = model.predict(image)
            visualize(
                image=denormalize(image.squeeze()),
                gt_mask=gt_mask.squeeze(),
                pr_mask=pr_mask.squeeze(),
            )

    scores = model.evaluate(test_dataloader)

    print("Loss: {:.5}".format(scores[0]))
    for metric, value in zip(metrics, scores[1:]):
        print("mean {}: {:.5}".format(metric.__name__, value))

    if True:  # accumulate a per-pixel confusion matrix over the whole test set
        confusionMatrix = np.zeros((6, 6))
        for i in range(len(test)):
            print(i, len(test) - 1)
            image, gt_mask = test[i]
            image = np.expand_dims(image, axis=0)
            pr_mask = model.predict(image)
            for x in range(pr_mask.shape[1]):
                for y in range(pr_mask.shape[2]):
                    # rows: predicted class, columns: ground-truth class
                    # (line truncated in the source; completion with gt_mask assumed)
                    confusionMatrix[np.argmax(pr_mask[0][x][y])][np.argmax(
                        gt_mask[x][y])] += 1
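The per-pixel double loop above is very slow in pure Python. Assuming the same one-hot masks and a 6-class problem, the confusion-matrix update can be vectorized per image with NumPy; the helper below is illustrative, not part of the original code:

import numpy as np

def update_confusion_matrix(confusion_matrix, pr_mask, gt_mask, num_classes=6):
    # pr_mask: (1, H, W, num_classes) model output; gt_mask: (H, W, num_classes) one-hot labels.
    pred_ids = np.argmax(pr_mask[0], axis=-1).ravel()
    true_ids = np.argmax(gt_mask, axis=-1).ravel()
    # Count every (predicted, true) pair with one bincount and fold it back into the matrix
    # (rows indexed by predicted class, columns by ground-truth class, as in the loop above).
    pairs = pred_ids * num_classes + true_ids
    confusion_matrix += np.bincount(pairs, minlength=num_classes ** 2).reshape(num_classes, num_classes)
    return confusion_matrix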
Example #3
                          steps_per_epoch=(NO_OF_TRAINING_IMAGES //
                                           BATCH_SIZE),
                          validation_data=(val_x / 255, val_y),
                          shuffle=True,
                          callbacks=callbacks)
# save model structure
model_json = m.to_json()
with open(os.path.join(args.ckpt_path, "model.json"), "w") as json_file:
    json_file.write(model_json)
# serialize the full model (architecture + weights) to HDF5
m.save(os.path.join(args.ckpt_path, 'model.h5'))
print("Saved model to disk")
# Evaluate and test
print('======Start Evaluating======')
# evaluate directly on the validation arrays instead of using a generator
score = m.evaluate(val_x / 255, val_y, verbose=0)
print("%s: %.2f%%" % (m.metrics_names[0], score[0] * 100))
print("%s: %.2f%%" % (m.metrics_names[1], score[1] * 100))
with open(os.path.join(args.ckpt_path, 'output.txt'), "w") as file:
    file.write("%s: %.2f%%" % (m.metrics_names[0], score[0] * 100))
    file.write("%s: %.2f%%" % (m.metrics_names[1], score[1] * 100))

print('======Start Testing======')
#test_x, test_y = xy_formarray(mask_path, frame_path, 'test',256, cl)
# test_y = np.eye(cl)[test_y]
#predict_y = m.predict(test_x / 255)

#save image
#print('======Save Results======')
#mkdir(args.results_path)
#save_results(mask_path, args.results_path, test_x, test_y, predict_y, 'test')
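To restore the checkpoint written above, the standard Keras calls are model_from_json for the saved architecture and load_model (or load_weights) for the HDF5 file. A rough sketch using the same args.ckpt_path; if the model was compiled with custom losses or metrics, pass them via custom_objects:

import os
from keras.models import load_model, model_from_json

# Option 1: rebuild the architecture from JSON, then load weights from the full-model HDF5 file.
with open(os.path.join(args.ckpt_path, "model.json")) as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights(os.path.join(args.ckpt_path, "model.h5"))

# Option 2: load architecture, weights and optimizer state in a single call.
restored = load_model(os.path.join(args.ckpt_path, "model.h5"))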
Example #4
datagen = ImageDataGenerator(
        featurewise_center=False,             # no dataset-wide mean subtraction
        samplewise_center=True,               # set each sample mean to 0
        featurewise_std_normalization=False,  # no dataset-wide std scaling
        samplewise_std_normalization=True)    # divide each sample by its own std
X_train, Y_train = load()
X_test, Y_test = load_val()
# add a trailing channel dimension: (N, H, W) -> (N, H, W, 1)
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)
Y_train = Y_train.reshape(Y_train.shape[0], Y_train.shape[1], Y_train.shape[2], 1)
Y_test = Y_test.reshape(Y_test.shape[0], Y_test.shape[1], Y_test.shape[2], 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
datagen.fit(X_train)  # not strictly required for samplewise-only normalization, but harmless
# apply the same samplewise standardization to the test images
for i in range(len(X_test)):
    X_test[i] = datagen.standardize(X_test[i])
earlystop = EarlyStopping(monitor='val_iou_score',
                          min_delta=0,
                          patience=80,
                          verbose=1,
                          mode='max',
                          restore_best_weights=True)
callbacks_list = [earlystop]
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                              steps_per_epoch=32,
                              epochs=1000,
                              validation_data=(X_test, Y_test),
                              callbacks=callbacks_list,
                              verbose=1)
score, acc = model.evaluate(X_test,Y_test,batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
model.save("UNET_" + str(time.time())+".h5") #model are saved with time stamps
print("Model saved")