예제 #1
0
def unet_crf():
    """Run the trained U-Net over the membrane test set, refine each
    prediction with a dense CRF, and save both the raw and the CRF mask
    as 8-bit grayscale PNGs."""
    model = unet()
    model.load_weights("./unet_membrane.hdf5")
    testGene = testGenerator("./dataset/membrane/test/img", 1)
    labelGene = labelGenerator("./dataset/membrane/test/label", 1)
    for idx, (data, target) in enumerate(zip(testGene, labelGene)):
        pred = model.predict(data, 1, verbose=1).squeeze()
        print("pred", pred.shape)

        # original image, 0-255, 3 channel
        img = cv2.imread("./dataset/membrane/test/img/%s" % str(idx) + ".png")
        # perform dense CRF on the raw score map
        final_mask = dense_crf(idx, np.array(img).astype(np.uint8), pred)

        # binarize the score map (threshold 0.5) to get the raw U-Net mask
        binary = np.uint8(pred >= 0.5) * 255
        Image.fromarray(binary, 'L').save('./{}.png'.format(idx))

        # draw result after dense CRF
        crf_mask = np.uint8(final_mask) * 255
        Image.fromarray(crf_mask, 'L').save('./img/{}.png'.format(idx))
예제 #2
0
def draw(model_path, image_path):
    """Load U-Net weights from `model_path`, predict on 5 test images
    (512x512) from `image_path`, and write each thresholded mask to
    <index>.png in the working directory."""
    net = unet()
    net.load_weights(model_path)
    gen = testGenerator(image_path, 5, target_size=(512, 512))
    masks = net.predict_generator(gen, 5, verbose=1) > 0.5

    for idx, mask in enumerate(masks):
        # Boolean mask -> 0/255 grayscale image.
        pixels = np.uint8(mask.squeeze()) * 255
        Image.fromarray(pixels, 'L').save('{}.png'.format(idx))
예제 #3
0
def cal_acc(model_path, image_path, label_path, num_image):
    """Print the average pixel accuracy of the U-Net over `num_image`
    image/label pairs; predictions are thresholded at 0.5."""
    net = unet()
    net.load_weights(model_path)
    images = testGenerator(image_path, num_image, target_size=(512, 512))
    labels = labelGenerator(label_path, num_image, target_size=(512, 512))

    acc_sum = 0
    count = 0
    for data, target in zip(images, labels):
        # Batch size is 1, so one per-image accuracy is added per step.
        pred = (net.predict(data, 1, verbose=1) > 0.5).squeeze()
        acc_sum += (target == pred).mean()
        count += data.shape[0]

    print('Avg Accuracy:', acc_sum / count)
예제 #4
0
def test_unet(cfg):
    """Restore a pretrained U-Net from `cfg.pretrained_fpath`, run it on the
    test images under `cfg.test_dpath`, and save the 30 predicted masks
    (filenames tagged with `cfg.postfix`)."""
    pretrained_fpath = cfg.pretrained_fpath
    test_dpath = cfg.test_dpath
    postfix = cfg.postfix

    # Force inference mode through both backend handles.
    tf.keras.backend.set_learning_phase(0)
    K.set_learning_phase(0)

    # Load the model from file.
    print("Loading: {}".format(pretrained_fpath))
    model = UNetH5(pretrained_fpath, (256, 256, 1))

    # Build the generator, predict, and persist the results.
    test_gen = testGenerator(test_dpath)
    predictions = model.predict_generator(test_gen, 30, verbose=1)
    saveResult(test_dpath, predictions, postfix=postfix)
# model.fit_generator(myGene, steps_per_epoch=2000, epochs=5, callbacks=[model_checkpoint])
#
# testGene = testGenerator(os.path.join(dataset_folder, 'em_test/raw_xz/'), num_image=512, target_size=(64, 512))
# model = unet(input_size=(64, 512, 1))
# model.load_weights(os.path.join(experiment_folder, 'unet_membrane_xz.h5'))
# results = model.predict_generator(testGene, 512, verbose=1)
# if not os.path.exists(os.path.join(experiment_folder, 'result_unet_xz')):
#     os.mkdir(os.path.join(experiment_folder, 'result_unet_xz'))
# saveResult(os.path.join(experiment_folder, 'result_unet_xz'), results)

#
# # ZY
# myGene = trainGenerator(
#     2, os.path.join(dataset_folder, 'em_gt'), 'raw_zy', 'mem_gt_2_zy',
#     data_gen_args, save_to_dir=None, target_size=(512, 64)
# )
#
# model = unet(input_size=(512, 64, 1),
#              pretrained_weights=None)
# model_checkpoint = ModelCheckpoint(
#     os.path.join(experiment_folder, 'unet_membrane_zy.h5'), monitor='loss', verbose=1, save_best_only=True
# )
# model.fit_generator(myGene, steps_per_epoch=2000, epochs=5, callbacks=[model_checkpoint])

# Predict the 512 ZY test slices with the ZY-trained network and save them.
testGene = testGenerator(os.path.join(dataset_folder, 'em_test/raw_zy/'), num_image=512, target_size=(512, 64))
model = unet(input_size=(512, 64, 1))
model.load_weights(os.path.join(experiment_folder, 'unet_membrane_zy.h5'))
results = model.predict_generator(testGene, 512, verbose=1)
# Create the result directory on first use.
result_dir = os.path.join(experiment_folder, 'result_unet_zy')
if not os.path.exists(result_dir):
    os.mkdir(result_dir)
saveResult(result_dir, results)
예제 #6
0
import model
import data
import sys

# SETTINGS
test_dir = "../data/test"
generated_dir = "../data/test_generated"
# Number of test samples: from the command line when given, otherwise 4.
num_of_test = int(sys.argv[1]) if len(sys.argv) > 1 else 4

# LOAD THE MODEL
# Rebuild the architecture from its JSON description ...
with open('../model/modelStructure.json', 'r') as json_file:
    loaded_model_json = json_file.read()
myModel = model.model_from_json(loaded_model_json)
# ... and restore the trained weights.
myModel.load_weights("../model/modelWeights.h5")
print("Model loaded from disk") 

# Compile so the loaded model can be evaluated on test data.
myModel.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])


# RUN ON THE TEST SAMPLES
testGene = data.testGenerator(test_dir)
results = myModel.predict_generator(testGene, num_of_test, verbose=1)
data.saveResult(test_dir, results)
예제 #7
0
from data import trainGenerator, testGenerator, saveResult
from model import unet, ModelCheckpoint

# Augmentation parameters for the training generator.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}

# Train on the membrane ground truth, checkpointing the best loss.
myGene = trainGenerator(
    2, '/g/schwab/hennies/teaching/datasets/em_gt/', 'raw', 'mem_gt',
    data_gen_args, save_to_dir=None, target_size=(512, 512)
)
model = unet(input_size=(512, 512, 1))
model_checkpoint = ModelCheckpoint('unet_membrane.h5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=100, epochs=3, callbacks=[model_checkpoint])

# Reload the best checkpoint and predict on the 64 test slices.
testGene = testGenerator('/g/schwab/hennies/teaching/datasets/em_test/raw/', num_image=64, target_size=(512, 512))
model = unet(input_size=(512, 512, 1))
model.load_weights('unet_membrane.h5')
results = model.predict_generator(testGene, 64, verbose=1)
saveResult('/g/schwab/hennies/teaching/datasets/em_test/result_unet/', results)
예제 #8
0
                    monitor='val_loss',
                    save_best_only=True,
                    verbose=1)
]
# Training
# Fit the dilated-convolution U-Net; keep the history for later inspection.
history_dilated = model_dilated.fit_generator(
    generator=trainGen,
    steps_per_epoch=NUM_TRAINING_STEP,
    validation_data=valGen,
    validation_steps=NUM_VALIDATION_STEP,
    epochs=NUM_EPOCH,
    callbacks=callbacks)

print("Predict and save results...")
print("...For U-Net with 32 filters...")
# A fresh generator is built before each model — presumably testGenerator
# is a one-shot iterator that cannot be reused; confirm in data module.
testGene = testGenerator(test_path)
result_1 = model_32.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...For U-Net with 64 filters...")
testGene = testGenerator(test_path)
result_2 = model_64.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...For U-Net with dilated convolution...")
testGene = testGenerator(test_path)
result_3 = model_dilated.predict_generator(testGene, TEST_SIZE, verbose=1)
print("...Averaging the prediction results...")
# Simple ensemble: average the three models' score maps.
result = (result_1 + result_2 + result_3) / 3
save_result(predict_path, result)

print("Make submission...")
make_submission(predict_path,
                test_size=TEST_SIZE,
                submission_filename=os.path.join(submission_path,
예제 #9
0
파일: main.py 프로젝트: JacobSal/UNET
from os import chdir

# Work from the unet-master checkout.
chdir(r"C:\Users\jsalm\Documents\Python Scripts\Automated_Histo\unet-master\unet-master")
from model import *
from data import testGenerator, trainGenerator, saveResult


#os.environ["CUDA_VISIBLE_DEVICES"] = "0"


# Augmentation parameters for the training generator.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}
myGene = trainGenerator(2, 'data/membrane/train', 'image', 'label', data_gen_args, save_to_dir=None)

# Train the U-Net, checkpointing the best weights by training loss.
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=300, epochs=5, callbacks=[model_checkpoint])

# Predict on the test images and save the masks.
testGene = testGenerator("data/membrane/test")
results = model.predict_generator(testGene, 56657, verbose=1)
saveResult("data/membrane/test", results)
    verbose=1,
    save_best_only=True)
# Train the U-Net, keeping the per-epoch metrics in `history`.
history = model_obj.fit_generator(myGene,
                                  steps_per_epoch=100,
                                  epochs=6,
                                  callbacks=[model_checkpoint])

import data as data3
import model

# Paths used for prediction input and output.
_WEIGHTS = '/content/drive/My Drive/Stomata_Project/training_data/unet_stomata_1.hdf5'
_TEST_DIR = '/content/drive/My Drive/Stomata_Project/training_data/train/test'
_OUT_DIR = '/content/drive/My Drive/Stomata_Project/training_data/train/test_output_1'

num_images = 2
# Restore the pretrained stomata U-Net and predict on the test images.
model_obj = model.unet(pretrained_weights=_WEIGHTS)
testGene = data3.testGenerator(_TEST_DIR, num_images)
results = model_obj.predict_generator(testGene, num_images, verbose=1)
data3.saveResult(_OUT_DIR, results)

import cv2
import os


def image_resizer(file_name):
    """Load `file_name` for resizing, skipping files whose name marks them
    as already processed ('true' or 'resize0' in the name).

    NOTE(review): this function is truncated in this excerpt; the resizing
    logic after the imread is not visible here.
    """
    print(file_name)
    # Bug fix: the original condition was `('resize0 in file_name')` — a
    # non-empty string literal, which is always truthy, so EVERY file was
    # skipped and cv2.imread never ran. The intended check is a substring
    # membership test on the file name.
    if ('true' in file_name) or ('resize0' in file_name):
        return

    image = cv2.imread(file_name)
# Optionally reduce the learning rate when training loss stops improving.
if IS_REDUCE_LR:
    # NOTE(review): `epsilon` was renamed `min_delta` in newer Keras —
    # confirm the installed version still accepts it.
    reduce_lr = ReduceLROnPlateau(monitor='loss',
                                  factor=0.2,
                                  patience=5,
                                  verbose=1,
                                  epsilon=1e-4)
    call_back_ls.append(reduce_lr)
# Normalize an empty callback list to None — presumably what the
# fit_generator call below expects; confirm.
if call_back_ls == []:
    call_back_ls = None

print('Start training ...')
# start training
model.fit_generator(train_gen,
                    steps_per_epoch=train_n // BATCH_SIZE,
                    validation_data=val_gen,
                    validation_steps=max(val_n // BATCH_SIZE, 1),
                    epochs=EPOCHS,
                    callbacks=call_back_ls)

############### 4. TEST EVALUATION ###################
print('Test Set Evaluation')
# Collect the .jpg test files; their count drives the prediction steps.
test_f_ls = [
    i for i in os.listdir(os.path.join(TEST_PATH)) if i.endswith('.jpg')
]
test_n = len(test_f_ls)
test_gen = testGenerator(TEST_PATH, num_image=1, target_size=INPUT_SHAPE)
# shape: (steps, height, width, 1)
results = model.predict_generator(test_gen, steps=test_n, verbose=1)
# Save visualizations only when an output path was configured.
if VIS_PATH is not None:
    saveResult(VIS_PATH, results, test_f_ls)
예제 #12
0
    else:
        print('File {} does not exists'.format(filename))
print(len(test_imgs))

# Output locations and the checkpoint files to ensemble.
predict_path = "predict_images"
submission_path = "submission"
weight_path = "weights"
weight_list = ["weights_32.h5", "weights_64.h5", "weights_dilated.h5" ]
# weight_list = ["weights_32_dice.h5"]

print("Check weights...")
# Fail fast if any checkpoint is missing from the weights directory.
missing_weight = list(set(weight_list) - set(os.listdir(weight_path)))
if len(missing_weight):
    raise FileNotFoundError("Can not find: " + str(missing_weight))

print("Load models and predict...")
# Ensemble: sum each model's score map, then divide by the model count.
results = 0
for w in weight_list:
    print("...Load " + w + "...")
    model = load_model(os.path.join(weight_path, w), custom_objects={"dice_loss": dice_loss, "f1": f1})
    print("...Predict...")
    testGene = testGenerator(test_imgs)
    results += model.predict_generator(testGene, TEST_SIZE, verbose=1)
results /= len(weight_list)
save_result(predict_path, results, test_index)

print("Make submission...")
make_submission(predict_path, test_size=TEST_SIZE, indices=test_index, submission_filename=os.path.join(submission_path, "submission.csv"))

print("Done!")
예제 #13
0
파일: main.py 프로젝트: yeLer/UNet
from data import trainGenerator, geneTrainNpy, testGenerator, saveResult
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Augmentation parameters for the training generator.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}
myGene = trainGenerator(2, './data/membrane/train', 'image', 'label', data_gen_args, save_to_dir=None)

# Train the U-Net, checkpointing the best weights by training loss.
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=300, epochs=5, callbacks=[model_checkpoint])

# Predict on the 30 test images and save the masks.
testGene = testGenerator("./data/membrane/test/images")
results = model.predict_generator(testGene, 30, verbose=1)
saveResult("./data/membrane/test/predicts", results)
예제 #14
0
파일: main.py 프로젝트: sjain-stanford/unet
import tensorflow as tf

from model import unet
from data import trainGenerator, testGenerator, saveResult


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # filter out TensorFlow INFO and WARNING logs
#os.environ["CUDA_VISIBLE_DEVICES"]="0" # Make 1 GPU visible for training


# Augmentation parameters for the training generator.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}

myGene = trainGenerator(batch_size=2, train_path='data/membrane/train',
                        image_folder='image', mask_folder='label',
                        aug_dict=data_gen_args, save_to_dir=None)

# Train, checkpointing the best weights by training loss.
model = unet()
model_checkpoint = tf.keras.callbacks.ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=2000, epochs=5, callbacks=[model_checkpoint])

# Predict on the 30 test images and save the masks next to them.
testGene = testGenerator(test_path='data/membrane/test')
results = model.predict_generator(testGene, 30, verbose=1)
saveResult(save_path='data/membrane/test', npyfile=results)
    flag_multi_class=constant_model.flag_multi_class,
    num_class=constant_model.num_class,
    save_to_dir=constant_model.data_gen_save_to_dir,
    target_size=constant_model.target_size)
# NOTE(review): this rebinds the imported module name `model` to the
# network instance — the module is no longer reachable afterwards.
model = model.unet()
model_checkpoint = ModelCheckpoint(constant_model.save_model_name,
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)
# Train with checkpointing and TensorBoard logging.
model.fit_generator(myGenerator,
                    steps_per_epoch=constant_model.steps_per_epoch,
                    epochs=constant_model.epochs,
                    callbacks=[model_checkpoint, constant_model.tbCallBack])

# Predict on the test set and save the results.
testGene = data.testGenerator(constant_model.test_path,
                              num_image=constant_model.test_image_nums,
                              target_size=constant_model.target_size)
results = model.predict_generator(testGene,
                                  constant_model.test_image_nums,
                                  verbose=1)
data.saveResult(constant_model.save_path, results)

# Convert the saved predictions into .nii.gz volumes.
from_path = constant_model.get_test_image_from_path
predict_png_result_path = constant_model.save_path
predict_nii_gz_result_path = constant_model.predict_nii_gz_result_path
if not os.path.isdir(predict_nii_gz_result_path):
    os.makedirs(predict_nii_gz_result_path)
save_data_dir(from_path, predict_png_result_path, predict_nii_gz_result_path)

# Build ROI crops from the result directory — presumably indices 1..45
# given the folder name; confirm against the ROI class.
roi_train = ROI('../../data/result/c0gt_0_45', './center_radii.csv', 1, 46)
roi_train.copy_and_rename('../../data/result/c0gt_0_45')
# # Predict ZY
# testGene = testGenerator(os.path.join(dataset_folder, 'em_test/crop_256/raw_zy_10/'),
#                          num_image=256, target_size=(256, 64), filename='raw_10_{:04d}.tif')
# model = unet(input_size=(256, 64, 1))
# model.load_weights(os.path.join(experiment_folder, 'unet_membrane_zy.h5'))
# results = model.predict_generator(testGene, 256, verbose=1)
# if not os.path.exists(os.path.join(experiment_folder, 'result_unet_zy_10')):
#     os.mkdir(os.path.join(experiment_folder, 'result_unet_zy_10'))
# saveResult(os.path.join(experiment_folder, 'result_unet_zy_10'), results)

# Predict train cubes 00 and 11 (Those that were not trained on for these networks)
# XY 00
# XY 00: predict held-out training cube 00 with the XY-trained network.
testGene = testGenerator(os.path.join(dataset_folder,
                                      'em_gt/crop_256/xy/raw_xy_00/'),
                         num_image=64,
                         target_size=(256, 256),
                         filename='raw_00_{:04d}.tif')
model = unet(input_size=(256, 256, 1))
model.load_weights(os.path.join(experiment_folder, 'unet_membrane_xy.h5'))
results = model.predict_generator(testGene, 64, verbose=1)
# Create the result folder on first use.
if not os.path.exists(
        os.path.join(experiment_folder, 'result_unet_train_xy_00')):
    os.mkdir(os.path.join(experiment_folder, 'result_unet_train_xy_00'))
saveResult(os.path.join(experiment_folder, 'result_unet_train_xy_00'), results)
# XY 11: same generator setup for cube 11 (its prediction code continues
# past this excerpt).
testGene = testGenerator(os.path.join(dataset_folder,
                                      'em_gt/crop_256/xy/raw_xy_11/'),
                         num_image=64,
                         target_size=(256, 256),
                         filename='raw_11_{:04d}.tif')
예제 #17
0
def predict(file_name, callback=None):
    """Run the bush-segmentation U-Net over every frame of an SVO recording
    and write masks, annotated frames, and a JSON analysis report.

    Args:
        file_name: name of the SVO folder, e.g. 'rec2018_07_21-6'.
        callback: optional Qt-style signal object; progress messages are
            sent through its `.emit()` method when provided.

    Returns:
        The string "ok" when finished (also when the weights file is
        missing, matching the original's return value).
    """
    # available classes:
    # 'ground', 'tree', 'bush', 'tower', 'wire', 'copter', 'car', 'build'
    # classes the network was trained for
    channels = ['bush']

    root_path = predict_path + "\\" + file_name
    right_views_path = root_path + "\\right"
    right_views_marked_path = root_path + "\\right_marked"
    right_measures_path = root_path + "\\right_measure"
    mask_path = root_path + "\\mask"
    analyzed_result_path = root_path + "\\report.json"

    distances = []
    timestamps = []
    frame_numbers = []
    coordinates = []
    probabilities = []

    # Bug fix: the original only printed a warning and then fell through to
    # inference with a missing weights file; bail out early instead.
    if not os.path.exists(weights_path):
        print(f"Файл весов {weights_path} не существует!")
        return "ok"

    # Create the output directories on demand.
    for path in (right_views_marked_path, right_measures_path, mask_path):
        if not os.path.exists(path):
            os.makedirs(path)

    # Bug fix: this processing body was attached to the `else` of the
    # mask-directory existence check, so it only ran when the mask folder
    # already existed. It now always runs.
    frames_length = len(os.listdir(right_views_path))
    model = unet(len(channels), pretrained_weights=weights_path)
    testGene = testGenerator(right_views_path)
    # Bug fix: `proc, dif = float(frames_length) / 10` tried to unpack a
    # float into two names (TypeError). Intent appears to be reporting
    # progress roughly every 10%: `proc` is the next progress threshold
    # (a fraction), `dif` the step between reports — confirm with author.
    dif = 0.1
    proc = dif
    for frame_number in range(frames_length):
        pred = []
        if callback is not None:
            callback.emit(f"Файлов обработано {frame_number}")
        # Each frame consumes 15 generator items and is predicted as a batch.
        for _ in range(15):
            pred.append(next(testGene))
        results = model.predict_generator(iter(pred), 15, verbose=1)
        # minimal distance to the vegetation, capture time, and the
        # coordinates/probability of the closest pixel
        min_depth, time_milliseconds, coordinate, probability = analyze_bush_depth(
            results, right_measures_path, frame_number)
        # record the per-frame results
        distances.append(min_depth)
        timestamps.append(time_milliseconds)
        frame_numbers.append(frame_number)
        coordinates.append(coordinate)
        probabilities.append(probability)
        print(f'{frame_number+1}/{frames_length} completed')
        if callback:
            if float(frame_number) / frames_length >= proc:
                proc += dif
                # Bug fix: was `int(proc)*100`, which truncates to 0%
                # until proc reaches 1.0.
                callback.emit(f"{int(proc * 100)}% готово")
        # save the recognized mask and the annotated frame
        saveResult(mask_path, results, channels, frame_number, coordinate,
                   right_views_path, right_views_marked_path)
    # persist the analysis summary
    analyzed_data = {
        'distances': distances,
        'timestamps': timestamps,
        'frame_numbers': frame_numbers,
        'coordinates': coordinates,
        'probabilities': probabilities
    }
    save_to_json(analyzed_data, analyzed_result_path)
    return "ok"
예제 #18
0
#vis_filters(model, num_layers = 35)

# Train the model.
# To retrain from scratch, disable the earlier model.load_weights call.
model.fit_generator(
    trainGene,
    epochs=100,
    steps_per_epoch=(num_imgs_train * fator_img_aug / batch_size),
    validation_data=validationGene,
    validation_steps=(num_imgs_validation * fator_img_aug) / batch_size,
    callbacks=[model_checkpoint, tensorboard])

# Apply the trained model to the test images and save the
# results.
testGene = testGenerator("data/test/epicardio/image",
                         num_image=num_imgs_test,
                         dicom_file=False)
validationGene = testGenerator("data/validation/epicardio/image",
                               num_image=num_imgs_validation,
                               dicom_file=False)

print("----Creating and saving test prediction results----")
results_test = model.predict_generator(testGene, num_imgs_test, verbose=1)
saveResult("predicts/epicardio/test",
           "data/test/epicardio/image",
           results_test,
           dicom_file=False)
'''
print("----Creating and saving validation prediction results----")
results_validation = model.predict_generator(validationGene, num_imgs_validation, verbose=1)
saveResult("predicts/epicardio/validation", "data/validation/epicardio/image", results_validation, dicom_file = False)
예제 #19
0
# Root folder of the membrane-prediction dataset.
dataset_folder = '/g/schwab/hennies/phd_project/image_analysis/autoseg/membrane_predictions'

# Augmentation parameters (consumed by the commented-out training run below).
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')

# myGene = trainGenerator(
#     2, os.path.join(dataset_folder, 'em_gt'), 'raw', 'mem_gt',
#     data_gen_args, save_to_dir=None, target_size=(512, 512)
# )
#
# model = unet(input_size=(512, 512, 1))
# model_checkpoint = ModelCheckpoint(
#     os.path.join(experiment_folder, 'unet_membrane.h5'), monitor='loss', verbose=1, save_best_only=True
# )
# model.fit_generator(myGene, steps_per_epoch=2000, epochs=5, callbacks=[model_checkpoint])

# Predict the 64 raw test slices with the trained membrane network.
testGene = testGenerator(os.path.join(dataset_folder, 'em_test/raw/'),
                         num_image=64,
                         target_size=(512, 512))
model = unet(input_size=(512, 512, 1))
model.load_weights(os.path.join(experiment_folder, 'unet_membrane.h5'))
results = model.predict_generator(testGene, 64, verbose=1)
# Create the result folder on first use.
if not os.path.exists(os.path.join(experiment_folder, 'result_unet')):
    os.mkdir(os.path.join(experiment_folder, 'result_unet'))
saveResult(os.path.join(experiment_folder, 'result_unet'), results)
                        os.path.join(dataset_folder, 'em_gt'),
                        'mem_pred',
                        'mem_gt_2',
                        data_gen_args,
                        save_to_dir=None,
                        target_size=(512, 512))

# Fine-tune: continue training from the existing membrane checkpoint,
# overwriting it with the best loss.
model = unet(input_size=(512, 512, 1),
             pretrained_weights=os.path.join(experiment_folder,
                                             'unet_membrane.h5'))
model_checkpoint = ModelCheckpoint(os.path.join(experiment_folder,
                                                'unet_membrane.h5'),
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)
model.fit_generator(myGene,
                    steps_per_epoch=2000,
                    epochs=5,
                    callbacks=[model_checkpoint])

# Predict the 64 membrane-prediction test slices with the fine-tuned model.
testGene = testGenerator(os.path.join(dataset_folder, 'em_test/mem_pred/'),
                         num_image=64,
                         target_size=(512, 512),
                         filename='slice_{:04d}.tif')
model = unet(input_size=(512, 512, 1))
model.load_weights(os.path.join(experiment_folder, 'unet_membrane.h5'))
results = model.predict_generator(testGene, 64, verbose=1)
# Create the result folder on first use.
if not os.path.exists(os.path.join(experiment_folder, 'result_unet')):
    os.mkdir(os.path.join(experiment_folder, 'result_unet'))
saveResult(os.path.join(experiment_folder, 'result_unet'), results)
예제 #21
0
def testing(test_dir):
    """Predict masks for the images in `test_dir` with the module-level
    `myModel` and write the results back into the same directory."""
    generator = data.testGenerator(test_dir)
    predictions = myModel.predict_generator(generator, num_of_test_imgs, verbose=1)
    data.saveResult(test_dir, predictions)