示例#1
0
def test_unet(cfg):
    """Run inference with a pretrained U-Net and save the predictions.

    cfg must provide: ``pretrained_fpath`` (saved model weights),
    ``test_dpath`` (directory holding the test images) and ``postfix``
    (suffix appended to each saved result file).
    """
    weights_fpath = cfg.pretrained_fpath
    test_dir = cfg.test_dpath
    suffix = cfg.postfix

    # Force inference mode in both backend handles.
    tf.keras.backend.set_learning_phase(0)
    K.set_learning_phase(0)

    # Load the model from file.
    print("Loading: {}".format(weights_fpath))
    model = UNetH5(weights_fpath, (256, 256, 1))

    # Build the data generator, predict the 30 test images, save the output.
    predictions = model.predict_generator(testGenerator(test_dir), 30, verbose=1)
    saveResult(test_dir, predictions, postfix=suffix)
                        os.path.join(dataset_folder, 'em_gt'),
                        'mem_pred',
                        'mem_gt_2',
                        data_gen_args,
                        save_to_dir=None,
                        target_size=(512, 512))

# Train a 512x512 U-Net on membrane data, then predict the test set.
# The checkpoint file doubles as the pretrained-weights file: training
# resumes from (and keeps overwriting) the same .h5.
weights_fpath = os.path.join(experiment_folder, 'unet_membrane.h5')

model = unet(input_size=(512, 512, 1), pretrained_weights=weights_fpath)
model_checkpoint = ModelCheckpoint(weights_fpath,
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)  # keep best-loss weights only
model.fit_generator(myGene,
                    steps_per_epoch=2000,
                    epochs=5,
                    callbacks=[model_checkpoint])

# Predict 64 test slices with a fresh network loaded from the checkpoint.
testGene = testGenerator(os.path.join(dataset_folder, 'em_test/mem_pred/'),
                         num_image=64,
                         target_size=(512, 512),
                         filename='slice_{:04d}.tif')
model = unet(input_size=(512, 512, 1))
model.load_weights(weights_fpath)
results = model.predict_generator(testGene, 64, verbose=1)
# makedirs(exist_ok=True) replaces the race-prone exists()+mkdir() pair.
result_dpath = os.path.join(experiment_folder, 'result_unet')
os.makedirs(result_dpath, exist_ok=True)
saveResult(result_dpath, results)
示例#3
0
文件: main.py 项目: JacobSal/UNET
from os import chdir
chdir(r"C:\Users\jsalm\Documents\Python Scripts\Automated_Histo\unet-master\unet-master")
from model import *
from data import testGenerator,trainGenerator,saveResult


#os.environ["CUDA_VISIBLE_DEVICES"] = "0"


# Augmentation settings shared by the paired image/mask generators.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}
myGene = trainGenerator(2, 'data/membrane/train', 'image', 'label',
                        data_gen_args, save_to_dir=None)

# Train, checkpointing the best weights by training loss.
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',
                                   verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=300, epochs=5,
                    callbacks=[model_checkpoint])

# Predict the test images and write the results next to them.
testGene = testGenerator("data/membrane/test")
results = model.predict_generator(testGene, 56657, verbose=1)
saveResult("data/membrane/test", results)
示例#4
0
import model 
import data
import sys

# --- Parameters ---
test_dir = "../data/test"
generated_dir = "../data/test_generated"
# Number of test images: first CLI argument, defaulting to 4.
if (len(sys.argv) > 1):
    num_of_test = int(sys.argv[1])
else:
    num_of_test = 4

# --- Load the model ---
# Rebuild the architecture from its JSON description.  `with` guarantees the
# file handle is closed even if read() fails (the original leaked it on error).
with open('../model/modelStructure.json', 'r') as json_file:
    loaded_model_json = json_file.read()
myModel = model.model_from_json(loaded_model_json)
# Load the trained weights into the freshly built model.
myModel.load_weights("../model/modelWeights.h5")
print("Model loaded from disk") 

# Compile so the loaded model can be evaluated on the test data.
myModel.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])


# --- Run on the test samples ---
testGene = data.testGenerator(test_dir) #generated_dir)
results = myModel.predict_generator(testGene,num_of_test,verbose=1)
data.saveResult(test_dir,results)
# saveResult(os.path.join(experiment_folder, 'result_unet_zy_10'), results)

# Predict train cubes 00 and 11 (those the xy network was not trained on).
# XY 00
testGene = testGenerator(os.path.join(dataset_folder,
                                      'em_gt/crop_256/xy/raw_xy_00/'),
                         num_image=64,
                         target_size=(256, 256),
                         filename='raw_00_{:04d}.tif')
model = unet(input_size=(256, 256, 1))
model.load_weights(os.path.join(experiment_folder, 'unet_membrane_xy.h5'))
results = model.predict_generator(testGene, 64, verbose=1)
# makedirs(exist_ok=True) avoids the exists()/mkdir() race of the original.
out_dpath = os.path.join(experiment_folder, 'result_unet_train_xy_00')
os.makedirs(out_dpath, exist_ok=True)
saveResult(out_dpath, results)
# XY 11
testGene = testGenerator(os.path.join(dataset_folder,
                                      'em_gt/crop_256/xy/raw_xy_11/'),
                         num_image=64,
                         target_size=(256, 256),
                         filename='raw_11_{:04d}.tif')
model = unet(input_size=(256, 256, 1))
model.load_weights(os.path.join(experiment_folder, 'unet_membrane_xy.h5'))
results = model.predict_generator(testGene, 64, verbose=1)
out_dpath = os.path.join(experiment_folder, 'result_unet_train_xy_11')
os.makedirs(out_dpath, exist_ok=True)
saveResult(out_dpath, results)
# XZ 00
testGene = testGenerator(os.path.join(dataset_folder,
示例#6
0
def predict(file_name, callback=None):
    """Run bush segmentation over every frame of an SVO recording folder.

    file_name -- name of the SVO folder, e.g. ``rec2018_07_21-6``.
    callback  -- optional Qt-style signal object; receives progress strings
                 via ``callback.emit(...)``.

    Writes per-frame masks, annotated frames and a ``report.json`` with the
    aggregate analysis into the recording folder.  Returns "ok".
    """
    # Available classes:
    # 'ground', 'tree', 'bush', 'tower', 'wire', 'copter', 'car', 'build'
    # Classes this network was trained for:
    channels = ['bush']

    # file_name is the SVO folder name, e.g. rec2018_07_21-6.
    root_path = predict_path + "\\" + file_name
    right_views_path = root_path + "\\right"
    right_views_marked_path = root_path + "\\right_marked"
    right_measures_path = root_path + "\\right_measure"
    mask_path = root_path + "\\mask"
    analyzed_result_path = root_path + "\\report.json"

    distances = []
    timestamps = []
    frame_numbers = []
    coordinates = []
    probabilities = []

    # BUGFIX: the original only printed the error and fell through; without
    # a weights file there is nothing to predict, so bail out early.
    if not os.path.exists(weights_path):
        print(f"Файл весов {weights_path} не существует!")
        return "ok"

    # BUGFIX: output directories are now created unconditionally.  Previously
    # the whole prediction body sat in an ``else`` attached to the *last*
    # ``if not os.path.exists(mask_path)`` check, so processing only ran when
    # the mask directory already existed.
    os.makedirs(right_views_marked_path, exist_ok=True)
    os.makedirs(right_measures_path, exist_ok=True)
    os.makedirs(mask_path, exist_ok=True)

    frames_length = len(os.listdir(right_views_path))
    model = unet(len(channels), pretrained_weights=weights_path)
    testGene = testGenerator(right_views_path)
    # Progress is reported in 10% steps of the processed fraction.
    # BUGFIX: ``proc, dif = float(frames_length) / 10`` raised TypeError
    # (a float cannot be unpacked into two names).
    proc = dif = 0.1
    for frame_number in range(frames_length):
        pred = []
        if callback is not None:
            callback.emit(f"Файлов обработано {frame_number}")
        # Pull a batch of 15 items from the generator and predict on it.
        for _ in range(15):
            pred.append(next(testGene))
        results = model.predict_generator(iter(pred), 15, verbose=1)
        # Minimum distance to vegetation, capture time, and the coordinates /
        # probability of the closest pixel.
        min_depth, time_milliseconds, coordinate, probability = analyze_bush_depth(
            results, right_measures_path, frame_number)
        # Accumulate the per-frame results.
        distances.append(min_depth)
        timestamps.append(time_milliseconds)
        frame_numbers.append(frame_number)
        coordinates.append(coordinate)
        probabilities.append(probability)
        print(f'{frame_number+1}/{frames_length} completed')
        if callback:
            if float(frame_number) / frames_length >= proc:
                proc += dif
                # BUGFIX: was ``int(proc)*100``, which truncates a fractional
                # ``proc`` to 0 and always reported "0% готово".
                callback.emit(f"{int(proc * 100)}% готово")
        # Save the recognised mask and the annotated frame.
        saveResult(mask_path, results, channels, frame_number, coordinate,
                   right_views_path, right_views_marked_path)
    # Persist the aggregate analysis.
    analyzed_data = {
        'distances': distances,
        'timestamps': timestamps,
        'frame_numbers': frame_numbers,
        'coordinates': coordinates,
        'probabilities': probabilities
    }
    save_to_json(analyzed_data, analyzed_result_path)
    return "ok"
    epochs=100,
    steps_per_epoch=(num_imgs_train * fator_img_aug / batch_size),
    validation_data=validationGene,
    validation_steps=(num_imgs_validation * fator_img_aug) / batch_size,
    callbacks=[model_checkpoint, tensorboard])

# Apply the trained model to the test images and save the prediction
# results.
testGene = testGenerator("data/test/epicardio/image",
                         num_image=num_imgs_test,
                         dicom_file=False)
# NOTE(review): validationGene is only consumed by the commented-out block
# below; it is otherwise unused here.
validationGene = testGenerator("data/validation/epicardio/image",
                               num_image=num_imgs_validation,
                               dicom_file=False)

print("----Creating and saving test prediction results----")
results_test = model.predict_generator(testGene, num_imgs_test, verbose=1)
saveResult("predicts/epicardio/test",
           "data/test/epicardio/image",
           results_test,
           dicom_file=False)
'''
print("----Creating and saving validation prediction results----")
results_validation = model.predict_generator(validationGene, num_imgs_validation, verbose=1)
saveResult("predicts/epicardio/validation", "data/validation/epicardio/image", results_validation, dicom_file = False)
'''
# Evaluate the saved predictions against the ground-truth labels.
evaluateResults(predicts_path="predicts/epicardio/test",
                test_path="data/test/epicardio/label",
                num_imgs=num_imgs_test)
# Optionally shrink the learning rate when the training loss plateaus.
if IS_REDUCE_LR:
    reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=5,
                                  verbose=1, epsilon=1e-4)
    call_back_ls.append(reduce_lr)
if call_back_ls == []:
    call_back_ls = None  # Keras expects None rather than an empty list

print('Start training ...')
# Launch training with validation after each epoch.
model.fit_generator(train_gen,
                    steps_per_epoch=train_n // BATCH_SIZE,
                    validation_data=val_gen,
                    validation_steps=max(val_n // BATCH_SIZE, 1),
                    epochs=EPOCHS,
                    callbacks=call_back_ls)

############### 4. TEST EVALUATION ###################
print('Test Set Evaluation')
# Collect the .jpg test files and predict one image per step.
test_f_ls = [fname for fname in os.listdir(os.path.join(TEST_PATH))
             if fname.endswith('.jpg')]
test_n = len(test_f_ls)
test_gen = testGenerator(TEST_PATH, num_image=1, target_size=INPUT_SHAPE)
# Predictions come back with shape (steps, height, width, 1).
results = model.predict_generator(test_gen, steps=test_n, verbose=1)
if VIS_PATH is not None:
    saveResult(VIS_PATH, results, test_f_ls)
                                  epochs=6,
                                  callbacks=[model_checkpoint])

import data as data3
import model
# Predict two stomata test images with the pretrained U-Net and save the masks.
num_images = 2
model_obj = model.unet(
    pretrained_weights=
    '/content/drive/My Drive/Stomata_Project/training_data/unet_stomata_1.hdf5'
)
test_gen = data3.testGenerator(
    '/content/drive/My Drive/Stomata_Project/training_data/train/test',
    num_images)
predictions = model_obj.predict_generator(test_gen, num_images, verbose=1)
data3.saveResult(
    '/content/drive/My Drive/Stomata_Project/training_data/train/test_output_1',
    predictions)

import cv2
import os


def image_resizer(file_name):
    # NOTE(review): the function body continues beyond this excerpt; only the
    # visible part is annotated here.
    print(file_name)
    # NOTE(review): ``('resize0 in file_name')`` is a non-empty string
    # literal, so this condition is ALWAYS true and the function always
    # returns here — the code below is unreachable.  It was almost certainly
    # meant to be ``('resize' in file_name)``; confirm and fix.
    if ('true' in file_name) or ('resize0 in file_name'): return

    image = cv2.imread(file_name)
    # Derive the matching original image path from the prediction's path.
    original_image = cv2.imread(
        file_name.replace('_output', '').replace('_predict.png', '.jpg'))
    size = original_image.shape
示例#10
0
文件: main.py 项目: yeLer/UNet
from data import trainGenerator, geneTrainNpy, testGenerator, saveResult
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Augmentation parameters shared by the paired image/mask generators.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}
myGene = trainGenerator(2, './data/membrane/train', 'image', 'label',
                        data_gen_args, save_to_dir=None)

# Train, keeping only the best-loss weights on disk.
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',
                                   verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=300, epochs=5,
                    callbacks=[model_checkpoint])

# Predict the 30 test images and save the output masks.
testGene = testGenerator("./data/membrane/test/images")
results = model.predict_generator(testGene, 30, verbose=1)
saveResult("./data/membrane/test/predicts", results)
示例#11
0
import tensorflow as tf

from model import unet
from data import trainGenerator, testGenerator, saveResult


import os  # BUGFIX: os.environ is used below but this snippet never imported os

os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" # This is to filter out TensorFlow INFO and WARNING logs
#os.environ["CUDA_VISIBLE_DEVICES"]="0" # Make 1 GPU visible for training


# Augmentation configuration shared by the image and mask generators.
data_gen_args = dict(rotation_range=0.2,
                     width_shift_range=0.05,
                     height_shift_range=0.05,
                     shear_range=0.05,
                     zoom_range=0.05,
                     horizontal_flip=True,
                     fill_mode='nearest')

myGene = trainGenerator(batch_size=2, train_path='data/membrane/train',
                        image_folder='image', mask_folder='label',
                        aug_dict=data_gen_args, save_to_dir=None)

# Train, keeping only the best-loss weights on disk.
model = unet()
model_checkpoint = tf.keras.callbacks.ModelCheckpoint('unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=2000, epochs=5, callbacks=[model_checkpoint])

# Predict the 30 test images and write results into the test folder.
testGene = testGenerator(test_path='data/membrane/test')
results = model.predict_generator(testGene, 30, verbose=1)
saveResult(save_path='data/membrane/test', npyfile=results)
    save_to_dir=constant_model.data_gen_save_to_dir,
    target_size=constant_model.target_size)
# NOTE: this rebinds the name `model` from the imported module to the network.
model = model.unet()
model_checkpoint = ModelCheckpoint(constant_model.save_model_name,
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)
model.fit_generator(myGenerator,
                    steps_per_epoch=constant_model.steps_per_epoch,
                    epochs=constant_model.epochs,
                    callbacks=[model_checkpoint, constant_model.tbCallBack])

# Predict the configured test images and save the PNG results.
testGene = data.testGenerator(constant_model.test_path,
                              num_image=constant_model.test_image_nums,
                              target_size=constant_model.target_size)
results = model.predict_generator(testGene,
                                  constant_model.test_image_nums,
                                  verbose=1)
data.saveResult(constant_model.save_path, results)

# Convert the PNG predictions into .nii.gz volumes.
from_path = constant_model.get_test_image_from_path
predict_png_result_path = constant_model.save_path
predict_nii_gz_result_path = constant_model.predict_nii_gz_result_path
# makedirs(exist_ok=True) replaces the race-prone isdir()+makedirs() pair.
os.makedirs(predict_nii_gz_result_path, exist_ok=True)
save_data_dir(from_path, predict_png_result_path, predict_nii_gz_result_path)

# Build ROI crops from the ground-truth results (index range 1..46,
# presumably volumes 1-45 — verify against the ROI implementation).
roi_train = ROI('../../data/result/c0gt_0_45', './center_radii.csv', 1, 46)
roi_train.copy_and_rename('../../data/result/c0gt_0_45')
roi_train.save_csv()
示例#13
0
from data import trainGenerator, testGenerator, saveResult
from model import unet, ModelCheckpoint

# Shared augmentation parameters for the membrane training generator.
data_gen_args = {
    'rotation_range': 0.2,
    'width_shift_range': 0.05,
    'height_shift_range': 0.05,
    'shear_range': 0.05,
    'zoom_range': 0.05,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}

myGene = trainGenerator(2, '/g/schwab/hennies/teaching/datasets/em_gt/',
                        'raw', 'mem_gt', data_gen_args,
                        save_to_dir=None, target_size=(512, 512))

# Train a 512x512 U-Net, checkpointing the best-loss weights.
model = unet(input_size=(512, 512, 1))
model_checkpoint = ModelCheckpoint('unet_membrane.h5', monitor='loss',
                                   verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=100, epochs=3,
                    callbacks=[model_checkpoint])

# Reload the checkpoint into a fresh network and predict the 64 test slices.
testGene = testGenerator('/g/schwab/hennies/teaching/datasets/em_test/raw/',
                         num_image=64, target_size=(512, 512))
model = unet(input_size=(512, 512, 1))
model.load_weights('unet_membrane.h5')
results = model.predict_generator(testGene, 64, verbose=1)
saveResult('/g/schwab/hennies/teaching/datasets/em_test/result_unet/', results)
示例#14
0
def testing(test_dir):
    """Predict masks for the images in *test_dir* and save them there.

    Relies on the module-level ``myModel`` and ``num_of_test_imgs``.
    """
    generator = data.testGenerator(test_dir)
    predictions = myModel.predict_generator(generator, num_of_test_imgs,
                                            verbose=1)
    data.saveResult(test_dir, predictions)