Exemplo n.º 1
0
    # NOTE(review): snippet is truncated above — these lines are the tail of a
    # dict_model1 literal whose opening brace is outside this chunk.
    'model_file': os.path.join(model_dir, 'InceptionResnetV2-004-0.984.hdf5'),
    'input_shape': (299, 299, 3),  # presumably (H, W, C) = 299x299 RGB — confirm
    'model_weight': 1  # presumably the model's relative weight in the ensemble — confirm in do_predict
}
dicts_models.append(dict_model1)
# Second ensemble member: an Xception checkpoint with the same input shape
# and the same weight; dict_model1 is deliberately rebound and re-appended.
dict_model1 = {
    'model_file': os.path.join(model_dir, 'Xception-004-0.984.hdf5'),
    'input_shape': (299, 299, 3),
    'model_weight': 1
}
dicts_models.append(dict_model1)

import pickle  # hoisted above first use (was buried mid-script); ideally at file top

# Build (or reuse) a CSV listing every image under dir_preprocess, run the
# model ensemble over it, and cache the raw probabilities to pkl_prob.
filename_csv = os.path.join(dir_dest, 'LaserSpot_predict_dir.csv')
if GEN_CSV:
    os.makedirs(os.path.dirname(filename_csv), exist_ok=True)
    write_csv_dir_nolabel(filename_csv, dir_preprocess)

df = pd.read_csv(filename_csv)
all_files, all_labels = get_images_labels(filename_csv_or_pd=df)

# argmax=True: presumably y_pred_total holds per-image predicted class ids —
# confirm against do_predict's contract.
prob_total, y_pred_total, prob_list, pred_list = \
    do_predict(dicts_models, filename_csv, argmax=True)

# Persist the ensemble probabilities so downstream analysis can skip
# re-running inference.
os.makedirs(os.path.dirname(pkl_prob), exist_ok=True)
with open(pkl_prob, 'wb') as file:
    pickle.dump(prob_total, file)

# NOTE(review): snippet truncated below — the op_files_multiclass(...) call is
# cut off mid-argument-list at the end of this chunk.
if COMPUTE_DIR_FILES:
    op_files_multiclass(filename_csv,
                        prob_total,
Exemplo n.º 2
0
    # Crop the optic-disc region from the preprocessed 512px images, writing
    # results to dir_crop_optic_disc with masking enabled; server_port=21000
    # presumably addresses a local segmentation service — confirm.
    # NOTE(review): the enclosing block's header is outside this chunk.
    crop_optic_disc_dir(dir_source=dir_preprocess512,
                        dir_dest=dir_crop_optic_disc,
                        server_port=21000,
                        mask=True)
    print('crop optic disc 112 OK!')

# Generate the prediction CSV, either labeled from the directory layout or as
# a plain unlabeled file listing.
if GEN_CSV:
    if GET_LABELS_FROM_DIR:
        # Identity label mapping: subdirectory names '0'..'29' map to the
        # same class-label strings (comprehension replaces the manual loop).
        dict_mapping = {str(i): str(i) for i in range(30)}

        my_data.write_csv_based_on_dir(filename_csv, dir_crop_optic_disc,
                                       dict_mapping)
    else:
        # No per-directory labels available; emit an unlabeled image list.
        my_data.write_csv_dir_nolabel(filename_csv, dir_crop_optic_disc)

# Load the image list back and run the batched ensemble prediction across
# GPU_NUM GPUs.
df = pd.read_csv(filename_csv)
all_files, all_labels = my_data.get_images_labels(filename_csv_or_pd=df)

prob_total, y_pred_total, prob_list, pred_list =\
    LIBS.DLP.my_predict_helper.do_predict_batch(dicts_models, filename_csv, gpu_num=GPU_NUM)

import pickle

# Persist the ensemble probabilities; exist_ok=True replaces the racy
# exists()/makedirs() pair and matches the makedirs usage elsewhere in
# this file (e.g. the exist_ok=True calls in the other snippets).
os.makedirs(os.path.dirname(pkl_prob), exist_ok=True)
with open(pkl_prob, 'wb') as file:
    pickle.dump(prob_total, file)

# pkl_file = open(pkl_prob, 'rb')
# prob_total = pickle.load(pkl_file)
Exemplo n.º 3
0
# When True, organize output files per predicted class afterwards —
# presumably; confirm against the (truncated) COMPUTE_DIR_FILES branch.
COMPUTE_DIR_FILES = True

# Input/output locations for the ROP human-vs-AI comparison run.
dir_original = '/media/ubuntu/data1/ROP项目/人机比赛用图_20200317/original/三标签'
dir_blood_vessel_seg = '/media/ubuntu/data1/ROP项目/人机比赛用图_20200317/results/Plus/blood_vessel_seg'
dir_dest = '/media/ubuntu/data1/ROP项目/人机比赛用图_20200317/results_2020_5_20/Plus/result_2020_5_20'

# dir_original = '/tmp5/ROP_human_AI/mydataset/正常/original'
# dir_blood_vessel_seg = '/tmp5/ROP_human_AI/mydataset/正常/blood_vessel_seg_result'
# dir_dest = '/tmp5/ROP_human_AI/mydataset/正常/Plus_two_steps'
pkl_prob = os.path.join(dir_dest, 'probs.pkl')  # cache file for predicted probabilities

filename_csv = os.path.join(dir_dest, 'plus_two_stage_results.csv')

# Write an unlabeled CSV listing the vessel-segmentation images to predict on.
if GEN_CSV:
    os.makedirs(os.path.dirname(filename_csv), exist_ok=True)
    write_csv_dir_nolabel(filename_csv, dir_blood_vessel_seg)

# dicts_models = []
# model_file1 = '/home/ubuntu/dlp/deploy_models/ROP/plus_two_stages/2020_4_28/InceptionResnetV2-008-0.973.hdf5'
# dict_model1 = {'model_file': model_file1,
#                'input_shape': (299, 299, 3), 'model_weight': 1}
# dicts_models.append(dict_model1)
# model_file1 = '/home/ubuntu/dlp/deploy_models/ROP/plus_two_stages/2020_4_28/InceptionV3-006-0.969.hdf5'
# dict_model1 = {'model_file': model_file1,
#                'input_shape': (299, 299, 3), 'model_weight': 1}
# dicts_models.append(dict_model1)
# model_file1 = '/home/ubuntu/dlp/deploy_models/ROP/plus_two_stages/2020_4_28/Xception-005-0.967.hdf5'
# dict_model1 = {'model_file': model_file1,
#                'input_shape': (299, 299, 3), 'model_weight': 1}
# dicts_models.append(dict_model1)
        # NOTE(review): chunk is truncated above — this sits inside an unseen
        # conditional (likely a DO_PREPROCESS-style guard).
        # Preprocess originals to 512px images under dir_preprocess.
        from LIBS.ImgPreprocess import my_preprocess_dir
        image_size = 512
        my_preprocess_dir.do_process_dir(dir_original, dir_preprocess, image_size=image_size)

    # Generate the prediction CSV, labeled from the directory layout when
    # possible, otherwise as an unlabeled file listing.
    if GEN_CSV:
        # exist_ok=True replaces the racy exists()/makedirs() pair, matching
        # the makedirs style used in the other snippets of this file.
        os.makedirs(os.path.dirname(filename_csv), exist_ok=True)

        if GET_LABELS_FROM_DIR:
            # Identity label mapping: subdirectory names '0'..'29' are the
            # class labels (comprehension replaces the manual loop).
            dict_mapping = {str(i): str(i) for i in range(30)}

            my_data.write_csv_based_on_dir(filename_csv, dir_preprocess, dict_mapping)
        else:
            my_data.write_csv_dir_nolabel(filename_csv, dir_preprocess)


    # Load the generated CSV and run the batched ensemble prediction across
    # GPU_NUM GPUs.
    df = pd.read_csv(filename_csv)
    all_files, all_labels = my_data.get_images_labels(filename_csv_or_pd=df)

    prob_total, y_pred_total, prob_list, pred_list =\
        LIBS.DLP.my_predict_helper.do_predict_batch(dicts_models, filename_csv, gpu_num=GPU_NUM)


    # Cache the ensemble probabilities; exist_ok=True replaces the racy
    # exists()/makedirs() pair used here originally.
    os.makedirs(os.path.dirname(pkl_prob), exist_ok=True)
    with open(pkl_prob, 'wb') as file:
        pickle.dump(prob_total, file)

    # pkl_file = open(pkl_prob, 'rb')
Exemplo n.º 5
0
# Input (preprocessed 384px DR images) and output (CAM results) locations.
DIR_PREPROCESS = '/media/ubuntu/data1/糖网项目/DR分级英国标准_20190119_无杂病/DR/preprocess384/'
DIR_DEST = '/media/ubuntu/data1/糖网项目/DR分级英国标准_20190119_无杂病/DR/results/CAM/'

# Optionally preprocess the originals to 384px; add_black_pixel_ratio and
# is_rop are forwarded to the project helper — semantics defined there.
from LIBS.ImgPreprocess import my_preprocess_dir
if DO_PREPROCESS:
    my_preprocess_dir.do_preprocess_dir(DIR_ORIGINAL,
                                        DIR_PREPROCESS,
                                        image_size=384,
                                        is_rop=False,
                                        add_black_pixel_ratio=0.07)

# Write an unlabeled CSV listing every preprocessed image to predict on.
filename_csv = os.path.join(DIR_DEST, 'csv', 'predict_dir.csv')
if GEN_CSV:
    os.makedirs(os.path.dirname(filename_csv), exist_ok=True)
    from LIBS.DataPreprocess.my_data import write_csv_dir_nolabel
    write_csv_dir_nolabel(filename_csv, DIR_PREPROCESS)

#region load and convert models

# Single-member "ensemble": one InceptionResnetV2 checkpoint.
model_dir = '/tmp5/models_2020_6_19/DR_english/v1'
dicts_models = []
dict_model1 = {
    'model_file': os.path.join(model_dir, 'InceptionResnetV2-004-0.984.hdf5'),
    'input_shape': (299, 299, 3),  # presumably (H, W, C) = 299x299 RGB — confirm
    'model_weight': 1
}
dicts_models.append(dict_model1)

# Load each registered checkpoint from disk and stash the live model in its
# dict. NOTE(review): the keras.models.load_model(...) call is truncated at
# the end of this chunk.
for dict1 in dicts_models:
    print('prepare to load model:' + dict1['model_file'])
    dict1['model'] = keras.models.load_model(dict1['model_file'],