# Example #1
# Example-1 setup: one deployed Xception hemorrhage classifier for DeepSHAP.
blend_original_image = False

model_dir = '/home/ubuntu/dlp/deploy_models/ROP/hemorrhage/2020_3_7'
# batch-size guide per architecture: Xception 6, Inception-v3 24, InceptionResnetV2 12
dicts_models = [
    {'model_file': os.path.join(model_dir, 'Xception-005-0.989.hdf5'),
     'input_shape': (299, 299, 3),
     'batch_size': 8},
]

my_deepshap = My_deepshap(dicts_models, reference_file=REFERENCE_FILE, num_reference=NUM_REFERENCE)

#region generate heatmaps

# Index into dicts_models of the model whose input shape is used below.
MODEL_NO = 0
image_shape = dicts_models[MODEL_NO]['input_shape']

# Iterate the three patient-id-based splits of the hemorrhage dataset and
# read each split's image list from its CSV file.
for predict_type_name in ['Hemorrhage_split_patid_train', 'Hemorrhage_split_patid_valid', 'Hemorrhage_split_patid_test']:
    save_dir = os.path.join(DIR_SAVE_RESULTS, predict_type_name)
    DATFILE_TYPE = 'dataset9'
    # CSV path: <three dirs up>/datafiles/dataset9/<split_name>.csv
    filename_csv = os.path.join(os.path.abspath('../../../'),
                                'datafiles', DATFILE_TYPE, predict_type_name + '.csv')

    df = pd.read_csv(filename_csv)
    for _, row in df.iterrows():
        # 'images' column holds the image path; loop body continues beyond this excerpt
        image_file = row['images']
# Example #2
# Example-2 setup: DeepSHAP background reference plus the two deployed
# Xception models (stage and hemorrhage classifiers).
reference_file = os.path.join(os.path.abspath('.'), 'ref_rop.npy')
num_reference = 24  # background  24

import my_config
dir_tmp = os.path.join(my_config.dir_tmp, 'rop_deep_shap')

model_dir = my_config.dir_deploy_models
# batch-size guide per architecture: Xception 6, Inception-v3 24, InceptionResnetV2 12
dicts_models = [
    {'model_file': os.path.join(model_dir, subdir, weights),
     'input_shape': (299, 299, 3),
     'batch_size': 6}
    for subdir, weights in (('STAGE/2020_3_7', 'Xception-010-0.981.hdf5'),
                            ('hemorrhage/2020_3_7', 'Xception-005-0.989.hdf5'))
]
my_deepshap = My_deepshap(dicts_models, reference_file=reference_file, num_reference=num_reference)

def server_shap_deep_explainer(model_no, img_source,
                     ranked_outputs=1, blend_original_image=False):
    """Run the DeepSHAP explainer on one image with a selected model.

    model_no: index into the module-level dicts_models list.
    img_source: image file path (str) or an already-preprocessed tensor.
    ranked_outputs: how many top-ranked classes to explain.
    blend_original_image: whether heatmaps are blended with the original image.

    NOTE(review): the function continues past this excerpt; presumably it
    returns list_classes/list_images -- confirm against the full source.
    """

    image_shape = dicts_models[model_no]['input_shape']
    # A string is treated as a file path and converted into a model-ready
    # tensor; anything else is assumed to already be a preprocessed tensor.
    if isinstance(img_source, str):
        img_input = LIBS.ImgPreprocess.my_image_helper.my_gen_img_tensor(
            img_source, image_shape=image_shape)
    else:
        img_input = img_source

    # Delegate to the shared explainer; results are written under dir_tmp.
    list_classes, list_images = my_deepshap.shap_deep_explainer(
        model_no=model_no, num_reference=num_reference,
        img_input=img_input, ranked_outputs=ranked_outputs,
        blend_original_image=blend_original_image, base_dir_save=dir_tmp)