def count_falseNegativeCase_to_pkl(bbox_conf_threshold = 0.95):
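    """Collect whole-image classifier scores for detector-negative images and pickle them.

    Runs the trained Mask R-CNN over the training set and keeps only images where no
    detection exceeds bbox_conf_threshold. For each such image the whole-image
    classifier's opacity probability is stored as a true negative (no ground-truth box)
    or a false negative (ground-truth box missed), so a classifier threshold can later
    be chosen to reduce false negatives. Results are dumped to
    'classifier_threshold_analysis_n.pkl'.
    """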
    #************* retrieve Mask R-CNN ******************
    # path to the trained Mask R-CNN weights
    model_path = 'pneumonia20181020T2012_0165/mask_rcnn_pneumonia_0032.h5'  # find_lastest_model(ROOT_DIR)
    # inference config is defined inline below; bbox_conf_threshold comes from the function argument

    class InferenceConfig(Config):
        NAME = 'pneumonia'
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        BACKBONE = "resnet50"  # 'resnet50'
        NUM_CLASSES = 2
        IMAGE_MAX_DIM = 256
        IMAGE_MIN_DIM = 256
        RPN_ANCHOR_SCALES = (32, 64, 128, 256)
        TRAIN_ROIS_PER_IMAGE = 32
        MAX_GT_INSTANCES = 4
        DETECTION_MAX_INSTANCES = 3
        DETECTION_MIN_CONFIDENCE = 0.78
        DETECTION_NMS_THRESHOLD = 0.01

    inference_config = InferenceConfig()
    # Recreate the model in inference mode
    model = modellib.MaskRCNN(mode='inference',
                              config=inference_config,
                              model_dir=ROOT_DIR)

    # Load trained weights (fill in path to trained weights here)
    print('Retrieving mask RCNN...')
    assert model_path != "", "Provide path to trained weights for mask RCNN"
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    # ************* retrieve classifier ******************
    DROPOUT = 0.2
    DENSE_COUNT = 128
    classifier_path = 'classifier/ResNet50/384_384_lr_1e-4_balanced_orig_data_prtrained_imagenet_20epoch_all_layers/RESNET50_full_model.h5'
    #LEARN_RATE = 1e-4
    SHAPE = 2  # t_y.shape[1] in lung_opacity_classifier.py
    print('Retrieving classifier...')
    assert classifier_path != "", "Provide path to trained weights for classifier"
    from keras.applications.resnet50 import ResNet50 as PTModel, preprocess_input
    base_pretrained_model = PTModel(input_shape=(384, 384, 3),
                                    include_top=False)

    from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Input, Conv2D, multiply, \
        LocallyConnected2D, Lambda, AvgPool2D
    from keras.models import Model
    from keras.optimizers import Adam
    # take the pre-trained model's output feature map as the input to the classification head
    pt_features = Input(base_pretrained_model.get_output_shape_at(0)[1:], name='feature_input')
    pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
    # Normalize the activations of the previous layer at each batch, i.e. apply a transformation
    # that keeps the mean activation close to 0 and the activation standard deviation close to 1.
    from keras.layers import BatchNormalization
    bn_features = BatchNormalization()(pt_features)
    # Global average pooling operation for spatial data
    gap = GlobalAveragePooling2D()(bn_features)
    # Dropout layer
    gap_dr = Dropout(DROPOUT)(gap)

    # Dense layer on top of the pooled features, followed by another Dropout
    dr_steps = Dropout(DROPOUT)(Dense(DENSE_COUNT, activation='elu')(gap_dr))
    out_layer = Dense(SHAPE, activation='softmax')(dr_steps)

    attn_model = Model(inputs=[pt_features],
                       outputs=[out_layer], name='trained_model')

    from keras.models import Sequential
    classifier = Sequential(name='combined_model')
    classifier.add(base_pretrained_model)
    classifier.add(attn_model)
    # load pre-trained weights
    print("Loading weights from ", classifier_path)
    classifier.load_weights(classifier_path)

    dataset = read_training_data_from_pkl()
    pn = []
    fn = []
    for image_id in dataset.image_ids:
        print(image_id)
        #load_image_gt() resizes image according to config already
        original_image, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, inference_config,
                                   image_id, use_mini_mask=False)
        results = model.detect([original_image])  # , verbose=1)
        r = results[0]
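        # r is a dict with keys 'rois', 'class_ids', 'scores' and 'masks' for this image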
        # keep only images with no confident detection: the goal is to find a classifier
        # threshold that reduces false negatives
        index2keep = r['scores'] > bbox_conf_threshold
        if index2keep.any():
            continue  # at least one confident bbox, so this is not a detector-negative case
        #resize for classifier (may be changed to the same size later)
        image = dataset.load_image(image_id)
        image, window, scale, padding, crop = utils.resize_image(
            image,
            min_dim=384,
            min_scale=0,
            max_dim=384,
            mode=inference_config.IMAGE_RESIZE_MODE)
        image = np.expand_dims(image, axis=0)
        pred = classifier.predict(image) #[no opacity, opacity]
        #print('GT: ', ('Opacity' if len(gt_bbox) > 0 else 'No opacity'))
        #print('Prediction: ', ('No opacity' if pred[0][0] > pred[0][1] else 'Opacity'))
        #print(pred)
        if len(gt_bbox) == 0:  # GT negative and no detection -> true negative
            pn.append(pred[0][1])
        else:  # GT positive but no detection -> false negative
            fn.append(pred[0][1])
    print('tn______________')
    print(len(pn))
    print('fn______________')
    print(len(fn))
    # save result to pkl
    import pickle
    output = 'classifier_threshold_analysis_n.pkl'
    print('Dumping data to ' + output + '...')
    with open(output, 'wb') as f:
        pickle.dump((pn, fn), f)
    print('Done.')


def count_falsePositiveCase_to_pkl(bbox_conf_threshold = 0.95):
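    """Collect bbox-classifier scores for confident detections and pickle them.

    Runs the trained Mask R-CNN over the training set, crops every detection scoring
    above bbox_conf_threshold out of the original image, and feeds the 200x200 patch to
    the bbox classifier. The classifier's false-positive probability is stored separately
    for images with a ground-truth box (true positives) and without one (false positives),
    so a classifier threshold can later be chosen to reduce false positives. Results are
    dumped to 'classifier_threshold_analysis_bbox_classifier.pkl'.
    """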
    #************* retrieve Mask R-CNN ******************
    # path to the trained Mask R-CNN weights
    model_path = 'pneumonia20181020T2012_0165/mask_rcnn_pneumonia_0032.h5'  # find_lastest_model(ROOT_DIR)
    # inference config is defined inline below; bbox_conf_threshold comes from the function argument

    class InferenceConfig(Config):
        NAME = 'pneumonia'
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        BACKBONE = "resnet50"  # 'resnet50'
        NUM_CLASSES = 2
        IMAGE_MAX_DIM = 256
        IMAGE_MIN_DIM = 256
        RPN_ANCHOR_SCALES = (32, 64, 128, 256)
        TRAIN_ROIS_PER_IMAGE = 32
        MAX_GT_INSTANCES = 4
        DETECTION_MAX_INSTANCES = 3
        DETECTION_MIN_CONFIDENCE = 0.78
        DETECTION_NMS_THRESHOLD = 0.01

    inference_config = InferenceConfig()
    # Recreate the model in inference mode
    model = modellib.MaskRCNN(mode='inference',
                              config=inference_config,
                              model_dir=ROOT_DIR)

    # maps detection coordinates from the 256x256 inference resolution back to original-image pixels
    resize_factor = ORIG_SIZE / inference_config.IMAGE_SHAPE[0]
    # Load trained weights (fill in path to trained weights here)
    print('Retrieving mask RCNN...')
    assert model_path != "", "Provide path to trained weights for mask RCNN"
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)

    # ************* retrieve bbox classifier ******************
    DROPOUT = 0.2
    DENSE_COUNT = 128
    classifier_path = 'bbox_classifier/Xception_200_200_lr1e-4/Xception_full_model.h5'
    #LEARN_RATE = 1e-4
    SHAPE = 2  # t_y.shape[1] in lung_opacity_classifier.py
    print('Retrieving bbox classifier...')
    assert classifier_path != "", "Provide path to trained weights for bbox classifier"
    from keras.applications.xception import Xception as PTModel, preprocess_input
    base_pretrained_model = PTModel(input_shape=(200, 200, 3),
                                    include_top=False)

    from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Input, Conv2D, multiply, \
        LocallyConnected2D, Lambda, AvgPool2D
    from keras.models import Model
    from keras.optimizers import Adam
    # take the pre-trained model's output feature map as the input to the classification head
    pt_features = Input(base_pretrained_model.get_output_shape_at(0)[1:], name='feature_input')
    pt_depth = base_pretrained_model.get_output_shape_at(0)[-1]
    # Normalize the activations of the previous layer at each batch, i.e. apply a transformation
    # that keeps the mean activation close to 0 and the activation standard deviation close to 1.
    from keras.layers import BatchNormalization
    bn_features = BatchNormalization()(pt_features)
    # Global average pooling operation for spatial data
    gap = GlobalAveragePooling2D()(bn_features)
    # Dropout layer
    gap_dr = Dropout(DROPOUT)(gap)

    # Dense layer on top of the pooled features, followed by another Dropout
    dr_steps = Dropout(DROPOUT)(Dense(DENSE_COUNT, activation='elu')(gap_dr))
    out_layer = Dense(SHAPE, activation='softmax')(dr_steps)

    attn_model = Model(inputs=[pt_features],
                       outputs=[out_layer], name='trained_model')

    from keras.models import Sequential
    classifier = Sequential(name='combined_model')
    classifier.add(base_pretrained_model)
    classifier.add(attn_model)
    # load pre-trained weights
    print("Loading weights from ", classifier_path)
    classifier.load_weights(classifier_path)

    dataset = read_training_data_from_pkl()
    tp = []
    fp = []
    from scipy import misc  # NOTE: misc.imresize needs an older SciPy (it was removed in SciPy 1.3)
    for image_id in dataset.image_ids:
        #image_id = random.choice(dataset.image_ids)
        print(image_id)
        #load_image_gt() resizes image according to config already
        image_256, image_meta, gt_class_id, gt_bbox, gt_mask = \
            modellib.load_image_gt(dataset, inference_config,
                                   image_id, use_mini_mask=False)
        results = model.detect([image_256])  # , verbose=1)
        r = results[0]
        # keep only images with at least one confident detection: the goal is to find a
        # classifier threshold that reduces false positives
        index2remove = r['scores'] <= bbox_conf_threshold
        if index2remove.all():
            continue  # no bbox above the confidence threshold
        #resize for classifier (may be changed to the same size later)
        original_image = dataset.load_image(image_id)
        num_instances = len(r['rois'])
        for i in range(num_instances):
            if r['scores'][i] > bbox_conf_threshold:  # classify only confident detections
                # r['rois'][i] is [y1, x1, y2, x2] in the 256x256 inference frame;
                # map the box back to original-image pixel coordinates
                x = r['rois'][i][1]
                y = r['rois'][i][0]
                x1 = int(x * resize_factor)
                y1 = int(y * resize_factor)
                width1 = int((r['rois'][i][3] - x) * resize_factor)
                height1 = int((r['rois'][i][2] - y) * resize_factor)
                masked = np.array(original_image[y1:y1 + height1, x1:x1 + width1, :], dtype=np.int32)
                resized_patch = misc.imresize(masked, (200, 200), 'nearest')
                #print(resized_patch.shape)
                #print(resized_patch)
                image = np.expand_dims(resized_patch.astype(np.float32), axis=0)
                image = preprocess_input(image)
                pred = classifier.predict(image) # [bbox as FP, bbox as TP]
                #print('GT: ', ('Opacity' if len(gt_bbox) > 0 else 'No opacity'))
                #print('Prediction: ', ('No opacity' if pred[0][0] > pred[0][1] else 'Opacity'))
                print(pred)
                # pred[0][0] is the classifier's probability that the bbox is a false positive
                if len(gt_bbox):  # GT positive image -> count the detection as a true positive
                    tp.append(pred[0][0])
                else:  # GT negative image -> the detection is a false positive
                    fp.append(pred[0][0])
    print('tp______________')
    print(len(tp))
    print('fp______________')
    print(len(fp))
    # save result to pkl
    import pickle
    output = 'classifier_threshold_analysis_bbox_classifier.pkl'
    print('Dumping data to ' + output + '...')
    with open(output, 'wb') as f:
        pickle.dump((tp, fp), f)
    print('Done.')
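

# Minimal entry-point sketch (an addition, assuming this module is run as a script):
# both analyses rely on ROOT_DIR, ORIG_SIZE, Config, modellib, utils, np and
# read_training_data_from_pkl() being defined/imported at module level elsewhere in this file.
if __name__ == '__main__':
    count_falseNegativeCase_to_pkl(bbox_conf_threshold=0.95)
    count_falsePositiveCase_to_pkl(bbox_conf_threshold=0.95)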