Example #1
def demo_attribute(img_nos: list = None, att: Attributer = None):
    if att is None:
        model_name = VGG
        att = Attributer(model_name=model_name)
    if img_nos is None:
        img_nos = [11, 13, 15]
        #img_nos = [6, 97, 278]
    for img_no in img_nos:
        # image handler for later (method attributions)
        ih = ImageHandler(img_no=img_no, model_name=VGG)
        # predictions
        max_pred, max_p = att.predict_for_model(ih)
        plt.figure(figsize=(15, 10))
        plt.suptitle(
            'Attributions for example {}, prediction = `{}`, probability = {:.2f}'
            .format(img_no, max_pred, max_p))
        # original image
        plt.subplot(2, 4, 1)
        plt.axis('off')
        plt.title('ImageNet Example {}'.format(img_no))
        plt.imshow(
            plt.imread(get_image_file_name(IMG_BASE_PATH, img_no) + '.JPEG'))
        # annotated image
        plt.subplot(2, 4, 2)
        plt.title('Annotated Example {}'.format(img_no))
        plt.imshow(
            plt.imread(
                get_image_file_name(ANNOTATE_BASE_PATH, img_no) + '.JPEG'))
        # processed image
        plt.subplot(2, 4, 3)
        plt.title('Reshaped Example')
        plt.imshow(demo_resizer(img_no=img_no, target_size=ih.get_size()))
        # processed image
        plt.subplot(2, 4, 4)
        plt.title('Annotation Mask')
        plt.imshow(get_mask_for_eval(img_no=img_no, target_size=ih.get_size()),
                   cmap='seismic',
                   clim=(-1, 1))

        attributions = att.attribute_panel(ih=ih,
                                           methods=METHODS,
                                           save=False,
                                           visualise=False,
                                           take_threshold=False,
                                           take_absolute=False,
                                           sigma_multiple=1)
        # show attributions
        for i, a in enumerate(attributions.keys()):
            plt.subplot(2, 4, 5 + i)
            plt.title(a)
            plt.axis('off')
            plt.imshow(ih.get_original_img(), cmap='gray', alpha=0.75)
            plt.imshow(attributions[a],
                       cmap='seismic',
                       clim=(-1, 1),
                       alpha=0.8)
        plt.show()
        plt.clf()
        plt.close()
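A minimal way to drive this demo, assuming the repo's VGG constant and an interactive matplotlib backend; the img_nos list here is illustrative (it matches the function's own default):

att = Attributer(model_name=VGG)  # reuse one Attributer across all images
demo_attribute(img_nos=[11, 13, 15], att=att)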
Example #2
def __init__(self, metric: str, model_name: str, att: Attributer = None):
    if att is None:
        self.att = Attributer(model_name=model_name)
    else:
        self.att = att
    self.metric = metric
    self.model_name = model_name
    self.file_headers = METHODS  # [m + "+_" + self.model_name for m in METHODS]
    self.result_file = "{}/{}/{}_results.csv".format(
        RESULTS_EVAL_PATH, model_name, metric)
    self.results_df = self.read_file(self.result_file, wipe=False)
Example #3
def attribute_panel_wrapper(model_name: str):
    methods = [LIME, LIFT, GRAD]
    att = Attributer(model_name)
    for i in [11]:  # range(6, 7):
        ih = ImageHandler(img_no=i, model_name=model_name)
        att.attribute_panel(ih=ih,
                            methods=methods,
                            save=True,
                            visualise=True,
                            take_threshold=True,
                            take_absolute=True,
                            sigma_multiple=1)
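The take_threshold / take_absolute / sigma_multiple parameters recur throughout these examples. The real logic lives in Attributer.attribute, but based on the comment in Evaluator.collect_result (Example #10), which describes the threshold as a number of standard deviations above the mean pixel contribution, a plausible post-processing sketch looks like this (function name and structure are assumptions for illustration):

import numpy as np

def postprocess(attribution: np.ndarray, take_absolute: bool,
                take_threshold: bool, sigma_multiple: int) -> np.ndarray:
    # hypothetical reconstruction of the attribution post-processing
    a = np.abs(attribution) if take_absolute else attribution.copy()
    if take_threshold:
        # keep only pixels at least sigma_multiple std devs above the mean
        cutoff = a.mean() + sigma_multiple * a.std()
        a[a < cutoff] = 0.0
    return a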
Example #4
def repl_wrapper():
    model_name = VGG
    att = Attributer(model_name)
    # REPL loop
    input_line = input(' >> ')
    while input_line != 'exit':
        split_input = input_line.split(' ')
        # demo attribute (visualiser)
        if split_input[0] == 'demo_attribute':
            img_nos = None
            if len(split_input) >= 2:
                img_nos = [int(i) for i in split_input[1:]]
            demo_attribute(img_nos, att)
        elif split_input[0] == 'demo_evaluate':
            # guard against a missing metric argument before indexing
            if len(split_input) < 2 or split_input[1] not in METRICS:
                print('Invalid evaluation metric')
            else:
                img_nos = None
                if len(split_input) >= 3:
                    img_nos = [int(i) for i in split_input[2:]]
                demo_evaluate(img_nos,
                              att,
                              split_input[1],
                              model_name=model_name)
        elif split_input[0] == 'demo_analyse':
            if len(split_input) == 2:
                if split_input[1] == 'filter':
                    analyser_wrapper(split_input[1])
            else:
                analyser_wrapper()
        input_line = input(' >> ')
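A hypothetical session with this REPL; the command names come straight from the parsing code above, but the metric string assumes METRICS contains an 'intersect' entry:

 >> demo_attribute 11 13
 >> demo_evaluate intersect 6 97 278
 >> demo_analyse filter
 >> exit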
Example #5
def attributer_wrapper(method: str, model: str):
    # draw_annotations([i for i in range(16, 300)])
    # run some attributions
    att = Attributer(model)
    start_time = time.time()

    for i in range(1, 101):
        ih = ImageHandler(img_no=i, model_name=model)
        att.attribute(ih=ih,
                      method=method,
                      save=False,
                      visualise=False,
                      take_threshold=False,
                      take_absolute=False,
                      sigma_multiple=1)
    print('{} total seconds for {}'.format(time.time() - start_time, method))
Example #6
def performance_timer():
    # performance timing
    for model in [INCEPT]:
        att = Attributer(model)
        for method in [GRAD]:
            print('Performance test for {} and {}:'.format(method, model))
            start_time = time.time()
            for i in range(1, 101):
                ih = ImageHandler(img_no=i, model_name=model)
                att.attribute(ih=ih,
                              method=method,
                              save=False,
                              visualise=False,
                              take_threshold=False,
                              take_absolute=False,
                              sigma_multiple=1)
            print('{} total seconds for {}'.format(
                time.time() - start_time, method))
Example #7
def demo_evaluate(img_nos: list = None,
                  att: Attributer = None,
                  metric: str = None,
                  model_name: str = VGG):
    if att is None:
        att = Attributer(model_name=model_name)
    if img_nos is None:
        img_nos = [6, 97, 278]
    evaluator = Evaluator(metric=metric, model_name=model_name, att=att)
    evaluator.collect_panel_result_batch(img_nos)
Example #8
def print_confident_predictions(att: Attributer, model_name: str,
                                experiment_range: list):
    """ Prints confident predictions within a specified range of image numbers
        For collecting model predictions see Attributer.predict_for_model()
        Source of GOOD_EXAMPLES constant
    """
    for i in experiment_range:
        label, max_p = att.predict_for_model(ih=ih)
        if max_p > 0.75:
            print('image_no: {}, label: {}, probability: {:.2f}'.format(
                i, label, max_p))
Example #9
def model_tester():
    att = Attributer(INCEPT)
    good_examples = att.get_good_examples()
    print(good_examples)
    print(len(good_examples))
Example #10
class Evaluator:
    def __init__(self, metric: str, model_name: str, att: Attributer = None):
        if att is None:
            self.att = Attributer(model_name=model_name)
        else:
            self.att = att
        self.metric = metric
        self.model_name = model_name
        self.file_headers = METHODS  # [m + "+_" + self.model_name for m in METHODS]
        self.result_file = "{}/{}/{}_results.csv".format(
            RESULTS_EVAL_PATH, model_name, metric)
        self.results_df = self.read_file(self.result_file, wipe=False)

    def read_file(self, file_path: str, wipe=False):
        # load the results file into a DataFrame; wipe=True truncates the
        # file and returns an empty frame with the expected headers
        if wipe:
            f = open(file_path, "w+")
            f.close()
            df = pd.DataFrame(columns=['img_no'] +
                              self.file_headers).set_index('img_no')
            return df
        if not os.path.exists(file_path):
            with open(file_path, "w") as f:
                f.write(','.join(['img_no'] + self.file_headers))
        df = pd.read_csv(file_path).set_index('img_no')
        return df

    def write_results_to_file(self):
        self.results_df.to_csv(self.result_file,
                               index=True,
                               index_label='img_no')

    def get_image_handler_and_mask(self, img_no):
        # wrap the image in an ImageHandler and build the bounding-box
        # annotation mask for it; the ImageHandler is used to calculate
        # attributions by each method, and the mask is used for evaluation
        ih = ImageHandler(img_no=img_no, model_name=self.model_name)
        # bounding box in the format of the model's input shape / attribution shape
        annotation_mask = get_mask_for_eval(img_no=img_no,
                                            target_size=ih.get_size(),
                                            save=False,
                                            visualise=False)
        return ih, annotation_mask

    def collect_panel_result_batch(self, experiment_range: list):
        new_rows = {}
        for img_no in experiment_range:
            new_row = {}
            ih, annotation_mask = self.get_image_handler_and_mask(img_no)
            for method in METHODS:
                result = self.collect_result(ih, annotation_mask, method)
                if img_no in self.results_df.index:
                    self.results_df.at[img_no, method] = result
                else:
                    new_row[method] = result
            new_rows[img_no] = new_row
            if (img_no % 10) == 0:
                self.append_to_results_df(new_rows)
                new_rows = {}
        self.append_to_results_df(new_rows)

    def collect_result_batch(self, method: str, experiment_range: range):
        new_rows = {}
        for img_no in experiment_range:
            ih, annotation_mask = self.get_image_handler_and_mask(img_no)
            result = self.collect_result(ih, annotation_mask, method)
            if img_no in self.results_df.index:
                self.results_df.at[img_no, method] = result
            else:
                new_rows[img_no] = {method: result}
            if img_no % 10 == 0:
                self.append_to_results_df(new_rows)
                new_rows = {}

        self.append_to_results_df(new_rows)

    def append_to_results_df(self, new_rows_dict, write=True):
        new_data = pd.DataFrame.from_dict(new_rows_dict,
                                          columns=self.file_headers,
                                          orient='index')
        # DataFrame.append was removed in pandas 2.0; concat is equivalent
        self.results_df = pd.concat([self.results_df, new_data], sort=True)
        if write:
            self.write_results_to_file()

    def collect_result(self, ih: ImageHandler, mask, method: str):
        # threshold for each attribution's "explainability" is the number of std
        # deviations above the mean contribution score for a pixel
        if self.metric == INTERSECT:
            return self.evaluate_intersection(ih,
                                              mask,
                                              method,
                                              sigma=INTERSECT_THRESHOLD)
        elif self.metric == INTENSITY:
            return self.evaluate_intensity(ih,
                                           mask,
                                           method,
                                           sigma=INTENSITY_THRESHOLD)

    def evaluate_intersection(self,
                              ih: ImageHandler,
                              mask,
                              method: str,
                              sigma: int,
                              print_debug: bool = False) -> float:
        # calculate an attribution and use a provided bounding box mask to
        # calculate the IOU metric. attribution has threshold applied, and abs
        # value set (positive and negative evidence treated the same)
        attribution = self.att.attribute(
            ih=ih,
            method=method,
            layer_no=LAYER_TARGETS[method][self.model_name],
            take_threshold=True,
            sigma_multiple=sigma,
            take_absolute=True,
            visualise=False,
            save=False)
        # calculate the intersection of the attribution and the bounding box mask
        intersect_array = np.zeros(attribution.shape)
        intersect_array[(attribution > 0.0) & (mask > 0.0)] = 1
        # get the union array for the IOU calculation
        union_array = np.zeros(attribution.shape)
        union_array[(attribution > 0.0) | (mask > 0.0)] = 1
        # calculate intersection and union areas for numerator and
        # denominator respectively
        intersect_area = intersect_array.sum()
        union_area = union_array.sum()
        intersection_over_union = intersect_area / union_area
        print('Evaluating `{}` on example `{}` ({})'.format(
            method, ih.img_no, 'intersection'))
        if print_debug:
            #print('--Mask Area =\t {}'.format(mask_area))
            print('--Intersect Area =\t {}'.format(intersect_area))
            print('--Union Area =\t {}'.format(union_area))
            print('--Intersection / Union =\t{:.2f}%'.format(
                intersection_over_union * 100))
            print('')

        return intersection_over_union

    def evaluate_intensity(self,
                           ih: ImageHandler,
                           mask,
                           method: str,
                           sigma: int,
                           print_debug: bool = False) -> float:
        # calculate an attribution and use the provided bounding box mask to
        # compute the intensity metric: attribution mass inside the mask over
        # the union area. threshold and absolute value are applied, so
        # positive and negative evidence are treated the same
        attribution = self.att.attribute(
            ih=ih,
            method=method,
            layer_no=LAYER_TARGETS[method][self.model_name],
            take_threshold=True,
            sigma_multiple=sigma,
            take_absolute=True,
            visualise=False,
            save=False)
        # calculate the weight/confidence of the attribution intersected with
        # the bounding box mask
        intensity_array = np.copy(attribution)
        intensity_array[(attribution > 0.0) & (mask < 0.1)] = 0
        # get the union array for the intensity-over-union calculation
        union_array = np.zeros(attribution.shape)
        union_array[(attribution > 0.0) | (mask > 0.0)] = 1
        intensity_area = intensity_array.sum()
        union_area = union_array.sum()
        intensity_over_union = intensity_area / union_area
        print('Evaluating `{}` on example `{}` ({})'.format(
            method, ih.img_no, 'intensity'))
        if print_debug:
            print('--Intensity Area =\t {}'.format(intensity_area))
            print('--Union Area =\t {}'.format(union_area))
            print('--Intensity / Union =\t{:.2f}%'.format(
                intensity_over_union * 100))
            print('')

        return intensity_over_union
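To make the two metrics concrete, here is a small self-contained numpy sketch of the same intersection-over-union and intensity-over-union arithmetic on toy 3x3 arrays (the values are illustrative only, not taken from any real attribution):

import numpy as np

# toy post-threshold attribution and binary bounding-box mask
attribution = np.array([[0.0, 0.8, 0.0],
                        [0.0, 0.5, 0.2],
                        [0.0, 0.0, 0.0]])
mask = np.array([[0, 1, 1],
                 [0, 1, 1],
                 [0, 0, 0]])

# intersection over union, as in evaluate_intersection
intersect = ((attribution > 0.0) & (mask > 0.0)).sum()    # 3 pixels
union = ((attribution > 0.0) | (mask > 0.0)).sum()        # 4 pixels
iou = intersect / union                                   # 3 / 4 = 0.75

# intensity over union, as in evaluate_intensity: attribution mass
# inside the mask divided by the union area
intensity = np.where(mask > 0.0, attribution, 0.0).sum()  # 0.8 + 0.5 + 0.2
iou_star = intensity / union                              # 1.5 / 4 = 0.375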