Example #1
def confusion_matrix(results_dir, masks_dir):
    """
    Calculate confusion matrix.

    :param results_dir: Directory with calculated masks
    :param masks_dir: Ground truth masks
    :return: Confusion matrix
    """

    # Getting calculated masks
    result_imgs = get_files_from_dir(results_dir)

    # List with values TP, FP, FN, TN
    tf_values = np.zeros(4)

    # Iterate over image paths
    for img_path in result_imgs:
        # Convert image path to mask path
        mask_path = img_name_to_mask_name(img_path)

        # Compute performance measures
        tf_val = np.array(
            performance_accumulation_pixel(get_img(results_dir, img_path),
                                           get_img(masks_dir, mask_path)))

        # Add them up
        tf_values += tf_val

    return tf_values.tolist()
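Every example in this listing accumulates pixel counts through a `performance_accumulation_pixel` helper (plain or via `evalf`) whose definition is not included. A minimal sketch of what such a helper typically computes, assuming two equally shaped binary masks (the `_sketch` name is ours, not the project's), could be:

import numpy as np

def performance_accumulation_pixel_sketch(candidate_mask, annotation_mask):
    """Count pixel-level TP, FP, FN, TN between a predicted mask and a ground-truth mask."""
    candidate = np.asarray(candidate_mask, dtype=bool)
    annotation = np.asarray(annotation_mask, dtype=bool)
    tp = np.count_nonzero(candidate & annotation)    # predicted foreground, annotated foreground
    fp = np.count_nonzero(candidate & ~annotation)   # predicted foreground, annotated background
    fn = np.count_nonzero(~candidate & annotation)   # missed foreground
    tn = np.count_nonzero(~candidate & ~annotation)  # correctly rejected background
    return [tp, fp, fn, tn]

With counts in that [TP, FP, FN, TN] order, `confusion_matrix(results_dir, masks_dir)` above returns one summed four-element list for the whole result directory.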
def validate(analysis, dataset_manager, pixel_methods):
    """In each job, the methods are executed with the same dataset split and their results are put in an array."""
    results = []
    train, verify = dataset_manager.get_data_splits()
    if analysis:
        data_analysis(train)
    for pixel_method in pixel_methods:
        tp = 0
        fn = 0
        fp = 0
        tn = 0
        time = 0

        pixel_method.train(train)

        start = timer()
        for dat in verify:
            im = dat.get_img()

            mask, im = pixel_method.get_mask(im)
            mask_solution = dat.get_mask_img()

            [local_tp, local_fp, local_fn, local_tn
             ] = evalf.performance_accumulation_pixel(mask, mask_solution)
            tp += local_tp
            fp += local_fp
            fn += local_fn
            tn += local_tn

        time += timer() - start

        results.append(
            Result(tp=tp, fp=fp, fn=fn, tn=tn, time=(time / len(verify))))
    return results
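`validate` (and `validateMethod` below) wrap the accumulated counts in a `Result` object that is not defined in this listing. A hypothetical minimal definition covering only the keyword fields used here could be:

from dataclasses import dataclass
from typing import Optional

@dataclass
class Result:
    """Accumulated pixel counts, mean processing time and optional window counts."""
    tp: int = 0
    fp: int = 0
    fn: int = 0
    tn: int = 0
    time: float = 0.0           # mean processing time per verification image
    tp_w: Optional[int] = None  # window-level counts, filled only by validateMethod
    fn_w: Optional[int] = None
    fp_w: Optional[int] = None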
def validateMethod(train: List[Data], verify: List[Data], method):
    method.train(train)
    tp = 0
    fn = 0
    fp = 0
    tn = 0
    t = 0
    tp_w = 0
    fn_w = 0
    fp_w = 0

    for pos, dat in enumerate(verify):
        print(method, str(pos) + '/' + str(len(verify)))
        im = dat.get_img()

        start = time.time()
        regions, mask, im = method.get_mask(im)
        mask_solution = dat.get_mask_img()
        t += time.time() - start

        [local_tp, local_fp, local_fn, local_tn] = evalf.performance_accumulation_pixel(
            mask, mask_solution)

        [local_tp_w, local_fn_w, local_fp_w] = performance_accumulation_window(regions, dat.gt)

        tp += local_tp
        fp += local_fp
        fn += local_fn
        tn += local_tn

        tp_w += local_tp_w
        fn_w += local_fn_w
        fp_w += local_fp_w

        """import matplotlib.pyplot as plt
        for region in regions:
            cv2.rectangle(mask, (region.top_left[1], region.top_left[0]),
                          (region.get_bottom_right()[1], region.get_bottom_right()[0]), (255,), thickness=5)
        for gt in dat.gt:
            cv2.rectangle(mask, (gt.top_left[1], gt.top_left[0]),
                          (gt.get_bottom_right()[1], gt.get_bottom_right()[0]), (128,), thickness=5)

        plt.imshow(mask, 'gray', vmax=255)
        plt.title(cv2.countNonZero(mask))
        plt.show()
        pass"""

    return Result(
        tp=tp,
        fp=fp,
        fn=fn,
        tn=tn,
        time=(t / len(verify)),
        tp_w=tp_w,
        fn_w=fn_w,
        fp_w=fp_w
    )
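`validateMethod` also accumulates window-level counts via `performance_accumulation_window(regions, dat.gt)`, which is not shown here. A sketch of one plausible greedy matching scheme, assuming boxes given as [tly, tlx, bry, brx] lists and an IoU threshold of 0.5 (both assumptions, not the project's actual rules), is:

def performance_accumulation_window_sketch(detections, annotations, iou_threshold=0.5):
    """Window-level matching: a detection is a TP if it overlaps a still unmatched
    annotation with IoU >= iou_threshold; unmatched annotations are FN, the rest FP."""
    def iou(a, b):
        inter_h = min(a[2], b[2]) - max(a[0], b[0])
        inter_w = min(a[3], b[3]) - max(a[1], b[1])
        if inter_h <= 0 or inter_w <= 0:
            return 0.0
        inter = inter_h * inter_w
        area_a = (a[2] - a[0]) * (a[3] - a[1])
        area_b = (b[2] - b[0]) * (b[3] - b[1])
        return inter / float(area_a + area_b - inter)

    matched = [False] * len(annotations)
    tp = 0
    for det in detections:
        for i, ann in enumerate(annotations):
            if not matched[i] and iou(det, ann) >= iou_threshold:
                matched[i] = True
                tp += 1
                break
    fn = matched.count(False)   # annotations never matched
    fp = len(detections) - tp   # detections that matched nothing
    return [tp, fn, fp]

Note the [TP, FN, FP] return order, which matches how the callers in this listing unpack it.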
def score_pixel_masks(result_masks, test_masks):
    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    for ii, mask_name in enumerate(result_masks):

        # Read mask file
        pixelCandidates = imageio.imread(mask_name) > 0
        if len(pixelCandidates.shape) == 3:
            pixelCandidates = pixelCandidates[:, :, 0]

        # Accumulate pixel performance of the current image %%%%%%%%%%%%%%%%%
        gt_mask_name = test_masks[ii]

        pixelAnnotation = imageio.imread(gt_mask_name) > 0
        if len(pixelAnnotation.shape) == 3:
            pixelAnnotation = pixelAnnotation[:, :, 0]

        if pixelAnnotation.shape != pixelCandidates.shape:
            print(
                'Error: hypothesis ({}) and GT mask ({}) dimensions do not match!'
                .format(pixelCandidates.shape, pixelAnnotation.shape))
            sys.exit()

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixelCandidates,
                                                  pixelAnnotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

    [pixelPrecision, pixelAccuracy, pixelSpecificity, pixelSensitivity
     ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)
    pixelF1 = 0
    if (pixelPrecision + pixelSensitivity) != 0:
        pixelF1 = 2 * ((pixelPrecision * pixelSensitivity) /
                       (pixelPrecision + pixelSensitivity))

    return pixelPrecision, pixelSensitivity, pixelF1
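`score_pixel_masks` converts the summed counts into ratios with `evalf.performance_evaluation_pixel`. A sketch of the four-value variant it expects, with zero-division guards (other examples below unpack four, five or nine values from their own versions of this helper), might be:

def performance_evaluation_pixel_sketch(tp, fp, fn, tn):
    """Derive precision, accuracy, specificity and sensitivity (recall) from pixel counts."""
    total = tp + fp + fn + tn
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    accuracy = (tp + tn) / total if total else 0.0
    specificity = tn / (tn + fp) if (tn + fp) else 0.0
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
    return [precision, accuracy, specificity, sensitivity]

F1 then follows from precision and sensitivity exactly as computed above.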
Example #5
def segmentate(im_directory, mask_directory, maskOut_directory):
    file_names = sorted(fnmatch.filter(os.listdir(im_directory), '*.jpg'))

    pixelTP = 0
    pixelFP = 0
    pixelFN = 0
    pixelTN = 0

    from main import CONSOLE_ARGUMENTS
    nf = CONSOLE_ARGUMENTS.numFiles
    # For each file
    for name in file_names[:nf]:
        base, extension = os.path.splitext(name)

        imageNameFile = im_directory + "/" + name
        maskNameFile = mask_directory + "/mask." + base + ".png"

        print(imageNameFile)
        image = cv.imread(imageNameFile)
        maskImage = cv.imread(maskNameFile)

        # image = grayWorld(image)
        msk = LabFilter(image)

        img = image * msk
        msk = msk.astype(float)

        cv.imshow("masked image", img)
        cv.imshow("original image", image)
        cv.waitKey(0)

        # cv.imwrite(os.path.join(maskOut_directory,("mask." + base + ".png")),msk)
        pTP, pFP, pFN, pTN = performance_accumulation_pixel(msk, maskImage)
        pixelTP += pTP
        pixelFP += pFP
        pixelFN += pFN
        pixelTN += pTN

    print("precision \t accuracy \t specificity \t sensitivity")
    print(performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN))
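Example #5 depends on a `LabFilter` helper that is not part of this listing. Purely as an illustration, a hypothetical Lab colour-space filter that keeps reddish and bluish pixels (the thresholds and the red/blue choice are assumptions, not the original implementation) could look like:

import cv2 as cv
import numpy as np

def lab_filter_sketch(image_bgr, a_min=150, b_max=120):
    """Binary mask of strongly red (high a*) or strongly blue (low b*) pixels,
    using OpenCV's 8-bit Lab encoding where a* and b* are offset by 128."""
    lab = cv.cvtColor(image_bgr, cv.COLOR_BGR2LAB)
    _, a, b = cv.split(lab)
    red_like = a > a_min    # a* grows towards red
    blue_like = b < b_max   # b* shrinks towards blue
    return (red_like | blue_like).astype(np.uint8)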
Example #6
def confusion_matrix(results_dir, masks_dir):
    """
    Calculate confusion matrix.

    :param results_dir: Directory with calculated masks
    :param masks_dir: Ground truth masks
    :return: Confusion matrix
    """

    # Getting calculated masks
    #result_imgs = get_files_from_dir(results_dir)
    result_imgs = glob.glob(results_dir + "/*.png")

    # List with values TP, FP, FN, TN
    tf_values = np.zeros(4)

    # Iterate over image paths
    for img_path in result_imgs:
        print(img_path)
        image_name = os.path.basename(img_path)
        # Convert image path to mask path
        # mask_path = img_name_to_mask_name(img_path) if not img_path.startswith('mask.') else img_path
        mask_path = os.path.join(masks_dir, image_name)
        print(mask_path)

        # Compute performance measures
        tf_val = np.array(
            performance_accumulation_pixel(
                #get_img(results_dir, img_path),
                #get_img(masks_dir, mask_path)
                imageio.imread(img_path),
                imageio.imread(mask_path)))

        # Add them up
        tf_values += tf_val

    return tf_values.tolist()
        for ii in range(len(result_files)):

            # Read mask file
            candidate_masks_name = '{}/{}/{}'.format(results_dir, method, result_files[ii])
            print('File: {}'.format(candidate_masks_name), file=sys.stderr)

            pixelCandidates = imageio.imread(candidate_masks_name) > 0
            if len(pixelCandidates.shape) == 3:
                pixelCandidates = pixelCandidates[:, :, 0]
            
            # Accumulate pixel performance of the current image %%%%%%%%%%%%%%%%%
            name, ext = os.path.splitext(test_files[ii])
            gt_mask_name = '{}/mask/mask.{}.png'.format(test_dir, name)

            pixelAnnotation = imageio.imread(gt_mask_name)>0
            [localPixelTP, localPixelFP, localPixelFN, localPixelTN] = evalf.performance_accumulation_pixel(pixelCandidates, pixelAnnotation)
            pixelTP = pixelTP + localPixelTP
            pixelFP = pixelFP + localPixelFP
            pixelFN = pixelFN + localPixelFN
            pixelTN = pixelTN + localPixelTN

            if window_evaluation == 1:
                # Read .pkl file
            
                name_r, ext_r = os.path.splitext(result_files[ii])
                pkl_name      = '{}/{}/{}.pkl'.format(results_dir, method, name_r)
                

                with open(pkl_name, "rb") as fp:   # Unpickling
                    windowCandidates = pickle.load(fp)
def traffic_sign_detection(directory, output_dir, pixel_method, window_method):

    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy = 0

    # Load image names in the given directory
    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    pixel_time = 0
    for name in file_names:
        base, extension = os.path.splitext(name)

        # Read file
        im = imageio.imread('{}/{}'.format(directory, name))
        print('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) ######################################
        start = time.time()
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        end = time.time()
        pixel_time += (end - start)

        fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
        if not os.path.exists(fd):
            os.makedirs(fd)

        out_mask_name = '{}/{}.png'.format(fd, base)
        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)

            out_list_name = '{}/{}.pkl'.format(fd, base)

            with open(out_list_name, "wb") as fp:  #Pickling
                pickle.dump(window_candidates, fp)

        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
            directory, base)) > 0

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixel_candidates,
                                                  pixel_annotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

        [
            pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
            pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN
        ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                               pixelTN)

        if window_method != 'None':
            # Accumulate object performance of the current image ################
            window_annotations = load_annotations('{}/gt/gt.{}.txt'.format(
                directory, base))
            [localWindowTP, localWindowFN, localWindowFP
             ] = evalf.performance_accumulation_window(window_candidates,
                                                       window_annotations)

            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

            # Plot performance evaluation
            [window_precision, window_sensitivity, window_accuracy
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)

    pixel_time /= len(file_names)

    return [
        pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
        pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN, pixel_time
    ]
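The window branch above unpacks [window_precision, window_sensitivity, window_accuracy] from `evalf.performance_evaluation_window`. A sketch consistent with that unpacking, with zero-division guards (the real helper may differ), is:

def performance_evaluation_window_sketch(tp, fn, fp):
    """Window-level precision, sensitivity (recall) and accuracy; with no TN at
    window level, 'accuracy' is TP over all counted windows."""
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    sensitivity = tp / (tp + fn) if (tp + fn) else 0.0
    accuracy = tp / (tp + fn + fp) if (tp + fn + fp) else 0.0
    return [precision, sensitivity, accuracy]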
def traffic_sign_detection_test(directory, output_dir, pixel_method,
                                window_method):
    """
	Calculates all statistical evaluation metrics of different pixel selector method (TRAINING AND VALIDATION)
	* Inputs:
	- directory = path to train images
	- outpit_dir = Directory where to store output masks, etc. For instance '~/m1-results/week1/test'
	- pixel_method = pixel method that will segmentate the image
    - window_method = -------
	*Outputs:
	- pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity, window_precision, window_accuracy
	"""
    from main import CONSOLE_ARGUMENTS

    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy = 0

    print("splitting in trainning test")
    # Load image names in the given directory
    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    signals_type_dict = get_dictionary()

    training, validation = [], []
    for key in signals_type_dict:
        sig_subdict = signals_type_dict[key]
        training_type, validation_type = divide_training_validation_SL(
            sig_subdict['signal_list'])
        training.extend(training_type)
        validation.extend(validation_type)

    print("extracting mask")
    dataset = training
    if CONSOLE_ARGUMENTS.use_validation:
        dataset = validation
    # if(CONSOLE_ARGUMENTS.use_test):
    totalTime = 0
    for signal in dataset:
        signal_path = signal.img_orig_path
        _, name = signal_path.rsplit('/', 1)
        base, extension = os.path.splitext(name)

        # Read file
        im = imageio.imread('{}/{}'.format(directory, name))
        print('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) ######################################
        start = time.time()
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        totalTime += time.time() - start

        fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
        if not os.path.exists(fd):
            os.makedirs(fd)

        out_mask_name = '{}/{}.png'.format(fd, base)
        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)

        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
            directory, base)) > 0

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixel_candidates,
                                                  pixel_annotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

        [
            pixel_precision, pixel_accuracy, pixel_specificity,
            pixel_sensitivity
        ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                               pixelTN)

        if window_method != 'None':
            # Accumulate object performance of the current image ################
            window_annotations = load_annotations('{}/gt/gt.{}.txt'.format(
                directory, base))
            [localWindowTP, localWindowFN, localWindowFP
             ] = evalf.performance_accumulation_window(window_candidates,
                                                       window_annotations)
            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

            # Plot performance evaluation
            [window_precision, window_sensitivity, window_accuracy
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)

    print("meanTime", totalTime / len(dataset))
    print("pixelTP", pixelTP, "\t", pixelFP, "\t", pixelFN)
    return [
        pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
        window_precision, window_accuracy
    ]
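Both test variants build their training/validation split per signal type with `divide_training_validation_SL`, which is not shown. A hypothetical per-class split (the 70/30 ratio, the shuffling and the fixed seed are assumptions) could be:

import random

def divide_training_validation_sl_sketch(signal_list, train_fraction=0.7, seed=0):
    """Shuffle one signal type's list and cut it into training and validation parts."""
    items = list(signal_list)
    random.Random(seed).shuffle(items)
    cut = int(len(items) * train_fraction)
    return items[:cut], items[cut:]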
Example #10
def traffic_sign_detection(directory, output_dir, pixel_method, window_method,
                           calculate_metrics):

    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    pixel_F1 = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy = 0
    window_F1 = 0

    counter = 0

    # Load image names in the given directory
    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    for name in file_names:
        counter += 1
        base, extension = os.path.splitext(name)

        # Read file
        im = imageio.imread('{}/{}'.format(directory, name))
        #print ('{}/{}'.format(directory,name))

        # Candidate Generation (pixel) ######################################
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        # pixel_candidates = morphological_operators(pixel_candidates)
        pixel_candidates = connected_labels_pixel_cand(im, pixel_candidates)

        fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
        if not os.path.exists(fd):
            os.makedirs(fd)

        out_mask_name = '{}/{}.png'.format(fd, base)

        if window_method != 'None':

            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)
            window_mask = np.zeros(pixel_candidates.shape)
            for window_candidate in window_candidates:
                window_mask[window_candidate[0]:window_candidate[2],
                            window_candidate[1]:
                            window_candidate[3]] = pixel_candidates[
                                window_candidate[0]:window_candidate[2],
                                window_candidate[1]:window_candidate[3]]
            out_list_name = '{}/{}.pkl'.format(fd, base)
            pixel_candidates = window_mask
            with open(out_list_name, "wb") as fp:  #Pickling
                pickle.dump(window_candidates, fp)

        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        pixel_precision = 0
        pixel_accuracy = 0
        pixel_specificity = 0
        pixel_sensitivity = 0
        window_precision = 0
        window_accuracy = 0

        if calculate_metrics:
            # Accumulate pixel performance of the current image #################
            pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
                directory, base)) > 0

            [localPixelTP, localPixelFP, localPixelFN, localPixelTN
             ] = evalf.performance_accumulation_pixel(pixel_candidates,
                                                      pixel_annotation)
            pixelTP = pixelTP + localPixelTP
            pixelFP = pixelFP + localPixelFP
            pixelFN = pixelFN + localPixelFN
            pixelTN = pixelTN + localPixelTN

            # [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity, pixel_F1] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)

            if window_method != 'None':
                # Accumulate object performance of the current image ################
                window_annotations = load_annotations(
                    '{}/gt/gt.{}.txt'.format(directory, base))
                [localWindowTP, localWindowFN,
                 localWindowFP] = evalf.performance_accumulation_window(
                     window_candidates, window_annotations)

                windowTP = windowTP + localWindowTP
                windowFN = windowFN + localWindowFN
                windowFP = windowFP + localWindowFP

                # Plot performance evaluation
                # [window_precision, window_sensitivity, window_accuracy, window_F1] = evalf.performance_evaluation_window(windowTP, windowFN, windowFP)

    if calculate_metrics:
        [
            pixel_precision, pixel_accuracy, pixel_specificity,
            pixel_sensitivity, pixel_F1
        ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                               pixelTN)
        print("Pixel precision: " + str(pixel_precision))
        print("Pixel accuracy: " + str(pixel_accuracy))
        print("Pixel recall: " + str(pixel_sensitivity))
        print("Pixel F1-measure: " + str(pixel_F1))
        print("Pixel TP: " + str(pixelTP))
        print("Pixel FP: " + str(pixelFP))
        print("Pixel FN: " + str(pixelFN))
        print("Pixel TN: " + str(pixelTN))

        if window_method != 'None':
            [window_precision, window_sensitivity, window_accuracy, window_F1
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)
            print("Window precision: " + str(window_precision))
            print("Window accuracy: " + str(window_accuracy))
            print("Window F1-measure: " + str(window_F1))

    return [
        pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
        pixel_F1, window_precision, window_accuracy, window_F1, counter
    ]
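Example #10 keeps only the pixels inside the proposed windows, slicing the mask as [tly:bry, tlx:brx]. The actual `candidate_generation_window` is not included; one plausible stand-in that proposes windows from connected components in that same [tly, tlx, bry, brx] format (a hypothetical sketch, not the project's method) is:

import cv2
import numpy as np

def candidate_generation_window_sketch(pixel_candidates, min_area=500):
    """Propose windows as bounding boxes [tly, tlx, bry, brx] of connected
    components in a binary mask, dropping blobs smaller than min_area pixels."""
    mask = (np.asarray(pixel_candidates) > 0).astype(np.uint8)
    n_labels, _, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
    windows = []
    for label in range(1, n_labels):  # label 0 is the background component
        x, y, w, h, area = stats[label]
        if area >= min_area:
            windows.append([y, x, y + h, x + w])
    return windows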
Example #11
    for ii in range(len(result_files)):

        # Read mask file
        candidate_masks_name = '{}/{}/{}'.format(results_dir, method,
                                                 result_files[ii])
        print('File: {}'.format(candidate_masks_name))

        pixelCandidates = imageio.imread(candidate_masks_name) > 0

        # Accumulate pixel performance of the current image %%%%%%%%%%%%%%%%%
        name, ext = os.path.splitext(test_files[ii])
        gt_mask_name = '{}/mask/mask.{}.png'.format(test_dir, name)

        pixelAnnotation = imageio.imread(gt_mask_name) > 0
        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixelCandidates,
                                                  pixelAnnotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

        if window_evaluation == 1:
            # Read .pkl file

            name_r, ext_r = os.path.splitext(result_files[ii])
            pkl_name = '{}/{}/{}.pkl'.format(results_dir, method, name_r)

            with open(pkl_name, "rb") as fp:  # Unpickling
                windowCandidates = pickle.load(fp)

            gt_annotations_name = '{}/gt/gt.{}.txt'.format(test_dir, name)
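The truncated snippet above stops right after building `gt_annotations_name`; elsewhere in this listing those gt.<name>.txt files are read with `load_annotations`. A hypothetical parser, assuming one 'tly tlx bry brx sign_type' record per line (the exact file format is an assumption), might be:

def load_annotations_sketch(gt_path):
    """Parse a ground-truth text file into [tly, tlx, bry, brx, sign_type, ...] lists."""
    annotations = []
    with open(gt_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 4:
                tly, tlx, bry, brx = map(float, parts[:4])
                annotations.append([tly, tlx, bry, brx] + parts[4:])
    return annotations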
Example #12
def traffic_sign_detection_test(directory, output_dir, pixel_method, window_method, use_dataset="training"):
    """
	Calculates all statistical evaluation metrics of different pixel selector method (TRAINING AND VALIDATION)
	* Inputs:
	- directory = path to train images
	- outpit_dir = Directory where to store output masks, etc. For instance '~/m1-results/week1/test'
	- pixel_method = pixel method that will segmentate the image
    - window_method = -------
	*Outputs:
	- pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity, window_precision, window_accuracy
	"""
    pixelTP  = 0
    pixelFN  = 0
    pixelFP  = 0
    pixelTN  = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy  = 0
    window_sensitivity  = 0

    # print("splitting in trainning test")
    # Load image names in the given directory
    # file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))
    
    signals_type_dict = get_dictionary()
    
    training, validation = [], []
    for key in signals_type_dict:
        sig_subdict = signals_type_dict[key] 
        training_type, validation_type = divide_training_validation_SL(sig_subdict['signal_list'])
        training.extend(training_type)
        validation.extend(validation_type)

    # print("extracting mask")
    dataset = training
    if use_dataset == 'validation':
        dataset = validation
    # if(CONSOLE_ARGUMENTS.use_test):
    totalTime = 0
    dataset_paths = [signal.img_orig_path for signal in dataset]
    
    for signal_path in tqdm(dataset_paths, ascii=True, desc="Calculating Statistics"):
        startTime = time.time()
        rgb_mask, bb_list = get_pixel_candidates(signal_path)
        totalTime += time.time() - startTime  # accumulate so meanTime below is a true per-image average

        if bb_list is not None:
            bb_list = convertBBFormat(bb_list)
        _, name = signal_path.rsplit('/', 1)
        base, extension = os.path.splitext(name)
        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(directory,base)) > 0

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN] =\
        evalf.performance_accumulation_pixel(rgb_mask, pixel_annotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN
        
        [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity] =\
        evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)

        if bb_list is not None:
            # Accumulate object performance of the current image ################
            window_annotations = load_annotations('{}/gt/gt.{}.txt'.format(directory, base))

            [localWindowTP, localWindowFN, localWindowFP] = \
            evalf.performance_accumulation_window(bb_list, window_annotations)
            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

            # Plot performance evaluation
            [window_precision, window_sensitivity, window_accuracy] = \
            evalf.performance_evaluation_window(windowTP, windowFN, windowFP)
    
    print("meanTime", totalTime/len(dataset))
    print("pixelTP", pixelTP, "\t", pixelFP, "\t", pixelFN)
    print("windowTP", windowTP, "\t", windowFP, "\t", windowFN)
    return [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,\
            window_precision, window_accuracy, window_sensitivity]