Code example #1
def score_pixel_masks(result_masks, test_masks):
    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    for ii, mask_name in enumerate(result_masks):

        # Read mask file
        pixelCandidates = imageio.imread(mask_name) > 0
        if len(pixelCandidates.shape) == 3:
            pixelCandidates = pixelCandidates[:, :, 0]

        # Accumulate pixel performance of the current image %%%%%%%%%%%%%%%%%
        gt_mask_name = test_masks[ii]

        pixelAnnotation = imageio.imread(gt_mask_name) > 0
        if len(pixelAnnotation.shape) == 3:
            pixelAnnotation = pixelAnnotation[:, :, 0]

        if pixelAnnotation.shape != pixelCandidates.shape:
            print(
                'Error: hypothesis ({}) and GT masks ({}) dimensions do not match!'
                .format(pixelCandidates.shape, pixelAnnotation.shape))
            sys.exit()

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixelCandidates,
                                                  pixelAnnotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

    [pixelPrecision, pixelAccuracy, pixelSpecificity, pixelSensitivity
     ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)
    pixelF1 = 0
    if (pixelPrecision + pixelSensitivity) != 0:
        pixelF1 = 2 * ((pixelPrecision * pixelSensitivity) /
                       (pixelPrecision + pixelSensitivity))

    return pixelPrecision, pixelSensitivity, pixelF1
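
The excerpt above relies on evalf.performance_accumulation_pixel, which is not included in these snippets. As a point of reference, a minimal sketch of such a pixel-level confusion-matrix accumulator might look like the following, assuming both masks are boolean NumPy arrays of identical shape and the return order [TP, FP, FN, TN] that the caller unpacks:

import numpy as np

def performance_accumulation_pixel_sketch(candidates, annotation):
    """Hypothetical stand-in for evalf.performance_accumulation_pixel."""
    candidates = candidates.astype(bool)
    annotation = annotation.astype(bool)
    TP = int(np.count_nonzero(candidates & annotation))
    FP = int(np.count_nonzero(candidates & ~annotation))
    FN = int(np.count_nonzero(~candidates & annotation))
    TN = int(np.count_nonzero(~candidates & ~annotation))
    return [TP, FP, FN, TN]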
Code example #2
def segmentate(im_directory, mask_directory, maskOut_directory):
    file_names = sorted(fnmatch.filter(os.listdir(im_directory), '*.jpg'))

    pixelTP = 0
    pixelFP = 0
    pixelFN = 0
    pixelTN = 0

    from main import CONSOLE_ARGUMENTS
    nf = CONSOLE_ARGUMENTS.numFiles
    # For each file
    for name in file_names[:nf]:
        base, extension = os.path.splitext(name)

        imageNameFile = im_directory + "/" + name
        maskNameFile = mask_directory + "/mask." + base + ".png"

        print(imageNameFile)
        image = cv.imread(imageNameFile)
        maskImage = cv.imread(maskNameFile)

        # image = grayWorld(image)
        msk = LabFilter(image)

        img = image * msk
        msk = msk.astype(float)

        cv.imshow("masked image", img)
        cv.imshow("original image", image)
        cv.waitKey(0)

        # cv.imwrite(os.path.join(maskOut_directory,("mask." + base + ".png")),msk)
        pTP, pFP, pFN, pTN = performance_accumulation_pixel(msk, maskImage)
        pixelTP += pTP
        pixelFP += pFP
        pixelFN += pFN
        pixelTN += pTN

    print("precision \t accuracy \t specificity \t sensitivity")
    print(performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN))
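
For reference, the pixel-level metrics produced by performance_evaluation_pixel (precision, accuracy, specificity, sensitivity, as printed above and unpacked in example #1) follow the standard confusion-matrix definitions. The excerpts evidently use several variants of this helper (some also return recall, F1 and raw counts); a guarded sketch of the four-value form, which is an assumption rather than the project's actual implementation, could be:

def performance_evaluation_pixel_sketch(TP, FP, FN, TN):
    """Hypothetical stand-in for the 4-value variant of performance_evaluation_pixel."""
    precision   = TP / (TP + FP) if (TP + FP) > 0 else 0.0
    accuracy    = (TP + TN) / (TP + TN + FP + FN) if (TP + TN + FP + FN) > 0 else 0.0
    specificity = TN / (TN + FP) if (TN + FP) > 0 else 0.0
    sensitivity = TP / (TP + FN) if (TP + FN) > 0 else 0.0  # recall
    return [precision, accuracy, specificity, sensitivity]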
Code example #3
                name_r, ext_r = os.path.splitext(result_files[ii])
                pkl_name      = '{}/{}/{}.pkl'.format(results_dir, method, name_r)
                

                with open(pkl_name, "rb") as fp:   # Unpickling
                    windowCandidates = pickle.load(fp)

                gt_annotations_name = '{}/gt/gt.{}.txt'.format(test_dir, name)
                windowAnnotations = load_annotations(gt_annotations_name)

                [localWindowTP, localWindowFN, localWindowFP] = evalf.performance_accumulation_window(windowCandidates, windowAnnotations)
                windowTP = windowTP + localWindowTP
                windowFN = windowFN + localWindowFN
                windowFP = windowFP + localWindowFP

        # Plot performance evaluation
        [pixelPrecision, pixelAccuracy, pixelSpecificity, pixelSensitivity] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)
        pixelF1 = 0
        if (pixelPrecision + pixelSensitivity) != 0:
            pixelF1 = 2*((pixelPrecision*pixelSensitivity)/(pixelPrecision + pixelSensitivity))
        
        print('Team {:02d} pixel, method {} : {:.2f}, {:.2f}, {:.2f}\n'.format(team, method, pixelPrecision, pixelSensitivity, pixelF1))

        if window_evaluation == 1:
            [windowPrecision, windowSensitivity, windowAccuracy] = evalf.performance_evaluation_window(windowTP, windowFN, windowFP) # (Needed after Week 3)
            windowF1 = 0
            if (windowPrecision + windowSensitivity) != 0:
                windowF1 = 2*((windowPrecision*windowSensitivity)/(windowPrecision + windowSensitivity))

            print('Team {:02d} window, method {} : {:.2f}, {:.2f}, {:.2f}\n'.format(team, method, windowPrecision, windowSensitivity, windowF1))
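
Code examples #3 and #7 also depend on the window-level helpers evalf.performance_accumulation_window and evalf.performance_evaluation_window, which are not part of these excerpts (example #6 appears to use a variant that additionally returns an F1 value). A rough, hypothetical sketch of the pair, assuming boxes in [y0, x0, y1, x1] order and a greedy IoU >= 0.5 match (both assumptions), is:

def iou(a, b):
    # Intersection over union of two boxes given as [y0, x0, y1, x1] (assumed format).
    y0, x0 = max(a[0], b[0]), max(a[1], b[1])
    y1, x1 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, y1 - y0) * max(0, x1 - x0)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

def performance_accumulation_window_sketch(candidates, annotations, thr=0.5):
    """Hypothetical stand-in: greedily match candidate boxes to ground-truth boxes."""
    matched = [False] * len(candidates)
    TP = 0
    for gt in annotations:
        for i, cand in enumerate(candidates):
            if not matched[i] and iou(cand, gt) >= thr:
                matched[i] = True
                TP += 1
                break
    FN = len(annotations) - TP   # ground-truth boxes with no matching candidate
    FP = len(candidates) - TP    # candidates that match no ground truth
    return [TP, FN, FP]

def performance_evaluation_window_sketch(TP, FN, FP):
    """Hypothetical stand-in returning [precision, sensitivity, accuracy]."""
    precision   = TP / (TP + FP) if (TP + FP) > 0 else 0.0
    sensitivity = TP / (TP + FN) if (TP + FN) > 0 else 0.0
    accuracy    = TP / (TP + FN + FP) if (TP + FN + FP) > 0 else 0.0
    return [precision, sensitivity, accuracy]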
Code example #4
def traffic_sign_detection(directory, output_dir, pixel_method, window_method):

    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy = 0

    # Load image names in the given directory
    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    pixel_time = 0
    for name in file_names:
        base, extension = os.path.splitext(name)

        # Read file
        im = imageio.imread('{}/{}'.format(directory, name))
        print('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) ######################################
        start = time.time()
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        end = time.time()
        pixel_time += (end - start)

        fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
        if not os.path.exists(fd):
            os.makedirs(fd)

        out_mask_name = '{}/{}.png'.format(fd, base)
        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)

            out_list_name = '{}/{}.pkl'.format(fd, base)

            with open(out_list_name, "wb") as fp:  #Pickling
                pickle.dump(window_candidates, fp)

        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
            directory, base)) > 0

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixel_candidates,
                                                  pixel_annotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

        [
            pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
            pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN
        ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                               pixelTN)

        if window_method != 'None':
            # Accumulate object performance of the current image ################
            window_annotationss = load_annotations('{}/gt/gt.{}.txt'.format(
                directory, base))
            [localWindowTP, localWindowFN, localWindowFP
             ] = evalf.performance_accumulation_window(window_candidates,
                                                       window_annotationss)

            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

            # Plot performance evaluation
            [window_precision, window_sensitivity, window_accuracy
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)

    pixel_time /= len(file_names)

    return [
        pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
        pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN, pixel_time
    ]
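
A hypothetical invocation of the function above, assuming a dataset root that contains the *.jpg images together with mask/ and gt/ subfolders (the 'hsv' and 'None' arguments are placeholder method names, not ones confirmed by this excerpt):

stats = traffic_sign_detection('datasets/train', 'results/week1', 'hsv', 'None')
(pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
 pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN, pixel_time) = stats
print('precision={:.3f}  F1={:.3f}  mean time per image={:.2f}s'.format(
    pixel_precision, pixel_F1, pixel_time))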
Code example #5
def traffic_sign_detection_test(directory, output_dir, pixel_method,
                                window_method):
    """
	Calculates all statistical evaluation metrics of different pixel selector method (TRAINING AND VALIDATION)
	* Inputs:
	- directory = path to train images
	- outpit_dir = Directory where to store output masks, etc. For instance '~/m1-results/week1/test'
	- pixel_method = pixel method that will segmentate the image
    - window_method = -------
	*Outputs:
	- pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity, window_precision, window_accuracy
	"""
    from main import CONSOLE_ARGUMENTS

    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy = 0

    print("splitting in trainning test")
    # Load image names in the given directory
    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    signals_type_dict = get_dictionary()

    training, validation = [], []
    for key in signals_type_dict:
        sig_subdict = signals_type_dict[key]
        training_type, validation_type = divide_training_validation_SL(
            sig_subdict['signal_list'])
        training.extend(training_type)
        validation.extend(validation_type)

    print("extracting mask")
    dataset = training
    if (CONSOLE_ARGUMENTS.use_validation):
        dataset = validation
    # if(CONSOLE_ARGUMENTS.use_test):
    totalTime = 0
    for signal in dataset:
        signal_path = signal.img_orig_path
        _, name = signal_path.rsplit('/', 1)
        base, extension = os.path.splitext(name)

        # Read file
        im = imageio.imread('{}/{}'.format(directory, name))
        print('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) ######################################
        start = time.time()
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        totalTime += time.time() - start

        fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
        if not os.path.exists(fd):
            os.makedirs(fd)

        out_mask_name = '{}/{}.png'.format(fd, base)
        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)

        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
            directory, base)) > 0

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ] = evalf.performance_accumulation_pixel(pixel_candidates,
                                                  pixel_annotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN

        [
            pixel_precision, pixel_accuracy, pixel_specificity,
            pixel_sensitivity
        ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                               pixelTN)

        if window_method != 'None':
            # Accumulate object performance of the current image ################
            window_annotationss = load_annotations('{}/gt/gt.{}.txt'.format(
                directory, base))
            [localWindowTP, localWindowFN, localWindowFP
             ] = evalf.performance_accumulation_window(window_candidates,
                                                       window_annotationss)
            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

            # Plot performance evaluation
            [window_precision, window_sensitivity, window_accuracy
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)

    print("meanTime", totalTime / len(dataset))
    print("pixelTP", pixelTP, "\t", pixelFP, "\t", pixelFN)
    return [
        pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
        window_precision, window_accuracy
    ]
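
Examples #5 and #8 split the annotated signals into training and validation subsets with divide_training_validation_SL, whose implementation is not part of these excerpts. A purely illustrative splitter, assuming a fixed 70/30 ratio per signal type (the ratio and behaviour are assumptions), might be:

def divide_training_validation_sketch(signal_list, train_ratio=0.7):
    """Hypothetical stand-in for divide_training_validation_SL."""
    cut = int(len(signal_list) * train_ratio)
    return signal_list[:cut], signal_list[cut:]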
Code example #6
def traffic_sign_detection(directory, output_dir, pixel_method, window_method,
                           calculate_metrics):

    pixelTP = 0
    pixelFN = 0
    pixelFP = 0
    pixelTN = 0

    pixel_F1 = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy = 0
    window_F1 = 0

    counter = 0

    # Load image names in the given directory
    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    for name in file_names:
        counter += 1
        base, extension = os.path.splitext(name)

        # Read file
        im = imageio.imread('{}/{}'.format(directory, name))
        #print ('{}/{}'.format(directory,name))

        # Candidate Generation (pixel) ######################################
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        # pixel_candidates = morphological_operators(pixel_candidates)
        pixel_candidates = connected_labels_pixel_cand(im, pixel_candidates)

        fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
        if not os.path.exists(fd):
            os.makedirs(fd)

        out_mask_name = '{}/{}.png'.format(fd, base)

        if window_method != 'None':

            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)
            window_mask = np.zeros(pixel_candidates.shape)
            for window_candidate in window_candidates:
                # Keep only the pixels that fall inside each candidate window
                y0, x0, y1, x1 = window_candidate[:4]
                window_mask[y0:y1, x0:x1] = pixel_candidates[y0:y1, x0:x1]
            out_list_name = '{}/{}.pkl'.format(fd, base)
            pixel_candidates = window_mask
            with open(out_list_name, "wb") as fp:  #Pickling
                pickle.dump(window_candidates, fp)

        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        pixel_precision = 0
        pixel_accuracy = 0
        pixel_specificity = 0
        pixel_sensitivity = 0
        window_precision = 0
        window_accuracy = 0

        if (calculate_metrics):
            # Accumulate pixel performance of the current image #################
            pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
                directory, base)) > 0

            [localPixelTP, localPixelFP, localPixelFN, localPixelTN
             ] = evalf.performance_accumulation_pixel(pixel_candidates,
                                                      pixel_annotation)
            pixelTP = pixelTP + localPixelTP
            pixelFP = pixelFP + localPixelFP
            pixelFN = pixelFN + localPixelFN
            pixelTN = pixelTN + localPixelTN

            # [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity, pixel_F1] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)

            if window_method != 'None':
                # Accumulate object performance of the current image ################
                window_annotationss = load_annotations(
                    '{}/gt/gt.{}.txt'.format(directory, base))
                [localWindowTP, localWindowFN,
                 localWindowFP] = evalf.performance_accumulation_window(
                     window_candidates, window_annotationss)

                windowTP = windowTP + localWindowTP
                windowFN = windowFN + localWindowFN
                windowFP = windowFP + localWindowFP

                # Plot performance evaluation
                # [window_precision, window_sensitivity, window_accuracy, window_F1] = evalf.performance_evaluation_window(windowTP, windowFN, windowFP)

    if (calculate_metrics):
        [
            pixel_precision, pixel_accuracy, pixel_specificity,
            pixel_sensitivity, pixel_F1
        ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                               pixelTN)
        print("Pixel precision: " + str(pixel_precision))
        print("Pixel accuracy: " + str(pixel_accuracy))
        print("Pixel recall: " + str(pixel_sensitivity))
        print("Pixel F1-measure: " + str(pixel_F1))
        print("Pixel TP: " + str(pixelTP))
        print("Pixel FP: " + str(pixelFP))
        print("Pixel FN: " + str(pixelFN))
        print("Pixel TN: " + str(pixelTN))

        if window_method != 'None':
            [window_precision, window_sensitivity, window_accuracy, window_F1
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)
            print("Window precision: " + str(window_precision))
            print("Window accuracy: " + str(window_accuracy))
            print("Window F1-measure: " + str(window_F1))

    return [
        pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
        pixel_F1, window_precision, window_accuracy, window_F1, counter
    ]
Code example #7
            with open(pkl_name, "rb") as fp:  # Unpickling
                windowCandidates = pickle.load(fp)

            gt_annotations_name = '{}/gt/gt.{}.txt'.format(test_dir, name)
            windowAnnotations = load_annotations(gt_annotations_name)

            [localWindowTP, localWindowFN, localWindowFP
             ] = evalf.performance_accumulation_window(windowCandidates,
                                                       windowAnnotations)
            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

    # Plot performance evaluation
    [pixelPrecision, pixelAccuracy, pixelSpecificity, pixelSensitivity
     ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)
    # Guard against division by zero when computing the F1 score
    pixelF1 = 0
    if (pixelPrecision + pixelSensitivity) != 0:
        pixelF1 = 2 * ((pixelPrecision * pixelSensitivity) /
                       (pixelPrecision + pixelSensitivity))

    print('Team {:02d} pixel, method {} : {:.2f}, {:.2f}, {:.2f}\n'.format(
        team, method, pixelPrecision, pixelSensitivity, pixelF1))

    if window_evaluation == 1:
        [windowPrecision, windowSensitivity,
         windowAccuracy] = evalf.performance_evaluation_window(
             windowTP, windowFN, windowFP)  # (Needed after Week 3)
        windowF1 = 0
        if (windowPrecision + windowSensitivity) != 0:
            windowF1 = 2 * ((windowPrecision * windowSensitivity) /
                            (windowPrecision + windowSensitivity))

        print(
            'Team {:02d} window, method {} : {:.2f}, {:.2f}, {:.2f}\n'.format(
                team, method, windowPrecision, windowSensitivity, windowF1))
Code example #8
def traffic_sign_detection_test(directory, output_dir, pixel_method, window_method, use_dataset="training"):
    """
	Calculates all statistical evaluation metrics of different pixel selector method (TRAINING AND VALIDATION)
	* Inputs:
	- directory = path to train images
	- outpit_dir = Directory where to store output masks, etc. For instance '~/m1-results/week1/test'
	- pixel_method = pixel method that will segmentate the image
    - window_method = -------
	*Outputs:
	- pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity, window_precision, window_accuracy
	"""
    pixelTP  = 0
    pixelFN  = 0
    pixelFP  = 0
    pixelTN  = 0

    windowTP = 0
    windowFN = 0
    windowFP = 0

    window_precision = 0
    window_accuracy  = 0
    window_sensitivity  = 0

    # print("splitting in trainning test")
    # Load image names in the given directory
    # file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))
    
    signals_type_dict = get_dictionary()
    
    training, validation = [], []
    for key in signals_type_dict:
        sig_subdict = signals_type_dict[key] 
        training_type, validation_type = divide_training_validation_SL(sig_subdict['signal_list'])
        training.extend(training_type)
        validation.extend(validation_type)

    # print("extracting mask")
    dataset = training
    if(use_dataset == 'validation'):
        dataset = validation
    # if(CONSOLE_ARGUMENTS.use_test):
    totalTime = 0
    dataset_paths = [signal.img_orig_path for signal in dataset]
    
    for signal_path in tqdm(dataset_paths, ascii=True, desc="Calculating Statistics"):
        startTime = time.time()
        rgb_mask, bb_list = get_pixel_candidates(signal_path)
        totalTime += time.time() - startTime  # accumulate so the meanTime below is a true mean
        
        if(bb_list is not None): bb_list = convertBBFormat(bb_list)
        _, name = signal_path.rsplit('/', 1)
        base, extension = os.path.splitext(name)
        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(directory,base)) > 0

        [localPixelTP, localPixelFP, localPixelFN, localPixelTN] =\
        evalf.performance_accumulation_pixel(rgb_mask, pixel_annotation)
        pixelTP = pixelTP + localPixelTP
        pixelFP = pixelFP + localPixelFP
        pixelFN = pixelFN + localPixelFN
        pixelTN = pixelTN + localPixelTN
        
        [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity] =\
        evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN, pixelTN)

        if bb_list is not None:
            # Accumulate object performance of the current image ################
            window_annotationss = load_annotations('{}/gt/gt.{}.txt'.format(directory, base))                

            [localWindowTP, localWindowFN, localWindowFP] = \
            evalf.performance_accumulation_window(bb_list, window_annotationss)
            windowTP = windowTP + localWindowTP
            windowFN = windowFN + localWindowFN
            windowFP = windowFP + localWindowFP

            # Plot performance evaluation
            [window_precision, window_sensitivity, window_accuracy] = \
            evalf.performance_evaluation_window(windowTP, windowFN, windowFP)
    
    print("meanTime", totalTime/len(dataset))
    print("pixelTP", pixelTP, "\t", pixelFP, "\t", pixelFN)
    print("windowTP", windowTP, "\t", windowFP, "\t", windowFN)
    return [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,\
            window_precision, window_accuracy, window_sensitivity]
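
As with example #4, a hypothetical call to the function above (the path and the 'lab'/'None' method names are placeholders, not values confirmed by this excerpt) would be:

stats = traffic_sign_detection_test('datasets/train', 'results/test',
                                    'lab', 'None', use_dataset='validation')
(pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
 window_precision, window_accuracy, window_sensitivity) = stats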