def worker(x):
    """Process one image: generate pixel and window candidates and persist both.

    Designed to be called from a process/thread pool, hence the single packed
    argument.

    Args:
        x: tuple ``(image_file, output_dir, pixel_method, window_method)``.

    Side effects:
        Writes ``<output_dir>/<pixel_method>_<window_method>/<name>.png`` (the
        pixel candidate mask) and ``<name>.pkl`` (the pickled window list).
    """
    image_file, output_dir, pixel_method, window_method = x
    name = os.path.splitext(os.path.split(image_file)[1])[0]

    im = imageio.imread(image_file)
    print(image_file)

    pixel_candidates = candidate_generation_pixel(im, pixel_method)
    window_candidates = candidate_generation_window(im, pixel_candidates,
                                                    window_method)

    fd = os.path.join(output_dir, '{}_{}'.format(pixel_method, window_method))
    # exist_ok avoids the check-then-create race when several workers start
    # simultaneously (the original exists()/makedirs() pair could crash).
    os.makedirs(fd, exist_ok=True)

    out_mask_name = '{}/{}.png'.format(fd, name)
    imageio.imwrite(out_mask_name, pixel_candidates.astype(np.uint8))

    out_list_name = '{}/{}.pkl'.format(fd, name)
    with open(out_list_name, "wb") as fp:
        pickle.dump(window_candidates, fp)
def traffic_sign_detection(directory, output_dir, pixel_method, window_method):
    """Run and evaluate a pixel (and optional window) detection method.

    For every ``*.jpg`` in *directory*: generate a pixel-candidate mask, save
    it as PNG, optionally generate window candidates (saved as a pickle), and
    accumulate TP/FP/FN/TN against the ground-truth masks in
    ``<directory>/mask`` (and annotations in ``<directory>/gt`` for windows).

    Args:
        directory: path containing the input ``*.jpg`` images.
        output_dir: root directory where masks/pickles are written.
        pixel_method: identifier passed to ``candidate_generation_pixel``.
        window_method: identifier passed to ``candidate_generation_window``;
            the string ``'None'`` disables the window stage.

    Returns:
        ``[pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
        pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN,
        pixel_time]`` where ``pixel_time`` is the mean per-image pixel-stage
        runtime in seconds.

    Raises:
        ValueError: if *directory* contains no ``*.jpg`` files (the original
            code would die with ZeroDivisionError/NameError instead).
    """
    pixelTP = pixelFP = pixelFN = pixelTN = 0
    windowTP = windowFN = windowFP = 0

    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))
    if not file_names:
        raise ValueError("no '*.jpg' images found in {}".format(directory))

    # Output directory is loop-invariant: create it once, race-free.
    fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
    os.makedirs(fd, exist_ok=True)

    pixel_time = 0
    for name in file_names:
        base, extension = os.path.splitext(name)

        im = imageio.imread('{}/{}'.format(directory, name))
        print('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) — timed ##############################
        start = time.time()
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        pixel_time += time.time() - start

        out_mask_name = '{}/{}.png'.format(fd, base)
        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)
            out_list_name = '{}/{}.pkl'.format(fd, base)
            with open(out_list_name, "wb") as fp:  # Pickling
                pickle.dump(window_candidates, fp)

        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
            directory, base)) > 0
        (localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ) = evalf.performance_accumulation_pixel(pixel_candidates,
                                                  pixel_annotation)
        pixelTP += localPixelTP
        pixelFP += localPixelFP
        pixelFN += localPixelFN
        pixelTN += localPixelTN

        if window_method != 'None':
            # Accumulate object performance of the current image ############
            window_annotationss = load_annotations('{}/gt/gt.{}.txt'.format(
                directory, base))
            (localWindowTP, localWindowFN, localWindowFP
             ) = evalf.performance_accumulation_window(window_candidates,
                                                       window_annotationss)
            windowTP += localWindowTP
            windowFN += localWindowFN
            windowFP += localWindowFP

    # Evaluate once from the accumulated totals (the original recomputed
    # these on every loop iteration, discarding all but the last result).
    [pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
     pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN
     ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                            pixelTN)

    if window_method != 'None':
        # Computed for completeness; window metrics are not part of the
        # return value of this variant.
        [window_precision, window_sensitivity, window_accuracy
         ] = evalf.performance_evaluation_window(windowTP, windowFN, windowFP)

    pixel_time /= len(file_names)

    return [
        pixel_precision, pixel_accuracy, pixel_recall, pixel_specificity,
        pixel_sensitivity, pixel_F1, pixel_TP, pixel_FP, pixel_FN, pixel_time
    ]
def traffic_sign_detection_test(directory, output_dir, pixel_method,
                                window_method):
    """ Calculates all statistical evaluation metrics of different pixel
    selector method (TRAINING AND VALIDATION)

    * Inputs:
        - directory = path to train images
        - output_dir = Directory where to store output masks, etc. For
          instance '~/m1-results/week1/test'
        - pixel_method = pixel method that will segmentate the image
        - window_method = window method identifier; ``'None'`` disables the
          window stage
    * Outputs:
        - [pixel_precision, pixel_accuracy, pixel_specificity,
          pixel_sensitivity, window_precision, window_accuracy]

    Raises:
        ValueError: if the selected dataset split is empty (the original
            code would die with ZeroDivisionError/NameError instead).
    """
    from main import CONSOLE_ARGUMENTS

    pixelTP = pixelFP = pixelFN = pixelTN = 0
    windowTP = windowFN = windowFP = 0
    window_precision = 0
    window_accuracy = 0

    print("splitting in trainning test")

    # Build the training/validation split from the signal dictionary.
    signals_type_dict = get_dictionary()
    training, validation = [], []
    for key in signals_type_dict:
        sig_subdict = signals_type_dict[key]
        training_type, validation_type = divide_training_validation_SL(
            sig_subdict['signal_list'])
        training.extend(training_type)
        validation.extend(validation_type)

    print("extracting mask")
    dataset = validation if CONSOLE_ARGUMENTS.use_validation else training
    # if(CONSOLE_ARGUMENTS.use_test):
    if not dataset:
        raise ValueError("selected dataset split is empty")

    # Output directory is loop-invariant: create it once, race-free.
    fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
    os.makedirs(fd, exist_ok=True)

    totalTime = 0
    for signal in dataset:
        # os.path.basename is portable and, unlike rsplit('/', 1), does not
        # raise when the path has no separator.
        name = os.path.basename(signal.img_orig_path)
        base, extension = os.path.splitext(name)

        im = imageio.imread('{}/{}'.format(directory, name))
        print('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) — timed ##############################
        start = time.time()
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        totalTime += time.time() - start

        out_mask_name = '{}/{}.png'.format(fd, base)
        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)

        # Accumulate pixel performance of the current image #################
        pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
            directory, base)) > 0
        (localPixelTP, localPixelFP, localPixelFN, localPixelTN
         ) = evalf.performance_accumulation_pixel(pixel_candidates,
                                                  pixel_annotation)
        pixelTP += localPixelTP
        pixelFP += localPixelFP
        pixelFN += localPixelFN
        pixelTN += localPixelTN

        if window_method != 'None':
            # Accumulate object performance of the current image ############
            window_annotationss = load_annotations('{}/gt/gt.{}.txt'.format(
                directory, base))
            (localWindowTP, localWindowFN, localWindowFP
             ) = evalf.performance_accumulation_window(window_candidates,
                                                       window_annotationss)
            windowTP += localWindowTP
            windowFN += localWindowFN
            windowFP += localWindowFP

    # Evaluate once from the accumulated totals (the original recomputed
    # these on every loop iteration, discarding all but the last result).
    [pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity
     ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                            pixelTN)

    if window_method != 'None':
        [window_precision, window_sensitivity, window_accuracy
         ] = evalf.performance_evaluation_window(windowTP, windowFN, windowFP)

    print("meanTime", totalTime / len(dataset))
    print("pixelTP", pixelTP, "\t", pixelFP, "\t", pixelFN)

    return [
        pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
        window_precision, window_accuracy
    ]
def traffic_sign_detection(directory, output_dir, pixel_method, window_method,
                           calculate_metrics):
    """Detect traffic signs in every ``*.jpg`` of *directory* and optionally
    score the result against ground truth.

    NOTE(review): this redefines the earlier ``traffic_sign_detection`` in
    this module (different arity) — confirm which variant callers expect.

    Args:
        directory: path containing the input ``*.jpg`` images.
        output_dir: root directory where masks/pickles are written.
        pixel_method: identifier passed to ``candidate_generation_pixel``.
        window_method: identifier passed to ``candidate_generation_window``;
            the string ``'None'`` disables the window stage.
        calculate_metrics: when truthy, evaluate against ``<directory>/mask``
            and ``<directory>/gt`` and print a summary.

    Returns:
        ``[pixel_precision, pixel_accuracy, pixel_specificity,
        pixel_sensitivity, pixel_F1, window_precision, window_accuracy,
        window_F1, counter]`` — all metrics are 0 when *calculate_metrics*
        is falsy; ``counter`` is the number of images processed.
    """
    pixelTP = pixelFP = pixelFN = pixelTN = 0
    windowTP = windowFN = windowFP = 0
    pixel_F1 = 0
    window_precision = 0
    window_accuracy = 0
    window_F1 = 0
    counter = 0
    # Initialized before the loop so an empty directory cannot leave these
    # names unbound at the return statement (the original assigned them
    # inside the loop body only, raising NameError on an empty input set).
    pixel_precision = 0
    pixel_accuracy = 0
    pixel_specificity = 0
    pixel_sensitivity = 0

    file_names = sorted(fnmatch.filter(os.listdir(directory), '*.jpg'))

    # Output directory is loop-invariant: create it once, race-free.
    fd = '{}/{}_{}'.format(output_dir, pixel_method, window_method)
    os.makedirs(fd, exist_ok=True)

    for name in file_names:
        counter += 1
        base, extension = os.path.splitext(name)

        im = imageio.imread('{}/{}'.format(directory, name))

        # Candidate Generation (pixel) ######################################
        pixel_candidates = candidate_generation_pixel(im, pixel_method)
        pixel_candidates = connected_labels_pixel_cand(im, pixel_candidates)

        out_mask_name = '{}/{}.png'.format(fd, base)

        if window_method != 'None':
            window_candidates = candidate_generation_window(
                im, pixel_candidates, window_method)

            # Keep pixel candidates only inside the retained windows; all
            # pixels outside every candidate window are zeroed.
            window_mask = np.zeros(pixel_candidates.shape)
            for wc in window_candidates:
                window_mask[wc[0]:wc[2], wc[1]:wc[3]] = \
                    pixel_candidates[wc[0]:wc[2], wc[1]:wc[3]]

            out_list_name = '{}/{}.pkl'.format(fd, base)
            pixel_candidates = window_mask
            with open(out_list_name, "wb") as fp:  # Pickling
                pickle.dump(window_candidates, fp)

        imageio.imwrite(out_mask_name, np.uint8(np.round(pixel_candidates)))

        if calculate_metrics:
            # Accumulate pixel performance of the current image #############
            pixel_annotation = imageio.imread('{}/mask/mask.{}.png'.format(
                directory, base)) > 0
            (localPixelTP, localPixelFP, localPixelFN, localPixelTN
             ) = evalf.performance_accumulation_pixel(pixel_candidates,
                                                      pixel_annotation)
            pixelTP += localPixelTP
            pixelFP += localPixelFP
            pixelFN += localPixelFN
            pixelTN += localPixelTN

            if window_method != 'None':
                # Accumulate object performance of the current image ########
                window_annotationss = load_annotations(
                    '{}/gt/gt.{}.txt'.format(directory, base))
                (localWindowTP, localWindowFN, localWindowFP
                 ) = evalf.performance_accumulation_window(
                     window_candidates, window_annotationss)
                windowTP += localWindowTP
                windowFN += localWindowFN
                windowFP += localWindowFP

    if calculate_metrics:
        [pixel_precision, pixel_accuracy, pixel_specificity,
         pixel_sensitivity, pixel_F1
         ] = evalf.performance_evaluation_pixel(pixelTP, pixelFP, pixelFN,
                                                pixelTN)
        print("Pixel precision: " + str(pixel_precision))
        print("Pixel accuracy: " + str(pixel_accuracy))
        print("Pixel recall: " + str(pixel_sensitivity))
        print("Pixel F1-measure: " + str(pixel_F1))
        print("Pixel TP: " + str(pixelTP))
        print("Pixel FP: " + str(pixelFP))
        print("Pixel FN: " + str(pixelFN))
        print("Pixel TN: " + str(pixelTN))

        if window_method != 'None':
            [window_precision, window_sensitivity, window_accuracy, window_F1
             ] = evalf.performance_evaluation_window(windowTP, windowFN,
                                                     windowFP)
            print("Window precision: " + str(window_precision))
            print("Window accuracy: " + str(window_accuracy))
            print("Window F1-measure: " + str(window_F1))

    return [
        pixel_precision, pixel_accuracy, pixel_specificity, pixel_sensitivity,
        pixel_F1, window_precision, window_accuracy, window_F1, counter
    ]