def uploaded_file(filename): # read image file and make prediction here = os.getcwd() image_path = os.path.join(here, app.config['UPLOAD_FOLDER'], filename) image = cv2.imread(image_path) (class_ids, labels, boxes, confidences) = yolo_forward(net, LABELS, image, confidence_level=0.3) # format data for template rendering # found emotions, save images with bounding boxes. if len(class_ids) > 0: found = True new_filename = 'yolo_' + filename file_path = os.path.join(here, app.config['UPLOAD_FOLDER'], new_filename) yolo_save_img(image, class_ids, boxes, labels, confidences, COLORS, file_path=file_path) # help function to format result sentences. def and_syntax(alist): if len(alist) == 1: alist = "".join(alist) return alist elif len(alist) == 2: alist = " and ".join(alist) return alist elif len(alist) > 2: alist[-1] = "and " + alist[-1] alist = ", ".join(alist) return alist else: return # confidences: rounding and changing to percent, putting in function format_confidences = [] for percent in confidences: format_confidences.append(str(round(percent * 100)) + '%') format_confidences = and_syntax(format_confidences) # labels: sorting and capitalizing, putting into function labels = set(labels) labels = [oreo.capitalize() for oreo in labels] labels = and_syntax(labels) # return template with data return render_template('results.html', confidences=format_confidences, labels=labels, old_filename=filename, filename=new_filename) else: found = False return render_template('results.html', labels='no Oreo', old_filename=filename, filename=filename)
def get_prediction(image_folder_root, LABELS, cfg_path, weight_path,
                   confidence_level=0.5):
    """Run every image in a given folder through darknet.

    Args:
        image_folder_root: directory containing .jpg images paired with
            .txt ground-truth files in YOLO label format.
        LABELS: list of class-name strings.
        cfg_path: path to the darknet .cfg file.
        weight_path: path to the darknet .weights file.
        confidence_level: minimum detection confidence (default 0.5).

    Returns:
        A list of dicts, one per successfully-predicted image, with keys:
        image_path, class_ids, labels, boxes, confidences,
        true_labels, true_boxes.
    """
    # Load YOLO into Python.
    net = get_yolo_net(cfg_path, weight_path)

    # Split the directory listing into image files and label files.
    # endswith() (not substring matching) so names like 'a.jpg.bak'
    # are not picked up by mistake.
    files = os.listdir(image_folder_root)
    image_paths = sorted(os.path.join(image_folder_root, f)
                         for f in files if f.endswith('.jpg'))
    txt_paths = sorted(os.path.join(image_folder_root, f)
                       for f in files if f.endswith('.txt'))

    # NOTE(review): pairing relies on every image having exactly one .txt
    # with the same stem, so the two sorted lists line up pairwise --
    # confirm this holds for the dataset.
    results = []
    for image_path, txt_path in zip(image_paths, txt_paths):
        try:
            # Get image height and width.
            image = cv2.imread(image_path)
            (H, W) = image.shape[:2]

            # Get darknet prediction data.
            class_ids, labels, boxes, confidences = yolo_forward(
                net, LABELS, image, confidence_level)
            print(
                f"SUCCESS: Predicted Class IDs: {class_ids}; Labels: {labels}; for image: {image_path}.\n"
            )
        except Exception:
            # Best-effort: skip unreadable or broken images and continue.
            print(f"ERROR: This image had an error: {image_path}")
            continue

        # Ground truth: one line per object, 'class cx cy w h' (YOLO units).
        with open(txt_path, "r") as f:
            txt_labels = f.readlines()
        true_labels = [int(line.split()[0]) for line in txt_labels]
        true_boxes = [line.split()[1:] for line in txt_labels]

        # Convert boxes from normalized YOLO coords to pixel dimensions.
        for i, box in enumerate(true_boxes):
            box = [float(num) for num in box]
            true_boxes[i] = yolo_to_standard_dims(box, H, W)

        # Collect everything pertinent about this image.
        results.append({
            'image_path': image_path,
            'class_ids': class_ids,
            'labels': labels,
            'boxes': boxes,
            'confidences': confidences,
            'true_labels': true_labels,
            'true_boxes': true_boxes,
        })

    return results
def _classify_dir(net, labels, img_dir, actual, confidence_level, threshold):
    """Run YOLO over every .JPG in img_dir and score each image as 0/1.

    Args:
        net: loaded YOLO network.
        labels: class-name list passed through to yolo_forward.
        img_dir: directory of images to scan.
        actual: ground-truth binary label (1 = car present, 0 = absent)
            recorded for every image processed.
        confidence_level: minimum detection confidence.
        threshold: NMS threshold passed to yolo_forward.

    Returns:
        (detections, y_actu, y_pred, total) where detections is the list
        of non-empty yolo_forward results, y_actu/y_pred are parallel
        binary label lists, and total is the number of images processed.
    """
    detections = []
    y_actu, y_pred = [], []
    total = 0
    for img in os.listdir(img_dir):
        # NOTE(review): only upper-case '.JPG' is matched, as in the
        # original code -- lower-case '.jpg' files are skipped.
        if not img.endswith('.JPG'):
            continue
        image = cv2.imread(os.path.join(img_dir, img))
        result = yolo_forward(net, labels, image, confidence_level, threshold)
        total += 1
        y_actu.append(actual)
        if result == ([], [], [], []):
            # Nothing detected in this image.
            y_pred.append(0)
        else:
            detections.append(result)
            print(result)
            y_pred.append(1)
    return detections, y_actu, y_pred, total


def evaluate():
    """Evaluate the detector on positive and negative car photo sets.

    Scans 'labeled_car_data' (ground truth: car present) and
    'unlabeled_car_data' (ground truth: car absent), prints per-image
    detections and summary counts.

    Returns:
        (y_actu, y_pred): parallel lists of ground-truth and predicted
        binary labels, suitable for building a confusion matrix.
    """
    labels = ["Mom's Car"]
    positive_img_dir = os.path.join(os.getcwd(), 'labeled_car_data')
    negative_img_dir = os.path.join(os.getcwd(), 'unlabeled_car_data')
    confidence_level = 0.5
    threshold = 0.3

    net_clement = get_yolo_net('yolov3.cfg', 'yolov3_final.weights')

    # Positives first, then negatives -- preserves the original print order.
    pos_dets, pos_actu, pos_pred, pos_total = _classify_dir(
        net_clement, labels, positive_img_dir, 1, confidence_level, threshold)
    neg_dets, neg_actu, neg_pred, neg_total = _classify_dir(
        net_clement, labels, negative_img_dir, 0, confidence_level, threshold)

    img_list = pos_dets + neg_dets
    y_actu = pos_actu + neg_actu
    y_pred = pos_pred + neg_pred
    total_counter = pos_total + neg_total
    counter = len(img_list)

    print('\n{} images detected out of {} total'.format(
        counter, total_counter))

    # Each detection tuple's first element is its class_ids list; its
    # length is the number of bounding boxes in that image.
    moms_car_counter = sum(len(det[0]) for det in img_list)
    print("Mom's car detected {} times".format(moms_car_counter))
    return y_actu, y_pred