def main():
    model = M.yolo_darknet19(input_shape=(416, 416, 3), output_depth=5)
    yolo_outputs = Y.yolo_head(model.output)
    to_evaluate = Y.yolo_eval(yolo_outputs)
    sess = K.get_session()
    Y.predict(sess, data, to_evaluate)
def getupload():
    global image, motor, helmet
    if request.method == "POST":
        # memory = request.files['memory']
        # image, mot, hel = predict(memory.filename)
        # motor += mot
        # helmet += hel
        # return redirect('/upload')
        try:
            memory = request.files['memory']
            description = request.form['description']
            print(memory.filename == "", description == "")
            if memory.filename != "" and description != "":
                print("One")
                return render_template(
                    'upload.html',
                    errorMessage="Please either upload a photo or link a url")
            # elif memory.filename != "" and description != "":
            #     return render_template('upload.html', errorMessage="Please either upload a photo or link a url.")
            elif memory.filename != "":
                print("Two")
                print(memory.filename)
                print("here1")
                image, mot, hel = predict(memory.filename)
                cv.imwrite("/static/new_output.jpg", image)
                print("here2")
                motor += mot
                helmet += hel
                print(memory.filename)
                print("END")
            elif description != "":
                print("Three")
                print(description)
                image, stats = predict(description)
                motor += stats[0]
                helmet += stats[1]
                print(memory.filename)
            else:
                print("Four")
                return render_template(
                    'upload.html',
                    errorMessage="Please either upload a photo or link a url")
        except:
            print("EXCEPT")
            return render_template(
                'upload.html',
                errorMessage="Please either upload a photo or link a url.")
    return redirect("/results")
def test_train_yolo_framework(setup_model_config, setup_weights_file,
                              setup_dataset_folder, setup_image_and_its_boxes):
    model_config = setup_model_config
    pretrained_feature_file, weight_file = setup_weights_file
    img_folder, ann_folder = setup_dataset_folder

    # 1. Construct the model
    yolo = create_yolo(model_config['architecture'],
                       model_config['labels'],
                       model_config['input_size'],
                       model_config['anchors'],
                       pretrained_feature_file)

    # 2. warmup training
    yolo.train(img_folder, ann_folder, 2, weight_file, 2, False, 1e-4, 10, 1, 2,
               img_folder, ann_folder)

    # 3. Load the warmup-trained weights
    yolo.load_weights(weight_file)

    # 4. actual training
    yolo.train(img_folder, ann_folder, 12, weight_file, 2, False, 1e-4, 10, 1, 0,
               img_folder, ann_folder)

    # 5. Load training image & predict objects
    image, true_boxes = setup_image_and_its_boxes
    boxes, probs = yolo.predict(image)
    boxes = to_centroid(boxes)

    assert len(boxes) == 2
    assert len(probs) == 2
    assert np.allclose(np.argmax(probs, axis=1), [0, 3])
    for box, true_box in zip(boxes, true_boxes):
        iou = centroid_box_iou(box, true_box)
        assert iou > 0.4
def detect(img_path):
    # Takes an image and runs the model over it.
    # If you only want to test with a single image, swap this for cv2.imread(img_path).
    image = img_path
    boxes, probs = yolo.predict(image, float(DEFAULT_THRESHOLD))
    image = draw_scaled_boxes(image, boxes, probs, labels, 1)
    return image
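# --- Hedged usage sketch (not from the original source) ----------------------
# Shows how detect() above might be called on a single image read from disk,
# per the comment inside detect(). Assumes the module-level yolo, labels and
# DEFAULT_THRESHOLD objects are already set up; the file names are placeholders.
import cv2

frame = cv2.imread("sample.jpg")          # placeholder input path
annotated = detect(frame)                 # detect() expects the image array itself
cv2.imwrite("sample_out.jpg", annotated)  # placeholder output path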
def get_digits_boxes_img(image):
    im_size = np.shape(image)
    boxes, probs = yolo.predict(image, float(threshold))
    labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
    probs = np.max(probs, axis=1) if len(probs) > 0 else []

    # 4. save detection result
    image = draw_scaled_boxes(image, boxes, probs, config['model']['labels'])

    centers = []
    for count in range(len(boxes)):
        box = boxes[count]
        x = (box[0] + box[2] / 2) / im_size[0]
        y = (box[1] + box[3] / 2) / im_size[1]
        centers.append([x, y])
    centers = np.array(centers)

    jersey_numbers = []
    if len(centers) > 1:
        cluster.fit_predict(centers)
        cluster_labels = cluster.labels_
        clusters = np.unique(cluster_labels)
        conf_score = []
        for c_id in clusters:
            g_centers = centers[list(cluster_labels == c_id)]
            g_labels = labels[list(cluster_labels == c_id)]
            g_probs = probs[list(cluster_labels == c_id)]
            center_x = [center[0] for center in g_centers]
            g_labels = g_labels[np.argsort(center_x)]
            number = int(''.join(str(label) for label in g_labels))
            jersey_numbers.append(number)
            conf_score.append(np.mean(g_probs))
    else:
        jersey_numbers = list(labels)
        conf_score = [np.mean(probs)]

    # print("{}-boxes are detected".format(len(boxes)))
    # print("jersey numbers detected are {}".format(jersey_numbers))
    return jersey_numbers, conf_score
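# --- Hedged usage sketch (not from the original source) ----------------------
# Example call site for get_digits_boxes_img() above: run it on one frame and
# pair each clustered jersey number with its mean confidence. Assumes the
# module-level yolo, threshold, config and cluster objects are initialised;
# the image path is a placeholder.
import cv2

frame = cv2.imread("player_crop.jpg")  # placeholder input path
numbers, scores = get_digits_boxes_img(frame)
for number, score in zip(numbers, scores):
    print("jersey {} (confidence {:.2f})".format(number, score))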
def predict(real_time=False):
    sess = K.get_session()
    X_test, Y_test = utils.load_data('data.h5', 0, 1)

    model = M.yolo_darknet19(input_shape=(416, 416, 3), output_depth=5)
    model.load_weights('darknet19_weights_full_66.h5')
    yolo_outputs = Y.yolo_head(model.output)
    to_evaluate = Y.yolo_eval(yolo_outputs)

    p, x, y = Y.predict(sess, X_test, to_evaluate, model)
    m = p.shape[0]

    if not real_time:
        for i in range(m):
            print('Image_' + str(i) + ' : with probability of ' + str(p[i]) +
                  ' ball coords are (' + str(x[i]) + '; ' + str(y[i]) + ')')
            utils.draw_result(X_test[i, ...], x[i], y[i])
    else:
        pass
def test_predict(setup_image_and_its_boxes, setup_outputs, setup_config):
    # 1. Given
    image, true_boxes = setup_image_and_its_boxes
    model_config = setup_config
    desired_boxes, desired_probs = setup_outputs

    # 2. When run
    # 1. Construct the model
    yolo = create_yolo(model_config['architecture'],
                       model_config['labels'],
                       model_config['input_size'],
                       model_config['anchors'])
    yolo.load_weights(os.path.join(TEST_SAMPLE_DIR, "mobile_288_weights.h5"))
    boxes, probs = yolo.predict(image)
    boxes = to_centroid(boxes)

    assert len(boxes) == 2
    assert len(probs) == 2
    for box, true_box in zip(boxes, true_boxes):
        iou = centroid_box_iou(box, true_box)
        assert iou > 0.5
annotations = parse_annotation(config['train']['valid_annot_folder'],
                               config['train']['valid_image_folder'],
                               config['model']['labels'],
                               is_only_detect=config['train']['is_only_detect'])

n_true_positives = 0
n_truth = 0
n_pred = 0
for i in range(len(annotations)):
    img_path = annotations.fname(i)
    img_fname = os.path.basename(img_path)
    image = cv2.imread(img_path)
    true_boxes = annotations.boxes(i)
    true_labels = annotations.code_labels(i)

    boxes, probs = yolo.predict(image, float(args.threshold))
    labels = np.argmax(probs, axis=1) if len(probs) > 0 else []

    # 4. save detection result
    image = draw_scaled_boxes(image, boxes, probs, config['model']['labels'])
    output_path = os.path.join(write_dname, os.path.split(img_fname)[-1])
    cv2.imwrite(output_path, image)
    print("{}-boxes are detected. {} saved.".format(len(boxes), output_path))

    n_true_positives += count_true_positives(boxes, true_boxes, labels, true_labels)
    n_truth += len(true_boxes)
    n_pred += len(boxes)
    elif spotifyObject_1.currently_playing()["is_playing"]:
        # the track is playing, so pause it
        spotifyObject_1.pause_playback()
        print("pause playback")
        time.sleep(0.5)


postion = []
cap = cv2.VideoCapture(0)
Connect_to_token()
creat_list()

while True:
    _, frame = cap.read()

    # start detection
    boxes, probs = yolo.predict(frame, config["model"]["DEFAULT_THRESHOLD"])
    labels = np.argmax(probs, axis=1) if len(probs) > 0 else []

    # save detection results
    frame = draw_scaled_boxes(frame, boxes, probs, config['model']['labels'])
    label_list = config['model']['labels']
    cv2.imshow("imageo", frame)
    cv2.waitKey(1)

    if 0 in labels:
        # if a thumbs-up is detected
        try:
            Add_to_list()
        except:
            input(" No active device found ... press ENTER when you are back on track")
import numpy as np
import cv2
from yolo import predict
from PIL import Image

cap = cv2.VideoCapture(0)
cv2.waitKey(1000)
print("Start.")

i = 0
key = 1
while i < 100:
    ret, frame = cap.read()
    if cv2.waitKey(200) == 27:
        break
    i += 1
    frame = predict(frame)
    cv2.imshow("PIC", frame)
    cv2.imwrite(f"/Users/Dennis/Desktop/hand genture project/pics/image-{i}.png", frame)

cap.release()
def get_digits_boxes(img_path, show_img=False, save_img=False):
    img_path = img_path.replace('\\', '/')
    # img_path = img_path.replace('\', '/')
    image = cv2.imread(img_path)
    im_size = np.shape(image)
    boxes, probs = yolo.predict(image, float(threshold))
    labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
    probs = np.max(probs, axis=1) if len(probs) > 0 else []

    # 4. save detection result
    image = draw_scaled_boxes(image, boxes, probs, config['model']['labels'])

    centers = []
    for count in range(len(boxes)):
        box = boxes[count]
        x = (box[0] + box[2] / 2) / im_size[0]
        y = (box[1] + box[3] / 2) / im_size[1]
        centers.append([x, y])
    centers = np.array(centers)

    jersey_numbers = []
    if len(centers) > 1:
        cluster.fit_predict(centers)
        cluster_labels = cluster.labels_
        clusters = np.unique(cluster_labels)
        conf_score = []
        for c_id in clusters:
            g_centers = centers[list(cluster_labels == c_id)]
            g_labels = labels[list(cluster_labels == c_id)]
            g_probs = probs[list(cluster_labels == c_id)]
            center_x = [center[0] for center in g_centers]
            g_labels = g_labels[np.argsort(center_x)]
            number = int(''.join(str(label) for label in g_labels))
            jersey_numbers.append(number)
            conf_score.append(np.mean(g_probs))
    else:
        jersey_numbers = labels
        conf_score = np.mean(probs)

    numbers_string = ''
    for number in jersey_numbers:
        numbers_string = numbers_string + str(number) + ' '
    cv2.putText(image, 'Jersey Number: %s' % numbers_string, (25, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    # labels = labels[np.argsort(center_x)]
    # number = int((''.join(str(label) for label in labels)))

    if save_img:
        output_path = os.path.join(
            ntpath.dirname(img_path) + '/' +
            (img_path.split("/")[-1]).split('.')[0] + '_out.jpg')
        cv2.imwrite(output_path, image)
    if show_img:
        cv2.imshow("jersey number detected image", image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()

    print("{}-boxes are detected".format(len(boxes)))
    print("jersey numbers detected are {}".format(jersey_numbers))
    return jersey_numbers, conf_score, labels, boxes
# -*- coding: utf-8 -*-
"""image_od

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1bYWenJ9BGhpIs7HgljiKhs0lX9_hzv73

# **Import libraries**
"""

import argparse

from keras import backend as K  # needed for K.get_session(); assumed here, may already be exposed by od_utils
from yolo import predict
from od_utils import *

"""**construct the argument parse and parse the arguments**"""

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help='input image')
# ap.add_argument('-y', '--yolo', required=True, help='base path to YOLO model')
ap.add_argument('-c', '--confidence', type=float, default=0.6,
                help='minimum prob. to filter weak detections')
ap.add_argument('-t', '--threshold', type=float, default=0.5,
                help='threshold when applying non-maxima suppression')
args = vars(ap.parse_args())

"""**Run the graph on an image**"""

sess = K.get_session()
out_scores, out_boxes, out_classes = predict(sess, args)
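# --- Hedged usage note (not from the original source) ------------------------
# Assuming the Colab export above is saved as image_od.py, it would be invoked
# from the command line roughly as (image path is a placeholder):
#
#   python image_od.py --image sample.jpg
#
# --confidence and --threshold are optional and default to 0.6 and 0.5.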
annotations = parse_annotation(config['train']['valid_annot_folder'],
                               config['train']['valid_image_folder'],
                               config['model']['labels'],
                               is_only_detect=config['train']['is_only_detect'])

n_true_positives = 0
n_truth = 0
n_pred = 0
for i in range(len(annotations)):
    img_path = annotations.fname(i)
    img_fname = os.path.basename(img_path)
    image = cv2.imread(img_path)
    true_boxes = annotations.boxes(i)
    true_labels = annotations.code_labels(i)

    boxes, probs = yolo.predict(image, float(args.threshold))
    labels = np.argmax(probs, axis=1) if len(probs) > 0 else []

    # 4. save detection result
    image = draw_scaled_boxes(image, boxes, probs, config['model']['labels'])
    output_path = os.path.join(write_dname, os.path.split(img_fname)[-1])
    cv2.imwrite(output_path, image)
    print("{}-boxes are detected. {} saved.".format(len(boxes), output_path))

    n_true_positives += count_true_positives(boxes, true_boxes, labels, true_labels)
    n_truth += len(true_boxes)
    n_pred += len(boxes)

print(calc_score(n_true_positives, n_truth, n_pred))
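# --- Hedged sketch (not from the original source) ----------------------------
# calc_score() itself is not shown above; the counters passed to it suggest a
# standard precision/recall computation. A hypothetical stand-in, purely for
# illustration (this is not the project's actual calc_score):
def calc_score_sketch(n_true_positives, n_truth, n_pred):
    # precision: matched detections / all detections
    precision = n_true_positives / n_pred if n_pred else 0.0
    # recall: matched detections / all ground-truth boxes
    recall = n_true_positives / n_truth if n_truth else 0.0
    return precision, recall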
annotations = parse_annotation(config['train']['valid_annot_folder'],
                               config['train']['valid_image_folder'],
                               config['model']['labels'],
                               is_only_detect=config['train']['is_only_detect'])

# n_true_positives = 0
# n_truth = 0
# n_pred = 0
# for i in range(len(annotations)):
for filename in os.listdir('datasets/' + evaluation_object + '/images_test'):
    img_path = os.path.join('datasets', evaluation_object, 'images_test', filename)
    img_fname = filename
    image = cv2.imread(img_path)

    boxes, probs = yolo.predict(image, float(DEFAULT_THRESHOLD))
    labels = np.argmax(probs, axis=1) if len(probs) > 0 else []

    # 4. save detection result
    image = draw_scaled_boxes(image, boxes, probs, config['model']['labels'])
    output_path = os.path.join(write_dname, os.path.split(img_fname)[-1])
    label_list = config['model']['labels']
    # cv2.imwrite(output_path, image)
    print("{}-boxes are detected. {} saved.".format(len(boxes), output_path))

    if len(probs) > 0:
        create_ann(filename, image, boxes, labels, label_list)
        cv2.imwrite(output_path, image)