def on_epoch_begin(self, epoch, logs=None):
    # At the start of every epoch, visualise three random triplets:
    # anchor image, a same-identity image, and a different-identity image.
    for i in range(3):
        plt.clf()
        random_int = random.randint(0, len(test_dataset_2) - 1)
        image, _ = test_dataset_2[random_int]
        # print(image[0].shape)
        same_l1 = detect_image(image[0], image[1], model=pred_model)  # distance to same identity
        diff_l2 = detect_image(image[0], image[2], model=pred_model)  # distance to different identity
        plt.subplot(1, 3, 1)
        plt.imshow(np.array(image[0]))
        plt.subplot(1, 3, 2)
        plt.imshow(np.array(image[1]))
        plt.text(-12, -12, 'same:%.3f' % same_l1, ha='center', va='bottom', fontsize=11)
        plt.subplot(1, 3, 3)
        plt.imshow(np.array(image[2]))
        plt.text(-24, -12, 'diff:%.3f' % diff_l2, ha='center', va='bottom', fontsize=11)
        plt.savefig(r'image\test_epoch_%s_%s.png' % (epoch, i))
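# Usage sketch (assumption): on_epoch_begin above is a method of a keras.callbacks.Callback
# subclass. The class name VisualizeCallback and the epoch count are hypothetical, and
# model / train_dataset are assumed to exist in the surrounding training script.
from tensorflow import keras

class VisualizeCallback(keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs=None):
        ...  # body as defined above

# Register the callback so the comparison plots are written at the start of every epoch.
model.fit(train_dataset, epochs=40, callbacks=[VisualizeCallback()])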
def hello():
    K.clear_session()
    if request.method == 'POST':
        algo = request.form['algo']
        if 'file_input' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file_input']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            file_data = file.stream.read()
            nparr = np.frombuffer(file_data, np.uint8)  # np.fromstring is deprecated
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            # The two branches only differ in weights file and confidence threshold.
            if algo:
                yolo = YOLO(0.6, 0.5, 'yolo.h5')
            else:
                yolo = YOLO(0.8, 0.5, 'tiny_yolo.h5')
            # result[0] is the annotated image; result[1] is a list of (class, prob, [box coords]) tuples
            result = detect_image(img, yolo)
            print(result[1])
            # Encode the annotated image as a base64 data URI for the result template.
            img_str = cv2.imencode('.jpg', result[0])[1].tobytes()  # tostring() is deprecated
            encoded = base64.b64encode(img_str).decode("utf-8")
            mime = "image/jpg;"
            out_image = f"data:{mime}base64,{encoded}"
            # cv2.imwrite(os.path.join(app.config['RESULT_FOLDER'], filename), result[0])
            return render_template('result.html', out_image=out_image)
        else:
            return "File extension not supported"
    return render_template('index.html')
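# A minimal sketch of the Flask scaffolding the route above relies on; the secret key,
# extension whitelist, and route path are illustrative assumptions, not taken from the
# original project.
import base64
import cv2
import numpy as np
from flask import Flask, request, flash, redirect, render_template
from werkzeug.utils import secure_filename
from tensorflow.keras import backend as K

app = Flask(__name__)
app.secret_key = "change-me"                  # required for flash(); placeholder value
ALLOWED_EXTENSIONS = {"jpg", "jpeg", "png"}   # assumed whitelist

def allowed_file(filename):
    # Helper assumed by the route: accept only whitelisted image extensions.
    return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route("/", methods=["GET", "POST"])
def hello():
    ...  # body as above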
def post(self):
    try:
        model = load_model()
    except Exception:
        print("Model failed to load")
        return Response(status=500)
    try:
        nparr = np.frombuffer(request.data, np.uint8)  # np.fromstring is deprecated
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    except Exception:
        print("Image failed to load")
        return Response(status=400)
    try:
        response = detect_image(img, model)
        print(response[1:])  # detections only; response[0] is the annotated image
        response_pickled = jsonpickle.encode(response)
    except Exception:
        print("Detection failed")
        return Response(status=500)
    return Response(response=response_pickled, status=200, mimetype="application/json")
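# A quick way to exercise the endpoint above is to POST raw image bytes and decode the
# jsonpickle payload. Minimal client sketch; the URL, port, and route are assumptions.
import jsonpickle
import requests

URL = "http://localhost:5000/detect"   # hypothetical mount point of the resource

with open("test.jpg", "rb") as f:
    resp = requests.post(URL, data=f.read(), headers={"Content-Type": "image/jpeg"})

detections = jsonpickle.decode(resp.text)
print(detections[1:])  # class / probability / box tuples, mirroring the server-side print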
def detect_traffic_lights(PATH_TO_TEST_IMAGES_DIR, Num_images, plot_flag=False):
    """
    Detect traffic lights and draw bounding boxes around them.

    :param PATH_TO_TEST_IMAGES_DIR: directory with the test images
    :param Num_images: number of images to process (currently unused)
    :param plot_flag: whether to plot results (currently unused)

    Prints 'go' or 'stop' per image and writes the cropped traffic-light region to images/res/.
    """
    TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES_DIR, '{}'.format(i))
                        for i in os.listdir(PATH_TO_TEST_IMAGES_DIR)]
    Image.MAX_IMAGE_PIXELS = None   # allow very large images
    yolo = YOLO(0.6, 0.5)           # build the detector once, not per image
    for image_path in TEST_IMAGE_PATHS:
        image = Image.open(image_path)
        image_np = load_image_into_numpy_array(image)
        # result is a list of tuples: (class, score, [box coords])
        result = detect_image(cv2.imread(image_path), yolo, coco_classes)
        classes = result[0]
        scores = result[1]
        boxes = result[2]
        red_flag, crop_img = read_traffic_lights(image, np.array(boxes), np.array(scores),
                                                 np.array(classes).astype(np.int32))
        # crop_img is RGB; flip channels to BGR for cv2.imwrite
        cv2.imwrite('images/res/' + image_path.rsplit('/', 1)[-1], crop_img[..., ::-1])
        if red_flag:
            print('{}: stop'.format(image_path))  # red or yellow
        else:
            print('{}: go'.format(image_path))
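# Illustrative invocation of detect_traffic_lights; the directory name and image count
# are placeholder values, not taken from the original project.
if __name__ == "__main__":
    detect_traffic_lights('images/test', Num_images=10, plot_flag=False)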
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
from configs import *

image_path = ""
video_path = ""

yolo = Load_Yolo_model()
detect_image(yolo, image_path, "", input_size=YOLO_INPUT_SIZE, show=True,
             CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
#detect_video(yolo, video_path, "", input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, "", input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
def update_dashboard(generate_dashboard):
    # Download all the images from blob storage and run them through the deep learning model.
    images_div = []
    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
    container_client = blob_service_client.get_container_client("pruebasds4")
    my_blobs = container_client.list_blobs()
    # Column name list
    column_names = ['id', 'x1', 'y1', 'x2', 'y2', 'Score', 'Class']
    # Row list
    rows = []
    names = []
    images = []
    for blob in my_blobs:
        image_downloaded = container_client.download_blob(blob.name).readall()
        decoded_data = base64.b64decode(image_downloaded)
        np_data = np.frombuffer(decoded_data, np.uint8)  # np.fromstring is deprecated
        img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
        # Feed the image into the model
        prediction = detect_image(
            yolo, img, "./IMAGES/HU8_detect.jpg", input_size=416, show=True,
            CLASSES="./model_data/license_plate_names.txt",
            rectangle_colors=(255, 0, 0))
        if prediction[1][0]:
            for box_stats in prediction[1]:
                box_stats['id'] = blob.name
                rows.append(box_stats)
        # Encode the annotated image as base64 for the dashboard
        retval, buffer_img = cv2.imencode('.jpg', prediction[0])
        data = base64.b64encode(buffer_img)
        images_div.append('data:image/png;base64,{}'.format(data.decode()))
    # Build the detections dataframe
    df = pd.DataFrame(rows, columns=column_names)
    df['Class'] = df['Class'].apply(lambda x: change_classname(x))
    children = [generate_thumbnail(data) for data in images_div]
    df2 = df.groupby(['Class']).size().reset_index(name='counts')
    # Generate plots
    colors = ['#61892F', '#6B6E70', '#86C232']
    ploty1 = go.Figure(
        data=[go.Pie(labels=df2['Class'], values=df2['counts'], hole=.5)])
    ploty1.update_layout(height=350, width=470, paper_bgcolor='rgba(0,0,0,0)',
                         plot_bgcolor='rgba(0,0,0,0)',
                         font=dict(family="Enriqueta, Times New Roman", size=16,
                                   color='rgb(97,137,47)'),
                         title="Failure Distribution", title_x=0.5)
    ploty1.update_traces(marker=dict(colors=colors))
    table = generate_table(df)
    return children, table, ploty1
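# The dashboard update above assumes helpers such as generate_thumbnail, generate_table,
# and change_classname that are not shown here. A hypothetical sketch of generate_thumbnail
# (layout values are illustrative only, not from the original dashboard):
from dash import html

def generate_thumbnail(image_uri):
    # Wrap one base64 data URI in a fixed-size inline thumbnail.
    return html.Div(
        html.Img(src=image_uri, style={"height": "120px", "padding": "5px"}),
        style={"display": "inline-block"},
    )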
input_size = YOLO_INPUT_SIZE
path_in = os.path.join(PATH_DIR, "Images", "401-2")
save_to = os.path.join(PATH_DIR, "Images", "crop.jpg")

# Load Keras weights
yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
yolo.load_weights("weights/checkpoints2/yolov3_custom")

# Detect bounding boxes in each image and crop every detection
for i, filename in enumerate(glob.glob(path_in + r"\*.jpg")):
    print(filename)
    orig, img, locs = detect_image(yolo, filename, save_to, input_size=input_size, show=True,
                                   CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
    for j, crops in enumerate(locs):
        # Crop the detected box (x1, y1, x2, y2) and scale it to a height of 32 px.
        new_img = orig[crops[1]:crops[3], crops[0]:crops[2], :3]
        new_img = Image.fromarray(new_img)
        w_size = int(float(new_img.size[0]) * float(32 / float(new_img.size[1])))
        new_img = new_img.resize((w_size, 32), Image.ANTIALIAS)
        # path_ext = str(i+1200) + "-" + str(j) + ".jpg"
        # new_img.save(os.path.join(r"C:\Users\sid_a\PycharmProjects\MTO_Highway_Analysis\time_stamps", path_ext))
    print(i)
import os
import time

import matplotlib.pyplot as plt
import numpy as np

os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # force CPU inference

test_image_path = r'E:\DataSets\DukeMTMC-reID\DukeMTMC-reID\query'
test_dataset_2 = Person_Dataset(test_image_path, batch_size=1, train=False)

input_size = (215, 90, 3)
model, pred_model = Create_Model(inpt=input_size, num_classes=1812)
model.load_weights(r'logs\ep039-loss0.066.h5')

true_ = 0
for i in range(len(test_dataset_2)):
    image, _ = test_dataset_2[i]
    t1 = time.time()
    same_l1 = detect_image(image[0], image[1], model=pred_model)
    t2 = time.time()
    diff_l2 = detect_image(image[0], image[2], model=pred_model)
    plt.subplot(1, 3, 1)
    plt.imshow(np.array(image[0]))
    plt.subplot(1, 3, 2)
    plt.imshow(np.array(image[1]))
    plt.text(-12, -12, 'same:%.3f' % same_l1, ha='center', va='bottom', fontsize=11)