Example #1
def predict():
    if request.method == 'POST':

        # Read the base64-encoded image bytes from the request body
        im_data = request.get_data()

        with open("capture.jpg", "wb") as im:
            im.write(base64.decodebytes(im_data))

        # image_path = file_path
        image_path = 'capture.jpg'

        # The model expects a 4-dimensional tensor with layout [batch, height, width, channels]
        # Make the prediction
        yolo = Load_Yolo_model()
        image, score, label = detect_image(yolo,
                                           image_path,
                                           "detect.jpg",
                                           input_size=YOLO_INPUT_SIZE,
                                           show=True,
                                           CLASSES=TRAIN_CLASSES,
                                           rectangle_colors=(255, 0, 0))
        label = str(label)
        score = str(score)
        result = str([score, label])
        return result

        # detect_predict(data)

        # return get_detected_object

    return None
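A minimal client for this endpoint might look like the following sketch; the host and the '/predict' route are assumptions, since the snippet does not show the Flask routing decorator.

# Hypothetical client: POST a base64-encoded JPEG as the raw request body,
# matching the request.get_data() / base64.decodebytes() pair above.
import base64
import requests

with open("capture.jpg", "rb") as f:
    payload = base64.b64encode(f.read())

resp = requests.post("http://localhost:5000/predict", data=payload)
print(resp.text)  # e.g. "['0.93', 'person']"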
Example #2
    def aiPredict(self, image_path):
        # block_image_path = "./block"
        # image_path   = "./test10.jpg"
        # video_path   = "./IMAGES/test2.mp4"

        image = cv2.imread(image_path)
        _, cord = detect_image(self.yolo,
                               image_path,
                               output_path="./answer.jpg",
                               input_size=YOLO_INPUT_SIZE,
                               show=False,
                               CLASSES=TRAIN_CLASSES,
                               rectangle_colors=(255, 0, 0))

        double_check_list = []
        for i, block in enumerate(cord):
            x1 = int(block[0][0])
            x2 = int(block[1][0])
            y1 = int(block[0][1])
            y2 = int(block[1][1])
            crop_img = image[y1:y2, x1:x2]  # numpy slicing requires integer coordinates
            double_check_list.append(crop_img)
            #cv2.imwrite(f"./block/block_{i}.jpg", crop_img)

        x_test = self.load_cnn_data(double_check_list)
        reload_model = tf.keras.models.load_model(
            './cnn_model/resNet_model3_w128_h128.h5')

        predict_class = reload_model.predict(x_test)
        predict_class = np.argmax(predict_class, axis=1)

        ret = self.food_name(predict_class)
        # print(ret)
        return ret
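load_cnn_data is not shown in this example; a minimal sketch consistent with the 128x128 checkpoint name could resize each crop and stack the results into a batch (the 1/255 scaling is an assumption):

# Hypothetical load_cnn_data: resize each cropped block to the CNN input
# size and stack into an (N, 128, 128, 3) float32 batch.
import cv2
import numpy as np

def load_cnn_data(crops, size=(128, 128)):
    batch = [cv2.resize(img, size).astype(np.float32) / 255.0 for img in crops]
    return np.stack(batch, axis=0)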
Example #3
	def callback(self,data):
		try:
			W, H = YOLO_INPUT_SIZE, YOLO_INPUT_SIZE
			cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)

			cv_image2, bboxes = detect_image(yolo, cv_image, "", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255, 0, 0))
			x1, y1, x2, y2, Score, C = Give_boundingbox_coor_class(bboxes)

			boxes = []
			for i in range(len(x1)):
				boxes.append([x1[i], y1[i], x2[i], y2[i], Score[i], C[i]])
			boxes = np.array(boxes)
			self.OH.add(boxes)

			for Object in self.OH.Known:
				if Object[10] <= 5:  # Object[10] appears to be the tracker's missed-frame counter
					cv2.rectangle(cv_image, (Object[5], Object[6]), (Object[7], Object[8]),(0, 255, 0), 2)
					text = "Class: {}, ID {}".format(self.ClassNames.get(Object[2]),Object[1])
					cv2.putText(cv_image, text, (Object[5], Object[6]-5),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
					cv2.circle(cv_image, (Object[3], Object[4]), 4, (0, 255, 0), -1)
				else:
					cv2.circle(cv_image, (Object[3], Object[4]), 4, (255, 0, 0), -1)

			cv2.imshow("Image_window", cv_image)
			cv2.waitKey(3)
		except CvBridgeError as e:
			print(e)
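Give_boundingbox_coor_class is a project helper that is not shown here. In this repo, detect_image returns bboxes whose rows are [x1, y1, x2, y2, score, class], so a plausible sketch simply splits the columns into lists:

# Hypothetical Give_boundingbox_coor_class: split bbox rows into per-column
# lists, matching how the callbacks in these examples consume the result.
def Give_boundingbox_coor_class(bboxes):
    x1 = [int(b[0]) for b in bboxes]
    y1 = [int(b[1]) for b in bboxes]
    x2 = [int(b[2]) for b in bboxes]
    y2 = [int(b[3]) for b in bboxes]
    score = [float(b[4]) for b in bboxes]
    c = [int(b[5]) for b in bboxes]
    return x1, y1, x2, y2, score, c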
Example #4
    def callback(self, data):
        try:
            #np_arr = np.fromstring(data.data, np.uint8)
            #cv_image=cv2.imdecode(np_arr,cv2.COLOR_BGR2RGB)
            cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)
            #cv_cam=cv_image
            #cv2.imshow("cam",cv_cam)
        except CvBridgeError as e:
            print(e)

        ####
        cv_image, bboxes = detect_image(yolo,
                                        cv_image,
                                        "",
                                        input_size=YOLO_INPUT_SIZE,
                                        show=False,
                                        rectangle_colors=(255, 0, 0))
        #cv_image=detect_image(yolo, cv_image, "", input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0)) # used later for custom weights
        # bboxes rows are [x1, y1, x2, y2, score, class]
        #print(bboxes)
        x1, y1, x2, y2, Score, C = Give_boundingbox_coor_class(bboxes)
        #print(x1)
        ####

        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)
Example #5
def detection(request):

    # Save the base64 image as a jpg
    im = get_image_from_data_url(request.data['image'])

    image = Images()
    image.images = im
    image.save()

    image = Images.objects.last()
    image.images = im
    image.save()

    # The expected answer to compare against
    answer = request.data['question']

    # Prepare to use YOLO
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    video_path = ""

    # Run detection with the YOLO model
    last_image = Images.objects.last().images
    yolo = Load_Yolo_model()
    is_correct, info = detect_image(yolo,
                                    image.images.url,
                                    answer,
                                    input_size=YOLO_INPUT_SIZE,
                                    show=True,
                                    rectangle_colors=(255, 0, 0))

    return Response({
        'message': 'Photo test complete!!',
        'is_correct': is_correct,
        'info': info
    })
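get_image_from_data_url is a project helper that is not shown; a minimal sketch for Django, assuming the request carries a "data:image/...;base64," URL, could decode it into a ContentFile that can be assigned to the ImageField:

# Hypothetical get_image_from_data_url: decode a base64 data URL into a
# named Django ContentFile.
import base64
import uuid
from django.core.files.base import ContentFile

def get_image_from_data_url(data_url):
    header, encoded = data_url.split(',', 1)
    ext = header.split('/')[1].split(';')[0]  # e.g. "jpeg"
    return ContentFile(base64.b64decode(encoded), name=f"{uuid.uuid4()}.{ext}")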
Example #6
def image_crop_2_array(img_path, debug=True):
    pil_image = PIL.Image.open(img_path).convert('RGB')
    print(pil_image)
    original_image = np.array(pil_image)  #cv2.imread(img_path)
    if debug:
        plt.figure(figsize=(30, 15))
        plt.imshow(original_image)
        pil_image.show()
    image_data = image_preprocess(np.copy(original_image),
                                  [YOLO_INPUT_SIZE, YOLO_INPUT_SIZE])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    pred_bbox = yolo.predict(image_data)
    image = detect_image(yolo,
                         img_path,
                         "",
                         input_size=YOLO_INPUT_SIZE,
                         show=False,
                         CLASSES=TRAIN_CLASSES,
                         rectangle_colors=(255, 0, 0))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if debug:
        plt.figure(figsize=(30, 15))
        plt.imshow(image)
        #image.show()
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = postprocess_boxes(pred_bbox, original_image, YOLO_INPUT_SIZE,
                               TEST_SCORE_THRESHOLD)
    bboxes = nms(bboxes, TEST_IOU_THRESHOLD, method='nms')
    if len(bboxes) != 0:
        return original_image[int(bboxes[0][1]):int(bboxes[0][3]),
                              int(bboxes[0][0]):int(bboxes[0][2])]
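Example usage (the file names are placeholders); the crop comes back in RGB order because the function reads the image with PIL, so it is converted before cv2.imwrite:

crop = image_crop_2_array("test.jpg", debug=False)
if crop is not None:
    cv2.imwrite("crop.jpg", cv2.cvtColor(crop, cv2.COLOR_RGB2BGR))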
Example #7
    def callback(self):
        image = rospy.wait_for_message(
            "/zed2/zed_node/left/image_rect_color/compressed", CompressedImage)
        time1 = rospy.Time.now().to_sec()
        self.timer.header = image.header
        self.timer.header.frame_id = "zed2_left_camera_frame"
        self.timer.time_ref = rospy.Time.now()
        self.timer_pub.publish(self.timer)
        cv_image = self.bridge.compressed_imgmsg_to_cv2(image, "bgr8")
        _, bboxes = detect_image(self.yolo,
                                 cv_image,
                                 "",
                                 input_size=YOLO_INPUT_SIZE,
                                 show=False,
                                 CLASSES=TRAIN_CLASSES,
                                 score_threshold=0.3,
                                 iou_threshold=0.1,
                                 rectangle_colors=(255, 0, 0))
        detect = Detection2DArray()
        detect.header = image.header

        for Object in bboxes:
            detection = Detection2D()
            hypo = ObjectHypothesisWithPose()
            #Start x
            x1 = Object[0]
            #End x
            x2 = Object[2]
            #Start y
            y1 = Object[1]
            #End y
            y2 = Object[3]

            #Size x
            Sx = x2 - x1
            #Size y
            Sy = y2 - y1
            #Center x
            Cx = x1 + Sx / 2
            #Center y
            Cy = y1 + Sy / 2

            detection.bbox.center.x = Cx
            detection.bbox.center.y = Cy
            detection.bbox.size_x = Sx
            detection.bbox.size_y = Sy

            hypo.id = int(Object[5])
            hypo.score = Object[4]

            detection.results = [
                hypo,
            ]
            detection.is_tracking = False
            detect.detections.append(detection)
        self.boxes_pub.publish(detect)

        self.callback()  # re-invoke for the next frame; note this recursion will eventually hit Python's recursion limit, so a loop or rospy.Timer is safer
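A consumer of the published Detection2DArray might look like this sketch; the topic name is an assumption, since the snippet does not show how self.boxes_pub was created.

# Hypothetical subscriber for the detections published above.
import rospy
from vision_msgs.msg import Detection2DArray

def on_detections(msg):
    for det in msg.detections:
        c = det.bbox.center
        print("class=%d score=%.2f center=(%.1f, %.1f) size=(%.1f, %.1f)" %
              (det.results[0].id, det.results[0].score,
               c.x, c.y, det.bbox.size_x, det.bbox.size_y))

rospy.Subscriber("/detections", Detection2DArray, on_detections)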
Example #8
def plate_number(image_path):
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
    yolo.load_weights("./checkpoints/yolov3_custom")  # use keras weights
    image, bb = detect_image(yolo,
                             image_path,
                             '',
                             input_size=YOLO_INPUT_SIZE,
                             show=False,
                             rectangle_colors=(255, 0, 0))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    c = np.array(bb[0][:4], dtype=np.int32)
    org_image = cv2.imread(image_path)
    cropped = org_image[c[1]:c[3], c[0]:c[2]]
    cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)
    cropped = cv2.resize(cropped, (224, 224))
    gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7, 7), 0)
    binary = cv2.threshold(blur, 180, 255,
                           cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
    cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)

    # Create a copy "test_roi" of the plate image to draw bounding boxes on

    test_roi = cropped.copy()

    # Initialize a list to collect the character images
    crop_characters = []

    # define standard width and height of character
    digit_w, digit_h = 30, 60

    for c in sort_contours(cont):
        (x, y, w, h) = cv2.boundingRect(c)
        ratio = h / w
        if 3 <= ratio < 5:  # only keep contours with a character-like aspect ratio
            #if h/plate_image.shape[0]>=0.5: # Select contour which has the height larger than 50% of the plate
            # Draw a bounding box around the digit
            cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Separate the digit and run the character model on it
            curr_num = thre_mor[y:y + h, x:x + w]
            curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
            _, curr_num = cv2.threshold(curr_num, 220, 255,
                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            crop_characters.append(curr_num)

    #print("Detect {} letters...".format(len(crop_characters)))
    final_string = ''
    for i, character in enumerate(crop_characters):
        title = np.array2string(predict_from_model(character))
        final_string += title.strip("'[]")

    return final_string
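sort_contours is a helper that is not shown; for plate reading the characters need to come out left to right, so a minimal sketch can sort the contours by the x coordinate of their bounding boxes:

# Hypothetical sort_contours: order contours left-to-right by bounding-box x.
import cv2

def sort_contours(contours):
    boxes = [cv2.boundingRect(c) for c in contours]
    return [c for c, _ in sorted(zip(contours, boxes), key=lambda cb: cb[1][0])]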
Example #9
    def callback(self, image, cloud):
        # RGB image
        print("running")
        cv_image = self.bridge.imgmsg_to_cv2(image, image.encoding)
        # Yolo to get Boundary Boxes
        _, bboxes = detect_image(yolo,
                                 cv_image,
                                 "",
                                 input_size=YOLO_INPUT_SIZE,
                                 show=False,
                                 rectangle_colors=(255, 0, 0))

        cv2.imshow("Depth", cv_image)
        cv2.waitKey(3)
        """
Example #10
    def callback_yolo(self, image):
        image = self.image  # use the most recently cached frame rather than the callback argument
        cv_image = self.bridge.compressed_imgmsg_to_cv2(image, "bgr8")

        _, bboxes = detect_image(self.yolo,
                                 cv_image,
                                 "",
                                 input_size=YOLO_INPUT_SIZE,
                                 show=False,
                                 rectangle_colors=(255, 0, 0))
        detect = Detection2DArray()
        detect.header = image.header

        for Object in bboxes:
            detection = Detection2D()
            hypo = ObjectHypothesisWithPose()
            #Start x
            x1 = Object[0]
            #End x
            x2 = Object[2]
            #Start y
            y1 = Object[1]
            #End y
            y2 = Object[3]

            #Size x
            Sx = x2 - x1
            #Size y
            Sy = y2 - y1
            #Center x
            Cx = x1 + Sx / 2
            #Center y
            Cy = y1 + Sy / 2

            detection.bbox.center.x = Cx
            detection.bbox.center.y = Cy
            detection.bbox.size_x = Sx
            detection.bbox.size_y = Sy

            hypo.id = int(Object[5])
            hypo.score = Object[4]

            detection.results = [
                hypo,
            ]
            detect.detections.append(detection)

        self.bbox_pub.publish(detect)
Example #11
    def calculation(self):
        imagecv_cam = self.cv_image_cam
        imagecv_depth = self.cv_image_depth
        if len(imagecv_cam) != 0:  # comparing a numpy image to [] is ambiguous; check its length instead
            imagecv_cam, bboxes = detect_image(yolo, imagecv_cam, "", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255, 0, 0))
            x1, y1, x2, y2, _, _ = Give_boundingbox_coor_class(bboxes)
            print(x1, y1, x2, y2)
        if len(imagecv_depth) != 0:
            for i in range(len(bboxes)):
                patch = (int(x2[i] - x1[i]), int(y2[i] - y1[i]))  # width and height of the bbox
                center = (int(x1[i] + patch[0] / 2), int(y1[i] + patch[1] / 2))  # center coordinates of the bbox (global)
                Distance_to_center_of_bbox_wrt_global = imagecv_depth[center[1], center[0]]  # indexed as (height y, width x)
                print("dyb")
        return imagecv_cam
Example #12
    def callback(self, image, cloud):
        Time = float("%.6f" % image.header.stamp.to_sec())  # time stamp of the image in this callback
        # Generate images from msgs
        cv_image = self.bridge.imgmsg_to_cv2(image, image.encoding)
        #cv_image_depth = self.bridge.imgmsg_to_cv2(depth, depth.encoding)
        pc_list, cv_image_pc = PC_dataxyz_to_PC_image(cloud,
                                                      Org_img_height=376,
                                                      Org_img_width=672)
        # Yolo to get Boundary Boxes
        _, bboxes = detect_image(self.yolo,
                                 cv_image,
                                 "",
                                 input_size=YOLO_INPUT_SIZE,
                                 show=False,
                                 rectangle_colors=(255, 0, 0))
        # Convert Boundary boxes to readable values
        PC_image_bbox_sub_series = Sub_pointcloud(cv_image_pc, bboxes)
        _, segmentation_img, xyzcoord_series = DBSCAN_pointcloud(
            PC_image_bbox_sub_series, bboxes,
            seg_plot=self.seg_plot)  # keep the segmentation image for the optional display below

        x1, y1, x2, y2, Score, C = Give_boundingbox_coor_class(bboxes)
        boxes = []
        for i in range(len(bboxes)):
            boxes.append([
                x1[i], y1[i], x2[i], y2[i], Score[i], C[i], xyzcoord_series[i],
                Time
            ])
        boxes = np.array(boxes)
        self.OH.add(boxes)

        TrackID = 0

        if len(self.OH.Known) > 0:
            Target = self.OH.Known[TrackID]
            TargetOrder = self.OH.KnownOrder.get

            Reduced_PC = PC_reduc(Target, TargetOrder, pc_list, cloud)
            self.reduc_cloud_pub.publish(Reduced_PC)

            Pose = Waypoint_planter(Target, TargetOrder,
                                    "zed2_left_camera_frame", rospy.Time.now())
            self.pose_pub.publish(Pose)

        if self.show:
            self.show_img(cv_image, segmentation_img)
Example #13
    def calculation(self):
        imagecv_cam = self.cv_image_cam
        imagecv_depth = self.cv_image_depth
        imagecv_depth_series = []
        img_seg = []
        boxes = []
        if len(imagecv_cam) != 0:
            imagecv_cam, bboxes = detect_image(yolo,
                                               imagecv_cam,
                                               "",
                                               input_size=YOLO_INPUT_SIZE,
                                               show=False,
                                               rectangle_colors=(255, 0, 0))
            x1, y1, x2, y2, Score, C = Give_boundingbox_coor_class(bboxes)
        if len(imagecv_depth) != 0:
            for i in range(len(bboxes)):
                patch = (int(x2[i] - x1[i]), int(y2[i] - y1[i])
                         )  # width and height of the bbox
                center = (int(x1[i] + patch[0] / 2), int(y1[i] + patch[1] / 2)
                          )  # center coordinates of the bbox (global)
                cv_image_bbox_sub = cv2.getRectSubPix(
                    imagecv_depth, patch,
                    center)  # extract the bbox region from the depth image
                cv_image_bbox_sub = np.where(
                    np.isnan(cv_image_bbox_sub), self.min_depth,
                    cv_image_bbox_sub)  # replace NaN with the minimum depth
                cv_image_bbox_sub = np.where(
                    np.isinf(cv_image_bbox_sub), self.min_depth,
                    cv_image_bbox_sub)  # replace +/-inf with the minimum depth
                avg_depth, img_seg = k_means_depth(cv_image_bbox_sub)
                D_to_C_of_bbox_L = cv_image_bbox_sub[
                    int(patch[1] / 2),
                    int(patch[0] / 2)]  # (height y, width x): distance to the bbox center in the local patch
                imagecv_depth_series.append(cv_image_bbox_sub)

                boxes.append(
                    [x1[i], y1[i], x2[i], y2[i], Score[i], C[i], avg_depth])

        boxes = np.array(boxes)
        return boxes
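k_means_depth is a project helper that is not shown; a plausible sketch clusters the depth patch into two groups and treats the nearer cluster as the object, returning its mean depth and a binary segmentation image:

# Hypothetical k_means_depth: k=2 clustering over depth values; the nearer
# cluster is assumed to be the detected object.
import numpy as np
from sklearn.cluster import KMeans

def k_means_depth(depth_patch):
    values = depth_patch.reshape(-1, 1).astype(np.float32)
    labels = KMeans(n_clusters=2, n_init=10).fit_predict(values)
    means = [values[labels == k].mean() for k in (0, 1)]
    fg = int(np.argmin(means))
    seg = (labels.reshape(depth_patch.shape) == fg).astype(np.uint8) * 255
    return float(means[fg]), seg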
Example #14
def logo_predict(img_path, output_folder_path, input_size, logo_classes, yolo_model):

    # # input size 416x416
    # input_size = YOLO_INPUT_SIZE
    # # list of class names
    # logo_classes = TRAIN_CLASSES

    image_path = img_path

    yolo = Create_Yolov3(input_size=input_size, CLASSES=logo_classes)
    yolo.load_weights(yolo_model)

    date_time = datetime.now().strftime("%d_%m_%y-%H_%M_%S")  # avoid ':' and '|', which are not allowed in file names on Windows
    LogoFileName = os.path.join(output_folder_path, "logo_pred_" + date_time + ".jpg")

    # detect_image here returns a tuple of (label values, image)
    label, _ = detect_image(yolo, image_path, LogoFileName, input_size=input_size, show=True, CLASSES=logo_classes, rectangle_colors=(255,0,0))

    predicted_label = label[0]

    return (predicted_label, LogoFileName)
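An example call, with placeholder paths and the custom logo checkpoint from the script below:

predicted_label, out_file = logo_predict("sample_logo.jpg", "./output",
                                         YOLO_INPUT_SIZE, TRAIN_CLASSES,
                                         "./checkpoints/yolov3_custom_logo")
print(predicted_label, out_file)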
Example #15
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS
if TRAIN_YOLO_TINY:
    Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS

image_path = "./IMAGES/TATA_12_aug_8.jpg"
video_path = "./IMAGES/city.mp4"

yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
yolo.load_weights("./checkpoints/yolov3_custom_logo")  # use keras weights

# detect_image here returns a tuple of (label values, image)
label, _ = detect_image(yolo,
                        image_path,
                        "./IMAGES/logo_tata12_detect.jpg",
                        input_size=input_size,
                        show=True,
                        CLASSES=TRAIN_CLASSES,
                        rectangle_colors=(255, 0, 0))
print(label)

#detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=input_size, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
Example #16
import os
import glob
import time

from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import detect_image
from yolov3.configs import *

IMAGE_HEIGHT = 416
IMAGE_WIDTH = 416
CHANNELS = 3

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
input_size = YOLO_INPUT_SIZE

model = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
model.load_weights("./checkpoints/yolov3_custom")

dir_path = "Dataset\\Testing\\*"
print(len(glob.glob(dir_path)))
start = time.time()

for i, filename in enumerate(glob.glob(dir_path)):
    detect_image(model,
                 filename,
                 "Dataset/Testing_out/" + str(i + 1) + ".jpg",
                 input_size=input_size,
                 show=True,
                 CLASSES=TRAIN_CLASSES,
                 rectangle_colors=(255, 0, 0))
    print(i)
print("Time taken to run : " + str(time.time() - start))
Example #17
#   Website     : https://pylessons.com/
#   GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
#   Description : mnist object detection example
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import cv2
import numpy as np
import random
import time
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import detect_image
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE

# Create the model once, outside the loop
yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
yolo.load_weights("./checkpoints/yolov3_calculator") # use the calculator model

while True:
    ID = random.randint(0, 200)
    label_txt = "calculator_dataset/calculator_test.txt"
    image_info = open(label_txt).readlines()[ID].split()

    image_path = image_info[0]

    detect_image(yolo, image_path, "number_detection_test.jpg", input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
    time.sleep(5)
Example #18
# ================================================================
#
#   File name   : detection_custom.py
#   Description : Performs object detection for images
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
from yolov3.configs import *


image_path = "/home/014505660/fl_project/kitti_dataset/testing/image_2/000010.png"

yolo = Load_Yolo_model()
detect_image(yolo, image_path, "./IMAGES/test_1_detect.jpg", input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
Example #19
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
from yolov3.configs import *

image_path   = "image.jpg"
video_path   = "test.mp4"

yolo = Load_Yolo_model()
detect_image(yolo, image_path, "detect.jpg", input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255,0,0))
#detect_video(yolo, video_path, "", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255, 0, 0))

#detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255,0,0), realtime=False)
Example #20
#   File name   : detection_demo.py
#   Author      : PyLessons
#   Created date: 2020-09-27
#   Website     : https://pylessons.com/
#   GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
#   Description : object detection image and video example
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
from yolov3.configs import *

image_path = "./IMAGES/street.jpg"
video_path = "./IMAGES/test.mp4"

yolo = Load_Yolo_model()
detect_image(yolo,
             image_path,
             "./IMAGES/street_pred.jpg",
             input_size=YOLO_INPUT_SIZE,
             show=True,
             rectangle_colors=(255, 0, 0))
#detect_video(yolo, video_path, "", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255, 0, 0))

#detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255,0,0), realtime=False)
Example #21
#   Website     : https://pylessons.com/
#   GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
#   Description : object detection image and video example
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS
if TRAIN_YOLO_TINY:
    Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS

image_path   = "./IMAGES/a.jpg"
video_path   = "./IMAGES/city.mp4"

yolo = Create_Yolov3(input_size=input_size, CLASSES="2_names.txt")
yolo.load_weights("./checkpoints/yolov3_custom") # use keras weights

orig, img, locs = detect_image(yolo, image_path, "x.jpg", input_size=input_size, show=True, CLASSES="2_names.txt", rectangle_colors=(255,0,0))
print("Pokemon(s) Found in Image at:")
print("[Start X, Start Y, End X, End Y]:", locs)
#detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=input_size, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
Example #22
def detect(index, image):
    bboxes = detect_image(yolo, image, input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES,
                          score_threshold=0.5, iou_threshold=0.3)
    draw_bboxes(image, bboxes)
Example #23
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS
if TRAIN_YOLO_TINY:
    Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS

#image_path   = "./IMAGES/boat.jpg"
image_path = "./IMAGES/group.jpg"
#image_path   = "./IMAGES/plate_2.jpg"
video_path = "./IMAGES/city.mp4"

yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
yolo.load_weights("./checkpoints/yolov3_custom_Tiny")  # use keras weights
#yolo.load_weights("./checkpoints/yolov3_custom") # use keras weights

#detect_image(yolo, image_path, "./IMAGES/boat_detect.jpg", input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
detect_image(yolo,
             image_path,
             "./IMAGES/group_detect.jpg",
             input_size=input_size,
             show=True,
             CLASSES=TRAIN_CLASSES,
             rectangle_colors=(255, 0, 0))

#detect_image(yolo, image_path, "./IMAGES/plate_1_detect.jpg", input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=input_size, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
Example #24
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import random
import time
import tensorflow as tf
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import detect_image
from yolov3.configs import *

# Create the model once, outside the loop
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}")

while True:
    ID = random.randint(0, 200)
    label_txt = "mnist/mnist_test.txt"
    image_info = open(label_txt).readlines()[ID].split()

    image_path = image_info[0]

    detect_image(yolo,
                 image_path,
                 "mnist_test.jpg",
                 input_size=YOLO_INPUT_SIZE,
                 show=True,
                 CLASSES=TRAIN_CLASSES,
                 rectangle_colors=(255, 0, 0))
Example #25
    def calculation(self):
        imagecv_cam = self.cv_image_cam
        imagecv_depth = self.cv_image_depth
        imagecv_depth_series = []
        img_seg = []
        if len(imagecv_cam) != 0:
            imagecv_cam, bboxes = detect_image(yolo,
                                               imagecv_cam,
                                               "",
                                               input_size=YOLO_INPUT_SIZE,
                                               show=False,
                                               rectangle_colors=(255, 0, 0))
            x1, y1, x2, y2, _, C = Give_boundingbox_coor_class(bboxes)
            print("Bounding box of object(s) = ", x1, y1, x2, y2, C)
        if len(imagecv_depth) != 0:
            for i in range(len(bboxes)):
                patch = (int(x2[i] - x1[i]), int(y2[i] - y1[i])
                         )  # width and height of the bbox
                center = (int(x1[i] + patch[0] / 2), int(y1[i] + patch[1] / 2)
                          )  # center coordinates of the bbox (global)
                cv_image_bbox_sub = cv2.getRectSubPix(
                    imagecv_depth, patch,
                    center)  # extract the bbox region from the depth image
                cv_image_bbox_sub = np.where(
                    np.isnan(cv_image_bbox_sub), 0.3,
                    cv_image_bbox_sub)  # replace NaN with 0.3 m (the minimum depth)
                cv_image_bbox_sub = np.where(
                    np.isinf(cv_image_bbox_sub), 0.3,
                    cv_image_bbox_sub)  # replace +/-inf with 0.3 m
                #print(cv_image_bbox_sub)
                #print(imagecv_depth.dtype)
                #avg_depth,img_seg=k_means_depth(cv_image_bbox_sub)
                avg_depth, img_seg = k_means_depth(cv_image_bbox_sub)
                print("Average depth of object [m]=", avg_depth)
                #cv2.imwrite("depth2"+str(i)+".png",(cv_image_bbox_sub*2**16).astype(np.uint16)) #safe images
                #cv2.imwrite("depthclean"+str(i)+".png",cv_image_bbox_sub)
                #cv2.imwrite("depthseg"+str(i)+".png",img_seg)
                print(imagecv_depth.shape)

                D_to_C_of_bbox_L = cv_image_bbox_sub[
                    int(patch[1] / 2),
                    int(patch[0] / 2)]  # (height y, width x): distance to the bbox center with respect to the local patch
                print("Distance to center of object [m]= ", D_to_C_of_bbox_L,
                      " Class number= ", C[i])
                #Distance_to_center_of_bbox_wrt_global=imagecv_depth[center[1],center[0]] #height (y), width (x)
                #print(Distance_to_center_of_bbox_wrt_global)
                """
        ## For plotting
        max_depth=20 #Maximum depth the camera can detect objects [m]
        cv_image_bbox_sub *= 255/(max_depth/cv_image_bbox_sub) # relate meter to pixels
        cv_image_bbox_sub = cv_image_bbox_sub.astype('uint8') # pixel float to int
        D_to_C_of_bbox_L_p=cv_image_bbox_sub[int(patch[1]/2),int(patch[0]/2)]
        #print(D_to_C_of_bbox_L_p)
        # 1 pixel is approx 7.5 cm
        cv_image_bbox_sub = (np.where(cv_image_bbox_sub>(D_to_C_of_bbox_L_p+1),0,(np.where(cv_image_bbox_sub<(D_to_C_of_bbox_L_p-1),0,cv_image_bbox_sub)))) # blacks out every pixel outside the threshold
        cv_image_bbox_sub = np.where(cv_image_bbox_sub>(D_to_C_of_bbox_L_p-2),255,cv_image_bbox_sub)
        #(cv_image_bbox_sub.shape)
        """
                imagecv_depth_series.append(cv_image_bbox_sub)
        return imagecv_cam, imagecv_depth_series, bboxes, img_seg
Example #26
num = num_row * num_col  # num_row and num_col are defined earlier in the notebook
image_pred = []

# Create the model once, outside the loop
yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
yolo.load_weights("./checkpoints/yolov3_custom_Tiny")  # use keras weights

for i in range(num):
    label_txt = "mnist/mnist_test.txt"
    image_info = open(label_txt).readlines()[i].split()

    image_path = image_info[0]
    print(image_path)

    image = detect_image(yolo,
                         image_path,
                         "mnist_test.jpg",
                         input_size=input_size,
                         show=True,
                         CLASSES=TRAIN_CLASSES,
                         rectangle_colors=(255, 0, 0))
    image_pred.append(image)

#%%
# plot images
fig, axes = plt.subplots(num_row,
                         num_col,
                         figsize=(1.5 * num_col, 2 * num_row))
for i in range(num):
    ax = axes[i // num_col, i % num_col]
    ax.imshow(image_pred[i])
plt.tight_layout()
plt.show()
Example #27
#   GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
#   Description : object detection image and video example
#
#================================================================
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS

image_path = "./IMAGES/street.jpg"
video_path = "./IMAGES/city_drive.mp4"

yolo = Create_Yolov3(input_size=input_size)
load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

detect_image(yolo,
             image_path,
             "",
             input_size=input_size,
             show=True,
             rectangle_colors=(255, 0, 0))
#detect_video(yolo, video_path, '', input_size=input_size, show=True, rectangle_colors=(255,0,0))
#detect_realtime(yolo, input_size=input_size, rectangle_colors=(255, 0, 0))
Example #28
# Intended to be used together with node-red.
# An image named "image_to_classify.jpg" is uploaded via node-red to this folder.
# This script then classifies the objects in the image and saves a new image, "image_classified.jpg",
# which is uploaded back to node-red and shown on the dashboard.

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
from yolov3.configs import *

image_path = "image_to_classify.jpg"

#image_path   = "./IMAGES/kite.jpg"

yolo = Load_Yolo_model()
detect_image(yolo,
             image_path,
             "image_classified.jpg",
             input_size=YOLO_INPUT_SIZE,
             show=False,
             rectangle_colors=(255, 0, 0))

print("done classifying")
Example #29
    def test_yolov3_results(self):
        model = load_yolo_model()
        temp_res = detect_image(model, "../model_data/car2.jpg", '')
        temp_res2 = detect_image(model, "../model_data/car2.jpg", '')
        self.assertTrue(np.all(temp_res == temp_res2), "Should be equal")
Example #30
    def callback(self, image, cloud):
        print("start")
        # Generate images from msgs
        cv_image = self.bridge.imgmsg_to_cv2(image, image.encoding)
        #cv_image_depth = self.bridge.imgmsg_to_cv2(depth, depth.encoding)
        _, cv_image_pc = PC_dataxyz_to_PC_image(cloud,
                                                Org_img_height=376,
                                                Org_img_width=672)
        # Yolo to get Boundary Boxes
        image_detec, bboxes = detect_image(self.yolo,
                                           cv_image,
                                           "",
                                           input_size=YOLO_INPUT_SIZE,
                                           show=False,
                                           rectangle_colors=(255, 0, 0))
        print("detected")
        # Convert Boundary boxes to readable values
        PC_image_bbox_sub_series = Sub_pointcloud(cv_image_pc, bboxes)
        """
		t3=time.time()
		avg_depth_kmean, segmentation_img_kmeans,_ = k_means_pointcloud(PC_image_bbox_sub_series, bboxes, PC=True, seg_plot=self.seg_plot)
		t4=time.time()
		print(avg_depth_kmean)
		print(t4-t3, "time for kmean")

		t1=time.time()
		avg_depth_dbscan_0_1, segmentation_img_DBSCAN_0_1 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.1,min_samples=50)
		t2=time.time()
		print(avg_depth_dbscan_0_1)
		print(t2-t1,"time for dbscan")
		"""
        #_, segmentation_img_DBSCAN_0_05 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.05,min_samples=50)
        #_, segmentation_img_DBSCAN_0_05 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.05)
        #_, segmentation_img_DBSCAN_0_047 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.047)
        t1 = time.time()
        avg_depth_dbscan, segmentation_img_DBSCAN, xyzcoord_DBSCAN = DBSCAN_pointcloud(
            PC_image_bbox_sub_series, bboxes, seg_plot=self.seg_plot)
        t2 = time.time()

        t3 = time.time()
        avg_depth_kmean, segmentation_img_Kmeans, xyzcoord_Kmeans = k_means_pointcloud(
            PC_image_bbox_sub_series, bboxes, PC=True, seg_plot=self.seg_plot)
        t4 = time.time()
        #print(avg_depth_kmean,"Kmeans", avg_depth_dbscan,"DBSCAN")
        #print(xyzcoord_Kmeans,"Kmeans", xyzcoord_DBSCAN,"DBSCAN")
        #print(t4-t3, "time for kmean",t2-t1,"time for dbscan")
        #_, segmentation_img_DBSCAN_0_045 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.045)
        #_, segmentation_img_DBSCAN_0_04 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.04)
        #_, segmentation_img_DBSCAN_0_05_80 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.05,min_samples=80)
        #_, segmentation_img_DBSCAN_0_1_100 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.1,min_samples=100)
        #_, segmentation_img_DBSCAN_0_07 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.07)
        #_, segmentation_img_DBSCAN_0_06 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.06)
        #_, segmentation_img_DBSCAN_0_055 = DBSCAN_pointcloud(PC_image_bbox_sub_series,bboxes, seg_plot=self.seg_plot ,eps=0.055)
        x1, y1, x2, y2, Score, C = Give_boundingbox_coor_class(bboxes)

        if self.seg_plot:
            cv2.imshow("detected img", image_detec)
            cv2.imwrite("detected_img.png", image_detec)
            for i in range(len(bboxes)):
                #cv2.imshow("segmented_DBSCAN_eps0.1_sam50"+str(0),segmentation_img_DBSCAN_0_1[0])
                #cv2.imshow("segmented_DBSCAN_eps0.1_sam100"+str(0),segmentation_img_DBSCAN_0_1_100[0])
                #cv2.imshow("segmented_DBSCAN_eps0.05_sam50"+str(0),segmentation_img_DBSCAN_0_05[0])
                #cv2.imshow("segmented_DBSCAN_eps0.05_sam80"+str(0),segmentation_img_DBSCAN_0_05_80[0])
                #cv2.imshow("segmented_DBSCAN_eps0.04",segmentation_img_DBSCAN_0_04[0])
                #cv2.imshow("segmented_DBSCAN_eps0.045",segmentation_img_DBSCAN_0_045[0])
                cv2.imshow("segmented_DBSCAN " + str(0),
                           segmentation_img_DBSCAN[0])
                cv2.imwrite("segmented_DBSCAN_" + str(i) + ".png",
                            segmentation_img_DBSCAN[i])
                #cv2.imshow("segmented_DBSCAN_eps0.047",segmentation_img_DBSCAN_0_047[0])
                #cv2.imshow("segmented_DBSCAN_eps0.05",segmentation_img_DBSCAN_0_05[0])
                #cv2.imshow("segmented_DBSCAN_eps0.055 "+str(i),segmentation_img_DBSCAN_0_055[i])
                #cv2.imshow("segmented_DBSCAN_eps0.06",segmentation_img_DBSCAN_0_06[0])
                #cv2.imshow("segmented_DBSCAN_eps0.07",segmentation_img_DBSCAN_0_07[0])
                #cv2.imshow("segmented_Kmeans"+str(i),segmentation_img_Kmeans[i])
                cv2.imwrite("segmented_Kmean_" + str(i) + ".png",
                            segmentation_img_Kmeans[i])
            cv2.waitKey(3)
        """