Example no. 1
def setup():
    # global yolo, all_classes
    yolo = YOLO(0.6, 0.5)
    file = 'data/VTAS_YoloV3_coco_classes.txt'
    # file = 'data/coco_classes.txt'
    all_classes = get_classes(file)
    return yolo, all_classes
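A minimal usage sketch for this helper; detect_image and the image paths are assumed from the surrounding repo, not shown here:

import cv2

# Hypothetical caller: build the model once, then reuse it for each image.
yolo, all_classes = setup()
image = cv2.imread('images/test/dog.jpg')        # assumed test image
image = detect_image(image, yolo, all_classes)   # draws boxes on the frame
cv2.imwrite('images/res/dog.jpg', image)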
Example no. 2
    def run(self):
        yolo = YOLO(0.3, 0.5)
        file = 'data/coco_classes.txt'
        all_classes = get_classes(file)

        f2 = 'test8.jpg'
        path2 = 'images/Snapshots/' + f2
        image2 = cv2.imread(path2)
        image2, boxes_multi_tracking = detect_image(image2, yolo, all_classes)
        cv2.imwrite('images/res/' + f2, image2)
        print("ok1")
        t_prev = time.time()
        while self.threading_event.wait():
            (success, boxes_multi_tracking) = self.trackers.update(
                self.door_to_heaven.get_frame())
            self.door_to_heaven.tracker_update(boxes_multi_tracking)
            if time.time() - t_prev > 1:
                print(time.time() - t_prev)
                t_prev = time.time()
                # create yet another thread that performs this task
                image, boxes_yolo_detection = detect_image(
                    self.door_to_heaven.get_frame(), yolo, all_classes)

                replayDetection = self.door_to_heaven.reliable_tracking(
                    boxes_multi_tracking, image, boxes_yolo_detection)
Example no. 3
def load_saved_artifacts():
    print("loading saved artifacts...start")

    global yolo
    global all_classes
    if yolo is None or all_classes is None:

        yolo = YOLO(0.6, 0.5)
        all_classes = get_classes('data/coco_classes.txt')

    print("loading saved artifacts...done")
Example no. 4
def detect_objects_live(yolo=YOLO(0.6, 0.5),
                        all_classes=get_classes('data/coco_classes.txt')):
    cv2.namedWindow('window_frame')
    cap = cv2.VideoCapture(0)  # Webcam source
    while cap.isOpened():
        bgr_image, gray_image, rgb_image = get_images_from_webcam(cap)
        im = detect_image(bgr_image, yolo, all_classes)
        cv2.imshow("detection", im)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
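Note that the defaults in the signature above are evaluated once at import time, so YOLO(0.6, 0.5) is constructed even when the caller passes its own model. A common alternative, sketched here under a hypothetical name, defers construction behind None defaults:

def detect_objects_live_lazy(yolo=None, all_classes=None):
    # Build the model only if the caller did not supply one.
    if yolo is None:
        yolo = YOLO(0.6, 0.5)
    if all_classes is None:
        all_classes = get_classes('data/coco_classes.txt')
    # ... same capture/detect loop as above ...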
Example no. 5
    def pictures_test():
        yolo = YOLO(0.6, 0.5)
        file = 'data/coco_classes.txt'
        all_classes = get_classes(file)

        # detect images in test folder.
        for (root, dirs, files) in os.walk('images/test'):
            if files:
                for f in files:
                    print(f)
                    path = os.path.join(root, f)
                    image = cv2.imread(path)
                    image = detect_image(image, yolo, all_classes)
                    cv2.imwrite('images/res/' + f, image)

        # detect video.
        """
def start(path):
    yolo = YOLO(0.6, 0.5)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    image = cv2.imread(path)
    image, classes = detect_image(image, yolo, all_classes)
    # cv2.imwrite('images/res/' + f, image)
    K.clear_session()
    str = ""
    if(classes is not None):
        for cl in classes:
            # print('class: '+ all_classes[cl])
            str += all_classes[cl] + " "
        return str
                
    return str
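A quick hypothetical call, assuming an image under images/test/:

# start() returns the detected class names as a space-separated string.
labels = start('images/test/dog.jpg')
print(labels)  # e.g. "dog bicycle truck "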
Example no. 7
def main(src):
    yolo = YOLO(0.6, 0.5)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    # detect images in test folder.
    for (root, dirs, files) in os.walk('images/test'):
        if files:
            for f in files:
                print(f)
                path = os.path.join(root, f)
                image = cv2.imread(path)
                image = detect_image(image, yolo, all_classes)
                cv2.imwrite('images/res/' + f, image)

    # detect videos one at a time in videos/test folder
    video = src
    detect_video(video, yolo, all_classes)
    aa = "/final"
    return aa
Example no. 8
def train(img):
    yolo = YOLO(0.6, 0.5)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)
    # image_size = 224
    # img=load_img(img,target_size=(image_size,image_size))
    # img_array=np.array([img_to_array(img)])
    # image = cv2.imread(img)
    # read image file string data
    # filestr = request.files['file'].read()
    # convert string data to numpy array
    npimg = np.frombuffer(img, np.uint8)
    # convert numpy array to image
    image = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
    # image=url_to_image(img)
    # cv2.waitKey(0)
    image = detect_image(image, yolo, all_classes)
    return image


# train('https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1535197235311&di=4068a16bbed08b9d226304e90e83afa4&imgtype=0&src=http%3A%2F%2Fimg3.duitang.com%2Fuploads%2Fitem%2F201609%2F19%2F20160919154716_AmExk.jpeg')
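Since train expects raw encoded bytes (it decodes them with np.frombuffer and cv2.imdecode), a local test sketch could read a file in binary mode; the path here is an assumption:

# Hypothetical local test: feed raw JPEG bytes instead of an uploaded file.
with open('images/test/dog.jpg', 'rb') as fh:
    result = train(fh.read())
cv2.imwrite('images/res/dog.jpg', result)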
Example no. 9
tflite_interpreter.set_tensor(input_details[0]['index'], converted_image)
tflite_interpreter.invoke()

def rs(ten):
  # Reshaping a tensor to its own shape is a no-op; in effect this helper
  # just prints the output shape for inspection.
  a = tuple(ten.shape)
  print(a)
  return ten.reshape(a)

tflite_model_predictions = [
                            rs(tflite_interpreter.get_tensor(output_details[0]['index'])),
                            rs(tflite_interpreter.get_tensor(output_details[1]['index'])),
                            rs(tflite_interpreter.get_tensor(output_details[2]['index']))]

y = YOLO(0.6, 0.5)
w, h, c = image.shape  # note: OpenCV shapes are (height, width, channels)

start = time.time()
boxes, classes, scores = y._yolo_out(tflite_model_predictions, (w, h))
end = time.time()
print('time: {0:.2f}s'.format(end - start))

all_classes = load_labels('data/coco_classes.txt')

def draw(image, boxes, scores, classes, all_classes):
    for box, score, cl in zip(boxes, scores, classes):
        x, y, w, h = box
        print(box)

        top = max(0, np.floor(x + 0.5).astype(int))
Example no. 10
def processing_YOLOv3(main_view_videocap,
                      whole_view_videocap,
                      output_videowriter,
                      fps,
                      length,
                      yolo_object_threshold=0.6,
                      yolo_nms_threshold=0.5,
                      visible=False):
    """processing with YOLOv3 for FastClip 0.0.2.

    # Arguments:
        main_view_videocap: the main view video capture.
        whole_view_videocap: the whole view video capture.
        output_videowriter: the output video writer.
        fps: the fps of the video.
        length: the total number of frames in the video.
        yolo_object_threshold: threshold for object detection.
        yolo_nms_threshold: threshold for non-maximum suppression.
        visible: whether to display frames while processing.

    # Returns:
        int: tag for success or error.
        0: FastClip succeeded!
        1: Error: wrong filename!
        2: Error: different fps!
        3: Error: different resolution!
        4: Error: different length!
        5: Error: read frame error!
        6: Error: the video is too short (less than 20 s)!
    """

    total_processed_frame_count = 0
    left_length = length

    # Initialize YOLOv3
    global yolo_model, classes_file, all_classes
    if yolo_model is None:
        yolo_model = YOLO(yolo_object_threshold, yolo_nms_threshold)
        classes_file = 'data/coco_classes.txt'
        all_classes = get_classes(classes_file)

    #################################### stage 0 ####################################
    # Start: Output whole view 10s

    beginning_length = 10
    for i in range(fps * beginning_length):
        # read frame
        success_main, frame_main = main_view_videocap.read()
        success_whole, frame_whole = whole_view_videocap.read()
        if not (success_main and success_whole):
            return 5

        # write to video
        output_videowriter.write(frame_whole)

        # visible
        if visible:
            cv2.imshow("test", frame_whole)
            if cv2.waitKey(10) & 0xff == 27:
                break

        # accumulate processed frame count and left length
        total_processed_frame_count += 1
        left_length -= 1
        print(total_processed_frame_count)

    #################################### stage 1 ####################################
    # Alternative view

    # State to check whether the whole view could alter to main view
    # Condition: main view has a person (p >= 0.90) and maintains it for at least 3 frames
    # That means there is a sliding window of 3 frames
    main_view_has_person_state = False

    # Read and detect window 0
    window_0_state = False
    success_main, frame_main_0 = main_view_videocap.read()
    success_whole, frame_whole_0 = whole_view_videocap.read()
    boxes0, classes0, scores0 = detect_image_4_results(frame_main_0,
                                                       yolo_model, all_classes)
    if classes0 is not None:
        if classes0[0] == 0 and scores0[0] >= 0.90:
            window_0_state = True

    # Read and detect window 1
    window_1_state = False
    success_main, frame_main_1 = main_view_videocap.read()
    success_whole, frame_whole_1 = whole_view_videocap.read()
    boxes1, classes1, scores1 = detect_image_4_results(frame_main_1,
                                                       yolo_model, all_classes)
    if classes1 is not None:
        if classes1[0] == 0 and scores1[0] >= 0.90:
            window_1_state = True

    # Read and detect window 2
    window_2_state = False
    success_main, frame_main_2 = main_view_videocap.read()
    success_whole, frame_whole_2 = whole_view_videocap.read()
    boxes2, classes2, scores2 = detect_image_4_results(frame_main_2,
                                                       yolo_model, all_classes)
    if classes2 is not None:
        if classes2[0] == 0 and scores2[0] >= 0.90:
            window_2_state = True

    # check reading state
    if not (success_main and success_whole):
        return 6

    # change window state
    main_view_has_person_state = window_0_state and window_1_state and window_2_state
    print("state:", main_view_has_person_state)

    tail_length = 10
    while left_length > fps * tail_length:

        random_length_main = random.randrange(5 * fps, 10 * fps, fps)
        random_length_whole = random.randrange(10 * fps, 20 * fps, fps)

        # if left whole video is too short
        if left_length <= fps * tail_length + random_length_main:
            break

        print("Main begin:")
        # main view
        main_view_count = 0
        while main_view_count < random_length_main and left_length > fps * tail_length:

            # output
            output_videowriter.write(frame_main_0)
            main_view_count += 1

            # visible
            if visible:
                cv2.imshow("test", frame_main_0)
                if cv2.waitKey(10) & 0xff == 27:
                    break

            # update state
            frame_main_0 = frame_main_1
            frame_whole_0 = frame_whole_1
            window_0_state = window_1_state
            frame_main_1 = frame_main_2
            frame_whole_1 = frame_whole_2
            window_1_state = window_2_state

            success_main, frame_main_2 = main_view_videocap.read()
            success_whole, frame_whole_2 = whole_view_videocap.read()
            boxes2, classes2, scores2 = detect_image_4_results(
                frame_main_2, yolo_model, all_classes)
            window_2_state = False
            if classes2 is not None:
                if classes2[0] == 0 and scores2[0] >= 0.90:
                    window_2_state = True
            main_view_has_person_state = window_0_state and window_1_state and window_2_state
            print("state:", main_view_has_person_state)

            # accumulate processed frame count and left length
            total_processed_frame_count += 1
            left_length -= 1
            print(total_processed_frame_count)
            print("Main\n")

            if main_view_count >= fps * 5 and not main_view_has_person_state:
                break

        # whole view
        print("Whole begin:")
        whole_view_count = 0
        while left_length > fps * tail_length:

            # output
            output_videowriter.write(frame_whole_0)
            whole_view_count += 1

            # visible
            if visible:
                cv2.imshow("test", frame_whole_0)
                if cv2.waitKey(10) & 0xff == 27:
                    break

            # update state
            frame_main_0 = frame_main_1
            frame_whole_0 = frame_whole_1
            window_0_state = window_1_state
            frame_main_1 = frame_main_2
            frame_whole_1 = frame_whole_2
            window_1_state = window_2_state

            success_main, frame_main_2 = main_view_videocap.read()
            success_whole, frame_whole_2 = whole_view_videocap.read()
            boxes2, classes2, scores2 = detect_image_4_results(
                frame_main_2, yolo_model, all_classes)
            window_2_state = False
            if classes2 is not None:
                if classes2[0] == 0 and scores2[0] >= 0.90:
                    window_2_state = True
            main_view_has_person_state = window_0_state and window_1_state and window_2_state
            print("state:", main_view_has_person_state)

            # accumulate processed frame count and left length
            total_processed_frame_count += 1
            left_length -= 1
            print(total_processed_frame_count)
            print("Whole\n")

            if whole_view_count > random_length_whole and main_view_has_person_state:
                break

    #################################### stage 2 ####################################
    # End: Output whole view 10s

    # output the surplus frames
    output_videowriter.write(frame_whole_0)
    output_videowriter.write(frame_whole_1)
    output_videowriter.write(frame_whole_2)

    # read new frame
    success_main, frame_main = main_view_videocap.read()
    success_whole, frame_whole = whole_view_videocap.read()
    if not (success_main and success_whole):
        return 5

    while left_length > 0:

        # write to video
        output_videowriter.write(frame_whole)

        # visible
        if visible:
            cv2.imshow("test", frame_whole)
            if cv2.waitKey(10) & 0xff == 27:
                break

        # read frame
        success_main, frame_main = main_view_videocap.read()
        success_whole, frame_whole = whole_view_videocap.read()
        if not (success_main and success_whole):
            return 5

        # accumulate processed frame count and left length
        total_processed_frame_count += 1
        left_length -= 1
        print(total_processed_frame_count)

    output_videowriter.write(frame_whole)

    #################################################################################

    return 0
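A small caller sketch that maps the documented return codes to messages; the captures, writer, and video parameters here are assumptions, not part of the example:

# Hypothetical driver translating processing_YOLOv3's status codes.
STATUS = {
    0: 'FastClip succeeded!',
    1: 'Error: wrong filename!',
    2: 'Error: different fps!',
    3: 'Error: different resolution!',
    4: 'Error: different length!',
    5: 'Error: read frame error!',
    6: 'Error: the video is too short (less than 20 s)!',
}

code = processing_YOLOv3(main_cap, whole_cap, writer, fps=30, length=9000)
print(STATUS.get(code, 'unknown status code'))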
Example no. 11
    boxes, num_class, accuracy = yolo.predict(feed_img, img.shape)
    for box, num, acc in zip(boxes, num_class, accuracy):
        x, y, w, h = tuple(map(int, box))
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
        cv2.putText(img, classes[num] + ':' + str(acc), (x, y),
                    cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, (0, 255, 0), 3,
                    cv2.LINE_AA)
    return img


#------

# Loading the model
# 1st parameter: threshold for object detection
# 2nd parameter: threshold for non-maximum suppression
yolo = YOLO(0.5, 0.5)
#-----

# Display video
cap = cv2.VideoCapture('D:/projects/highway.mp4')
_, frame = cap.read()

plt.imshow(detect(frame, coco_classes, yolo))
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = detect(frame, coco_classes, yolo)
    cv2.imshow('obj_detection', frame)
    if cv2.waitKey(30) & 0xff == 27:
        break
cap.release()
cv2.destroyAllWindows()
Example no. 12
        image = detect_image(frame, yolo, all_classes)
        cv2.imshow("detection", image)

        # Save the video frame by frame
        vout.write(image)

        if cv2.waitKey(110) & 0xff == 27:
            break

    vout.release()
    camera.release()


if __name__ == '__main__':
    # Need to increase threshold and reduce NMS to get decent results with tiny
    yolo = YOLO(0.75, 0.2, name='data/yolov3-tiny.h5')
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    # detect images in test folder.
    for (root, dirs, files) in os.walk('images/test'):
        if files:
            for f in files:
                print(f)
                path = os.path.join(root, f)
                image = cv2.imread(path)
                image = detect_image(image, yolo, all_classes)
                cv2.imwrite('images/res/tiny_' + f, image)

    # # detect videos one at a time in videos/test folder
    # video = 'library1.mp4'
Example no. 13
        #cv2.imshow("detection", image)

        # Save the video frame by frame
        vout.write(image)

        if cv2.waitKey(1) & 0xff == 27:
            break

    vout.release()
    camera.release()
    print("done")


# In[19]:

yolo = YOLO(0.25, 0.5)
file = 'data/coco_classes.txt'
all_classes = get_classes(file)

# ### Detecting Images

# In[35]:

# DEFINE MISSION:
# choose objects from classes list: data/coco_classes.txt
bad_guy = "cat"
forbidden_place = "toilet"

with open("data/coco_classes.txt") as f:
    if bad_guy and forbidden_place in f.read():
        print("Objects are valid")
Example no. 14
if __name__ == '__main__':
    args = parser.parse_args()

    if args.model_path is not None:
        model_path = os.path.expanduser(args.model_path)
        assert model_path.endswith(
            '.h5'), 'Model path {} is not a .h5 file'.format(model_path)
    else:
        model_path = 'data/yolov3-416.h5'

    if args.classes_path is not None:
        classes_path = os.path.expanduser(args.classes_path)
        assert classes_path.endswith(
            '.txt'), 'Classes path {} is not a .txt file'.format(classes_path)
    else:
        classes_path = 'data/coco_classes.txt'

    yolo = YOLO(model_path, 0.6, 0.5)
    all_classes = get_classes(classes_path)

    # detect images in test folder.
    for (root, dirs, files) in os.walk('images/test'):
        if files:
            for f in files:
                print(f)
                path = os.path.join(root, f)
                image = cv2.imread(path)
                image = detect_image(image, yolo, all_classes)
                cv2.imwrite('images/res/' + f, image)
Example no. 15
def detectobj():

    import time
    import numpy as np
    from model.yolo_model import YOLO

    def process_image(img):

        image = cv2.resize(img, (416, 416), interpolation=cv2.INTER_CUBIC)
        image = np.array(image, dtype='float32')
        image /= 255.
        image = np.expand_dims(image, axis=0)

        return image

    def get_classes(file):

        with open(file) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]

        return class_names

    def draw(image, boxes, scores, classes, all_classes):

        for box, score, cl in zip(boxes, scores, classes):
            x, y, w, h = box

            top = max(0, np.floor(x + 0.5).astype(int))
            left = max(0, np.floor(y + 0.5).astype(int))
            right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
            bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))

            cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
            cv2.putText(image, '{0} {1:.2f}'.format(all_classes[cl], score),
                        (top, left - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                        (0, 0, 255), 1, cv2.LINE_AA)

            print('class: {0}, score: {1:.2f}'.format(all_classes[cl], score))
            print('box coordinate x,y,w,h: {0}'.format(box))

        print()

    def detect_image(image, yolo, all_classes):

        pimage = process_image(image)

        start = time.time()
        boxes, classes, scores = yolo.predict(pimage, image.shape)
        end = time.time()

        print('time: {0:.2f}s'.format(end - start))

        if boxes is not None:
            draw(image, boxes, scores, classes, all_classes)

        return image

    yolo = YOLO(0.6, 0.5)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    f = path.get()
    f = f.split('.')
    f = f[0] + '_edited.jpg'
    #path = 'C:/Users/OMKAR/YOLOv3/images/'+f
    image = cv2.imread(path.get())
    image = detect_image(image, yolo, all_classes)
    cv2.imwrite(f, image)

    imgpath = f
    inputimg = resizeimg(imgpath)  # we get a resized image in PIL format

    # inputimg = Image.open(inputimg)  # takes an input image in any format
    inputimg = ImageTk.PhotoImage(
        inputimg)  # converts the image into a Tkinter-supported format

    panel.configure(image=inputimg)
    panel.image = inputimg
Example no. 16
                path + '/Distortions of images {0} to {1}.npy'.format(
                    i, i + self.batch_size), dists)
            for j in range(len(X_adv)):
                io.imsave(
                    path + '/Best example of {1} Distortion {2}.png'.format(
                        self.confidence, i + j, dists[j]), X_adv[j])
            r.extend(X_adv)
            ds.extend(dists)
        return np.array(r), np.array(ds)


if __name__ == '__main__':
    sess = tf.InteractiveSession()
    init = tf.global_variables_initializer()
    sess.run(init)
    ORACLE = YOLO(0.6, 0.5)  # The arguments do not matter.
    X_test = []
    for (root, dirs, files) in os.walk('../Datasets/COCO/val2017/'):
        if files:
            for f in files:
                print(f)
                path = os.path.join(root, f)
                image = cv2.imread(path)
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # RGB
                image = process_image(image)
                #image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                X_test.append(image)
                EXAMPLE_NUM -= 1
                if EXAMPLE_NUM == 0:
                    break
    X_test = np.concatenate(X_test, axis=0)
Example no. 17
        res, frame = camera.read()

        if not res:
            break

        image = detect_image(frame, yolo, all_classes)
        cv2.imshow("detection", image)

        if cv2.waitKey(110) & 0xff == 27:
            break

    camera.release()


if __name__ == '__main__':
    yolo = YOLO(0.6, 0.5)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    # detect images in test folder.
    for (root, dirs, files) in os.walk('images/test'):
        if files:
            for f in files:
                print(f)
                path = os.path.join(root, f)
                image = cv2.imread(path)
                image = detect_image(image, yolo, all_classes)
                cv2.imwrite('images/res/' + f, image)

    # detect video.
    video = 'E:/video/car.flv'
Example no. 18
        image = detect_image(frame, yolo, all_classes)
        cv2.imshow("detection", image)

        # Save the video frame by frame
        vout.write(image)

        if cv2.waitKey(110) & 0xff == 27:
            break

    vout.release()
    camera.release()


if __name__ == '__main__':
    yolo = YOLO(0.4, 0.4)
    file = 'data/coco_classes.txt'
    all_classes = get_classes(file)

    # detect images in test folder.
    for (root, dirs, files) in os.walk('images/test'):
        if files:
            for f in files:
                print(f)
                path = os.path.join(root, f)
                image = cv2.imread(path)
                image = detect_image(image, yolo, all_classes)
                cv2.imwrite('images/res/' + f, image)

    # detect videos one at a time in videos/test folder
    video = 'library1.mp4'