# save a frame
            out.write(frame)
            frame_index += 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()


if __name__ == '__main__':
    main(YOLO())
Example n. 2
#-------------------------------------#
#       Camera detection
#-------------------------------------#
from keras.layers import Input
from yolo import YOLO
from PIL import Image
import numpy as np
import cv2
yolo = YOLO()
# Open the camera
capture = cv2.VideoCapture(1)  # capture=cv2.VideoCapture("1.mp4")

while True:
    # Read one frame; stop if the camera returns nothing
    ret, frame = capture.read()
    if not ret:
        break
    # Convert the color format from BGR to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Convert to a PIL Image
    frame = Image.fromarray(np.uint8(frame))

    # Run detection
    frame = np.array(yolo.detect_image(frame))

    # Convert RGB back to BGR to match OpenCV's display format
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    cv2.imshow("video", frame)
    c = cv2.waitKey(30) & 0xff
    if c == 27:  # ESC
        capture.release()
        cv2.destroyAllWindows()
        break
Example n. 3
from utilities import assign_next_frame, get_data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image as PILImage
from PIL import ImageFont, ImageDraw
from tqdm import tqdm_notebook as tqdm
import cv2
from datetime import datetime

from yolo import YOLO
from resnet_occupancy import PREDICT

yolo = YOLO(score=0.1)


def plot_detection(images, index=0, figsize=(33, 64)):
    photo = cv2.imread(images[index])
    photo = cv2.cvtColor(photo, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; PIL and matplotlib expect RGB
    detected = yolo.detect_image(PILImage.fromarray(photo))
    f = plt.figure(figsize=figsize)
    sp = f.add_subplot(1, 2, 1)
    sp.axis('off')
    plt.imshow(photo)
    sp = f.add_subplot(1, 2, 2)
    sp.axis('off')
    plt.imshow(detected)


def create_boxes(images):
    data = pd.DataFrame()  # columns: x1, y1, x2, y2, score, class, file
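    # The function is truncated at this point in the source. A sketch of how
    # the body might continue, under two assumptions: that this fork's
    # detect_image returns (boxes, scores, classes), as a later example on
    # this page does, and that each box is ordered (x1, y1, x2, y2).
    rows = []
    for path in images:
        photo = cv2.imread(path)
        boxes, scores, classes = yolo.detect_image(PILImage.fromarray(photo))
        for box, score, cls in zip(boxes, scores, classes):
            x1, y1, x2, y2 = box
            rows.append({"x1": x1, "y1": y1, "x2": x2, "y2": y2,
                         "score": score, "class": cls, "file": path})
    data = pd.DataFrame(rows)
    return data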
Example n. 4
"""
The purpose of this file is to continuously display the labels in each frame
of a video or live stream, based on the trained model.

Variables that need to be altered before running:
'***' (cap = cv2.VideoCapture('***')): should be the address of the source video or live stream

"""

import time

from yolo import YOLO
from PIL import Image
import cv2
import numpy as np

yolo = YOLO()

# cap = cv2.VideoCapture("http://*****:*****@192.168.90.40:8081/video")
cap = cv2.VideoCapture('./V&I_process/test_nails_0817_1.mp4')
normal_list = []
overall_bool = ''
b_dis_count = 0
while cap.isOpened():
    ret, frame = cap.read()
    # if not ret:
    #     cap = cv2.VideoCapture("http://*****:*****@192.168.90.40:8081/video")
    #     continue
    if not ret:
        break

    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
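    # The original snippet breaks off here. A minimal sketch of the remaining
    # detect-and-display step, following the pattern the other webcam examples
    # on this page use (detect_image returning a drawn PIL image):
    result = np.array(yolo.detect_image(Image.fromarray(image)))
    frame = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
    cv2.imshow("video", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break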
Example n. 5
    cv2.imshow("test", frame)

    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed
        img_name = "opencv_frame_{}.png".format(img_counter)
        print("{} written!".format(img_name))
        x = frame
        img_counter += 1

        # Note: constructing YOLO here reloads the network on every SPACE
        # press; hoisting it above the loop would avoid the repeated load.
        yolo = YOLO(
            "/Users/Denny/Desktop/Hack_The_Northeast/yolo-hand-detection/models/cross-hands.cfg",
            "/Users/Denny/Desktop/Hack_The_Northeast/yolo-hand-detection/models/cross-hands.weights",
            ["hand"])
        width, height, inference_time, results = yolo.inference(x)
        frame = x
        print(frame)
        for detection in results:
            id, name, confidence, x, y, w, h = detection
            cx = x + (w / 2)
            cy = y + (h / 2)
            crop_img = frame[y - 50:y + h + 50, x - 50:x + w + 50]

            im = Image.fromarray(crop_img)
            im.save("your_file.png")
            im = image.load_img('your_file.png',
                                target_size=(28, 28),
                                color_mode='grayscale')
Example n. 6
from yolo import YOLO
from PIL import Image
import numpy as np
import cv2
import time
import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

yolo = YOLO()
# Open the camera
capture = cv2.VideoCapture(0)  # capture=cv2.VideoCapture("1.mp4")
fps = 0.0
average_fps = 0
nums = 0
while True:
    t1 = time.time()
    # Read one frame; stop if the camera returns nothing
    ret, frame = capture.read()
    if not ret:
        break
    # Convert the color format from BGR to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Convert to a PIL Image
    frame = Image.fromarray(np.uint8(frame))

    # Run detection
    frame = np.array(yolo.detect_image(frame))

    # Convert RGB back to BGR to match OpenCV's display format
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
Example n. 7
            out.write(scaled_frame)
            frame_index += 1
            list_file.write(str(frame_index) + ' ')
            for box in boxs:
                list_file.write(
                    str(box[0]) + ' ' + str(box[1]) + ' ' +
                    str(box[2]) + ' ' + str(box[3]) + ' ')
            list_file.write('\n')

        fps = (fps + (1. / (time.time() - t1))) / 2
        # print("fps= %f"%(fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()


if __name__ == '__main__':

    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    parser.add_argument('--video_path', help='Please enter the video path')

    main(YOLO(), parser.parse_args())
Example n. 8
import sys

if len(sys.argv) < 2:
    print("Usage: $ python {0} [video_path] [output_path(optional)]", sys.argv[0])
    exit()

from yolo import YOLO
from yolo import detect_video

if __name__ == '__main__':
    #detect_video(YOLO(),"railway.avi")
    video_path = sys.argv[1]
    if len(sys.argv) > 2:
        output_path = sys.argv[2]
        detect_video(YOLO(), video_path, output_path)
    else:
        detect_video(YOLO(), video_path)
Example n. 9
    def start(self):
        logger.info('Starting ImageStreaming Server')
        self.serverInstance = ServerSocket('0.0.0.0', 4444)
        self.serverInstance.startReceiveRecentImage(self.recentImage)
        logger.info('Started Image Streaming Server')

        logger.info('Starting Yolo Instance')
        self.yoloInstance = YOLO()
        logger.info('Started Yolo Instance')

        logger.info('Starting DeepSort Instance')
        encoder = gdet.create_box_encoder('model_data/mars-small128.pb', batch_size=1)
        metric = nn_matching.NearestNeighborDistanceMetric("cosine", 0.3, None)
        tracker = Tracker(metric)
        logger.info('Started DeepSort Instance')

        logger.info('Starting MaskRCNN Instance')
        self.maskrcnnInstance = CrosswalkMask()
        logger.info('Started MaskRCNN Instance')


        testImage = cv2.imread('crosswalk14.PNG')
        self.recentImage.append(testImage)

        fps = 0.0
        recent_tracktime = time.time()
        recent_crosswalktime = time.time()
        class_names = {0: 'bg', 1: 'crosswalk'}

        # self.redetected_crosswalk()

        while True:

            t1 = time.time()
            if not len(self.recentImage): continue

            # if self.bestCrosswalk == None: self.redetected_crosswalk()

            #if time.time() - recent_crosswalktime > 30:
            #    recent_crosswalktime = time.time()
            #    self.redetected_crosswalk()

            image = self.recentImage[0]

            ### Object detection part
            image_pil = Image.fromarray(image)
            boxs, labels = self.yoloInstance.detect_image(image_pil)

            # print("box_num",len(boxs))
            features = encoder(image, boxs)

            # Wrap boxes into Detection objects (the confidence score is fixed at 0.5 here).
            detections = [Detection(bbox, 0.5, feature, label) for bbox, feature, label in zip(boxs, features, labels)]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(boxes, 1.0, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            # image = visualize.display_instances(image, self.bestCrosswalk['rois'], self.bestCrosswalk['masks'], self.bestCrosswalk['class_ids'], class_names, self.bestCrosswalk['scores'],
            #                            title="Predictions")

            scaled_frame = cv2.resize(image, dsize=(image.shape[1] * self.imageScale, image.shape[0] * self.imageScale))

            for track in tracker.tracks:
                #if frame_index % 5 == 0:
                if time.time() - recent_tracktime > 0.1:
                    self.objHistoryManager.insertTrack(deepcopy(track))
                    recent_tracktime = time.time()
                bbox = track.to_tlbr() * self.imageScale
                cv2.rectangle(scaled_frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 1)

                if not track.is_confirmed() or track.time_since_update > 1:
                    continue

                # cv2.putText(scaled_frame, str(track.track_id),(int(bbox[0]), int(bbox[1])), 1, 0.5 * scale, (255,255,255), 1)

            self.objHistoryManager.drawOnImage(scaled_frame, self.imageScale)

            for det in detections:
                bbox = det.to_tlbr() * self.imageScale
                cv2.rectangle(scaled_frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255),
                              1)
                cv2.putText(scaled_frame, str(det.label), (int(bbox[0]), int(bbox[1])), 1, 0.5 * self.imageScale, (255, 255, 255),
                            1)
            cv2.putText(scaled_frame, "FPS %.1f" % (fps), (20, 30), 1, 1 * self.imageScale, (255, 255, 255), 2)



            if ShowImage:
                cv2.imshow('Result Detected Result', scaled_frame)
                cv2.waitKey(1)

            fps = (fps + (1. / (time.time() - t1))) / 2
Example n. 10
        img.save(os.path.join(outdir, os.path.basename(jpgfile)))

    yolo.close_session()


FLAGS = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path")
    )

    parser.add_argument(
        '--anchors', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
    )

    parser.add_argument(
        '--classes', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
    )

    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
Example n. 11
from yolo import YOLO, detect_video
from PIL import Image
import cv2
import numpy as np

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    yolo = YOLO()
    while True:
        ret, img = cap.read()
        if not ret:
            break
        # img = cv2.imread("./0.jpg")
        cv2img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV and PIL order color channels differently
        pilimg = Image.fromarray(cv2img)
        r_image = yolo.detect_image(pilimg)
        # r_image.show()
        cv2charimg = cv2.cvtColor(np.array(r_image), cv2.COLOR_RGB2BGR)
        cv2.imshow('img', cv2charimg)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC stops the loop; without this, close_session() was unreachable
            break

    cap.release()
    yolo.close_session()
Example n. 12
from flask import Flask, request, Response
import jsonpickle
import numpy as np
import cv2
import json
from yolo import YOLO

model = 'model_data/yolo_person.h5'
classes = 'model_data/person.txt'
yolo = YOLO(model_path=model, classes_path=classes)
app = Flask(__name__)


# route http posts to this method
@app.route('/api/facedetection', methods=['POST'])
def detection():
    r = request
    nparr = np.frombuffer(r.data, np.uint8)  # np.fromstring is deprecated for binary data
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    yolo.detect_cvImage(img)
    #print(yolo.det_res)
    response = {
        'message': 'image received. size={}x{}'.format(img.shape[1],
                                                       img.shape[0]),
        'yolo_res': yolo.det_res
    }

    return Response(response=json.dumps(response),
                    status=200,
                    mimetype="application/json")
Example n. 13
                frame = cv2.flip(frame, 1)
                roi = hand_detect(frame)
            continue
        cv2.imshow('output', frame_copy)
        print(frame.shape)
        if cv2.waitKey(1) & 0xFF == 27:
            break

    cap.release()
    cv2.destroyAllWindows()


cascade = cv2.CascadeClassifier('haarcascade/fist.xml')
cap = cv2.VideoCapture('video.mp4')

yolo = YOLO("./models/cross-hands.cfg", "./models/cross-hands.weights",
            ["hand"])
yolo.size = 416
yolo.confidence = 0.2


def hand_detect(img, detect_type='haar'):

    cv2.imshow('output', img)
    key = cv2.waitKey(20)
    if key == 27:  # exit on ESC
        cv2.destroyAllWindows()

    hand_img = img.copy()
    if detect_type == 'yolo':
        width, height, inference_time, results = yolo.inference(img)
    elif detect_type == 'haar':
Example n. 14
import random
from cube_solve import solve_cube, generate_cube_string, verify_cube_string_is_valid


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

Class_file = "detect_data/label.names"
Anchor_file = "detect_data/yolo_tiny_anchors.txt"
weights_file = "detect_data/trained_weights_final.h5"

# define YOLO detector
yolo = YOLO(
    **{
        "model_path": weights_file,
        "anchors_path": Anchor_file,
        "classes_path": Class_file,
        "score": 0.5,
        "gpu_num": 1,
        "model_image_size": (416, 416),
    }
)

# same colors as face_detect.py
colors = {
    "green": [35, 170, 80],
    "blue": [220, 75, 40],
    "red": [50, 37, 125],
    "yellow": [50, 140, 130],
    "white": [230, 150, 140],
    "orange": [50, 70, 150]
}
random_color = random.choice(list(colors.values()))
Example n. 15
#-----------------------------------------------------------------------#
#   predict.py integrates single-image prediction, camera detection,
#   FPS testing, directory-sweep detection, and more into one .py file;
#   switch between them by setting `mode`.
#-----------------------------------------------------------------------#
import time

import cv2
import numpy as np
from PIL import Image

from yolo import YOLO

if __name__ == "__main__":
    yolo = YOLO()
    #----------------------------------------------------------------------------------------------------------#
    #   mode specifies the test mode:
    #   'predict'      single-image prediction; to modify the prediction flow (save the image, crop objects, etc.), read the detailed comments below first
    #   'video'        video detection; a camera or a video file can be used, see the comments below
    #   'fps'          FPS test, using street.jpg inside the img folder, see the comments below
    #   'dir_predict'  sweep a folder, detect, and save the results; sweeps img and saves to img_out by default, see the comments below
    #----------------------------------------------------------------------------------------------------------#
    mode = "predict"
    #----------------------------------------------------------------------------------------------------------#
    #   video_path specifies the path of the video; video_path=0 means detect from the camera
    #   to detect a video, set e.g. video_path = "xxx.mp4", which reads the file xxx.mp4 in the root directory
    #   video_save_path is the path the video is saved to; video_save_path="" means don't save
    #   to save the video, set e.g. video_save_path = "yyy.mp4", which saves to yyy.mp4 in the root directory
    #   video_fps is the fps of the saved video
    #   video_path, video_save_path and video_fps only take effect when mode='video'
    #   when saving a video, exit with ctrl+c or run to the last frame to complete the save properly
    #----------------------------------------------------------------------------------------------------------#
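    #----------------------------------------------------------------------------------------------------------#
    #   The snippet is cut off before the mode dispatch. What follows is a
    #   minimal sketch of the 'video' branch the comments above describe --
    #   an assumption-based reconstruction, not this repository's exact code.
    #   (Set mode = "video" above to exercise this branch.)
    #----------------------------------------------------------------------------------------------------------#
    video_path      = 0      # assumption: webcam, per the comments above
    video_save_path = ""     # assumption: don't save
    video_fps       = 25.0   # assumption
    if mode == "video":
        capture = cv2.VideoCapture(video_path)
        out = None
        if video_save_path != "":
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)
        while True:
            ret, frame = capture.read()
            if not ret:
                break
            # BGR -> RGB, detect, then RGB -> BGR for display
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = np.array(yolo.detect_image(Image.fromarray(frame)))
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            cv2.imshow("video", frame)
            if out is not None:
                out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        capture.release()
        if out is not None:
            out.release()
        cv2.destroyAllWindows()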
Example n. 16
    def yolo_frames(image_hub, unique_name):
        device = unique_name[1]

        show_detections = False

        gdet = import_module('tools.generate_detections')
        nn_matching = import_module('deep_sort.nn_matching')
        Tracker = import_module('deep_sort.tracker').Tracker

        # Definition of the parameters
        max_cosine_distance = 0.3
        nn_budget = None

        # deep_sort
        model_filename = 'model_data/mars-small128.pb'
        encoder = gdet.create_box_encoder(model_filename, batch_size=1)

        metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
        tracker = Tracker(metric)

        yolo = YOLO()
        nms_max_overlap = 1.0

        num_frames = 0

        current_date = datetime.datetime.now().date()
        count_dict = {}  # initiate dict for storing counts

        total_counter = 0
        up_count = 0
        down_count = 0

        class_counter = Counter()  # store counts of each detected class
        already_counted = deque(maxlen=50)  # temporary memory for storing counted IDs
        intersect_info = []  # initialise intersection list

        memory = {}
        while True:
            cam_id, frame = image_hub.recv_image()
            image_hub.send_reply(b'OK')  # this is needed for the stream to work with REQ/REP pattern
            # image_height, image_width = frame.shape[:2]

            if frame is None:
                break

            num_frames += 1

            '''
            if num_frames % 2 != 0:  # only process frames at set number of frame intervals
                continue
            '''

            image = Image.fromarray(frame[..., ::-1])  # convert bgr to rgb
            boxes, confidence, classes = yolo.detect_image(image)
            features = encoder(frame, boxes)

            detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
                          zip(boxes, confidence, classes, features)]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.cls for d in detections])
            indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)
            #line = [ (int(0.3 * frame.shape[1]), 0),   ( int(0.3 * frame.shape[1]), int(frame.shape[0])) ]
            if cam_id == 'Camera 1':
                line = [ (int(0.3 * frame.shape[1]), 0),   ( int(0.3 * frame.shape[1]), int(frame.shape[0])) ]
            else:
                line = [ (int(0.7 * frame.shape[1]), 0),   ( int(0.7 * frame.shape[1]), int(frame.shape[0])) ]
            
            # draw yellow line
            #cv2.line(frame, line[0], line[1], (0, 255, 255), 2)
            # draw red line
            cv2.line(frame, line[0], line[1], (0, 0, 255), 2)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                track_cls = track.cls  # most common detection class for track

                midpoint = track.tlbr_midpoint(bbox)
                origin_midpoint = (midpoint[0], frame.shape[0] - midpoint[1])  # midpoint relative to the bottom-left origin

                if track.track_id not in memory:
                    memory[track.track_id] = deque(maxlen=2)

                memory[track.track_id].append(midpoint)
                previous_midpoint = memory[track.track_id][0]

                origin_previous_midpoint = (previous_midpoint[0], frame.shape[0] - previous_midpoint[1])

                cv2.line(frame, midpoint, previous_midpoint, (0, 255, 0), 2)

                # Add to counter and get intersection details
                if Camera.intersect(midpoint, previous_midpoint, line[0], line[1]) and track.track_id not in already_counted:
                    class_counter[track_cls] += 1
                    total_counter += 1

                    # draw red line
                    #cv2.line(frame, line[0], line[1], (0, 0, 255), 2)
                    # draw yellow line
                    cv2.line(frame, line[0], line[1], (0, 255, 255), 2)

                    already_counted.append(track.track_id)  # Set already counted for ID to true.

                    intersection_time = datetime.datetime.now() - datetime.timedelta(microseconds=datetime.datetime.now().microsecond)
                    angle = Camera.vector_angle(origin_midpoint, origin_previous_midpoint)
                    intersect_info.append([track_cls, origin_midpoint, angle, intersection_time])

                    if angle > 0:
                        up_count += 1
                    if angle < 0:
                        down_count += 1
                    
                    # 2020-0919-20:26 -송이삭
                    ###
                    total_filename = '{}.txt'.format(cam_id)
                    counts_folder = './counts/'
                    if not os.access(counts_folder + '/total', os.W_OK):
                        os.makedirs(counts_folder + '/total')
                    total_count_file = open(counts_folder + '/total/' + total_filename, 'w')
                    #print('{} writing...'.format(rounded_now))
                    #print('Writing current total count ({}) and directional counts to file.'.format(total_counter))
                    total_count_file.write('camera: {}\ntotal: {}, up: {}, down: {}\ntotal_object: {}'.format(device, str(total_counter), up_count, down_count, class_counter))
                    total_count_file.close()    
                    ### taken from the code at the very bottom
                    
                    # 2020-0919-11:25 -이종호
                    output_html()

                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)  # WHITE BOX
                
                # 200920 11:45 이종호 - removed the green text
                cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0, 1.5e-3 * frame.shape[0], (0, 255, 0), 2)

                if not show_detections:
                    adc = "%.2f" % (track.adc * 100) + "%"  # Average detection confidence
                    cv2.putText(frame, str(track_cls), (int(bbox[0]), int(bbox[3])), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 2)
                    cv2.putText(frame, 'ADC: ' + adc, (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 2)

            # Delete memory of old tracks.
            # This needs to be larger than the number of tracked objects in the frame.
            if len(memory) > 50:
                del memory[list(memory)[0]]

            # Draw total count.
            #cv2.putText(frame, "Total: {} ({} up, {} down)".format(str(total_counter), str(up_count),
            #            str(down_count)), (int(0.05 * frame.shape[1]), int(0.1 * frame.shape[0])), 0,
            #            1.5e-3 * frame.shape[0], (0, 255, 255), 2)

            if show_detections:
                for det in detections:
                    bbox = det.to_tlbr()
                    score = "%.2f" % (det.confidence * 100) + "%"
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)  # BLUE BOX
                    if len(classes) > 0:
                        det_cls = det.cls
                        cv2.putText(frame, str(det_cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
                                    1.5e-3 * frame.shape[0], (0, 255, 0), 2)

            # display counts for each class as they appear
            # 200920 11:26 이종호 edit - moved the text position up
            #y = 0.2 * frame.shape[0]
            y = 0.05 * frame.shape[0]
            for cls in class_counter:
                class_count = class_counter[cls]
                cv2.putText(frame, str(cls) + " " + str(class_count), (int(0.05 * frame.shape[1]), int(y)), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 255), 2)
                y += 0.05 * frame.shape[0]
            
                    
            
            # 2020-0919-20:26 - 송이삭 [removed the time and date from the saved file]
            # calculate current minute
            #now = datetime.datetime.now()
            #rounded_now = now - datetime.timedelta(microseconds=now.microsecond)  # round to nearest second
            #current_minute = now.time().minute

            #if current_minute == 0 and len(count_dict) > 1:
            #    count_dict = {}  # reset counts every hour
            #else:
                # write counts to file for every set interval of the hour
                #write_interval = 5
                #if current_minute % write_interval == 0:  # write to file once only every write_interval minutes
                #    if current_minute not in count_dict:
                #        count_dict[current_minute] = True
                #        total_filename = 'Total counts for {}, {}.txt'.format(current_date, cam_id)
                #        counts_folder = './counts/'
                #        if not os.access(counts_folder + str(current_date) + '/total', os.W_OK):
                #            os.makedirs(counts_folder + str(current_date) + '/total')
                #        total_count_file = open(counts_folder + str(current_date) + '/total/' + total_filename, 'a')
                #        print('{} writing...'.format(rounded_now))
                #        print('Writing current total count ({}) and directional counts to file.'.format(total_counter))
                #        total_count_file.write('{}, {}, {}, {}, {}\n'.format(str(rounded_now), device,
                #                                                             str(total_counter), up_count, down_count))
                #        total_count_file.close()

                        # if class exists in class counter, create file and write counts

                #        if not os.access(counts_folder + str(current_date) + '/classes', os.W_OK):
                #            os.makedirs(counts_folder + str(current_date) + '/classes')
                #        for cls in class_counter:
                #            class_count = class_counter[cls]
                #            print('Writing current {} count ({}) to file.'.format(cls, class_count))
                #            class_filename = 'Class counts for {}, {}.txt'.format(current_date, cam_id)
                #            class_count_file = open(counts_folder + str(current_date) + '/classes/' + class_filename, 'a')
                #            class_count_file.write("{}, {}, {}\n".format(rounded_now, device, str(class_count)))
                #            class_count_file.close()

                        # write intersection details
                #       if not os.access(counts_folder + str(current_date) + '/intersections', os.W_OK):
                #            os.makedirs(counts_folder + str(current_date) + '/intersections')
                #        print('Writing intersection details for {}'.format(cam_id))
                #        intersection_filename = 'Intersection details for {}, {}.txt'.format(current_date, cam_id)
                #        intersection_file = open(counts_folder + str(current_date) + '/intersections/' + intersection_filename, 'a')
                #        for i in intersect_info:
                #            cls = i[0]

                #            midpoint = i[1]
                #            x = midpoint[0]
                #            y = midpoint[1]

                #            angle = i[2]

                #            intersect_time = i[3]

                #            intersection_file.write("{}, {}, {}, {}, {}, {}\n".format(str(intersect_time), device, cls,
                #                                                                      x, y, str(angle)))
                #        intersection_file.close()
                #        intersect_info = []  # reset list after writing

            yield cam_id, frame
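
Camera.intersect and Camera.vector_angle are used above but not shown in this snippet. A plausible standalone sketch of what they compute, offered as an assumption about their behavior: the standard counter-clockwise test for segment intersection, and the signed angle of the displacement vector (whose sign separates the up and down counts):

import math

def ccw(a, b, c):
    # True when the triple (a, b, c) makes a counter-clockwise turn.
    return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])

def intersect(p1, p2, p3, p4):
    # Segments p1-p2 and p3-p4 cross iff each segment's endpoints lie on
    # opposite sides of the other segment.
    return ccw(p1, p3, p4) != ccw(p2, p3, p4) and ccw(p1, p2, p3) != ccw(p1, p2, p4)

def vector_angle(midpoint, previous_midpoint):
    # Signed angle (degrees) of the movement from previous_midpoint to midpoint.
    x = midpoint[0] - previous_midpoint[0]
    y = midpoint[1] - previous_midpoint[1]
    return math.degrees(math.atan2(y, x))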
Example n. 17
    else:
        print('Input %s is not a valid directory' % (img))


options = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model', type=str,
        help='path to model weight file, default ' +
        YOLO.get_defaults("model_path")
    )

    parser.add_argument(
        '--anchors', type=str,
        help='path to anchor definitions, default ' +
        YOLO.get_defaults("anchors_path")
    )

    parser.add_argument(
        '--classes', type=str,
        help='path to class definitions, default ' +
        YOLO.get_defaults("classes_path")
    )

    parser.add_argument(
Example n. 18
# -*- coding:utf-8 -*-
# author:平手友梨奈ii
# e-mail:[email protected]
# datetime:1993/12/01
# filename:my_queue.py
# software: PyCharm

from yolo import YOLO
from PIL import Image

yolo = YOLO()

while True:
    img = input('Input image filename:')
    try:
        image = Image.open(img)
    except Exception:
        print('Open Error! Try again!')
        continue
    else:
        result = yolo.detect_image(image)
        result.show()
        result.save('./img/result_girl.jpg')
yolo.close_session()
Example n. 19
        im_name = im_name[0][24:]
        # Save the image
        print(type(r_image))
        print(im_name)
        r_image.save("31_done/" + im_name + '_done.png')
        #r_image.show()
    #yolo.close_session()


FLAGS = None
if __name__ == '__main__':
    folder = "annotation_snowflower/31"
    files = glob.glob(folder + "/*.jpg")
    for file in files:
        img = file
        parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
        parser.add_argument(
            '--image',
            default=False,
            action="store_true",
            help='Image detection mode, will ignore all positional arguments')
        FLAGS = parser.parse_args()
        output = detect_img(YOLO(**vars(FLAGS)), img)
        #detect_img(YOLO({"image":img}))
        """
        try:
            output.save("17_done"+file)
        except:
            continue
        """
Example n. 20
from yolo import YOLO
from PIL import Image
import cv2
import time

cap = cv2.VideoCapture(0)
time.sleep(1)
ret, frame = cap.read()
cv2.imwrite('led.jpg', frame)  # save the frame so it can be reopened below
cap.release()

image = Image.open('led.jpg')
image.show()

yolo = YOLO()
r_image = yolo.detect_image(image)
r_image.show()
r_image.save('led_find.jpg')
Example n. 21
    cv2.destroyAllWindows()


class video_open:
    def __init__(self, read_type, video_dir):
        # Device 0 for the camera, otherwise the given video path.
        if read_type == 'camera':
            self.readtype = 0
        else:
            self.readtype = video_dir

    def generate_video(self):
        video_capture = cv2.VideoCapture(self.readtype)
        return video_capture


###################### parameters ######################
def parse_args():
    parser = argparse.ArgumentParser(description="Deep SORT")
    parser.add_argument("--read_type",
                        help="camera or video",
                        default='camera',
                        required=False)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    main(YOLO(), args.read_type)
Example n. 22
import sys
sys.path.append('keras-yolo3')

from yolo import YOLO, detect_video

model = YOLO(model_path='./models/v4-12k-adam1e3-train10/ep039-loss16.249-val_loss15.892.h5',
             anchors_path='./keras-yolo3/model_data/yolo_anchors.txt',
             classes_path='./classes-yolo-format.txt',
             score=0.01)

# Pretrained YOLO model
# yolo_model = YOLO(model_path='../../../data/yolov3/yolov3-320.h5',
#                   anchors_path='./keras-yolo3/model_data/yolo_anchors.txt',
#                   classes_path='./keras-yolo3/model_data/coco_classes.txt')

detect_video(model, 0)
Example n. 23
    image_ids = open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Main/test.txt")).read().strip().split()

    os.makedirs(map_out_path, exist_ok=True)
    os.makedirs(os.path.join(map_out_path, 'ground-truth'), exist_ok=True)
    os.makedirs(os.path.join(map_out_path, 'detection-results'), exist_ok=True)
    os.makedirs(os.path.join(map_out_path, 'images-optional'), exist_ok=True)

    class_names, _ = get_classes(classes_path)

    if map_mode == 0 or map_mode == 1:
        print("Load model.")
        yolo = YOLO(confidence = 0.001, nms_iou = 0.5)
        print("Load model done.")

        print("Get predict result.")
        for image_id in tqdm(image_ids):
            image_path  = os.path.join(VOCdevkit_path, "VOC2007/JPEGImages/"+image_id+".jpg")
            image       = Image.open(image_path)
            if map_vis:
                image.save(os.path.join(map_out_path, "images-optional/" + image_id + ".jpg"))
            yolo.get_map_txt(image_id, image, class_names, map_out_path)
        print("Get predict result done.")
        
    if map_mode == 0 or map_mode == 2:
        print("Get ground truth result.")
        for image_id in tqdm(image_ids):
            with open(os.path.join(map_out_path, "ground-truth/"+image_id+".txt"), "w") as new_f:
Example n. 24
            r_image.show()
    yolo.close_session()


FLAGS = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument('--model',
                        type=str,
                        help='path to model weight file, default ' +
                        YOLO.get_defaults("model_path"))

    parser.add_argument('--anchors',
                        type=str,
                        help='path to anchor definitions, default ' +
                        YOLO.get_defaults("anchors_path"))

    parser.add_argument('--classes',
                        type=str,
                        help='path to class definitions, default ' +
                        YOLO.get_defaults("classes_path"))

    parser.add_argument('--gpu_num',
                        type=int,
                        help='Number of GPU to use, default ' +
                        str(YOLO.get_defaults("gpu_num")))
Example n. 25
class SignDetector(object):
    def __init__(self):

        FLAGS = {
            "model_path": sys.path[1] + cf.sign_weight_h5_path,
            "anchors_path": sys.path[1] + cf.sign_anchor_path,
            "classes_path": sys.path[1] + cf.sign_class_name_path,
            "score": 0.9,
            "iou": 0.45,
            "model_image_size": (416, 416),
            "gpu_num": 1,
        }
        self.yolo = YOLO(**FLAGS)  # unpack the dict as keyword arguments, as the other examples on this page do

        return

    def signDetector(self, image):

        im_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        out_boxes, out_scores, out_classes = self.yolo.detect_image(im_pil)

        has_sign = False
        box = None

        if len(out_boxes) > 0:
            rs = True

            # print('Found', len(out_boxes), 'boxes', out_boxes, out_scores, out_classes))

            c = np.argmax(out_scores)
            class_id = out_classes[c]
            predicted_class = self.yolo.class_names[class_id]
            top, left, bottom, right = out_boxes[c]
            score = out_scores[c]
            left = int(left)
            right = int(right)
            top = int(top)
            bottom = int(bottom)
            w = right - left
            h = bottom - top
            if w >= 40 and h >= 40:
                label = 'Sign {} - {} ({},{})'.format(predicted_class, score,
                                                      w, h)
                box = [
                    left + cf.detect_sign_region['left'], top, w, h,
                    predicted_class, score, label
                ]
                cv2.rectangle(image, (left, top), (right, bottom), blue)

                return predicted_class, box

        return None, None

    def signRecognize(self):
        cf.k += 1
        # if cf.k >= 0 and cf.k % cf.sign_detect_step == 0:
        if True:
            # print('signRecognize', cf.k, cf.sign_detect_step, cf.signTrack)
            if cf.signTrack != -1 and abs(cf.signTrack - cf.k) >= 10:
                cf.sign = []
                cf.signTrack = -1
                print("clear")
                # cf.maxspeed = maxspeed

            img_detect_sign = cf.img_rgb_raw[
                cf.detect_sign_region['top']:cf.detect_sign_region['bottom'],
                cf.detect_sign_region['left']:cf.detect_sign_region['right']]

            cf.signSignal = None
            result, box = self.signDetector(img_detect_sign)
            if result is not None:
                # cf.sign.append(result)
                # # cf.speed = 30
                # # cf.maxspeed = 30  # reduce the speed when a sign is detected
                # cf.signTrack = cf.k
                # print('Sign', cf.sign)
                # if self.acceptSign(result):
                if result:
                    if result == 'thang':
                        # print("THANG")
                        return "thang_certain", box
                    elif result == 'phai':
                        # print("PHAI")
                        return "phai_certain", box
                return result, box
        return None, None

    def acceptSign(self, value):
        if len(cf.sign) >= 2:
            for v in cf.sign:
                if v != value:
                    cf.sign = []
                    return False
            cf.sign = []
            cf.k = -100  # skip the next 100 frames
            return True
        else:
            cf.k = cf.k - cf.sign_detect_step + 1  # consider 2 consecutive frames
            return None
Example n. 26
import sys
import numpy as np
from yolo import YOLO
from yolo_defect import YOLO_Defect
from PIL import Image
import os
import cv2
import keras
import glob
keras.backend.clear_session()
FLAGS = {}
detector = YOLO(**FLAGS)
defection = YOLO_Defect(**FLAGS)
path = "./test_insulator/*.jpg"
outdir = "./result"
for jpgfile in glob.glob(path):
    name = os.path.basename(jpgfile)
    print(name)
    img = Image.open(jpgfile)
    img1 = cv2.imread(jpgfile)
    result = detector.detect_image(img)
    leng = len(result)
    print(leng)
    if leng != 0:
        for i in range(leng):
            for j in range(4):
                if result[i][j] < 0:
                    result[i][j] = 0
            rect = img1[int(result[i][0]):int(result[i][2]),
                        int(result[i][1]):int(result[i][3])]
            rect = Image.fromarray(rect)
Example n. 27
        else:
            r_image = yolo.detect_image(image)
            r_image.show()
    yolo.close_session()

FLAGS = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path")
    )

    parser.add_argument(
        '--anchors', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
    )

    parser.add_argument(
        '--classes', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
    )

    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
Example n. 28
        f.close()

    yolo.close_session()

FLAGS = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model_path', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path")
    )

    parser.add_argument(
        '--anchors_path', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
    )

    parser.add_argument(
        '--classes_path', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
    )

    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
Example n. 29
                        type=str,
                        required=False,
                        default=0,
                        help="Video input path")

    parser.add_argument("--output",
                        nargs='?',
                        type=str,
                        default="",
                        help="[Optional] Video output path")

    FLAGS = parser.parse_args()

    if FLAGS.network == "yolo":
        from yolo import YOLO
        model = YOLO(**vars(FLAGS))

    elif FLAGS.network == "mrcnn":
        from mask_rcnn import MaskRCNN
        model = MaskRCNN(FLAGS.model_path, FLAGS.classes_path)

    elif FLAGS.network == "keras-centernet":
        from centernet import CENTERNET
        model = CENTERNET(FLAGS.model_path, FLAGS.classes_path)

    else:
        parser.error("Unknown network")

    detect_img(model)
    # detect_video(model, FLAGS.input, FLAGS.output)
Example n. 30
        else:
            r_image = yolo.detect_image(image)
            r_image.show()
    yolo.close_session()

FLAGS = None

if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument(
        '--model', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path")
    )

    parser.add_argument(
        '--anchors', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path")
    )

    parser.add_argument(
        '--classes', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path")
    )

    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num"))
Example n. 31
def run():

    # step 1: initialization
    # set configures: parameters are set here
    start_pre = time.time()
    configs = Configs()

    # detector: detect objects in a frame; return us detected results
    # detector = faceboxes()

    # video helper: read in / write out video frames
    video_helper = VideoHelper(configs)
    yolo = YOLO()
    # object controller: objects are managed in this class
    object_controller = MultipleObjectController(configs, video_helper)
    print("Pre Time: ", (time.time() - start_pre) * 1000, " ms")

    # step 2: main loop
    cur_frame_counter = 0
    detection_loop_counter = 0
    while video_helper.not_finished(cur_frame_counter):
        print(
            "####################################################### frame: ",
            cur_frame_counter)

        # get frame from video
        # frame is the raw frame, frame_show is used to show the result
        frame_PIL, frame, frame_show = video_helper.get_frame()
        """ Detection Part """
        # If we detect on every frame
        # (detection is expensive, so to keep real-time speed we may
        # skip some frames between detections):
        if configs.NUM_JUMP_FRAMES == 0:
            # detected results: [{'tag1':[bbx1]}, {'tag2':[bbx2]}, ..., {'tagn':[bbxn]}]
            detects = yolo.detect_image(frame_PIL)  #(frame,cur_frame_counter)
            # for detect in detects:
            # update current bbxes for each instance
            start_time_of_tracking = time.time()
            object_controller.update(detects, cur_frame_counter, frame)
            time_spend = time.time() - start_time_of_tracking
            print("Tracking Time: ", time_spend * 1000, " ms.")
        else:
            # we skip detection on some of the frames
            if detection_loop_counter % configs.NUM_JUMP_FRAMES == 0:
                start_turn = time.time()
                # here we need to detect the frame
                detection_loop_counter = 0
                detects = yolo.detect_image(
                    frame_PIL)  # (frame,cur_frame_counter)
                object_controller.update(detects, cur_frame_counter, frame)
                print("detection_loop_counter time span: ",
                      (time.time() - start_turn) * 1000, " ms")
            else:
                # no detection needed on this frame
                start_turn_no_detect = time.time()
                object_controller.update_without_detection(
                    cur_frame_counter, frame)
                #object_controller.update_still(cur_frame_counter)
                print("detection_loop_counter time span: ",
                      (time.time() - start_turn_no_detect) * 1000, " ms")

        # Visualization
        visualizer = Visualizer(configs)
        show_temporal_information = True
        visualizer.drawing_tracking(frame_show, object_controller.instances,
                                    cur_frame_counter,
                                    show_temporal_information)

        cur_frame_counter += 1
        detection_loop_counter += 1
        print()

    video_helper.end()