Example #1
def get_detect(camer_id):
    global log_list
    # print("[INFO] Confirming inputs...")
    # mylog.addHandler(filehandler)

    image_paths = []
    batch_size = 1
    count = 0
    count_hat = 0
    count_frock = 0
    count_glove = 0
    count_shoe = 0
    raw_hat = 0
    raw_frock = 0
    raw_glove = 0
    raw_shoe = 0
    label_hat = '1'
    label_frock = '1'
    label_glove = '1'
    label_shoe = '1'

    faces_label = 1000
    count_faces = 0
    #    last_faces = faces_label

    video_reader = cv2.VideoCapture(camer_id)
    video_reader.set(3, 640)  # 3 == cv2.CAP_PROP_FRAME_WIDTH

    with ThreadPoolExecutor(10) as executor:
        while True:
            ret, image = video_reader.read()
            if ret:
                image_paths += [image]
                count += 1
                # cv2.imshow('Image',image)
            if (len(image_paths) == batch_size) or (not ret
                                                    and len(image_paths) > 0):
                for img in image_paths:
                    out = preson_detect(img)
                    img_h, img_w, img_ch = img.shape
                    for i in range(len(out)):
                        start = time.time()
                        x0, y0, x1, y1 = out[i, 3:7]
                        # print ("coordinate:",x0,y0,x1,y1)
                        start_x = int(img_w * x0)
                        start_y = int(img_h * y0)
                        end_x = int(img_w * x1)
                        end_y = int(img_h * y1)
                        if out[i, 2] < 0.8:  # confidence filter
                            continue
                        if start_x < 30 or end_x > 580:  # skip boxes touching the frame edges
                            continue
                        w = end_x - start_x
                        h = end_y - start_y
                        images = img[start_y:end_y, start_x:end_x]
                        image_h, image_w, image_ch = images.shape
                        if image_h < norm_size or image_w < norm_size or image_h < 300 or image_w > 200:
                            continue
                            #    print("image")
                        ####head_start##
                        head_width = int(h * 0.25 + h * 0.05)
                        if (int((start_x + end_x - head_width) / 2) < start_x
                                or int((start_x + end_x + head_width) / 2) >
                                end_x):
                            continue
                        if int((start_x + end_x - head_width) /
                               2) > start_x + 0.1 * w:
                            image_head = img[
                                int(start_y - h * 0.05):int(start_y +
                                                            h * 0.25),
                                int((start_x + end_x - head_width) /
                                    2):int((start_x + end_x + head_width) / 2)]
                        else:
                            image_head = img[int(start_y -
                                                 h * 0.05):int(start_y +
                                                               h * 0.25),
                                             int(start_x +
                                                 0.1 * w):int(end_x - 0.1 * w)]
                        ####head_end##

                        image_hand = img[int(start_y + h * 0.2):int(start_y +
                                                                    h * 0.5),
                                         (start_x - 15):(end_x + 20)]
                        image_shoe = img[int(start_y + h * 0.8):(end_y),
                                         start_x:end_x]
                        if image_shoe.shape[0] == 0:
                            continue
                        #    print("image_head",image_head)
                        if image_hand.shape[0] == 0:
                            continue
                        if image_hand.shape[1] == 0:
                            continue
                        if image_head.shape[0] == 0:
                            continue
                        image_body = img[int(start_y + w * 0.6):end_y,
                                         start_x:end_x]
                        if image_body.shape[0] == 0:
                            continue
                        label_hat = predict_hat(image_head)  # '0' means compliant
                        if raw_hat == int(label_hat):
                            count_hat += 1
                        else:
                            raw_hat = int(label_hat)
                            count_hat = 0

                        if label_hat == '1':
                            if img.shape[0] == 0 or img.shape[1] == 0:
                                continue

                        label_frock = predict_frock(image_body)  # '0' means compliant
                        if raw_frock == int(label_frock):
                            count_frock += 1
                        else:
                            raw_frock = int(label_frock)
                            count_frock = 0

                        if label_frock == '0':
                            label_glove = predict_glove(image_hand)

                        if raw_glove == int(label_glove):
                            count_glove += 1
                        else:
                            raw_glove = int(label_glove)
                            count_glove = 0

                        label_shoe = predict_shoe(image_shoe)
                        # if int(label_shoe) == 0:
                        #     label_shoe = predict_edge(image_shoe)
                        if raw_shoe == int(label_shoe):
                            count_shoe += 1
                        else:
                            raw_shoe = int(label_shoe)
                            count_shoe = 0

                        label_dic = {
                            "label_hat": label_hat, "label_frock": label_frock,
                            "label_glove": label_glove, "label_shoe": label_shoe,
                            "count_hat": count_hat, "count_frock": count_frock,
                            "count_glove": count_glove, "count_shoe": count_shoe
                        }
                        local_list = [start_x, start_y, end_x, end_y]
                        count_list = [
                            count_hat, count_frock, count_glove, count_shoe
                        ]
                        drew_rectangle(img, label_dic, local_list, count_list)
                        if count_faces < 1:
                            faces_label = "unknown"
#                        myThread(label_dic,count_list,count,faces_label,img,camer_id+1).start()
                        executor.submit(myThread, label_dic, count_list, count,
                                        faces_label, img, camer_id + 1)

                        # upload_log(label_dic,count_list,count,faces_label,img)
                        end = time.time()
                        print("=" * 50, (end - start) * 1000)
                    image_paths = []
                    # cv2.imshow("image", img)
                    ret, jpeg = cv2.imencode('.jpg', img)
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' +
                           jpeg.tobytes() + b'\r\n\r\n')
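
Each of these generators yields multipart JPEG chunks, so the natural consumer is an HTTP streaming endpoint. The source does not show one; a minimal Flask route consuming the get_detect generator above might look like this (an assumed sketch, not part of the original):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed/<int:camer_id>')
def video_feed(camer_id):
    # multipart/x-mixed-replace makes the browser replace the image with
    # every new part, producing an MJPEG-style live view
    return Response(get_detect(camer_id),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
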
Example #2
def get_detect(id_video):

    global control_color
    dq = deque(maxlen=1)
    t_read_video = threading.Thread(target=read_video, args=(dq, ))
    # t_read_video = threading.Thread(target=read_video_vlc, args=(dq,))
    t_put_data = threading.Thread(target=put_data)
    t_read_video.start()
    t_put_data.start()

    # myout = save_video(video_reader, "./video.mp4", sz)
    my_track_dict = {"0": None}  #save the info of track_id
    track_smooth_dict = {}  #smooth the imshow

    #创建追踪器
    tracker = cv2.MultiTracker_create()
    init_once = False

    save_file = mk_dir()
    num = 0  # loop iterations with data
    success_num = 0  # number of successful detections
    t1 = time.time()  # start time for the re-detection timer
    my_result = {}  # buffer for cached results
    index_key = 0
    lenth_boxs = 0

    while True:

        # avoid the memory error.
        if len(my_track_dict) > 50:
            my_track_dict = {}  # save the info of track_id
        if len(my_result) > 50:
            my_result = {}  # buffer for cached results
        # print(len(my_track_dict))

        #read camera data
        if dq:
            img = dq.popleft()
            print("=====================", img.shape)
        else:
            time.sleep(0.05)
            continue

        start_time = time.time()  # start timing: measure per-frame processing time
        num += 1

        if num % 100 == 1:
            cv2.imwrite(save_file + "/_{}.jpg".format(num), img)
        img_h, img_w, img_ch = img.shape
        print(img.shape)

        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        #the predict of person.
        boxs, confidence, class_names = [], [], []
        out = preson_detect(img)

        # keep only person boxes inside the configured area; transform the detections into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                # print(out[i])
                left = int(out[i, 3] * img_w)
                top = int(out[i, 4] * img_h)
                p_w = int(out[i, 5] * img_w - out[i, 3] * img_w)
                p_h = int(out[i, 6] * img_h - out[i, 4] * img_h)

                right = left + p_w
                bottom = top + p_h

                #detect the person in setting area.
                point1 = [int((left + right) / 2), bottom]
                # my_index = inner_point(point1)
                my_index = True
                print("my_index", my_index)
                if my_index:
                    boxs.append([left, top, p_w, p_h])
                    class_names.append("person")
                    confidence.append(out[i, 2])

        #========my_setting==============
        t2 = time.time()
        detect_time = t2 - t1
        control_time = 2  # re-detect at most once every control_time seconds
        control_buttom = False
        if detect_time > control_time:
            control_buttom = True
            t1 = time.time()

        # if not init_once or control_time>5 or lenth_boxs!=len(boxs):
        if not init_once or control_buttom:
            # tracker = cv2.MultiTracker_create()
            for one_box in boxs:
                ok = tracker.add(cv2.TrackerMIL_create(), frame,
                                 tuple(one_box))
            init_once = True
            index_key = max([int(i_key) for i_key in my_track_dict.keys()])

        ok, boxes = tracker.update(frame)
        print(ok, boxes)

        for index, newbox in enumerate(boxes):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))

            cv2.rectangle(frame, p1, p2, (200, 0, 0))
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, str(index),
                        (int(newbox[0]), int(newbox[1] - 20)), font, 1.2,
                        (0, 0, 225), 2)

        cv2.namedWindow("aidong_unicom", 0)
        cv2.imshow('aidong_unicom', frame)
        key = cv2.waitKey(33)
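
Every example after the first hands a deque(maxlen=1) to a read_video thread that is not included in the source. A minimal sketch consistent with how the deque is consumed (latest frame only, stale frames silently dropped) could be:

def read_video(dq, rtsp_addr=0):  # the default source index is an assumption
    cap = cv2.VideoCapture(rtsp_addr)
    while True:
        ret, frame = cap.read()
        if not ret:
            time.sleep(0.1)  # camera hiccup: back off and retry
            continue
        dq.append(frame)  # with maxlen=1 the stale frame is discarded automatically
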
Example #3
def get_detect(id_video):

    global control_color
    from collections import deque
    dq = deque(maxlen=1)
    t1 = threading.Thread(target=read_video, args=(dq,))
    t1.start() 

    my_num = 0
    while True:
        start_time = time.time()
        if dq:
            img = dq.pop()
        else:
            time.sleep(0.02)  # no frame yet: brief back-off
            continue
        
        show_image = img.copy()
        out = preson_detect(img)
        img_h, img_w, img_ch = img.shape
                           
        for i in range(len(out)):
            # filter out person boxes that don't meet the confidence threshold
            if out[i, 2] < 0.7:
                continue
            
            # convert normalized box coordinates to pixels
            x0, y0, x1, y1 = out[i, 3:7]
            start_x = int(img_w * x0)
            start_y = int(img_h * y0)
            end_x = int(img_w * x1)
            end_y = int(img_h * y1)
            point1 = [int((start_x+end_x)/2), end_y] 

            my_index = inner_point(point1)
            if not my_index:
                continue

            clip_images = img[start_y:end_y, start_x:end_x]
            h, w, c = clip_images.shape
            # discard person crops that are too small
            if h > 96 and w > 48:
                # 3. save the crop (rename as needed)
                my_num += 1
                #save_name = os.path.join(save_dir, "v1_person{}.jpg".format(my_num))
                #cv2.imwrite(save_name, clip_images)

                label_dict = muti_attr(clip_images)
                print(label_dict)
                
#                i_count=0
#                font=cv2.FONT_HERSHEY_COMPLEX
                if "coat" in label_dict.keys():
                    #4、在原始图像上画矩形。
                    if label_dict["coat"] == "Yes":
                        cv2.rectangle(show_image, (start_x, start_y), (end_x, end_y), (0, 255, 0), 2)
                    else:
                        cv2.rectangle(show_image, (start_x, start_y), (end_x, end_y), (0, 0, 255), 2)     
                        
                    # 5. add attribute labels to the original image
                    if control_color:
                        show_image = draw_person_attr(show_image, label_dict, end_x, start_y)
                       # for key in label_dict:
                       #     text=key+":"+str(label_dict[key])
                       #     if label_dict[key] == "Yes":
                       #         cv2.putText(show_image,text,(end_x,start_y+i_count),font,0.7,(0,255,0),2)
                       #     else:
                       #         cv2.putText(show_image,text,(end_x,start_y+i_count),font,0.7,(0,0,255),2)
                       #     i_count+=20
        
        draw_muti(show_image)        
        end_time = time.time()
        mytime = (end_time-start_time)*1000
        print("================", show_image.shape,mytime)

        ret2, jpeg = cv2.imencode('.jpg', show_image)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
        
        # cv2.namedWindow("image{}".format("benchi"), cv2.WINDOW_NORMAL)    
        # cv2.imshow("image{}".format("benchi"), show_image)
        # key = cv2.waitKey(1)
        # if key == 27:
        #     break
        
    cv2.destroyAllWindows()
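
inner_point, the fence test used above and in later examples, is likewise not shown. A plausible implementation with OpenCV's point-in-polygon test, assuming a module-level fence polygon (the default below is hypothetical), is:

import numpy as np

FENCE_POLYGON = [[0, 0], [0, 720], [1280, 720], [1280, 0]]  # hypothetical default fence

def inner_point(point, polygon=None):
    contour = np.array(polygon if polygon is not None else FENCE_POLYGON, dtype=np.int32)
    # pointPolygonTest returns >0 inside, 0 on the edge, <0 outside
    return cv2.pointPolygonTest(contour, (float(point[0]), float(point[1])), False) >= 0
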
Example #4
def get_detect(id_video):
    global control_color
    dq = deque(maxlen=1)

    t_read_video = threading.Thread(target=read_video, args=(dq, ))
    t_put_data = threading.Thread(target=put_data)
    t_read_video.start()
    t_put_data.start()

    # myout = save_video(video_reader, "./video.mp4", sz)
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    counter = []
    my_track_dict = {}  #save the info of track_id
    track_smooth_dict = {}  #smooth the imshow
    pts = [deque(maxlen=30) for _ in range(9999)]

    #deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)
    list_file = open('detection_rslt.txt', 'w')

    save_file = mk_dir()
    num = 0
    t1 = time.time()

    while True:
        #avoid the memory error.
        if len(my_track_dict) > 50:
            my_track_dict = {}
        print(len(my_track_dict))

        if dq:
            img = dq.pop()
        else:
            time.sleep(0.05)
            continue

        start_time = time.time()

        num += 1
        if num % 500 == 1:
            cv2.imwrite(save_file + "/_{}.jpg".format(num), img)
        img_h, img_w, img_ch = img.shape
        print(img.shape)
        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        #the predict of person.
        boxs, confidence, class_names = [], [], []
        out = preson_detect(img)

        # transform the object detection data into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                # print(out[i])
                left = int(out[i, 3] * img_w)
                top = int(out[i, 4] * img_h)
                p_w = int(out[i, 5] * img_w - out[i, 3] * img_w)
                p_h = int(out[i, 6] * img_h - out[i, 4] * img_h)

                right = left + p_w
                bottom = top + p_h

                #detect the person in setting area.
                point1 = [int((left + right) / 2), bottom]
                my_index = inner_point(point1)
                if my_index:
                    boxs.append([left, top, p_w, p_h])
                    class_names.append("person")
                    confidence.append(out[i, 2])

        # start using the tracker
        features = encoder(frame, boxs)
        # score to 1.0 here.
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]
        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        i = int(0)
        indexIDs = []
        # set the re-detection timer
        t2 = time.time()
        detect_time = t2 - t1
        #========my_setting==============
        control_time = 0.2  # re-detect at most once every control_time seconds
        if detect_time > control_time:
            t1 = time.time()

        for det, track in zip(detections, tracker.tracks):
            if not track.is_confirmed() or track.time_since_update > 1:
                continue

            #print(track.track_id)
            #draw the boxs of object detection.
            pbox = det.to_tlbr()
            #cv2.rectangle(frame,(int(pbox[0]), int(pbox[1])), (int(pbox[2]), int(pbox[3])),(255,255,255), 2)

            my_key = str(int(track.track_id))
            #========my_setting==============
            # re-detect when a new track id appears or the re-detect timer has elapsed.
            if my_key not in my_track_dict or detect_time > control_time:
                # print(my_key)
                # print(my_track_dict.keys())
                #the code of processing the person box.
                label_dict = get_labels(img, pbox)
                print("**" * 20, label_dict)

                if label_dict is None:
                    continue

                if "coat" not in label_dict:
                    continue
                my_track_dict[my_key] = label_dict

            # draw the attr of person.
            frame = draw_person_attr(frame, my_track_dict[my_key], pbox,
                                     control_color)

            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]

            #define the color of rectangle.
            if my_track_dict[my_key]["coat"] == "Yes":
                color_rect = (0, 255, 0)
            else:
                color_rect = (0, 0, 255)

            #center_loc = [int((bbox[0]+bbox[2])/2), int((bbox[1]+bbox[3])/2)]
            if my_key not in track_smooth_dict.keys():
                print("---------------------------------------------------->")
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (color_rect), 3)
                track_smooth_dict[my_key] = bbox
            else:
                fbox = track_smooth_dict[my_key]
                a = int((bbox[0] + fbox[0]) / 2)
                b = int((bbox[1] + fbox[1]) / 2)
                c = int((bbox[2] + fbox[2]) / 2)
                d = int((bbox[3] + fbox[3]) / 2)
                cv2.rectangle(frame, (a, b), (c, d), (color_rect), 3)
                track_smooth_dict[my_key] = bbox

            #draw the boxs of track.
            #cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3)
            if True:
                cv2.putText(frame, str(track.track_id),
                            (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150,
                            (color), 2)
                if len(class_names) > 0:
                    class_name = class_names[0]
                    cv2.putText(frame, str(class_names[0]),
                                (int(bbox[0]), int(bbox[1] - 20)), 0,
                                5e-3 * 150, (color), 2)
            i += 1

            # throttle the upload frequency
            if num % 200 == 1:
                my_result = my_track_dict[my_key]
                pic_name = str(int(time.time())) + "_" + my_key
                # put_data(my_key, my_result, frame)
                q_put_img.append([pic_name, my_result, frame])

        count = len(set(counter))
        # draw the guard line.
        draw_muti(frame)

        # cv2.putText(frame, "Total Pedestrian Counter: "+str(count),(int(20), int(120)),0, 5e-3 * 200, (0,255,0),2)
        # cv2.putText(frame, "Current Pedestrian Counter: "+str(i),(int(20), int(80)),0, 5e-3 * 200, (0,255,0),2)

        end_time = time.time()
        my_one_time = (end_time - start_time) * 1000
        print("====={}=====".format(num), my_one_time)

        frame = cv2.resize(frame, (640, 360))
        ret2, jpeg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               jpeg.tobytes() + b'\r\n\r\n')
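
The box smoothing above averages the current track box with the previous frame's box before drawing. Factored into a helper (an editorial sketch, not in the source) it reads:

def smooth_box(prev_box, cur_box):
    # the midpoint of consecutive boxes halves per-frame jitter
    return [int((p + c) / 2) for p, c in zip(prev_box, cur_box)]

Here track_smooth_dict[my_key] would supply prev_box and track.to_tlbr() cur_box.
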
Example #5
def get_detect(id_video):
    global control_color
    dq = deque(maxlen=1)
    thread_start(dq)

    fbox = []
    bbox = []
    person_dit = {}
    threshold_iou = 0.8

    my_track_dict = {}

    save_file = mk_dir()
    num = 0
    t1 = time.time()
    while True:
        #avoid the memory error.
        if len(my_track_dict) > 50:
            my_track_dict = {}
        if dq:
            img = dq.pop()
        else:
            time.sleep(0.05)
            continue

        start_time = time.time()

        num += 1
        if num % 5000 == 1:
            cv2.imwrite(save_file + "/_{}.jpg".format(num), img)

        img_h, img_w, img_ch = img.shape
        print(img.shape)
        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        #the predict of person.
        out = preson_detect(img)
        # print("------------->", out)

        boxes = []

        # transform the object detection data into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                # print(out[i])
                left = int(out[i, 3] * img_w)
                top = int(out[i, 4] * img_h)
                p_w = int(out[i, 5] * img_w - out[i, 3] * img_w)
                p_h = int(out[i, 6] * img_h - out[i, 4] * img_h)

                right = left + p_w
                bottom = top + p_h

                #detect the person in setting area.
                point1 = [int((left + right) / 2), bottom]
                my_index = inner_point(point1)
                # if my_index:
                #========my_setting==============
                if True:
                    boxes.append([left, top, right, bottom])  # corner format, as drawn below
                    print(boxes)
        if not boxes:
            # time.sleep(0.02)
            continue

        # tracking algorithm
        if not fbox:
            fbox = boxes
            init_len = len(fbox)
            for i_d in range(1, init_len + 1):
                person_dit[str(i_d)] = fbox[i_d - 1]
            continue
        else:
            bbox = boxes

        print("---------->fbox", fbox)
        print("---------->bbox", bbox)
        person_new = person_dit.copy()
        # drop entries that were in the previous frame but vanished in this one,
        # and update the coordinates of entries that persist
        for key in person_dit:
            for one in bbox:
                my_iou = iou_cal(person_new[key], one)
                if my_iou > threshold_iou:
                    person_new[key] = new_loc(person_new[key], one)
                    break
            else:
                del person_new[key]

        # add entries that are new in this frame and absent from the previous one
        for one in bbox:
            for key in person_dit:
                my_iou = iou_cal(person_dit[key], one)
                if my_iou > threshold_iou:
                    break
            else:
                init_len += 1
                person_new[str(init_len)] = one  # set to the value from this frame

        fbox = bbox

        if not person_dit:
            continue

        t2 = time.time()
        detect_time = t2 - t1
        #========my_setting==============
        control_time = 1  # re-detect at most once every control_time seconds
        if detect_time > control_time:
            t1 = time.time()

        indexIDs = []
        for my_key in person_new:
            # re-detect when a new track id appears or the re-detect timer has elapsed.
            if my_key not in my_track_dict or detect_time > control_time:
                # print(my_key)
                # print(my_track_dict.keys())
                #the code of processing the person box.
                label_dict = get_labels(img, person_new[my_key])
                print("**" * 20, label_dict)

                if label_dict is None:
                    continue

                if "coat" not in label_dict:
                    continue
                my_track_dict[my_key] = label_dict
                # draw the attr of person.

            indexIDs.append(int(my_key))
            frame = draw_person_attr(frame, my_track_dict[my_key],
                                     person_new[my_key], control_color)
            # color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]

            #define the color of rectangle.
            if my_track_dict[my_key]["coat"] == "Yes":
                color_rect = (0, 255, 0)
            else:
                color_rect = (0, 0, 255)

            cv2.rectangle(
                frame,
                (int(person_new[my_key][0]), int(person_new[my_key][1])),
                (int(person_new[my_key][2]), int(person_new[my_key][3])),
                (color_rect), 3)

            #draw the boxs of track.
            #cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3)
            if True:
                cv2.putText(frame, str(my_key), (int(
                    person_new[my_key][0]), int(person_new[my_key][1] - 50)),
                            0, 5e-3 * 150, (225, 0, 0), 2)
                class_names = "person"
                cv2.putText(frame, str(class_names), (int(
                    person_new[my_key][0]), int(person_new[my_key][1] - 20)),
                            0, 5e-3 * 150, (225, 0, 0), 2)

        cv2.imshow("img", frame)
        key = cv2.waitKey(33)
        if key == 27:
            cv2.destroyAllWindows()
            break
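
Example #5 matches consecutive-frame boxes with iou_cal and merges matches with new_loc; neither helper appears in the source. Sketches under the assumption of corner-format boxes [x0, y0, x1, y1] (the format Examples #9 and #10 store):

def iou_cal(box_a, box_b):
    # intersection-over-union of two corner-format boxes
    ix0, iy0 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix1, iy1 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, ix1 - ix0) * max(0, iy1 - iy0)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union else 0.0

def new_loc(old_box, one_box):
    # average matched boxes so the drawn rectangle moves smoothly
    return [int((a + b) / 2) for a, b in zip(old_box, one_box)]
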
Example #6
def get_detect(id_video):
    global control_color
    dq = deque(maxlen=1)
    t_read_video = threading.Thread(target=read_video, args=(dq,))
    # t_read_video = threading.Thread(target=read_video_vlc, args=(dq,))
    t_put_data = threading.Thread(target=put_data)
    t_read_video.start() 
    t_put_data.start() 
    
    # myout = save_video(video_reader, "./video.mp4", sz)
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    counter = []
    my_track_dict = {} #save the info of track_id
    track_smooth_dict = {} #smooth the imshow
    pts = [deque(maxlen=30) for _ in range(9999)]
    
    #deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)
    list_file = open('detection_rslt.txt', 'w')
    
    save_file = mk_dir()
    num = 0  # loop iterations with data
    success_num = 0  # number of successful detections
    t1 = time.time()  # start time for the re-detection timer
    my_result = {}  # buffer for cached results


    while True:
        # avoid the memory error.
        if len(my_track_dict) > 50:
            my_track_dict = {}  # save the info of track_id
        if len(my_result) > 50:
            my_result = {}  # buffer for cached results
        # print(len(my_track_dict))
        
        #read camera data
        if dq:
            img = dq.popleft()
            print("=====================", img.shape)
        else:
            time.sleep(0.05)
            continue
        
 
        start_time = time.time()  # start timing: measure per-frame processing time
        num += 1 

        
        if num % 100 == 1:
            cv2.imwrite(save_file+"/_{}.jpg".format(num), img)
        img_h, img_w, img_ch = img.shape
        print(img.shape)

        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        #the predict of person.
        boxs, confidence, class_names = [], [], []
        out = preson_detect(img)
    
        # keep only person boxes inside the configured area; transform the detections into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                # print(out[i])
                left = int(out[i, 3]*img_w)
                top = int(out[i, 4]*img_h)
                p_w = int(out[i, 5]*img_w-out[i, 3]*img_w)
                p_h = int(out[i, 6]*img_h-out[i, 4]*img_h)

                right = left + p_w
                bottom = top + p_h
                
                #detect the person in setting area.
                point1 = [int((left+right)/2), bottom] 
                my_index = inner_point(point1)
                print("my_index", my_index)
                if my_index:
                    boxs.append([left, top, p_w, p_h]) 
                    class_names.append("person")
                    confidence.append(out[i, 2])

        # start using the tracker
        features = encoder(frame, boxs)
        # score to 1.0 here.
        detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        i = int(0)
        indexIDs = []

        #========my_setting==============
        t2 = time.time()
        detect_time = t2 - t1      
        control_time = 0.2  # re-detect at most once every control_time seconds
        if detect_time > control_time:
            t1 = time.time()
        

        # process the tracking results and run recognition on each person box
        for det, track in zip(detections, tracker.tracks): 
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            
            pbox = det.to_tlbr()  # pbox is a single person box (tlbr)
            my_key = str(int(track.track_id))

            #========my_setting==============
            # re-detect when a new track id appears or the re-detect timer has elapsed.
            if my_key not in my_track_dict or detect_time > control_time:
                # print(my_key)
                # print(my_track_dict.keys())
                #the code of processing the person box.
                label_dict = get_labels(img, pbox)  # pbox is a single person box
                # print("============================================", label_dict)
                
                # label_dict may be None
                if label_dict is None:
                    continue
                if "coat" not in label_dict:
                    continue


                my_track_dict[my_key] = label_dict
            
            frame = draw_person_attr(frame, my_track_dict[my_key], pbox, control_color)  # draw the person's attributes.

            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()

            # coordinates can be negative: clamp to zero before indexing
            bbox = [0 if i < 0 else int(i) for i in bbox]
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]

            # define the color of rectangle.
            if my_track_dict[my_key]["coat"] == 1:
                color_rect = (0, 255, 0)
            else:
                color_rect = (0, 0, 255) 
            
            # smooth the rectangle.
            #center_loc = [int((bbox[0]+bbox[2])/2), int((bbox[1]+bbox[3])/2)]
            if my_key not in track_smooth_dict.keys():
                cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]),(color_rect), 3)
                track_smooth_dict[my_key] = bbox
            else:
                fbox = track_smooth_dict[my_key]
                a = int((bbox[0]+fbox[0])/2)
                b = int((bbox[1]+fbox[1])/2)
                c = int((bbox[2]+fbox[2])/2)
                d = int((bbox[3]+fbox[3])/2)
                cv2.rectangle(frame, (a, b), (c, d),(color_rect), 3)
                track_smooth_dict[my_key] = bbox

            #draw the boxs of track.
            #cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3)

            # add the text overlay
            if True:
                cv2.putText(frame, str(track.track_id), (bbox[0], bbox[1] - 50),
                            0, 5e-3 * 150, (color), 2)
                if len(class_names) > 0:
                    class_name = class_names[0]
                    cv2.putText(frame, str(class_names[0]), (bbox[0], bbox[1] - 20),
                                0, 5e-3 * 150, (color), 2)
            i += 1

            # print("my_track_dict[my_key]", my_track_dict[my_key])
            if my_key not in my_result:
                # don't assign directly: shallow-copy aliasing would corrupt the buffer
                my_result[my_key] = my_track_dict[my_key].copy()
                my_result[my_key]["mysum"] = 0
                # print("++++++init++++++++++",my_result[my_key])
            else:
                for key1 in my_track_dict[my_key]:
                    if key1 in my_result[my_key]:
                        if key1 == "face":
                            if my_track_dict[my_key]["face"] != "Unknown":
                                my_result[my_key]["face"] = my_track_dict[my_key]["face"]
                        else:
                            my_result[my_key][key1] = my_result[my_key][key1] + my_track_dict[my_key][key1]
                    else:
                        my_result[my_key][key1] = my_track_dict[my_key][key1]

            my_result[my_key]["mysum"] = my_result[my_key]["mysum"] + 1                
            # print(my_key, "-------->", my_result[my_key])

            if my_result[my_key]["mysum"] > 50:
                # print("-------->", my_result[my_key])
                my_cum_result = {}
                for key2 in my_result[my_key]:
                    if key2 == "face":
                        my_cum_result[key2] = my_result[my_key][key2]
                    else:
                        if my_result[my_key][key2]/my_result[my_key]["mysum"] < 0.3:
                            my_cum_result[key2] = 0
                        else:
                            my_cum_result[key2] = 1

                del my_cum_result["mysum"]  # drop the "mysum" key before uploading
                del my_result[my_key]  # free memory: remove the uploaded person
                

                if "coat" in my_cum_result and "hat" in my_cum_result and "gloves" in my_cum_result and "shoes" in my_cum_result:
                    pic_name = str(id_video) +  "_" + str(int(time.time())) + "_" + my_key
                    # put_data(my_key, my_result, frame)
                    # print("--------put_data-------->", pic_name, bbox)
                    q_put_img.append([pic_name, my_cum_result, show_image[bbox[1]:bbox[3], bbox[0]:bbox[2]]])
                    # my_result[my_key] = {"hat":0, "coat":0, "gloves":0,"shoes":0, "mysum":0}
            
        count = len(set(counter))
        
        # draw the guard line.
        draw_muti(frame)

        # cv2.putText(frame, "Total Pedestrian Counter: "+str(count),(int(20), int(120)),0, 5e-3 * 200, (0,255,0),2)
        # cv2.putText(frame, "Current Pedestrian Counter: "+str(i),(int(20), int(80)),0, 5e-3 * 200, (0,255,0),2)

        end_time = time.time()  # stop timing for this frame
        my_one_time = (end_time - start_time) * 1000
        print("====={}=====".format(num), my_one_time)  



        # frame = cv2.resize(frame, (640, 360))
        # ret2, jpeg = cv2.imencode('.jpg', frame)
        # yield (b'--frame\r\n'
        #        b'application/octet-stream: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
        cv2.namedWindow("aidong_unicom", 0)
        cv2.imshow('aidong_unicom', frame)
        key = cv2.waitKey(33)
        if key == 27:
            cv2.destroyAllWindows()
            break
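
put_data runs as a background thread draining q_put_img, which the loops fill with [pic_name, result, crop] triples. A sketch of that consumer, with the actual upload call left hypothetical:

def put_data():
    while True:
        if q_put_img:
            pic_name, result, crop = q_put_img.popleft()
            ok, buf = cv2.imencode('.jpg', crop)
            if ok:
                upload_result(pic_name, result, buf.tobytes())  # hypothetical uploader
        else:
            time.sleep(0.1)
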
Example #7
def get_detect(id_video):
    from collections import deque
    dq = deque(maxlen=1)

    t1 = threading.Thread(target=read_video, args=(dq, ))
    t1.start()

    # myout = save_video(video_reader, "./video.mp4", sz)
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    counter = []
    my_track_dict = {}  #save the info of track_id
    track_smooth_dict = {}  #smooth the imshow
    pts = [deque(maxlen=30) for _ in range(9999)]

    #deep_sort
    model_filename = 'model_data/market1501.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)
    list_file = open('detection_rslt.txt', 'w')

    my_num = 0
    num = 0

    t1 = time.time()
    fps = 0
    fps1 = time.time()
    while True:
        if dq:
            img = dq.pop()
        else:
            time.sleep(0.02)  # no frame yet: brief back-off
            continue

        # measure FPS
        fps2 = time.time()
        fps += 1
        if fps2 - fps1 > 1:
            print(fps)
            fps = 0
            fps1 = time.time()

        start_time = time.time()
        num += 1

        #frame = cv2.imread(filename)
        # 1. read a path containing Chinese characters
        #img = cv2.imdecode(np.fromfile(filename,dtype=np.uint8), cv2.IMREAD_COLOR)
        img_h, img_w, img_ch = img.shape
        print(img.shape)
        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        #the predict of person.
        boxs, confidence, class_names = [], [], []
        out = preson_detect(img)

        # transform the object detection data into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                # print(out[i])
                left = int(out[i, 3] * img_w)
                top = int(out[i, 4] * img_h)
                p_w = int(out[i, 5] * img_w - out[i, 3] * img_w)
                p_h = int(out[i, 6] * img_h - out[i, 4] * img_h)

                right = left + p_w
                bottom = top + p_h

                #detect the person in setting area.
                point1 = [int((left + right) / 2), bottom]
                my_index = inner_point(point1)
                if my_index:
                    boxs.append([left, top, p_w, p_h])
                    class_names.append("person")
                    confidence.append(out[i, 2])

        # start using the tracker
        features = encoder(frame, boxs)
        # score set to 1.0 here.
        detections = [
            Detection(bbox, 1.0, feature)
            for bbox, feature in zip(boxs, features)
        ]
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]
        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        i = int(0)
        indexIDs = []

        # set the re-detection timer
        t2 = time.time()
        detect_time = t2 - t1
        #========my_setting==============
        control = 1  # re-detect at most once every `control` seconds
        if detect_time > control:
            t1 = time.time()

        for det, track in zip(detections, tracker.tracks):
            if not track.is_confirmed() or track.time_since_update > 1:
                continue

            #print(track.track_id)

            #draw the boxs of object detection.
            pbox = det.to_tlbr()
            #cv2.rectangle(frame,(int(pbox[0]), int(pbox[1])), (int(pbox[2]), int(pbox[3])),(255,255,255), 2)

            my_key = str(int(track.track_id))
            #========my_setting==============
            # re-detect when a new track id appears or the re-detect timer has elapsed.
            if my_key not in my_track_dict or detect_time > control:
                print(my_key)
                print(my_track_dict.keys())
                #the code of processing the person box.
                label_dict = person_detect(img, pbox)
                if not label_dict:
                    continue
                my_track_dict[my_key] = label_dict

            frame = draw_person_attr(frame, my_track_dict[my_key], pbox)

            indexIDs.append(int(track.track_id))
            counter.append(int(track.track_id))
            bbox = track.to_tlbr()
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]

            #center_loc = [int((bbox[0]+bbox[2])/2), int((bbox[1]+bbox[3])/2)]
            if my_key not in track_smooth_dict.keys():
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (color), 3)
                track_smooth_dict[my_key] = bbox
            else:
                fbox = track_smooth_dict[my_key]
                a = int((bbox[0] + fbox[0]) / 2)
                b = int((bbox[1] + fbox[1]) / 2)
                c = int((bbox[2] + fbox[2]) / 2)
                d = int((bbox[3] + fbox[3]) / 2)
                cv2.rectangle(frame, (a, b), (c, d), (color), 3)
                track_smooth_dict[my_key] = bbox

            #draw the boxs of track.
            #cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150,
                        (color), 2)

            if len(class_names) > 0:
                class_name = class_names[0]
                cv2.putText(frame, str(class_names[0]),
                            (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 150,
                            (color), 2)
            i += 1

        count = len(set(counter))
        # draw the guard line.
        draw_muti(frame)

        cv2.putText(frame, "Total Pedestrian Counter: " + str(count),
                    (int(20), int(120)), 0, 5e-3 * 200, (0, 255, 0), 2)
        cv2.putText(frame, "Current Pedestrian Counter: " + str(i),
                    (int(20), int(80)), 0, 5e-3 * 200, (0, 255, 0), 2)
        #cv2.putText(frame, "FPS: %f"%(fps),(int(20), int(40)),0, 5e-3 * 200, (0,255,0),3)
        # cv2.namedWindow("YOLO4_Deep_SORT", 0)
        #cv2.resizeWindow('YOLO4_Deep_SORT', 640, 480)

        # cv2.imshow('YOLO4_Deep_SORT', frame)
        # myout.write(frame)

        # frame = cv2.resize(frame, (640, 360))
        ret2, jpeg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               jpeg.tobytes() + b'\r\n\r\n')

        end_time = time.time()
        my_one_time = (end_time - start_time) * 1000
        print("====={}=====".format(num), my_one_time)
Example #9
def get_detect(id_video):

    global control_color
    dq = deque(maxlen=1)
    t_read_video = threading.Thread(target=read_video, args=(dq,))
    # t_read_video = threading.Thread(target=read_video_vlc, args=(dq,))
    t_put_data = threading.Thread(target=put_data)
    t_read_video.start() 
    t_put_data.start() 
    
    # myout = save_video(video_reader, "./video.mp4", sz)
    my_track_dict = {} #save the info of track_id
    track_smooth_dict = {} #smooth the imshow
    

    save_file = mk_dir()
    num = 0  # loop iterations with data
    success_num = 0  # number of successful detections
    t1 = time.time()  # start time for the re-detection timer
    my_result = {}  # buffer for cached results

    fbox = []
    bbox = []
    person_dit = {}

    while True:
        
        # avoid the memory error.
        if len(my_track_dict) > 50:
            my_track_dict = {}  # save the info of track_id
        if len(my_result) > 50:
            my_result = {}  # buffer for cached results
        # print(len(my_track_dict))
        
        #read camera data
        if dq:
            img = dq.popleft()
            print("=====================", img.shape)
        else:
            time.sleep(0.05)
            continue
 
        start_time = time.time()  # start timing: measure per-frame processing time
        num += 1 

        if num % 1000 == 1:
            cv2.imwrite(save_file+"/_{}.jpg".format(num), img)

        img_h, img_w, img_ch = img.shape
        print(img.shape)

        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        #the predict of person.
        boxes = []
        out = preson_detect(img)
        
        #print(out)
        # keep only person boxes inside the configured area; transform the detections into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                #print("------------------>===================================>", out)
                # print(out[i])
                left = int(out[i, 3]*img_w)
                top = int(out[i, 4]*img_h)
                right = int(out[i, 5]*img_w)
                bottom = int(out[i, 6]*img_h)
                
                #detect the person in setting area.
                point1 = [int((left+right)/2), bottom] 
                # my_index = inner_point(point1)
                my_index = True
                print("my_index", my_index)
                if my_index:
                    boxes.append([left, top, right, bottom]) 
                
        
        
        if boxes:
            # tracking algorithm
            if not fbox:
                fbox = boxes
                init_len = len(fbox)
                for i_d in range(0, init_len):
                    person_dit[str(i_d)] = fbox[i_d][-4:]
                continue
            else:
                bbox = boxes
            #print("------------------>===================================>", boxes)
            # print("------------------>===================================>", 2)
            del_list = []
            for key_a in person_dit:
                for one_a in bbox:
                    my_flag = edn_distance(person_dit[key_a], one_a)
                    if my_flag:
                        person_dit[key_a] = new_loc(person_dit[key_a], one_a)
                        # person_dit[key_a] = one_a
                        break
                else:
                    del_list.append(key_a)

            for key_b in del_list:
                del person_dit[key_b]

            for one_c in bbox:
                for key_c in person_dit:
                    my_flag = edn_distance(person_dit[key_c], one_c)
                    if my_flag:
                        break
                else:
                    init_len += 1
                    person_dit[str(init_len)] = one_c[-4:]

            fbox = bbox

            #print("---------------------------------------------------------->===================================>", 3)
            # detection timing settings
            t2 = time.time()
            detect_time = t2 - t1      
            control_time = 1  # re-detect at most once every control_time seconds
            if detect_time > control_time:
                t1 = time.time() 
            
            # attribute detection
            for my_key in person_dit:
                # add the control condition
                # if my_key not in my_track_dict.keys() or detect_time>control_time:
                if True:
                    #the code of processing the person box.
                    label_dict = get_labels(frame, person_dit[my_key])  # a single person box
                    # print("============================================", label_dict)
                    
                    # label_dict may be None
                    if label_dict is None:
                        continue
                    if "coat" not in label_dict:
                        continue

                    my_track_dict[my_key] = label_dict

               
                #draw the boxs of track.
                new_box =  person_dit[my_key]
                p1 = (new_box[0], new_box[1])
                p2 = (new_box[2], new_box[3])
                cv2.rectangle(show_image, p1, p2, (255,0,0), 2, 1)
                # add the text overlay
                cv2.putText(show_image, "person:"+my_key, (new_box[0], new_box[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)

                show_image = draw_person_attr(show_image, my_track_dict[my_key], new_box, control_color)  # draw the person's attributes.


                # upload the results
                if my_key not in my_result:
                    # don't assign directly: shallow-copy aliasing would corrupt the buffer
                    my_result[my_key] = my_track_dict[my_key].copy()
                    my_result[my_key]["mysum"] = 0
                    # print("++++++init++++++++++",my_result[my_key])
                else:
                    for key1 in  my_track_dict[my_key]:
                        if key1 in my_result[my_key]:
                            if key1 == "face":
                                if my_track_dict[my_key]["face"] != "Unknown":
                                    my_result[my_key]["face"] = my_track_dict[my_key]["face"]
                            else:
                                my_result[my_key][key1] = my_result[my_key][key1] + my_track_dict[my_key][key1]
                        else:
                            my_result[my_key][key1] = my_track_dict[my_key][key1]

                my_result[my_key]["mysum"] = my_result[my_key]["mysum"] + 1                
                # print(my_key, "-------->", my_result[my_key])

                if my_result[my_key]["mysum"] > 20:
                    # print("-------->", my_result[my_key])
                    my_cum_result = {}
                    for key2 in my_result[my_key]:
                        if key2 == "face":
                            my_cum_result[key2] = my_result[my_key][key2]
                        else:
                            if my_result[my_key][key2]/my_result[my_key]["mysum"] < 0.3:
                                my_cum_result[key2] = 0
                            else:
                                my_cum_result[key2] = 1

                    del my_cum_result["mysum"]  # drop the "mysum" key before uploading
                    del my_result[my_key]  # free memory: remove the uploaded person
                    

                    if "coat" in my_cum_result and "hat" in my_cum_result and "gloves" in my_cum_result and "shoes" in my_cum_result:
                        pic_name = str(id_video) +  "_" + str(int(time.time())) + "_" + my_key
                        # put_data(my_key, my_result, frame)
                        # print("--------put_data-------->", pic_name, bbox)
                        location = person_dit[my_key]
                        q_put_img.append([pic_name, my_cum_result, show_image[location[1]:location[3], location[0]:location[2]]])
                        # my_result[my_key] = {"hat":0, "coat":0, "gloves":0,"shoes":0, "mysum":0}
        
            # draw the guard line.
            draw_muti(show_image)
            end_time = time.time()  # stop timing for this frame
            my_one_time = (end_time - start_time) * 1000
            print("====={}=====".format(num), my_one_time)  
            
            #print("-------------->", num, my_result)
            #cv2.namedWindow("aidong_unicom", 0)
            #cv2.imshow('aidong_unicom', show_image)
            #key = cv2.waitKey(1)

            show_image = cv2.resize(show_image, (640, 360))
            ret2, jpeg = cv2.imencode('.jpg', show_image)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
        else:
            # draw the guard line.
            draw_muti(show_image)
            end_time = time.time()  # stop timing for this frame
            my_one_time = (end_time - start_time) * 1000
            print("====={}=====".format(num), my_one_time)  
            
            print("-------------->", num, person_dit)
            #cv2.namedWindow("aidong_unicom", 0)
            #cv2.imshow('aidong_unicom', show_image)
            #key = cv2.waitKey(1)  

            show_image = cv2.resize(show_image, (640, 360))
            ret2, jpeg = cv2.imencode('.jpg', show_image)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
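
Examples #9 and #10 match boxes with edn_distance and accumulate per-track attributes into a majority vote before uploading. Neither the matcher nor a factored-out vote helper appears in the source; sketches under those assumptions (the 100-pixel threshold is a guess):

def edn_distance(box_a, box_b, max_dist=100):
    # True when the box centers are within max_dist pixels (Euclidean)
    ax, ay = (box_a[0] + box_a[2]) / 2, (box_a[1] + box_a[3]) / 2
    bx, by = (box_b[0] + box_b[2]) / 2, (box_b[1] + box_b[3]) / 2
    return ((ax - bx) ** 2 + (ay - by) ** 2) ** 0.5 < max_dist

def cumulate_result(result, threshold=0.3):
    # result holds summed 0/1 attribute hits plus a "mysum" frame counter;
    # "face" keeps the last known identity instead of being voted on.
    cum = {}
    for key, value in result.items():
        if key in ("face", "mysum"):
            continue
        cum[key] = 0 if value / result["mysum"] < threshold else 1
    if "face" in result:
        cum["face"] = result["face"]
    return cum
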
Example #10
def get_detect(rtsp_addr, camerId):
    global control_color
    dq = deque(maxlen=5)

    t_read_video = threading.Thread(target=read_video, args=(dq, rtsp_addr))
    #t_read_video = threading.Thread(target=read_video_vlc, args=(dq, rtsp_addr))
    t_put_data = threading.Thread(target=put_data, args=(camerId, ))

    t_read_video.start()
    t_put_data.start()

    # myout = save_video(video_reader, "./video.mp4", sz)
    my_track_dict = {}  #save the info of track_id

    save_file = mk_dir()
    num = 0  # loop iterations with data
    t1 = time.time()  # start time for the re-detection timer
    my_result = {}  # buffer for cached results

    mystage = 1  # default: run every detection
    fbox = []
    bbox = []
    person_dit = {}

    # load the electronic fence
    try:
        points2 = get_fence(camerId)
        #print("-------"*100, points2)
        points2 = [[int(float(pt[0]) * 1280),
                    int(float(pt[1]) * 720)] for pt in points2]
        #print("-------"*100, points2)
    except Exception:
        points2 = [[0, 0], [0, 10], [10, 10], [10, 0]]
        print("no guard line! please create a new one.")

    while True:

        # avoid the memory error.
        if len(my_track_dict) > 50:
            my_track_dict = {}  # save the info of track_id
        if len(my_result) > 50:
            my_result = {}  # buffer for cached results
        # print(len(my_track_dict))

        #read camera data
        if dq:
            img = dq.popleft()
            print("=====================", img.shape)
        else:
            print("dq is empty; waiting for frames...")
            time.sleep(0.02)
            continue

        start_time = time.time()  # start timing: measure per-frame processing time
        num += 1

        if num % 1000 == 1:
            pass
            #cv2.imwrite(save_file+"/_{}.jpg".format(num), img)

        img_h, img_w, img_ch = img.shape
        print(img.shape)

        # 2. copy the frame so drawn boxes don't end up in later crops or inference
        show_image = img.copy()
        frame = img.copy()

        # determine the current disassembly stage
        if num % 10 == 1:
            stage_result = mul_stage_model(img)
            #print(stage_result)
            for i in range(len(stage_result)):
                if stage_result[i, 2] > 0.97:
                    mystage = int(stage_result[i, 1])
                    #mytest = stage_result[i]
                    print(stage_result[i, 1], "----------mystage-------->" * 2,
                          stage_result[i, 2])
                    break
            else:
                mystage = 1

        #if mystage == 2:
        #cv2.rectangle(show_image, (int(mytest[3]*1280), int(mytest[4]*720)), (int(mytest[3]*1280+mytest[5]*1280), int(mytest[4]*720+mytest[6]*720)), (0, 149, 230), 1)

        #mystage = 1
        cv2.rectangle(show_image, (10, 10), (400, 100), (120, 149, 230), -1)
        if mystage == 2:
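            # on-screen caption: "Disassembly stage 1: detecting coat and shoes only"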
            show_image = add_ch_text(show_image,
                                     "第一拆解阶段:",
                                     15,
                                     15,
                                     textColor=(255, 255, 255),
                                     textSize=30)
            show_image = add_ch_text(show_image,
                                     "仅检测衣服和鞋子",
                                     15,
                                     60,
                                     textColor=(255, 255, 255),
                                     textSize=30)
        elif mystage == 1:
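            # on-screen caption: "Disassembly stage 2: detecting hat, coat, gloves and shoes"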
            show_image = add_ch_text(show_image,
                                     "第二拆解阶段:",
                                     15,
                                     15,
                                     textColor=(255, 255, 255),
                                     textSize=30)
            show_image = add_ch_text(show_image,
                                     "检测帽子、衣服、手套、鞋子",
                                     15,
                                     60,
                                     textColor=(255, 255, 255),
                                     textSize=30)

        # person detection
        boxes = []
        out = preson_detect(img)

        #print(out)
        # keep person boxes inside the configured area; convert detections into tracker input
        for i in range(len(out)):
            #========my_setting==============
            if out[i, 2] > 0.7:
                #print("------------------>===================================>", out)
                # print(out[i])
                left = int(out[i, 3] * img_w)
                top = int(out[i, 4] * img_h)
                right = int(out[i, 5] * img_w)
                bottom = int(out[i, 6] * img_h)

                #detect the person in setting area.
                point1 = [int((left + right) / 2), bottom]
                my_index = inner_point(point1, points2)
                # my_index = True
                print("my_index", my_index)
                if my_index:
                    boxes.append([left, top, right, bottom])

        if boxes:
            # simple tracking: greedily match current boxes to tracked persons
            if not fbox:
                fbox = boxes
                init_len = len(fbox)
                for i_d in range(0, init_len):
                    person_dit[str(i_d)] = fbox[i_d][-4:]
                continue
            else:
                bbox = boxes
            #print("------------------>===================================>", boxes)
            # print("------------------>===================================>", 2)
            del_list = []
            for key_a in person_dit:
                for one_a in bbox:
                    my_flag = edn_distance(person_dit[key_a], one_a)
                    if my_flag:
                        person_dit[key_a] = new_loc(person_dit[key_a], one_a)
                        # person_dit[key_a] = one_a
                        break
                else:
                    del_list.append(key_a)

            for key_b in del_list:
                del person_dit[key_b]

            for one_c in bbox:
                for key_c in person_dit:
                    my_flag = edn_distance(person_dit[key_c], one_c)
                    if my_flag:
                        break
                else:
                    init_len += 1
                    person_dit[str(init_len)] = one_c[-4:]

            fbox = bbox

            #print("---------------------------------->===================================>", 3)
            #检测条件设置
            t2 = time.time()
            detect_time = t2 - t1
            control_time = 1  ##setting detect time, detect one time in m second
            if detect_time > control_time:
                t1 = time.time()

            # attribute detection
            for my_key in person_dit:
                # throttling condition (currently disabled: always re-detect)
                # if my_key not in my_track_dict.keys() or detect_time>control_time:
                if True:
                    #the code of processing the person box.
                    label_dict = get_labels(frame, person_dit[my_key],
                                            mystage)  # person_dit[my_key] is a single person box
                    print("============================================",
                          label_dict)

                    # label_dict may be None
                    if label_dict is None:
                        continue
                    if "coat" not in label_dict.keys():
                        continue

                    my_track_dict[my_key] = label_dict

                # draw the tracked box
                new_box = person_dit[my_key]
                p1 = (new_box[0], new_box[1])
                p2 = (new_box[2], new_box[3])
                cv2.rectangle(show_image, p1, p2, (255, 0, 0), 2, 1)
                # label the person id
                cv2.putText(show_image, "person:" + my_key,
                            (new_box[0], new_box[1] - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

                show_image = draw_person_attr(
                    show_image, my_track_dict[my_key], new_box,
                    control_color)  # draw the attr of person.

                # accumulate results for upload
                if my_key not in my_result:
                    # copy() to avoid aliasing my_track_dict (shallow-copy pitfall)
                    my_result[my_key] = my_track_dict[my_key].copy()
                    my_result[my_key]["mysum"] = 0
                    # print("++++++init++++++++++",my_result[my_key])
                else:
                    for key1 in my_track_dict[my_key]:
                        if key1 in my_result[my_key]:
                            if key1 == "face":
                                if my_track_dict[my_key]["face"] != "":
                                    my_result[my_key]["face"] = my_track_dict[
                                        my_key]["face"]
                            else:
                                my_result[my_key][key1] = my_result[my_key][
                                    key1] + my_track_dict[my_key][key1]
                        else:
                            my_result[my_key][key1] = my_track_dict[my_key][
                                key1]

                my_result[my_key]["mysum"] = my_result[my_key]["mysum"] + 1
                # print(my_key, "-------->", my_result[my_key])

                if my_result[my_key]["mysum"] > 30:
                    # print("-------->", my_result[my_key])
                    my_cum_result = {}
                    for key2 in my_result[my_key]:
                        if key2 == "face":
                            my_cum_result[key2] = my_result[my_key][key2]
                        else:
                            if my_result[my_key][key2] / my_result[my_key][
                                    "mysum"] < 0.3:
                                my_cum_result[key2] = 0
                            else:
                                my_cum_result[key2] = 1

                    del my_cum_result["mysum"]  # drop the "mysum" counter before uploading
                    del my_result[my_key]  # free memory: remove persons already uploaded

                    #if "coat" in my_cum_result and "hat" in my_cum_result and "gloves" in my_cum_result and "shoes" in my_cum_resulti:
                    print(my_cum_result)
                    if "coat" in my_cum_result and "shoes" in my_cum_result:
                        pic_name = str(camerId) + "_" + str(int(
                            time.time())) + "_" + my_key
                        # put_data(my_key, my_result, frame)
                        print("--------put_data-------->" * 2)
                        loction = person_dit[my_key]
                        q_put_img.append([
                            mystage, pic_name, my_cum_result,
                            frame[loction[1]:loction[3], loction[0]:loction[2]]
                        ])
                        # my_result[my_key] = {"hat":0, "coat":0, "gloves":0,"shoes":0, "mysum":0}

            # draw the guard line
            draw_muti(show_image, points2)
            end_time = time.time()  # stop timing; measure per-frame processing time
            my_one_time = (end_time - start_time) * 1000
            print("====={}=====".format(num), my_one_time)
            print("-------------->", num, my_result)
            #cv2.namedWindow("aidong_unicom", 0)
            #cv2.imshow('aidong_unicom', show_image)
            #key = cv2.waitKey(1)

            show_image = cv2.resize(show_image, (640, 360))
            ret2, jpeg = cv2.imencode('.jpg', show_image)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg.tobytes() + b'\r\n\r\n')
        else:
            # draw the guard line
            draw_muti(show_image, points2)
            end_time = time.time()  # stop timing; measure per-frame processing time
            my_one_time = (end_time - start_time) * 1000
            print("=====a{}=====".format(num), my_one_time)
            print("-------------->", num, person_dit)
            #cv2.namedWindow("aidong_unicom", 0)
            #cv2.imshow('aidong_unicom', show_image)
            #key = cv2.waitKey(1)

            show_image = cv2.resize(show_image, (640, 360))
            ret2, jpeg = cv2.imencode('.jpg', show_image)
            jpg_data = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg_data +
                   b'\r\n\r\n')
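Example #10 relies on helpers whose definitions are not included: inner_point (is a person's foot point inside the fence polygon?), edn_distance (do two boxes belong to the same person?) and new_loc (box smoothing). A plausible sketch of each; the distance threshold and smoothing factor are assumed values:

def inner_point(point, polygon):
    # ray-casting point-in-polygon test
    x, y = point
    inside = False
    n = len(polygon)
    for i in range(n):
        x1, y1 = polygon[i]
        x2, y2 = polygon[(i + 1) % n]
        # count edge crossings of a horizontal ray cast from (x, y)
        if (y1 > y) != (y2 > y) and x < (x2 - x1) * (y - y1) / (y2 - y1) + x1:
            inside = not inside
    return inside


def edn_distance(box_a, box_b, max_dist=80):
    # treat two boxes as the same person when their centers are close;
    # max_dist (pixels) is an assumption
    ax, ay = (box_a[0] + box_a[2]) / 2, (box_a[1] + box_a[3]) / 2
    bx, by = (box_b[0] + box_b[2]) / 2, (box_b[1] + box_b[3]) / 2
    return ((ax - bx) ** 2 + (ay - by) ** 2) ** 0.5 < max_dist


def new_loc(old_box, new_box, alpha=0.5):
    # average old and new coordinates to damp box jitter between frames
    return [int(alpha * n + (1 - alpha) * o) for o, n in zip(old_box, new_box)]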
Example #11
    def process_my(self, id_video):
        global control_color

        num = 0
        t1 = time.time()
        save_file = self.mkdir_my()  # assuming mkdir_my returns the save directory; save_file is used below
        while True:
            # avoid unbounded memory growth
            if len(self.my_track_dict) > 50:
                self.my_track_dict = {}
            print(len(self.my_track_dict))

            if self.dq:
                img = self.dq.pop()
            else:
                time.sleep(0.02)  # plain sleep; cv2.waitKey is unreliable without an open window
                continue
            
            start_time = time.time()
            num += 1 
            if num % 500 == 1:
                cv2.imwrite(save_file+"/_{}.jpg".format(num), img)

            img_h, img_w, img_ch = img.shape
            print(img.shape)
            # 2. work on copies so drawn boxes are not fed into cropping/inference
            show_image = img.copy()
            frame = img.copy()
            
            # person detection
            boxs, confidence, class_names = [], [], []
            out = preson_detect(img)
        
            # convert detections into the tracker's input format
            for i in range(len(out)):
                #========my_setting==============
                if out[i, 2] > 0.7:
                    # print(out[i])
                    left = int(out[i, 3]*img_w)
                    top = int(out[i, 4]*img_h)
                    p_w = int(out[i, 5]*img_w-out[i, 3]*img_w)
                    p_h = int(out[i, 6]*img_h-out[i, 4]*img_h)

                    right = left + p_w
                    bottom = top + p_h
                    
                    #detect the person in setting area.
                    point1 = [int((left+right)/2), bottom] 
                    my_index = inner_point(point1)
                    if my_index:
                        boxs.append([left, top, p_w, p_h]) 
                        class_names.append("person")
                        confidence.append(out[i, 2])

            # run the Deep SORT pipeline: extract appearance features for each box
            features = self.encoder(frame, boxs)
            # detection confidence is fixed to 1.0 here
            detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(boxes, self.nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            # Call the self.tracker
            self.tracker.predict()
            self.tracker.update(detections)

            i = int(0)
            indexIDs = []
            
            # detection timing: re-run attribute detection at most once per control_time seconds
            t2 = time.time()
            detect_time = t2 - t1
            #========my_setting==============
            control_time = 0.2
            if detect_time > control_time:
                t1 = time.time()
           
            for det, track in zip(detections, self.tracker.tracks):
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue

                #print(track.track_id)
                #draw the boxs of object detection.
                pbox = det.to_tlbr()
                #cv2.rectangle(frame,(int(pbox[0]), int(pbox[1])), (int(pbox[2]), int(pbox[3])),(255,255,255), 2)

                my_key = str(int(track.track_id))
                #========my_setting==============
                # re-detect when a new track id appears or the re-detection timer has expired
                if my_key not in self.my_track_dict.keys() or detect_time > control_time:
                    print(my_key)
                    print(self.my_track_dict.keys())
                    #the code of processing the person box.
                    label_dict = person_detect(img, pbox)
                    print("**"*20,label_dict)
                    if type(label_dict) == type(None):
                        continue
                    if "coat" not in label_dict.keys():
                        continue
                        
                    self.my_track_dict[my_key] = label_dict

                # draw the attr of person.
                frame = draw_person_attr(frame, self.my_track_dict[my_key], pbox, control_color)

                indexIDs.append(int(track.track_id))
                self.counter.append(int(track.track_id))
                bbox = track.to_tlbr()
                color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]

                #define the color of rectangle.
                if self.my_track_dict[my_key]["coat"] == "Yes":
                    color_rect = (0, 255, 0)
                else:
                    color_rect = (0, 0, 255) 

                #center_loc = [int((bbox[0]+bbox[2])/2), int((bbox[1]+bbox[3])/2)]
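                # smooth the drawn rectangle by averaging with the previous frame's box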
                if my_key not in self.track_smooth_dict.keys():
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color_rect), 3)
                    self.track_smooth_dict[my_key] = bbox
                else:
                    fbox = self.track_smooth_dict[my_key]
                    a = int((bbox[0]+fbox[0])/2)
                    b = int((bbox[1]+fbox[1])/2)
                    c = int((bbox[2]+fbox[2])/2)
                    d = int((bbox[3]+fbox[3])/2)
                    cv2.rectangle(frame, (a, b), (c, d),(color_rect), 3)
                    self.track_smooth_dict[my_key] = bbox

                #draw the boxs of track.
                #cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(color), 3)
                if True:
                    cv2.putText(frame, str(track.track_id), (int(bbox[0]), int(bbox[1] - 50)), 0, 5e-3 * 150, color, 2)
                    if len(class_names) > 0:
                        class_name = class_names[0]
                        cv2.putText(frame, class_name, (int(bbox[0]), int(bbox[1] - 20)), 0, 5e-3 * 150, color, 2)
                i += 1

                #put_data
                # if num % 50 == 1:
                #     my_result = self.my_track_dict[my_key]
                #     put_data(my_key, my_result, frame)

            count = len(set(self.counter))
            # draw the guard line
            draw_muti(frame)

            # cv2.putText(frame, "Total Pedestrian self.counter: "+str(count),(int(20), int(120)),0, 5e-3 * 200, (0,255,0),2)
            # cv2.putText(frame, "Current Pedestrian self.counter: "+str(i),(int(20), int(80)),0, 5e-3 * 200, (0,255,0),2)

            end_time = time.time()
            my_one_time = (end_time - start_time) * 1000
            print("====={}=====".format(num), my_one_time)

            # frame = cv2.resize(frame, (640, 360))
            ret2, jpeg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n')
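The process_my method (Example #11) assumes self.encoder, self.tracker, Detection and preprocessing are set up elsewhere. A setup sketch following the common deep_sort_yolov3 package layout; the model file name and thresholds are assumptions:

from deep_sort import nn_matching, preprocessing
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet

max_cosine_distance = 0.3  # assumed appearance-matching threshold
nn_budget = None           # unlimited appearance gallery per track
nms_max_overlap = 1.0

# appearance feature extractor; mars-small128.pb is the re-id model
# usually shipped with deep_sort examples
encoder = gdet.create_box_encoder("mars-small128.pb", batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)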
Example #12
def get_detect(id_video):

    my_num = 0
    num = 0
    video_reader = cv2.VideoCapture(0)
    #video_reader = cv2.VideoCapture("rtsp://*****:*****@192.168.199.200/Streaming/Channels/1")
    #video_reader.set(cv2.CAP_PROP_FPS, 20)
    video_reader.set(cv2.CAP_PROP_FOURCC,
                     cv2.VideoWriter.fourcc('M', 'J', 'P', 'G'))
    video_reader.set(3, 1280)
    video_reader.set(4, 720)
    video_reader.set(cv2.CAP_PROP_FPS, 25)

    while True:
        start_time = time.time()
        num += 1
        print("======", num)

        ret1, img = video_reader.read()

        if not ret1:
            continue

        if type(img) == type(None):
            continue
        #frame = cv2.imread(filename)
        # 1. read images whose path contains Chinese characters
        #img = cv2.imdecode(np.fromfile(filename,dtype=np.uint8), cv2.IMREAD_COLOR)
        print(img.shape)
        # 2. work on a copy so drawn boxes are not fed into inference
        show_image = img.copy()
        if num % 2 == 1:
            out = preson_detect(img)
            img_h, img_w, img_ch = img.shape

            for i in range(len(out)):
                # filter person boxes by confidence
                if out[i, 2] < 0.7:
                    continue

                # box coordinates
                x0, y0, x1, y1 = out[i, 3:7]
                start_x = int(img_w * x0)
                start_y = int(img_h * y0)
                end_x = int(img_w * x1)
                end_y = int(img_h * y1)

                point1 = [int((start_x + end_x) / 2), end_y]
                my_index = inner_point(point1)
                if not my_index:
                    continue

                clip_images = img[start_y:end_y, start_x:end_x]
                h, w, c = clip_images.shape

                # filter out crops that are too small
                if h > 96 and w > 48:

                    # 3. optionally save the crop (adjust the file name)
                    my_num += 1
                    #save_name = os.path.join(save_dir, "v1_person{}.jpg".format(my_num))
                    #cv2.imwrite(save_name, clip_images)

                    label_dict = muti_attr(clip_images)
                    print(label_dict)

                    i_count = 0
                    font = cv2.FONT_HERSHEY_COMPLEX
                    if "coat" in label_dict.keys():
                        # 4. draw the person rectangle on the original image
                        if label_dict["coat"] == "Yes":
                            cv2.rectangle(show_image, (start_x, start_y),
                                          (end_x, end_y), (0, 255, 0), 2)
                        else:
                            cv2.rectangle(show_image, (start_x, start_y),
                                          (end_x, end_y), (0, 0, 255), 2)

                        # 5. draw each attribute label next to the box
                        # (帽子=hat, 衣服=coat, 手套=gloves, 鞋子=shoes; 合格=pass, 不合格=fail)
                        attr_names = {"hat": "帽子", "coat": "衣服",
                                      "gloves": "手套", "shoes": "鞋子"}
                        for key in label_dict:
                            if key not in attr_names:
                                continue
                            a = attr_names[key]
                            b = "合格" if label_dict[key] == "Yes" else "不合格"
                            text = a + ":" + str(b)
                            print(text)
                            if label_dict[key] == "Yes":
                                show_image = add_ch_text(show_image,
                                                         text,
                                                         end_x,
                                                         start_y + i_count,
                                                         textColor=(0, 255, 0),
                                                         textSize=30)
                                #cv2.putText(show_image,text,(end_x,start_y+i_count),font,0.7,(0,255,0),2)
                            elif label_dict[key] == "No":
                                show_image = add_ch_text(show_image,
                                                         text,
                                                         end_x,
                                                         start_y + i_count,
                                                         textColor=(255, 0, 0),
                                                         textSize=30)
                                #cv2.putText(show_image,text,(end_x,start_y+i_count),font,0.7,(0,0,255),2)
                            i_count += 35

            draw_muti(show_image)
            ret2, jpeg = cv2.imencode('.jpg', show_image)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg.tobytes() + b'\r\n\r\n')
        else:
            time.sleep(0.01)

        end_time = time.time()
        mytime = (end_time - start_time) * 1000
        print(mytime)
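All of these examples draw Chinese labels through add_ch_text, whose definition is not shown. A plausible sketch: cv2.putText cannot render CJK glyphs, so the usual workaround draws the text with PIL and converts back; the font file name here is an assumption:

import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

def add_ch_text(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    # convert the BGR ndarray to a PIL image
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pil_img)
    # simsun.ttc is assumed; any font file with CJK glyphs works
    font = ImageFont.truetype("simsun.ttc", textSize, encoding="utf-8")
    draw.text((left, top), text, textColor, font=font)
    # convert back to the BGR ndarray OpenCV expects
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)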