Example #1
             # Run the object-detection model and the two segmentation models
            # concurrently; results come back via the queues q_detect / q_sed.
            t1 = threading.Thread(target=thread_detect,
                                  args=(model_od, frame_pil_img, q_detect))
            t2 = threading.Thread(target=thread_seg_models,
                                  args=(model_seg_road, model_seg_lane, frame_pil_img, q_sed))
            t1.start()
            t2.start()
            t1.join()
            t2.join()

            bboxes = q_detect.get()
            argmax_feats_road, argmax_feats_lane, color_map_display_road, color_map_display_lane = q_sed.get()
            # Remap segmentation label 11 to 100 — presumably a sentinel class id
            # consumed by the decision logic below; TODO confirm which class 11 is.
            argmax_feats_road[argmax_feats_road == 11] = 100
            argmax_feats_lane[argmax_feats_lane == 11] = 100
            print('inference time:{}s'.format(time.time() - st_st))

            argmax_feats_lane, argmax_feats_road = fun_intergate_seg_LaneandRoad(argmax_feats_lane, argmax_feats_road)

            # BUG FIX: the decision timer was previously started AFTER the decision
            # call, so the printed "decision time" actually measured drawing and
            # video writing. Start it before the call and report right after.
            t_start_decision = time.time()
            decision_boxes, img_result = fun_detection_TrafficViolation(frame_np_img, bboxes, argmax_feats_lane, argmax_feats_road)
            print('decision time : {}s'.format(time.time() - t_start_decision))

            # Visualization: draw detection boxes, alpha-blend the road mask,
            # then lay out lane color map | fused frame | decision result.
            img_fusion = frame_np_img.copy()
            for bbox in bboxes:
                img_fusion = plot_bbox(img_fusion, bbox)
            img_fusion = cv2.addWeighted(img_fusion, 1, color_map_display_road, 0.5, 0)
            imgs = np.hstack([color_map_display_lane, img_fusion, img_result])

            # RGB -> BGR channel swap for OpenCV's VideoWriter.
            imgs = imgs[..., [2, 1, 0]]
            # 1080 / 3 == 360: the 3-panel hstack is squeezed into one 1920x360 strip.
            videoWriter.write(cv2.resize(imgs, (1920, 360)))
Example #2
                                                              original_image,
                                                              inWidth=512,
                                                              inHeight=256,
                                                              flag_road=1)
    # Lane segmentation model: per-pixel label map + color overlay at 512x256
    # (flag_road=0 selects the lane variant of evaluateModel).
    argmax_feats_lane, color_map_display_lane = evaluateModel(model_seg_lane,
                                                              original_image,
                                                              inWidth=512,
                                                              inHeight=256,
                                                              flag_road=0)

    # Remap segmentation label 11 to 100 in both maps — presumably a sentinel
    # class id used by the violation-decision logic; TODO confirm which class
    # id 11 represents.
    argmax_feats_road[argmax_feats_road == 11] = 100
    argmax_feats_lane[argmax_feats_lane == 11] = 100
    # Convert the (apparently PIL) image to a numpy array so the OpenCV/numpy
    # operations below can consume it.
    original_image = np.array(original_image)

    # Traffic-violation decision from detections + lane/road segmentation.
    decision_boxes, img_result = fun_detection_TrafficViolation(
        original_image, bboxes, argmax_feats_lane, argmax_feats_road)
    # NOTE(review): these are aliases, not copies — mutating the feature maps
    # later would also change map_seg_label_line / map_seg_label_road.
    map_seg_label_line = argmax_feats_lane
    map_seg_label_road = argmax_feats_road

    #    annotated_image_od_ = cv2.cvtColor(np.asarray(annotated_image_od),cv2.COLOR_RGB2BGR)
    #    imfusion = cv2.addWeighted(color_map_display_road, 0.1, annotated_image_od_, 1, 0)
    #    imfusion = cv2.addWeighted(color_map_display_lane, 0.5, annotated_image_od_, 1, 0)

    # Visualization: draw each detection bbox on a copy of the frame, then
    # alpha-blend the road mask (weight 0.5) and the lane mask (weight 1).
    img_fusion = original_image.copy()

    for bbox in bboxes:
        img_fusion = plot_bbox(img_fusion, bbox)
    img_fusion = cv2.addWeighted(img_fusion, 1, color_map_display_road, 0.5, 0)
    img_fusion = cv2.addWeighted(img_fusion, 1, color_map_display_lane, 1, 0)

    # Side-by-side panel: fused visualization | decision result image.
    imgs = np.hstack([img_fusion, img_result])