def thread_seg_line(model_seg_line, frame_pil_img, q_sed_line):
    # Run lane-line segmentation in a worker thread (flag_road=0 selects the lane path)
    # and publish the label map plus its color visualization on the shared queue.
    argmax_feats_line, color_map_display_line = evaluateModel(
        model_seg_line, frame_pil_img, inWidth=512, inHeight=256, flag_road=0)
    q_sed_line.put([argmax_feats_line, color_map_display_line])
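# Illustrative usage sketch (not from the original script): how thread_seg_line could be driven
# alongside a second thread running the road model, so both segmentations run concurrently on one
# frame. The helper name run_segmentation_threads_example is hypothetical; model_seg_lane,
# model_seg_road and evaluateModel are the names used in the surrounding code.
def run_segmentation_threads_example(model_seg_lane, model_seg_road, frame_pil_img):
    import queue
    import threading

    q_seg_line = queue.Queue()
    q_seg_road = queue.Queue()

    # Lane-line segmentation via the worker function above.
    t_line = threading.Thread(target=thread_seg_line,
                              args=(model_seg_lane, frame_pil_img, q_seg_line))
    # Road segmentation in a second thread (flag_road=1 selects the road path).
    t_road = threading.Thread(
        target=lambda: q_seg_road.put(
            evaluateModel(model_seg_road, frame_pil_img,
                          inWidth=512, inHeight=256, flag_road=1)))
    t_line.start()
    t_road.start()
    t_line.join()
    t_road.join()

    argmax_feats_lane, color_map_display_lane = q_seg_line.get()
    argmax_feats_road, color_map_display_road = q_seg_road.get()
    return argmax_feats_lane, argmax_feats_road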
success, frame_np_img = videoCapture1.read()
c = 0
while success:
    print('{} frame:'.format(c + 1))
    c += 1
    st_st = time.time()

    # BGR → RGB and numpy image to PIL image
    frame_np_img = frame_np_img[..., [2, 1, 0]]
    frame_pil_img = Image.fromarray(frame_np_img)

    # object detection model
    st = time.time()
    annotated_image_od, bboxes = detect(model_od, frame_pil_img,
                                        min_score=0.3, max_overlap=0.5, top_k=100)
    print('object detection:{}s'.format(time.time() - st))

    # road segmentation model
    st = time.time()
    argmax_feats_road, color_map_display_road = evaluateModel(
        model_seg_road, frame_pil_img, inWidth=512, inHeight=256, flag_road=1)
    print('road segmentation:{}s'.format(time.time() - st))

    # lane segmentation model
    st = time.time()
    argmax_feats_lane, color_map_display_lane = evaluateModel(
        model_seg_lane, frame_pil_img, inWidth=512, inHeight=256, flag_road=0)
    print('lane segmentation:{}s'.format(time.time() - st))

    # Remap segmentation class 11 to 100 in both label maps before the violation check
    argmax_feats_road[argmax_feats_road == 11] = 100
    argmax_feats_lane[argmax_feats_lane == 11] = 100

    # Traffic-violation decision on the detected boxes and the two segmentation maps
    decision_boxes, img_result = fun_detection_TrafficViolation(
        frame_np_img, bboxes, argmax_feats_lane, argmax_feats_road)
    map_seg_label_line = argmax_feats_lane
    map_seg_label_road = argmax_feats_road
    # annotated_image_od_ = cv2.cvtColor(np.asarray(annotated_image_od), cv2.COLOR_RGB2BGR)

    # Read the next frame so the loop advances and terminates at the end of the video
    success, frame_np_img = videoCapture1.read()
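# Illustrative sketch (assumption, not part of the original excerpt): videoCapture1 is expected to
# be an OpenCV capture opened on the input video, and img_result frames can be written back out
# with a cv2.VideoWriter. The helper name open_video_io and its path arguments are hypothetical.
def open_video_io(video_in_path, video_out_path):
    import cv2  # presumably already imported at the top of the original script

    cap = cv2.VideoCapture(video_in_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # fall back if FPS metadata is missing
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    writer = cv2.VideoWriter(video_out_path,
                             cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (width, height))
    # Inside the frame loop above, img_result (RGB) would be written back as BGR:
    #     writer.write(cv2.cvtColor(img_result, cv2.COLOR_RGB2BGR))
    # and both objects released after the loop:
    #     writer.release(); cap.release()
    return cap, writer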
if __name__ == '__main__':
    img_path = 'D:\\專案管理\\新加坡專案\\label\\OV_001-1-Segmentation\\OV_001-1 0036.jpg'
    original_image = Image.open(img_path, mode='r')
    original_image = original_image.convert('RGB')

    # object detection model
    annotated_image_od, bboxes = detect(model_od, original_image,
                                        min_score=0.3, max_overlap=0.5, top_k=100)

    # road segmentation model
    argmax_feats_road, color_map_display_road = evaluateModel(
        model_seg_road, original_image, inWidth=512, inHeight=256, flag_road=1)

    # lane segmentation model
    argmax_feats_lane, color_map_display_lane = evaluateModel(
        model_seg_lane, original_image, inWidth=512, inHeight=256, flag_road=0)

    # Remap segmentation class 11 to 100 in both label maps before the violation check
    argmax_feats_road[argmax_feats_road == 11] = 100
    argmax_feats_lane[argmax_feats_lane == 11] = 100

    # PIL image to numpy array for the violation-detection routine
    original_image = np.array(original_image)
    decision_boxes, img_result = fun_detection_TrafficViolation(
        original_image, bboxes, argmax_feats_lane, argmax_feats_road)
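    # Illustrative follow-up (assumption, not in the original): inspect the per-box decisions and
    # save the annotated result. The output filename is hypothetical, and img_result is assumed to
    # be an RGB numpy array as produced above, so it is converted to BGR for cv2.imwrite.
    import cv2  # presumably already imported at the top of the original script
    print('decision boxes:', decision_boxes)
    cv2.imwrite('result_single_image.jpg', cv2.cvtColor(img_result, cv2.COLOR_RGB2BGR))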