def car_number(frame):
    layerOutputs, start, end = yolo.YOLO_Detect(frame)  # YOLO detection
    idxs, boxes, classIDs, confidences = yolo.YOLO_BOX_INFO(
        frame, layerOutputs)  # detected object info

    # coordinates of the detected vehicles
    Vehicle_x = []
    Vehicle_y = []
    Vehicle_w = []
    Vehicle_h = []
    Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h = yolo.Position(
        idxs, classIDs, boxes, Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h)

    # draw bounding boxes of the detected vehicles
    yolo.Draw_Points(frame, Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h)
    """
    cv2.imshow('piece', frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    """
    number = len(idxs)
    return number
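
# A minimal usage sketch for car_number() (an assumption, not part of the original
# script): grab a single frame and print the number of objects it detects. It assumes
# the tiny-YOLO network has already been initialized via yolo.YOLOTINYINIT(), as
# main() does before running detection.
#
#   cap = cv2.VideoCapture(Input_Video)
#   grabbed, frame = cap.read()
#   if grabbed:
#       yolo.YOLOTINYINIT()
#       print("vehicles in frame:", car_number(frame))
#   cap.release()
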

def main():
    fps = FPS().start()
    cap = cv2.VideoCapture(Input_Video)
    (grabbed, frame) = cap.read()

    # make the background frame from the first grabbed frame
    first_frame = frame.copy()
    first_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
    first_gray = cv2.GaussianBlur(first_gray, (5, 5), 0)

    f_num = 0

    # parking area variables: one counter pair, tracker and bounding box per zone
    RED_cnt_1 = 0
    BLUE_cnt_1 = 0
    initBB_1 = None
    tracker_1 = None

    RED_cnt_2 = 0
    BLUE_cnt_2 = 0
    initBB_2 = None
    tracker_2 = None

    RED_cnt_3 = 0
    BLUE_cnt_3 = 0
    initBB_3 = None
    tracker_3 = None

    RED_cnt_4 = 0
    BLUE_cnt_4 = 0
    initBB_4 = None
    tracker_4 = None

    vertices = []
    vertices, pos = parkinglot(vertices)
    area = []
    area = parkinglot_piece(frame, area)

    # parking area counting variables
    l_up = 0
    l_down = 0
    r_up = 0
    r_down = 0
    l_up, l_down, r_up, r_down = preprocess(
        frame, area)  # count the cars that are already parked

    yolo.YOLOTINYINIT()  # tiny-YOLO initialization

    while cap.isOpened():
        csv_file = open_csv()  # open the CSV file
        wr = csv.writer(csv_file, delimiter=' ')

        f_num = f_num + 1
        (grabbed, frame) = cap.read()
        if not grabbed:  # end of video
            break

        # counting variables
        temp_r_1 = RED_cnt_1
        temp_b_1 = BLUE_cnt_1
        temp_r_2 = RED_cnt_2
        temp_b_2 = BLUE_cnt_2
        temp_r_3 = RED_cnt_3
        temp_b_3 = BLUE_cnt_3
        temp_r_4 = RED_cnt_4
        temp_b_4 = BLUE_cnt_4

        # error exception: re-count the zones when any count is out of range
        if (l_up < 0 or l_up > 3 or l_down < 0 or l_down > 3
                or r_up < 0 or r_up > 3 or r_down < 0 or r_down > 3):
            area = parkinglot_piece(frame, area)
            l_up, l_down, r_up, r_down = error_detection(
                area, l_up, l_down, r_up, r_down)
            yolo.YOLOTINYINIT()

        if f_num % 2 == 0:  # run detection every two frames
            # black space at the top of the frame
            blank_image = np.zeros((64, 1920, 3), np.uint8)
            frame[0:64, 0:1920] = blank_image

            # set lists for the loop
            park_cnt = [l_up, l_down, r_up, r_down]
            RED_cnt = [RED_cnt_1, RED_cnt_2, RED_cnt_3, RED_cnt_4]
            BLUE_cnt = [BLUE_cnt_1, BLUE_cnt_2, BLUE_cnt_3, BLUE_cnt_4]

            Substracted = substraction(frame, first_gray)  # background-subtracted image

            layerOutputs, start, end = yolo.YOLO_Detect(frame)  # YOLO detection
            idxs, boxes, classIDs, confidences = yolo.YOLO_BOX_INFO(
                frame, layerOutputs)  # detected object info

            # coordinates of the detected cars
            Vehicle_x = []
            Vehicle_y = []
            Vehicle_w = []
            Vehicle_h = []
            Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h = yolo.Position(
                idxs, classIDs, boxes, Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h)

            # draw bounding boxes of the detected cars
            yolo.Draw_Points(frame, Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h)

            # left up
            tracker_1, initBB_1, RED_cnt_1, BLUE_cnt_1 = Passing_Counter_Zone(
                Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB_1, frame,
                tracker_1, Substracted, RED_cnt_1, BLUE_cnt_1, vertices[0])
            l_up, temp_r_1, temp_b_1 = park_count(
                park_cnt[0], temp_r_1, temp_b_1, RED_cnt_1, BLUE_cnt_1)

            # left down
            tracker_2, initBB_2, RED_cnt_2, BLUE_cnt_2 = Passing_Counter_Zone(
                Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB_2, frame,
                tracker_2, Substracted, RED_cnt_2, BLUE_cnt_2, vertices[1])
            l_down, temp_r_2, temp_b_2 = park_count(
                park_cnt[1], temp_r_2, temp_b_2, RED_cnt_2, BLUE_cnt_2)

            # right up
            tracker_3, initBB_3, RED_cnt_3, BLUE_cnt_3 = Passing_Counter_Zone(
                Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB_3, frame,
                tracker_3, Substracted, RED_cnt_3, BLUE_cnt_3, vertices[2])
            r_up, temp_r_3, temp_b_3 = park_count(
                park_cnt[2], temp_r_3, temp_b_3, RED_cnt_3, BLUE_cnt_3)

            # right down
            tracker_4, initBB_4, RED_cnt_4, BLUE_cnt_4 = Passing_Counter_Zone(
                Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB_4, frame,
                tracker_4, Substracted, RED_cnt_4, BLUE_cnt_4, vertices[3])
            r_down, temp_r_4, temp_b_4 = park_count(
                park_cnt[3], temp_r_4, temp_b_4, RED_cnt_4, BLUE_cnt_4)

            # draw the counting lines and detection zones
            for i in range(0, 4):
                draw_line(frame, vertices[i], RED_cnt[i], BLUE_cnt[i])
                pts = detecting_zone(vertices[i])
                cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

            fps.update()
            fps.stop()

            # text at the top of the frame
            cv2.putText(frame, "FPS : " + "{:.2f}".format(fps.fps()), (50, 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (200, 200, 200), 2)
            cv2.putText(
                frame, pos[0] + ": {} / ".format(park_cnt[0]) +
                pos[1] + ": {} / ".format(park_cnt[1]) +
                pos[2] + ": {} / ".format(park_cnt[2]) +
                pos[3] + ": {}".format(park_cnt[3]), (450, 45),
                cv2.FONT_HERSHEY_SIMPLEX, 1.5, (200, 200, 200), 2)
            cv2.putText(frame, "Frame : " + "{}".format(f_num), (1500, 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (200, 200, 200), 2)

            frame = cv2.resize(frame, (960, 540),
                               interpolation=cv2.INTER_AREA)  # 1920,1080 -> 1280,720 -> 960,540
            Substracted = cv2.resize(Substracted, (960, 540),
                                     interpolation=cv2.INTER_CUBIC)

            cv2.imshow("frame", frame)
            cv2.imshow("sub", Substracted)

            wr.writerow([park_cnt[0], park_cnt[1], park_cnt[2], park_cnt[3]])

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    csv_file.close()
    cap.release()
    return
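
# Entry-point sketch (an assumption; the rest of the original file is not shown here):
# cv2, numpy as np, csv, the local yolo module, imutils.video.FPS, Input_Video and the
# helpers used above (parkinglot, parkinglot_piece, preprocess, substraction,
# Passing_Counter_Zone, park_count, draw_line, detecting_zone, error_detection,
# open_csv) are expected to be imported or defined earlier in the file. If the file
# does not already invoke main(), a standard guard would be:

if __name__ == "__main__":
    main()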