Example #1
                            elif i.going_DOWN(line_down, line_up) == True:
                                cnt_down += 1
                                #print("ID:", i.getId(), 'crossed going down at', time.strftime("%c"))
                            break
                        if i.getState() == '1':
                            if i.getDir() == 'down' and i.getY() > down_limit:
                                i.setDone()
                            elif i.getDir() == 'up' and i.getY() < up_limit:
                                i.setDone()
                        if i.timedOut():
                            index = cars.index(i)
                            cars.pop(index)
                            del i

                    if new == True:  #If nothing is detected,create new
                        p = vehicles.Car(pid, cx, cy, max_p_age)
                        cars.append(p)
                        pid += 1

                cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
                img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                    2)

        #for i in cars:
        #cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.3, i.getRGB(), 1, cv2.LINE_AA)

        str_up = 'UP: ' + str(cnt_up)
        str_down = 'DOWN: ' + str(cnt_down)
        frame = cv2.polylines(frame, [pts_L1],
                              False,
                              line_down_color,
Example #2
 def start_vid(self):
     cnt_up   = 0
     cnt_down = 0
     
     #Video source
     #cap = cv2.VideoCapture(0)
     cap = cv2.VideoCapture('vid2s.mp4')
     
     #Video properties
     ##cap.set(3,160) #Width
     ##cap.set(4,120) #Height
     
     #Print the capture properties to the console
     #for i in range(19):
     #    print(i, cap.get(i))
     
     w = cap.get(3)
     h = cap.get(4)
     frameArea = h*w
     areaTH = frameArea/350
     #print('Area Threshold', areaTH)
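     # e.g. for a 1280x720 source, frameArea = 921600 px and areaTH ≈ 2633 px,
     # i.e. blobs smaller than roughly 0.3% of the frame are ignored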
     
     #Entry/exit lines
     line_up = int(2*(h/5)) + 250
     line_down   = int(3*(h/5))
     
     up_limit =   int(1*(h/5))
     down_limit = int(4*(h/5)) + 50
     
     #print("Red line y:",str(line_down))
     #print("Blue line y:", str(line_up))
     #line_down_color = (255,0,0)
     line_up_color = (0,0,255)
     #pt1 =  [0, line_down];
     #pt2 =  [w, line_down];
     #pts_L1 = np.array([pt1,pt2], np.int32)
     #pts_L1 = pts_L1.reshape((-1,1,2))
     pt3 =  [0, line_up];
     pt4 =  [w, line_up];
     pts_L2 = np.array([pt3,pt4], np.int32)
     pts_L2 = pts_L2.reshape((-1,1,2))
     
     pt5 =  [0, up_limit];
     pt6 =  [w, up_limit];
     pts_L3 = np.array([pt5,pt6], np.int32)
     pts_L3 = pts_L3.reshape((-1,1,2))
     pt7 =  [0, down_limit];
     pt8 =  [w, down_limit];
     pts_L4 = np.array([pt7,pt8], np.int32)
     pts_L4 = pts_L4.reshape((-1,1,2))
     
     #Background subtractor
     fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = True)
     
     #Structuring elements for morphological filters
     kernelOp = np.ones((3,3),np.uint8)
     kernelOp2 = np.ones((5,5),np.uint8)
     kernelCl = np.ones((11,11),np.uint8)
     
     #Variables
     font = cv2.FONT_HERSHEY_SIMPLEX
     persons = []
     max_p_age = 5
     pid = 1
     
     while(cap.isOpened()):
     ##for image in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
         #Read a frame from the video source
         ret, frame = cap.read()
     ##    frame = image.array
     
         for i in persons:
             i.age_one() #age every person one frame
         #########################
         #   PRE-PROCESSING      #
         #########################
         
         #Apply background subtraction
         fgmask = fgbg.apply(frame)
         fgmask2 = fgbg.apply(frame)
     
         #Binarization to remove shadows (gray pixels)
         try:
             ret,imBin= cv2.threshold(fgmask,200,255,cv2.THRESH_BINARY)
             ret,imBin2 = cv2.threshold(fgmask2,200,255,cv2.THRESH_BINARY)
             #Opening (erode -> dilate) to remove noise.
             mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
             mask2 = cv2.morphologyEx(imBin2, cv2.MORPH_OPEN, kernelOp)
             #Closing (dilate -> erode) to join white regions.
             mask =  cv2.morphologyEx(mask , cv2.MORPH_CLOSE, kernelCl)
             mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernelCl)
         except:
     #        print('EOF')
     #        print('UP:',cnt_up)
     #        print('DOWN:',cnt_down)
             break
         #################
         #   CONTOURS    #
         #################
         
         # RETR_EXTERNAL returns only the extreme outer contours; all child contours are ignored.
         _, contours0, hierarchy = cv2.findContours(mask2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
         for cnt in contours0:
             area = cv2.contourArea(cnt)
             if area > areaTH:
                 #################
                 #   TRACKING    #
                 #################
                 
                 #TODO: add handling for multiple people and for objects entering/leaving the frame.
                 
                 M = cv2.moments(cnt)
                 cx = int(M['m10']/M['m00'])
                 cy = int(M['m01']/M['m00'])
                 x,y,w,h = cv2.boundingRect(cnt)
     
                 new = True
                 if cy in range(up_limit,down_limit):
                     for i in persons:
                         if abs(cx-i.getX()) <= w and abs(cy-i.getY()) <= h:
                             # the object is close to one that was already detected
                             new = False
                             i.updateCoords(cx,cy)   #update the object's coordinates and reset its age
                             if i.going_UP(line_down,line_up) == True:
                                 cnt_up += 1
                                 #print("ID:",i.getId(),'crossed going up at',time.strftime("%c"))
                             elif i.going_DOWN(line_down,line_up) == True:
                                 cnt_down += 1
                                 #print("ID:",i.getId(),'crossed going down at',time.strftime("%c"))
                             break
                         if i.getState() == '1':
                             if i.getDir() == 'down' and i.getY() > down_limit:
                                 i.setDone()
                             elif i.getDir() == 'up' and i.getY() < up_limit:
                                 i.setDone()
                         if i.timedOut():
                             #remove i from the persons list
                             index = persons.index(i)
                             persons.pop(index)
                             del i     #free i's memory
                     if new == True:
                         p = vehicles.Car(pid,cx,cy, max_p_age)
                         persons.append(p)
                         pid += 1     
                 #################
                 #   DRAWING     #
                 #################
     #            cv2.circle(frame,(cx,cy), 5, (0,0,255), -1)
                 img = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)            
                 #cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
                 
         #END for cnt in contours0
                 
         #########################
         # DRAW TRAJECTORIES     #
         #########################
     #    for i in persons:
     ##        if len(i.getTracks()) >= 2:
     ##            pts = np.array(i.getTracks(), np.int32)
     ##            pts = pts.reshape((-1,1,2))
     ##            frame = cv2.polylines(frame,[pts],False,i.getRGB())
     ##        if i.getId() == 9:
     ##            print str(i.getX()), ',', str(i.getY())
     #        cv2.putText(frame, str(i.getId()),(i.getX(),i.getY()),font,0.3,i.getRGB(),1,cv2.LINE_AA)
             
         #################
         #   IMAGES      #
         #################
         str_up = 'Number of Cars: '+ str(cnt_up)
         str_up2 = 'Total Estimated Passing Time: ' + str(cnt_up * 3) + ' second(s)'
     #    str_down = 'DOWN: '+ str(cnt_down)
     #    frame = cv2.polylines(frame,[pts_L1],False,line_down_color,thickness=2)
     #    frame = cv2.polylines(frame,[pts_L2],False,line_up_color,thickness=2)
     #    frame = cv2.polylines(frame,[pts_L3],False,(255,255,255),thickness=1)
     #    frame = cv2.polylines(frame,[pts_L4],False,(255,255,255),thickness=1)
         cv2.putText(frame, str_up ,(10,40),font,0.5,(255,255,255),2,cv2.LINE_AA)
         cv2.putText(frame, str_up ,(10,40),font,0.5,(0,0,255),1,cv2.LINE_AA)
         cv2.putText(frame, str_up2 ,(10,60),font,0.5,(255,255,255),2,cv2.LINE_AA)
         cv2.putText(frame, str_up2 ,(10,60),font,0.5,(0,0,255),1,cv2.LINE_AA)
     #    cv2.putText(frame, str_down ,(10,90),font,0.5,(255,255,255),2,cv2.LINE_AA)
     #    cv2.putText(frame, str_down ,(10,90),font,0.5,(255,0,0),1,cv2.LINE_AA)
         cv2.namedWindow("Frame", 0)
         cv2.resizeWindow("Frame", 1080, 1920)
         cv2.imshow('Frame',frame)
         #cv2.imshow('Mask',mask)    
         
         #press ESC to exit
         k = cv2.waitKey(30) & 0xff
         if k == 27:
             break
     #END while(cap.isOpened())
         
     #################
     #   CLEANUP     #
     #################
     cap.release()
     cv2.destroyAllWindows()
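
The vehicles module that the counting examples import is not shown in these listings. A minimal sketch of a Car tracker class consistent with the calls used above (getId/getX/getY, updateCoords, going_UP/going_DOWN, age_one, timedOut, getState, getDir, setDone, getRGB, getTracks) might look like the following; the crossing logic and the timeout behaviour are assumptions, not the original implementation.

from random import randint

class Car:
    def __init__(self, car_id, x, y, max_age):
        self.id = car_id
        self.x = x
        self.y = y
        self.tracks = []          # history of centroid positions
        self.rgb = (randint(0, 255), randint(0, 255), randint(0, 255))
        self.done = False
        self.state = '0'          # '0' = not counted yet, '1' = already counted
        self.dir = None
        self.age = 0
        self.max_age = max_age

    def getRGB(self): return self.rgb
    def getTracks(self): return self.tracks
    def getId(self): return self.id
    def getState(self): return self.state
    def getDir(self): return self.dir
    def getX(self): return self.x
    def getY(self): return self.y

    def updateCoords(self, xn, yn):
        self.age = 0              # seen again in this frame, so reset the age
        self.tracks.append([xn, yn])
        self.x, self.y = xn, yn

    def setDone(self):
        self.done = True

    def timedOut(self):
        return self.done

    def going_UP(self, line_down, line_up):
        # count once when the centroid crosses the upper line moving upwards
        if len(self.tracks) >= 2 and self.state == '0':
            if self.tracks[-1][1] < line_up <= self.tracks[-2][1]:
                self.state = '1'
                self.dir = 'up'
                return True
        return False

    def going_DOWN(self, line_down, line_up):
        # count once when the centroid crosses the lower line moving downwards
        if len(self.tracks) >= 2 and self.state == '0':
            if self.tracks[-1][1] > line_down >= self.tracks[-2][1]:
                self.state = '1'
                self.dir = 'down'
                return True
        return False

    def age_one(self):
        # called once per frame; a car not re-detected for max_age frames is dropped
        self.age += 1
        if self.age > self.max_age:
            self.done = True
        return True

Example #5 passes a single mid-line value to going_UP/going_DOWN instead of two lines, but the crossing idea is the same.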
Example #3
def count_dir():
    cnt_up = 0
    cnt_down = 0
    cnt_right = 0
    cnt_left = 0

    cap = cv2.VideoCapture("cars.mp4")

    # Get width and height of video

    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # taking the default
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frameArea = h * w
    areaTH = frameArea / 400

    # Lines
    line_up = int(2 * (h / 5))  # 2nd out of 4 lines
    line_down = int(3 * (h / 5))  # 3rd out of 4 lines

    up_limit = int(1 * (h / 5))  # 1st out of 4 lines
    down_limit = int(4 * (h / 5))  # last out of 4 lines

    print("Red line y:", str(line_down))
    print("Blue line y:", str(line_up))
    line_down_color = (255, 0, 0)
    line_up_color = (255, 0, 255)
    pt1 = [0, line_down]
    pt2 = [w, line_down]
    pts_L1 = np.array([pt1, pt2], np.int32)
    pts_L1 = pts_L1.reshape((-1, 1, 2))
    pt3 = [0, line_up]
    pt4 = [w, line_up]
    pts_L2 = np.array([pt3, pt4], np.int32)
    pts_L2 = pts_L2.reshape((-1, 1, 2))

    pt5 = [0, up_limit]
    pt6 = [w, up_limit]
    pts_L3 = np.array([pt5, pt6], np.int32)
    pts_L3 = pts_L3.reshape((-1, 1, 2))
    pt7 = [0, down_limit]
    pt8 = [w, down_limit]
    pts_L4 = np.array([pt7, pt8], np.int32)
    pts_L4 = pts_L4.reshape((-1, 1, 2))

    line_left = int(2 * (w / 5))  # 2nd out of 4 lines
    line_right = int(3 * (w / 5))  # 3rd out of 4 lines

    left_limit = int(1 * (w / 5))  # 1st out of 4 lines
    right_limit = int(4 * (w / 5))  # last out of 4 lines

    line_right_color = (255, 10, 10)
    line_left_color = (255, 10, 155)

    pt11 = [line_right, 0]
    pt22 = [line_right, h]
    pts_L11 = np.array([pt11, pt22], np.int32)
    pts_L11 = pts_L11.reshape((-1, 1, 2))
    pt33 = [line_left, 0]
    pt44 = [line_left, h]
    pts_L22 = np.array([pt33, pt44], np.int32)
    pts_L22 = pts_L22.reshape((-1, 1, 2))

    pt55 = [left_limit, 0]
    pt66 = [left_limit, h]
    pts_L33 = np.array([pt55, pt66], np.int32)
    pts_L33 = pts_L33.reshape((-1, 1, 2))
    pt77 = [right_limit, 0]
    pt88 = [right_limit, h]
    pts_L44 = np.array([pt77, pt88], np.int32)
    pts_L44 = pts_L44.reshape((-1, 1, 2))

    # Background Subtractor
    fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

    # Kernels
    kernalOp = np.ones((4, 4), np.uint8)
    kernalOp2 = np.ones((5, 5), np.uint8)
    kernalCl = np.ones((11, 11), np.uint8)  # note: dtype is uint8

    font = cv2.FONT_HERSHEY_SIMPLEX
    cars = []
    max_p_age = 5
    pid = 1

    while (cap.isOpened()):  # to capture the frame of the video
        ret, frame = cap.read()
        for i in cars:
            i.age_one()
        fgmask = fgbg.apply(frame)
        fgmask2 = fgbg.apply(frame)

        if ret == True:

            # Binarization
            ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
            ret, imBin2 = cv2.threshold(fgmask2, 200, 255, cv2.THRESH_BINARY)
            # Opening i.e First Erode then dilate
            mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernalOp)
            mask2 = cv2.morphologyEx(imBin2, cv2.MORPH_OPEN, kernalOp)

            # Closing i.e First Dilate then Erode
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernalCl)
            mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernalCl)

            # Find Contours
            countours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # OpenCV 4.x returns (contours, hierarchy); no leading '_,' as in OpenCV 3.x
            for cnt in countours0:
                area = cv2.contourArea(cnt)
                print(area)
                if area > areaTH:
                    ####Tracking######
                    m = cv2.moments(cnt)
                    cx = int(m['m10'] / m['m00'])
                    cy = int(m['m01'] / m['m00'])
                    x, y, w, h = cv2.boundingRect(cnt)

                    if cnt_left == 0 or cnt_right == 0:
                        new = True
                        if cy in range(up_limit, down_limit):
                            for i in cars:
                                if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h:

                                    new = False
                                    i.updateCoords(cx, cy)

                                    if i.going_UP(line_down, line_up) == True:
                                        cnt_up += 1
                                        print("ID:", i.getId(), 'crossed going up at', time.strftime("%c"))
                                    elif i.going_DOWN(line_down, line_up) == True:
                                        cnt_down += 1
                                        print("ID:", i.getId(), 'crossed going down at', time.strftime("%c"))
                                    break

                                if i.getState() == '1':
                                    if i.getDir() == 'down' and i.getY() > down_limit:
                                        i.setDone()
                                    elif i.getDir() == 'up' and i.getY() < up_limit:
                                        i.setDone()

                                if i.timedOut():
                                    index = cars.index(i)
                                    cars.pop(index)
                                    del i

                            if new == True:  # If no existing car matched, create a new one
                                p = vehicles.Car(pid, cx, cy, max_p_age)
                                cars.append(p)
                                pid += 1

                    if cnt_up == 0 and cnt_down == 0:
                        new = True
                        if cx in range(left_limit, right_limit):
                            for i in cars:
                                if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h:
                                    new = False
                                    i.updateCoords(cx, cy)

                                    if i.going_LEFT(line_right, line_left) == True:
                                        cnt_left += 1
                                        # print("left",cnt_left)
                                        print("ID:", i.getId(), 'crossed going left at', time.strftime("%c"))
                                    elif i.going_RIGHT(line_right, line_left) == True:
                                        cnt_right += 1
                                        # print("right",cnt_right)
                                        print("ID:", i.getId(), 'crossed going right at', time.strftime("%c"))
                                    break
                                if i.getState() == '1':
                                    if i.getDir() == 'right' and i.getX() > right_limit:
                                        i.setDone()
                                    elif i.getDir() == 'left' and i.getX() < left_limit:
                                        i.setDone()
                                if i.timedOut():
                                    index = cars.index(i)
                                    cars.pop(index)
                                    del i
                            if new == True:  # If no existing car matched, create a new one
                                p = vehicles.Car(pid, cx, cy, max_p_age)
                                cars.append(p)
                                pid += 1

                    # print("ansssss :::: ",str(cnt_right))
                    # print("ansssss :::: ",str(cnt_left))
                    cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
                    img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            for i in cars:
                cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.3, i.getRGB(), 1, cv2.LINE_AA)

            str_down = 'DOWN: ' + str(cnt_down)
            str_up = 'UP: ' + str(cnt_up)
            str_right = 'RIGHT: ' + str(cnt_right)
            str_left = 'LEFT: ' + str(cnt_left)

            frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=2)
            frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=2)
            frame = cv2.polylines(frame, [pts_L3], False, (255, 255, 255), thickness=1)
            frame = cv2.polylines(frame, [pts_L4], False, (255, 255, 255), thickness=1)

            frame = cv2.polylines(frame, [pts_L11], False, line_right_color, thickness=2)
            frame = cv2.polylines(frame, [pts_L22], False, line_left_color, thickness=2)
            frame = cv2.polylines(frame, [pts_L33], False, (255, 255, 255), thickness=1)
            frame = cv2.polylines(frame, [pts_L44], False, (255, 255, 255), thickness=1)

            cv2.putText(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)

            cv2.putText(frame, str_left, (100, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(frame, str_left, (100, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, str_right, (100, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(frame, str_right, (100, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)

            cv2.imshow('Counting and Detection', frame)

            if cv2.waitKey(10) == 27:
                break

        else:
            break
    cap.release()
    cv2.destroyAllWindows()
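
Note that the examples unpack cv2.findContours differently: #2, #6 and #7 expect three return values (the OpenCV 3.x API), while #3, #5 and #8 expect two (the OpenCV 4.x API). A small version-agnostic sketch that works with either:

import cv2

def find_contours_compat(mask):
    # OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy).
    # The last two elements are the contours and the hierarchy in both cases.
    result = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    return result[-2], result[-1]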
Example #4
from vehicles import Car, ElectricCar

ferrari = Car("Ferrari", "200")
ferrari.run_car()

leaf = ElectricCar("Leaf", "70", "all electric")
leaf.run_car()
leaf.display_electric_type()

# Alternatively, one can import the whole module at once by just naming
# the module, but then the classes can only be accessed by prefixing
# them with the module name:
import vehicles

ferrari = vehicles.Car("Ferrari", "200")
ferrari.run_car()

leaf = vehicles.ElectricCar("Leaf", "70", "all electric")
leaf.run_car()
leaf.display_electric_type()
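
Example #4 assumes a much simpler vehicles module than the counting examples. A minimal sketch of Car and ElectricCar definitions consistent with these calls (attribute names and the printed wording are assumptions):

class Car:
    def __init__(self, model, speed):
        self.model = model
        self.speed = speed

    def run_car(self):
        # speed is passed in as a string, e.g. "200"
        print(self.model + " runs at " + self.speed)


class ElectricCar(Car):
    def __init__(self, model, speed, electric_type):
        super().__init__(model, speed)
        self.electric_type = electric_type

    def display_electric_type(self):
        print(self.model + " is " + self.electric_type)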


Example #5
def video_process(video_file: str, result_file: str, lines: List) -> None:
    """
    Process video and count the number of vehicles passing by

    Parameters
    ----------
    video_file : str
        The absolute path of the video file to be processed.
    result_file : str
        The path of the result file.
    lines : List
        The lines used to determine the moving direction of the vehicles in the video.
        Each element of the list is a dictionary with two keys:
        'direction', the moving direction, i.e., up or down, and
        'lines', a group of three lines; each line consists of 4 numbers
        (relative to width or height, range [0, 1]), i.e., x_left, y_left, x_right, y_right.
    """
    if debug:
        if lines:
            for direction_and_line in lines:
                print(direction_and_line['direction'])
                for line in direction_and_line['lines']:
                    print(line)

    cap = cv2.VideoCapture(video_file)

    # Find OpenCV version
    major_ver = cv2.__version__.split('.')[0]  # only the major version is needed
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        if debug:
            print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        if debug:
            print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))

    # Get width and height of video
    width = cap.get(3)
    height = cap.get(4)
    frame_area = height * width
    area_threshold = frame_area / 800  # the area threshold (minimum) for each single vehicle in the video;
    # TODO: should be adjustable for different camera
    # one option is to get the width of the lane
    # then use that as the baseline

    fast_play = 1.0  # TODO: adjust the process speed

    # construct Hexagon area using the passed/picked up lines
    hexagons = []
    for direction_and_line in lines:
        direction = direction_and_line['direction']
        one_group_lines = direction_and_line['lines']

        # change the number in lines from ratio to actual values
        # based on the width and height of the window
        new_group_lines = []
        for line in one_group_lines:
            new_line = [int(num * width) if idx % 2 == 0 else int(num * height) for idx, num in enumerate(line)]
            new_group_lines.append(new_line)
        hexagon = Hexagon(new_group_lines, direction)
        hexagons.append(hexagon)
    all_counts = [0] * len(lines)
    all_cars = [[] for _ in range(len(lines))]  # the Car in each hexagon

    # Background Subtractor (contains binary image of moving objects)
    background_subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

    # Kernels (structuring elements); uint8 dtype, as in the other examples
    kernalOp = np.ones((3, 3), np.uint8)
    kernalCl = np.ones((11, 11), np.uint8)

    font = cv2.FONT_HERSHEY_SIMPLEX
    max_car_age = 6
    car_id = 1

    while cap.isOpened():
        read_success, frame = cap.read()
        last_frame_time = time.time()

        for cars in all_cars:
            for car in cars:
                car.age_one()

        all_cars = [[car for car in cars if not car.timedOut()] for cars in all_cars]

        masked_frame = background_subtractor.apply(frame)

        if read_success == True:

            # Binarization
            _, imBin = cv2.threshold(masked_frame, 200, 255, cv2.THRESH_BINARY)

            # Opening i.e First Erode then dilate
            mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernalOp)

            # Closing i.e First Dilate then Erode
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernalCl)

            # Find Contours
            # Creates rectangles around each vehicle
            countours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            for countour in countours:
                countour_area = cv2.contourArea(countour)
                # print(f"Detected area size: {countour_area}")
                if countour_area > area_threshold:
                    ####Tracking######
                    m = cv2.moments(countour)
                    if m['m00'] == 0:
                        # normally this should not happen since the area is already larger than threshold.
                        # Just in case of a bad threshold
                        continue
                    cx = int(m['m10'] / m['m00'])  # Centroid, https://www.learnopencv.com/find-center-of-blob-centroid-using-opencv-cpp-python/
                    cy = int(m['m01'] / m['m00'])
                    x, y, w, h = cv2.boundingRect(countour)

                    existing_car = False
                    car_area_id = -1
                    in_interested_area = False
                    centroid_point = Point(cx, cy)
                    for idx, hexagon in enumerate(hexagons):
                        if not hexagon.inside(centroid_point):
                            continue

                        in_interested_area = True
                        for car in all_cars[idx]:
                            # the countour has match with an existing/previous car
                            # TODO: this may not hold for all cases
                            # e.g. if the frame rate is too low,
                            # then the same car in consecutive frames will be far away from each other
                            if abs(cx - car.getX()) <= w and abs(cy - car.getY()) <= h:
                                existing_car = True
                                car.updateCoords(cx, cy)

                                mid_line = (hexagon.lines[1].point1.y + hexagon.lines[1].point2.y) / 2

                                count_it = False
                                if hexagon.direction == 'up' and car.going_UP(mid_line):
                                    count_it = True
                                elif hexagon.direction == 'down' and car.going_DOWN(mid_line):
                                    count_it = True

                                if count_it:
                                    all_counts[idx] += 1

                                break

                        car_area_id = idx
                        # always break here, since one point can only be in one area
                        break

                    if in_interested_area:
                        if not existing_car:
                            new_car = vehicles.Car(car_id, cx, cy, max_car_age)
                            all_cars[car_area_id].append(new_car)
                            car_id += 1

                        cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    else:
                        cv2.circle(frame, (cx, cy), 5, (0, 255, 255), -1)
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
                else:
                    # countours with smaller area
                    pass
            ## The end of processing countours in current frame

            for cars in all_cars:
                for car in cars:
                    cv2.putText(frame, str(car.getId()), (car.getX(), car.getY()),
                            font, 0.3, car.getRGB(), 1, cv2.LINE_AA)

            start_y = 40
            delta_y = 50
            for idx, hexagon in enumerate(hexagons):
                for line in hexagon.lines:
                    _line = np.array(line.as_list_of_points(), dtype=np.int32).reshape((-1, 1, 2))
                    # pdb.set_trace()
                    frame = cv2.polylines(frame, [_line], False, (255, 255, 255), 1)

                count_str = hexagon.direction + ": " + str(all_counts[idx])
                cv2.putText(frame, count_str, (10, start_y + delta_y * idx), font, 0.5, (255, 0, 0), 2, cv2.LINE_AA)

            cv2.imshow('Vehicle Counting In Progress (Press q to quit)', frame)

            if cv2.waitKey(1) & 0xff == ord('q'):
                break

            cur_time = time.time()
            while cur_time - last_frame_time < fast_play / fps:
                time.sleep(0.001)
                cur_time = time.time()
        else:
            # cap.read() failed: end of video or unreadable stream
            # print(f"ERROR: cannot read a frame from {video_file}")
            break

    cap.release()
    cv2.destroyAllWindows()
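
video_process also relies on Point objects, line objects exposing point1/point2/as_list_of_points, and a Hexagon built from a group of three lines plus a direction, none of which appear in this listing. A minimal sketch consistent with that usage; implementing inside() with cv2.pointPolygonTest is an assumption about how the area test might work:

import cv2
import numpy as np


class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y


class Line:
    def __init__(self, x_left, y_left, x_right, y_right):
        self.point1 = Point(x_left, y_left)
        self.point2 = Point(x_right, y_right)

    def as_list_of_points(self):
        return [[self.point1.x, self.point1.y], [self.point2.x, self.point2.y]]


class Hexagon:
    def __init__(self, three_lines, direction):
        # three_lines: [x_left, y_left, x_right, y_right] for the top, middle and bottom line
        self.direction = direction
        self.lines = [Line(*coords) for coords in three_lines]
        # walk down the left endpoints, then back up the right endpoints to close the polygon
        left = [(l.point1.x, l.point1.y) for l in self.lines]
        right = [(l.point2.x, l.point2.y) for l in reversed(self.lines)]
        self._polygon = np.array(left + right, dtype=np.int32).reshape((-1, 1, 2))

    def inside(self, point):
        # >= 0 means the point is on the boundary or strictly inside the polygon
        return cv2.pointPolygonTest(self._polygon, (float(point.x), float(point.y)), False) >= 0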
Example #6
def counter(videos, road):
    count = 0
    url = videos
    road = str(road)
    cap = cv2.VideoCapture(url)

    # for real time capture
    # cap = cv2.VideoCapture(0)

    #Get width and height of video

    w = cap.get(3)
    h = cap.get(4)
    frameArea = h * w
    areaTH = frameArea / 400

    #Lines
    start_line = int(2 * (h / 5))
    end_line = int(3 * (h / 5))
    end_limit = int(4 * (h / 5))

    line_color = (255, 0, 0)
    pt1 = [0, end_line]
    pt2 = [w, end_line]
    pts_L1 = np.array([pt1, pt2], np.int32)
    pts_L1 = pts_L1.reshape((-1, 1, 2))

    pt3 = [0, end_limit]
    pt4 = [w, end_limit]
    pts_L2 = np.array([pt3, pt4], np.int32)
    pts_L2 = pts_L2.reshape((-1, 1, 2))

    #Background Subtractor
    fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)

    #Kernals
    kernalOp = np.ones((3, 3), np.uint8)
    kernalOp2 = np.ones((5, 5), np.uint8)
    kernalCl = np.ones((11, 11), np.uint8)

    font = cv2.FONT_HERSHEY_SIMPLEX
    cars = []
    max_p_age = 5
    pid = 1

    while (cap.isOpened()):
        ret, frame = cap.read()
        for i in cars:
            i.age_one()
        fgmask = fgbg.apply(frame)
        fgmask2 = fgbg.apply(frame)

        if ret == True:

            #Binarization
            ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
            ret, imBin2 = cv2.threshold(fgmask2, 200, 255, cv2.THRESH_BINARY)

            #Opening i.e First Erode then dilate to remove background noise
            mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernalOp)
            mask2 = cv2.morphologyEx(imBin2, cv2.MORPH_OPEN, kernalOp)

            #Closing i.e First Dilate then Erode to remove foreground noise
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernalCl)
            mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernalCl)

            #Find Contours
            _, countours0, hierarchy = cv2.findContours(
                mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            for cnt in countours0:
                area = cv2.contourArea(cnt)
                if area > areaTH:
                    ####Tracking######
                    m = cv2.moments(cnt)
                    cx = int(m['m10'] / m['m00'])
                    cy = int(m['m01'] / m['m00'])
                    x, y, w, h = cv2.boundingRect(cnt)

                    new = True
                    if cy in range(end_limit):
                        for i in cars:
                            if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h:
                                new = False
                                i.updateCoords(cx, cy)

                                if i.going_DOWN(end_line, start_line) == True:
                                    count += 1
                                    print("Vehicle : ", (count), ' crossed at',
                                          time.strftime("%c"))
                                break
                            if i.getState() == '1':
                                if i.getDir() == 'move' and i.getY() > end_limit:
                                    i.setDone()
                            if i.timedOut():
                                index = cars.index(i)
                                cars.pop(index)
                                del i

                        if new == True:  #If no existing car matched, create a new one
                            p = vehicles.Car(pid, cx, cy, max_p_age)
                            cars.append(p)
                            pid += 1

                    cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
                    img = cv2.rectangle(frame, (x, y), (x + w, y + h),
                                        (0, 255, 0), 2)

            for i in cars:
                cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font,
                            0.3, i.getRGB(), 1, cv2.LINE_AA)

            disp_text = 'Count : ' + str(count)
            frame = cv2.polylines(frame, [pts_L1],
                                  False,
                                  line_color,
                                  thickness=2)
            frame = cv2.polylines(frame, [pts_L2],
                                  False, (255, 255, 255),
                                  thickness=1)
            cv2.putText(frame, road, (10, 20), font, 0.5, (0, 0, 0), 2,
                        cv2.LINE_AA)
            cv2.putText(frame, road, (10, 20), font, 0.5, (0, 255, 0), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, disp_text, (10, 40), font, 0.7, (0, 0, 0), 2,
                        cv2.LINE_AA)
            cv2.putText(frame, disp_text, (10, 40), font, 0.7, (255, 255, 255),
                        1, cv2.LINE_AA)
            cv2.imshow('Frame', frame)

            if cv2.waitKey(1) & 0xff == ord('q'):
                return count
        else:
            return count

    cap.release()
    cv2.destroyAllWindows()
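
counter() returns the running count as soon as the stream ends or 'q' is pressed, so a caller could invoke it once per camera feed, for example (the file name and road label here are hypothetical):

total = counter("road_a.mp4", road="Road A")
print("Vehicles counted on Road A:", total)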

Example #7
    def count_CARS(self):
        
        cnt_down=0
        
        cap=cv2.VideoCapture(self.video_name)

        #Get width and height of video

        w=cap.get(3)
        h=cap.get(4)
        frameArea=h*w
        areaTH=frameArea/400

        #Lines

        line_down=int(4*(h/5))
        up_limit=int(1*(h/5))
        down_limit=int(4.5*(h/5))

        line_down_color=(255,0,0)
        pt1 =  [0, line_down]
        pt2 =  [w, line_down]
        pts_L1 = np.array([pt1,pt2], np.int32)
        pts_L1 = pts_L1.reshape((-1,1,2))

        pt5 =  [0, up_limit]
        pt6 =  [w, up_limit]
        pts_L3 = np.array([pt5,pt6], np.int32)
        pts_L3 = pts_L3.reshape((-1,1,2))
        pt7 =  [0, down_limit]
        pt8 =  [w, down_limit]
        pts_L4 = np.array([pt7,pt8], np.int32)
        pts_L4 = pts_L4.reshape((-1,1,2))

        #Background Subtractor
        fgbg=cv2.createBackgroundSubtractorMOG2(detectShadows=True)

        #Kernals
        kernalOp = np.ones((3,3),np.uint8)

        font = cv2.FONT_HERSHEY_SIMPLEX
        cars = []
        max_p_age = 5
        pid = 1

        #time constraints
        timeout = 5
        timeout_start = time.time()

        while(time.time() < timeout_start + timeout):
            ret,frame=cap.read()
            for i in cars:
                i.age_one()
            fgmask=fgbg.apply(frame)
            fgmask2=fgbg.apply(frame)

            if ret==True:

                #Binarization
                ret,imBin=cv2.threshold(fgmask,200,255,cv2.THRESH_BINARY)
                ret,imBin2=cv2.threshold(fgmask2,200,255,cv2.THRESH_BINARY)
                #Opening i.e First Erode then dilate
                mask=cv2.morphologyEx(imBin,cv2.MORPH_OPEN,kernalOp)

                #Find Contours
                _, countours0,hierarchy=cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
                for cnt in countours0:
                    area=cv2.contourArea(cnt)
                    if area>areaTH:
                        ####Tracking######
                        m=cv2.moments(cnt)
                        cx=int(m['m10']/m['m00'])
                        cy=int(m['m01']/m['m00'])
                        x,y,w,h=cv2.boundingRect(cnt)

                        new=True
                        if cy in range(up_limit,down_limit):
                            for i in cars:
                                if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h:
                                    new = False
                                    i.updateCoords(cx, cy)

                                    if i.going_DOWN(line_down)==True:
                                        cnt_down+=1
                                        print("Count:",cnt_down)
                                    break
                                if i.getState()=='1':
                                    if i.getDir()=='down' and i.getY()>down_limit:
                                        i.setDone()
                                if i.timedOut():
                                    index=cars.index(i)
                                    cars.pop(index)
                                    del i

                            if new==True: #If no existing car matched, create a new one
                                p=vehicles.Car(pid,cx,cy,max_p_age)
                                cars.append(p)
                                pid+=1
                                
                        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)

        
                str_down='DOWN: '+str(cnt_down)
                frame=cv2.polylines(frame,[pts_L1],False,line_down_color,thickness=2)
                frame=cv2.polylines(frame,[pts_L3],False,(255,255,255),thickness=1)
                frame=cv2.polylines(frame,[pts_L4],False,(255,255,255),thickness=1)
                cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                cv2.imshow('Frame',frame)

                if cv2.waitKey(1)&0xff==ord('q'):
                    break

            else:
                break

        cap.release()
        cv2.destroyAllWindows()
        return cnt_down
Example #8
def execute(directory, file_n, result_n, up_dwn, lines=None):
    """ 
        purpose: it counts cars going up
        and down based on parameters. When it is run it analyzes the video,
        and produces a txt file in the end showing what times the cars 
        passed the intersection. It also includes information about the total
        cars going up and down.

        parameters
        ----------
        directory: string,
            The directory that contains the video and the result

        file_n: type = String, optimal: must be a video type that cv2.VideoCaputure accepts

        result_n: type = String, optimal: file should not exist, otherwise it overwrites the file

        up_dwn: type = Integer, optimal: 0 = count cars going down, 1 = count cars going up, 2 = count cars going both up and down

        lines : list,
                The lines that are used to determine the moving direction of the vehicles in the video.
                Each element of the list is a dictionary, of which,
                Key is the moving direction, i.e., up or down;
                Values are three lines, and each line consists of 4 numbers (relative, range is [0, 1]), i.e., x_left, y_left, x_right, y_right

        returns
        ----------
        none
    """
    if debug:
        if lines:
            for direction_and_line in lines:
                print(direction_and_line['direction'])
                for line in direction_and_line['lines']:
                    print(line)

    os.chdir(directory)
    file_name = file_n
    result_name = result_n
    up_down = up_dwn

    # cnt_up is how many cars are going up relative to the highway
    # cnt_down is how many cars are going down relative to the highway
    cnt_up = 0
    cnt_down = 0

    cap = cv2.VideoCapture(file_name)

    # Find OpenCV version
    major_ver = cv2.__version__.split('.')[0]  # only the major version is needed
    if int(major_ver) < 3:
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
    else:
        fps = cap.get(cv2.CAP_PROP_FPS)
        print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))

    # Get width and height of video
    w = cap.get(3)
    h = cap.get(4)
    frameArea = h * w
    areaTH = frameArea / 400  # the area threshold (minimum) for each single vehicle in the video;
    # TODO: should be adjustable for different camera

    if lines:
        line_downs = []
        line_ups = []
        for direction_and_line in lines:
            all_lines = direction_and_line['lines']
            if direction_and_line['direction'] == 'down':
                line_downs.append(all_lines)
            else:
                line_ups.append(all_lines)

        # TODO: For now just take the first one
        if line_ups:
            line_up = int(line_ups[0][1][1] * h)
        else:
            line_up = int(line_downs[0][1][1] * h)
        line_down = int(line_downs[0][1][1] * h)

        up_limit = int(line_downs[0][0][1] * h)
        down_limit = int(line_downs[0][2][1] * h)

        pts_L1 = np.array([[int(line_downs[0][1][0] * w), int(line_downs[0][1][1] * h)],
                        [int(line_downs[0][1][2] * w), int(line_downs[0][1][3] * h)]]).reshape((-1, 1, 2)) # line down
        pts_L2 = pts_L1 # line up # TODO
        pts_L3 = np.array([[int(line_downs[0][0][0] * w), int(line_downs[0][0][1] * h)],
                        [int(line_downs[0][0][2] * w), int(line_downs[0][0][3] * h)]]).reshape((-1, 1, 2)) # up limit
        pts_L4 = np.array([[int(line_downs[0][2][0] * w), int(line_downs[0][2][1] * h)],
                        [int(line_downs[0][2][2] * w), int(line_downs[0][2][3] * h)]]).reshape((-1, 1, 2)) # down limit
    else:
        # Lines
        # Create Lines for video
        if file_name == "test.mov":
            line_up = int(2.5 * (h / 5))
            line_down = int(4 * (h / 5))
        elif file_name == "test2.mov":
            line_up = int(2.5 * (h / 5))
            line_down = int(4 * (h / 5))
        elif file_name == "test3.mov":
            line_up = int(2.5 * (h / 5))
            line_down = int(4 * (h / 5))
        elif file_name == "surveillance.m4v":
            line_up = int(2.5 * (h / 5))
            line_down = int(3 * (h / 5))
        else:
            line_up = int(3.25 * (h / 5))
            line_down = int(3 * (h / 5))

        up_limit = int(2 * (h / 5))
        down_limit = int(4.5 * (h / 5))

        pt1 = [0, line_down]
        pt2 = [w, line_down]
        pts_L1 = np.array([pt1, pt2], np.int32)
        pts_L1 = pts_L1.reshape((-1, 1, 2))
        pt3 = [0, line_up]
        pt4 = [w, line_up]
        pts_L2 = np.array([pt3, pt4], np.int32)
        pts_L2 = pts_L2.reshape((-1, 1, 2))

        pt5 = [0, up_limit]
        pt6 = [w, up_limit]
        pts_L3 = np.array([pt5, pt6], np.int32)
        pts_L3 = pts_L3.reshape((-1, 1, 2))
        pt7 = [0, down_limit]
        pt8 = [w, down_limit]
        pts_L4 = np.array([pt7, pt8], np.int32)
        pts_L4 = pts_L4.reshape((-1, 1, 2))

    print("Red line y: ", str(line_down))
    print("Blue line y: ", str(line_up))
    # Sets line_down_color to red
    line_down_color = (255, 0, 0)
    # Sets line_up_color to purple
    line_up_color = (255, 0, 255)

    # Background Subtractor (contains binary image of moving objects)
    fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

    # Kernals
    kernalOp = np.ones((3, 3), np.uint8)
    kernalCl = np.ones((11, 11), np.uint8)

    font = cv2.FONT_HERSHEY_SIMPLEX
    cars = []
    max_p_age = 5
    car_id = 1
    speed_up = 0.0
    speed_down = 0.0
    distance = .5
    t2 = 0
    t1 = 0
    contents = []

    while cap.isOpened():
        read_success, frame = cap.read()
        last_frame_time = time.time()

        for i in cars:
            i.age_one()
        fgmask = fgbg.apply(frame)

        if read_success == True:

            # Binarization
            _, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)

            # Opening i.e First Erode then dilate
            mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernalOp)

            # Closing i.e First Dilate then Erode
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernalCl)

            # Find Contours
            # Creates rectangles around each vehicle
            countours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            for cnt in countours0:
                area = cv2.contourArea(cnt)
                # print(f"Detected area size: {area}")
                if area > areaTH:
                    ####Tracking######
                    m = cv2.moments(cnt)
                    if m['m00'] == 0:
                        # generally this should not happen since the area is already larger than threshold.
                        # Just in case of a bad threshold
                        continue
                    cx = int(m['m10'] / m['m00'])  # Centroid, https://www.learnopencv.com/find-center-of-blob-centroid-using-opencv-cpp-python/
                    cy = int(m['m01'] / m['m00'])
                    x, y, w, h = cv2.boundingRect(cnt)

                    new = True
                    # detect cars in between up_limit and down_limit
                    if cy in range(up_limit, down_limit):
                        for i in cars:
                            # TODO: this may not hold for all cases
                            # e.g. if the frame rate is too low,
                            # then the same car in consecutive frames will be far away from each other
                            if abs(x - i.getX()) <= w and abs(y - i.getY()) <= h:
                                new = False
                                i.updateCoords(cx, cy)

                                if i.going_UP(line_up):
                                    cnt_up += 1
                                    content = "ID: " + str(i.getId()) + ' crossed going up at ' + time.strftime("%c")
                                    contents.append(content)
                                    print(content)
                                    t1 = time.time()
                                    if distance / abs(t1 - t2) * 10 < 80:
                                        speed_up = distance / abs(t1 - t2) * 10

                                elif i.going_DOWN(line_down):
                                    cnt_down += 1
                                    content = "ID: " + str(i.getId()) + ' crossed going down at ' + time.strftime("%c")
                                    contents.append(content)
                                    print(content)
                                    t1 = time.time()
                                    if distance / abs(t1 - t2) * 10 < 80:
                                        speed_down = distance / abs(t1 - t2) * 10

                                break
                            if i.getState() == '1':
                                if i.getDir() == 'down' and i.getY() > down_limit:
                                    i.setDone()
                                elif i.getDir() == 'up' and i.getY() < up_limit:
                                    i.setDone()
                            if i.timedOut():
                                index = cars.index(i)
                                cars.pop(index)
                                del i

                        if new:  # If nothing is detected, create new
                            p = vehicles.Car(car_id, cx, cy, max_p_age)
                            cars.append(p)
                            car_id += 1
                            t2 = time.time()

                    cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
                    img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            for i in cars:
                cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.3, i.getRGB(), 1, cv2.LINE_AA)

            str_up = 'UP: ' + str(cnt_up)
            str_down = 'DOWN: ' + str(cnt_down)

            frame = cv2.polylines(frame, [pts_L3], False, (255, 255, 255), thickness=1)
            frame = cv2.polylines(frame, [pts_L4], False, (255, 255, 255), thickness=1)

            if up_down == 0:
                frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=2)
                cv2.putText(frame, 'SPEED_DOWN: ' + str(speed_down), (100, 40), font, 0.5, (255, 255, 255), 2,
                            cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_DOWN: ' + str(speed_down), (100, 40), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                cv2.putText(frame, str_down, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, str_down, (10, 40), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
            elif up_down == 1:
                frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=2)
                cv2.putText(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_UP: ' + str(speed_up), (100, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_UP: ' + str(speed_up), (100, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
            elif up_down == 2:
                frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=2)
                frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=2)
                cv2.putText(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_UP: ' + str(speed_up), (100, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_UP: ' + str(speed_up), (100, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_DOWN: ' + str(speed_down), (100, 90), font, 0.5, (255, 255, 255), 2,
                            cv2.LINE_AA)
                cv2.putText(frame, 'SPEED_DOWN: ' + str(speed_down), (100, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)

            cv2.imshow('Frame', frame)

            if cv2.waitKey(1) & 0xff == ord('q'):
                break

            cur_time = time.time()
            while cur_time - last_frame_time < 1 / fps:
                time.sleep(0.001)
                cur_time = time.time()
        else:
            # cap.read() failed: end of video or unreadable stream
            break

    with open(result_name, 'w') as f:
        for item in contents:
            f.write(item + '\n')
        f.write("Count going up: " + str_up + "\n")
        f.write("Count going down: " + str_down + "\n")

    cap.release()
    cv2.destroyAllWindows()

    return cnt_up, cnt_down