##      category_index,
    ##      instance_masks=output_dict.get('detection_masks'),
    ##      use_normalized_coordinates=True,
    ##      line_thickness=8)

    # insert information text to video frame
    font = cv2.FONT_HERSHEY_SIMPLEX

    # Visualization of the results of a detection.
    counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
        cap.get(1),
        input_frame,
        1,
        is_color_recognition_enabled,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        x_reference=roi,
        deviation=10,
        use_normalized_coordinates=True,
        line_thickness=1)

    # count each vehicle only once, on the frame where the counter first fires
    if realCounter != counter:
        realCounter = counter
        if realCounter == 1:
            total_passed_vehicle = total_passed_vehicle + counter

    # when a vehicle passes over the line and is counted, make the ROI line green
    if counter == 1:
        cv2.line(input_frame, (roi, 0), (roi, height), (0, 0xFF, 0), 2)
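A note on the first argument passed to the visualizer in all of these examples: cap.get(1) reads the capture's current frame number through the raw OpenCV property id. A minimal equivalent using the named constant (the video path is a placeholder):

import cv2

cap = cv2.VideoCapture('input.mp4')          # placeholder path
frame_no = cap.get(cv2.CAP_PROP_POS_FRAMES)  # identical to cap.get(1)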
Example #2
def cumulative_object_custom_counting_x_axis(input_video, detection_graph,
                                             category_index,
                                             is_color_recognition_enabled, fps,
                                             width, height, roi, deviation):
    total_passed_vehicle = 0

    # initialize .csv
    with open('custom.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['Date', 'Time', 'Object', 'Count'])  # header row

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_movie = cv2.VideoWriter('the_output.avi', fourcc, fps,
                                   (width, height))

    # input video
    cap = cv2.VideoCapture(input_video)

    total_car = 0
    total_pedestrian = 0
    total_bicycle = 0
    total_truck = 0
    total_motorcycle = 0
    total_others = 0

    total_passed_vehicle = 0
    speed = "waiting..."
    direction = "waiting..."
    size = "waiting..."
    color = "waiting..."
    counting_mode = "..."
    width_height_taken = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            # for all the frames that are extracted from input video
            while cap.isOpened():
                ret, frame = cap.read()
                # print(ret)
                if not ret:
                    print("end of the video file...")
                    break
                else:
                    input_frame = frame
                    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                    image_np_expanded = np.expand_dims(input_frame, axis=0)

                    # Actual detection.
                    (boxes, scores, classes, num) = sess.run(
                        [
                            detection_boxes, detection_scores,
                            detection_classes, num_detections
                        ],
                        feed_dict={image_tensor: image_np_expanded})

                    # insert information text to video frame
                    font = cv2.FONT_HERSHEY_SIMPLEX

                    # Visualization of the results of a detection.
                    counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                        cap.get(1),
                        input_frame,
                        1,
                        is_color_recognition_enabled,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        x_reference=roi,
                        deviation=deviation,
                        use_normalized_coordinates=True,
                        line_thickness=4)

                    # when a vehicle passes over the line and is counted, make the ROI line green
                    if counter == 1:
                        cv2.line(input_frame, (roi, 0), (roi, height),
                                 (0, 0xFF, 0), 2)
                        if 'car' in csv_line:
                            total_car = total_car + counter
                            database.mysqlInsert("Car", counter)
                        elif 'person' in csv_line:
                            total_pedestrian = total_pedestrian + counter
                            database.mysqlInsert("Person", counter)
                        elif 'bicycle' in csv_line:
                            total_bicycle = total_bicycle + counter
                            database.mysqlInsert("Bicycle", counter)
                        elif 'truck' in csv_line:
                            total_truck = total_truck + counter
                            database.mysqlInsert("Truck", counter)
                        elif 'motorcycle' in csv_line:
                            total_motorcycle = total_motorcycle + counter
                            database.mysqlInsert("Motorcycle", counter)
                        else:
                            total_others = total_others + counter
                            database.mysqlInsert("Other", counter)
                    else:
                        cv2.line(input_frame, (roi, 0), (roi, height),
                                 (0xFF, 0, 0), 2)

                    total_passed_vehicle = total_passed_vehicle + counter

                    # insert information text to video frame
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(
                        input_frame,
                        'All: ' + str(total_passed_vehicle),
                        (10, 30),
                        font,
                        0.6,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )
                    cv2.putText(
                        input_frame,
                        'Motorcycle: ' + str(total_motorcycle),
                        (10, 130),
                        font,
                        0.6,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )
                    cv2.putText(
                        input_frame,
                        'Truck: ' + str(total_truck),
                        (10, 110),
                        font,
                        0.6,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )
                    cv2.putText(
                        input_frame,
                        'Bicycle: ' + str(total_bicycle),
                        (10, 90),
                        font,
                        0.6,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )
                    cv2.putText(
                        input_frame,
                        'Car: ' + str(total_car),
                        (10, 70),
                        font,
                        0.6,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )

                    cv2.putText(
                        input_frame,
                        'Person: ' + str(total_pedestrian),
                        (10, 50),
                        font,
                        0.6,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )
                # print("restart  video file...")
                output_movie.write(input_frame)
                # print("writing frame")
                cv2.imshow('object counting', input_frame)
                # if csv_line != "not_available":
                #     with open('traffic_measurement.csv', 'a') as f:
                #         writer = csv.writer(f)
                #         size, direction = csv_line.split(',')
                #         writer.writerows([csv_line.split(',')])
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()
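A minimal driver for the function above might look like the following sketch; the model file, video path, and parameter values are illustrative assumptions, not part of the example.

# category_index: label-id -> {'id', 'name'} dict, e.g. built with the
# TF Object Detection API's label_map_util (assumed available)
detection_graph = load_frozen_graph('frozen_inference_graph.pb')  # see the sketch after Example #3
cumulative_object_custom_counting_x_axis(
    input_video='traffic.mp4',         # assumed input file
    detection_graph=detection_graph,
    category_index=category_index,
    is_color_recognition_enabled=0,
    fps=24, width=640, height=360,     # should match the source video
    roi=385, deviation=1)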
Example #3
def cumulative_object_counting_x_axis(input_video, detection_graph, category_index, is_color_recognition_enabled, roi, deviation):
        total_passed_vehicle = 0              

        # input video
        cap = cv2.VideoCapture(input_video)

        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        fps = int(cap.get(cv2.CAP_PROP_FPS))

        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        output_movie = cv2.VideoWriter('the_output.avi', fourcc, fps, (width, height))

        total_passed_vehicle = 0
        speed = "waiting..."
        direction = "waiting..."
        size = "waiting..."
        color = "waiting..."
        counting_mode = "..."
        width_height_taken = True
        with detection_graph.as_default():
          with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            # for all the frames that are extracted from input video
            while cap.isOpened():
                ret, frame = cap.read()                

                if not ret:
                    print("end of the video file...")
                    break
                
                input_frame = frame

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(input_frame, axis=0)

                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX

                # Visualization of the results of a detection.        
                counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(cap.get(1),
                                                                                                             input_frame,
                                                                                                             1,
                                                                                                             is_color_recognition_enabled,
                                                                                                             np.squeeze(boxes),
                                                                                                             np.squeeze(classes).astype(np.int32),
                                                                                                             np.squeeze(scores),
                                                                                                             category_index,
                                                                                                             x_reference = roi,
                                                                                                             deviation = deviation,
                                                                                                             use_normalized_coordinates=True,
                                                                                                             line_thickness=4)
                               
                # when a vehicle passes over the line and is counted, make the ROI line green
                if counter == 1:
                    cv2.line(input_frame, (roi, 0), (roi, height), (0, 0xFF, 0), 5)
                else:
                    cv2.line(input_frame, (roi, 0), (roi, height), (0, 0, 0xFF), 5)

                total_passed_vehicle = total_passed_vehicle + counter

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(
                    input_frame,
                    'Detected Pedestrians: ' + str(total_passed_vehicle),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0xFF),
                    2,
                    cv2.LINE_AA,
                    )


                cv2.putText(
                    input_frame,
                    'ROI Line',
                    (545, roi-10),
                    font,
                    0.6,
                    (0, 0, 0xFF),
                    2,
                    cv2.LINE_AA,
                    )

                output_movie.write(input_frame)
                print("writing frame")
                #cv2.imshow('object counting',input_frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()
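All of these examples assume detection_graph has already been loaded. For TF1-style frozen graphs, the usual loading pattern is the sketch below (the .pb path is a placeholder):

import tensorflow as tf

def load_frozen_graph(pb_path):
    # standard TF1 pattern for importing a frozen inference graph
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph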
Example #4
def object_counting_webcam(detection_graph, category_index, is_color_recognition_enabled, roi, deviation):
        outcount = 0

        cap = cv2.VideoCapture(0)
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        output_movie = cv2.VideoWriter('the_output.avi', fourcc, fps, (width, height))

        incount = 0
        speed = "waiting..."
        direction = "waiting..."
        size = "waiting..."
        color = "waiting..."
        counting_mode = "..."
        width_height_taken = True
        #height = 0
        #width = 0


        with detection_graph.as_default():
          with tf.compat.v1.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            # warm-up read (the capture was already opened above)
            (ret, frame) = cap.read()

            real_time = -1
            latest_count = 0
            # for all the frames that are extracted from input video
            while True:
                # Capture frame-by-frame
                (ret, frame) = cap.read()          

                if not ret:
                    print("end of the video file...")
                    break
                
                input_frame = frame

                # get the current number of people from the server
                r = requests.get('https://withpresso.gq/machine/get_num_of_customer?cafe_asin=1').text
                # crude parse of the raw response: the count is assumed to be
                # the second character of the third '"'-delimited token
                original = r.split('"')
                incount = 0
                if len(original) > 2:
                    incount = int(original[2][1])
                outcount = 0
                # print(incount, outcount)
                ########## incount comes from the server; outcount starts at zero ##########


                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(input_frame, axis=0)

                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX

                # Visualization of the results of a detection.        
                counter, csv_line, counting_mode, inoutdir = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(cap.get(1),
                                                                                                      input_frame,
                                                                                                      1,
                                                                                                      is_color_recognition_enabled,
                                                                                                      np.squeeze(boxes),
                                                                                                      np.squeeze(classes).astype(np.int32),
                                                                                                      np.squeeze(scores),
                                                                                                      category_index,
                                                                                                      latest_count,
                                                                                                      x_reference = roi,
                                                                                                      deviation = deviation,
                                                                                                      use_normalized_coordinates=True,
                                                                                                      line_thickness=4)
                # when the vehicle passed over line and counted, make the color of ROI line green
                if counter == 1:
                    cv2.line(input_frame, (roi, 0), (roi, height), (0, 0xFF, 0), 5)
                    latest_count = counter
                else:
                    cv2.line(input_frame, (roi, 0), (roi, height), (0, 0, 0xFF), 5)

                ######### flag that a person just passed; used right after a count to determine direction ########
                #latest_count = counter

                ############### determine the direction of the count #####################
                if latest_count == 1:
                    if inoutdir == 2:
                        outcount = outcount + latest_count
                        latest_count = 0
                    elif inoutdir == 1:
                        incount = incount + latest_count
                        latest_count = 0
                    #print("in: ", incount)
                    #print("out: ", outcount)
                ############### these counts are reported to the server ##############


                temp = real_time
                # print(temp, real_time)
                ######################## total seat count (10 here) could come from stdin ##############
                real_time = sd.seat_detecting_model1(10, incount, outcount)

                if temp != real_time:
                    # print("pedestrians:", incount - outcount)
                    # print(real_time)
                    current = incount-outcount
                    r2 = requests.post('https://withpresso.gq/machine/num_of_customer',
                                       {'cafe_asin': 1, 'num_of_customer': current, 'level': real_time+1}).text
                    #print("post?", r2)
                # (the real_time value above is what gets posted to the server)
                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(
                    input_frame,
                    'in count: ' + str(incount) + ', out count: ' + str(outcount),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0),
                    2,
                    cv2.LINE_AA,
                )

                cv2.putText(
                    input_frame,
                    'ROI Line',
                    (545, roi - 10),
                    font,
                    0.6,
                    (0, 0, 0xFF),
                    2,
                    cv2.LINE_AA,
                )

                output_movie.write(input_frame)
                # print ("writing frame")
                """
                if(len(counting_mode) == 0):
                    cv2.putText(input_frame, "...", (10, 35), font, 0.8, (0,255,255),2,cv2.FONT_HERSHEY_SIMPLEX)                       
                else:
                    cv2.putText(input_frame, counting_mode, (10, 35), font, 0.8, (0,255,255),2,cv2.FONT_HERSHEY_SIMPLEX)
                """
                cv2.imshow('object counting', input_frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()
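The string-splitting parse of the server response above is brittle. If the endpoint returned JSON, a more defensive fetch could look like this sketch; the response shape and field name are unverified assumptions:

import requests

def fetch_incount(cafe_asin=1):
    # assumes (unverified) a JSON body such as {"num_of_customer": "3"}
    try:
        r = requests.get('https://withpresso.gq/machine/get_num_of_customer',
                         params={'cafe_asin': cafe_asin}, timeout=2)
        return int(r.json().get('num_of_customer', 0))
    except (requests.RequestException, ValueError):
        return 0  # fall back if the server is unreachable or the body is malformed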
Example #5
    def get_localization(self, image):
        category_index = {1: {'id': 1, 'name': u'person'}}

        with self.detection_graph.as_default():
            image_expanded = np.expand_dims(image, axis=0)
            (boxes, scores, classes, num_detections) = self.sess.run(
                [self.boxes, self.scores, self.classes, self.num_detections],
                feed_dict={self.image_tensor: image_expanded})

            # Visualization of the results of a detection.
            counter, csv_line, counting_mode = visualization_utils.visualize_boxes_and_labels_on_image_array_x_axis(
                self.cap.get(1),
                image,
                1,
                0,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                x_reference=self.roi_increment,
                deviation=self.deviation_increment,
                use_normalized_coordinates=True,
                line_thickness=4)

            # Visualization of the results of a detection.
            counter2, csv_line2, counting_mode2 = visualization_utils.visualize_boxes_and_labels_on_image_array_x_axis(
                self.cap.get(1),
                image,
                1,
                0,  # color recognition
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                x_reference=self.roi_decrement,
                deviation=self.deviation_decrement,
                use_normalized_coordinates=True,
                line_thickness=4)

            if counter == 1:
                self.cv2.line(image, (self.roi_increment, 0),
                              (self.roi_increment, 720), (0, 0xFF, 0), 5)
            else:
                self.cv2.line(image, (self.roi_increment, 0),
                              (self.roi_increment, 720), (0, 0, 0xFF), 5)

            self.total_passed_people = self.total_passed_people + counter

            if counter2 == 1:
                self.cv2.line(image, (self.roi_decrement, 0),
                              (self.roi_decrement, 720), (0, 0xFF, 0), 5)
            else:
                self.cv2.line(image, (self.roi_decrement, 0),
                              (self.roi_decrement, 720), (0xFF, 0, 0), 5)

            self.total_passed_people = self.total_passed_people - counter2

            font = self.cv2.FONT_HERSHEY_SIMPLEX
            self.cv2.putText(
                image,
                'Detected People: ' + str(self.total_passed_people),
                (10, 45),
                font,
                0.8,
                (0, 0xFF, 0xFF),
                2,
                self.cv2.LINE_AA,
            )

            self.cv2.putText(
                image,
                'Increment',
                (545, self.roi_increment - 10),
                font,
                0.6,
                (0, 0, 0xFF),
                2,
                self.cv2.LINE_AA,
            )

            self.cv2.putText(
                image,
                'Decrement',
                (525, self.roi_decrement - 10),
                font,
                0.6,
                (0xFF, 0, 0),
                2,
                self.cv2.LINE_AA,
            )
            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes)
            scores = np.squeeze(scores)

            cls = classes.tolist()

            # keep only 'person' detections (class id 1) above 0.3 confidence
            idx_vec = [
                i for i, v in enumerate(cls)
                if ((v == 1) and (scores[i] > 0.3))
            ]

            if idx_vec:
                tmp_person_boxes = []
                for idx in idx_vec:
                    dim = image.shape[0:2]
                    box = self.box_normal_to_pixel(boxes[idx], dim)
                    box_h = box[2] - box[0]
                    box_w = box[3] - box[1]
                    ratio = box_h / (box_w + 0.01)

                    tmp_person_boxes.append(box)
                    print(box, ', confidence: ', scores[idx], 'ratio:', ratio)

                self.person_boxes = tmp_person_boxes

        return self.person_boxes
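box_normal_to_pixel is used above but not shown in this listing. Judging from its call site (a normalized [ymin, xmin, ymax, xmax] box plus dim = image.shape[0:2]), a plausible reconstruction would be:

import numpy as np

def box_normal_to_pixel(box, dim):
    # plausible reconstruction, not the original helper: scales a normalized
    # [ymin, xmin, ymax, xmax] box to integer pixel coordinates
    height, width = dim
    return np.array([box[0] * height, box[1] * width,
                     box[2] * height, box[3] * width]).astype(int)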
Example #6
def cumulative_object_counting_x_axis(input_video, detection_graph, category_index, is_color_recognition_enabled, fps, width, height, roi, deviation):
    total_passed_vehicle = 0

    # initialize .csv
    with open('report.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['x', 'y', 'accuracy'])  # header row

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_movie = cv2.VideoWriter('output.mp4', fourcc, fps, (width, height))

    # input video
    cap = cv2.VideoCapture(input_video)

    total_passed_vehicle = 0
    speed = "waiting..."
    direction = "waiting..."
    size = "waiting..."
    color = "waiting..."
    counting_mode = "..."
    width_height_taken = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            # for all the frames that are extracted from input video
            while cap.isOpened():
                ret, frame = cap.read()

                if not ret:
                    print("\nfim de jogo...")
                    break

                input_frame = frame

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(input_frame, axis=0)

                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores,
                        detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX

                # Visualization of the results of a detection.
                counter, csv_line, counting_mode = \
                    vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                        cap.get(1),
                        input_frame,
                        1,
                        is_color_recognition_enabled,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        x_reference=roi,
                        deviation=deviation,
                        use_normalized_coordinates=True,
                        line_thickness=4
                    )

                # when a vehicle passes over the line and is counted, make the ROI line green
                if counter == 1:
                    cv2.line(input_frame, (roi, 0),
                             (roi, height), (0, 0xFF, 0), 5)
                else:
                    cv2.line(input_frame, (roi, 0),
                             (roi, height), (0, 0, 0xFF), 5)

                total_passed_vehicle = total_passed_vehicle + counter

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(
                    input_frame,
                    'Fish detected: ' + str(total_passed_vehicle),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0xFF),
                    2,
                    cv2.LINE_AA,
                )
                # ROI line label (left empty in this example)
                cv2.putText(
                    input_frame,
                    '',
                    (545, roi-10),
                    font,
                    0.6,
                    (0, 0, 0xFF),
                    2,
                    cv2.LINE_AA,
                )

                output_movie.write(input_frame)
                print(".", end="")
                cv2.imshow('object counting', input_frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                # if csv_line != "not_available":
                #     with open('traffic_measurement.csv', 'a') as f:
                #         writer = csv.writer(f)
                #         size, direction = csv_line.split(',')
                #         writer.writerows([csv_line.split(',')])

            cap.release()
            cv2.destroyAllWindows()
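Some containers and webcams report an FPS of 0, which silently breaks cv2.VideoWriter. A defensive variant of the writer setup used throughout these examples (the fallback frame rate is an assumption):

import cv2

def open_writer(cap, out_path='output.mp4'):
    fps = cap.get(cv2.CAP_PROP_FPS) or 25.0       # assumed fallback rate
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')      # matches the .mp4 container
    return cv2.VideoWriter(out_path, fourcc, fps, (width, height))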
Example #7
def cumul_object_counting_roi_line(input_video,
                                   detection_graph,
                                   category_index,
                                   roi,
                                   roi_axis,
                                   interval=5,
                                   is_color_recognition_enabled=0,
                                   deviation=1):

    total_passed_object = 0
    # output path
    output_path = 'output/'
    # input video
    cap = cv2.VideoCapture(input_video)

    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    vid_name = input_video[input_video.rfind('/') + 1:input_video.rfind('.')]

    print('height: ' + str(height))
    print('width: ' + str(width))
    print('frame count: ' + str(total_frame))
    print('fps: ' + str(fps))
    print('video name: ' + vid_name)

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_video = cv2.VideoWriter(output_path + vid_name + '.mp4', fourcc,
                                   fps, (width, height))

    # set roi by percentage of video size
    if 0 < roi < 1:
        if roi_axis == 0:
            roi = int(width * roi)
        elif roi_axis == 1:
            roi = int(height * roi)

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            # variables to work with excel
            frame_counter = 1
            total_passed_object_per_interval = 0
            final_row = 0
            current_row = 0

            workbook = xl.Workbook(output_path + vid_name + '_' +
                                   str(interval) + 's_interval.xlsx')
            worksheet = workbook.add_worksheet()
            bold = workbook.add_format({'bold': True})

            # write table header
            worksheet.write('A1', 'TIME (S)', bold)
            worksheet.write('B1', 'COUNT', bold)
            worksheet.write('C1', 'CUMULATIVE', bold)

            # for all the frames that are extracted from input video
            while cap.isOpened():
                ret, frame = cap.read()

                if not ret:
                    print("end of the video file...")
                    break

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(frame, axis=0)

                # Actual detection.
                (boxes, scores, classes,
                 num) = sess.run([
                     detection_boxes, detection_scores, detection_classes,
                     num_detections
                 ],
                                 feed_dict={image_tensor: image_np_expanded})

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX

                # Visualization of the results of a detection
                if roi_axis == 0:  # if x axis
                    counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                        cap.get(1),
                        frame,
                        1,
                        is_color_recognition_enabled,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        x_reference=roi,
                        deviation=deviation,
                        use_normalized_coordinates=True,
                        line_thickness=4)

                    # when an object passes over the line and is counted, make the ROI line green
                    if counter == 1:
                        cv2.line(frame, (roi, 0), (roi, height), (0, 0xFF, 0),
                                 5)
                    else:
                        cv2.line(frame, (roi, 0), (roi, height), (0, 0, 0xFF),
                                 5)

                elif roi_axis == 1:  # if y axis
                    counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                        cap.get(1),
                        frame,
                        2,
                        is_color_recognition_enabled,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        y_reference=roi,
                        deviation=deviation,
                        use_normalized_coordinates=True,
                        line_thickness=4)

                    # when an object passes over the line and is counted, make the ROI line green
                    if counter == 1:
                        cv2.line(frame, (0, roi), (width, roi), (0, 0xFF, 0),
                                 5)
                    else:
                        cv2.line(frame, (0, roi), (width, roi), (0, 0, 0xFF),
                                 5)

                total_passed_object += counter
                total_passed_object_per_interval += counter

                # insert information text to video frame
                cv2.putText(
                    frame,
                    'Detected objects: ' + str(total_passed_object),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0xFF),
                    2,
                    cv2.LINE_AA,
                )

                cv2.putText(
                    frame,
                    'ROI Line',
                    (545, roi - 10),
                    font,
                    0.6,
                    (0, 0, 0xFF),
                    2,
                    cv2.LINE_AA,
                )

                output_video.write(frame)
                print("writing frame " + str(frame_counter) + '/' +
                      str(total_frame))

                if frame_counter % (interval * fps) == 0:
                    current_row = frame_counter // (interval * fps)
                    worksheet.write(current_row, 0, frame_counter // fps)
                    worksheet.write(current_row, 1,
                                    total_passed_object_per_interval)
                    if current_row == 1:
                        worksheet.write(current_row, 2, '=B2')
                    else:
                        worksheet.write(current_row, 2,
                                        '=B' + str(current_row + 1) + '+C' +
                                        str(current_row))  # =B(x+1)+C(x)
                    total_passed_object_per_interval = 0

                # # print for last frame
                # if frame_counter == total_frame:
                # 	worksheet.write(current_row, 0, frame_counter//fps)
                # 	worksheet.write(current_row, 1, total_passed_object_per_interval)
                # 	if current_row == 1:
                # 		worksheet.write(current_row, 2, '=B' + str(current_row+1))
                # 	else:
                # 		worksheet.write(current_row, 2, '=B' + str(current_row+1) + '+C' + str(current_row))

                final_row = current_row  # remember the last row written

                frame_counter += 1

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # create the graph consisting of combined bar and line charts
            bar_chart = workbook.add_chart({'type': 'column'})
            bar_chart.add_series({
                'name': '=Sheet1!B1',
                'categories': '=Sheet1!A2:A' + str(final_row + 1),
                'values': '=Sheet1!B2:B' + str(final_row + 1)
            })
            line_chart = workbook.add_chart({'type': 'line'})
            line_chart.add_series({
                'name': '=Sheet1!C1',
                'categories': '=Sheet1!A2:A' + str(final_row + 1),
                'values': '=Sheet1!C2:C' + str(final_row + 1)
            })
            bar_chart.combine(line_chart)

            bar_chart.set_title({'name': 'No of Pedestrians'})
            bar_chart.set_x_axis({'name': '=Sheet1!A1'})
            bar_chart.set_y_axis({'name': 'Pedestrians'})
            worksheet.insert_chart('F2', bar_chart)

            # mode, median, mean and SD of the count and cumulative columns
            worksheet.write('A' + str(final_row + 3), 'MODE', bold)
            worksheet.write('A' + str(final_row + 4), 'MEDIAN', bold)
            worksheet.write('A' + str(final_row + 5), 'MEAN', bold)
            worksheet.write('A' + str(final_row + 6), 'SD', bold)
            worksheet.write('B' + str(final_row + 3),
                            '=MODE(B2:B' + str(final_row + 1) + ')')
            worksheet.write('B' + str(final_row + 4),
                            '=MEDIAN(B2:B' + str(final_row + 1) + ')')
            worksheet.write('B' + str(final_row + 5),
                            '=AVERAGE(B2:B' + str(final_row + 1) + ')')
            worksheet.write('B' + str(final_row + 6),
                            '=STDEV(B2:B' + str(final_row + 1) + ')')
            worksheet.write('C' + str(final_row + 3),
                            '=MODE(C2:C' + str(final_row + 1) + ')')
            worksheet.write('C' + str(final_row + 4),
                            '=MEDIAN(C2:C' + str(final_row + 1) + ')')
            worksheet.write('C' + str(final_row + 5),
                            '=AVERAGE(C2:C' + str(final_row + 1) + ')')
            worksheet.write('C' + str(final_row + 6),
                            '=STDEV(C2:C' + str(final_row + 1) + ')')

            workbook.close()

            cap.release()
            cv2.destroyAllWindows()
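The MODE/MEDIAN/MEAN/STDEV cells above are written as Excel formulas, so they are only evaluated when the workbook is opened. The same statistics could be computed in Python before writing, as in this sketch over the per-interval counts (column B):

import statistics

def interval_stats(counts):
    # counts: the per-interval totals accumulated in the loop above
    # (note: statistics.mode raises StatisticsError on ties before Python 3.8)
    return {
        'mode': statistics.mode(counts),
        'median': statistics.median(counts),
        'mean': statistics.mean(counts),
        'sd': statistics.stdev(counts) if len(counts) > 1 else 0.0,
    }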
Example #8
def cumulative_object_counting_x_axis(input_video, detection_graph,
                                      category_index,
                                      is_color_recognition_enabled, roi,
                                      deviation):
    total_passed_vehicle = 0

    # input video
    cap = cv2.VideoCapture(input_video)

    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    total_passed_vehicle = 0
    speed = "waiting..."
    direction = "waiting..."
    size = "waiting..."
    color = "waiting..."
    counting_mode = "..."
    width_height_taken = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            # for all the frames that are extracted from input video
            import math  # used for the centroid distance below (ideally a top-level import)
            while cap.isOpened():
                ret, frame = cap.read()

                if not ret:
                    print("end of the video file...")
                    break

                input_frame = frame

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(input_frame, axis=0)

                # Actual detection.
                (boxes, scores, classes,
                 num) = sess.run([
                     detection_boxes, detection_scores, detection_classes,
                     num_detections
                 ],
                                 feed_dict={image_tensor: image_np_expanded})

                # social-distance check between the first two detections
                bboxes = np.squeeze(boxes).copy()
                nnum = int(num[0])
                print("num ", nnum)
                height = input_frame.shape[0]  # rows
                width = input_frame.shape[1]   # columns
                dist = 10000
                if nnum >= 2:
                    # boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels
                    for b in (bboxes[0], bboxes[1]):
                        b[0] *= height
                        b[1] *= width
                        b[2] *= height
                        b[3] *= width

                    # centroid of each box: ((xmin + xmax) / 2, (ymin + ymax) / 2)
                    centroid1 = ((bboxes[0][1] + bboxes[0][3]) / 2,
                                 (bboxes[0][0] + bboxes[0][2]) / 2)
                    centroid2 = ((bboxes[1][1] + bboxes[1][3]) / 2,
                                 (bboxes[1][0] + bboxes[1][2]) / 2)

                    try:
                        # Euclidean distance between the two centroids
                        dist = math.hypot(centroid2[0] - centroid1[0],
                                          centroid2[1] - centroid1[1])
                        print(dist, "  ", nnum, '\n')
                    except Exception as e:
                        print(e)
                if dist < 140:
                    flag = 1
                else:
                    flag = 0
                font = cv2.FONT_HERSHEY_SIMPLEX

                # Visualization of the results of a detection.
                counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                    cap.get(1),
                    input_frame,
                    1,
                    is_color_recognition_enabled,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    x_reference=roi,
                    deviation=deviation,
                    use_normalized_coordinates=True,
                    line_thickness=4)

                # when a pedestrian passes over the line and is counted, make the ROI line green
                # if counter == 1:
                #   cv2.line(input_frame, (roi, 0), (roi, height), (0, 255, 0), 5)
                # else:
                #   cv2.line(input_frame, (roi, 0), (roi, height), (0, 0, 255), 5)

                total_passed_vehicle = total_passed_vehicle + counter

                # insert information text to video frame
                text = 'Pedestrians Detected '
                if flag:
                    text = text + 'without safe distance '
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(
                    input_frame,
                    text + str(nnum),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0xFF),
                    2,
                    cv2.LINE_AA,
                )

                cv2.putText(
                    input_frame,
                    'ROI Threshold',
                    (545, roi - 10),
                    font,
                    0.6,
                    (0, 0, 255),
                    2,
                    cv2.LINE_AA,
                )

                cv2.imshow('object counting', input_frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()
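The distance check above only compares the first two detections in the frame. A generalization that tests every pair of centroids against the same threshold (a sketch building on the corrected centroid math):

import math
from itertools import combinations

def any_pair_too_close(centroids, threshold=140):
    # centroids: list of (x, y) pixel tuples, one per detection in the frame
    for (x1, y1), (x2, y2) in combinations(centroids, 2):
        if math.hypot(x2 - x1, y2 - y1) < threshold:
            return True
    return False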
Example #9
def cumulative_object_counting_x_axis(input_video, detection_graph,
                                      category_index,
                                      is_color_recognition_enabled,
                                      targeted_object, fps, width, height, roi,
                                      deviation, logFile, roiAreas):
    total_passed_vehicle = 0

    # initialize .csv
    with open(logFile, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([
            'Timestamp', 'Object_Type', 'Object_Color', 'Movement_Direction',
            'Object_Speed(km/h)'
        ])

    #fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #output_movie = cv2.VideoWriter('the_output.avi', fourcc, fps, (width, height))

    # input video
    cap = cv2.VideoCapture(input_video)
    # update width/height from the capture's actual values (optional)
    # width = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    # height = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_passed_vehicle = 0
    speed = "waiting..."
    direction = "waiting..."
    size = "waiting..."
    color = "waiting..."
    counting_mode = "..."
    width_height_taken = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Define input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the level of confidence for each object.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')

            try:
                # for all the frames that are extracted from input video
                while cap.isOpened():
                    ret, frame = cap.read()

                    if not ret:
                        print("end of the video file...")
                        break

                    # scale the frame down to the requested size
                    frame = cv2.resize(frame, (int(width), int(height)))

                    input_frame = frame

                    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                    image_np_expanded = np.expand_dims(input_frame, axis=0)

                    # Actual detection.
                    (boxes, scores, classes, num) = sess.run(
                        [
                            detection_boxes, detection_scores,
                            detection_classes, num_detections
                        ],
                        feed_dict={image_tensor: image_np_expanded})

                    # insert information text to video frame
                    font = cv2.FONT_HERSHEY_SIMPLEX

                    # Visualization of the results of a detection.
                    counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                        cap.get(1),
                        input_frame,
                        1,
                        is_color_recognition_enabled,
                        np.squeeze(boxes),
                        np.squeeze(classes).astype(np.int32),
                        np.squeeze(scores),
                        category_index,
                        targeted_objects=targeted_object,
                        x_reference=roi,
                        deviation=deviation,
                        use_normalized_coordinates=True,
                        line_thickness=4,
                        roiArea=roiAreas)
                    print(csv_line.split(','))
                    # when a vehicle passes over the line and is counted, make the ROI line green
                    if counter == 1:
                        cv2.line(input_frame, (roi, 0), (roi, height),
                                 (0, 0xFF, 0), 5)
                    else:
                        cv2.line(input_frame, (roi, 0), (roi, height),
                                 (0, 0, 0xFF), 5)

                    total_passed_vehicle = total_passed_vehicle + counter
                    # draw semi-transparent ROI areas
                    for area in roiAreas:
                        print(area)
                        overlay = input_frame.copy()  # pre-draw copy for blending
                        cv2.rectangle(input_frame, (area[0], area[1]),
                                      (area[2], area[3]), (255, 0, 0), -1)
                        # blend the pre-draw copy back in for 50% opacity
                        alpha = 0.5
                        cv2.addWeighted(overlay, alpha, input_frame, 1 - alpha,
                                        0, input_frame)
                    # insert information text to video frame
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(
                        input_frame,
                        'Detected Vehicles: ' + str(total_passed_vehicle),
                        (10, 35),
                        font,
                        0.8,
                        (0, 0xFF, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )

                    cv2.putText(
                        input_frame,
                        'ROI Line',
                        (roi + 10, height - 10),
                        font,
                        0.6,
                        (0, 0, 0xFF),
                        2,
                        cv2.LINE_AA,
                    )

                    #output_movie.write(input_frame)
                    #print ("writing frame")
                    cv2.imshow('object counting', input_frame)

                    key = cv2.waitKey(1) & 0xFF
                    if key == ord('q') or key == 27:  # 'q' or Esc
                        break

                    if (csv_line != "not_available"):
                        with open(logFile, 'a', newline='') as f:
                            writer = csv.writer(f)
                            #size, direction = csv_line.split(',')
                            logRow = csv_line.split(',')
                            logRow.insert(
                                0,
                                datetime.datetime.now().strftime("%c"))
                            logRow.insert(2, '')
                            writer.writerows([logRow])
                            #writer.writerow({'Timestamp': datetime.datetime.now(), 'Object_Type': size, 'Movement_Direction': direction })
            except KeyboardInterrupt:
                pass

            cap.release()
            cv2.destroyAllWindows()
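The semi-transparent ROI rectangles above reduce to a single copy-draw-blend
sequence with cv2.addWeighted. A minimal, self-contained sketch of that
pattern (the frame and area here are synthetic placeholders):

import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)  # stand-in for a video frame
area = (60, 40, 200, 160)                        # x1, y1, x2, y2

overlay = frame.copy()
cv2.rectangle(overlay, (area[0], area[1]), (area[2], area[3]),
              (255, 0, 0), -1)                   # filled, fully opaque
alpha = 0.5                                      # overlay opacity
# blend the painted copy back onto the frame in place
cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)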
Example #10
def cumulative_object_counting_x_axis_bps(model_name, input_video,
                                          detection_graph, category_index,
                                          is_color_recognition_enabled, fps,
                                          width, height, roi):
    total_passed_vehicle = 0

    # str.strip('.mp4') would remove any of the characters . m p 4 from both
    # ends, not the extension, so build the base name with replace instead
    base_name = input_video.replace('.mp4', '') + '_' + model_name + '_INOUT'

    # initialize the .csv measurement log
    with open(base_name + '_measure.txt', 'w') as f:
        writer = csv.writer(f)
        csv_line = "Object Type, Object Color, Object Movement Direction, Object Speed (km/h)"
        writer.writerows([csv_line.split(',')])

    # note: the XVID fourcc pairs naturally with .avi containers; some
    # players may not accept it inside an .mp4 file
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_movie = cv2.VideoWriter(base_name + '_output.mp4', fourcc, fps,
                                   (width, height))

    # input video
    cap = cv2.VideoCapture(input_video)
    num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    # duration in seconds = frame count / fps
    print("Number of frames:", num_frames, "FPS:", fps,
          "Video duration (s):", num_frames / fps)

    total_in = 0
    total_out = 0
    speed = "waiting..."
    direction = "waiting..."
    size = "waiting..."
    color = "waiting..."
    counting_mode = "..."
    width_height_taken = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name(
                'detection_boxes:0')

            # Each score represents the confidence for the corresponding
            # object and is shown on the result image together with the
            # class label.
            detection_scores = detection_graph.get_tensor_by_name(
                'detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name(
                'detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')
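            # 'image_tensor' and the 'detection_*' / 'num_detections' names
            # above are the standard tensors exported by a frozen TF1
            # Object Detection API graph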

            inf_media = []  # per-frame inference times (seconds)
            # for all the frames that are extracted from input video
            while (cap.isOpened()):
                ret, frame = cap.read()

                if not ret:
                    print("end of the video file...")
                    break

                input_frame = frame

                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(input_frame, axis=0)

                inf_start = time.time()
                # Actual detection.
                (boxes, scores, classes,
                 num) = sess.run([
                     detection_boxes, detection_scores, detection_classes,
                     num_detections
                 ],
                                 feed_dict={image_tensor: image_np_expanded})

                inf = (time.time() - inf_start)
                inf_media.append(inf)
                moda = stats.mode(inf_media)
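                # note: the mode of raw float timings is brittle (exact
                # repeats are rare); a median would be a sturdier summary,
                # though the debug prints below expect stats.mode's output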
                #if cap.get(1) % 50 == 0 :
                #     print("Inference elapsed time (s) %1.4f" % inf, "Moda:", moda[0])

                # insert information text to video frame
                font = cv2.FONT_HERSHEY_SIMPLEX

                vis_start = time.time()
                # Visualization of the results of a detection.
                counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_x_axis(
                    cap.get(1),
                    input_frame,
                    1,
                    is_color_recognition_enabled,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    targeted_objects="person",
                    x_reference=roi,
                    deviation=1.24,  # tolerance band around the ROI line
                    min_score_thresh=.8,
                    use_normalized_coordinates=True,
                    line_thickness=4)
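                # min_score_thresh=.8 favors precision over recall here, and
                # targeted_objects="person" restricts counting to one class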

                vis = time.time() - vis_start
                #size, direction = csv_line.split(',')
                #print("Visualization elapsed time (s) %1.4f" % vis)
                # when a person crosses the line and is counted, flash the
                # ROI line green; "up" counts as an exit, "down" as an entry
                if counter == 1:
                    cv2.line(input_frame, (roi, 0), (roi, height),
                             (0, 0xFF, 0), 5)
                    if csv_line != "not_available":
                        size, direction = csv_line.split(',')
                        if direction == "up":
                            total_passed_vehicle = total_passed_vehicle + counter
                            total_out = total_out + counter
                        if direction == "down":
                            total_passed_vehicle = total_passed_vehicle - counter
                            total_in = total_in + counter
                else:
                    cv2.line(input_frame, (roi, 0), (roi, height),
                             (0, 0, 0xFF), 5)

                #total_passed_vehicle = total_passed_vehicle + counter

                # insert information text to video frame
                cv2.putText(
                    input_frame,
                    #'Net: ' + str(total_passed_vehicle) + ' In: ' + str(total_in) + ' Out: ' + str(total_out),
                    ' In: ' + str(total_in) + ' Out: ' + str(total_out),
                    (10, 35),
                    font,
                    0.8,
                    (0, 0xFF, 0xFF),
                    2,
                    cv2.LINE_AA,  # anti-aliased line type
                )

                #cv2.putText(
                #    input_frame,
                #    'ROI Line',
                #    (545, roi - 10),
                #    font,
                #    0.6,
                #    (0, 0, 0xFF),
                #    2,
                #    cv2.LINE_AA,
                #)
                rec_start = time.time()
                output_movie.write(input_frame)
                rec = time.time() - rec_start
                #print("Recording elapsed time (s) %1.4f" % rec)
                #print("csv_line-->",csv_line)
                #print ("writing frame", str(total_passed_vehicle))
                cv2.imshow('object counting', input_frame)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if csv_line != "not_available":
                    with open(base_name + '_measure.txt', 'a') as f:
                        writer = csv.writer(f)
                        # append the frame number and the running in/out totals
                        csv_line = csv_line + ',' + str(int(cap.get(
                            1))) + ',' + str(total_in) + ',' + str(total_out)
                        print("csv_line:", csv_line)
                        writer.writerows([csv_line.split(',')])

            cap.release()
            cv2.destroyAllWindows()
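For completeness, a hedged usage sketch for cumulative_object_counting_x_axis_bps.
The frozen-graph loading follows the usual TF1 Object Detection API recipe; the
file names, the label_map_util import path, and the video geometry are
placeholder assumptions to adapt:

import tensorflow as tf
from utils import label_map_util  # TF Object Detection API helper (path may vary)

# load a frozen TF1 detection graph
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile('frozen_inference_graph.pb', 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
        tf.import_graph_def(od_graph_def, name='')

category_index = label_map_util.create_category_index_from_labelmap(
    'mscoco_label_map.pbtxt', use_display_name=True)

cumulative_object_counting_x_axis_bps(
    model_name='ssd_mobilenet',    # only used to name the output files
    input_video='entrance.mp4',
    detection_graph=detection_graph,
    category_index=category_index,
    is_color_recognition_enabled=0,
    fps=24,                        # should match the source video
    width=854,
    height=480,
    roi=200)                       # x coordinate of the counting line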