# --- Example 1 ---
def manual_setting():
    """Build and return a fully-configured darkflow ``argHandler`` for a manual test run."""
    flags = argHandler()
    flags.setDefaults()

    # --- input source and model selection ---
    flags.demo = "Video/20180725_1320.mp4"  # video file to process, or "camera" for a live feed
    flags.model = "darkflow/cfg/yolo_smartcity.cfg"  # network configuration file
    flags.load = 10912  # 37250  # checkpoint number to load weights from
    # flags.pbLoad = "tiny-yolo-voc-traffic.pb"   # frozen-graph alternative
    # flags.metaLoad = "tiny-yolo-voc-traffic.meta"

    # --- detection threshold and GPU budget ---
    flags.threshold = 0.3  # keep detections whose confidence exceeds this value
    flags.max_gpu_usage = 0.7
    flags.number_of_parallel_threads = int(os.environ.get("NO_OF_THREADS", 2))
    # Split the GPU budget evenly across the worker threads; 0 would mean CPU only.
    flags.gpu = flags.max_gpu_usage / flags.number_of_parallel_threads

    # --- tracking configuration ---
    flags.track = True  # enable object tracking
    flags.trackObj = [
        'small-car', 'big-car', 'bus', 'truck', 'three-wheeler', 'two-wheeler',
        'lcv', 'bicycle', 'people', 'auto-rickshaw'
    ]
    # flags.trackObj = ["person"]
    flags.saveVideo = False  # do not persist the annotated video
    flags.BK_MOG = False  # cv2 MOG background subtraction fallback when YOLO misses motion
    flags.tracker = "sort"  # "sort" or "deep_sort" (deep_sort is trained for people only)
    flags.skip = 0  # frames to skip between detections (speed vs. accuracy trade-off)
    flags.csv = False  # write a csv log (only meaningful when tracking is on)
    flags.display = True  # show the tracking window
    flags.testing = True
    return flags
# --- Example 2 ---
    def run(self):
        """Configure darkflow from the pipeline settings and run the detection loop."""
        # At least one analysis module must have been enabled beforehand.
        any_module_enabled = (self.__run_counting_cars or self.__run_detect_direction
                              or self.__run_hscd or self.__run_traffic_violation_detection)
        if not any_module_enabled:
            print(
                "please setup individual modules before running the pipeline")
            sys.exit(0)

        output_path = self.__show_output_path()
        print("OUTPUT WILL BE AT: " + str(output_path))
        alert_path = self.__path_to_alert_poller()
        print("ALERTS FOR THIS RUN WILL BE AT: " + str(alert_path))

        # Start from a clean TF1 graph in case a previous run populated it.
        tf.reset_default_graph()

        flags = argHandler()
        flags.setDefaults()

        # --- input source and model selection ---
        flags.demo = self.__video_path  # video file to process, or "camera" for a live feed
        flags.model = "darkflow/cfg/yolo.cfg"  # network configuration file
        flags.load = "darkflow/bin/yolo.weights"  # weights file to load

        # --- detection and tracking configuration ---
        flags.threshold = 0.35  # keep detections whose confidence exceeds this value
        flags.gpu = 0.85  # fraction of the GPU to use (0 means CPU only)
        flags.track = True  # enable object tracking
        flags.trackObj = "car"  # the object class to track
        flags.saveVideo = True  # persist the annotated video
        flags.BK_MOG = False  # cv2 MOG background subtraction fallback when YOLO misses motion
        # NOTE: deep_sort is trained for people detection only.
        flags.tracker = "deep_sort"  # tracking algorithm: deep_sort or sort
        flags.skip = 0  # frames to skip between detections (speed vs. accuracy trade-off)
        flags.csv = False  # write a csv log (only meaningful when tracking is on)
        flags.display = True  # show the tracking window

        # --- application modules to run ---
        flags.counting_cars = self.__run_counting_cars
        flags.direction_detection = self.__run_detect_direction
        flags.speed_estimation = self.__run_hscd
        flags.traffic_signal_violation_detection = self.__run_traffic_violation_detection

        # flags.application_dir = os.getcwd()
        # flags.user_input_video_name = self.__user_input_video_name

        # --- run metadata ---
        flags.location_name = self.__location_name
        flags.path_to_output = self.__path_to_output
        flags.start_time = self.__start_time

        tfnet = TFNet(flags)
        tfnet.camera()
        print("End of Demo.")
# --- Example 3 ---
    def __init__(self, filename):
        """Set up a darkflow ``TFNet`` configured to track people in *filename*."""
        flags = argHandler()
        flags.setDefaults()

        # --- input source and model selection ---
        flags.demo = filename  # video file to process, or "camera" for a live feed
        flags.model = "darkflow/cfg/yolo.cfg"  # network configuration file
        flags.load = "darkflow/bin/yolo.weights"  # weights file to load

        # --- detection and tracking configuration ---
        flags.threshold = 0.25  # keep detections whose confidence exceeds this value
        flags.gpu = 0  # fraction of the GPU to use (0 means CPU only)
        flags.track = True  # enable object tracking
        flags.trackObj = "person"  # the object class to track
        flags.saveVideo = True  # persist the annotated video
        flags.BK_MOG = False  # cv2 MOG background subtraction fallback when YOLO misses motion
        # NOTE: deep_sort is trained for people detection only.
        flags.tracker = "deep_sort"  # tracking algorithm: deep_sort or sort
        flags.skip = 2  # frames to skip between detections to speed up the network
        flags.csv = False  # write a csv log (only meaningful when tracking is on)
        flags.display = False  # do not show the tracking window
        # flags.queue = 10

        self.tfnet = TFNet(flags)
# --- Example 4 ---
from darkflow.darkflow.defaults import argHandler  # default argument handler
import os
from darkflow.darkflow.net.build import TFNet

# Build the configuration object, starting from darkflow's defaults.
FLAGS = argHandler()
FLAGS.setDefaults()

# --- input source and model selection ---
FLAGS.demo = "camera"  # video file to process, or "camera" for a live feed
FLAGS.model = "darkflow/cfg/yolo.cfg"  # network configuration file
FLAGS.load = "darkflow/bin/yolo.weights"  # weights file to load
# FLAGS.pbLoad = "tiny-yolo-voc-traffic.pb"   # frozen-graph alternative
# FLAGS.metaLoad = "tiny-yolo-voc-traffic.meta"

# --- detection and tracking configuration ---
FLAGS.threshold = 0.7  # keep detections whose confidence exceeds this value
FLAGS.gpu = 0.8  # fraction of the GPU to use (0 means CPU only)
FLAGS.track = False  # tracking disabled: detection only
FLAGS.trackObj = [
    'Bicyclist', 'Pedestrian', 'Skateboarder', 'Cart', 'Car', 'Bus'
]  # object classes that would be tracked
# FLAGS.trackObj = ["person"]
FLAGS.saveVideo = True  # persist the annotated video
FLAGS.BK_MOG = True  # cv2 MOG background subtraction fallback when YOLO misses motion
FLAGS.tracker = "sort"  # tracking algorithm: sort or deep_sort (deep_sort is people-only)
FLAGS.skip = 0  # frames to skip between detections (speed vs. accuracy trade-off)
FLAGS.csv = False  # write a csv log (only meaningful when tracking is on)
FLAGS.display = True  # show the tracking window

# Build the network and start processing frames from the camera.
tfnet = TFNet(FLAGS)
tfnet.camera()