Code Example #1
    def run(self):

        if not (self.__run_counting_cars or self.__run_detect_direction
                or self.__run_hscd or self.__run_traffic_violation_detection):
            print(
                "Please set up the individual modules before running the pipeline")
            sys.exit(0)

        print("OUTPUT WILL BE AT: " + str(self.__show_output_path()))
        print("ALERTS FOR THIS RUN WILL BE AT: " +
              str(self.__path_to_alert_poller()))

        tf.reset_default_graph()

        FLAGS = argHandler()
        FLAGS.setDefaults()

        FLAGS.demo = self.__video_path  # video file to use, or "camera" to use a camera feed
        FLAGS.model = "darkflow/cfg/yolo.cfg"  # tensorflow model
        FLAGS.load = "darkflow/bin/yolo.weights"  # tensorflow weights
        FLAGS.threshold = 0.35  # detection confidence threshold (detection if confidence > threshold)
        FLAGS.gpu = 0.85  # how much of the GPU to use (between 0 and 1); 0 means use the CPU
        FLAGS.track = True  # whether to activate tracking or not
        FLAGS.trackObj = "car"  # the object to be tracked
        FLAGS.saveVideo = True  # whether to save the video or not
        FLAGS.BK_MOG = False  # activate background subtraction using cv2 MOG subtraction,
        # to help in the worst case, when YOLO cannot predict (it detects movement; not ideal, but workable).
        # It helps only when the number of detections < 5, as it is still better than no detection.
        # (NOTE: deep_sort is only trained for people detection)
        FLAGS.tracker = "deep_sort"  # which algorithm to use for tracking: deep_sort/sort

        FLAGS.skip = 0  # how many frames to skip between detections to speed up the network
        FLAGS.csv = False  # whether to write a csv file or not (only when tracking is set to True)
        FLAGS.display = True  # whether to display the tracking or not

        # modules
        FLAGS.counting_cars = self.__run_counting_cars  # to enable counting cars application module
        FLAGS.direction_detection = self.__run_detect_direction  # run direction detection or skip
        FLAGS.speed_estimation = self.__run_hscd  # run speed estimation or skip
        FLAGS.traffic_signal_violation_detection = self.__run_traffic_violation_detection

        # FLAGS.application_dir = os.getcwd()
        # FLAGS.user_input_video_name = self.__user_input_video_name

        FLAGS.location_name = self.__location_name
        FLAGS.path_to_output = self.__path_to_output
        FLAGS.start_time = self.__start_time
        tfnet = TFNet(FLAGS)

        tfnet.camera()
        print("End of Demo.")
Code Example #2
class ImageConverter:
    def __init__(self, args):
        self.tfnet = None
        self.image_publisher = rospy.Publisher(args.output, Image, queue_size=1)
        self.subscriber = rospy.Subscriber(args.input, Image, self.callback,
                                           queue_size=1, buff_size=2**24)
        # keep a separate handle so the image subscriber above is not overwritten
        self.info_subscriber = rospy.Subscriber("/cctv_info", String, self.exit)

        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", 0.2, 100)
        self.tracker = Tracker(metric)
        self.encoder = generate_detections.create_box_encoder(
            os.path.abspath("deep_sort/resources/networks/mars-small128.ckpt-68577"))

        options = {"model": "darkflow/cfg/yolo.cfg", "load": "darkflow/bin/yolo.weights", "threshold": 0.1,
                   "track": True, "trackObj": ["person"], "BK_MOG": True, "tracker": "deep_sort", "csv": False}

        self.tfnet = TFNet(options)

    def callback(self, image_message):
        bridge = CvBridge()
        image = bridge.imgmsg_to_cv2(image_message, 'bgr8')
        if self.tfnet is not None:
            processedimg, fps = self.tfnet.image_return(image, self.encoder, self.tracker)
            print(fps)
            self.image_publisher.publish(bridge.cv2_to_imgmsg(processedimg, "bgr8"))
            cv2.waitKey(1)

    def exit(self, data):
        if data.data == "quit":
            if self.tfnet is not None:
                os.system("rosnode kill /image_converter_node")
                os.system("rosnode kill /openpose_ros_node")
                os._exit(0)  # os.exit does not exist; os._exit terminates the process immediately
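
A sketch of the node entry point that would typically accompany this class. The topic defaults are assumptions; the node name matches the "rosnode kill /image_converter_node" call above, and args.input/args.output mirror the attributes read in __init__:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', default='/camera/image_raw')     # assumed default topic
    parser.add_argument('--output', default='/darkflow/image_out')  # assumed default topic
    args = parser.parse_args(rospy.myargv()[1:])

    rospy.init_node('image_converter_node')
    converter = ImageConverter(args)
    rospy.spin()  # hand control to ROS until shutdown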
Code Example #4
    def __init__(self):
        self.confidence = 0.4

        self.option = {
            'model': 'cfg/yolo.cfg',
            'load': 'bin/yolo.weights',
            'threshold': self.confidence,
            'gpu': 0.7
        }

        self.tfnet = TFNet(self.option)
        self.colors = {
            'car': (238, 23, 23),
            'truck': (0, 255, 21),
            'bus': (3, 0, 255),
            'person': (0, 255, 243)
        }
Code Example #5
class control_p(object):
    '''API for Flask'''
    def __init__(self, filename, list_xy):

        self.FLAGS = argHandler()
        self.FLAGS.setDefaults()

        self.FLAGS.demo = filename  # video file to use, or "camera" to use a camera feed
        self.FLAGS.model = "darkflow/bin/tiny-yolo-person.cfg"  # tensorflow model
        self.FLAGS.load = "darkflow/bin/tiny-yolo-person_595.weights"  # tensorflow weights
        self.FLAGS.threshold = 0.25  # detection confidence threshold (detection if confidence > threshold)
        self.FLAGS.gpu = 0.9  # how much of the GPU to use (between 0 and 1); 0 means use the CPU
        self.FLAGS.track = True  # whether to activate tracking or not
        self.FLAGS.trackObj = "person"  # the object to be tracked
        self.FLAGS.saveVideo = True  # whether to save the video or not
        self.FLAGS.BK_MOG = False  # activate background subtraction using cv2 MOG subtraction,
        # to help in the worst case, when YOLO cannot predict (it detects movement; not ideal, but workable).
        # It helps only when the number of detections < 5, as it is still better than no detection.
        self.FLAGS.tracker = "deep_sort"  # which algorithm to use for tracking: deep_sort/sort (NOTE: deep_sort is only trained for people detection)
        self.FLAGS.skip = 2  # how many frames to skip between detections to speed up the network
        self.FLAGS.csv = False  # whether to write a csv file or not (only when tracking is set to True)
        self.FLAGS.display = False  # whether to display the tracking or not
        # FLAGS.queue = 10
        self.FLAGS.list_xy = list_xy
        self.FLAGS.counter = True
        self.tfnet = TFNet(self.FLAGS)

    def setcoo_p(self, list_xy):
        self.tfnet.camera_set(list_xy)

    def start_p(self):
        # self.pstart = WorkerThread(callback)
        self.pstart = WorkerThread(target=self.tfnet.camera, args=(callback,))  # args must be a tuple, note the trailing comma
        self.pstart.setDaemon(True)
        self.pstart.start()

    def stop_p(self):
        self.tfnet.camera_stop()

    def pause_p(self):
        self.tfnet.camera_pause()

    def resume_p(self):
        self.tfnet.camera_resume()

    def get_p(self):
        return self.tfnet.camera_get()
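
Since the docstring calls this class an API for Flask, here is a minimal sketch of how it might be wired into Flask routes. The route names, payload format, and video path are assumptions; only the control_p methods come from the snippet:

from flask import Flask, jsonify, request

app = Flask(__name__)
ctrl = control_p("videos/lobby.mp4", list_xy=[])  # hypothetical video path

@app.route('/start', methods=['POST'])
def start():
    ctrl.start_p()  # launches tfnet.camera() on a worker thread
    return jsonify(status='started')

@app.route('/roi', methods=['POST'])
def set_roi():
    ctrl.setcoo_p(request.json['list_xy'])  # update the counting region
    return jsonify(status='ok')

@app.route('/count')
def count():
    return jsonify(count=ctrl.get_p())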
Code Example #6
    def __init__(self, filename):

        FLAGS = argHandler()
        FLAGS.setDefaults()

        FLAGS.demo = filename  # video file to use, or "camera" to use a camera feed
        FLAGS.model = "darkflow/cfg/yolo.cfg"  # tensorflow model
        FLAGS.load = "darkflow/bin/yolo.weights"  # tensorflow weights
        FLAGS.threshold = 0.25  # detection confidence threshold (detection if confidence > threshold)
        FLAGS.gpu = 0  # how much of the GPU to use (between 0 and 1); 0 means use the CPU
        FLAGS.track = True  # whether to activate tracking or not
        FLAGS.trackObj = "person"  # the object to be tracked
        FLAGS.saveVideo = True  # whether to save the video or not
        FLAGS.BK_MOG = False  # activate background subtraction using cv2 MOG subtraction,
        # to help in the worst case, when YOLO cannot predict (it detects movement; not ideal, but workable).
        # It helps only when the number of detections < 5, as it is still better than no detection.
        FLAGS.tracker = "deep_sort"  # which algorithm to use for tracking: deep_sort/sort (NOTE: deep_sort is only trained for people detection)
        FLAGS.skip = 2  # how many frames to skip between detections to speed up the network
        FLAGS.csv = False  # whether to write a csv file or not (only when tracking is set to True)
        FLAGS.display = False  # whether to display the tracking or not
        # FLAGS.queue = 10
        # FLAGS.queue = 10
        self.tfnet = TFNet(FLAGS)
Code Example #7
class MyDarkflow:
    def __init__(self):
        self.confidence = 0.4

        self.option = {
            'model': 'cfg/yolo.cfg',
            'load': 'bin/yolo.weights',
            'threshold': self.confidence,
            'gpu': 0.7
        }

        self.tfnet = TFNet(self.option)
        self.colors = {
            'car': (238, 23, 23),
            'truck': (0, 255, 21),
            'bus': (3, 0, 255),
            'person': (0, 255, 243)
        }

    def highlight_vehicles(self, img):
        results = self.tfnet.return_predict(img)
        for result in results:
            # Get the vehicle's position and class
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']

            # Assign a color to the label
            if label not in self.colors:
                self.colors[label] = 200 * np.random.rand(3)

            # Draw a box around the vehicle
            img = cv2.rectangle(img, tl, br, self.colors[label], 3)
            img = cv2.putText(img, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1,
                              (0, 0, 0), 2)
        return (img, len(results))
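
A short usage sketch for MyDarkflow; the image file name is hypothetical:

import cv2

detector = MyDarkflow()
frame = cv2.imread('traffic.jpg')  # hypothetical input image
frame, n = detector.highlight_vehicles(frame)
print('%d objects detected' % n)
cv2.imwrite('traffic_annotated.jpg', frame)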
Code Example #8
from darkflow.darkflow.net.build import TFNet
from darkflow.darkflow.defaults import argHandler  # assumed location of argHandler in this darkflow fork

FLAGS = argHandler()
FLAGS.setDefaults()

FLAGS.demo = "camera"  # video file to use, or if camera just put "camera"
FLAGS.model = "darkflow/cfg/yolo.cfg"  # tensorflow model
FLAGS.load = "darkflow/bin/yolo.weights"  # tensorflow weights
# FLAGS.pbLoad = "tiny-yolo-voc-traffic.pb" # tensorflow model
# FLAGS.metaLoad = "tiny-yolo-voc-traffic.meta" # tensorflow weights
FLAGS.threshold = 0.7  # threshold of decetion confidance (detection if confidance > threshold )
FLAGS.gpu = 0.8  #how much of the GPU to use (between 0 and 1) 0 means use cpu
FLAGS.track = False  # wheither to activate tracking or not
FLAGS.trackObj = [
    'Bicyclist', 'Pedestrian', 'Skateboarder', 'Cart', 'Car', 'Bus'
]  # the object to be tracked
#FLAGS.trackObj = ["person"]
FLAGS.saveVideo = True  #whether to save the video or not
FLAGS.BK_MOG = True  # activate background substraction using cv2 MOG substraction,
#to help in worst case scenarion when YOLO cannor predict(able to detect mouvement, it's not ideal but well)
# helps only when number of detection < 3, as it is still better than no detection.
FLAGS.tracker = "sort"  # wich algorithm to use for tracking deep_sort/sort (NOTE : deep_sort only trained for people detection )
FLAGS.skip = 0  # how many frames to skipp between each detection to speed up the network
FLAGS.csv = False  #whether to write csv file or not(only when tracking is set to True)
FLAGS.display = True  # display the tracking or not

tfnet = TFNet(FLAGS)

tfnet.camera()
exit('Demo stopped, exit.')
Code Example #9

if sys.version_info.major == 3:
    PYTHON_VERSION = 3
else:
    PYTHON_VERSION = 2
    
    
options = {
    'model': 'cfg/tiny-yolo-voc-2c.cfg',
    'load': 19125,
    'threshold': 0.05,
    #'gpu': 0.8
}

tfnet = TFNet(options)    


def main(argv):
    tf_device = '/gpu:0'
    with tf.device(tf_device):
        """Build graph
        """
        if FLAGS.color_channel == 'RGB':
            input_data = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.input_size, FLAGS.input_size, 3],
                                        name='input_image')
        else:
            input_data = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
                                        name='input_image')

        center_map = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.input_size, FLAGS.input_size, 1],
                                    name='center_map')  # snippet was cut off here; the closing follows the input_image pattern above
Code Example #10
def Initialize(FLAGS):
    tfnet = TFNet(FLAGS)
    return tfnet
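
A short usage sketch for Initialize, assuming FLAGS is populated with argHandler as in the earlier examples:

FLAGS = argHandler()
FLAGS.setDefaults()
FLAGS.demo = "camera"  # or a path to a video file
tfnet = Initialize(FLAGS)
tfnet.camera()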
Code Example #11
File: trackbyreid.py  Project: pribadihcr/DLL-RAPI
FLAGS.model = os.path.join(
    args.dir_model, "person_detection/darkflow/yolo.cfg")  # tensorflow model
FLAGS.load = os.path.join(
    args.dir_model,
    "person_detection/darkflow/yolo.weights")  # tensorflow weights
FLAGS.config = os.path.join(this_dir, '..', 'IVALIB', 'darkflow', 'cfg')
#FLAGS.pbLoad = "tiny-yolo-voc-traffic.pb" # tensorflow model
#FLAGS.metaLoad = "tiny-yolo-voc-traffic.meta" # tensorflow weights
FLAGS.threshold = 0.7  # detection confidence threshold (detection if confidence > threshold)
FLAGS.gpu = 0.8  # how much of the GPU to use (between 0 and 1); 0 means use the CPU
FLAGS.track = True  # whether to activate tracking or not
# FLAGS.trackObj = ['Bicyclist', 'Pedestrian', 'Skateboarder', 'Cart', 'Car', 'Bus'] # the objects to be tracked
FLAGS.trackObj = ["person"]
FLAGS.saveVideo = False  # whether to save the video or not
FLAGS.BK_MOG = True  # activate background subtraction using cv2 MOG subtraction,
# to help in the worst case, when YOLO cannot predict (it detects movement; not ideal, but workable).
# It helps only when the number of detections < 3, as it is still better than no detection.
FLAGS.tracker = "deep_sort"  # which algorithm to use for tracking: deep_sort/sort (NOTE: deep_sort is only trained for people detection)
FLAGS.skip = 3  # how many frames to skip between detections to speed up the network
FLAGS.display = True  # whether to display the tracking or not
FLAGS.summary = None
FLAGS.csv = True  # whether to write a csv file or not (only when tracking is set to True); the original set this to False and then overrode it
FLAGS.saveBox = True

darknet = TFNet(FLAGS)
REID.process(FLAGS, args.input_video, args.output_dir, darknet)
# camera(darknet)
#tfnet.camera()
exit('Demo stopped, exit.')
Code Example #12
File: gui_experiment.py  Project: grh1cob/Deepsense
    def initTFNet(self):
        from darkflow.darkflow.net.build import TFNet
        self.tfnet = TFNet(self.options)
        self.progressBar.setValue(100)
        print("done")
        self.startButton.setEnabled(True)
Code Example #13
File: Run.py  Project: LEESEYUN/seyun
from darkflow.darkflow.net.build import TFNet
import cv2
import tensorflow as tf
#options = {"model": "cfg/yolo-face.cfg", "load": "weight/yolo-face_final.weights", "threshold": 0.1, "gpu": 1.0}
options = {
    "model": "cfg/yolo-face.cfg",
    "load": "weight/yolo-face_4000.weights",
    "threshold": 0.1,
    "gpu": 1.0
}
tfnet = TFNet(options)
count = 0
tracker = cv2.TrackerMIL_create()
cam = cv2.VideoCapture(0)

while True:
    _, camcv = cam.read()
    camcv = cv2.resize(camcv, (448, 448))
    result = tfnet.return_predict(camcv)
    num_people = len(result)
    for i in range(num_people):
        #print(result[i]['topleft'])
        top_left_x = result[i]['topleft']['x']
        top_left_y = result[i]['topleft']['y']
        bottom_right_x = result[i]['bottomright']['x']
        bottom_right_y = result[i]['bottomright']['y']

        if count == 0 and top_left_x != 0:
            count += 1
            # OpenCV trackers expect (x, y, width, height), not two corner points
            bbox = (top_left_x, top_left_y,
                    bottom_right_x - top_left_x, bottom_right_y - top_left_y)
            ok = tracker.init(camcv, bbox)
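
The snippet ends just after the tracker is initialised; a sketch of the per-frame update that would typically follow inside the same while loop. OpenCV's tracker.update returns (ok, bbox) with bbox as (x, y, w, h); the display code is an assumption:

    # After initialisation, update the MIL tracker on each new frame
    if count > 0:
        ok, bbox = tracker.update(camcv)  # bbox is (x, y, w, h)
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(camcv, p1, p2, (255, 0, 0), 2)

    cv2.imshow("tracking", camcv)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break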
Code Example #14
import csv
import json
import os
import time

import cv2
from darkflow.net.build import TFNet  # adjust the import path to your darkflow checkout


def predict(videoPath):

    if os.path.exists("yolo.csv"):
        os.remove("yolo.csv")
    with open('yolo.csv', 'a') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerow(['FrameNumber', 'PredictionString'])
        writeFile.close()

    options = {"model": "cfg/455.cfg", "load": 35250, "threshold": 0.4}
    frameNum = 0
    tfnet = TFNet(options)

    skip = False
    fileDir = os.listdir(os.getcwd() + "/frames")
    for filename in fileDir:

        img = cv2.imread(os.getcwd() + "/frames/" + filename, cv2.IMREAD_COLOR)
        #print(os.getcwd() + "/frames/" + filename)
        while img is None:
            print("image is none")
            img = cv2.imread(os.getcwd() + "/frames/" + filename,
                             cv2.IMREAD_COLOR)
            time.sleep(2)
            if img is None:
                skip = True
                os.remove(os.getcwd() + "/frames/" + filename)
                break
        saved_img = img
        if not skip:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            print("not skipping")
            # use YOLO to predict the image
            result = tfnet.return_predict(img)

            #print("result equals " + str(result))
            img.shape

            id = 0
            csvString = ""
            for i in range(0, len(result)):
                tl = (result[i]['topleft']['x'], result[i]['topleft']['y'])
                br = (result[i]['bottomright']['x'],
                      result[i]['bottomright']['y'])
                label = result[i]['label']
                #print(result[i])

                id = id + 1
                # box written as: label confidence xmin ymin xmax ymax
                # (the original duplicated bottomright x and omitted topleft x)
                csvString += result[i]['label'] + " " + str(result[i]['confidence']) + " " + str(result[i]['topleft']['x']) + " " \
                    + str(result[i]['topleft']['y']) + " " + str(result[i]['bottomright']['x']) + " " + str(result[i]['bottomright']['y']) + " "
                saved_img = cv2.rectangle(saved_img, tl, br, (0, 255, 0), 7)
                saved_img = cv2.putText(saved_img,
                                        str(id) + " " + label, tl,
                                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0),
                                        2)

            cv2.imwrite(os.getcwd() + "/output-images/" + filename, saved_img)
            with open('yolo.csv', 'a') as writeFile:
                writer = csv.writer(writeFile)
                writer.writerow([frameNum, csvString])
                frameNum = frameNum + 1
                csvString = ""

            print("Finished processing image " + str(frameNum) + "/" +
                  str(len(fileDir)))

        skip = False
    print("Making video")
    img_array = []
    for index in range(0, len(os.listdir(os.getcwd() + "/output-images/"))):
        img = cv2.imread(os.getcwd() + "/output-images/" + str(index) + ".jpg",
                         cv2.IMREAD_COLOR)
        print("sorted file name : " + str(index) + ".jpg")
        height, width, layers = img.shape
        size = (width, height)
        img_array.append(img)

    print(len(img_array))
    out = cv2.VideoWriter(
        os.getcwd() + "/" + videoPath.split("/")[-1].split(".")[0] +
        "-yolo.avi", cv2.VideoWriter_fourcc(*'DIVX'), 15, size)

    for i in range(len(img_array)):
        out.write(img_array[i])

    out.release()
    return json.dumps({
        'csvpath':
        os.getcwd() + "/yolo.csv",
        'videopath':
        os.getcwd() + "/" + videoPath.split("/")[-1].split(".")[0] +
        "-yolo.avi"
    })
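
predict() reads frames from a ./frames directory and reassembles <index>.jpg files at the end, so a matching extraction step is assumed to run first. A sketch; the sequential naming convention is an assumption inferred from the video-assembly loop above:

import os
import cv2

def extract_frames(videoPath, out_dir="frames"):
    """Dump every frame of videoPath into out_dir as <index>.jpg."""
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(videoPath)
    index = 0
    while True:
        ok, frame = cap.read()
        if not ok:  # end of video
            break
        cv2.imwrite(os.path.join(out_dir, str(index) + ".jpg"), frame)
        index += 1
    cap.release()
    return index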
Code Example #15
import numpy as np
import sys
sys.path.append("..")

from darkflow.darkflow.net.build import TFNet
import cv2

options = {
    "model": "cfg/yolo-kitti.cfg",
    "load": -1,  #"weights/yolo.weights",
    "batch": 7,
    "epoch": 30,
    "gpu": 0.8,
    "train": True,
    "lr": 1e-7,
    "annotation": "../VOC2012/Annotations/",
    "dataset": "../VOC2012/JPEGImages/"
}

tfnet = TFNet(options)

tfnet.train()
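
After training, darkflow's documented inference pattern is to rebuild TFNet without the train flag and load the newest checkpoint. A sketch, with a hypothetical test image path:

options = {
    "model": "cfg/yolo-kitti.cfg",
    "load": -1,  # latest checkpoint from the training run above
    "threshold": 0.4,
    "gpu": 0.8
}
tfnet = TFNet(options)

img = cv2.imread("test_images/kitti_000042.png")  # hypothetical test image
for det in tfnet.return_predict(img):
    print(det['label'], det['confidence'], det['topleft'], det['bottomright'])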
Code Example #16
File: tracky.py  Project: lzane/tracky
import sys

import cv2

from SiamFC.SiameseTracker import SiameseTracker
from darkflow.darkflow.net.build import TFNet

cap = cv2.VideoCapture(sys.argv[1])
# cap = cv2.VideoCapture(0)

options = {
    "model": "./darkflow/cfg/tiny-yolo-voc.cfg",
    "load": "./darkflow/tiny-yolo-voc.weights",
    "threshold": 0.3
}
# options = {"model": "./cfg/yolo.cfg", "load": "./yolo.weights", "threshold": 0.3}
tracker_type = 'KCF'

tfnet = TFNet(options)
siamFC = SiameseTracker(
    debug=0,
    checkpoint=
    'SiamFC/Logs/SiamFC/track_model_checkpoints/SiamFC-3s-color-pretrained')
is_target_exist = False
cnt = 0
time_per_frame = 0


def get_tracker(tracker_type):
    if tracker_type == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    if tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()  # snippet was truncated here; branch completed to match the others
    return tracker
Code Example #17
File: run.py  Project: grh1cob/Deepsense
    'demo': "darkflow/sample_vid/multiple.mp4",
    'model': 'darkflow/cfg/yolov2-voc-2c.cfg',
    'load': 14300,
    'threshold': 0.4,
    'gpu': 0.8,
    'display': True,
    'track': True,
    'trackObj': ['drone', 'bird'],
    'saveVideo': False,
    'BK_MOG': False,
    'tracker': 'deep_sort',
    'skip': 0,
    'csv': False
}
buff = deque(maxlen=20)
tfnet = TFNet(FLAGS)
# from piyush import Connect
file = FLAGS["demo"]
SaveVideo = FLAGS["saveVideo"]
# w = Connect()
if FLAGS["track"]:
    if FLAGS["tracker"] == "deep_sort":
        from deep_sort import generate_detections
        from deep_sort.deep_sort import nn_matching
        from deep_sort.deep_sort.tracker import Tracker
        metric = nn_matching.NearestNeighborDistanceMetric("cosine", 0.2, 100)
        tracker = Tracker(metric)
        encoder = generate_detections.create_box_encoder(
            os.path.abspath(
                "deep_sort/resources/networks/mars-small128.ckpt-68577"))
    elif FLAGS["tracker"] == "sort":