Example No. 1
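These snippets target the TensorFlow 1.x API and OpenCV; the YOLOv3 / YOLOv3_tiny classes and helpers such as load_class_names, load_image, draw_boxes, draw_boxes_frame, receive and udpServSock come from the surrounding project. A minimal import block under that assumption:

import cv2
import tensorflow as tf  # TF 1.x API; on TF 2.x, import tensorflow.compat.v1 as tf and call tf.disable_v2_behavior()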
def main(mode, tiny, iou_threshold, confidence_threshold, path):
  class_names, n_classes = load_class_names()
  if tiny:
    model = YOLOv3_tiny(n_classes=n_classes,
                        iou_threshold=iou_threshold,
                        confidence_threshold=confidence_threshold)
  else:
    model = YOLOv3(n_classes=n_classes,
                  iou_threshold=iou_threshold,
                  confidence_threshold=confidence_threshold)
  inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
  detections = model(inputs)
  saver = tf.train.Saver(tf.global_variables(scope=model.scope))

  with tf.Session() as sess:
    saver.restore(sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')

    if mode == 'image':
      image = load_image(path, input_size=model.input_size)
      result = sess.run(detections, feed_dict={inputs: image})
      draw_boxes(path, boxes_dict=result[0], class_names=class_names, input_size=model.input_size)
      return

    elif mode == 'video':
      cv2.namedWindow("Detections")
      video = cv2.VideoCapture(path)
      fourcc = int(video.get(cv2.CAP_PROP_FOURCC))
      fps = video.get(cv2.CAP_PROP_FPS)
      frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
      out = cv2.VideoWriter('./detections/video_output.mp4', fourcc, fps, frame_size)
      print("Video being saved at \"" + './detections/video_output.mp4' + "\"")
      print("Press 'q' to quit")
      while True:
        retval, frame = video.read()
        if not retval:
          break
        resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]), interpolation=cv2.INTER_NEAREST)
        result = sess.run(detections, feed_dict={inputs: [resized_frame]})
        draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
        cv2.imshow("Detections", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
          break
        out.write(frame)
      cv2.destroyAllWindows()
      video.release()
      out.release()  # release the writer so the output file is finalized
      return

    elif mode == 'webcam':
      while True:
        frame, addr = receive()
        frame_size = (frame.shape[1], frame.shape[0])
        resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]), interpolation=cv2.INTER_NEAREST)
        result = sess.run(detections, feed_dict={inputs: [resized_frame]})
        draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
        retval, jpg_buffer = cv2.imencode(".jpg", frame)
        packet = jpg_buffer.tobytes()  # tostring() is deprecated in NumPy
        udpServSock.sendto(packet, addr)
      return
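A hypothetical invocation of this entry point (the thresholds and image path below are illustrative, not from the source):

main(mode='image', tiny=False, iou_threshold=0.5,
     confidence_threshold=0.5, path='./data/sample.jpg')  # placeholder path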
Example No. 2
def __init__(self, iou_threshold, confidence_threshold):
    tf.reset_default_graph()
    class_names, n_classes = load_class_names()
    self.model = YOLOv3(n_classes=n_classes,
                        iou_threshold=iou_threshold,
                        confidence_threshold=confidence_threshold)
    self.inputs = tf.placeholder(
        tf.float32, [1, *self.model.input_size, 3])
    self.detections = self.model(self.inputs)
    saver = tf.train.Saver(tf.global_variables(scope=self.model.scope))
    self.sess = tf.Session()
    saver.restore(self.sess, './weights/model.ckpt')
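Assuming this __init__ sits on a detector class (called Detector below purely for illustration), typical usage would be:

detector = Detector(iou_threshold=0.5, confidence_threshold=0.5)
# image = load_image('./data/sample.jpg', input_size=detector.model.input_size)  # placeholder path
# result = detector.sess.run(detector.detections, feed_dict={detector.inputs: image})
detector.sess.close()  # the session is held open by the object, so close it explicitly when done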
Example No. 3
def __init__(self, iou_threshold, confidence_threshold):
    self.class_names, self.n_classes = load_class_names()
    self.model = YOLOv3(n_classes=self.n_classes,
                        iou_threshold=iou_threshold,
                        confidence_threshold=confidence_threshold)
    self.inputs = tf.placeholder(tf.float32,
                                 [1, *self.model.input_size, 3])
    self.detections = self.model(self.inputs)
    self.saver = tf.train.Saver(
        tf.global_variables(scope=self.model.scope))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)
    self.saver.restore(self.sess, './weights/model.ckpt')
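This variant differs from Example No. 2 by setting config.gpu_options.allow_growth = True, which makes TensorFlow allocate GPU memory on demand instead of reserving the whole device when the session is created.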
Example No. 4
def main(mode, tiny, iou_threshold, confidence_threshold, path):
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0

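    # Per-car tracking state: dlib correlation trackers keyed by carID, plus the
    # previous/current boxes used for speed estimation; speed has one slot per carID.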
    carTracker = {}
    carNumbers = {}
    carLocation1 = {}
    carLocation2 = {}
    speed = [None] * 1000

    class_names, n_classes = load_class_names()
    if tiny:
        model = YOLOv3_tiny(n_classes=n_classes,
                            iou_threshold=iou_threshold,
                            confidence_threshold=confidence_threshold)
    else:
        model = YOLOv3(n_classes=n_classes,
                       iou_threshold=iou_threshold,
                       confidence_threshold=confidence_threshold)
    inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
    detections = model(inputs)
    saver = tf.train.Saver(tf.global_variables(scope=model.scope))

    with tf.Session() as sess:
        saver.restore(
            sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')

        if mode == 'image':
            image = load_image(path, input_size=model.input_size)
            result = sess.run(detections, feed_dict={inputs: image})
            draw_boxes(
                path, boxes_dict=result[0], class_names=class_names, input_size=model.input_size)
            return

        cv2.namedWindow("Detections")
        
        
        print("Video being saved at \"" + './detections/video_output.mp4' + "\"")
        print("Press 'q' to quit")
        pathIn = './data/PETS_2000_Frames/'
        files = os.listdir(pathIn)
        # Sort frames by the numeric index embedded in their filenames.
        files.sort(key=lambda x: int(x.split('.')[0].split('_')[1]))
        frame = cv2.imread(pathIn + files[0])
        frame_size = frame.shape[:2][::-1]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(
            './detections/video_output.mp4', fourcc, 20, frame_size)
        for i in range(len(files)):
            frame = cv2.imread(pathIn + files[i])
            if frame is None:  # check before first use, not after
                break
            resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]),
                                       interpolation=cv2.INTER_NEAREST)
            result = sess.run(detections, feed_dict={inputs: [resized_frame]})
            draw_boxes_frame(frame, frame_size, result,
                             class_names, model.input_size)
            start_time = time.time()

            frame = cv2.resize(frame, frame_size)
            resultImage = frame

            frameCounter = frameCounter + 1

            carIDtoDelete = []

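            # Drop any tracker whose update() confidence has fallen below 7.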
            for carID in carTracker.keys():
                trackingQuality = carTracker[carID].update(frame)

                if trackingQuality < 7:
                    carIDtoDelete.append(carID)

            for carID in carIDtoDelete:
                print('Removing carID ' + str(carID) + \
                      ' from list of trackers.')
                print('Removing carID ' + str(carID) + ' previous location.')
                print('Removing carID ' + str(carID) + ' current location.')
                carTracker.pop(carID, None)
                carLocation1.pop(carID, None)
                carLocation2.pop(carID, None)

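            # Every 10th frame, re-detect cars with the Haar cascade (carCascade is
            # defined elsewhere in the project) and match each detection to an existing
            # tracker by checking that the two box centers fall inside each other.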
            if not (frameCounter % 10):
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cars = carCascade.detectMultiScale(
                    gray, 1.1, 13, 18, (24, 24))

                for (_x, _y, _w, _h) in cars:
                    x = int(_x)
                    y = int(_y)
                    w = int(_w)
                    h = int(_h)

                    x_bar = x + 0.5 * w
                    y_bar = y + 0.5 * h

                    matchCarID = None

                    for carID in carTracker.keys():
                        trackedPosition = carTracker[carID].get_position()

                        t_x = int(trackedPosition.left())
                        t_y = int(trackedPosition.top())
                        t_w = int(trackedPosition.width())
                        t_h = int(trackedPosition.height())

                        t_x_bar = t_x + 0.5 * t_w
                        t_y_bar = t_y + 0.5 * t_h

                        if (t_x <= x_bar <= (t_x + t_w) and
                                t_y <= y_bar <= (t_y + t_h) and
                                x <= t_x_bar <= (x + w) and
                                y <= t_y_bar <= (y + h)):
                            matchCarID = carID

                    if matchCarID is None:
                        print('Creating new tracker ' + str(currentCarID))

                        tracker = dlib.correlation_tracker()
                        tracker.start_track(
                            frame, dlib.rectangle(x, y, x + w, y + h))

                        carTracker[currentCarID] = tracker
                        carLocation1[currentCarID] = [x, y, w, h]

                        currentCarID = currentCarID + 1

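            # Record every tracker's current position for this frame.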
            for carID in carTracker.keys():
                trackedPosition = carTracker[carID].get_position()
                t_x = int(trackedPosition.left())
                t_y = int(trackedPosition.top())
                t_w = int(trackedPosition.width())
                t_h = int(trackedPosition.height())
                carLocation2[carID] = [t_x, t_y, t_w, t_h]

            end_time = time.time()

            if end_time != start_time:
                fps = 1.0 / (end_time - start_time)
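            # Estimate each car's speed from the displacement between its previous
            # and current boxes.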
            for carID in carLocation1.keys():
                if frameCounter % 1 == 0:  # % 1 is always true; raise the modulus to update less often
                    [x1, y1, w1, h1] = carLocation1[carID]
                    [x2, y2, w2, h2] = carLocation2[carID]

                    carLocation1[carID] = [x2, y2, w2, h2]

                    if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                        if speed[carID] is None or speed[carID] == 0:
                            speed[carID] = estimateSpeed(
                                [x1, y1, w1, h1], [x2, y2, w2, h2])

                        if speed[carID] is not None:
                            print(str(int(speed[carID])) + " km/hr")
                            cv2.putText(resultImage, str(int(speed[carID])) + " km/hr",
                                        (int(x1 + w1 / 2), int(y1 - 5)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
            cv2.imshow("Detections", resultImage)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            out.write(resultImage)
        out.release()  # release the writer so the output file is finalized
        cv2.destroyAllWindows()
Example No. 5
def detect_dataset(iou_threshold, confidence_threshold, checkpoint_fn):

    t = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    detection_dir = './detections/' + t + '/'
    test_detection_dir = detection_dir + 'test/'
    baseline_detection_dir = detection_dir + 'baseline/'
    # makedirs creates detection_dir and each subdirectory in one call
    os.makedirs(test_detection_dir, exist_ok=True)
    os.makedirs(baseline_detection_dir, exist_ok=True)

    class_names = utils.load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    inputs = tf.placeholder(dtype=tf.float32, shape=[None, *_MODEL_SIZE, 3])
    #mask_placeholders = utils.build_mask_placeholders(_OUTPUT_SIZE)

    label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
    label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
    label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
    true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
    true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
    true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')

    model = Yolo_v3(
        inputs=inputs,
        #mask_placeholders=mask_placeholders,
        trainable=False,
        n_classes=n_classes,
        model_size=_MODEL_SIZE)

    obj_conf_loss, no_obj_conf_loss, obj_class_loss, obj_loc_loss = \
        model.compute_loss(label_sbbox, label_mbbox, label_lbbox,
                           true_sbboxes, true_mbboxes, true_lbboxes)

    loss = obj_conf_loss + no_obj_conf_loss + obj_class_loss + obj_loc_loss


    with tf.name_scope('ema'):
        ema_obj = tf.train.ExponentialMovingAverage(0.9995)
    # Alternative: restore the EMA shadow variables instead of the raw weights:
    # saver = tf.compat.v1.train.Saver(ema_obj.variables_to_restore())

    batches = Dataset('test')
    batch_size = batches.batch_size

    # Build the saver once: tf.train.Saver adds ops to the graph, so constructing
    # a new one on every loop iteration would keep growing the graph.
    saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        i = 0
        for batch in batches:
            if i >= 50:
                break

            saver.restore(sess, checkpoint_fn)

            batch_img, label_boxes, boxes, img_paths = batch
            model.eval(batch_size, _MAX_OUTPUT_SIZE, iou_threshold,
                       confidence_threshold)

            feed_dict = {
                inputs: batch_img,
                label_sbbox: label_boxes[0],
                label_mbbox: label_boxes[1],
                label_lbbox: label_boxes[2],
                true_sbboxes: boxes[0],
                true_mbboxes: boxes[1],
                true_lbboxes: boxes[2]
            }
            boxes_dicts, detect_loss = sess.run([model.boxes_dicts, loss],
                                                feed_dict=feed_dict)

            recall, precision = eval_precision_recall(batch_size, boxes_dicts,
                                                      label_boxes, n_classes)
            print("Batch: {0}; Loss: {1:.2f}; Precision: {2:.2f}; Recall: {3:.2f}"\
                    .format(i, detect_loss, precision, recall))
            utils.draw_boxes_new(img_paths,
                                 boxes_dicts,
                                 class_names,
                                 _MODEL_SIZE,
                                 test_detection_dir,
                                 shape='e')

            # ---- Baseline: restore the reference checkpoint and repeat the evaluation ----

            saver.restore(sess, _MODEL_CKPT)

            model.eval(batch_size, _MAX_OUTPUT_SIZE, iou_threshold,
                       confidence_threshold)

            baseline_boxes_dicts, baseline_detect_loss = sess.run(
                [model.boxes_dicts, loss], feed_dict=feed_dict)

            bl_recall, bl_precision = eval_precision_recall(
                batch_size, baseline_boxes_dicts, label_boxes, n_classes)
            print("Batch: {0}; Baseline Loss: {1:.2f}; Precision: {2:.2f}; Recall: {3:.2f}"\
                    .format(i, baseline_detect_loss, bl_precision, bl_recall))
            #print("Batch: {0}; Baseline Loss: {1:.2f}".format(i, baseline_detect_loss))
            utils.draw_boxes_new(img_paths,
                                 baseline_boxes_dicts,
                                 class_names,
                                 _MODEL_SIZE,
                                 baseline_detection_dir,
                                 shape='e')

            i += 1

    print('Detections have been saved successfully.')
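A hypothetical call (the checkpoint path is a placeholder, not from the source):

detect_dataset(iou_threshold=0.5, confidence_threshold=0.3,
               checkpoint_fn='./checkpoints/model.ckpt')  # placeholder path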
Example No. 6
def detect_video(iou_threshold, confidence_threshold, checkpoint_fn, video_fn):

    t = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    detection_dir = './detections/' + t + '/'
    os.makedirs(detection_dir, exist_ok=True)
    filename = os.path.basename(video_fn)

    class_names = utils.load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    inputs = tf.placeholder(dtype=tf.float32, shape=[1, *_MODEL_SIZE, 3])

    model = Yolo_v3(
        inputs=inputs,
        #mask_placeholders=mask_placeholders,
        trainable=False,
        n_classes=n_classes,
        model_size=_MODEL_SIZE)
    model.eval(1, _MAX_OUTPUT_SIZE, iou_threshold, confidence_threshold)
    with tf.name_scope('ema'):
        ema_obj = tf.train.ExponentialMovingAverage(0.9995)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))
        saver.restore(sess, checkpoint_fn)
        #model.eval(batch_size, _MAX_OUTPUT_SIZE, iou_threshold, confidence_threshold)

        ###########################################

        win_name = 'Video detection'
        cv2.namedWindow(win_name)
        cap = cv2.VideoCapture(video_fn)
        frame_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        #fourcc = cv2.VideoWriter_fourcc(*'X264')
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        fps = cap.get(cv2.CAP_PROP_FPS)
        out = cv2.VideoWriter(detection_dir + 'detection_' + filename, fourcc,
                              fps, frame_size)

        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                resized_frame = cv2.resize(frame,
                                           dsize=_MODEL_SIZE[::-1],
                                           interpolation=cv2.INTER_NEAREST)
                resized_frame = resized_frame / 255.
                detection_result = sess.run(
                    model.boxes_dicts, feed_dict={inputs: [resized_frame]})

                utils.draw_frame_new(frame,
                                     frame_size,
                                     detection_result,
                                     class_names,
                                     _MODEL_SIZE,
                                     shape='e')

                cv2.imshow(win_name, frame)

                key = cv2.waitKey(1) & 0xFF

                if key == ord('q'):
                    break

                out.write(frame)
        finally:
            cv2.destroyAllWindows()
            cap.release()
            out.release()  # release the writer so the output file is finalized
            print('Detections have been saved successfully.')
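A hypothetical call (both paths are placeholders):

detect_video(iou_threshold=0.5, confidence_threshold=0.3,
             checkpoint_fn='./checkpoints/model.ckpt',  # placeholder path
             video_fn='./data/traffic.mp4')             # placeholder path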
Example No. 7
def detect_images(iou_threshold, confidence_threshold, checkpoint_fn,
                  select_fn):
    with open(select_fn, 'r') as f:
        img_fns = [line.strip() for line in f if line.strip()]  # skip blank lines

    if len(img_fns) == 0:
        raise ValueError("No input images to detect.")

    t = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    detection_dir = './detections/' + t + '/'
    test_detection_dir = detection_dir + 'test/'
    baseline_detection_dir = detection_dir + 'baseline/'
    # makedirs creates detection_dir and each subdirectory in one call
    os.makedirs(test_detection_dir, exist_ok=True)
    os.makedirs(baseline_detection_dir, exist_ok=True)

    class_names = utils.load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    inputs = tf.placeholder(dtype=tf.float32, shape=[None, *_MODEL_SIZE, 3])

    model = Yolo_v3(
        inputs=inputs,
        #mask_placeholders=mask_placeholders,
        trainable=False,
        n_classes=n_classes,
        model_size=_MODEL_SIZE)

    with tf.name_scope('ema'):
        ema_obj = tf.train.ExponentialMovingAverage(0.9995)
    # Alternative: restore the EMA shadow variables instead of the raw weights:
    # saver = tf.compat.v1.train.Saver(ema_obj.variables_to_restore())

    batch_size = len(img_fns)
    batch = utils.load_images(img_fns, model_size=_MODEL_SIZE)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        saver = tf.train.Saver(tf.global_variables(scope='yolo_v3_model'))
        saver.restore(sess, checkpoint_fn)

        model.eval(batch_size, _MAX_OUTPUT_SIZE, iou_threshold,
                   confidence_threshold)

        feed_dict = {inputs: batch}

        boxes_dicts = sess.run(model.boxes_dicts, feed_dict=feed_dict)

        utils.draw_boxes_new(img_fns,
                             boxes_dicts,
                             class_names,
                             _MODEL_SIZE,
                             test_detection_dir,
                             shape='e')

        # ---- Baseline: restore the reference checkpoint and repeat the detection ----

        saver.restore(sess, _MODEL_CKPT)

        model.eval(batch_size, _MAX_OUTPUT_SIZE, iou_threshold,
                   confidence_threshold)

        baseline_boxes_dicts = sess.run(model.boxes_dicts, feed_dict=feed_dict)

        utils.draw_boxes_new(img_fns,
                             baseline_boxes_dicts,
                             class_names,
                             _MODEL_SIZE,
                             baseline_detection_dir,
                             shape='e')

    print('Detections have been saved successfully.')
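A hypothetical call, where select_fn points at a text file listing one image path per line (all paths are placeholders):

detect_images(iou_threshold=0.5, confidence_threshold=0.3,
              checkpoint_fn='./checkpoints/model.ckpt',  # placeholder path
              select_fn='./data/select.txt')             # placeholder path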