Example #1
def video_img():
    detector = Detector()
    vc = cv2.VideoCapture(r'F:\video\mv.mp4')
    c = 1
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False
    while rval:
        rval, frame = vc.read()
        if not rval:  # the final read fails; stop before touching frame
            break
        im = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

        boxes = detector.detect(im)

        for box in boxes:
            x1 = int(box[0])
            y1 = int(box[1])
            x2 = int(box[2])
            y2 = int(box[3])
            _y2 = int(y2 * 0.8 + y1 * 0.2)  # keep only the top 80% of the box
            cv2.rectangle(frame, (x1, y1), (x2, _y2), (0, 225, 0), 2)
        cv2.imshow('img',frame)
        # cv2.imwrite(r'F:\video\img\img'+str(c)+'.jpg',frame)
        c += 1
        cv2.waitKey(1)
    vc.release()
Example #2
def main():
    parser = ap.ArgumentParser(
        description=
        "Arguments to use in data collection and Arduino communication.")
    parser.add_argument("port", help="The serial port file to use.")
    args = parser.parse_args()

    serial_port = args.port
    if not os.path.exists(serial_port):
        print("Error: file '{}' does not exist or is not a file.".format(
            serial_port))
        exit(1)

    # William's test driver code, but with the serial port as a command line arg.
    # ctl = ac(serial_port)

    # need this delay for arduino initialization
    time.sleep(3)

    print('Starting detection...')
    detector = Detector(serial_port)
    while True:
        try:
            print('writing: ' + str(detector.sample()))
        except Exception as e:
            print(e)
            print('Breaking')
            detector.close()
            break
Example #3
def run(datafile, data_dir):
    def download(url):
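        # fetch the URL and decode the bytes into a BGR image; None on any failure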
        try:
            with urlopen(url) as response:
                if response.status == 200:
                    data = np.asarray(bytearray(response.read()),
                                      dtype=np.uint8)
                    return cv2.imdecode(data, cv2.IMREAD_COLOR)
        except HTTPError as e:
            print(f'{url}: {e}')
        except (ConnectionError, OSError) as e:
            print(f'{url}: {e}')
            time.sleep(0.1)
        except Exception as e:
            print(f'{url}: {e}')
            time.sleep(0.5)
        return None

    detector = Detector()
    with open(datafile, 'r') as fp:
        r = csv.reader(fp, delimiter='\t')
        for row in r:
            photo_url = row[1]
            print(photo_url)
            img = download(photo_url)
            if img is None:
                continue
            result = detector.detect(img)
            if result is None:
                continue

            basename = os.path.basename(photo_url)
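            # shard output dirs by the hex codes of the basename's first three characters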
            path0 = f'{ord(basename[0]):02x}'
            path1 = f'{ord(basename[1]):02x}'
            path2 = f'{ord(basename[2]):02x}'
            outdir = os.path.join(data_dir, path0, path1, path2)
            os.makedirs(outdir, exist_ok=True)

            faceimg = result['image']
            del result['image']
            created_at = datetime.fromtimestamp(
                time.mktime(time.strptime(row[3],
                                          '%a %b %d %H:%M:%S +0000 %Y')))
            result['meta'] = {
                'photo_id': row[0],
                'photo_url': row[1],
                'source_url': row[2],
                'published_at': created_at.isoformat(),
                'label_id': row[4],
                'label_name': row[5],
            }

            name = os.path.splitext(basename)[0]
            cv2.imwrite(os.path.join(outdir, f'{name}.jpg'), faceimg,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])
            with open(os.path.join(outdir, f'{name}.json'), 'w') as fp:
                json.dump(result, fp, ensure_ascii=False)
Example #4
    def __init__(self, opt):
        super(mywindow, self).__init__()
        self.setupUi(self)
        self.setFixedSize(self.width(), self.height())
        self.opt = opt
        self.detector = Detector(cfg=opt.cfg,
                                 data_cfg=opt.data_cfg,
                                 weights=opt.weights,
                                 img_size=opt.img_size,
                                 conf_thres=opt.conf_thres,
                                 nms_thres=opt.nms_thres)
Example #5
def main():
    args = parseopt()
    dump_flow(args.movie)
    flow, header = pick_flow(args.movie)
    detector = Detector(gpu=args.gpu)
    def draw_box_flow_func(movie, flow):
        return draw_box_flow(movie, flow, detector)
    if args.nobbox:
        vis_flow(args.movie, flow)
    else:
        vis_flow(args.movie, flow, draw=draw_box_flow_func)
Example #6
def main_func(path):
    """
    Detect 3rd-party libraries in app.
    The detection result is printed.
    :param path: The path of target app.
    :return: None.
    """
    print "--Decoding--"
    detector = Detector()
    decoded_path = detector.get_smali(path)
    detector.get_hash(decoded_path)
Example #7
    def __init__(self):
        handlers = [
            (r"/config", ConfigHandler),
            (r"/voice", VoiceHandler),
            (r"/detect", HttpDetectHandler),
        ]
        settings = dict(
            debug=options.dev,
            static_path=os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "static"),
            template_path=os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "templates"),
        )
        super(Application, self).__init__(handlers, **settings)

        self.data = None
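        # one random display color per class id; ids 1-20 get fixed colors up front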
        self.color_map = collections.defaultdict(lambda: (random.randint(
            0, 255), random.randint(0, 255), random.randint(0, 255)))
        for i in range(1, 21):
            self.color_map[str(i)] = [
                random.randint(0, 255),
                random.randint(0, 255),
                random.randint(0, 255)
            ]

        if options.model.find('det300') >= 0:
            self.model_det300 = Detector('conf/retina_net.conf')
        if options.model.find('det600') >= 0:
            self.model_det600 = Detector('conf/retina_net_600.conf')
        if options.model.find('mask600') >= 0:
            self.model_mask600 = MaskRetinaNet('conf/mask_600.conf')
        if options.model.find('seg512') >= 0:
            self.model_seg512 = Segmentor('conf/cloth_320.conf', rotate=False)
            self.model_det300 = Detector('conf/retina_net.conf')

        self.model_fat = FatDetector()
        self.model_multi_task = MultiTaskDetector()
        logging.info("initialize done")
Example #8
    def __init__(self, minutes):
        queue = Queue(maxsize=100)
        self.queue = queue
        self.recorder = Recorder()
        self.minutes = minutes
        self.consoleAudioHandler = ConsoleAudioHandler(self.queue,
                                                       config["tmpPath"])

        # consume queued audio on a background worker thread
        t = Thread(target=self.worker)
        t.start()

        provider = ConsoleAudioProvider(self.queue)

        detector = Detector(provider)
        detector.detect()
Example #9
def run(data_dir):
    detector = Detector()
    with open('data.tsv', 'r') as fp:
        r = csv.reader(fp, delimiter='\t')
        next(r)  # skip the header row
        for row in r:
            photo_url = row[3]
            basename = os.path.basename(photo_url)

            # same 3-level hex sharding scheme as the download example above
            path0 = f'{ord(basename[0]):02x}'
            path1 = f'{ord(basename[1]):02x}'
            path2 = f'{ord(basename[2]):02x}'
            filepath = os.path.join(data_dir, 'images', path0, path1, path2,
                                    basename)
            print(f'processing {filepath} ...')
            try:
                result = detector.detect(cv2.imread(filepath))
                if result is None:
                    print('detection failed.')
                    continue

                outdir = os.path.join(data_dir, 'results', path0, path1, path2)
                os.makedirs(outdir, exist_ok=True)
                img = result['image']
                del result['image']
                result['meta'] = {
                    'face_id': row[0],
                    'photo_id': row[1],
                    'source_url': row[2],
                    'photo_url': row[3],
                    'posted_at': row[4],
                    'label_id': row[5],
                    'label_name': row[6],
                }
                name = os.path.splitext(basename)[0]
                cv2.imwrite(os.path.join(outdir, f'{name}.png'), img)
                with open(os.path.join(outdir, f'{name}.json'), 'w') as fp:
                    json.dump(result, fp, ensure_ascii=False)
            except Exception as e:
                print(f'error!!: {filepath}: {e}')

            time.sleep(1)
Example #10
def track(video_path, use_gpu=False):
    video = cv2.VideoCapture(video_path)
    ret, frame = video.read()
    if ret:
        frame = cv2.resize(frame, (input_width, input_height))

    if use_gpu:
        caffe.set_mode_gpu()

    # SORT tracker: tracks unseen for more than 10 frames are dropped
    tracker = Sort(max_age=10)
    detector = Detector()
    classes = detector.get_classes()

    while ret:
        frame_disp = np.copy(frame)
        bounding_boxes, counting = detector.infer(frame)
        class_counting = zip(classes, counting)

        for pair in class_counting:
            print('{:s} {:03d}'.format(*pair))
        print('')

        if len(bounding_boxes) > 0:
            bounding_boxes = np.array(bounding_boxes, np.int32)

            # convert (x, y, w, h) to (x1, y1, x2, y2)
            bounding_boxes[:, 2:4] += bounding_boxes[:, 0:2]
            bounding_boxes[:, 2:4] -= 1

        track_results = tracker.update(bounding_boxes)
        draw_tracking_results(track_results, frame_disp)

        cv2.imshow('tracking', frame_disp)

        key = cv2.waitKey(1)
        if key == 27:
            return

        ret, frame = video.read()
        if ret:
            frame = cv2.resize(frame, (input_width, input_height))
Example #11
def result_video():
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            filename = os.path.join(UPLOAD_FOLDER, filename)
            file.save(filename)
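            # run detection, build the plots, and render the annotated video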
            model = Detector(filename, 1)
            model.run_detector()
            model.generate_plots()
            model.write_video()

            return render_template('result_video.html')
            #return redirect(url_for('uploaded_file',filename=filename))
    return render_template('upload_video.html')
Example #12
        if loc is None:
            loc = Locator(frame)
            # loc.locate_tube()
            # loc.locate_window()
            # loc.locate_scale()
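            # hard-coded calibration used in place of the locate_* calls above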
            loc.tube = [(0, 271, 33, 76), (0, 271, 78, 115)]
            loc.window = [[(47, 80, 43, 61), (82, 115, 42, 58),
                           (117, 151, 39, 58), (153, 186, 41, 61),
                           (188, 220, 43, 61)],
                          [(2, 35, 88, 108), (35, 69, 85, 105),
                           (72, 105, 84, 105), (108, 142, 84, 104),
                           (145, 178, 84, 104), (179, 213, 87, 108),
                           (214, 246, 89, 109)]]
            loc.scale = [[110, 199], [102, 199]]

            tube0 = Detector(loc.tube[0], loc.window[0], loc.scale[0], 210, 50)
            tube1 = Detector(loc.tube[1], loc.window[1], loc.scale[1], 160, 33)
        tube0.feed(frame)
        tube1.feed(frame)
        if not count % 20:  # refresh the background models every 20 frames
            tube0.update_backgrounds()
            tube1.update_backgrounds()

        level = int(tube0.cur_level)
        cv2.line(frame, (45, level), (60, level), (0, 0, 255), 1)
        level = int(tube1.cur_level)
        cv2.line(frame, (90, level), (105, level), (0, 0, 255), 1)
        text0 = 'level0: ' + str(round(tube0.level_scale))
        text1 = 'level1: ' + str(round(tube1.level_scale))
        cv2.putText(frame, text0, (5, 13), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.6,
                    (0, 0, 255), 1)
Example #13
    parser.add_argument("--reidpath", type=str, default='reidLib', help="outputdir")

    opt = parser.parse_args()
    
    videoname = os.path.join(opt.indir,opt.videoin)
    output_dir = opt.outdir
    work_dir = opt.workdir
    reid_dir = opt.reiddir if opt.reiddir != "" else None
    make_folder(output_dir)
    clean_folder(work_dir)
    if reid_dir is not None:
        clean_folder(reid_dir)
    
    # make the YOLO repo importable before importing its Detector
    sys.path.append(opt.yolopath)
    from detect import Detector
    
    detector = Detector(conf_thres=opt.detconf, yolo_path=opt.yolopath, resize_size=opt.imgsize, output_dir=work_dir)
    from reid import Reid
    
    reid = Reid(save_path=reid_dir, threshold=opt.threshold, verbose=False)
    
    videoframe = cv2.VideoCapture(videoname)
    framenr = 0
    
    if not videoframe.isOpened():
        print("Error opening video stream or file")
    
    while videoframe.isOpened():
        ret, img = videoframe.read()
        
        if not ret:  # reached end of video
            break
Example #14
    def __init__(self):
        # Create the olympe.Drone object from its IP address
        self.drone = olympe.Drone(DRONE_IP)
        # subscribe to flight listener
        listener = FlightListener(self.drone)
        listener.subscribe()
        self.last_frame = np.zeros((1, 1, 3), np.uint8)
        self.frame_queue = queue.Queue()
        self.flush_queue_lock = threading.Lock()
        self.detector = Detector()
        self.keypoints_image = np.zeros((1, 1, 3), np.uint8)
        self.keypoints = deque(maxlen=5)
        self.faces = deque(maxlen=10)
        self.f = open("distances.csv", "w")
        self.face_distances = deque(maxlen=10)

        self.image_width = 1280
        self.image_height = 720
        self.half_face_detection_size = 150

        self.poses_model = load("models/posesmodel.joblib")
        self.pose_predictions = deque(maxlen=5)

        # the fly_to_person thread blocks on this condition while paused
        self.pause_finding_condition = threading.Condition(threading.Lock())
        self.pause_finding_condition.acquire()
        self.pause_finding = True
        self.person_thread = threading.Thread(target=self.fly_to_person)
        self.person_thread.start()

        # flight parameters in meter
        self.flight_height = 0.0
        self.max_height = 1.0
        self.min_dist = 1.5

        # keypoint map
        self.nose = 0
        self.left_eye = 1
        self.right_eye = 2
        self.left_ear = 3
        self.right_ear = 4
        self.left_shoulder = 5
        self.right_shoulder = 6
        self.left_elbow = 7
        self.right_elbow = 8
        self.left_wrist = 9
        self.right_wrist = 10
        self.left_hip = 11
        self.right_hip = 12
        self.left_knee = 13
        self.right_knee = 14
        self.left_ankle = 15
        self.right_ankle = 16

        # person distance
        self.eye_dist = 0.0

        # save images
        self.save_image = False
        self.image_counter = 243
        self.pose_file = open("poses.csv", "w")
        super().__init__()
        super().start()
Example #15
    def __init__(self):
        self.detector = Detector()
Example #16
import logging

import yaml

from detect import Detector

config_dir = "./"
# Load configuration.
config_file_path = config_dir + 'config.yml'
with open(config_file_path, 'r') as f:
    config = yaml.safe_load(f)

# start_detect(config)

if __name__ == "__main__":
    print(
        "====================================FCD Started===================================="
    )
    LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
    logging.basicConfig(filename='logs.log',
                        level=logging.DEBUG,
                        format=LOG_FORMAT)
    detector = Detector(config)
    detector.start_detect()
Example #17
    def __init__(self, **kwargs):
        super(CalcGridLayout, self).__init__(**kwargs)
        Window.bind(on_dropfile=self._on_file_drop)
        self.detector = Detector(self.weights, self.filePath)
        self.ids.model_label.text = self.weights_description[0]
Example #18
    def work(self, im):
        super(HandCropper, self).work(im)
        self.thresholdGrey(150)
        self.findContours()
        self.filterSudoku()  # minAreaRect rotated rects; keep the nine sudoku-block contours
        self.alignPlank()  # perspective transform; align and crop the big board (full sudoku grid plus the seven-segment digit panel)
        self.splitSudokuLight()

        self.threshold_im = self.threshold_sudoku_im
        self.findRecContours()
        self.filterRecContoursByArea(9)  # boundingRect; keep the nine grid blocks
        self.rec_contours = self.resumeOrder(self.rec_contours[:9],
                                             ykey=lambda it: it[1],
                                             xkey=lambda it: it[0])
        self.cropRecContours(self.rec_contours,
                             base_im=self.threshold_sudoku_im)

        if SAVE:
            self.saveCropResults()
            self.saveCropResults(results=[self.threshold_light_im])

        return self.results


if __name__ == '__main__':
    # d = Detector(LightCropper(), None)
    # d.work('../test_im/52648.jpg')

    d = Detector(HandCropper(), None)
    d.work('../test_im/real1.jpg')
Example #19
import cv2

from detect import Detector
from imgUtils import ImgUtils

if __name__ == "__main__":
    param = {
        (1, 'postbake'): {
            # detection params
            'expectedWidth': 120,
            'expectedHeight': 110,
            # select the correct image transformations
            'transformationTarget': 'cool',
            # select the correct tracking parameters
            'upperKillzone': 550,
            'lowerKillzone': 220,
            'rightKillzone': 3000,
            'leftKillzone': -3000,
            'timeToDie': 1,
            'timeToLive': 0,
            'partioningRequired': True
        }
    }

    D = Detector(**param.get((1, 'postbake')))

    # step through the sample frames; press 'q' to advance to the next one
    for i in range(49, 100):
        img = cv2.imread("PostBakeSamples/postbake" + str(i) + ".png")
        x = D.getImgWithBoxes(img)
        while True:
            ImgUtils.show("x", x, 0, 0)
            key = cv2.waitKey(30)
            if key == ord('q'):
                break
Example #20
import time

import numpy as np
from PIL import Image

# Detector is assumed to come from the local detect module, as in the other examples
from detect import Detector

#infFile = "/home/leo/qj/object_detection/data/socr/inference_frcnn/frozen_inference_graph.pb"
#mapFile = "/home/leo/qj/object_detection/data/socr/pascal_label_map.pbtxt"
infFile = "/home/leo/qj/object_detection/data/socr/inference_ssd/frozen_inference_graph.pb"
mapFile = "/home/leo/qj/object_detection/data/socr/pascal_label_map.pbtxt"
#infFile = "/home/leo/qj/object_detection/data/Index/inference_ssd/frozen_inference_graph.pb"
#mapFile = "/home/leo/qj/object_detection/data/Index/pascal_label_map.pbtxt"
imFiles = [
    "/home/leo/qj/object_detection/data/origin/socr_20180410/JPEGImages/im0505.jpg",
    "/home/leo/qj/object_detection/data/origin/socr_20180410/JPEGImages/im0506.jpg",
    "/home/leo/qj/object_detection/data/origin/socr_20180410/JPEGImages/im0507.jpg",
    "/home/leo/qj/object_detection/data/origin/socr_20180410/JPEGImages/im0508.jpg"
]

detector = Detector('socr', infFile, mapFile)

for imFile in imFiles:
    img_np = np.asarray(Image.open(imFile))
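    # np.asarray may return a read-only view; make it writable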
    img_np.setflags(write=1)
    s = time.time()
    (image_np, tag_boxes, tag_scores, tag_classes,
     tag_num) = detector.detect(img_np, visualize=False)
    e = time.time()

    print('detecting:', imFile)
    print('detect time is:', (e - s))
    print('tag_num:', tag_num)
    for c in tag_classes:
        if tag_scores[c] >= 0.8:
            print('tag: (%s, %f)' % (c, tag_scores[c]))
Example #21
import argparse
import platform

from detect import Detector

if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Start for face recognition client')
  parser.add_argument('--ip', help='Ip of recognition server')
  parser.add_argument('--port', help='Port of recognition server')

  args = parser.parse_args()

  detector = Detector(args.ip, args.port)

  if platform.system() == 'Windows' or platform.system() == 'Darwin':
    import win_recorder
    win_recorder.record_forever(detector)
  else:  
    import pi_recorder
    pi_recorder.record_forever(detector)
Example #22
def detect():
    data = request.get_data(cache=False, as_text=False, parse_form_data=False)

    provider = BasicAudioProvider(data)
    detector = Detector(provider)
    return detector.classify()
Example #23
"""
detector/app.py

Flask-based API webserver to handle inference requests for object detection.

Callum Morrison, 2021
"""

import cv2
import numpy as np
from flask import Flask, Response, abort, jsonify, request

from detect import Detector

app = Flask(__name__)
nn = Detector()


@app.route('/')
def index():
    return 'Please use the API at the /api endpoint'


@app.route('/api/detect', methods=['POST'])
def detect():
    r = request

    # decode the raw POST body into an OpenCV image
    array = np.frombuffer(r.data, np.uint8)
    img = cv2.imdecode(array, cv2.IMREAD_COLOR)

    # return the detections as JSON (assuming bounding_box() yields a
    # JSON-serialisable structure)
    data = nn.bounding_box(img)
    return jsonify(data)
Example #24
def worker():
    provider = ServerAudioProvider(queue)
    detector = Detector(provider)
    detector.detect()
Example #25
from detect import Detector
import cv2

detector = Detector(resolution=320)
loop = True

detect_path = 'imgs/img1.jpg'

path, objects, image = detector.detect_objects(detect_path)
cv2.imwrite('detected_image.jpg', image)
image_np = cv2.imread('detected_image.jpg')
cv2.imshow('images', image_np)
cv2.waitKey()
Example #26
                min = distance
                f1 = f
        print("Face recognised", str(f1[:-4]))
        cv2.rectangle(img1, (0, img1.shape[0]), (img1.shape[1], 0),
                      (0, 255, 0), 3)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img1, str(f1[:-4]), (30, img1.shape[0] - 20), font, 1,
                    (255, 255, 255), 2, cv2.LINE_AA)
        cv2.imwrite("result.jpg", img1)

        # return [1,str(f[:-4])]

        # return [0,""]


obj2 = Detector()
# obj2.detection()
obj1 = Recognition()
obj1.initialise("")
while True:
    # pass the callables; calling them here would run them synchronously
    # and hand Thread their return values instead
    t1 = threading.Thread(target=obj2.detection)
    t2 = threading.Thread(target=obj1.printResults, args=("",))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
# while True:
#     obj1=Recognition()

#     t1.start()
#     t1.join()
Example #27
import cv2
from detect import Detector
from utils import load_config


def initialize_cameras():
    cam_1 = cv2.VideoCapture(
        f"rtsp://{CAM_USERNAME_1}:{CAM_PASSWORD_1}@{CAM_IP_1}:554/cam/realmonitor?channel=1&subtype=1"
    )
    return cam_1


config = load_config('configuration.yml')
CAM_IP_1 = config['CAMERA']['IP']['_1']
CAM_USERNAME_1 = config['CAMERA']['USERNAME']['_1']
CAM_PASSWORD_1 = config['CAMERA']['PASSWORD']['_1']

cam_1 = initialize_cameras()
net = Detector()
Example #28
def main():
    yolo = Detector()
    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True

    video_capture = cv2.VideoCapture("town.avi")

    if writeVideo_flag:
        # Define the codec and create VideoWriter object
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    fps = 0.0
    maxframe = 1
    nframe = 0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break
        t1 = time.time()
        detections = []
        # image = Image.fromarray(frame)
        nframe += 1
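        # run detection once nframe reaches maxframe (every frame, with maxframe=1)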
        if nframe >= maxframe:
            boxs, obj = yolo.detect(frame)
            print(len(boxs))

            # print("box_num",len(boxs))
            features = encoder(frame, boxs)

            # detection confidence is hard-coded to 1.0 here.
            detections = [
                Detection(bbox, 1.0, feature)
                for bbox, feature in zip(boxs, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            # run the tracker on the new detections
            tracker.predict()
            tracker.update(detections)
            nframe = 0

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()

            # # s=recognize(img)
            # try:
            #     if(len(s)>len(track.track_lpn)):
            #         track.track_lpn=s
            # except Exception as e:
            #     print(e)

            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                        (0, 255, 0), 2)

        # for det in detections:
        #     bbox = det.to_tlbr()
        #     cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)

        cv2.imshow('', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')

        fps = (fps + (1. / (time.time() - t1))) / 2  # crude running average
        print("fps= %f" % fps)

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()