Example #1
def unlock():
    if request.method == 'POST':
        if 'file' not in request.files:
            return jsonify({"message": "'No selected file'"})
        file = request.files['file']

        if file.filename == '':
            return jsonify({"message": "'No selected file'"})
        if file and allowed_file(file.filename):
            file.save(os.path.join(UPLOAD_FOLDER, TEST_FILE))
            nn4_small2_pretrained = create_model()
            nn4_small2_pretrained.load_weights(
                os.path.join(ROOT, 'weights/nn4.small2.v1.h5'))
            img1 = load_image(os.path.join(UPLOAD_FOLDER, ANCHOR_FILE))
            img2 = load_image(os.path.join(UPLOAD_FOLDER, TEST_FILE))
            img1 = align_image(img1)
            img2 = align_image(img2)
            embed1 = nn4_small2_pretrained.predict(np.expand_dims(img1,
                                                                  axis=0))[0]
            embed2 = nn4_small2_pretrained.predict(np.expand_dims(img2,
                                                                  axis=0))[0]
            dist = distance(embed1, embed2)
            if dist < THRESHOLD:
                return jsonify({"message": "You have successfully logged in"})
            else:
                return jsonify({
                    "message":
                    "Sorry, your face doesn't match our database"
                })
    # Fall through for a non-POST request or a disallowed file type;
    # without this the view would implicitly return None.
    return jsonify({"message": "Invalid request"}), 400
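Note: this snippet leans on helpers defined elsewhere in the project (allowed_file, distance, plus the FaceNet-style create_model/load_image/align_image). A minimal sketch of the two generic helpers, assuming a standard image-extension whitelist and a squared-L2 embedding distance:

import numpy as np

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}  # assumption

def allowed_file(filename):
    # Accept only filenames that end in a whitelisted image extension.
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

def distance(emb1, emb2):
    # Squared Euclidean distance between two face embeddings; THRESHOLD
    # in the example is compared against this value.
    return np.sum(np.square(emb1 - emb2))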
Example #2
def high_speed(args):
    # model1 is 1920x1080
    config1, config2 = load_config(args)
    dataset_type1 = config1.get('dataset', 'type')
    detection_thresh1 = config1.getfloat('predict', 'detection_thresh')
    min_num_keypoints1 = config1.getint('predict', 'min_num_keypoints')
    model1 = create_model(args.model1, config1)

    # model2 is 224x224
    dataset_type2 = config2.get('dataset', 'type')
    detection_thresh2 = config2.getfloat('predict', 'detection_thresh')
    min_num_keypoints2 = config2.getint('predict', 'min_num_keypoints')
    model2 = create_model(args.model2, config2)
    model2.to_gpu(2)  # TODO GET DEVICE RIGHT

    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    # cap = cv2.VideoCapture(0) # get input from usb camera
    cap = cv2.VideoCapture("/home/fabian/Documents/dataset/videos/test4.mp4")
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    cap.set(cv2.CAP_PROP_FPS, 60)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
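    # cap.set() is best-effort: drivers may ignore the requested FOURCC/FPS/size,
    # so the FPS the camera actually delivers is logged below.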
    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))

    capture = Capture(cap)
    # predictor1 = Predictor1(model=model1, cap=capture)
    # predictor2 = Predictor2(model=model2, cap=capture)

    capture.start()
    # predictor1.start()
    # predictor2.start()

    # Placeholder main loop: with the predictor threads still commented out,
    # this just busy-waits to keep the capture thread alive.
    while True:
        pass
Example #3
def video(args):
    config = load_config(args)
    model = create_model(args, config)

    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("/home/fabian/Documents/dataset/videos/test4.mp4")
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    fps_time = time.time()  # avoid a bogus FPS value on the first frame
    degree = 0
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    while cap.isOpened():
        degree += 5
        degree = degree % 360
        ret_val, image = cap.read()
        if not ret_val:
            break  # end of stream or read failure
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, model.insize)
        with chainer.using_config('autotune', True):
            humans = estimate(model,
                              image.transpose(2, 0, 1).astype(np.float32),
                              detection_thresh, min_num_keypoints)
        pilImg = Image.fromarray(image)
        pilImg = draw_humans(
            model.keypoint_names,
            model.edges,
            pilImg,
            humans,
            mask=mask.rotate(degree) if mask else None,
            visbbox=config.getboolean('predict', 'visbbox'),
        )
        img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
        msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
        msg += ' ' + config.get('model_param', 'model_name')
        cv2.putText(img_with_humans,
                    'FPS: %f' % (1.0 / (time.time() - fps_time)), (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # img_with_humans = cv2.resize(img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
        img_with_humans = cv2.resize(
            img_with_humans, (1 * model.insize[0], 1 * model.insize[1]))
        cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
        fps_time = time.time()
        # press Esc to exit
        if cv2.waitKey(1) == 27:
            break
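These demos all call load_config(args); the real helper lives elsewhere in the repository. A minimal stand-in, assuming the argparse namespace carries the INI path as args.config:

import configparser

def load_config(args):
    # Read the demo's INI configuration; 'args.config' is an assumption.
    config = configparser.ConfigParser()
    config.read(args.config, 'UTF-8')
    return config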
Example #4
def main():
    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')

    # load model
    model = create_model(config)

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    fps_time = time.time()  # avoid a bogus FPS value on the first frame
    degree = 0
    while cap.isOpened():
        degree += 5
        degree = degree % 360
        ret_val, image = cap.read()
        if not ret_val:
            break  # end of stream or read failure
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, model.insize)
        with chainer.using_config('autotune', True):
            humans = estimate(model,
                              image.transpose(2, 0, 1).astype(np.float32))
        pilImg = Image.fromarray(image)
        pilImg = draw_humans(
            model.keypoint_names,
            model.edges,
            pilImg,
            humans,
            mask=mask.rotate(degree) if mask else None
        )
        img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
        msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
        msg += ' ' + config.get('model_param', 'model_name')
        cv2.putText(img_with_humans, 'FPS: %f' % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        img_with_humans = cv2.resize(img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
        cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
        fps_time = time.time()
        # press Esc to exit
        if cv2.waitKey(1) == 27:
            break
Example #5
def high_speed(args):
    config = load_config(args)
    dataset_type = config.get('dataset', 'type')
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)

    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    capture = Capture(model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = time.time()  # avoid a bogus FPS value on the first frame
    degree = 0

    main_event = threading.Event()

    try:
        while not main_event.is_set():
            degree += 5
            degree = degree % 360
            try:
                image, feature_map = predictor.get()
                humans = get_humans_by_feature(
                    model,
                    feature_map,
                    detection_thresh,
                    min_num_keypoints
                )
            except queue.Empty:
                continue
            except Exception:
                break  # predictor thread died; exit the loop and clean up below
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                mask=mask.rotate(degree) if mask else None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            cv2.putText(img_with_humans, 'FPS: %f' % (1.0 / (time.time() - fps_time)),
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            img_with_humans = cv2.resize(img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            fps_time = time.time()
            # press Esc to exit
            if cv2.waitKey(1) == 27:
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    capture.stop()
    predictor.stop()

    capture.join()
    predictor.join()
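The Capture and Predictor threads used above (and in Example #7) are defined elsewhere in the repository. A minimal sketch of how such a producer/consumer pair could look, assuming Capture opens its own camera (Example #7's variant receives the cv2.VideoCapture instead) and that get_feature is importable from the project's predict module:

import queue
import threading

import cv2
import numpy as np

from predict import get_feature  # assumption: same module as estimate/draw_humans

class Capture(threading.Thread):
    def __init__(self, insize):
        super().__init__(daemon=True)
        self.insize = insize
        self.cap = cv2.VideoCapture(0)
        self.queue = queue.Queue(maxsize=1)
        self.stop_event = threading.Event()

    def run(self):
        while not self.stop_event.is_set():
            ret, frame = self.cap.read()
            if not ret:
                continue
            frame = cv2.resize(frame, self.insize)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            try:
                self.queue.put(frame, timeout=1)
            except queue.Full:
                pass  # consumer is behind; drop the frame
        self.cap.release()

    def get(self):
        return self.queue.get(timeout=1)

    def stop(self):
        self.stop_event.set()

class Predictor(threading.Thread):
    def __init__(self, model, cap):
        super().__init__(daemon=True)
        self.model = model
        self.cap = cap
        self.queue = queue.Queue(maxsize=1)
        self.stop_event = threading.Event()

    def run(self):
        while not self.stop_event.is_set():
            try:
                image = self.cap.get()
            except queue.Empty:
                continue
            feature_map = get_feature(
                self.model, image.transpose(2, 0, 1).astype(np.float32))
            try:
                self.queue.put((image, feature_map), timeout=1)
            except queue.Full:
                pass  # display loop is behind; drop this result

    def get(self):
        return self.queue.get(timeout=1)

    def stop(self):
        self.stop_event.set()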
Example #6
import configparser

import cv2
import numpy as np
from PIL import ImageDraw, Image

from predict import COLOR_MAP
from predict import estimate, draw_humans, create_model
from utils import parse_size

import matplotlib
matplotlib.use('Agg')

if __name__ == '__main__':

    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')

    model = create_model(config)

    def video_handle(video_file, video_output):
        global FPS_list
        # Video reader
        cam = cv2.VideoCapture(video_file)
        input_fps = cam.get(cv2.CAP_PROP_FPS)
        ret_val, input_image = cam.read()
        video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))

        ending_frame = video_length

        # Video writer
        frame_rate_ratio = 1
        output_fps = input_fps / frame_rate_ratio
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
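The snippet is truncated right after choosing the FOURCC. A hypothetical continuation (not the original code) that opens the writer with a frame size matching the input:

import cv2

def open_writer(video_output, fourcc, output_fps, first_frame):
    # VideoWriter's frame size must match every frame later passed to write().
    height, width = first_frame.shape[:2]
    return cv2.VideoWriter(video_output, fourcc, output_fps, (width, height))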
Example #7
def high_speed(args):
    config = load_config(args)
    dataset_type = config.get('dataset', 'type')
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)

    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    cap = cv2.VideoCapture(0)  # get input from usb camera
    # cap = cv2.VideoCapture("/home/fabian/Documents/dataset/videos/test4.mp4")
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    cap.set(cv2.CAP_PROP_FPS, 60)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = time.time()  # avoid a bogus FPS value on the first frame
    degree = 0

    main_event = threading.Event()

    try:
        while not main_event.is_set() and cap.isOpened():
            degree += 5
            degree = degree % 360
            try:
                image, feature_map = predictor.get()
                humans = get_humans_by_feature(model, feature_map,
                                               detection_thresh,
                                               min_num_keypoints)
            except queue.Empty:
                continue
            except Exception:
                break  # predictor thread died; exit the loop and clean up below
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                mask=mask.rotate(degree) if mask else None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            cv2.putText(img_with_humans,
                        'FPS: %f' % (1.0 / (time.time() - fps_time)), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            img_with_humans = cv2.resize(
                img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
            fps_time = time.time()
            # press Esc to exit
            if cv2.waitKey(1) == 27:
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    capture.stop()
    predictor.stop()

    capture.join()
    predictor.join()
Example #8
    def run(self):
        model = create_model(self.modelargs, self.config)
        logger.info('{} started at PID {} - 224x224 model loaded'.format(self.name, self.pid))
        if chainer.backends.cuda.available:
            model = model.to_gpu(1)
        self.model = model
        self.insize = (224, 224)
        self.pipe_end.send(True)  # model loaded sign
        count = 0
        run = False
        if self.pipe_end.recv():
            logger.info("start running 224x224")
            run = True

        # Run the first forward pass to warm up the model's autotune,
        # then wait for the bigger model to complete its first forward pass.
        image, count = self.queue_in.get(timeout=1)
        image = cv2.resize(image, self.insize)
        with chainer.using_config('autotune', True), \
             chainer.using_config('use_ideep', 'auto'):
            feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
        # self.queue.put((image, feature_map), timeout=1)
        self.queue.put(feature_map, timeout=1)

        if self.pipe_end.recv() == 2:
            pass

        while not self.stop_event.is_set():
            try:
                if run:
                    t_start = time.time()
                    # image, count = self.cap.get(2)
                    image, count = self.queue_in.get(timeout=1)
                    self.queue_get_time += time.time()-t_start
                    try:
                        humans = self.queue_comm.get(timeout=1)
                        # if humans: print(humans)
                        cropped_image_set = self.cut_human(image, humans)
                    except queue.Empty:
                        logger.info('humans queue empty')
                        cropped_image_set = self.random_crop(image, 20)

                    feature_map = self.model.predict_video(cropped_image_set)

                    # logger.info('get img from queue took {} sec'.format(time.time()-t_start))
                    # print('pred2 getting from cap:'+str(count)+'\n')
                    image = cv2.resize(image, self.insize)
                    # print(cropped_image_set)
                    t_start = time.time()
                    with chainer.using_config('autotune', True), \
                         chainer.using_config('use_ideep', 'auto'):
                        feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
                    if not self.queue.empty():
                        self.inf_time = time.time() - t_start
                    # self.queue.put((image, feature_map), timeout=1)
                    self.queue.put(feature_map, timeout=1)  # maybe not needed to be a queue, just internal storage of the process
                    #logger.debug("pred2 queue: {}".format(self.queue.qsize()))

                else:
                    logger.info("waiting for other model to load....")
                    if self.pipe_end.recv() == 'stop':
                        print("STOP received via pipe")
                    if self.queue_in.qsize() == 0 and self.queue.qsize() > 0:
                        self.pipe_end.send('stop')
                        self.stop()

            except queue.Full:
                logger.info("queue full")
            except queue.Empty:
                logger.info("queue empty")
                if self.queue.qsize() > 0:  # self.pipe_end.recv() == 'stop':
                    self.pipe_end.send('stop')
                    self.stop()
            except cv2.error:
                logger.info("CV2 error")
                logger.info('{} exiting'.format(self.name))
                self.pipe_end.send('stop')
                time.sleep(1)
                self.stop()
            except KeyboardInterrupt:
                self.pipe_end.send('stop')
                self.stop()
            except BaseException:  # catch-all: report, signal the peer, then re-raise
                print("Unexpected error:", sys.exc_info()[0])
                self.pipe_end.send('stop')
                time.sleep(1)
                self.stop()
                raise
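This run() and the one in Example #9 appear to belong to multiprocessing.Process subclasses (note self.pid, the Pipe endpoint, and the shared queues). A minimal skeleton of the surrounding class, with all names inferred from the attributes the snippets touch:

import multiprocessing as mp

class PredictorProcess(mp.Process):
    # Sketch only: the real class also implements cut_human(), random_crop(),
    # predict_video() and holds the detection thresholds.
    def __init__(self, modelargs, config, queue_in, queue_out,
                 queue_comm, pipe_end):
        super().__init__()
        self.modelargs = modelargs
        self.config = config
        self.queue_in = queue_in      # frames from the capture process
        self.queue = queue_out        # feature maps to the consumer
        self.queue_comm = queue_comm  # humans exchanged between the predictors
        self.pipe_end = pipe_end      # control channel to the peer process
        self.stop_event = mp.Event()
        self.queue_get_time = 0.0
        self.inf_time = 0.0

    def stop(self):
        self.stop_event.set()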
Example #9
    def run(self):
        model = create_model(self.modelargs, self.config)
        logger.info('{} started at PID {} - 1920x1080 model loaded'.format(self.name, self.pid))
        if chainer.backends.cuda.available:
            model = model.to_gpu(0)
        self.model = model
        self.insize = (1920, 1080)
        self.pipe_end.send(True)  # model loaded sign
        count = 0
        run = False
        if self.pipe_end.recv():
            logger.info("start running 1920x1080")
            run = True

        while not self.stop_event.is_set():
            try:
                if run and not self.pipe_end.poll():
                    t_start = time.time()
                    # image, count = self.cap.get(1)
                    image, count = self.queue_in.get(timeout=1)
                    self.queue_get_time += time.time() - t_start
                    # logger.info('get img from queue took {} sec'.format(time.time()-t_start))

                    # print('pred1 getting from cap:'+str(count)+'\n')
                    image = cv2.resize(image, self.insize)
                    t_start = time.time()
                    with chainer.using_config('autotune', True), \
                         chainer.using_config('use_ideep', 'auto'):
                        feature_map = get_feature(self.model, image.transpose(2, 0, 1).astype(np.float32))
                    if not self.queue.empty():
                        self.inf_time += time.time() - t_start
                    # self.queue.put((image, feature_map), timeout=1)
                    self.queue.put((feature_map), timeout=1)
                    humans = get_humans_by_feature(model, feature_map, self.detection_threshold, self.min_num_keypoints)
                    if len(humans) > 0:
                        self.queue_comm.put(humans, timeout=1)
                    # cropped_image_set = self.cut_human(image, humans)
                    # logger.debug("pred1 queue {}: ".format(self.queue.qsize()))
                    if self.queue.qsize() == 1:
                        # signal that the big model finished its first forward pass
                        self.pipe_end.send(2)

                    ## BLOCK ON PURPOSE ##
                    block = False  # flip to True to deliberately stall this process
                    while block:
                        time.sleep(2)
                    ######
                else:
                    # logger.info("waiting for other model to load....")
                    if self.pipe_end.recv() == 'stop':
                        logger.info("STOP received via pipe")
                        self.stop()
                    if self.queue_in.qsize() == 0 and self.queue.qsize() > 0:
                        self.pipe_end.send('stop')
                        self.stop()
            except queue.Full:
                logger.info("queue full")
            except queue.Empty:
                logger.info("queue empty")
                if self.queue.qsize() > 0:
                    self.stop()
            except cv2.error:
                logger.info("CV2 error")
                self.pipe_end.send('stop')
                time.sleep(1)
                self.stop()
            except KeyboardInterrupt:
                self.pipe_end.send('stop')
                self.stop()
            except BaseException:  # catch-all: report, signal the peer, then re-raise
                print("Unexpected error:", sys.exc_info()[0])
                self.pipe_end.send('stop')
                time.sleep(1)
                self.stop()
                raise
Example #10
def load_network(weights_path, device):
    net = create_model(num_classes=6)
    net.load_state_dict(torch.load(weights_path, map_location=device)["model"])
    return net
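A hypothetical usage of this PyTorch loader; the weights path is an assumption:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = load_network('weights/checkpoint.pth', device)
net.to(device).eval()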
Example #11
def high_speed(args):
    # model1 is 1920x1080
    config1, config2 = load_config(args)
    dataset_type1 = config1.get('dataset', 'type')
    detection_thresh1 = config1.getfloat('predict', 'detection_thresh')
    min_num_keypoints1 = config1.getint('predict', 'min_num_keypoints')
    model1 = create_model(args.model1, config1)

    # model2 is 224x224
    dataset_type2 = config2.get('dataset', 'type')
    detection_thresh2 = config2.getfloat('predict', 'detection_thresh')
    min_num_keypoints2 = config2.getint('predict', 'min_num_keypoints')
    model2 = create_model(args.model2, config2)
    model2.to_gpu(2)  # TODO GET DEVICE RIGHT

    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    # cap = cv2.VideoCapture(0) # get input from usb camera
    cap = cv2.VideoCapture("/home/fabian/Documents/dataset/videos/test4.mp4")
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    cap.set(cv2.CAP_PROP_FPS, 60)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))

    capture = Capture(cap)
    predictor1 = Predictor1(model=model1, cap=capture)
    predictor2 = Predictor2(model=model2, cap=capture)

    capture.start()
    predictor1.start()
    predictor2.start()

    while True:
        # NOTE: this loop never breaks, so the cleanup below is unreachable as
        # written; the commented-out block shows the intended display loop.
        image, feature_map = predictor1.get()
        image2, feature_map2 = predictor2.get()

    # fps_time = 0
    # degree = 0
    #
    # main_event = threading.Event()
    #
    # try:
    #     while not main_event.is_set() and cap.isOpened():
    #         degree += 5
    #         degree = degree % 360
    #         try:
    #             image, feature_map = predictor1.get()
    #             image2, feature_map2 = predictor2.get()
    #             humans = get_humans_by_feature(
    #                 model1,
    #                 feature_map,
    #                 detection_thresh1,
    #                 min_num_keypoints1
    #             )
    #         except queue.Empty:
    #             continue
    #         except Exception:
    #             break
    #         pilImg = Image.fromarray(image)
    #         pilImg = draw_humans(
    #             model1.keypoint_names,
    #             model1.edges,
    #             pilImg,
    #             humans,
    #             mask=mask.rotate(degree) if mask else None,
    #             visbbox=config1.getboolean('predict', 'visbbox'),
    #         )
    #         img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
    #         msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
    #         msg += ' ' + config1.get('model_param', 'model_name')
    #         cv2.putText(img_with_humans, 'FPS: %f' % (1.0 / (time.time() - fps_time)),
    #                     (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    #         img_with_humans = cv2.resize(img_with_humans, (int(1/3 * model1.insize[0]), int(1/3 * model1.insize[1])))
    #         cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
    #         fps_time = time.time()
    #         # press Esc to exit
    #         if cv2.waitKey(1) == 27:
    #             main_event.set()
    # except Exception as e:
    #     print(e)
    # except KeyboardInterrupt:
    #     main_event.set()

    capture.stop()
    predictor1.stop()
    predictor2.stop()

    capture.join()
    predictor1.join()
    predictor2.join()
Example #12
def high_speed(args, viewer):
    config = load_config(args)
    dataset_type = config.get('dataset', 'type')
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)
    svo_file_path = None  # e.g. "/home/adujardin/Downloads/5m.svo" or config.get('zed', 'svo_file_path')

    init_cap_params = sl.InitParameters()
    if svo_file_path:
        print("Loading SVO file " + svo_file_path)
        init_cap_params.svo_input_filename = svo_file_path
        init_cap_params.svo_real_time_mode = True
    init_cap_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_cap_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
    init_cap_params.coordinate_units = sl.UNIT.UNIT_METER
    init_cap_params.depth_stabilization = True
    init_cap_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP

    cap = sl.Camera()
    if not cap.is_opened():
        print("Opening ZED Camera...")
    status = cap.open(init_cap_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    cap.enable_tracking(tracking_parameters)

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = time.time()  # avoid a bogus FPS value on the first frame
    main_event = threading.Event()
    viewer.edges = model.edges

    try:
        while not main_event.is_set() and cap.is_opened():
            try:
                image, feature_map, depth = predictor.get()
                humans = get_humans_by_feature(model, feature_map,
                                               detection_thresh,
                                               min_num_keypoints)
                humans_3d = get_humans3d(humans, depth, model)
            except queue.Empty:
                continue
            except Exception as e:
                print(e)
                break
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            img_with_humans = cv2.resize(
                img_with_humans,
                (700, 400))  #(3 * model.insize[0], 3 * model.insize[1]))
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            fps_display = 'FPS: %f' % (1.0 / (time.time() - fps_time))
            str_to_display = msg + " " + fps_display
            cv2.putText(img_with_humans, fps_display, (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)

            viewer.update_text(str_to_display)
            viewer.update_humans(humans_3d)

            fps_time = time.time()
            key = cv2.waitKey(1)

            # press Esc to exit (the bare `exit` here was a no-op; main_event
            # already terminates the loop)
            if key == 27:
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    capture.stop()
    predictor.stop()

    capture.join()
    predictor.join()
    cap.close()