Example #1
    def update(self):
        """ This function will update the photo according to the
            current values of blur and brightness and set it to photo label.
        """
        img = self.changeBrightness(self.image, self.brightness_value_now)
        img = self.changeBlur(img, self.blur_value_now)

        # Here we add display text to the image
        text = 'FPS: ' + str(self.fps)
        img = ps.putBText(img,
                          text,
                          text_offset_x=20,
                          text_offset_y=30,
                          vspace=20,
                          hspace=10,
                          font_scale=1.0,
                          background_RGB=(10, 20, 222),
                          text_RGB=(255, 255, 255))
        text = time.strftime("%I:%M %p")  # %I (12-hour clock) pairs with %p
        img = ps.putBText(img,
                          text,
                          text_offset_x=self.image.shape[1] - 180,
                          text_offset_y=30,
                          vspace=20,
                          hspace=10,
                          font_scale=1.0,
                          background_RGB=(228, 20, 222),
                          text_RGB=(255, 255, 255))
        text = f"Brightness: {self.brightness_value_now}"
        img = ps.putBText(img,
                          text,
                          text_offset_x=80,
                          text_offset_y=425,
                          vspace=20,
                          hspace=10,
                          font_scale=1.0,
                          background_RGB=(20, 210, 4),
                          text_RGB=(255, 255, 255))
        text = f'Blur: {self.blur_value_now}'
        img = ps.putBText(img,
                          text,
                          text_offset_x=self.image.shape[1] - 200,
                          text_offset_y=425,
                          vspace=20,
                          hspace=10,
                          font_scale=1.0,
                          background_RGB=(210, 20, 4),
                          text_RGB=(255, 255, 255))

        self.setPhoto(img)
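
The update() method above relies on two helper methods, changeBrightness() and changeBlur(), that are not shown in this example. A minimal sketch of what they could look like, written here as plain functions; HSV value shifting for brightness and a Gaussian kernel for blur are assumptions, not necessarily the original implementation:

import cv2

def changeBrightness(img, value):
    # Shift the V channel in HSV space; cv2.add saturates at 255
    # instead of wrapping around.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    v = cv2.add(v, int(value))
    return cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2BGR)

def changeBlur(img, value):
    # Gaussian kernel size must be odd and at least 1.
    k = 2 * int(value) + 1
    return cv2.GaussianBlur(img, (k, k), 0)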
Example #2
def show_client(addr, client_socket):
	try:
		print('CLIENT {} CONNECTED!'.format(addr))
		if client_socket: # if a client socket exists
			data = b""
			payload_size = struct.calcsize("Q")
			while True:
				while len(data) < payload_size:
					packet = client_socket.recv(4*1024) # 4K
					if not packet: break
					data+=packet
				packed_msg_size = data[:payload_size]
				data = data[payload_size:]
				msg_size = struct.unpack("Q",packed_msg_size)[0]
				
				while len(data) < msg_size:
					data += client_socket.recv(4*1024)
				frame_data = data[:msg_size]
				data  = data[msg_size:]
				frame = pickle.loads(frame_data)
				text  =  f"CLIENT: {addr}"
				frame =  ps.putBText(frame,text,10,10,vspace=10,hspace=1,font_scale=0.7, background_RGB=(255,0,0),text_RGB=(255,250,250))
				cv2.imshow(f"FROM {addr}",frame)
				key = cv2.waitKey(1) & 0xFF
				if key  == ord('q'):
					break
			client_socket.close()
	except Exception as e:
		print(f"CLIENT {addr} DISCONNECTED: {e}")
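
For reference, the sender side of this length-prefixed pickle protocol packs an 8-byte unsigned length header ("Q") in front of each pickled frame. A minimal sketch with hypothetical host and port values (the original server code is not part of this example):

import socket, pickle, struct, cv2

def stream_camera(host='127.0.0.1', port=9999):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        payload = pickle.dumps(frame)
        # 8-byte length header first, then the pickled frame itself.
        sock.sendall(struct.pack("Q", len(payload)) + payload)
    cap.release()
    sock.close()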
Example #3
    def update(self):
        """
            This function will update the image (WebCam input) and add some indicators on it
            """

        img = self.image

        # Here we add display text to the image
        text = time.strftime("%I:%M %p")
        img = ps.putBText(img,
                          text,
                          text_offset_x=self.image.shape[1] - 180,
                          text_offset_y=30,
                          vspace=20,
                          hspace=10,
                          font_scale=1.0,
                          background_RGB=(252, 213, 96),
                          text_RGB=(255, 255, 255))
        text = str(self.prediction)
        if self.prediction == "SAFE":
            img = ps.putBText(img,
                              text,
                              text_offset_x=20,
                              text_offset_y=30,
                              vspace=20,
                              hspace=10,
                              font_scale=1.0,
                              background_RGB=(0, 128, 0),
                              text_RGB=(255, 255, 255))
        elif self.prediction == "NOT SAFE":
            img = ps.putBText(img,
                              text,
                              text_offset_x=20,
                              text_offset_y=30,
                              vspace=20,
                              hspace=10,
                              font_scale=1.0,
                              background_RGB=(255, 0, 0),
                              text_RGB=(255, 255, 255))
            if self.soundOn:
                if time.time() - self.lastplayed > 5 and self.isThereAFace:
                    self.lastplayed = time.time()
                    threading.Thread(target=self.play).start()
        img = cv2.resize(img, (711, 501))
        # Set the real-time webcam image in CameraPlace on a worker thread;
        # pass setPhoto as the thread target instead of calling it inline.
        threading.Thread(target=self.setPhoto, args=(img,)).start()
Example #4
def pyshine_process(params):
    """Video streaming generator function."""
    print("Parameters:", params)
    # Use the FFmpeg backend for network streams; CAP_V4L only handles local devices.
    cap = cv2.VideoCapture("https://demo.bahien.com/live/live/playlist.m3u8", cv2.CAP_FFMPEG)
    #cap = cv2.VideoCapture("rtsp://rtmp.bahien.com:1935/live/live")
    print('FUNCTION DONE')
    # Read until video is completed
    fps = 0
    st = 0
    frames_to_count = 20
    cnt = 0

    print(cap.isOpened())
    while cap.isOpened():

        ret, img = cap.read()
        if ret:
            START_TIME = time.time()
            if cnt == frames_to_count:
                try:  # guard against division by zero on the first pass
                    fps = round(frames_to_count/(time.time()-st))
                    st = time.time()
                    cnt = 0
                except ZeroDivisionError:
                    pass
            
            cnt = cnt + 1
            img, cropface = detection(img)
            img = imutils.resize(img, width=1280)

            text  =  'FPS: '+str(fps)
            img = ps.putBText(img,text,text_offset_x=20,text_offset_y=30,background_RGB=(10,20,222))
            frame = cv2.imencode('.JPEG', img,[cv2.IMWRITE_JPEG_QUALITY,100])[1].tobytes()
            # time.sleep(0.016)
            print("time cost: ", time.time() - START_TIME)
            yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        else:
            break  # end of stream
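
A generator that yields multipart JPEG chunks like this is typically wired to a Flask streaming route. A minimal sketch under that assumption (the app object and route name are not part of the original example):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # The boundary name must match the b'--frame' marker yielded by the generator.
    return Response(pyshine_process({}),
                    mimetype='multipart/x-mixed-replace; boundary=frame')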
Example #5
fps = 0
st = 0
frames_to_count = 20
cnt = 0
while True:
    if cnt == frames_to_count:
        try:  # guard against division by zero on the first pass
            fps = round(frames_to_count / (time.time() - st))
            st = time.time()
            cnt = 0
        except ZeroDivisionError:
            pass
    cnt += 1
    frame = client_socket.recv()
    img = base64.b64decode(frame)
    npimg = np.frombuffer(img, dtype=np.uint8)  # np.fromstring is deprecated
    source = cv2.imdecode(npimg, 1)
    text = 'FPS: ' + str(fps)
    source = ps.putBText(source,
                         text,
                         text_offset_x=20,
                         text_offset_y=30,
                         background_RGB=(10, 20, 222))
    time.sleep(0.01)
    cv2.imshow("client image", source)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
cv2.destroyAllWindows()
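
The receive loop above assumes a transport whose recv() returns one complete message at a time, such as a ZeroMQ socket. A hedged sketch of a matching sender that JPEG-encodes and base64-encodes each webcam frame (the PAIR socket type and port number are assumptions):

import base64
import cv2
import zmq

context = zmq.Context()
server_socket = context.socket(zmq.PAIR)
server_socket.bind('tcp://*:5555')

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    ok, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
    if ok:
        server_socket.send(base64.b64encode(buffer.tobytes()))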
Example #6
def prep_display_for_video(dets_out,
                           img,
                           h=None,
                           w=None,
                           save_folder=None,
                           undo_transform=True,
                           class_color=False,
                           mask_alpha=0.45,
                           fps_str='',
                           override_args: Config = None):
    if undo_transform:
        assert w is not None and h is not None, "with undo_transform=True, w,h params must be specified!"
        img_numpy = undo_image_transformation(img, w, h)
        img_gpu = torch.Tensor(img_numpy).cuda()
    else:
        img_gpu = img / 255.0
        h, w, _ = img.shape

    img_numpy_ori = (img_gpu * 255).byte().cpu().numpy()

    global args
    if override_args is not None:
        args = override_args

    with timer.env('Postprocess'):
        save = cfg.rescore_bbox
        cfg.rescore_bbox = True
        t = postprocess(dets_out,
                        w,
                        h,
                        visualize_lincomb=args.display_lincomb,
                        crop_masks=args.crop,
                        score_threshold=args.score_threshold)
        cfg.rescore_bbox = save

    with timer.env('Copy'):
        idx = t[1].argsort(0, descending=True)[:args.top_k]

        if cfg.eval_mask_branch:
            masks = t[3][idx]
        classes, scores, boxes = [x[idx] for x in t[:3]]

    num_dets_to_consider = min(args.top_k, classes.shape[0])
    for j in range(num_dets_to_consider):
        if scores[j] < args.score_threshold:
            num_dets_to_consider = j
            break

    def get_color(j, on_gpu=None):
        global color_cache
        color_idx = (classes[j] if class_color else j) % len(COLORS)

        if on_gpu is not None and color_idx in color_cache[on_gpu]:
            return color_cache[on_gpu][color_idx]
        else:
            color = COLORS[color_idx]
            if not undo_transform:
                color = (color[2], color[1], color[0])
            if on_gpu is not None:
                color = torch.Tensor(color).to(on_gpu).float() / 255.
                color_cache[on_gpu][color_idx] = color
            return color

    global frame_compare

    if args.display_masks and cfg.eval_mask_branch and num_dets_to_consider > 0:
        if frame_compare != save_folder[4]:
            masks = masks[:num_dets_to_consider, :, :, None]

            colors = torch.cat([
                get_color(j, on_gpu=img_gpu.device.index).view(1, 1, 1, 3)
                for j in range(num_dets_to_consider)
            ],
                               dim=0)
            masks_color = masks.repeat(1, 1, 1, 3) * colors * mask_alpha

            inv_alph_masks = masks * (-mask_alpha) + 1

            masks_color_summand = masks_color[0]
            if num_dets_to_consider > 1:
                inv_alph_cumul = inv_alph_masks[:(num_dets_to_consider -
                                                  1)].cumprod(dim=0)
                masks_color_cumul = masks_color[1:] * inv_alph_cumul
                masks_color_summand += masks_color_cumul.sum(dim=0)

            img_gpu = img_gpu * inv_alph_masks.prod(
                dim=0) + masks_color_summand

    img_numpy = (img_gpu * 255).byte().cpu().numpy()

    if num_dets_to_consider == 0:
        if os.path.isdir(
                save_folder[0]) and save_folder[4] % args.video_fps == 0:
            file_name = save_folder[1] + "_%05d" % save_folder[4] + '.png'
            cv2.imwrite(os.path.join(save_folder[3], file_name), img_numpy)
            cv2.imwrite(os.path.join(save_folder[2], file_name), img_numpy_ori)

        return [img_numpy, img_numpy_ori]

    font_face = cv2.FONT_HERSHEY_DUPLEX
    font_scale = 0.6
    font_thickness = 1

    if args.display_text or args.display_bboxes:
        if frame_compare != save_folder[4]:
            frame_compare = save_folder[4]
            for j in reversed(range(num_dets_to_consider)):
                x1, y1, x2, y2 = boxes[j, :]
                color = get_color(j)
                score = scores[j]

                if args.display_bboxes:
                    cv2.rectangle(img_numpy, (x1, y1), (x2, y2), color, 1)

                if args.display_text:
                    _class = cfg.dataset.class_names[classes[j]]
                    # text_str = '%s: %.2f' % (_class, score) if args.display_scores else _class
                    if args.display_scores:
                        text_str_class = f"{_class}"
                        text_str_score = f": {score:.2f}"

                        text_w_class, text_h_class = \
                            cv2.getTextSize(text_str_class, font_face, font_scale, font_thickness)[0]

                        img_numpy = ps.putBText(img_numpy,
                                                text_str_class,
                                                text_offset_x=x1,
                                                text_offset_y=y1,
                                                vspace=0,
                                                hspace=0,
                                                font=font_face,
                                                font_scale=0.6,
                                                thickness=font_thickness,
                                                alpha=0.7,
                                                background_RGB=color,
                                                text_RGB=(255, 255, 255))
                        img_numpy = ps.putBText(img_numpy,
                                                text_str_score,
                                                text_offset_x=x1,
                                                text_offset_y=y1 +
                                                text_h_class + 2,
                                                vspace=0,
                                                hspace=0,
                                                font=font_face,
                                                font_scale=0.6,
                                                thickness=font_thickness,
                                                alpha=0.7,
                                                background_RGB=color,
                                                text_RGB=(255, 255, 255))
                    else:
                        text_str_class = '%s' % (_class)

                        img_numpy = ps.putBText(img_numpy,
                                                text_str_class,
                                                text_offset_x=x1,
                                                text_offset_y=y1,
                                                vspace=0,
                                                hspace=0,
                                                font=font_face,
                                                font_scale=0.6,
                                                thickness=font_thickness,
                                                alpha=0.7,
                                                background_RGB=color,
                                                text_RGB=(255, 255, 255))

                    if save_folder[4] % args.video_fps == 0:
                        dist = ocr(img_numpy_ori)
                        result = save_folder[
                            4], f"{dist}", f"{_class}", f"{score:.2f}", f"{x1}", f"{y1}", f"{x2}", f"{y2}"
                        result_list.append(result)

            if os.path.isdir(
                    save_folder[0]) and save_folder[4] % args.video_fps == 0:
                file_name = save_folder[1] + "_%05d" % save_folder[4] + '.png'
                cv2.imwrite(os.path.join(save_folder[3], file_name), img_numpy)
                cv2.imwrite(os.path.join(save_folder[2], file_name),
                            img_numpy_ori)

            return [img_numpy, img_numpy_ori, result_list]

    return [img_numpy, img_numpy_ori]
Example #7
def prep_display_for_img(dets_out,
                         img,
                         h=None,
                         w=None,
                         undo_transform=True,
                         class_color=False,
                         mask_alpha=0.45):
    if undo_transform:
        img_numpy = undo_image_transformation(img, w, h)
        img_gpu = torch.Tensor(img_numpy).cuda()
    else:
        img_gpu = img / 255.0
        h, w, _ = img.shape

    with timer.env('Postprocess'):
        save = cfg.rescore_bbox
        cfg.rescore_bbox = True
        t = postprocess(dets_out,
                        w,
                        h,
                        visualize_lincomb=args.display_lincomb,
                        crop_masks=args.crop,
                        score_threshold=args.score_threshold)
        cfg.rescore_bbox = save

    with timer.env('Copy'):
        idx = t[1].argsort(0, descending=True)[:args.top_k]

        if cfg.eval_mask_branch:
            masks = t[3][idx]
        classes, scores, boxes = [x[idx] for x in t[:3]]

    num_dets_to_consider = min(args.top_k, classes.shape[0])
    for j in range(num_dets_to_consider):
        if scores[j] < args.score_threshold:
            num_dets_to_consider = j
            break

    def get_color(j, on_gpu=None):
        global color_cache
        color_idx = (classes[j] if class_color else j) % len(COLORS)

        if on_gpu is not None and color_idx in color_cache[on_gpu]:
            return color_cache[on_gpu][color_idx]
        else:
            color = COLORS[color_idx]
            if not undo_transform:
                color = (color[2], color[1], color[0])
            if on_gpu is not None:
                color = torch.Tensor(color).to(on_gpu).float() / 255.
                color_cache[on_gpu][color_idx] = color
            return color

    if args.display_masks and cfg.eval_mask_branch and num_dets_to_consider > 0:
        masks = masks[:num_dets_to_consider, :, :, None]

        colors = torch.cat([
            get_color(j, on_gpu=img_gpu.device.index).view(1, 1, 1, 3)
            for j in range(num_dets_to_consider)
        ],
                           dim=0)
        masks_color = masks.repeat(1, 1, 1, 3) * colors * mask_alpha

        inv_alph_masks = masks * (-mask_alpha) + 1

        masks_color_summand = masks_color[0]
        if num_dets_to_consider > 1:
            inv_alph_cumul = inv_alph_masks[:(num_dets_to_consider -
                                              1)].cumprod(dim=0)
            masks_color_cumul = masks_color[1:] * inv_alph_cumul
            masks_color_summand += masks_color_cumul.sum(dim=0)

        img_gpu = img_gpu * inv_alph_masks.prod(dim=0) + masks_color_summand

    img_numpy = (img_gpu * 255).byte().cpu().numpy()

    if num_dets_to_consider == 0:
        return img_numpy

    font_face = cv2.FONT_HERSHEY_DUPLEX
    font_scale = 0.6
    font_thickness = 1

    if args.display_text or args.display_bboxes:
        for j in reversed(range(num_dets_to_consider)):
            x1, y1, x2, y2 = boxes[j, :]
            color = get_color(j)
            score = scores[j]

            if args.display_bboxes:
                cv2.rectangle(img_numpy, (x1, y1), (x2, y2), color, 1)

            if args.display_text:
                _class = cfg.dataset.class_names[classes[j]]
                if args.display_scores:
                    text_str_class = f"{_class}"
                    text_str_score = f": {score:.2f}"

                    text_w_class, text_h_class = cv2.getTextSize(
                        text_str_class, font_face, font_scale,
                        font_thickness)[0]

                    img_numpy = ps.putBText(img_numpy,
                                            text_str_class,
                                            text_offset_x=x1,
                                            text_offset_y=y1,
                                            vspace=0,
                                            hspace=0,
                                            font=font_face,
                                            font_scale=0.6,
                                            thickness=font_thickness,
                                            alpha=0.7,
                                            background_RGB=color,
                                            text_RGB=(255, 255, 255))
                    img_numpy = ps.putBText(img_numpy,
                                            text_str_score,
                                            text_offset_x=x1,
                                            text_offset_y=y1 + text_h_class +
                                            2,
                                            vspace=0,
                                            hspace=0,
                                            font=font_face,
                                            font_scale=0.6,
                                            thickness=font_thickness,
                                            alpha=0.7,
                                            background_RGB=color,
                                            text_RGB=(255, 255, 255))
                else:
                    text_str_class = '%s' % _class

                    img_numpy = ps.putBText(img_numpy,
                                            text_str_class,
                                            text_offset_x=x1,
                                            text_offset_y=y1,
                                            vspace=0,
                                            hspace=0,
                                            font=font_face,
                                            font_scale=0.6,
                                            thickness=font_thickness,
                                            alpha=0.7,
                                            background_RGB=color,
                                            text_RGB=(255, 255, 255))

    return img_numpy
Example #8
            while len(data) < payload_size:
                packet = s.recv(4 * 1024)  # 4K
                if not packet: break
                data += packet
            packed_msg_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("Q", packed_msg_size)[0]

            while len(data) < msg_size:
                data += client_socket.recv(4 * 1024)
            frame_data = data[:msg_size]
            data = data[msg_size:]
            frame = pickle.loads(frame_data)
            frame = ps.putBText(frame,
                                f"Canal {channel}",
                                10,
                                10,
                                vspace=10,
                                hspace=1,
                                font_scale=0.7,
                                text_RGB=(255, 250, 250))
            cv2.imshow(f"FROM ", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
        client_socket.close()
except Exception as e:
    print(e)
    client_socket.close()
    print("CLIENT DISCONNECTED")
Example #9
                    data += packet
                packed_msg_size = data[:payload_size]
                data = data[payload_size:]
                msg_size = struct.unpack("Q".packed_msg_size)[0]

                while len(data) < msg_size:
                    data += sock.recv(4 * 1024)
                frame_data = data[:msg_size]
                data = data[msg_size:]
                frame = pickle.loads(frame_data)
                text = f"CLIENT:"
                frame = ps.putBText(frame,
                                    text,
                                    10,
                                    10,
                                    vspace=10,
                                    hspace=1,
                                    font_scale=.7,
                                    background_RGB=(255, 0, 0),
                                    text_RGB=(255, 250, 250))
                cv2.imshow(f"FROM ", frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
            sock.close()
    except Exception as e:
        print("Cliente desconectado")
    finally:
        print('Socket cerrado')
        sock.close()
Example #10
    def run(self, catch):
        def intersect(A, B, C, D):
            return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)
    
        def ccw(A, B, C):
            return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
    
        def vector_angle(midpoint, previous_midpoint):
            x = midpoint[0] - previous_midpoint[0]
            y = midpoint[1] - previous_midpoint[1]
            return math.degrees(math.atan2(y, x))

        global fsplit, mobil, truk, motor, becak, mulai, titik1, titik2
        mulai = 1
        # parameters
        max_cosine_distance = 0.4
        nn_budget = None
        nms_max_overlap = 1.0

        # initialize track
        model_filename = 'model_data/mars-small128.pb'
        encoder = gdet.create_box_encoder(model_filename, batch_size=1)
        # calculate cosine distance metric
        metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
        # initialize tracker
        tracker = Tracker(metric)

        # initialize counting variables
        count_dict = {}  # initiate dict for storing counts
        total_counter = 0
        up_count = 0
        down_count = 0
        from collections import Counter
        class_counter = Counter()  # store counts of each detected class
        from collections import deque
        already_counted = deque(maxlen=50)  # temporary memory for storing counted IDs
        intersect_info = []  # initialise intersection list
        memory = {}

        # load configuration for object detector
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
        input_size = 416
        video_path = 'C:/Users/MSI Laptop/Pictures/overpass.mp4'  # this one
        
        #model
        saved_model_loaded = tf.saved_model.load('./checkpoints/customfinals-416', tags=[tag_constants.SERVING])
        infer = saved_model_loaded.signatures['serving_default']

        # begin video capture
        try:
            vid = cv2.VideoCapture(int(catch))
        except ValueError:
            vid = cv2.VideoCapture(catch)

        frame_num = 0
        # while video is running
        while True:
            return_value, frame = vid.read()
            if return_value:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(frame)
            else:
                # write the CSV summary
                row_list = [["No", "Jenis Kendaraan", "Jumlah"],
                            [1, "Mobil", mobil],
                            [2, "Truk", truk],
                            [3, "Motor", motor],
                            [4, "Becak", becak]]
                with open('./outputs/CSV/'+fsplit+'.csv', 'w', newline='') as file:
                    writer = csv.writer(file)
                    writer.writerows(row_list)
                print('Video Telah Selesai atau Gagal Memuat, coba dengan Video lainnya!')
                tf.keras.backend.clear_session()
                break
            frame_num +=1
            # print('Frame #: ', frame_num)
            frame_size = frame.shape[:2]
            image_data = cv2.resize(frame, (input_size, input_size))
            image_data = image_data / 255.
            image_data = image_data[np.newaxis, ...].astype(np.float32)
            start_time = time.time()

            batch_data = tf.constant(image_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]

            boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
                boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
                scores=tf.reshape(
                    pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
                max_output_size_per_class=50,
                max_total_size=50,
                iou_threshold=0.45,
                score_threshold=0.50
            )

            # convert data to numpy arrays and slice out unused elements
            num_objects = valid_detections.numpy()[0]
            bboxes = boxes.numpy()[0]
            bboxes = bboxes[0:int(num_objects)]
            scores = scores.numpy()[0]
            scores = scores[0:int(num_objects)]
            classes = classes.numpy()[0]
            classes = classes[0:int(num_objects)]

            # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
            original_h, original_w, _ = frame.shape
            bboxes = utils.format_boxes(bboxes, original_h, original_w)

            # store all predictions in one parameter for simplicity when calling functions
            pred_bbox = [bboxes, scores, classes, num_objects]

            # read in all class names from config
            class_names = utils.read_class_names(cfg.YOLO.CLASSES)

            # by default allow all classes in .names file
            allowed_classes = list(class_names.values())
            
            # custom allowed classes (adjust the list below to restrict tracking to specific classes)
            allowed_classes = ['mobil', 'motor', 'truk', 'becak']

            # loop through objects and use class index to get class name, allow only classes in allowed_classes list
            names = []
            deleted_indx = []
            for i in range(num_objects):
                class_indx = int(classes[i])
                class_name = class_names[class_indx]
                if class_name not in allowed_classes:
                    deleted_indx.append(i)
                else:
                    names.append(class_name)
            names = np.array(names)
            count = len(names)

            bboxes = np.delete(bboxes, deleted_indx, axis=0)
            scores = np.delete(scores, deleted_indx, axis=0)

            # encode yolo detections and feed to tracker
            features = encoder(frame, bboxes)
            detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]

            #initialize color map
            cmap = plt.get_cmap('tab20b')
            colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]

            # run non-maxima supression
            boxs = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.class_name for d in detections])
            indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)
            
            # draw the counting line
            cv2.line(frame,titik1,titik2, (0, 255, 255), 2)

            # update tracks
            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue 
                bbox = track.to_tlbr()  # get current position in bounding box format (min x, min y, max x, max y)
                #track_cls = track.cls  # most common detection class for track
                class_name = track.get_class()
                
                #Object counting
                midpoint = track.tlbr_midpoint(bbox)  # find the midpoint of a box in tlbr format
                origin_midpoint = (midpoint[0], frame.shape[0] - midpoint[1])  # midpoint relative to the bottom-left

                if track.track_id not in memory:
                    memory[track.track_id] = deque(maxlen=2)
                
                memory[track.track_id].append(midpoint)
                previous_midpoint = memory[track.track_id][0]

                origin_previous_midpoint = (previous_midpoint[0], frame.shape[0] - previous_midpoint[1])

                if intersect(midpoint, previous_midpoint, titik1,titik2) and track.track_id not in already_counted:
                    class_counter[class_name] += 1
                    total_counter += 1
                    cv2.line(frame,titik1,titik2, (255, 0, 0), 2)  # red line
                    already_counted.append(track.track_id)  # mark this ID as already counted
                    angle = vector_angle(origin_midpoint, origin_previous_midpoint)

                    if angle > 0:
                        up_count += 1
                    if angle < 0:
                        down_count += 1
                # draw bbox on screen
                color = colors[int(track.track_id) % len(colors)]
                color = [i * 255 for i in color]
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
                cv2.putText(frame, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)

            if len(memory) > 50:
                del memory[list(memory)[0]]

            fps = 1.0 / (time.time() - start_time)
            # draw total count.
            text = ("FPS: %.2f" %fps)
            frame =  ps.putBText(frame,text,text_offset_x=int(frame.shape[1]-185),text_offset_y=int(0.05 * frame.shape[0]),vspace=10,hspace=10, font_scale=1.0,background_RGB=(228,20,222),text_RGB=(255,255,255))
            text = "Total: {}".format(str(total_counter))
            frame =  ps.putBText(frame,text,text_offset_x=int(10),text_offset_y=int(0.05 * frame.shape[0]),vspace=10,hspace=10, font_scale=1.0,background_RGB=(10,20,222),text_RGB=(255,255,255))

            # display counts for each class as they appear
            y = 0.12 * frame.shape[0]
            for cls in class_counter:
                class_count = class_counter[cls]
                text = str(cls) + " " + str(class_count)
                if str(cls) == 'mobil':
                    mobil = str(class_count)
                elif str(cls) == 'truk':
                    truk = str(class_count)
                elif str(cls) == 'motor':
                    motor = str(class_count)
                elif str(cls) == 'becak':
                    becak = str(class_count)
                frame =  ps.putBText(frame,text,text_offset_x=int(10),text_offset_y=int(y),vspace=5,hspace=10, font_scale=1.0,background_RGB=(20,210,4),text_RGB=(255,255,255))
                y += 0.05 * frame.shape[0]
                
            # the processed frame is stored in the result variable
            result = np.asarray(frame)
            result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            self.chart()
            self.display_frame(result)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
        cv2.destroyAllWindows()
Example #11
    def run(self, catch):
        def intersect(A, B, C, D):
            return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(
                A, B, D)

        def ccw(A, B, C):
            return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] -
                                                                    A[0])

        def vector_angle(midpoint, previous_midpoint):
            x = midpoint[0] - previous_midpoint[0]
            y = midpoint[1] - previous_midpoint[1]
            return math.degrees(math.atan2(y, x))

        global truck
        global car
        titik1 = (100, 511)
        titik2 = (551, 511)
        # Definition of the parameters
        max_cosine_distance = 0.4
        nn_budget = None
        nms_max_overlap = 1.0

        # initialize deep sort
        model_filename = 'model_data/mars-small128.pb'
        encoder = gdet.create_box_encoder(model_filename, batch_size=1)
        # calculate cosine distance metric
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        # initialize tracker
        tracker = Tracker(metric)

        # initialize counting variables
        count_dict = {}  # initiate dict for storing counts
        total_counter = 0
        up_count = 0
        down_count = 0
        from collections import Counter
        class_counter = Counter()  # store counts of each detected class
        from collections import deque
        already_counted = deque(
            maxlen=50)  # temporary memory for storing counted IDs
        intersect_info = []  # initialise intersection list
        memory = {}

        # load configuration for object detector
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
        input_size = 416
        video_path = 'C:/Users/MSI Laptop/Pictures/overpass.mp4'  # this one

        saved_model_loaded = tf.saved_model.load('./checkpoints/yolov4-416',
                                                 tags=[tag_constants.SERVING])
        infer = saved_model_loaded.signatures['serving_default']

        # begin video capture
        try:
            vid = cv2.VideoCapture(int(catch))
        except ValueError:
            vid = cv2.VideoCapture(catch)

        frame_num = 0
        # while video is running
        while True:
            return_value, frame = vid.read()
            if return_value:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(frame)
            else:
                print(
                    'Video Telah Selesai atau Gagal Memuat, coba dengan Video lainnya!'
                )
                break
            frame_num += 1
            # print('Frame #: ', frame_num)
            frame_size = frame.shape[:2]
            image_data = cv2.resize(frame, (input_size, input_size))
            image_data = image_data / 255.
            image_data = image_data[np.newaxis, ...].astype(np.float32)
            start_time = time.time()

            batch_data = tf.constant(image_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]

            boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
                boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
                scores=tf.reshape(
                    pred_conf,
                    (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
                max_output_size_per_class=50,
                max_total_size=50,
                iou_threshold=0.45,
                score_threshold=0.50)

            # convert data to numpy arrays and slice out unused elements
            num_objects = valid_detections.numpy()[0]
            bboxes = boxes.numpy()[0]
            bboxes = bboxes[0:int(num_objects)]
            scores = scores.numpy()[0]
            scores = scores[0:int(num_objects)]
            classes = classes.numpy()[0]
            classes = classes[0:int(num_objects)]

            # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
            original_h, original_w, _ = frame.shape
            bboxes = utils.format_boxes(bboxes, original_h, original_w)

            # store all predictions in one parameter for simplicity when calling functions
            pred_bbox = [bboxes, scores, classes, num_objects]

            # read in all class names from config
            class_names = utils.read_class_names(cfg.YOLO.CLASSES)

            # by default allow all classes in .names file
            allowed_classes = list(class_names.values())

            # custom allowed classes (uncomment line below to customize tracker for only people)
            # allowed_classes = ['person']

            # loop through objects and use class index to get class name, allow only classes in allowed_classes list
            names = []
            deleted_indx = []
            for i in range(num_objects):
                class_indx = int(classes[i])
                class_name = class_names[class_indx]
                if class_name not in allowed_classes:
                    deleted_indx.append(i)
                else:
                    names.append(class_name)
            names = np.array(names)
            count = len(names)

            bboxes = np.delete(bboxes, deleted_indx, axis=0)
            scores = np.delete(scores, deleted_indx, axis=0)

            # encode yolo detections and feed to tracker
            features = encoder(frame, bboxes)
            detections = [
                Detection(bbox, score, class_name, feature)
                for bbox, score, class_name, feature in zip(
                    bboxes, scores, names, features)
            ]

            #initialize color map
            cmap = plt.get_cmap('tab20b')
            colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]

            # run non-maxima supression
            boxs = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.class_name for d in detections])
            indices = preprocessing.non_max_suppression(
                boxs, classes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)
            # draw the counting line
            cv2.line(frame, titik1, titik2, (0, 255, 255), 2)

            # update tracks
            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr(
                )  # Get current position in bounding box format `(min x, miny, max x,max y)
                #track_cls = track.cls  # most common detection class for track
                class_name = track.get_class()

                #Object counting
                midpoint = track.tlbr_midpoint(
                    bbox)  # Finds midpoint of a box in tlbr format.
                origin_midpoint = (midpoint[0], frame.shape[0] - midpoint[1]
                                   )  # get midpoint respective to botton-left

                if track.track_id not in memory:
                    memory[track.track_id] = deque(maxlen=2)

                memory[track.track_id].append(midpoint)
                previous_midpoint = memory[track.track_id][0]

                origin_previous_midpoint = (previous_midpoint[0],
                                            frame.shape[0] -
                                            previous_midpoint[1])

                if intersect(midpoint, previous_midpoint, titik1,
                             titik2) and track.track_id not in already_counted:
                    class_counter[class_name] += 1
                    total_counter += 1
                    cv2.line(frame, titik1, titik2, (255, 0, 0), 2)  # red line
                    already_counted.append(
                        track.track_id)  # Set already counted for ID to true.
                    angle = vector_angle(origin_midpoint,
                                         origin_previous_midpoint)

                    if angle > 0:
                        up_count += 1
                    if angle < 0:
                        down_count += 1
                # draw bbox on screen
                color = colors[int(track.track_id) % len(colors)]
                color = [i * 255 for i in color]
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), color, 2)
                cv2.rectangle(
                    frame, (int(bbox[0]), int(bbox[1] - 30)),
                    (int(bbox[0]) +
                     (len(class_name) + len(str(track.track_id))) * 17,
                     int(bbox[1])), color, -1)
                cv2.putText(frame, class_name + "-" + str(track.track_id),
                            (int(bbox[0]), int(bbox[1] - 10)), 0, 0.75,
                            (255, 255, 255), 2)

            if len(memory) > 50:
                del memory[list(memory)[0]]

            fps = 1.0 / (time.time() - start_time)
            # Draw total count.
            text = ("FPS: %.2f" % fps)
            frame = ps.putBText(frame,
                                text,
                                text_offset_x=int(frame.shape[1] - 185),
                                text_offset_y=int(0.05 * frame.shape[0]),
                                vspace=10,
                                hspace=10,
                                font_scale=1.0,
                                background_RGB=(228, 20, 222),
                                text_RGB=(255, 255, 255))
            text = "Total: {}".format(str(total_counter))
            frame = ps.putBText(frame,
                                text,
                                text_offset_x=int(10),
                                text_offset_y=int(0.05 * frame.shape[0]),
                                vspace=10,
                                hspace=10,
                                font_scale=1.0,
                                background_RGB=(10, 20, 222),
                                text_RGB=(255, 255, 255))

            # display counts for each class as they appear
            y = 0.12 * frame.shape[0]
            for cls in class_counter:
                class_count = class_counter[cls]
                text = str(cls) + " " + str(class_count)
                if str(cls) == 'car':
                    car = str(class_count)
                elif str(cls) == 'truck':
                    truck = str(class_count)
                frame = ps.putBText(frame,
                                    text,
                                    text_offset_x=int(10),
                                    text_offset_y=int(y),
                                    vspace=5,
                                    hspace=10,
                                    font_scale=1.0,
                                    background_RGB=(20, 210, 4),
                                    text_RGB=(255, 255, 255))
                y += 0.05 * frame.shape[0]
            # self.ui.label_2.setText(text)
            # calculate frames per second of running detections
            # fps = 1.0 / (time.time() - start_time)
            # print("FPS: %.2f" % fps)
            result = np.asarray(frame)
            result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            # cv2.imshow("Output Video", result)
            # self.ui.label.setPixmap(QPixmap.fromImage(result))
            self.display_frame(result)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
        cv2.destroyAllWindows()
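
Both run() variants count a vehicle when the segment between a track's previous and current midpoint crosses the counting line, using the classic counter-clockwise (ccw) orientation test for segment intersection. A tiny self-contained check with made-up points (the coordinates below are illustrative only):

def ccw(A, B, C):
    # True if the triple (A, B, C) makes a counter-clockwise turn.
    return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])

def intersect(A, B, C, D):
    # Segments AB and CD intersect iff A and B lie on opposite sides of CD
    # and C and D lie on opposite sides of AB.
    return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)

# A track moving from (300, 490) to (300, 530) crosses the counting line
# between titik1 = (100, 511) and titik2 = (551, 511):
print(intersect((300, 490), (300, 530), (100, 511), (551, 511)))  # True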
Example #12
def monitor():
    # List of users detected in the last 60 seconds.
    ultimos = []
    # Start time.
    before = time.time()

    # Load the trained recognition model.
    with open('data/model.dat', 'rb') as f:
        all_face_encodings = pickle.load(f)
    
    # Start the video feed.
    video_capture = cv2.VideoCapture(0)

    # Load registered user data.
    wb = openpyxl.load_workbook("data/info.xlsx")
    ws = wb.active

    # Known face names, based on the trained model.
    known_face_names = list(all_face_encodings.keys())
    # Known face encodings, based on the trained model.
    known_face_encodings = np.array(list(all_face_encodings.values()))

    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    # Pause so the image sensor starts up correctly.
    time.sleep(0.5)
    
    while True:
        ret, frame = video_capture.read()
        # Flip the image horizontally (mirror).
        frame = cv2.flip(frame, 1)
        # Scale the image down to 1/4 of the original.
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR to RGB.
        rgb_small_frame = small_frame[:, :, ::-1]

        # Process every other frame.
        if process_this_frame:
            # Find all faces in the given frame.
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []

            for face_encoding in face_encodings:
                # Check for matches.
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance)
                name = "DESCONOCIDO"

                # # Use the first match found in known_face_encodings.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Use the best match for the detected face.
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)

                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                if name not in face_names:
                    face_names.append(name)
                else:
                    # This face is already labeled in the current frame.
                    face_names.append("DUPLICADO")

        process_this_frame = not process_this_frame

        # Show the results.
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            depto = "DESCONOCIDO"
            correo = 0
            deudas = 0
            user = name.split("-")
            
            for j in range(1,ws.max_row):
                if (ws.cell(row = j, column = 1).value == user[0] and ws.cell(row = j, column = 2).value == user[1]):
                    name = user[0]
                    depto = user[1]
                    correo = str(ws.cell(row = j, column = 3).value)
                    deudas = str(ws.cell(row = j, column = 4).value)

            # Scale the face coordinates back up.
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            font = cv2.FONT_HERSHEY_DUPLEX

            # Ratio used to offset the text boxes relative to the box enclosing the face.
            proporcion = 8
            borde = proporcion
            escala = proporcion/10
            salto = proporcion * 5
            ancho = int((right-left)/proporcion)
            pos = bottom+20
            blanco = (255,255,255)
            verde = (0,255,200)
            azul = (0,100,255)
            rojo = (255,0,100)
            naranjo = (255,100,0)
            aceptado = verde
            denegado = rojo

            if (name == "DUPLICADO"):
                # Draw the box.
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 100, 255), 2)

                # Draw the name label.
                with suppress(Exception):
                    ps.putBText(frame, name, text_offset_x=left+ancho, text_offset_y=bottom+20, vspace=borde, hspace=borde, font_scale=escala, background_RGB=naranjo, text_RGB=(255,250,250))

            elif (name == "DESCONOCIDO"):
                # Draw the box.
                cv2.rectangle(frame, (left, top), (right, bottom), (100, 0, 255), 2)

                # Draw the name label.
                with suppress(Exception):
                    ps.putBText(frame, name, text_offset_x=left+ancho, text_offset_y=bottom+20, vspace=borde, hspace=borde, font_scale=escala, background_RGB=denegado, text_RGB=(255,250,250))
                
            else:
                # Draw the box.
                cv2.rectangle(frame, (left, top), (right, bottom), (200, 250, 0), 2)

                # Draw the name label.
                with suppress(Exception):
                    ps.putBText(frame, str(name), text_offset_x=left+ancho, text_offset_y=pos, vspace=borde, hspace=borde, font_scale=escala, background_RGB=aceptado, text_RGB=blanco)

                # Draw the apartment label.
                with suppress(Exception):
                    pos += salto
                    ps.putBText(frame, "Depto. "+str(depto), text_offset_x=left+ancho, text_offset_y=pos, vspace=borde, hspace=borde, font_scale=escala, background_RGB=aceptado, text_RGB=blanco)
                    
                if (int(correo) > 0):
                    # Draw the mail label.
                    with suppress(Exception):
                        pos += salto
                        ps.putBText(frame, "Correo "+str(correo), text_offset_x=left+ancho, text_offset_y=pos, vspace=borde, hspace=borde, font_scale=escala, background_RGB=denegado, text_RGB=blanco)

                if (int(deudas) > 0):
                    # Draw the debts label.
                    with suppress(Exception):
                        pos += salto
                        ps.putBText(frame, "Deudas "+str(deudas), text_offset_x=left+ancho, text_offset_y=pos, vspace=borde, hspace=borde, font_scale=escala, background_RGB=denegado, text_RGB=blanco)
                            
        # Logic that decides when to invoke user_log().
        now = time.time()
        with suppress(Exception):
            if (name+"-"+depto) not in ultimos:
                user_log(name, depto, frame, now)
                ultimos.append(name+"-"+depto)
            if int(now - before) >= tiempo_log:
                before = time.time()
                ultimos.clear()

        # Program-exit hint text.
        ps.putBText(frame,'"ESC" para salir',text_offset_x=50,text_offset_y=frame.shape[0]-50,vspace=10,hspace=10, font_scale=1.0,background_RGB=(228,225,222),text_RGB=(1,1,1))
        # Scale the image to a more reasonable on-screen size.
        frame = cv2.resize(frame, (0, 0), fx=ventana, fy=ventana)
        # Show the resulting image.
        cv2.imshow('Video', frame)

        # Presionar "q" para salir.
        #if cv2.waitKey(1) & 0xFF == ord('q'): break
        if cv2.waitKey(1) & 0xFF == 27:
            res = messagebox.askokcancel('Salir','¿Detener monitoreo?')
            #res = messagebox.askyesno('Salir','¿Está seguro que desea detener el monitoreo')
            if res: break

    # Release the camera and destroy the windows.
    video_capture.release()
    cv2.destroyAllWindows()
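
monitor() unpickles a {name: encoding} dict from data/model.dat. A hedged sketch of how such a file could be produced from the images captured by add_user() in the example after next; the train() body below is an illustration under those assumptions, not the project's actual implementation:

import os
import pickle
import face_recognition

def train(dataset_dir='data/dataset', model_path='data/model.dat'):
    all_face_encodings = {}
    for file_name in os.listdir(dataset_dir):
        if not file_name.endswith('.jpg'):
            continue
        image = face_recognition.load_image_file(os.path.join(dataset_dir, file_name))
        encodings = face_recognition.face_encodings(image)
        if encodings:
            # Keys look like "usuario-depto", matching name.split("-") in monitor().
            all_face_encodings[os.path.splitext(file_name)[0]] = encodings[0]
    with open(model_path, 'wb') as f:
        pickle.dump(all_face_encodings, f)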
Example #13
# author:    PyShine
# website:   http://www.pyshine.com

# import the necessary packages
import cv2
import imutils
import pyshine as ps
import time

image = cv2.imread('lena.jpg')
image = imutils.resize(image, width=720)

text = 'ID: ' + str(123)
image = ps.putBText(image,
                    text,
                    text_offset_x=20,
                    text_offset_y=20,
                    vspace=10,
                    hspace=10,
                    font_scale=1.0,
                    background_RGB=(228, 225, 222),
                    text_RGB=(1, 1, 1))
text = time.strftime("%I:%M %p")
image = ps.putBText(image,
                    text,
                    text_offset_x=image.shape[1] - 170,
                    text_offset_y=20,
                    vspace=10,
                    hspace=10,
                    font_scale=1.0,
                    background_RGB=(228, 225, 222),
                    text_RGB=(1, 1, 1))
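
To inspect the result of this snippet interactively, the annotated image can be displayed with OpenCV's HighGUI (assuming a desktop environment):

cv2.imshow('putBText demo', image)
cv2.waitKey(0)
cv2.destroyAllWindows()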
Example #14
def add_user(usuario, depto, mail, debt):
    # Open the user data file.
    wb = openpyxl.load_workbook("data/info.xlsx")
    ws = wb.active
    encontrado = False
    act_data = True
    capturar = True
    actualizar = False

    # Walk through the file row by row.
    for i in range(1, ws.max_row):
        usr_cell = ws.cell(row=i, column=1).value
        dep_cell = ws.cell(row=i, column=2).value
        mail_cell = ws.cell(row=i, column=3).value
        debt_cell = ws.cell(row=i, column=4).value

        # If the user to be registered is already in the database.
        if (usr_cell == usuario and dep_cell == depto):
            encontrado = True
            if verbose: print("USUARIO YA SE ENCUENTRA REGISTRADO")
            actualizar_datos = messagebox.askyesno(
                'Usuario ya registrado',
                'Usuario ya registrado\n ¿Desea actualizar las deudas y correos del usuario?'
            )
            actualizar = actualizar_datos
            actualizar_foto = messagebox.askyesno(
                'Usuario ya registrado',
                'Usuario ya registrado\n ¿Desea actualizar la imagen del usuario?'
            )
            capturar = actualizar_foto

    # Capture the user's image.
    if capturar:
        messagebox.showwarning(
            'Captura fotográfica',
            'Presionar Espacio para capturar, "ESC" para salir.')
        video = True
        #vs = VideoStream(src=0).start()
        # Start the video feed.
        vs = VideoStream(0).start()
        cv2.startWindowThread()
        cv2.namedWindow("REGISTRAR USUARIO")

        # Pause so the image sensor starts up correctly.
        time.sleep(0.5)

        while video:
            # Whether a face was detected in this frame.
            cara = False
            frame = vs.read()
            # Flip the image horizontally (mirror).
            frame = cv2.flip(frame, 1)
            picture = frame.copy()
            # Scale the image down to 1/4 of the original.
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

            rgb_small_frame = small_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)

            for (top, right, bottom, left) in face_locations:
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                picture = picture[top:bottom, left:right]

                # Ratio used to offset the text boxes relative to the box enclosing the face.
                proporcion = 8
                borde = proporcion
                escala = proporcion / 10
                salto = proporcion * 5
                ancho = int((right - left) / proporcion)
                pos = bottom + 20
                blanco = (255, 255, 255)
                verde = (0, 255, 200)
                azul = (0, 100, 255)
                rojo = (255, 0, 100)
                aceptado = azul
                denegado = rojo

                # Draw the box.
                cv2.rectangle(frame, (left, top), (right, bottom),
                              (250, 100, 0), 2)

                # Draw the name label.
                with suppress(Exception):
                    ps.putBText(frame,
                                str(usuario),
                                text_offset_x=left + ancho,
                                text_offset_y=pos,
                                vspace=borde,
                                hspace=borde,
                                font_scale=escala,
                                background_RGB=aceptado,
                                text_RGB=blanco)

                # Draw the apartment label.
                with suppress(Exception):
                    ps.putBText(frame,
                                "Depto. " + str(depto),
                                text_offset_x=left + ancho,
                                text_offset_y=pos + 50,
                                vspace=borde,
                                hspace=borde,
                                font_scale=escala,
                                background_RGB=aceptado,
                                text_RGB=blanco)

                if len(face_locations) == 1:
                    cara = True

            frame_show = frame.copy()
            # Program-exit hint text.
            ps.putBText(frame_show,
                        '"ESC" para salir',
                        text_offset_x=50,
                        text_offset_y=frame_show.shape[0] - 50,
                        vspace=10,
                        hspace=10,
                        font_scale=1.0,
                        background_RGB=(228, 225, 222),
                        text_RGB=(1, 1, 1))

            # Capture hint text.
            ps.putBText(frame_show,
                        '"Espacio" para capturar',
                        text_offset_x=frame_show.shape[1] - 450,
                        text_offset_y=frame_show.shape[0] - 50,
                        vspace=10,
                        hspace=10,
                        font_scale=1.0,
                        background_RGB=(228, 225, 222),
                        text_RGB=(1, 1, 1))
            frame_show = cv2.resize(frame_show, (0, 0), fx=ventana, fy=ventana)

            cv2.imshow('REGISTRAR USUARIO', frame_show)
            #cv2.startWindowThread()

            key = cv2.waitKey(1) & 0xFF

            # Presionar "c" para capturar imagen
            #if key == ord('c') and cara == True:
            if (key == ord(' ') or key == ord('c')) and cara == True:
                cv2.imwrite('data/dataset/' + usuario + "-" + depto + '.jpg',
                            picture)
                if verbose: print("IMAGEN CAPTURADA")
                cv2.destroyAllWindows()
                #VideoStream(0).stop()
                vs.stop()
                video = False
                #messagebox.showwarning('Captura fotográfica','Imagen capturada')
                break

            # Presionar "q" para salir
            #if key == ord("q")
            if key == 27:
                if verbose: print("ENROLAMIENTO CANCELADO")
                cv2.destroyAllWindows()
                #VideoStream(0).stop()
                vs.stop()
                video = False
                act_data = False
                print('\007')
                messagebox.showwarning('Captura fotográfica',
                                       'Enrolamiento Cancelado')
                break

    if ((not encontrado and act_data) or actualizar):
        if actualizar:
            for i in range(1, ws.max_row):
                usr_cell = ws.cell(row=i, column=1).value
                dep_cell = ws.cell(row=i, column=2).value
                if (usr_cell == usuario and dep_cell == depto):
                    encontrado = True
                    ws.delete_rows(i, 1)
                    wb.save('data/info.xlsx')
        ws.insert_rows(2)
        ws.cell(row=2, column=1).value = usuario
        ws.cell(row=2, column=2).value = depto
        now = time.time()
        tiempo = time.localtime(now)
        time_log = time.strftime("%Y/%m/%d, %H:%M:%S", tiempo)
        ws.cell(row=2, column=5).value = time_log
        try:
            ws.cell(row=2, column=3).value = int(mail)
            ws.cell(row=2, column=4).value = int(debt)
        except (ValueError, TypeError):
            ws.cell(row=2, column=3).value = 0
            ws.cell(row=2, column=4).value = 0
        wb.save('data/info.xlsx')
        messagebox.showwarning('Agregar usuario', 'Usuario Agregado')
        if verbose: print("USUARIO AGREGADO A LA BASE DE DATOS")
        if auto_train: train()