Example #1
def detect(path_to_img):
    print("START RECOGNIZING")

    startTime = time.time()
    # DETECT
    img = mpimg.imread(path_to_img)
    # save the image for the dataset
    # _img = cv2.imread(path_to_img)
    # saveIMG(img)
    NP = nnet.detect([img])
    # Generate image mask.
    cv_img_masks = filters.cv_img_mask(NP)

    # Detect points.
    arrPoints = rectDetector.detect(cv_img_masks)
    zones = rectDetector.get_cv_zonesBGR(img, arrPoints)
    # if bool(zones):
    #     saveIMG(img)
    # find standard
    regionIds, stateIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)
    cprint(textArr, 'blue', attrs=['reverse'])
    print(time.time() - startTime)
    # return textArr
    print(parceNumber(textArr[0]))
    return parceNumber(textArr[0])[1]
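These examples call module-level objects (nnet, rectDetector, optionsDetector, textDetector, filters) that are created outside the snippets. A minimal setup sketch, mirroring the initialization shown in Example #15; all paths are assumptions:

import os
import sys
import matplotlib.image as mpimg  # most examples use mpimg to read images as RGB

NOMEROFF_NET_DIR = os.path.abspath('./nomeroff-net')  # assumed install location
sys.path.append(NOMEROFF_NET_DIR)

from NomeroffNet import filters, RectDetector, TextDetector, OptionsDetector, Detector, textPostprocessing

# Mask-RCNN-based number plate detector (older Nomeroff Net API, as in Example #15)
nnet = Detector(os.path.join(NOMEROFF_NET_DIR, 'Mask_RCNN'),
                os.path.join(NOMEROFF_NET_DIR, 'logs'))
nnet.loadModel(os.path.join(NOMEROFF_NET_DIR, 'models/mask_rcnn_numberplate_0700.h5'))

rectDetector = RectDetector()

optionsDetector = OptionsDetector()
optionsDetector.load(os.path.join(NOMEROFF_NET_DIR, 'models/numberplate_options_2019_03_05.h5'))

textDetector = TextDetector({'ru': {
    'for_regions': ['ru'],
    'model_path': os.path.join(NOMEROFF_NET_DIR, 'models/anpr_ocr_ru_3-cpu.h5')}})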
Example #2
def read_number_plates(url):
    global graph, sess
    with urlopen(url) as file:
        img = mpimg.imread(file, 0)

    with graph.as_default():
        set_session(sess)
        NP = nnet.detect([img])

        # Generate image mask.
        cv_img_masks = filters.cv_img_mask(NP)

        # Detect points.
        points = rectDetector.detect(cv_img_masks)
        zones = rectDetector.get_cv_zonesBGR(img, points)

        # find standard
        region_ids, state_ids, _ = optionsDetector.predict(zones)
        region_names = optionsDetector.getRegionLabels(region_ids)

        # find text with postprocessing by standard
        number_plates = textDetector.predict(zones, region_names)
        number_plates = textPostprocessing(number_plates, region_names)

    return number_plates, region_names
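Example #2 relies on the globals graph and sess so the Keras/TensorFlow 1.x models can be shared safely across web-server threads. A hedged sketch of that setup and a call; the URL is an assumption:

import tensorflow as tf
from keras.backend import set_session  # or tensorflow.keras.backend, depending on the install
from urllib.request import urlopen
import matplotlib.image as mpimg

sess = tf.Session()              # TensorFlow 1.x API
graph = tf.get_default_graph()
set_session(sess)
# ...load nnet, rectDetector, optionsDetector, textDetector inside this session...

plates, regions = read_number_plates('http://example.com/car.jpg')  # hypothetical URL
print(plates, regions)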
Example #3
def upload():
    global graph
    if request.method == "POST":
        file = request.files["file"]
        if file and file.content_type.rsplit('/', 1)[1] in ALLOWED_EXTENSIONS:
            filename = secure_filename(file.filename)
            imgPath = 'images/' + filename
            file.save(imgPath)
            img = mpimg.imread(imgPath)
            with graph.as_default():
                NP = nnet.detect([img])
                cv_img_masks = filters.cv_img_mask(NP)
                arrPoints = rectDetector.detect(cv_img_masks)
                zones = rectDetector.get_cv_zonesBGR(img, arrPoints)
                regionIds, stateIds, countLines = optionsDetector.predict(
                    zones)
                regionNames = optionsDetector.getRegionLabels(regionIds)
                textArr = textDetector.predict(zones)
                textArr = textPostprocessing(textArr, regionNames)
                print(textArr)
                response = requests.post(
                    'https://ab1b27f2.ngrok.io/api/validateOut',
                    data={
                        'carNumber': textArr,
                        'parkId': '1'
                    })
                return response.json()
    return jsonify({'error': ''})
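A hypothetical client-side test for the upload route above; host, port, and route path are assumptions:

import requests

with open('car.jpg', 'rb') as f:                       # sample image, assumed
    r = requests.post('http://localhost:5000/upload',  # route path is an assumption
                      files={'file': ('car.jpg', f, 'image/jpeg')})  # mimetype matters: the route checks content_type
print(r.json())  # forwarded response from the validateOut API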
Example #4
def detect(img):
    print("START RECOGNIZING")

    # img_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ex2.jpeg')
    # read the image in RGB format
    # (note: OpenCV reads images in BGR format instead)
    # img = mpimg.imread(img_path)
    # DETECT
    NP = nnet.detect([img])
    # Generate image mask.
    cv_img_masks = filters.cv_img_mask(NP)

    # Detect points.
    arrPoints = rectDetector.detect(cv_img_masks)
    print(arrPoints)
    zones = rectDetector.get_cv_zonesBGR(img, arrPoints)

    # find standard
    regionIds, stateIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)

    return arrPoints, textArr, zones
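Since this detect() returns the plate quadrilaterals alongside the text, the result can be visualized directly; a sketch using the filters.draw_box helper seen in Examples #6 and #13 (the image path is an assumption):

import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img = mpimg.imread('car.jpg')      # assumed test image
points, texts, zones = detect(img)
print(texts)
filters.draw_box(img, points, (0, 255, 0), 3)  # draw green plate outlines in place
plt.imshow(img)
plt.show()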
Example #5
def index(file_name):

    # Detect numberplate
    file_path = pathlib.Path(__file__).parent.absolute()
    print(file_name)
    img_path = str(file_path / 'images' / file_name)  # portable join; the original hard-coded Windows '\\' separators
    print(img_path)
    with graph.as_default():
        img = mpimg.imread(img_path)
        NP = nnet.detect([img])

        # Generate image mask.
        cv_img_masks = filters.cv_img_mask(NP)

        # Detect points.
        arrPoints = rectDetector.detect(cv_img_masks)
        zones = rectDetector.get_cv_zonesBGR(img, arrPoints)

        # find standard
        regionIds, stateIds, countLines = optionsDetector.predict(zones)
        regionNames = optionsDetector.getRegionLabels(regionIds)

        # find text with postprocessing by standard
        textArr = textDetector.predict(zones)
        textArr = textPostprocessing(textArr, regionNames)

    return jsonify({'numbers': textArr})
Example #6
def test(dirName, fname, y, verbose=0):
    nGood = 0
    nBad = 0
    img_path = os.path.join(dirName, fname)
    if verbose == 1:
        print(colored(f"__________ \t\t {img_path} \t\t __________", "blue"))
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    cv_imgs_masks = nnet.detect_mask([img])

    for cv_img_masks in cv_imgs_masks:
        for mask in cv_img_masks:
            plt.imshow(mask)
            plt.show()

        # Detect points.
        arrPoints = rectDetector.detect(cv_img_masks)

        if verbose:
            filters.draw_box(img, arrPoints, (0, 255, 0), 3)
            plt.imshow(img)
            plt.show()

        # cut zones
        zones = rectDetector.get_cv_zonesBGR(img, arrPoints)
        toShowZones = rectDetector.get_cv_zonesRGB(img, arrPoints)
        if verbose:
            for zone, points in zip(toShowZones, arrPoints):
                plt.imshow(zone)
                plt.show()

        # find standard
        regionIds, stateIds, lines = optionsDetector.predict(zones)
        regionNames = optionsDetector.getRegionLabels(regionIds)
        if verbose:
            print(regionNames)

        # find text with postprocessing by standard
        textArr = textDetector.predict(zones, regionNames, lines)
        textArr = textPostprocessing(textArr, regionNames)
        if verbose:
            print(textArr)

        for yText in y:
            if yText in textArr:
                print(
                    colored(
                        f"OK: TEXT:{yText} \t\t\t RESULTS:{textArr} \n\t\t\t\t\t in PATH:{img_path}",
                        'green'))
                nGood += 1
            else:
                print(
                    colored(
                        f"NOT OK: TEXT:{yText} \t\t\t RESULTS:{textArr} \n\t\t\t\t\t in PATH:{img_path} ",
                        'red'))
                nBad += 1
    return nGood, nBad
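A hypothetical driver that runs test() over a directory of labelled images and reports accuracy; the directory and ground-truth labels are assumptions:

dir_name = './test_images'             # assumed dataset location
expected = {'car1.jpg': ['AA1234BB']}  # hypothetical ground-truth plates per file
n_good = n_bad = 0
for fname, y in expected.items():
    g, b = test(dir_name, fname, y, verbose=0)
    n_good += g
    n_bad += b
print(f'accuracy: {n_good / max(n_good + n_bad, 1):.2%}')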
Example #7
def read_number_plates(img):
    targetBoxes = detector.detect_bbox(img)
    all_points = npPointsCraft.detect(img, targetBoxes, [5, 2, 0])

    # cut zones
    zones = convertCvZonesRGBtoBGR(
        [getCvZoneRGB(img, reshapePoints(rect, 1)) for rect in all_points])

    # predict zone attributes
    regionIds, stateIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)

    return textArr, regionNames
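Example #7 uses the newer YOLO/CRAFT pipeline rather than Mask-RCNN; a setup sketch following the nomeroff-net 2.x examples (module paths and loader calls may differ between versions and are assumptions here):

from NomeroffNet.YoloV5Detector import Detector
from NomeroffNet.BBoxNpPoints import (NpPointsCraft, getCvZoneRGB,
                                      convertCvZonesRGBtoBGR, reshapePoints)

detector = Detector()
detector.load()          # loads the bounding-box detection model

npPointsCraft = NpPointsCraft()
npPointsCraft.load()     # loads the CRAFT keypoint models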
Example #8
def detectCarNumber(imgPath: str) -> list:
    """
    :param imgPath: path to the car image
    :return: recognized license plate number(s)
    """
    img = mpimg.imread(imgPath)
    NP = nnet.detect([img])

    cvImgMasks = filters.cv_img_mask(NP)

    arrPoints = rectDetector.detect(cvImgMasks)
    zones = rectDetector.get_cv_zonesBGR(img, arrPoints)

    regionIds, stateIds, _c = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)
    return textArr
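A short usage sketch for detectCarNumber(); the path and sample output are assumptions:

plates = detectCarNumber('images/car.jpg')  # hypothetical image path
print(plates)  # e.g. a list like ['AC4921CB']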
Example #9
    def detect(self, img_path):
        # Detect numberplate
        img = mpimg.imread(img_path)
        NP = self.nnet.detect([img])

        # Generate image mask.
        cv_img_masks = filters.cv_img_mask(NP)

        # Detect points.
        arrPoints = self.rectDetector.detect(cv_img_masks)
        zones = self.rectDetector.get_cv_zonesBGR(img, arrPoints)

        # find standard
        regionIds, stateIds, countLines = self.optionsDetector.predict(zones)
        regionNames = self.optionsDetector.getRegionLabels(regionIds)

        # find text with postprocessing by standard
        textArr = self.textDetector.predict(zones)
        textArr = textPostprocessing(textArr, regionNames)

        return textArr
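Example #10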
def detect_number(img):  # frame in which we must detect the plate number
    cv_img_masks = nnet.detect_mask([img])

    # Detect points.
    arrPoints = rectDetector.detect(cv_img_masks)
    zones = rectDetector.get_cv_zonesBGR(img, arrPoints)
    toShowZones = rectDetector.get_cv_zonesRGB(img, arrPoints)

    # find standard
    regionIds, stateIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)

    state = False  # was a plate found?
    really_number = False  # could the number actually be valid?
    zone = ''
    answer_nums = []
    answer_cords = []
    if len(textArr) > 0:
        state = True
        i = 0
        for num in textArr:
            ok = WrongNumbers.check(num)
            if ok:
                really_number = True
                zone = toShowZones[i]  # NOTE: unclear how to handle multiple zones; assuming a single zone
                answer_nums.append(num)
                answer_cords.append(arrPoints[i])
                log.debug('Found valid number: %s' % str(answer_nums))
            i += 1
    if not really_number:
        answer_nums = textArr
        answer_cords = arrPoints

    return state, really_number, answer_nums, answer_cords, zone  # found a plate; whether the number can be valid
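A hypothetical way to feed detect_number() from a video stream:

import cv2

cap = cv2.VideoCapture(0)  # camera index is an assumption
ret, frame = cap.read()
if ret:
    found, valid, nums, coords, zone = detect_number(frame)
    print(found, valid, nums)
cap.release()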
Example #11
def test(dirName, fname):
    img_path = os.path.join(dirName, fname)
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    cv_imgs_masks = nnet.detect_mask([img])
    
    for cv_img_masks in cv_imgs_masks:
        #print(np.array(cv_img_masks).shape)
        # Detect points.
        arrPoints = rectDetector.detect(cv_img_masks)

        # cut zones
        zones = rectDetector.get_cv_zonesBGR(img, arrPoints, 64, 295)

        # find standard
        regionIds, stateIds, countLines = optionsDetector.predict(zones)
        regionNames = optionsDetector.getRegionLabels(regionIds)

        # find text with postprocessing by standard
        textArr = textDetector.predict(zones, regionNames, countLines)
        textArr = textPostprocessing(textArr, regionNames)
        return textArr
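Example #12
def recognize():
    # NOTE: the beginning of this snippet is missing from the source; this
    # wrapper (hypothetical function name) only restores a syntactically valid
    # TextDetector configuration around the surviving region entries.
    textDetector = TextDetector({
        # ...region configs lost in the source...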
        "kz": {
            "for_regions": ["kz"],
            "model_path": "latest"
        },
        "ge": {
            "for_regions": ["ge"],
            "model_path": "latest"
        }
    })

    img_path = '/var/www/1.JPG'
    print(img_path)

    img = mpimg.imread(img_path)
    NP = nnet.detect([img])

    # Generate image mask.
    cv_img_masks = filters.cv_img_mask(NP)

    # Detect points.
    arrPoints = rectDetector.detect(cv_img_masks)
    zones = rectDetector.get_cv_zonesBGR(img, arrPoints)

    # find standard
    regionIds, stateIds, countLines = optionsDetector.predict(zones)
    regionNames = optionsDetector.getRegionLabels(regionIds)

    # find text with postprocessing by standard
    textArr = textDetector.predict(zones)
    textArr = textPostprocessing(textArr, regionNames)
    print(textArr)
Example #13
def detect_on_image():
    global graph

    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return "No file part"

        file = request.files['file']

        if file.filename == '':
            flash('No selected file')
            return "No selected file"

        if file and allowed_file(file.filename):

            filename = secure_filename(file.filename)
            path_to_file = os.path.join('images/', filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], path_to_file))

            img_path = os.path.join(app.config['UPLOAD_FOLDER'], path_to_file)
            img = mpimg.imread(img_path)

            or_img = cv2.imread(img_path)

            img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
            or_img = cv2.resize(or_img, (0, 0), fx=0.5, fy=0.5)
            with graph.as_default():
                NP = nnet.detect([img])
                cv_img_masks = filters.cv_img_mask(NP)

                arrPoints = rectDetector.detect(cv_img_masks)
                zones = rectDetector.get_cv_zonesBGR(img, arrPoints)
                toShowZones = rectDetector.get_cv_zonesRGB(img, arrPoints)

                filters.draw_box(or_img, arrPoints, (0, 255, 0), 3)

                regionIds, stateIds, countLines = optionsDetector.predict(
                    zones)
                regionNames = optionsDetector.getRegionLabels(regionIds)

                textArr = textDetector.predict(zones)
                textArr = textPostprocessing(textArr, regionNames)

                print('Detected LP : "%s" in region [%s]' %
                      (textArr, regionNames))

                imgs = []

                for zone, points in zip(toShowZones, arrPoints):
                    swapped = cv2.cvtColor(zone, cv2.COLOR_BGR2RGB)  # channel swap; renamed from 'reversed' to avoid shadowing the built-in
                    encd = cv2.imencode('.jpg', swapped)[1]
                    imgs.append(str(base64.b64encode(encd)))

                # return: or_img, imgs, textArr

                or_img = cv2.imencode('.jpg', or_img)[1]
                output_img = str(base64.b64encode(or_img))

            return jsonify(image=output_img,
                           croped=json.dumps(imgs),
                           plates=json.dumps(textArr))
    return "lol"
Example #14
def detect_on_video():
    global graph

    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return "No file part"

        file = request.files['file']

        if file.filename == '':
            flash('No selected file')
            return "No selected file"

        if file and allowed_file_videos(file.filename):
            filename = secure_filename(file.filename)
            path_to_file = os.path.join('videos/', filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], path_to_file))
            cap = cv2.VideoCapture(
                os.path.join(app.config['UPLOAD_FOLDER'], path_to_file))
            # fr_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            # fps = int(cap.get(cv2.CAP_PROP_FPS))
            numbers = {}
            cnt = 0

            while cap.isOpened():
                ret, img = cap.read()
                cnt += 1

                if ret:
                    img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)

                    if cnt % 100 == 0:
                        with graph.as_default():
                            NP = nnet.detect([img])

                            cv_img_masks = filters.cv_img_mask(NP)

                            arrPoints = rectDetector.detect(cv_img_masks)
                            zones = rectDetector.get_cv_zonesBGR(
                                img, arrPoints)
                            toShowZones = rectDetector.get_cv_zonesRGB(
                                img, arrPoints)
                            filters.draw_box(img, arrPoints, (0, 255, 0), 3)

                            regionIds, stateIds, countLines = optionsDetector.predict(
                                zones)
                            regionNames = optionsDetector.getRegionLabels(
                                regionIds)

                            textArr = textDetector.predict(zones)
                            textArr = textPostprocessing(textArr, regionNames)

                            imgs = []

                            for zone, points in zip(toShowZones, arrPoints):
                                #reversed = cv2.cvtColor(zone, cv2.COLOR_BGR2RGB)
                                encd = cv2.imencode('.jpg', zone)[1]
                                imgs.append(str(base64.b64encode(encd)))

                            timestamp_ms = round(cap.get(cv2.CAP_PROP_POS_MSEC))  # renamed from 'time' to avoid shadowing the time module
                            numbers[timestamp_ms] = [imgs, textArr]

                            print('Detected LP : "%s" in region [%s]' %
                                  (textArr, regionNames))

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

                else:
                    break

            return json.dumps(numbers)
Example #15
def main(ss):
    trigger = datetime.time(hour=10, minute=0, second=0, microsecond=0)

    # specify the path to Mask_RCNN if you placed it outside the Nomeroff-net project
    NOMEROFF_NET_DIR = os.path.abspath('../')
    MASK_RCNN_DIR = os.path.join(NOMEROFF_NET_DIR, 'Mask_RCNN')
    MASK_RCNN_LOG_DIR = os.path.join(NOMEROFF_NET_DIR, 'logs')
    MASK_RCNN_MODEL_PATH = os.path.join(
        NOMEROFF_NET_DIR, "models/mask_rcnn_numberplate_0700.h5")
    OPTIONS_MODEL_PATH = os.path.join(
        NOMEROFF_NET_DIR, "models/numberplate_options_2019_03_05.h5")

    # If you use the GPU build of TensorFlow, switch to the GPU version of the model (named like *-gpu)
    mode = "gpu"
    OCR_NP_RU_TEXT = os.path.join(NOMEROFF_NET_DIR,
                                  "models/anpr_ocr_ru_3-{}.h5".format(mode))

    sys.path.append(NOMEROFF_NET_DIR)

    # Import license plate recognition tools.
    from NomeroffNet import filters, RectDetector, TextDetector, OptionsDetector, Detector, textPostprocessing

    # Initialize npdetector with default configuration file.
    nnet = Detector(MASK_RCNN_DIR, MASK_RCNN_LOG_DIR)

    # Load weights in keras format.
    nnet.loadModel(MASK_RCNN_MODEL_PATH)

    # Initialize rect detector with default configuration file.
    rectDetector = RectDetector()

    # Initialize text detector.
    # You may also use the GPU versions of the models.
    textDetector = TextDetector(
        {"ru": {
            "for_regions": ["ru"],
            "model_path": OCR_NP_RU_TEXT
        }})

    # Initialize the options detector.
    optionsDetector = OptionsDetector()
    optionsDetector.load(OPTIONS_MODEL_PATH)
    script_dir = os.path.dirname(os.path.realpath(__file__))
    main_directory = os.path.join(script_dir, "data")

    if not os.path.exists(main_directory):
        os.makedirs(main_directory)

    car_snapshot = len(os.listdir(main_directory))
    print(car_snapshot)

    caffe_net_filepath = os.path.join(script_dir,
                                      'MobileNetSSD_deploy.caffemodel')
    caffe_net_filepath = os.path.abspath(caffe_net_filepath)
    proto_filepath = os.path.join(script_dir,
                                  'MobileNetSSD_deploy.prototxt.txt')
    proto_filepath = os.path.abspath(proto_filepath)
    car_net = cv2.dnn.readNetFromCaffe(proto_filepath, caffe_net_filepath)
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
    limit_for_confidence = 0.7
    number_cascade = cv2.CascadeClassifier(
        'haarcascade_russian_plate_number.xml')

    src0 = 'rtsp://...'  # outside camera
    src1 = 'rtsp://...'  # inside camera
    inc_capture = VideoCaptureAsync(src0).start()
    out_capture = VideoCaptureAsync(src1).start()
    time.sleep(3.0)
    application = MyWindow(sock=ss)
    application.show()
    flag_cnt = False
    flag_outside = False
    start_save = True
    clear_log_files = dt.month  # logs are cleared monthly
    cnt = 0
    file = open(script_dir + "/found_numbers.txt", 'a')
    while True:
        # clear history every month
        if dt.month != clear_log_files:
            clear_log_files = dt.month
            with open(script_dir + "/found_numbers.txt", 'w'):
                pass
            for the_file in os.listdir(main_directory):
                file_path = os.path.join(main_directory, the_file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception as e:
                    print(e)

        # get a frame from the incoming camera and preprocess it
        try:
            _, frame = inc_capture.read()
        except AttributeError:
            print("Incoming camera isn't responding")
            break
        frame = resize(frame, width=720)
        # Crop the ROI
        height_frame, width_frame = frame.shape[:2]
        frame = frame[int(0.3 * height_frame):height_frame, 0:width_frame]

        # if the frame arrives in grayscale (night mode enabled), convert it to BGR
        try:
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        except cv2.error:
            pass

        # get a frame from the outgoing camera and preprocess it
        try:
            _, shot = out_capture.read()
        except AttributeError:
            print("Outcoming camera isn't responding")
            break
        shot = resize(shot, width=720)
        # Crop the ROI
        height_shot, width_shot = shot.shape[:2]
        shot = shot[int(0.3 * height_shot):height_shot, 0:width_shot]

        # if the frame arrives in grayscale (night mode enabled), convert it to BGR
        try:
            shot = cv2.cvtColor(shot, cv2.COLOR_GRAY2BGR)
        except cv2.error:
            pass

        copy_frame = frame.copy()
        copy_shot = shot.copy()

        # Pass the blob through the network and obtain the detections and predictions
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        car_net.setInput(blob)
        detections = car_net.forward()

        (H, W) = shot.shape[:2]
        blobbed = cv2.dnn.blobFromImage(cv2.resize(shot, (300, 300)), 0.007843,
                                        (300, 300), 127.5)
        car_net.setInput(blobbed)
        discoveries = car_net.forward()
        number_plate = ''
        # analyze incoming camera frame
        for ind in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with the predictions
            confidence = detections[0, 0, ind, 2]
            # Filter out weak detections by ensuring the confidence is greater than
            # the minimum confidence
            if confidence > limit_for_confidence:
                # Extract the index of the class labels from detections, then compute
                # the (x, y)-coordinates of the bounding box for the object
                idx = int(detections[0, 0, ind, 1])
                box = detections[0, 0, ind, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # Draw a bounding box for every detected car
                if CLASSES[idx] == 'car':
                    label = "{}: {:.2f}%".format(CLASSES[idx],
                                                 confidence * 100)
                    car_frame = frame[startY:endY, startX:endX]
                    car_number = car_frame.copy()

                    # look for a license plate within the car crop
                    plaques = number_cascade.detectMultiScale(car_number,
                                                              scaleFactor=1.3,
                                                              minNeighbors=4)
                    # if any license plate was found, try to recognize it
                    if len(plaques) > 0 and flag_cnt is False:
                        start = time.time()
                        try:
                            car_number = resize(car_number, width=1080)
                        except cv2.error:
                            print('cv2 error')
                        except ZeroDivisionError:
                            print('zero division')

                        try:
                            NP = nnet.detect([car_number])
                            # Generate image mask.
                            cv_img_masks = filters.cv_img_mask(NP)
                            # Detect points.
                            arrPoints = rectDetector.detect(cv_img_masks)
                            zones = rectDetector.get_cv_zonesBGR(
                                car_number, arrPoints)

                            # find standard
                            # A classifier (isHiddenIds) determines whether the plate
                            # text is deliberately hidden, so intentionally damaged or
                            # obscured plates are not recognized.
                            regionIds, isHiddenIds = optionsDetector.predict(
                                zones)
                            regionNames = optionsDetector.getRegionLabels(
                                regionIds)

                            # find text with postprocessing by standard
                            textArr = textDetector.predict(zones, regionNames)
                            textArr = textPostprocessing(textArr, regionNames)
                            number_plate = ''.join(textArr)
                            for (xx, yy, ww, hh) in plaques:
                                cv2.rectangle(
                                    copy_frame, (startX + xx, startY + yy),
                                    (startX + xx + ww, startY + yy + hh),
                                    (0, 0, 255), 2)

                            # if a license plate was recognized, compare it against the whitelist
                            if len(number_plate) > 0:
                                print(number_plate)
                                # if not flag_cnt:
                                flag, show = check_whitelist(number_plate)
                                if not flag:
                                    sending_flag = send_message(b'2', ss)
                                    if show is not None:
                                        application.ui.carNumber.setText(
                                            'Номер:' + number_plate)  # "Number:"
                                        application.ui.carNumber.adjustSize()
                                    if sending_flag:
                                        application.ui.barrierStatus.setText(
                                            'Шлагбаум: открыт')  # "Barrier: open"
                                        application.ui.barrierStatus.adjustSize()
                                        flag_cnt = True
                                    else:
                                        application.ui.barrierStatus.setText(
                                            'Команда \nне принята')  # "Command not accepted"
                                        application.ui.barrierStatus.adjustSize()
                        except ZeroDivisionError:
                            print('zero')
                        except RecursionError:
                            print('Recursion Error')
                        finish = time.time()
                        print(finish - start,
                              'at:',
                              datetime.datetime.now(),
                              sep=' ')
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(copy_frame, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # analyze the outgoing camera frame
        for index in np.arange(0, discoveries.shape[2]):
            # extract the confidence (i.e., probability) associated with the predictions
            confidence = discoveries[0, 0, index, 2]
            # Filter out weak detections by ensuring the confidence is greater than
            # the minimum confidence
            if confidence > limit_for_confidence:
                # Extract the index of the class labels from detections, then compute
                # the (x, y)-coordinates of the bounding box for the object
                idx = int(discoveries[0, 0, index, 1])
                box = discoveries[0, 0, index, 3:7] * np.array([W, H, W, H])
                (startX, startY, endX, endY) = box.astype("int")

                # Draw a bounding box for every detected car
                if CLASSES[idx] == 'car':
                    # heightShot, widthShot = shot.shape[:2]
                    label = "{}: {:.2f}%".format(CLASSES[idx],
                                                 confidence * 100)
                    car_shot = shot[startY:endY, startX:endX]
                    car_area = car_shot.copy()
                    height_car, width_car = car_area.shape[:2]

                    # cv2.rectangle(frame, (startX, startY), (endX, endY),
                    #              COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(copy_shot, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

                    if not flag_cnt:
                        if height_car > 0.3 * H or width_car > 0.3 * W:
                            if not flag_outside:
                                sending_flag = send_message(b'2', ss)
                                if sending_flag:
                                    application.ui.outCar.setText(
                                        "Авто на выезде: да")  # "Car at the exit: yes"
                                    application.ui.outCar.adjustSize()
                                    application.ui.barrierStatus.setText(
                                        'Шлагбаум: открыт')
                                    application.ui.barrierStatus.adjustSize()
                                    flag_outside = True
                                else:
                                    application.ui.barrierStatus.setText(
                                        'Команда \nне принята')
                                    application.ui.barrierStatus.adjustSize()
                                flag_cnt = True

        if start_save:
            if len(number_plate) > 0:
                number_with_time = str(number_plate) + " at: " + str(dt.now())
                file.write("%s\n" % str(number_with_time))
                car_snapshot += 1
                cv2.imwrite(main_directory + "/" + str(car_snapshot) + ".jpg",
                            frame)
                # time.sleep(5.0)
                print("The frame has been saved")
        cv2.imshow('Incoming', copy_frame)
        cv2.imshow('Outcoming', copy_shot)
        time.sleep(0.2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        if close_flag:
            break

        # while flag_cnt is True:
        if flag_cnt:
            cnt += 1
            # time.sleep(1.0)
            if cnt > 100:
                sending_flag = send_message(b'1', ss)
                if sending_flag:
                    application.ui.barrierStatus.setText('Шлагбаум: закрыт')  # "Barrier: closed"
                    application.ui.outCar.setText("Авто на выезде: нет")  # "Car at the exit: no"
                    application.ui.outCar.adjustSize()
                    application.ui.barrierStatus.adjustSize()
                    flag_outside = False
                    flag_cnt = False
                else:
                    application.ui.barrierStatus.setText(
                        'Команда \nне принята')
                    application.ui.barrierStatus.adjustSize()
                cnt = 0

    # release all cameras
    inc_capture.__exit__()
    out_capture.__exit__()
    file.close()
    cv2.destroyAllWindows()
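A hypothetical entry point for main(ss): it expects a connected socket for the barrier controller, and MyWindow implies a Qt application object created beforehand (address, port, and the Qt binding are assumptions):

import socket
import sys
from PyQt5.QtWidgets import QApplication  # assumed Qt binding

if __name__ == '__main__':
    app = QApplication(sys.argv)  # must exist before MyWindow is constructed inside main()
    ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ss.connect(('192.168.0.10', 9000))  # barrier controller address is an assumption
    main(ss)  # blocks in its own capture/display loop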