Example #1
def main():
    args = parser()
    check_arguments_errors(args)

    random.seed(3)  # deterministic bbox colors
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=args.batch_size)

    images = load_images(args.input)

    index = 0
    while True:
        # loop asking for new image paths if no list is given
        if args.input:
            if index >= len(images):
                break
            image_name = images[index]
        else:
            image_name = input("Enter Image Path: ")
        prev_time = time.time()
        image, detections = image_detection(image_name, network, class_names,
                                            class_colors, args.thresh)
        if args.save_labels:
            save_annotations(image_name, image, detections, class_names)
        darknet.print_detections(detections, args.ext_output)
        fps = int(1 / (time.time() - prev_time))
        print("FPS: {}".format(fps))
        if not args.dont_show:
            cv2.imshow('Inference', image)
            if cv2.waitKey() & 0xFF == ord('q'):
                break
        index += 1
Example #2
def inference(darknet_image_queue, detections_queue, fps_queue):
    """
    inference the captures into the darknet.
    function is also in charge of the printing/writing (fps, caputre time, detections).
    """
    # results log
    logname = args.export_logname
    """ OPTION: """
    # each time will open a new txt file
    logname_split = args.export_logname.rsplit(".", 1)
    index = 0
    while 1:
        # name_<index>.txt
        logname = logname_split[0] + '_' + str(index) + '.' + logname_split[1]
        # file not exists
        if not os.path.isfile(logname):
            break
        # trying next index
        index += 1
        """ OPTION: END """
    f = open(logname, "w")
    enter_time_queue = [0, 0, 0]
    exit_time_queue = [1, 1, 1]
    while cap.isOpened():
        # get new image from queue
        darknet_image = darknet_image_queue.get()
        # sample entering time
        prev_time = time.time()
        enter_time_queue.pop(0)
        enter_time_queue.append(prev_time)
        # detect image (inference image in neural network)
        detections = darknet.detect_image(network,
                                          class_names,
                                          darknet_image,
                                          thresh=args.thresh)
        # store result in queue
        detections_queue.put(detections)
        # calculate the instantaneous FPS for this frame
        fps = float(1 / (time.time() - prev_time))
        exit_time_queue.pop(0)
        exit_time_queue.append(time.time())
        # store fps in queue
        fps_queue.put(int(fps))
        # calculate the average FPS over the last 3 frames (for monitoring)
        fps_list = [
            1. / (m - n) for m, n in zip(exit_time_queue, enter_time_queue)
        ]
        print("Average FPS over last 3 frames is: {:.2f}".format(
            mean(fps_list)))
        # store capture time to file (in ms, for ground station)
        f.write("time: {}\n".format(str(round(capture_time_queue.get() *
                                              1000))))
        # store bbox to file
        height_ratio = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / height
        width_ratio = cap.get(cv2.CAP_PROP_FRAME_WIDTH) / width
        darknet.print_detections(detections, height_ratio, width_ratio, f)
        f.write("\n")
    cap.release()
    f.close()
    print("\nFinished successfully, results: {}".format(logname))
Example #3
def inference(darknet_image_queue, detections_queue, fps_queue):
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)
        detections_queue.put(detections)
        fps = int(1/(time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        darknet.print_detections(detections, args.ext_output)
    cap.release()
Example #4
def main():
    args = parser()
    check_arguments_errors(args)

    random.seed(3)  # deterministic bbox colors
    t = time.time()
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=args.batch_size)
    t1 = time.time()
    print("Load weight takes: {}s".format(t1 - t))
    image_names = load_images(args.input)
    output_dir = args.output_dir
    if len(image_names) > 1:
        print("Detecting for {} images".format(len(image_names)))
        for i in range(len(image_names)):
            image_name = image_names[i]
            image, detections = image_detection(image_name, network,
                                                class_names, class_colors,
                                                args.thresh)
            expression = convert_detections_to_expression(detections)
            print(expression)

            save_annotations(image_name, image, detections, class_names,
                             output_dir)

    elif len(image_names) == 1:
        image_name = image_names[0]
        image, detections = image_detection(image_name, network, class_names,
                                            class_colors, args.thresh)

        if args.save_labels:
            save_annotations(image_name, image, detections, class_names,
                             output_dir)
        darknet.print_detections(detections, args.ext_output)
        if not args.dont_show:  # len(image_names) == 1 in this branch
            cv2.imshow('Inference', image)
            cv2.waitKey()

        expression = convert_detections_to_expression(detections)
        expression = normalize_expression(expression)
        print(expression)

        roots = solve.parse_and_solve_and_round(expression, 0.00001)
        print(roots)

        latex = convert_infix_to_latex(expression)

        print(latex)

    t2 = time.time()
    print("Detection takes: {}s".format(t2 - t1))
Example #5
def inference(self):
    while self.cap.isOpened():
        darknet_image = self.darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(self.network, self.class_names, darknet_image, thresh=self.args.thresh)
        self.detections_queue.put(detections)
        fps = int(1/(time.time() - prev_time))
        self.fps_queue.put(fps)
        print("FPS: {}".format(fps))
        # print good, bad, none
        darknet.print_detections(detections, self.args.ext_output)
    self.cap.release()
Example #6
    def DetectObjects(self):
        if self.frame is None:
            self.detections = None
            return
        frameRGB = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
        frameResized = cv2.resize(frameRGB,
                                  (self.darknetWidth, self.darknetHeight),
                                  interpolation=cv2.INTER_LINEAR)

        darknet.copy_image_from_bytes(self.darknetImage,
                                      frameResized.tobytes())
        self.detections = darknet.detect_image(self.network,
                                               self.classNames,
                                               self.darknetImage,
                                               thresh=self.args.thresh)
        darknet.print_detections(self.detections, self.args.ext_output)
Example #7
def inference(darknet_image_queue, detections_queue, fps_queue):
    nth_frame = 0
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network,
                                          class_names,
                                          darknet_image,
                                          thresh=args.thresh)
        original_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        original_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        #print("ow, oh", original_width, original_height)
        x_scale = original_width / width
        y_scale = original_height / height
        if len(detections) != 0:
            temp_detections = list()
            for i in range(len(detections)):
                x_detection = detections[i][2][0] * x_scale
                y_detection = detections[i][2][1] * y_scale
                w_detection = detections[i][2][2] * x_scale
                h_detection = detections[i][2][3] * y_scale
                temp_detections.append([
                    detections[i][0], detections[i][1],
                    (x_detection, y_detection, w_detection, h_detection)
                ])
            detections = temp_detections

        if args.output_prefix is not None and len(detections) != 0:
            new_file = open(args.output_prefix + str(nth_frame) + ".txt",
                            mode='w')
            for i in range(len(detections)):
                x = detections[i][2][0] / cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                y = detections[i][2][1] / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                w = detections[i][2][2] / cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                h = detections[i][2][3] / cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                label = detections[i][0]
                label = class_names.index(label)
                new_file.write("{} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(
                    label, x, y, w, h))
            new_file.close()
        detections_queue.put(detections)
        fps = int(1 / (time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        darknet.print_detections(detections, args.ext_output)
        nth_frame += 1
    cap.release()
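
The rescaling in Example #7 is two small coordinate transforms: scale boxes from network-input size back to frame size, then normalize for YOLO-format label files. A standalone sketch under that reading; both helper names are illustrative, not part of darknet:

def scale_bbox(bbox, x_scale, y_scale):
    """Scale a darknet (cx, cy, w, h) box from network-input
    coordinates to original-frame coordinates."""
    cx, cy, w, h = bbox
    return (cx * x_scale, cy * y_scale, w * x_scale, h * y_scale)

def to_yolo_line(class_id, bbox, frame_w, frame_h):
    """Format a frame-coordinate box as a YOLO label line:
    class cx cy w h, each normalized to [0, 1]."""
    cx, cy, w, h = bbox
    return "{} {:.4f} {:.4f} {:.4f} {:.4f}".format(
        class_id, cx / frame_w, cy / frame_h, w / frame_w, h / frame_h)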
Example #8
def inference(darknet_image_queue, detections_queue, fps_queue):
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)
        detections_queue.put(detections)
        fps = int(1/(time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        darknet.print_detections(detections, args.ext_output)
    cap.release()
    if class_names == "fresh":

        fresh = 0

    if class_names == "ripe":

        ripe = 0

    if class_names == "raw":

        raw = 0

    if class_names == "flowering":

        flowering = 0

    if class_names == "alternaria":

        alternaria = 0

    if class_names == "cedar":

        cedar = 0

    if class_names == "fire-blight":

        fire_blight = 0

    if class_names == "leaf-roller":

        leaf_roller = 0

    if class_names == "fungal":

        fungal = 0
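
If the intent behind those flags is a per-class tally, a dictionary keyed on label is more direct. A minimal sketch, assuming the usual (label, confidence, bbox) tuples returned by darknet.detect_image:

from collections import Counter

def count_labels(detections):
    """Tally detections per class label."""
    return Counter(label for label, confidence, bbox in detections)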
Example #9
def main():
    pub = rospy.Publisher("/yolo_traffic")  #WS_ADDED
    args = parser()
    check_arguments_errors(args)
    random.seed(3)  # deterministic bbox colors
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=args.batch_size)

    images = load_images(args.input)

    index = 0
    while True:
        # loop asking for new image paths if no list is given
        if args.input:
            if index >= len(images):
                break
            image_name = images[index]
        else:
            image_name = input("Enter Image Path: ")
        prev_time = time.time()
        image, detections = image_detection(image_name, network, class_names,
                                            class_colors, args.thresh)
        # Call Yolo publishing ftn

        rt = publish_yolo(detections)  #WS_ADDED
        if rt != 0:
            pub.publish(rt)  #WS_ADDED
        else:
            rospy.loginfo("TRAFFIC LIGHT UNDETECTED")  # ROS_INFO is the C++ macro; rospy.loginfo is the Python equivalent
        if args.save_labels:
            save_annotations(image_name, image, detections, class_names)
        darknet.print_detections(detections, args.ext_output)
        fps = int(1 / (time.time() - prev_time))
        print("FPS: {}".format(fps))
        if not args.dont_show:
            cv2.imshow('Inference', image)
            if cv2.waitKey() & 0xFF == ord('q'):
                break
        index += 1
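
Note that rospy.Publisher in Example #9 omits the required message class, so the call would raise as written. A corrected setup sketch; the std_msgs/String payload type and queue_size are assumptions, since the original does not say what publish_yolo returns:

import rospy
from std_msgs.msg import String

def make_traffic_publisher():
    # The message class (String) is an assumption; the original omits it.
    rospy.init_node("yolo_traffic_node", anonymous=True)
    return rospy.Publisher("/yolo_traffic", String, queue_size=10)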
Example #10
def get_most_confident_bbox(class_name):
    # First run the image through Yolo to check whether the target class is present
    image_original, detections = darknet_images.image_detection(
        image_name, network, class_names, class_colors, thresh)
    darknet.print_detections(detections, ext_output)
    # take the detection that had the most confidence and write the coordinates to a txt file
    # detections is a list of tuples: (class, confidence, (coordinates))
    picked_class = -1
    for i in range(len(detections)):
        _tuple = detections[i]
        if _tuple[0] == class_name:
            coordinates = _tuple[2]
            with open("coordinates.txt", "w+") as coord_file:
                # we convert the coordinates to relative because that's how it is used in the C files
                coordinates_rel = darknet_images.convert2relative(
                    image_original, coordinates)
                for _coord in coordinates_rel:
                    coord_file.write(str(_coord) + "\n")
                picked_class = i
            break
    return picked_class, detections
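
get_most_confident_bbox relies on detect_image returning detections already sorted by confidence; if a darknet.py version does not guarantee that order, an explicit max() is safer. A hedged sketch:

def most_confident(detections, class_name):
    """Return the highest-confidence detection of class_name, or None.
    float() guards against versions that report confidence as a string."""
    matches = [d for d in detections if d[0] == class_name]
    return max(matches, key=lambda d: float(d[1]), default=None)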
Example #11
def inference(darknet_image_queue, detections_queue, fps_queue, time_queue,
              time_list, pedestrain_frame):
    while cap.isOpened():
        detect = []
        detections = []  # ensure defined even when the guard below skips detection
        if (not darknet_image_queue.empty()) and (not time_queue.empty()):
            darknet_image = darknet_image_queue.get(False)
            timestamp = time_queue.get(False)

            prev_time = time.time()
            if (darknet_image is not None) and (timestamp is not None):
                detections = darknet.detect_image(network,
                                                  class_names,
                                                  darknet_image,
                                                  timestamp,
                                                  thresh=args.thresh)
            """
            for label, confidence, bbox, timestamp in detect_people:
                pedestrain_frame.put(timestamp+clip_t*1000)
                time_list.append(timestamp)
                detect.append((label,confidence,bbox))
            detect_people_queue.put(detect, timeout =4)
            """
            if len(detections):
                for label, confidence, bbox, times in detections:
                    if pedestrain_frame.empty():
                        pedestrain_frame.put(times + clip_t * 1000,
                                             block=False)
                    time_list.append(times)
                    detect.append((label, confidence, bbox))
            if detections_queue.empty():
                detections_queue.put(detect, block=False)

            fps = int(1 / (time.time() - prev_time))
            if fps_queue.empty():
                fps_queue.put(fps, block=False)
            print("FPS: {}".format(fps))
            darknet.print_detections(detections, args.ext_output)
    cap.release()
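
The empty()-check-then-put pattern above is racy: another thread can fill the queue between the check and the put. A small sketch of a check-free alternative:

from queue import Full

def offer(q, item):
    """Enqueue without blocking; silently drop the item when full.
    Replaces the racy empty()-check-then-put pattern."""
    try:
        q.put_nowait(item)
    except Full:
        pass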
Example #12
def inference(darknet_image_queue, detections_queue, fps_queue):
    while cap.isOpened():
        darknet_image = darknet_image_queue.get()
        prev_time = time.time()
        detections = darknet.detect_image(network,
                                          class_names,
                                          darknet_image,
                                          thresh=args.thresh)
        detections_queue.put(detections)

        ## Code for fire detection
        ## detections is a list, fire is at position [0][0]
        ##
        if len(detections) > 0:  ## if anything is detected, treat it as fire and trigger the alarm
            print("\nFIRE FOUND\n")
            #os.system("python3 sendText.py")

        fps = int(1 / (time.time() - prev_time))
        fps_queue.put(fps)
        print("FPS: {}".format(fps))
        darknet.print_detections(detections, args.ext_output)
        darknet.free_image(darknet_image)
    cap.release()
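
Checking len(detections) alone raises the alarm on any detected class. If the network knows classes besides fire, filtering by label is safer; a sketch in which the label string and confidence floor are assumptions:

def contains_fire(detections, label="fire", min_conf=0.5):
    """True when any detection matches the fire class above a
    confidence floor."""
    return any(d[0] == label and float(d[1]) >= min_conf for d in detections)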
Example #13
while cap.isOpened():
    # read a frame (assumed loop context; this excerpt begins mid-loop)
    ret, frame = cap.read()
    if not ret:
        break

    # Calculate for FPS
    t_prev = time.time()

    # Fix image format
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    
    # convert to darknet format, save to " darknet_image "
    darknet_image = darknet.make_image(width, height, 3)
    darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())    
    
    # inference
    detections = darknet.detect_image(network, class_names, darknet_image, thresh=thre)
    darknet.print_detections(detections, show_coordinates)
    darknet.free_image(darknet_image)

    # draw bounding box
    image = darknet.draw_boxes(detections, frame_resized, class_colors)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # back to BGR for cv2.imshow

    # Show Image and FPS
    fps = int(1/(time.time()-t_prev))
    cv2.rectangle(image, (5, 5), (75, 25), (0,0,0), -1)
    cv2.putText(image, f'FPS {fps}', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow(win_title, cv2.resize(image, show_size))

    # press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break
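
The per-frame sequence above (convert, resize, make_image, copy bytes, detect, free) is darknet's image lifecycle. A reusable sketch of it, assuming the same darknet Python bindings and cv2 import as the surrounding examples:

def detect_frame(network, class_names, frame, width, height, thresh=0.25):
    """Run one BGR frame through darknet and return its detections."""
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    darknet_image = darknet.make_image(width, height, 3)
    try:
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
        return darknet.detect_image(network, class_names, darknet_image,
                                    thresh=thresh)
    finally:
        # always free the C-side image buffer, even if detection raises
        darknet.free_image(darknet_image)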
Example #14
def main():
    # read user's input
    args = parser()
    print("publish to ", args.topic)
    # initialize darknet
    network, class_names, class_colors = darknet.load_network(args.config_file,
                                                              args.data_file,
                                                              args.weights,
                                                              batch_size=1)
    # initialize mqtt client and connect to broker
    client = mqtt.Client()
    client.connect("localhost")

    # read input, webcam or video file
    input_path = str2int(args.input)
    cap = cv2.VideoCapture(input_path)

    # if using webcam, set video fps, width and height
    if type(input_path) == int:
        cap.set(cv2.CAP_PROP_FPS, 1)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 416)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 416)

    # get video properties
    fps = cap.get(cv2.CAP_PROP_FPS)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # record when to take photo
    # take a shot every # seconds
    seconds = 3
    multiplier = int(fps * seconds)

    # set saved video format and properties
    if args.out_filename is not None:
        video = set_saved_video(cap, args.out_filename, (w, h), 1 / seconds)
    if args.out_img is not None:
        image_folder = args.out_img + "/" + datetime.datetime.now(
            datetime.timezone.utc).astimezone().isoformat(
                "T", "seconds").replace(':', '_')
        if not os.path.exists(image_folder):
            os.makedirs(image_folder)

    # Init sql
    if args.out_db is not None:
        conn = sqlite3.connect(
            args.out_db +
            datetime.datetime.now(datetime.timezone.utc).astimezone(
            ).isoformat("T", "seconds").replace(':', '_') + '.sqlite')
        cur = conn.cursor()
        cur.execute(
            'CREATE TABLE inference_result (frameID INTEGER, unique_id TEXT, time TEXT, Walk_stand INTEGER, Car INTEGER, Van INTEGER, Bus INTEGER, Motorcycle INTEGER, Riding_bike INTEGER, Children INTEGER, Skateboarder INTEGER, Queuing INTEGER, Sit INTEGER, Truck INTEGER, Riding_scooter INTEGER)'
        )
        conn.commit()

    count = 0
    while cap.isOpened():
        count += 1
        ret, frame = cap.read()

        if not ret:
            break
        frameID = count
        if frameID % multiplier == 0:
            if args.vflip:
                frame = cv2.flip(frame, 0)
            if args.hflip:
                frame = cv2.flip(frame, 1)
            anno_image, detections = image_detection(frame, network,
                                                     class_names, class_colors,
                                                     .25)
            darknet.print_detections(detections, args.ext_output)

            # format result
            unique_id = str(uuid.uuid4())
            re = count_result(detections, class_names, unique_id)
            # send result to mqtt
            client.publish(args.topic, json.dumps(re))

            # Store result to database

            if args.out_db is not None:
                # save result to database
                cur.execute(
                    'INSERT INTO inference_result (frameID, unique_id, time, Walk_stand, Car, Van, Bus, Motorcycle, Riding_bike, Children, Skateboarder, Queuing, Sit, Truck, Riding_scooter) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                    [frameID, unique_id, re['sampleTime']] + [
                        re["count" + name.title().replace(" ", "")]
                        for name in class_names[:12]
                    ])
                conn.commit()

            # show image
            if not args.dont_show:
                cv2.imshow('Inference', frame)

            # write video to file
            if args.out_filename is not None:
                if args.save_infer:
                    anno_image = cv2.resize(anno_image, (w, h),
                                            interpolation=cv2.INTER_LINEAR)
                    video.write(anno_image)
                else:
                    video.write(frame)

            # write images to folder
            if args.out_img is not None:
                cv2.imwrite(
                    image_folder + "/" + str(
                        round(
                            datetime.datetime.now(datetime.timezone.utc).
                            astimezone().timestamp())) + '_' + unique_id +
                    ".jpg", frame)
                #cv2.imwrite(image_folder + "/" + unique_id + ".jpg", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    if args.out_filename is not None:
        video.release()
    cv2.destroyAllWindows()
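
Example #14 processes one frame every few seconds via frameID % multiplier; when a capture reports its FPS as 0 (not unusual for webcams), multiplier becomes 0 and the modulo raises ZeroDivisionError. A small guard sketch, with an assumed fallback FPS:

def frames_between_shots(reported_fps, seconds, fallback_fps=30):
    """Frames to skip so one frame is processed every `seconds` seconds,
    guarding against captures that report a non-positive FPS."""
    fps = reported_fps if reported_fps and reported_fps > 0 else fallback_fps
    return max(1, int(fps * seconds))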
Example #15
    index = 0
    while True:
        # loop asking for new image paths if no list is given
        if args.input:
            if index >= len(images):
                break
            image_name = images[index]
        else:
            image_name = input("Enter Image Path: ")
        prev_time = time.time()
        image, detections = image_detection(
            image_name, network, class_names, class_colors, args.thresh
            )
        if args.save_labels:
            save_annotations(image_name, image, detections, class_names)
        darknet.print_detections(detections, args.ext_output)
        fps = int(1/(time.time() - prev_time))
        print("FPS: {}".format(fps))
        if not args.dont_show:
            cv2.imshow('Inference', image)
            if cv2.waitKey() & 0xFF == ord('q'):
                break
        index += 1


if __name__ == "__main__":
    # uncomment the next line for an example of batch processing
Example #16
    def process(self, ss):
        """
        index = 0
        while True:
            prev_time = time.time()
            ss = ImageGrab.grab((0, 0, 1500, 900))
            img_np = np.array(ss)
            im_rgb = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
            cv2.imwrite("temp.jpg", im_rgb)
            # loop asking for new image paths if no list is given
            image, detections = self.image_detection(
                "temp.jpg", self.network, self.class_names, self.class_colors, self.args.thresh
                )
            if self.args.save_labels:
                self.save_annotations(image_name, image, detections, class_names)
            darknet.print_detections(detections, self.args.ext_output)
            print(detections)
            trafficSign = TrafficSign()
            if detections != []:
                for x in detections:
                    if x[0] == "ileriSol":
                        trafficSign.trafficSignArray[2] = 1
                        cv2.imshow("ileriSol", image[int(x[2][3]):int(x[2][0]), int(x[2][2]):int(x[2][1])])
            else:
                trafficSign.trafficSignArray[2] = 0
            trafficSign.printingAllSigns()

            fps = int(1/(time.time() - prev_time))
            print("FPS: {}".format(fps))
            if not self.args.dont_show:
                cv2.imshow('Inference', image)
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            index += 1

        """
        #ss = ImageGrab.grab((0, 0, 1500, 900))
        img_np = np.array(ss)
        im_rgb = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        cv2.imwrite("temp.jpg", im_rgb)
        # run detection on the frame just written to temp.jpg
        image, detections = self.image_detection("temp.jpg", self.network,
                                                 self.class_names,
                                                 self.class_colors,
                                                 self.args.thresh)
        if self.args.save_labels:
            self.save_annotations("temp.jpg", image, detections, self.class_names)
        darknet.print_detections(detections, self.args.ext_output)
        #print(detections)
        #return detections
        # Map each recognized sign label to its slot in trafficSignArray.
        sign_index = {
            "park": 0, "parkYasak": 1, "durak": 2, "sol": 3, "sag": 4,
            "solaDonulmez": 5, "sagaDonulmez": 6, "ileriSol": 7,
            "ileriSag": 8, "dur": 9, "30": 10, "20": 11,
            "girisYok": 13, "tasitTrafigineKapali": 14,
        }
        if detections:
            # Reset every flag once, then set the flags for the detected
            # signs. (The original if/else chain re-zeroed all other flags
            # on every detection, so only the last detection survived.)
            for idx in sign_index.values():
                self.trafficSign.trafficSignArray[idx] = 0
            for x in detections:
                label = x[0]
                if label in sign_index:
                    self.trafficSign.trafficSignArray[sign_index[label]] = 1
                elif label == "yesil":    # green light
                    self.trafficSign.trafficSignArray[12] = 1
                elif label == "kirmizi":  # red light
                    self.trafficSign.trafficSignArray[12] = 0
        else:
            # No detections: clear every flag and mark the light as unknown.
            for idx in sign_index.values():
                self.trafficSign.trafficSignArray[idx] = 0
            self.trafficSign.trafficSignArray[12] = -1

            #cv2.imshow("ileriSol", image[int(x[2][3]):int(x[2][0]), int(x[2][2]):int(x[2][1])])
            #cv2.imshow("ileriSol", image[int(x[2][3]):int(x[2][0]), int(x[2][2]):int(x[2][1])])
        self.trafficSign.printingAllSigns()
        if not self.args.dont_show:
            cv2.imshow('Inference', image)