def capture_setting():
    # initialising the key information
    counter = 1
    marker = 1
    predicted_character_list = []
    predicted_color_list = []
    # start/end timestamps track how long it has been since a target was last seen
    end = time.time()
    start = time.time()
    config = Settings()
    save = Saving(config.name_of_folder, config.exist_ok)

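    # three capture modes are supported: "pc" (webcam or video file via OpenCV),
    # "pi" (Raspberry Pi camera via picamera) and "image" (a folder of still images)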
    if config.capture == "pc":
        if config.testing == "video":
            cap = cv2.VideoCapture(config.media)
        else:
            cap = cv2.VideoCapture(0)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)  # 800 default
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
            cap.set(cv2.CAP_PROP_FPS, 60)

            time.sleep(2)  # allow the camera time to start up
        print('Camera on')
        while True:
            if counter == 1:
                if config.pause:
                    input("Are you ready?")
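            # keep processing frames until the target has not been seen for 10 seconds after the first detection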
            if counter == 1 or end - start < 10:
                end = time.time()
                ret, frame = cap.read()
                if config.Step_camera:
                    cv2.imshow('frame', frame)
                    k = cv2.waitKey(5) & 0xFF
                    if k == 27:
                        break

                color, roi, frame, success = detection(frame, config)

                if success:
                    counter += 1

                    # record the time the target was last seen
                    start = time.time()

                    predicted_character, contour_image, chosen_image = character_recognition.character(color)
                    predicted_color, processed_image = colour_recognition.colour(color)

                    predicted_character_list.append(predicted_character)
                    predicted_color_list.append(predicted_color)

                    if config.save_results:
                        name_of_results = ["color", "roi", "frame", "contour_image", "processed_image", "chosen_image"]
                        image_results = [color, roi, frame, contour_image, processed_image, chosen_image]
                        for name, image in zip(name_of_results, image_results):
                            image_name = f"{marker}_{name}_{counter}.jpg"
                            if image is not None:
                                save.save_the_image(image_name, image)

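                # once the counter reaches 8, vote for the most common character and colour prediction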
                if counter == 8:
                    print("Starting Recognition Thread")
                    common_character = Counter(predicted_character_list).most_common(1)[0][0]
                    common_color = Counter(predicted_color_list).most_common(1)[0][0]
                    marker, counter = solution(counter, marker, common_character, common_color, save.save_dir)
                    predicted_character_list = []
                    predicted_color_list = []

            else:
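                # the target has not been seen for 10 seconds; finalise the predictions for the current marker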
                print("Starting Recognition Thread")
                common_character = Counter(predicted_character_list).most_common(1)[0][0]
                common_color = Counter(predicted_color_list).most_common(1)[0][0]
                marker, counter = solution(counter, marker, common_character, common_color, save.save_dir)
                predicted_character_list = []
                predicted_color_list = []

    elif config.capture == "pi":
        from picamera.array import PiRGBArray
        from picamera import PiCamera

        camera = PiCamera()
        camera.resolution = (1280, 720)
        camera.brightness = 50  # 50 is default
        camera.framerate = 90
        camera.awb_mode = 'auto'
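        # fix the shutter speed to the value chosen by the camera's auto-exposure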
        camera.shutter_speed = camera.exposure_speed
        cap = PiRGBArray(camera, size=(1280, 720))

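        # stream frames from the Pi camera straight into the PiRGBArray buffer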
        for image in camera.capture_continuous(cap, format="bgr", use_video_port=True):
            # keep capturing until the target has not been seen for 10 seconds after the first detection
            if counter == 1 or end - start < 10:
                frame = image.array
                end = time.time()

                color, roi, frame, success = detection(frame, config)
                
                if success:
                    counter += 1

                    # record the time the target was last seen
                    start = time.time()

                    predicted_character, contour_image, chosen_image = character_recognition.character(color)
                    predicted_color, processed_image = colour_recognition.colour(color)

                    predicted_character_list.append(predicted_character)
                    predicted_color_list.append(predicted_color)

                    if config.save_results:
                        name_of_results = ["color", "roi", "frame", "contour_image", "processed_image", "chosen_image"]
                        image_results = [color, roi, frame, contour_image, processed_image, chosen_image]
                        for name, image in zip(name_of_results, image_results):
                            image_name = f"{marker}_{name}_{counter}.jpg"
                            if image is not None:
                                save.save_the_image(image_name, image)

                if counter == 8:
                    print("Starting Recognition Thread")
                    common_character = Counter(predicted_character_list).most_common(1)[0][0]
                    common_color = Counter(predicted_color_list).most_common(1)[0][0]
                    marker, counter = solution(counter, marker, common_character, common_color, save.save_dir)
                    predicted_character_list = []
                    predicted_color_list = []

            else:
                print("Starting Recognition Thread")
                common_character = Counter(predicted_character_list).most_common(1)[0][0]
                common_color = Counter(predicted_color_list).most_common(1)[0][0]
                marker, counter = solution(counter, marker, common_character, common_color, save.save_dir)
                predicted_character_list = []
                predicted_color_list = []
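            # clear the PiRGBArray stream so it is ready for the next frame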
            cap.truncate(0)
                
    elif config.capture == "image":
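        # offline mode: run detection and recognition over a folder of still images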
        cap = [] # to store the names of the images
        data_dir = Path(config.media)

        # iterate over each supported extension in the folder and gather the matching files into a single list
        image_count = list(itertools.chain.from_iterable(data_dir.glob(pattern) for pattern in ('*.jpg', '*.png')))
        for name in image_count:
            filename = Path(name)
            cap.append(filename)
            test_image = cv2.imread(str(filename))
            marker = filename.stem  # the filename without its extension

            color, roi, frame, success = detection(test_image, config)

            if success:
                predicted_character, contour_image, chosen_image = character_recognition.character(color)
                predicted_color, processed_image = colour_recognition.colour(color)

                _, _ = solution(counter, marker, predicted_character, predicted_color, save.save_dir)

                if config.save_results:
                    name_of_results = ["color", "roi", "frame", "contour_image", "processed_image", "chosen_image"]
                    image_results = [color, roi, frame, contour_image, processed_image, chosen_image]
                    for name_suffix, image in zip(name_of_results, image_results):
                        image_name = f"{marker}_{name_suffix}.jpg"
                        if image is not None:
                            save.save_the_image(image_name, image)

                print("Detected and saved a target")
        print(f"Total images found: {len(image_count)}, frames appended: {len(cap)}")
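

# A minimal usage sketch, assuming this module is run directly and that Settings()
# provides the capture mode, media path and saving options from its own defaults.
if __name__ == "__main__":
    capture_setting()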