Example #1
def background(queue_foreground_to_background, queue_background_to_foreground,
               queue_stdout):
    # Redirect stdout into a custom stream class that sends everything written to the queue given in its constructor.
    sys.stdout = StreamRedirect(queue_stdout)

    while True:  # Endless loop.
        # Get the message from the GUI with the path to the image (or a message indicating the end of the program).
        gui_input_image_path, retrain_flag = queue_foreground_to_background.get()
        if gui_input_image_path != 'End.':  # 'End.' is sent when the GUI is closed.
            try:
                # Get the image name from the given path.
                input_image_name = gui_input_image_path.split('/')[-1]

                # Picture segmentation.
                construct_output(
                    indent_level=-1,
                    message="Input image: {}\n".format(input_image_name))

                # Build the OS-dependent path to the image.
                input_image_path = os.path.abspath(
                    os.path.join(str(Path(__file__).parent.parent),
                                 'resources', 'input_images'))
                input_image_path = os.path.join(input_image_path,
                                                input_image_name)

                # Process the image to get the individual elements.
                process_image(input_image_path)

                # Convolutional network analysis.
                names, durations = conv_network_analysis(
                    input_image_name, retrain_flag=retrain_flag)

                # Generate the results format for the midi constructor.
                results = generate_results(input_image_name, names, durations)
                # Construct the midi.
                construct_midi(results, input_image_name)

                # Send the information about the successful operation.
                queue_background_to_foreground.put(
                    ('Success.', input_image_name))
            except Exception:
                # Catch any error and notify the GUI.
                queue_background_to_foreground.put(
                    ('ERROR! Check logs file.', 'None.'))
        else:
            break
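The StreamRedirect class used above is not defined in these examples; a minimal sketch of what it would need to provide (an assumption: a file-like object whose write() forwards text into the queue) could look like this:

class StreamRedirect:
    """Hypothetical sketch: a file-like object that forwards everything
    written to sys.stdout into a multiprocessing queue."""

    def __init__(self, queue_stdout):
        self.queue_stdout = queue_stdout

    def write(self, text):
        self.queue_stdout.put(text)  # Forward the printed text to the GUI process.

    def flush(self):
        pass  # Required by the file-like protocol; nothing is buffered here.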
Example #2
def process_image(input_image_path):
    """
    Main function for image processing.
    Calls the module for row splitting (first), and then the module for individual element extraction (second).
    :param input_image_path: Path to the image that needs to be processed.
    """
    img_name = os.path.basename(input_image_path)  # Extract the image name from the given path.
    construct_output(
        indent_level="block",
        message="Processing the input image ({}).".format(img_name))

    split_into_rows(input_image_path)  # First, extract the rows.
    extract_elements_by_template_matching(img_name)  # Then, extract elements from those rows.

    construct_output(indent_level="block",
                     message="Input image processing done.")
def conv_network_analysis(input_image_name, retrain_flag=False):
    """
    Main function for convolutional network analysis.
    Calls the network for recognizing note values.
    Calls the network for analyzing note durations.
    :param input_image_name: Name of the image that is being analyzed.
    :param retrain_flag: Flag that indicates if the retraining is needed.
    """
    construct_output(
        indent_level="block",
        message=
        "Analyzing the elements of the image ({}) with a convolutional network."
        .format(input_image_name))

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:  # Guard against machines without a visible GPU.
        tf.config.experimental.set_memory_growth(gpus[0], True)

    if retrain_flag:
        # Import the dataset and split it into training and testing sets.
        print('RETRAINING THE NETWORKS (this may take a while).')
        (test_arr, test_label), (train_arr, train_label) = prepare_new_data(
            test_data_percentage=0)
        value_processing_conv_net.train_note_values_conv_net(
            test_arr, test_label, train_arr, train_label)
        duration_processing_conv_net.train_note_duration_conv_net(
            test_arr, test_label, train_arr, train_label)

    # Classify the note values using the network weights saved on disk.
    value_names = value_processing_conv_net.analyze_using_saved_data(
        input_image_name)
    # Classify the note durations using the network weights saved on disk.
    durations = duration_processing_conv_net.analyze_using_saved_data(
        input_image_name)

    construct_output(
        indent_level="block",
        message=
        "Done analyzing the elements of the image ({}) with a convolutional network."
        .format(input_image_name))

    return value_names, durations
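construct_output is called throughout these examples but not defined in them; a plausible minimal stand-in (assumptions: indent_level is a small integer, -1, or the string "block", and the function only prints) could be:

def construct_output(indent_level, message):
    # Hypothetical stand-in: indent positive integer levels; print
    # section-level messages ("block", 0, or -1) without indentation.
    if isinstance(indent_level, int) and indent_level > 0:
        print("    " * indent_level + message)
    else:
        print(message)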
Example #4
def save_elements_standard_size(max_size_templates, img_rgb, image_h, image_w,
                                img_name, row_no, template_names,
                                template_recognized_list):
    """ This function takes the input templates and resizes them to standard dimensions (200 px x 200 px).

    :param max_size_templates: List of templates to be resized.
    :param img_rgb: Original image used to resize templates.
    :param image_h: Original image height.
    :param image_w: Original image width.
    :param img_name: Original image name, used in naming the individual element images that are produced.
    :param row_no: Row number.
    :param template_names: Names of the template files that were found.
    :param template_recognized_list: List that indicates if the templates are recognized just by their name.
    :return: list: A list containing the standardized image slices (used only for visualisation).
    """

    dir_name = os.path.join(str(Path(__file__).parent.parent.parent),
                            'resources', 'input_images')

    dir_name = os.path.join(dir_name, 'input_images_individual_elements',
                            img_name[:-4])
    try:  # Try creating a directory.
        construct_output(
            indent_level=1,
            message="Creating a folder for the image elements: {}".format(
                dir_name))
        os.mkdir(dir_name)
    except OSError:
        construct_output(indent_level=1,
                         message="Folder already exists: {}".format(dir_name))

    slices = []
    file_reset = False  # Flag that tracks whether the output file has been reset for this image.
    for index, el in enumerate(max_size_templates):  # Iterate through the found elements.
        left_coord, right_coord = el  # Leftmost and rightmost 'x' coordinates of the element.
        img_slice = img_rgb[0:image_h, left_coord:right_coord]  # Get the slice from the original image.

        if abs(right_coord - left_coord) < 200:  # If the slice is too narrow...
            # Get the rightmost and leftmost columns of the element, each 1 px wide.
            rightmost_column = img_rgb[0:image_h, right_coord - 1:right_coord]
            leftmost_column = img_rgb[0:image_h, left_coord:left_coord + 1]
            while img_slice.shape[1] < 200:  # While the width of the slice is below 200 px...
                img_slice = np.concatenate((img_slice, rightmost_column),
                                           axis=1)  # Expand to the right by 1 px.
                img_slice = np.concatenate((leftmost_column, img_slice),
                                           axis=1)  # Expand to the left by 1 px.
            if img_slice.shape[1] == 201:  # If the padding overshot by 1 px...
                img_slice = img_slice[0:image_h, 0:200]  # Crop the slice to exactly 200 px wide.

        elif abs(right_coord - left_coord) >= 200:  # If the slice is too wide...
            middle = int(abs(right_coord + left_coord) / 2)  # Find the middle of the current slice.
            if (middle - 100) > 0 and (middle + 100) < image_w:  # If cropping around the middle is possible...
                img_slice = img_rgb[0:image_h, middle - 100:middle + 100]  # Crop the slice to 200 px wide.

        if img_slice.shape[1] == 200:  # If the horizontal standardization succeeded...

            if img_slice.shape[0] >= 200:  # If the slice is too tall...
                # Crop the slice to exactly 200 px tall, keeping it vertically centered.
                top = (img_slice.shape[0] - 200) // 2
                img_slice = img_slice[top:top + 200, 0:img_slice.shape[1]]

            elif img_slice.shape[0] < 200:  # If the slice is too short...
                # Get the topmost and bottommost pixel rows of the image, each 1 px tall.
                upmost_row = img_rgb[0:1, 0:img_slice.shape[1]]
                downmost_row = img_rgb[image_h - 2:image_h - 1, 0:img_slice.shape[1]]
                while img_slice.shape[0] < 200:  # While the height of the slice is below 200 px...
                    img_slice = np.concatenate((img_slice, upmost_row),
                                               axis=0)  # Expand downwards by 1 px.
                    img_slice = np.concatenate((downmost_row, img_slice),
                                               axis=0)  # Expand upwards by 1 px.
                if img_slice.shape[0] == 201:  # If the padding overshot by 1 px...
                    img_slice = img_slice[0:200, 0:img_slice.shape[1]]  # Crop the slice to exactly 200 px tall.
        if img_slice.shape[0] == 200 and img_slice.shape[1] == 200:  # If the slice has the right dimensions...
            # The first part of the name.
            slice_name = img_name[:-4] + "_row" + row_no + "_" + "slice" + str(index)
            if template_recognized_list[index]:  # If the template was recognized by name, add that name.
                slice_name = slice_name + "_" + template_names[index] + ".png"  # Second part of the name.
                # Get the path to the directory that will contain the individual elements that were recognized.
                slice_path = os.path.join(dir_name, "recognized")
                try:  # Make the directory, if it doesn't exist.
                    os.mkdir(slice_path)
                except OSError:
                    pass
                try:
                    file_name = img_name[:-4] + ".txt"  # Name for the file that will list the recognized elements.
                    # Generate the file path.
                    file_path = os.path.abspath(
                        os.path.join(str(Path(slice_path)), file_name))
                    # Reset the file if an image with the same name was already read before.
                    if "_row0_" in slice_name and file_reset is False:
                        open(file_path, 'w').close()
                        file_reset = True
                    # Append the new information to the file.
                    construct_output(
                        indent_level=2,
                        message="Writing the information about a recognized element ({}) into: '{}'"
                        .format(slice_name, file_path))
                    with open(file_path, "a") as recognized_templates_file:
                        recognized_templates_file.write(slice_name[:-4] + "\n")
                except OSError:
                    pass
            else:  # Otherwise, mark the element as "UNKNOWN" in the image name.
                slice_name = slice_name + "_UNKNOWN" + ".png"  # Second part of the name for unrecognized elements.
                slice_path = os.path.join(dir_name, "unrecognized")
                try:  # Make the directory, if it doesn't exist.
                    os.mkdir(slice_path)
                except OSError:
                    pass
                slice_path = os.path.abspath(
                    os.path.join(slice_path, slice_name))  # Image write location.
                construct_output(
                    indent_level=2,
                    message="Saving element: '{}' into: '{}'".format(
                        slice_name, slice_path))
                cv2.imwrite(slice_path, img_slice)  # Write the image.

            slices.append(img_slice)  # Append the standardized slice.

    return slices
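The symmetric 1 px padding above is easiest to follow on a toy array; this illustrative snippet pads a 3-column slice to a 6-column target and shows why the extra 1 px crop is needed when the loop overshoots:

import numpy as np

img_slice = np.ones((4, 3), dtype=np.uint8)  # 4 px tall, 3 px wide.
left_col = img_slice[:, 0:1]    # Leftmost column, 1 px wide.
right_col = img_slice[:, -1:]   # Rightmost column, 1 px wide.

target = 6
while img_slice.shape[1] < target:
    # Each pass grows the slice by 2 px (one column on each side), so a
    # slice whose width has the opposite parity overshoots by exactly 1 px.
    img_slice = np.concatenate((img_slice, right_col), axis=1)
    img_slice = np.concatenate((left_col, img_slice), axis=1)

if img_slice.shape[1] == target + 1:
    img_slice = img_slice[:, 0:target]  # Crop the 1 px overshoot.

print(img_slice.shape)  # (4, 6)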
Example #5
def extract_elements_by_template_matching(img_name):
    """
    Main function for element extraction that calls on all the sub-functions.
    :param img_name: Name of the input image from which the image rows were extracted.
    """
    img_location = os.path.join(str(Path(__file__).parent.parent.parent),
                                'resources', 'input_images')
    img_location = os.path.join(img_location, 'input_images_rows',
                                img_name[:-4])
    rows_numerated = sorted(
        (row for row in os.listdir(img_location) if row.startswith("row")),
        key=lambda r: int(r[len("row"):-len(".png")]))  # Sort numerically so that row2 comes before row10.

    construct_output(indent_level=0,
                     message="Finding individual elements in the saved rows.")
    construct_output(indent_level=1, message="Reading extracted rows.")
    for row_number, row_img in enumerate(rows_numerated):
        construct_output(indent_level=2,
                         message="Reading row number {}.".format(row_number))
        img_rgb = cv2.imread(os.path.join(img_location, row_img))  # Read the image.
        img_gray = cv2.cvtColor(
            img_rgb, cv2.COLOR_BGR2GRAY)  # Convert it into a grayscale image.
        image_w, image_h = img_gray.shape[::-1]  # Image dimensions.

        # Construct the path to the templates.
        template_path = os.path.abspath(
            os.path.join(str(Path(__file__).parent.parent.parent), 'resources',
                         'templates'))
        # List all the templates that are not within the 'line_start_templates' subdirectory.
        template_names = [
            t for t in os.listdir(template_path)
            if not str(t).startswith("line_start_templates")
        ]

        # (1) Find templates using template matching.
        # Use the 'match_templates' function to get the locations (list) and dimensions (list).
        # Also, get a list of booleans indicating whether each template is recognized by its name (such as clef_g),
        # or whether it needs to be processed by a conv. network.
        # Replace the values in 'template_names' with the names of the found templates.
        t_loc, t_dim, t_recognized_list, found_t_names = match_templates(
            template_names, template_path, img_gray)

        # (2) Get the start and end coordinates of the templates.
        construct_output(
            indent_level=2,
            message="Matching the row elements with the templates.")
        templates_start_end = [(x[0], x[0] + t_dim[index][0])
                               for index, x in enumerate(t_loc)]

        # (3) Save the images in the standard size (200x200 px). Return value only used for visualisation.
        construct_output(
            indent_level=2,
            message="Saving found elements in the row {}.".format(row_number))
        slices = save_elements_standard_size(templates_start_end, img_rgb,
                                             image_h, image_w, img_name,
                                             str(row_number), found_t_names,
                                             t_recognized_list)
    construct_output(
        indent_level=0,
        message="Finding individual elements in the saved rows done.")
def construct_midi(results, img_name):
    """
    This function constructs and saves the midi file.
    :param results: Individual element names and their classifications.
    :param img_name: Input image name.
    """

    construct_output(indent_level="block",
                     message="Creating the '.midi' file.".format(img_name))

    # Create the MIDI object.
    midi_file = MIDIFile(1)  # Only 1 track.
    track = 0  # Track number.
    time = 0  # Start at the beginning.

    # Add properties to the midi file.
    midi_file.addTrackName(track, time, img_name)
    midi_file.addTempo(track, time, 120)

    # Convert note names to their numerical values (i.e. C4 -> 60)
    results = match_notes_to_midi_values(results)

    time_signature = "4/4"  # Default time signature

    max_bar_length = 4  # Length of one bar.
    current_bar_length = 0  # Length of the current bar.
    channel = 0  # Channel number (only one channel used).
    volume = 100  # Volume will be set to 100.
    time = 0  # Start on beat 0.

    for result in results:  # Iterate through all the classified images.
        result_name = result[0]  # Get the image name.
        pitch = -1  # Set the default pitch to be negative (not allowed, error check).
        note_duration = -1  # Set the default note value to be negative (not allowed, error check).

        if "clef" in result_name:  # Check the clef type (not important for now).
            pass  # TODO
        # Check the time signature (not important right now).
        elif "time_signature_4-4" in result_name:
            time_signature = "4/4"
            max_bar_length = 4  # Change the maximum bar length accordingly.
        elif "time_signature_2-4" in result_name:
            time_signature = "2/4"
            max_bar_length = 2  # Change the maximum bar length accordingly.
        elif "time_signature_3-4" in result_name:
            time_signature = "3/4"
            max_bar_length = 3  # Change the maximum bar length accordingly.
        elif "UNKNOWN" in result_name:
            # Notes that were classified are marked as "UNKNOWN".
            is_note = False  # Check is it a note (not used right now).

            # Set the real duration of the current note.
            if result[2] == "1/16":
                note_duration = 1 / 4
            elif result[2] == "1/8":
                note_duration = 1 / 2
            elif result[2] == "1/4":
                note_duration = 1
            elif result[2] == "1/2":
                note_duration = 2
            elif result[2] == "1/1":
                note_duration = 4

            note_pitch = result[1]  # Get the note pitch.
            if note_duration != -1 and note_pitch in range(
                    47, 82):  # Check if the data is correct.
                current_bar_length = current_bar_length + note_duration  # Update current bar length.
                # Add the note to the midi file.
                midi_file.addNote(track, channel, note_pitch, time,
                                  note_duration, volume)
                time = time + note_duration  # Update the timer.

        # Check if there are enough notes in the bar.
        elif "barline" in result_name:
            if current_bar_length < max_bar_length:
                # If notes are missing, add a rest to the bar so that the rest of the piece is correctly aligned.
                duration = max_bar_length - current_bar_length
                if duration > 0:  # If the bar falls short of the maximum bar duration...
                    time = time + duration  # ...pad it with a rest.
            current_bar_length = 0  # When a barline is reached, reset the current bar length.

    # Construct the path to the location where the generated midi files will be saved.
    midi_path = os.path.abspath(
        os.path.join(str(Path(__file__).parent.parent.parent), 'results'))
    midi_path = os.path.join(midi_path, img_name[:-4] + ".mid")
    with open(midi_path, 'wb') as out_file:
        # Save the midi file.
        midi_file.writeFile(out_file)

    construct_output(
        indent_level="block",
        message="Creating the '.midi' file ({}) done.".format(img_name))
Example #7
def train_note_duration_conv_net(test_data_arr, test_data_label, train_data_arr, train_data_label):
    """
    This function trains the convolutional network for recognizing note durations based on input data.
    Tutorial for this code found here:
    https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/classification.ipynb
    The results are saved on a disk so that they can be used without retraining the network.
    :param train_data_label: Labels with names and durations for the train data images.
    :param train_data_arr: Array containing the train images.
    :param test_data_label: Labels with names and durations for the test data images.
    :param test_data_arr: Array containing the test images.
    """

    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:  # Guard against machines without a visible GPU.
        tf.config.experimental.set_memory_growth(gpus[0], True)

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Alleged fix for some tensorflow bugs.

    construct_output(indent_level=0,
                     message="Convolutional Network 1 (Note duration determining).")

    # Scale these values to a range of 0 to 1 before feeding them to the convolutional network model
    print("Scaling test values to [0-1] range.")
    test_data_arr = test_data_arr / 255.0
    print("Scaling train values to [0-1] range (this will take a while).")
    train_data_arr = train_data_arr / 255.0
    # Construct the path for saving the results of training.
    saved_model_duration_path = os.path.abspath(os.path.join(str(Path(__file__).parent.parent.parent), 'resources'))
    saved_model_duration_path = os.path.join(saved_model_duration_path, 'saved_models')
    saved_model_name = "duration_processing_net_saved.ckpt"
    saved_model_duration_path = os.path.join(saved_model_duration_path, saved_model_name)
    duration_model_cb = tf.keras.callbacks.ModelCheckpoint(filepath=saved_model_duration_path,
                                                           save_weights_only=True,
                                                           verbose=1)

    # Second network recognizes the duration.
    duration_network_train_data_arr = np.array(
        [x for i, x in enumerate(train_data_arr) if train_data_label[i][0][1] != "Uncategorized"])
    duration_network_train_data_label = np.array([(x[0][1], x[1]) for x in train_data_label
                                                  if x[0][1] != "Uncategorized"])

    duration_network_test_data_arr = np.array(
        [x for i, x in enumerate(test_data_arr) if test_data_label[i][0][1] != "Uncategorized"])
    duration_network_test_data_label = np.array([(x[0][1], x[1]) for x in test_data_label
                                                 if x[0][1] != "Uncategorized"])

    # class_names contains the possible classification results (note durations).
    class_names = ["1/1", "1/2", "1/4", "1/8", "1/16"]

    # Fetch only the labels (note durations) from the data.
    duration_network_train_data_label = [item[0] for item in duration_network_train_data_label]
    # Assign the corresponding numerical values to labels.
    duration_train_label_values_numerical = values_to_numerical(duration_network_train_data_label, class_names)

    with tf.device('/GPU:1'):  # Specify using nvidia discrete GPU instead of Intel integrated graphics.
        construct_output(indent_level=0, message="Start training.")
        # Set up the layers.
        # The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images
        # from a 2D array (200x200 px) to a 1D array (of 200x200 = 40000 pixels).
        # After the pixels are flattened, the network consists of a sequence of two tf.keras.layers.Dense layers.
        # These are densely connected, or fully connected, neural layers.
        # The first Dense layer has 128 nodes (or neurons).
        # The second (and last) layer returns an array with a length of 5.
        # Each node contains a score indicating how likely the current image is to belong to one of the 5 duration classes.
        model = tf.keras.Sequential([
            tf.keras.layers.Flatten(input_shape=(200, 200)),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(5)
        ])

        # Before the model is ready for training, it needs a few more settings.
        # These are added during the model's compile step:
        # Loss function —This measures how accurate the model is during training.
        # You want to minimize this function to "steer" the model in the right direction.
        # Optimizer —This is how the model is updated based on the data it sees and its loss function.
        # Metrics —Used to monitor the training and testing steps.
        # The following example uses accuracy, the fraction of the images that are correctly classified.
        model.compile(optimizer='adam',
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                      metrics=['accuracy'])

        # Training the convolutional network model requires the following steps:
        # Feed the training data to the model. Here, the training data is in the
        # duration_network_train_data_arr and duration_train_label_values_numerical arrays.
        # The model learns to associate images and labels.
        # You can then ask the model to make predictions about a test set (duration_network_test_data_arr)
        # and verify that the predictions match the labels of the test data.
        model.fit(
            duration_network_train_data_arr,
            duration_train_label_values_numerical,
            epochs=3,
            callbacks=[duration_model_cb]
        )
        construct_output(indent_level=0, message="Save the network weights to avoid retraining on every run.")

        # Attach a softmax layer to convert the logits to probabilities, which are easier to interpret.
        probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])

        # TESTING THE NETWORK. =======================================================================================
        # Compare how the model performs on the test dataset.
        # duration_network_test_data_label = [item[0] for item in duration_network_test_data_label]
        # duration_network_test_data_label_values_numerical = values_to_numerical(
        #     duration_network_test_data_label,
        #     class_names)
        # test_loss, test_acc = model.evaluate(duration_network_test_data_arr,
        #                                      duration_network_test_data_label_values_numerical,
        #                                      verbose=2
        #                                      )
        # print('\nTest accuracy:', test_acc)
        # predictions = probability_model.predict(duration_network_test_data_arr)
        # print(predictions[0])
        # print("max= ", np.argmax(predictions[0]))
        # import cv2
        # cv2.imshow("img", duration_network_test_data_arr[0])
        # cv2.waitKey()

        construct_output(indent_level=0, message="End training.")
        construct_output(indent_level=0, message="Convolutional Network 1 (Note duration determining) Done.")
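values_to_numerical is not shown in these examples; from the way it is called, a minimal sketch (an assumption: it maps each string label to its index in class_names, which is what SparseCategoricalCrossentropy expects) would be:

import numpy as np

def values_to_numerical(labels, class_names):
    # Hypothetical sketch: convert string labels such as "1/4" into the
    # integer index of that label within class_names.
    return np.array([class_names.index(label) for label in labels])

# values_to_numerical(["1/4", "1/1"], ["1/1", "1/2", "1/4", "1/8", "1/16"])
# -> array([2, 0])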
Example #8
def generate_results(img_name, note_names, note_durations):
    """
    This function matches the results with file names and sorts them so that it matches the original
    order (from the original image).
    :param img_name: Name of the input image.
    :param note_names: Note values.
    :param note_durations: Note durations.
    :return: list: List containing the order and description of the elements on the image.
    """
    construct_output(
        indent_level="block",
        message=
        "Matching the elements of the image ({}) with the values given by the networks."
        .format(img_name))

    results = []  # A list that will be returned.

    # Path that contains the information about the original image.
    image_path = os.path.abspath(
        os.path.join(str(Path(__file__).parent.parent.parent), 'resources',
                     'input_images'))
    image_path = os.path.join(image_path, 'input_images_individual_elements')
    image_path = os.path.join(image_path, img_name[:-4])

    # Find all the unrecognized elements and their names.
    unrecognized_elements_path = os.path.join(image_path, "unrecognized")
    unrecognized_elements = os.listdir(unrecognized_elements_path)

    results_and_positions = []
    for el in unrecognized_elements:
        row_number = el[(el.index("_row") + len("_row")):(el.index("_slice"))]
        el_number = el[(el.index("slice") + len("slice")):]
        el_number = el_number[:el_number.index("_")]
        results_and_positions.append((el, row_number, el_number))
    results_and_positions.sort(key=lambda k: (int(k[1]), int(k[2])))
    if not (len(note_names) == len(note_durations) == len(unrecognized_elements)):
        print("generator.py = Not enough elements were recognized!")

    for index, el in enumerate(results_and_positions):
        el = el[0]
        if el.endswith(".png"):
            # Strip the image-name prefix and the '.png' extension from the element name.
            results.append((el[len(img_name[:-4]) + 1:-len(".png")],
                            note_names[index], note_durations[index]))

    # Find all the recognized elements and their names.
    recognized_elements_path = os.path.join(image_path, "recognized",
                                            img_name[:-4] + ".txt")

    with open(recognized_elements_path, 'r') as file:
        recognized_elements = file.readlines()
    if len(recognized_elements) == 0:
        print("generator.py = Error in reading image file info!")
        exit(-1)
    for index, content in enumerate(recognized_elements):
        recognized_elements[index] = content.rstrip("\n")
    for index, el in enumerate(recognized_elements):
        el_name = el[(el.find("template_") + len("template_")):el.find(".png")]
        if el_name.startswith("Z_barline"):
            el_name = "barline"
        # Strip the image-name prefix from the element name.
        results.append((el[len(img_name[:-4]) + 1:-len(".png")], el_name, "-"))

    # Join the individual elements with their descriptions.
    results_and_positions = []
    for result in results:
        x = result[0]
        row_number = x[(x.index("row") + len("row")):x.index("_slice")]
        el_number = x[(x.index("slice") + len("slice")):]
        el_number = el_number[:el_number.index("_")]
        results_and_positions.append((result, row_number, el_number))

    # Sort the elements with the order found in the original image.
    results_and_positions.sort(key=lambda k: (int(k[1]), int(k[2])))

    results = []
    for x in results_and_positions:
        results.append(x[0])

    construct_output(indent_level="block",
                     message="Results generated.".format(img_name))

    return results
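The row/slice sort key above depends on the naming scheme produced by save_elements_standard_size; a toy run of the parsing (with a hypothetical element name) makes the key explicit:

el = "img02_row1_slice12_UNKNOWN.png"  # Hypothetical element name.
row_number = el[el.index("_row") + len("_row"):el.index("_slice")]
el_number = el[el.index("slice") + len("slice"):]
el_number = el_number[:el_number.index("_")]
print(row_number, el_number)  # '1' '12', sorted numerically as (1, 12).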
def split_into_rows(img_path):
    """
    This function splits the input image into separate rows of note lines.
    :param img_path: Path to the image.
    Exits the program if the image cannot be processed.
    """
    try:
        construct_output(indent_level=0, message="Row splitting.")
        img_name = os.path.basename(img_path)  # Extract the image name from the given path.
        # Directory name for the directory that will hold the rows of the input image.
        dir_name = os.path.join(str(Path(__file__).parent.parent.parent),
                                'resources', 'input_images')
        dir_name = os.path.join(dir_name, 'input_images_rows', img_name[:-4])
        print("2 ", dir_name)
        try:  # Try creating a directory.
            construct_output(
                indent_level=1,
                message="Creating a folder for the image: {}".format(dir_name))
            os.mkdir(dir_name)
        except OSError:
            construct_output(
                indent_level=1,
                message="Folder already exists: {}".format(dir_name))

        construct_output(
            indent_level=1,
            message="Reading the input image {}.".format(img_name))
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)  # Read the image.
        if img.ndim == 3 and img.shape[2] == 4:  # Remove any transparency, if an alpha channel is present.
            trans_mask = img[:, :, 3] == 0
            img[trans_mask] = [255, 255, 255, 255]
        img_gray = cv2.cvtColor(
            img, cv2.COLOR_BGR2GRAY)  # Convert the image to grayscale.
        # Make a black and white image based on a threshold.
        th, img_gray = cv2.threshold(img_gray, 127, 255,
                                     cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        image_w, image_h = img_gray.shape[::-1]  # Image dimensions.

        template_path = os.path.abspath(
            os.path.join(str(Path(__file__).parent.parent.parent), 'resources',
                         'templates', 'line_start_templates'))
        row_templates = os.listdir(template_path)

        construct_output(indent_level=1, message="Finding rows in the image.")
        t_locations = []  # All the locations found by template matching.
        t_dimensions = []  # Dimensions of the templates found at those locations.
        for t in row_templates:  # Iterate through all of the vertical line templates.
            template = cv2.imread(os.path.join(template_path, t),
                                  0)  # Read the template as a grayscale image.
            res = cv2.matchTemplate(
                img_gray, template,
                cv2.TM_CCOEFF_NORMED)  # Match the template against the grayscale image.
            threshold = 0.80  # Threshold that decides whether a part of the image is similar enough to the template.
            locations = np.where(
                res >= threshold)  # Locations in the image where template matching found results.
            template_w, template_h = template.shape[::-1]  # Dimensions of the current template.
            # list(zip(*locations[::-1])) pairs the 'x' and 'y' coordinates into tuples and saves them into a list.
            # Iterate through the locations, skipping points already found by previous templates.
            for point in list(zip(*locations[::-1])):
                if len(t_locations) == 0:  # Save the first template matching result without checking.
                    t_locations.append(point)
                    t_dimensions.append(
                        (template_w,
                         template_h))  # Also save the template dimensions.
                # Skip the point if 't_locations' already contains a point within +/- 6 px of its 'y' coordinate.
                elif np.intersect1d(
                        list(zip(*t_locations))[1],
                        list(range(point[1] - 6, point[1] + 6))).size == 0:
                    t_locations.append(point)
                    t_dimensions.append((template_w, template_h))

        construct_output(
            indent_level=1,
            message="Saving the found rows into folder: {}".format(dir_name))
        for index, el in enumerate(t_locations):  # Iterate through found locations.
            # Generate a path and a name for the row image.
            img_slice_name_and_path = os.path.join(dir_name, "row" + str(index) + ".png")
            # Cut the row out of the image, with a 40 px margin above and below.
            img_slice = img_gray[max(0, el[1] - 40):el[1] + t_dimensions[index][1] + 40, 0:image_w]
            cv2.imwrite(img_slice_name_and_path, img_slice)  # Save that part of the image.
    except Exception as e:  # Catch any exception.
        print(e)
        exit(-1)  # Exit with an error code if anything failed.
    construct_output(indent_level=0, message="Row splitting done.")
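The np.where / zip(*locations[::-1]) idiom used for template matching converts the result map into (x, y) points; a standalone sketch with synthetic data shows the pattern:

import cv2
import numpy as np

# Synthetic grayscale image; the template is a patch copied out of it.
rng = np.random.default_rng(0)
img_gray = rng.integers(0, 256, size=(100, 100), dtype=np.uint8)
template = img_gray[30:40, 50:60].copy()

res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
locations = np.where(res >= 0.80)     # (row_indices, col_indices) of matches.
points = list(zip(*locations[::-1]))  # Reversing gives (x, y) tuples.
print(points)                         # [(50, 30)]: top-left corner of the match.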