Example #1
def prediction(valid_imgs, sg_model, show_image=SHOW_IMAGES_WITH_PREDICTIONS):
    counter = 1
    n_valid = len(valid_imgs)
    results = np.array([])

    #go through each image one at a time and make a prediction, adding the prediction to the output array
    for img in valid_imgs:
        start_time = time.time()

        new_label = sg_model.predict(np.array([img]))
        results = np.append(results, new_label, axis=None)

        elapsed_time = (time.time() - start_time) * 1000  #ms
        global total_prediction_time
        total_prediction_time += elapsed_time

        msg.timemsg(
            "Image #: {}, Predicted label: {:.4f}, Predicted in: {:.3f}s".
            format(counter, float(new_label), elapsed_time / 1000.0))
        msg.timemsg('Prediction Progress: {:.2f}%'.format(
            float(counter / n_valid) * 100))
        counter += 1

        if show_image:
            # Display the image with the predicted label as the title
            plt.imshow(img, interpolation='nearest')
            plt.title('Predicted: ' + str(new_label))
            plt.show()

    return results
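The loop above wraps each image in np.array([img]) so that predict always receives a batch of one. A small self-contained illustration of that pattern; the DummyModel below is a made-up stand-in, not the project's trained network:

import numpy as np

class DummyModel:
    """Hypothetical stand-in for the trained regression model."""
    def predict(self, batch):
        # one predicted value per image in the batch, shape (n, 1)
        return np.full((batch.shape[0], 1), 0.42)

img = np.zeros((576, 576, 3), dtype='float32')  # a single made-up image
single_batch = np.array([img])                  # shape (1, 576, 576, 3): a batch of one
label = DummyModel().predict(single_batch)
print(float(label[0, 0]))                       # 0.42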
Example #2
def renaming_main(img_dir):
    start_time = time.time()
    msg.ini("/home/james/Documents/Seagrass-Repository/Results/output_log.txt")

    rename_loader = FileLoader(img_dir, img_dir)
    to_rename = rename_loader.load_all_from_folder(False)
    to_rename.sort(key=lambda f: int(re.sub(r'\D', '', f[1])))
    to_rename = np.array([img[0] for img in to_rename])

    msg.timemsg("Loaded {} images to be renamed".format(len(to_rename)))
    process_for_saving_just_rename(rename_loader, to_rename, 145)
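This example (and several of the later ones) sorts the loaded (image, filename) pairs by the numeric part of the filename. A small self-contained illustration of that sort key, using made-up filenames:

import re

files = [("img_a", "frame_10.jpg"), ("img_b", "frame_2.jpg"), ("img_c", "frame_1.jpg")]
files.sort(key=lambda f: int(re.sub(r'\D', '', f[1])))
print([name for _, name in files])  # ['frame_1.jpg', 'frame_2.jpg', 'frame_10.jpg']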
Example #3
def resizing_main():
    save_path = "/home/james/Documents/Seagrass-Repository/Images/Emma_Images/"
    load_path = "/home/james/Documents/Seagrass-Repository/Images/Emma_Images/"
    width = 576
    height = 576

    start = time.time()
    msg.ini("/home/james/Documents/Seagrass-Repository/Results/output_log.txt")

    loader = FileLoader(load_path, save_path)
    to_process = loader.load_all_from_folder(False)
    to_process.sort(key=lambda f: int(re.sub(r'\D', '', f[1])))

    if len(to_process) > 0:
        msg.timemsg(
            "Width and Height parameters set with Height: {} and Width: {}".
            format(height, width))
        images = np.array([img[0] for img in to_process])

        msg.timemsg("Processing done, resizing and saving")
        resize_and_save(loader, images, width, height)
    else:
        print("Directory empty or loading problem occurred")

    msg.timemsg("Finished executing")
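resize_and_save itself is not shown in any of these examples. The sketch below is only a guess at what it does, based on the calls above; the OpenCV resize, the output naming and the directory argument are all assumptions rather than the project's actual implementation:

import cv2

def resize_and_save_sketch(images, width, height, out_dir):
    # hypothetical stand-in for resize_and_save(loader, images, width, height):
    # resize each image with OpenCV and write it out with a sequential name
    for idx, img in enumerate(images):
        resized = cv2.resize(img, (width, height))
        cv2.imwrite("{}/image_{}.jpg".format(out_dir, idx), resized)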
Example #4
def main(load_path, save_path):
    start = time.time()
    msg.ini("/home/james/Documents/Seagrass-Repository/Results/output_log.txt")

    loader = FileLoader(load_path, save_path)
    to_process = loader.load_all_from_folder(False)

    loader2 = FileLoader(save_path, save_path)
    check_saved = loader2.load_first()

    if len(to_process) > 0:
        min_height = 576
        min_width = 576

        if check_saved is not None:
            min_width = check_saved.shape[1]
            min_height = check_saved.shape[0]

        msg.timemsg(
            "Width and Height parameters set with Height: {} and Width: {}".
            format(min_height, min_width))
        images = np.array([img[0] for img in to_process])
        out = []
        for i in range(len(images)):
            #need to adjust the quadrat cropping slightly
            initial_mask = ImageEditor(images[i]).generate_silver_mask()
            border_vals = find_cropping_coordinates(initial_mask, True)
            squared = initial_square(images[i], border_vals)
            cropped_mask = ImageEditor(squared).generate_silver_mask()
            out.append(
                ImageEditor(squared).crop_quadrat_from_image(cropped_mask))

        msg.timemsg("Processing done, resizing and saving")
        resize_and_save(loader, out, min_width, min_height)
    else:
        print("Directory empty or loading problem occurred")

    msg.timemsg("Finished executing")
Example #5
def augmentation_main():
    save_path = "/home/james/Documents/Seagrass-Repository/Images/Formatted_Images/"
    load_path = "/home/james/Documents/Seagrass-Repository/Images/Formatted_Images/"

    start_time = time.time()
    msg.ini("/home/james/Documents/Seagrass-Repository/Results/output_log.txt")

    aug_loader = FileLoader(load_path, save_path)
    to_augment = aug_loader.load_all_from_folder(False)
    to_augment.sort(key=lambda f: int(re.sub(r'\D', '', f[1])))
    to_augment = np.array([img[0] for img in to_augment])

    msg.timemsg("Loaded {} images to be augmented".format(len(to_augment)))

    loader2 = FileLoader(save_path, save_path)
    check_saved = loader2.load_first()

    augmented_images = []
    sharpening_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    blurring_kernel = np.ones((5, 5), np.float32) / 25

    # possible data augmentations for each image:
    # a vertical or horizontal flip, or no flip
    # combined with a change in brightness, sharpening or blurring
    # decide which image alteration to perform: sharpen, blur, adjust brightness or none
    # then for that alteration, select the image orientation: vertical, horizontal or none
    # repeat for each chosen (alteration, orientation) combination

    # each number refers to an operation function to be applied to the image
    operations = [1, 2, 3, 4]
    rand.shuffle(operations)
    # each number refers to which orientation to use as described above
    orientation = [1, 2, 3]
    rand.shuffle(orientation)

    all_combinations = list(itertools.product(operations, orientation))
    all_combinations.remove((4, 3))
    rand.shuffle(all_combinations)

    msg.timemsg("Beginning augmentation of {} images".format(len(to_augment)))

    if len(to_augment) > 0:
        min_height = None
        min_width = None

        if check_saved is not None:
            min_width = check_saved.shape[1]
            min_height = check_saved.shape[0]

        msg.timemsg(
            "Width and Height parameters set with Height: {} and Width: {}".
            format(min_height, min_width))
        to_perform = all_combinations[0:6]

        for combination in to_perform:
            for i in range(len(to_augment)):
                new_image = to_augment[i]
                editor = ImageEditor(new_image)
                #perform operation picked from random choice
                if combination[0] == 1:
                    g_val = rand.uniform(0.8, 2)
                    new_image = editor.alter_brightness(g_val)
                if combination[0] == 2:
                    new_image = editor.alter_sharpness(sharpening_kernel)
                if combination[0] == 3:
                    new_image = editor.blur_image(blurring_kernel)

                #flip image if chosen to be
                if combination[1] == 1:
                    new_image = editor.flip_image_vert()
                if combination[1] == 2:
                    new_image = editor.flip_image_hor()

                augmented_images.append(np.array(new_image))

        msg.timemsg("Augmentation done, resizing and saving the images")
        resize_and_save(aug_loader, augmented_images, min_width, min_height)
    else:
        print("Directory empty or problem loading images")

    msg.timemsg("Finished executing")
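The combination bookkeeping above builds 4 x 3 = 12 (operation, orientation) pairs, removes the identity pair (no alteration, no flip), and applies six of the remaining eleven to every image. A small standalone illustration, using the same number-to-operation mapping as the loop above:

import itertools
import random as rand

operations = [1, 2, 3, 4]   # 1: brightness, 2: sharpen, 3: blur, 4: no alteration
orientation = [1, 2, 3]     # 1: vertical flip, 2: horizontal flip, 3: no flip
combos = list(itertools.product(operations, orientation))
combos.remove((4, 3))       # drop the identity combination: no alteration and no flip
rand.shuffle(combos)
print(len(combos))          # 11 candidate augmentations
print(combos[:6])           # the six combinations actually applied to each image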
Example #6
def train_in_batch(images,
                   labels,
                   cp_path,
                   m_path,
                   results_dir,
                   batch_size=BATCH_SIZE):
    #batches the labels and images
    images_batched = list(batch(images, batch_size))
    labels_batched = list(batch(labels, batch_size))

    #all image shapes are identical so choose the dimensions of the first
    height = images.shape[1]
    width = images.shape[2]
    depth = images.shape[3]
    print("\n")
    msg.timemsg("Training CNN start")

    #create the model
    model = cnn.create_cnn(width, height, depth)
    cnn.give_summary(model)

    #if a proper test split can't be made from the last batch, merge it into the batch before it
    #(done before the loop so the batch count is fixed before iterating)
    if len(images_batched[-1]) * TEST_SIZE < 1:
        last_images = images_batched[-1]
        last_labels = labels_batched[-1]
        del images_batched[-1]
        del labels_batched[-1]
        images_batched[-1] = np.append(images_batched[-1], last_images, axis=0)
        labels_batched[-1] = np.append(labels_batched[-1], last_labels, axis=0)

    #train each batch one at a time, with evaluation performed
    for i in range(len(images_batched)):
        msg.timemsg("Batch {}".format(i))

        #convert the data to be in the range of 0 and 1 for the pixel values
        msg.timemsg("Batch {}: Normalising pixel values".format(i))
        images_batched[i] = images_batched[i].astype('float32')
        images_batched[i] /= 255.0
        msg.timemsg("Batch {}: Normalised pixel values".format(i))

        #split data into training and testing data for that batch
        #also shuffles the data whilst splitting
        msg.timemsg(
            "Batch {}: Shuffling and splitting data for training".format(i))
        train_images, test_images, train_labels, test_labels = train_test_split(
            images_batched[i],
            labels_batched[i],
            test_size=TEST_SIZE,
            random_state=42)

        msg.timemsg(
            "Batch {}: Data shuffled and split, it is ready for usage".format(
                i))
        #input size for the input layer: 576x576 = 331776 neurons per colour channel, so 331776 * 3 inputs per image

        # will need to train
        # checkpoints will be created during training
        # load from checkpoint if not the first batch and save the entire model at the end of each batch

        if i > 0:
            #if not the first batch, then load the weights from the previous batch and begin training again
            #model = cnn.create_cnn(width, height, depth)
            msg.timemsg("Loading weights for model")
            model = cnn.load_weights_from_disk(model, cp_path)

        msg.timemsg("Batch {}: Training batch".format(i))
        model, history = cnn.train_model(model, train_images, train_labels,
                                         test_images, test_labels, EPOCHS,
                                         cp_path)

        msg.timemsg("Batch {}: Evaluating model".format(i))
        m_s_error, mean_abs_error = cnn.evaluate_model(
            model, test_images, test_labels)
        #can probably use train mse and test mse in plot training results method
        msg.timemsg("Batch {}: test loss: {:.5f}, test mae: {:.5f}\n\n".format(
            i, m_s_error, mean_abs_error))

    msg.timemsg("Training CNN finished")
    msg.timemsg("Saving model to file")
    cnn.save_model(model, m_path)
    msg.timemsg("Model saved to file")

    #plot mse and mae of final trained model
    #plotter = cnn.create_history_plotter()
    cnn.plot_mse(history, results_dir)
    cnn.plot_mae(history, results_dir)

    return model
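The batch() helper called at the top of train_in_batch is not included in these examples. A minimal sketch of what such a generator typically looks like; the name and call signature come from the code above, the body is an assumption:

def batch(iterable, n=1):
    # yield consecutive slices of at most n items; works for lists and numpy arrays
    length = len(iterable)
    for start in range(0, length, n):
        yield iterable[start:start + n]

# e.g. list(batch(list(range(10)), 4)) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]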
Example #7
def predict_directory(model_to_load, results):
    format_images = input("Do the images require formatting? ('y' or 'n'): ")
    predict_dir = None

    if format_images == 'y':
        #get directory from user, using file selector
        root = tk.Tk()
        root.withdraw()
        original_folder = filedialog.askdirectory(
            title='Select Folder of Images to Be Formatted')

        #get directory from user, using file selector
        root = tk.Tk()
        root.withdraw()
        predict_dir = filedialog.askdirectory(
            title='Select Destination Folder')

        msg.timemsg("Loading from: {}".format(original_folder))
        msg.timemsg("Saving to: {}".format(predict_dir))

        msg.timemsg("Formatting the images")
        formatter.main(original_folder, predict_dir + "/")
        msg.timemsg("Images formatted")

    counter = 0
    start_time = time.time()
    #initialise the cnn instance so we have access to its functionality
    cnn.ini()
    #load the entire model
    model = None
    try:
        model = cnn.load_model_from_disk(model_to_load)
        msg.timemsg("Model loaded for use")
    except ValueError:
        msg.timemsg(
            "No model loaded, check the path is correct or a model has been saved properly"
        )

    if (model is not None):
        if predict_dir is None:
            #get directory from user, maybe using file selector
            root = tk.Tk()
            root.withdraw()
            predict_dir = filedialog.askdirectory(
                title='Select Folder of Formatted Images')

        out_file_name = os.path.join(predict_dir, "Predictions.txt")

        msg.timemsg("Making predictions on images")
        with open(out_file_name, "w") as prediction_file:
            #process each image one at a time
            #Check first with the CheckValidImages class that each file is an image
            #This can be done by running the class in the command line and passing the path of the directory required
            for image in os.scandir(predict_dir):
                name = image.name
                msg.timemsg("Loading file: {}".format(name))

                #use a tuple of extensions so the is_file() check applies to every extension
                if image.is_file() and image.name.endswith(
                        ('.jpg', '.png', '.jpeg')):
                    image_path = os.path.join(predict_dir, image.name)
                    numpy_image = cv2.imread(image_path)
                    numpy_image = cv2.cvtColor(numpy_image, cv2.COLOR_BGR2RGB)
                    numpy_image = numpy_image.astype('float32')
                    numpy_image /= 255.0
                    numpy_image = np.array([numpy_image])

                    msg.timemsg("Making prediction on: {}".format(name))
                    predicted_coverage = model.predict(numpy_image)

                    elapsed_time = (time.time() - start_time) * 1000  #ms
                    global total_prediction_time
                    total_prediction_time += elapsed_time
                    #load the image, normalise the data, make the prediction and then log the prediction
                    print(
                        "Image: {}, Prediction: {:.6f}, Predicted in: {:.3f}s".
                        format(name, float(predicted_coverage * 100),
                               elapsed_time / 100000.0),
                        file=prediction_file)

                    counter += 1
                else:
                    if not (image.is_file() and image.name.endswith(".txt")):
                        msg.timemsg(
                            "Not a valid file, please check the directory contents, exiting"
                        )
                        msg.timemsg(
                            "Ensure the folder path contains no folders that include a space in the name"
                        )
                        sys.exit(0)

        msg.timemsg("\n")
        if counter == 0:
            msg.timemsg("No images were in the folder, please double check")
        else:
            msg.timemsg(
                "All predictions made and stored at: {}".format(out_file_name))

    return counter
Example #8
def load_data(config_file):
    #split into batches of 50 for the computer to handle more easily
    msg.timemsg("Loading data from file")
    loader = FileUtil(config_file)
    loader.read_data()

    img_dir = loader.images[0].split("/")
    img_dir = args.root_img_dir + "/" + img_dir[1]

    img_loader = FileLoader(img_dir, args.root_img_dir)
    msg.timemsg("Loading images from folder")

    images = img_loader.load_all_from_folder(False)
    msg.timemsg("Images loaded from folder")

    msg.timemsg("Sorting image order and storing as numpy array")
    images.sort(key=lambda f: int(re.sub(r'\D', '', f[1])))
    images = np.array([(img[0]) for img in images])
    msg.timemsg("Images sorted and stored as numpy array")

    msg.timemsg("Converting images to RGB")
    for img in range(len(images)):
        images[img] = convert_to_rbg(images[img])

    msg.timemsg("Converted images to RGB")

    loaded_labels_arr = np.array(
        [item for sublist in loader.labels for item in sublist])
    msg.timemsg("Loaded labels from file")

    return images, loaded_labels_arr
Example #9
        "--skip_training",
        help=
        "determines whether to skip training and load a pre-trained model instead"
    )
    args = parser.parse_args()

    msg.ini(args.results_dir + args.logging_file)

    checkpoint_path = args.results_dir + args.checkpoint_dir
    model_path = args.results_dir + args.model_dir
    #if training not needed, make predictions on formatted images in a given directory
    if (args.skip_training == "1"):
        number_images = predict_directory(model_path, args.results_dir)
        if number_images > 0:
            msg.timemsg(
                "Predictions made on set of {} images, time taken: {:.3f}s".
                format(number_images, float(total_prediction_time / 1000.0)))
            msg.timemsg("Average prediction time of {:.3f}s per image".format(
                float((total_prediction_time / 1000.0) / number_images)))
    else:
        #train the model and generate the evaluation metrics
        rgb_images = np.array([])
        labels_arr = np.array([])

        msg.timemsg("Loading or creating numpy data")

        IMAGE_FILE = "/images.npy"
        LABEL_FILE = "/labels.npy"
        if (args.using_small == "1"):
            IMAGE_FILE = "/images_small.npy"
            LABEL_FILE = "/labels_small.npy"