def test_load_ippn_leaf_count_dataset_from_directory(test_data_dir):
    # The following tests take the format laid out in the documentation of an example
    # for training a leaf counter, and leave out key parts to see if the program
    # throws an appropriate exception, or executes as intended due to using a default setting
    data_path = os.path.join(test_data_dir, 'test_Ara2013_Canon', '')

    # Forgetting to set image dimensions
    model = dpp.RegressionModel(debug=False, save_checkpoints=False, report_rate=20)
    # channels = 3
    model.set_batch_size(4)
    # model.set_image_dimensions(128, 128, channels)
    model.set_resize_images(True)
    model.set_num_regression_outputs(1)
    model.set_test_split(0.1)
    model.set_weight_initializer('xavier')
    model.set_maximum_training_epochs(1)
    model.set_learning_rate(0.0001)
    with pytest.raises(RuntimeError):
        model.load_ippn_leaf_count_dataset_from_directory(data_path)

    # Forgetting to set the number of epochs
    model = dpp.RegressionModel(debug=False, save_checkpoints=False, report_rate=20)
    channels = 3
    model.set_batch_size(4)
    model.set_image_dimensions(128, 128, channels)
    model.set_resize_images(True)
    model.set_num_regression_outputs(1)
    model.set_test_split(0.1)
    model.set_weight_initializer('xavier')
    # model.set_maximum_training_epochs(1)
    model.set_learning_rate(0.0001)
    with pytest.raises(RuntimeError):
        model.load_ippn_leaf_count_dataset_from_directory(data_path)

    # The following shouldn't raise any issues, since there should be defaults for
    # batch_size, train_test_split, and learning_rate
    model = dpp.RegressionModel(debug=False, save_checkpoints=False, report_rate=20)
    channels = 3
    # model.set_batch_size(4)
    model.set_image_dimensions(128, 128, channels)
    model.set_resize_images(True)
    model.set_num_regression_outputs(1)
    # model.set_train_test_split(0.8)
    model.set_weight_initializer('xavier')
    model.set_maximum_training_epochs(1)
    # model.set_learning_rate(0.0001)
    model.load_ippn_leaf_count_dataset_from_directory(data_path)
def test_set_num_regression_outputs():
    model = dpp.RegressionModel()
    with pytest.raises(TypeError):
        model.set_num_regression_outputs(5.0)
    with pytest.raises(ValueError):
        model.set_num_regression_outputs(-1)
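# A small companion test, added here as an illustrative sketch (it is not part of the
# original test suite): a positive integer should be accepted by
# set_num_regression_outputs without raising, mirroring how the other setter tests in
# this file finish with a valid call.
def test_set_num_regression_outputs_valid_value():
    model = dpp.RegressionModel()
    model.set_num_regression_outputs(1)  # a valid, positive number of regression outputs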
def test_det_random_set_split(test_data_dir):
    model = dpp.RegressionModel()
    data_path = os.path.join(test_data_dir, 'test_Ara2013_Canon', '')
    model.set_validation_split(0.25)
    model.set_test_split(0.25)
    model.set_maximum_training_epochs(1)
    model.set_image_dimensions(128, 128, 3)
    model.load_ippn_leaf_count_dataset_from_directory(data_path)

    def get_random_splits():
        # Re-seed, re-split, and return the materialized train/test/validation sets
        with model._graph.as_default():
            model.set_random_seed(7)
            trn_im, trn_lab, _, tst_im, tst_lab, _, val_im, val_lab, _ \
                = loaders.split_raw_data(model._raw_image_files, model._raw_labels,
                                         model._test_split, model._validation_split,
                                         split_labels=True, force_mask_creation=True)
            return model._session.run([trn_im, trn_lab, tst_im, tst_lab, val_im, val_lab])

    # Splitting twice with the same random seed should be deterministic
    splits_1 = get_random_splits()
    model._reset_graph()
    model._reset_session()
    splits_2 = get_random_splits()
    assert np.all([np.all(x == y) for x, y in zip(splits_1, splits_2)])
def __init__(self, model_dir, batch_size=9):
    """A network which predicts rosette leaf count via a convolutional neural net"""
    self.__dir_name = os.path.join(model_dir)
    self.model = dpp.RegressionModel(debug=False, load_from_saved=self.__dir_name)

    # Define model hyperparameters
    self.model.set_batch_size(batch_size)
    self.model.set_number_of_threads(1)
    print(self.img_height, self.img_width)
    self.model.set_image_dimensions(self.img_height, self.img_width, 3)
    self.model.set_resize_images(True)
    # self.model.set_augmentation_crop(True)

    # Define a model architecture
    self.model.add_input_layer()
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 3, 32], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 32, 64], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_output_layer()
def test_set_augmentation_flip_horizontal():
    model1 = dpp.RegressionModel()
    model2 = dpp.SemanticSegmentationModel()
    with pytest.raises(TypeError):
        model1.set_augmentation_flip_horizontal("True")
    with pytest.raises(RuntimeError):
        model2.set_augmentation_flip_horizontal(True)
    model1.set_augmentation_flip_horizontal(True)
def test_set_augmentation_brightness_and_contrast():
    model1 = dpp.RegressionModel()
    model2 = MockDPPModel()
    model2._supported_augmentations = []
    with pytest.raises(TypeError):
        # Type-check the function under test (the original called set_augmentation_crop
        # here, which looks like a copy-paste slip given the test's name)
        model1.set_augmentation_brightness_and_contrast("True")
    with pytest.raises(RuntimeError):
        model2.set_augmentation_brightness_and_contrast(True)
    model1.set_augmentation_brightness_and_contrast(True)
def __init__(self, height, width, batch_size=4):
    """A network which predicts bounding box coordinates via a convolutional neural net"""
    # Set original image dimensions
    self.original_img_height = height
    self.original_img_width = width

    m_path, _ = os.path.split(__file__)
    checkpoint_path = os.path.join(m_path, 'network_states', self.__dir_name)

    import deepplantphenomics as dpp

    self.model = dpp.RegressionModel(debug=False, load_from_saved=checkpoint_path)

    # Define model hyperparameters
    self.model.set_batch_size(batch_size)
    self.model.set_number_of_threads(1)
    self.model.set_original_image_dimensions(self.original_img_height, self.original_img_width)
    self.model.set_image_dimensions(self.img_height, self.img_width, 3)
    self.model.set_resize_images(True)
    self.model.set_num_regression_outputs(4)

    # Define a model architecture
    self.model.add_input_layer()
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 3, 16], stride_length=1, activation_function='relu')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 16, 64], stride_length=1, activation_function='relu')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 64, 64], stride_length=1, activation_function='relu')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 64, 64], stride_length=1, activation_function='relu')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_fully_connected_layer(output_size=384, activation_function='relu')
    self.model.add_output_layer()
def test_set_augmentation_crop():
    model1 = dpp.RegressionModel()
    model2 = dpp.SemanticSegmentationModel()
    with pytest.raises(TypeError):
        model1.set_augmentation_crop("True", 0.5)
    with pytest.raises(TypeError):
        model1.set_augmentation_crop(True, "5")
    with pytest.raises(ValueError):
        model1.set_augmentation_crop(False, -1.0)
    with pytest.raises(RuntimeError):
        model2.set_augmentation_crop(True)
    model1.set_augmentation_crop(True)
def __init__(self, batch_size=8):
    """A network which predicts rosette leaf count via a convolutional neural net"""
    m_path, _ = os.path.split(__file__)
    checkpoint_path = os.path.join(m_path, 'network_states', self.__dir_name)

    import deepplantphenomics as dpp

    self.model = dpp.RegressionModel(debug=False, load_from_saved=checkpoint_path)

    # Define model hyperparameters
    self.model.set_batch_size(batch_size)
    self.model.set_number_of_threads(1)
    self.model.set_image_dimensions(self.img_height, self.img_width, 3)
    self.model.set_resize_images(True)
    self.model.set_augmentation_crop(True, crop_ratio=0.9)

    # Define a model architecture
    self.model.add_input_layer()
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 3, 32], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 32, 32], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[3, 3, 32, 64], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    self.model.add_pooling_layer(kernel_size=3, stride_length=2)
    self.model.add_fully_connected_layer(output_size=1024, activation_function='tanh')
    self.model.add_output_layer()
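# A hedged usage sketch for the wrapper defined by the __init__ above. The wrapper
# class name (rosetteLeafRegressor) and its forward_pass()/shut_down() methods are
# assumptions about the surrounding DPP network-wrapper API and do not appear in the
# snippet itself; only the batch_size constructor argument is taken from it.
def _example_leaf_count(images, batch_size=8):
    net = rosetteLeafRegressor(batch_size=batch_size)  # assumed class name for the wrapper above
    counts = net.forward_pass(images)                  # assumed inference entry point
    net.shut_down()                                    # assumed cleanup of the underlying session
    return counts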
def test_set_augmentation_rotation():
    model1 = dpp.RegressionModel()
    model2 = dpp.SemanticSegmentationModel()

    # Check the type-checking
    with pytest.raises(TypeError):
        model1.set_augmentation_rotation("True")
    with pytest.raises(TypeError):
        model1.set_augmentation_rotation(True, crop_borders="False")
    with pytest.raises(RuntimeError):
        model2.set_augmentation_rotation(True)

    # Check that rotation augmentation can be turned on the simple way
    model1.set_augmentation_rotation(True)
    assert model1._augmentation_rotate is True
    assert model1._rotate_crop_borders is False

    # Check that it can be turned on with a border cropping setting
    model1.set_augmentation_rotation(False, crop_borders=True)
    assert model1._augmentation_rotate is False
    assert model1._rotate_crop_borders is True
def test_det_random_augmentations(test_data_dir):
    model = dpp.RegressionModel()
    data_path = os.path.join(test_data_dir, 'test_Ara2013_Canon', '')
    model.set_validation_split(0)
    model.set_test_split(0)
    model.set_maximum_training_epochs(1)
    model.set_image_dimensions(128, 128, 3)
    model.set_resize_images(True)
    model.set_augmentation_brightness_and_contrast(True)
    model.set_augmentation_flip_horizontal(True)
    model.set_augmentation_flip_vertical(True)
    model.load_ippn_leaf_count_dataset_from_directory(data_path)

    def get_random_augmentations():
        # Re-seed, re-parse the dataset, and return every augmented image-label pair
        with model._graph.as_default():
            model.set_random_seed(7)
            labels = [' '.join(map(str, label)) for label in model._raw_labels]
            model._parse_dataset(model._raw_image_files, labels, None,
                                 None, None, None,
                                 None, None, None)
            data_iter = model._train_dataset.make_one_shot_iterator().get_next()
            data = []
            for _ in range(len(model._raw_image_files)):
                xy = model._session.run(data_iter)
                data.append(xy)
            return data

    # Augmenting twice with the same random seed should be deterministic
    data_1 = get_random_augmentations()
    model._reset_graph()
    model._reset_session()
    data_2 = get_random_augmentations()
    assert np.all([np.all(x[0] == y[0]) and x[1] == y[1] for x, y in zip(data_1, data_2)])
def test_set_patch_size(model):
    with pytest.raises(TypeError):
        model.set_patch_size(1.0, 1)
    with pytest.raises(ValueError):
        model.set_patch_size(-1, 1)
    with pytest.raises(TypeError):
        model.set_patch_size(1, 1.0)
    with pytest.raises(ValueError):
        model.set_patch_size(1, -1)


@pytest.mark.parametrize("model,bad_loss,good_loss",
                         [(dpp.ClassificationModel(), 'l2', 'softmax cross entropy'),
                          (dpp.RegressionModel(), 'softmax cross entropy', 'l2'),
                          (dpp.SemanticSegmentationModel(), 'l2', 'sigmoid cross entropy'),
                          (dpp.ObjectDetectionModel(), 'l2', 'yolo'),
                          (dpp.CountCeptionModel(), 'l2', 'l1'),
                          (dpp.HeatmapObjectCountingModel(), 'l1', 'sigmoid cross entropy')])
def test_set_loss_function(model, bad_loss, good_loss):
    with pytest.raises(TypeError):
        model.set_loss_function(0)
    with pytest.raises(ValueError):
        model.set_loss_function(bad_loss)
    model.set_loss_function(good_loss)


def test_set_yolo_parameters():
    model = dpp.ObjectDetectionModel()
    with pytest.raises(RuntimeError):
#
# Used to train the rosette-leaf-regressor model
#

import deepplantphenomics as dpp

model = dpp.RegressionModel(debug=True, save_checkpoints=False, report_rate=20)

# 3 channels for colour, 1 channel for greyscale
channels = 3

# Setup and hyperparameters
model.set_batch_size(4)
model.set_number_of_threads(8)
model.set_image_dimensions(128, 128, channels)
model.set_resize_images(True)
model.set_num_regression_outputs(1)
model.set_test_split(0.2)
model.set_validation_split(0.0)
model.set_learning_rate(0.0001)
model.set_weight_initializer('xavier')
model.set_maximum_training_epochs(500)

# Augmentation options
model.set_augmentation_brightness_and_contrast(True)
model.set_augmentation_flip_horizontal(True)
model.set_augmentation_flip_vertical(True)
model.set_augmentation_crop(True)

# Load all VIS images from a Lemnatec image repository
def train(train_dir, label_fn, model_dir, epoch, lr):
    """
    train_dir: the directory where your training images are located
    label_fn: the file name of the label file under train_dir. Specify just the file name, not the path.
    model_dir: the name of your model directory. Model results will be saved to this directory.
    epoch: the number of training epochs. The dpp documentation suggests 100 for plant stress and 500 for counting.
    lr: the learning rate. 0.0001 is used in the dpp leaf counting example.
    """
    model_dir_path = Path(model_dir)
    if not model_dir_path.exists():
        model_dir_path.mkdir()
    tensorboard_dir_path = model_dir_path / 'tensorboard'
    img_dir = Path(train_dir)

    model = dpp.RegressionModel(debug=True, save_checkpoints=True, report_rate=150,
                                tensorboard_dir=str(tensorboard_dir_path),
                                save_dir=str(model_dir_path))

    # model.set_batch_size(72)
    model.set_batch_size(45)
    # model.set_number_of_threads(10)
    model.set_number_of_threads(100)
    model.set_image_dimensions(418, 283, 3)
    model.set_resize_images(True)
    model.set_num_regression_outputs(1)
    model.set_test_split(0.0)
    model.set_validation_split(0.0)
    model.set_learning_rate(float(lr))
    model.set_weight_initializer('xavier')
    model.set_maximum_training_epochs(int(epoch))

    # Augmentation options
    model.set_augmentation_brightness_and_contrast(True)
    model.set_augmentation_flip_horizontal(True)
    model.set_augmentation_flip_vertical(True)
    # model.set_augmentation_crop(True)

    # Load labels and images
    model.load_multiple_labels_from_csv(img_dir / label_fn, id_column=0)
    model.load_images_with_ids_from_directory(img_dir)

    # Define a model architecture
    model.add_input_layer()
    model.add_convolutional_layer(filter_dimension=[5, 5, 3, 32], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_convolutional_layer(filter_dimension=[5, 5, 32, 64], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
    model.add_pooling_layer(kernel_size=3, stride_length=2)
    model.add_output_layer()

    # Begin training the model
    model.begin_training()
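# A minimal usage sketch for train() defined above. The directory names, label file
# name, and hyperparameter values here are illustrative assumptions rather than values
# from the original project; the call signature matches the function above, and the
# epoch and learning-rate choices follow the suggestions in its docstring.
if __name__ == '__main__':
    train(train_dir='data/train_images',  # hypothetical directory of training images
          label_fn='labels.csv',          # hypothetical per-image label CSV inside train_dir
          model_dir='leaf_count_model',   # checkpoints and tensorboard logs are saved here
          epoch=500,                      # ~500 epochs suggested for counting tasks
          lr=0.0001)                      # learning rate from the dpp leaf counting example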