def test_add_output_layer():
    model1 = dpp.ClassificationModel()
    model2 = dpp.SemanticSegmentationModel()
    model3 = dpp.CountCeptionModel()
    model1.set_image_dimensions(5, 5, 3)
    model2.set_image_dimensions(5, 5, 3)

    # No layers have been added yet, so an output layer can't be attached
    with pytest.raises(RuntimeError):
        model1.add_output_layer(2.5, 3)

    model1.add_input_layer()
    model2.add_input_layer()
    model3.add_input_layer()
    with pytest.raises(TypeError):
        model1.add_output_layer("2")
    with pytest.raises(ValueError):
        model1.add_output_layer(-0.4)
    with pytest.raises(TypeError):
        model1.add_output_layer(2.0, 3.4)
    with pytest.raises(ValueError):
        model1.add_output_layer(2.0, -4)
    with pytest.raises(RuntimeError):
        model2.add_output_layer(output_size=3)  # Semantic segmentation needed for this runtime error to occur

    model1.add_output_layer(2.5, 3)
    assert isinstance(model1._last_layer(), dpp.layers.fullyConnectedLayer)
    with pytest.warns(Warning):
        model2.add_output_layer(regularization_coefficient=2.0)
    assert isinstance(model2._last_layer(), dpp.layers.convLayer)
    model3.add_output_layer()
    assert isinstance(model3._last_layer(), dpp.layers.inputLayer)
def __init__(self, batch_size=32):
    """A network which provides segmentation masks from plant images"""
    m_path, _ = os.path.split(__file__)
    checkpoint_path = os.path.join(m_path, 'network_states', self.__dir_name)

    import deepplantphenomics as dpp

    self.model = dpp.SemanticSegmentationModel(debug=False, load_from_saved=checkpoint_path)

    # Define model hyperparameters
    self.model.set_batch_size(batch_size)
    self.model.set_number_of_threads(1)
    self.model.set_image_dimensions(self.img_height, self.img_width, 3)
    self.model.set_resize_images(True)

    # Define a model architecture
    self.model.add_input_layer()
    self.model.add_convolutional_layer(filter_dimension=[3, 3, 3, 16], stride_length=1, activation_function='relu')
    self.model.add_convolutional_layer(filter_dimension=[3, 3, 16, 32], stride_length=1, activation_function='relu')
    self.model.add_convolutional_layer(filter_dimension=[5, 5, 32, 32], stride_length=1, activation_function='relu')
    self.model.add_output_layer()
def test_set_augmentation_flip_horizontal():
    model1 = dpp.RegressionModel()
    model2 = dpp.SemanticSegmentationModel()
    with pytest.raises(TypeError):
        model1.set_augmentation_flip_horizontal("True")
    with pytest.raises(RuntimeError):
        model2.set_augmentation_flip_horizontal(True)
    model1.set_augmentation_flip_horizontal(True)
def test_set_augmentation_crop():
    model1 = dpp.RegressionModel()
    model2 = dpp.SemanticSegmentationModel()
    with pytest.raises(TypeError):
        model1.set_augmentation_crop("True", 0.5)
    with pytest.raises(TypeError):
        model1.set_augmentation_crop(True, "5")
    with pytest.raises(ValueError):
        model1.set_augmentation_crop(False, -1.0)
    with pytest.raises(RuntimeError):
        model2.set_augmentation_crop(True)
    model1.set_augmentation_crop(True)
def test_graph_problem_loss_semantic():
    model = dpp.SemanticSegmentationModel()
    assert model._loss_fn == 'sigmoid cross entropy'
    assert model._num_seg_class == 2

    in_batch_binary = np.array([[[[1.0], [0.9]], [[0.1], [0.0]]],
                                [[[1.0], [0.0]], [[0.8], [0.2]]]], np.float32)
    in_label_binary = np.array([[[[1.0], [0.0]], [[0.0], [1.0]]],
                                [[[1.0], [0.0]], [[0.0], [1.0]]]], np.float32)
    out_loss_binary = np.array([0.7480, 0.6939], np.float32)

    # Correct outputs are one-hot encoded but as inputs to softmax; -50 should turn into a small probability and 0
    # should turn into a probability close to 1 (i.e. softmax(0, -50, -50) ~= [1, 0, 0])
    in_batch_multi = np.array([[[[0.0, -50.0, -50.0], [-50.0, 0.0, -50.0]],
                                [[-50.0, -50.0, 0.0], [-50.0, 0.0, -50.0]]],
                               [[[-50.0, 0.0, -50.0], [-50.0, 0.0, -50.0]],
                                [[-2.0, 0.0, -2.0], [-50.0, -50.0, 0.0]]]], np.float32)
    in_label_multi = np.array([[[[0], [1]], [[2], [1]]],
                               [[[1], [1]], [[0], [2]]]], np.int32)
    out_loss_multi = np.array([0.0000, 0.5599], np.float32)

    # A mismatch between the loss function and the class count should be caught
    with pytest.raises(RuntimeError):
        model._loss_fn = 'sigmoid cross entropy'
        model._num_seg_class = 3
        model._graph_problem_loss(in_batch_multi, in_label_multi)
    with pytest.raises(RuntimeError):
        model._loss_fn = 'softmax cross entropy'
        model._num_seg_class = 2
        model._graph_problem_loss(in_batch_binary, in_label_binary)

    model._loss_fn = 'sigmoid cross entropy'
    model._num_seg_class = 2
    with tf.Session() as sess:
        out_binary_tensor = model._graph_problem_loss(in_batch_binary, in_label_binary)
        out_binary = sess.run(out_binary_tensor)
        assert np.all(out_binary.shape == (2,))
        assert np.allclose(out_binary, out_loss_binary, atol=0.0001)

    model._loss_fn = 'softmax cross entropy'
    model._num_seg_class = 3
    with tf.Session() as sess:
        out_multi_tensor = model._graph_problem_loss(in_batch_multi, in_label_multi)
        out_multi = sess.run(out_multi_tensor)
        assert np.all(out_multi.shape == (2,))
        assert np.allclose(out_multi, out_loss_multi, atol=0.0001)
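# The expected values above can be reproduced outside TensorFlow. A minimal
# sketch, assuming the per-sample losses match the standard numerically stable
# formulations of tf.nn.sigmoid_cross_entropy_with_logits and
# tf.nn.sparse_softmax_cross_entropy_with_logits, averaged over the pixels:
def _sigmoid_xent(logits, labels):
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

def _sparse_softmax_xent(logits, labels):
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return -np.take_along_axis(log_probs, labels[..., None], axis=-1).squeeze(-1)

# First binary sample: mean per-pixel loss over the 2x2 image is ~0.7480
print(_sigmoid_xent(np.array([[1.0, 0.9], [0.1, 0.0]], np.float32),
                    np.array([[1.0, 0.0], [0.0, 1.0]], np.float32)).mean())

# Second multi-class sample: only the [-2, 0, -2] pixel contributes
# (-log softmax_0 ~= 2.24), so the mean over its 4 pixels is ~0.5599
print(_sparse_softmax_xent(np.array([[[-50.0, 0.0, -50.0], [-50.0, 0.0, -50.0]],
                                     [[-2.0, 0.0, -2.0], [-50.0, -50.0, 0.0]]], np.float32),
                           np.array([[1, 1], [0, 2]])).mean())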
def test_set_num_segmentation_classes():
    model = dpp.SemanticSegmentationModel()
    assert model._num_seg_class == 2
    assert model._loss_fn == 'sigmoid cross entropy'

    with pytest.raises(TypeError):
        model.set_num_segmentation_classes('2')
    with pytest.raises(ValueError):
        model.set_num_segmentation_classes(1)

    model.set_num_segmentation_classes(5)
    assert model._num_seg_class == 5
    assert model._loss_fn == 'softmax cross entropy'
    model.set_num_segmentation_classes(2)
    assert model._num_seg_class == 2
    assert model._loss_fn == 'sigmoid cross entropy'
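# A minimal sketch of the setter behaviour the test above pins down. Only the
# private attribute names (_num_seg_class, _loss_fn) come from the tests; the
# rest is an assumed implementation, not DPP's actual code:
def set_num_segmentation_classes_sketch(model, num_class):
    if not isinstance(num_class, int):
        raise TypeError("num_class must be an int")
    if num_class < 2:
        raise ValueError("there must be at least 2 segmentation classes")
    model._num_seg_class = num_class
    # Binary masks use per-pixel sigmoids; 3+ classes switch to a softmax
    model._loss_fn = ('sigmoid cross entropy' if num_class == 2
                      else 'softmax cross entropy')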
def test_forward_pass_residual():
    model = dpp.SemanticSegmentationModel()
    model.set_image_dimensions(50, 50, 1)
    model.set_batch_size(1)

    # Set up a small deterministic network with residuals
    model.add_input_layer()
    model.add_skip_connection(downsampled=False)
    model.add_skip_connection(downsampled=False)

    # Create an input image and its expected output
    test_im = np.full([50, 50, 1], 0.5, dtype=np.float32)
    expected_im = np.full([50, 50, 1], 1.0, dtype=np.float32)

    # Add the layers and get the forward pass
    model._add_layers_to_graph()
    out_im = model.forward_pass(test_im)

    assert out_im.size == expected_im.size
    assert np.all(out_im == expected_im)
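# Why expected_im is all 1.0: the two add_skip_connection calls form an
# identity residual pair. Assuming paired skip semantics (the first call
# records the activation, the second adds it back in), the network above
# reduces to the following arithmetic:
_x = np.full([50, 50, 1], 0.5, dtype=np.float32)  # output of the input layer
_recorded = _x          # first skip connection records the activation
_out = _x + _recorded   # second skip connection adds it back in
assert np.all(_out == 1.0)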
def test_set_augmentation_rotation():
    model1 = dpp.RegressionModel()
    model2 = dpp.SemanticSegmentationModel()

    # Check the type-checking
    with pytest.raises(TypeError):
        model1.set_augmentation_rotation("True")
    with pytest.raises(TypeError):
        model1.set_augmentation_rotation(True, crop_borders="False")
    with pytest.raises(RuntimeError):
        model2.set_augmentation_rotation(True)

    # Check that rotation augmentation can be turned on the simple way
    model1.set_augmentation_rotation(True)
    assert model1._augmentation_rotate is True
    assert model1._rotate_crop_borders is False

    # Check that it can be turned on with a border cropping setting
    model1.set_augmentation_rotation(False, crop_borders=True)
    assert model1._augmentation_rotate is False
    assert model1._rotate_crop_borders is True
def test_set_patch_size(model):
    with pytest.raises(TypeError):
        model.set_patch_size(1.0, 1)
    with pytest.raises(ValueError):
        model.set_patch_size(-1, 1)
    with pytest.raises(TypeError):
        model.set_patch_size(1, 1.0)
    with pytest.raises(ValueError):
        model.set_patch_size(1, -1)


@pytest.mark.parametrize("model,bad_loss,good_loss",
                         [(dpp.ClassificationModel(), 'l2', 'softmax cross entropy'),
                          (dpp.RegressionModel(), 'softmax cross entropy', 'l2'),
                          (dpp.SemanticSegmentationModel(), 'l2', 'sigmoid cross entropy'),
                          (dpp.ObjectDetectionModel(), 'l2', 'yolo'),
                          (dpp.CountCeptionModel(), 'l2', 'l1'),
                          (dpp.HeatmapObjectCountingModel(), 'l1', 'sigmoid cross entropy')])
def test_set_loss_function(model, bad_loss, good_loss):
    with pytest.raises(TypeError):
        model.set_loss_function(0)
    with pytest.raises(ValueError):
        model.set_loss_function(bad_loss)
    model.set_loss_function(good_loss)


def test_set_yolo_parameters():
    model = dpp.ObjectDetectionModel()
    with pytest.raises(RuntimeError):
        model.set_yolo_parameters()
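# Sketch of the per-model loss whitelisting that test_set_loss_function above
# implies. The supported_losses argument stands in for whatever internal list
# each model keeps (an assumption), but the TypeError/ValueError split mirrors
# the raises in the test:
def set_loss_function_sketch(model, loss_fn, supported_losses):
    if not isinstance(loss_fn, str):
        raise TypeError("loss_fn must be a str")
    if loss_fn not in supported_losses:
        raise ValueError("'%s' is not a supported loss function for this model" % loss_fn)
    model._loss_fn = loss_fn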