Example No. 1
import deepplantphenomics as dpp
import pytest


def test_add_output_layer():
    model1 = dpp.ClassificationModel()
    model2 = dpp.SemanticSegmentationModel()
    model3 = dpp.CountCeptionModel()
    model1.set_image_dimensions(5, 5, 3)
    model2.set_image_dimensions(5, 5, 3)

    # Adding an output layer before any other layer raises a RuntimeError
    with pytest.raises(RuntimeError):
        model1.add_output_layer(2.5, 3)
    model1.add_input_layer()
    model2.add_input_layer()
    model3.add_input_layer()
    # Reject bad types and values for the regularization coefficient and output size
    with pytest.raises(TypeError):
        model1.add_output_layer("2")
    with pytest.raises(ValueError):
        model1.add_output_layer(-0.4)
    with pytest.raises(TypeError):
        model1.add_output_layer(2.0, 3.4)
    with pytest.raises(ValueError):
        model1.add_output_layer(2.0, -4)
    with pytest.raises(RuntimeError):
        # This RuntimeError only occurs for semantic segmentation models
        model2.add_output_layer(output_size=3)

    # Valid calls: each model type ends with a different kind of output layer
    model1.add_output_layer(2.5, 3)
    assert isinstance(model1._last_layer(), dpp.layers.fullyConnectedLayer)
    with pytest.warns(Warning):
        model2.add_output_layer(regularization_coefficient=2.0)
    assert isinstance(model2._last_layer(), dpp.layers.convLayer)
    model3.add_output_layer()
    assert isinstance(model3._last_layer(), dpp.layers.inputLayer)
Example No. 2
    def __init__(self, batch_size=1, image_height=300, image_width=300, image_depth=3):
        """A network which counts flowers in plant images"""

        import os

        m_path, _ = os.path.split(__file__)
        checkpoint_path = os.path.join(m_path, 'network_states', self.__dir_name)

        import deepplantphenomics as dpp

        self.model = dpp.CountCeptionModel(debug=True, load_from_saved=checkpoint_path)

        # Define model hyperparameters
        self.model.set_loss_function('l1')
        self.model.set_batch_size(batch_size)
        self.model.set_number_of_threads(4)
        self.model.set_image_dimensions(image_height, image_width, image_depth)

        # Define a model architecture
        self.model.add_input_layer()
        self.model.add_convolutional_layer(filter_dimension=[3, 3, 3, 64],
                                           stride_length=1,
                                           activation_function='lrelu',
                                           padding=self.patch_size,
                                           batch_norm=True,
                                           epsilon=1e-5,
                                           decay=0.9)
        self.model.add_paral_conv_block(filter_dimension_1=[1, 1, 0, 16],
                                        filter_dimension_2=[3, 3, 0, 16])
        self.model.add_paral_conv_block(filter_dimension_1=[1, 1, 0, 16],
                                        filter_dimension_2=[3, 3, 0, 32])
        self.model.add_convolutional_layer(filter_dimension=[14, 14, 0, 16],
                                           stride_length=1,
                                           activation_function='lrelu',
                                           padding=0,
                                           batch_norm=True,
                                           epsilon=1e-5,
                                           decay=0.9)
        self.model.add_paral_conv_block(filter_dimension_1=[1, 1, 0, 112],
                                        filter_dimension_2=[3, 3, 0, 48])
        self.model.add_paral_conv_block(filter_dimension_1=[1, 1, 0, 64],
                                        filter_dimension_2=[3, 3, 0, 32])
        self.model.add_paral_conv_block(filter_dimension_1=[1, 1, 0, 40],
                                        filter_dimension_2=[3, 3, 0, 40])
        self.model.add_paral_conv_block(filter_dimension_1=[1, 1, 0, 32],
                                        filter_dimension_2=[3, 3, 0, 96])
        self.model.add_convolutional_layer(filter_dimension=[18, 18, 0, 32],
                                           stride_length=1,
                                           activation_function='lrelu',
                                           padding=0,
                                           batch_norm=True,
                                           epsilon=1e-5,
                                           decay=0.9)
        self.model.add_convolutional_layer(filter_dimension=[1, 1, 0, 64],
                                           stride_length=1,
                                           activation_function='lrelu',
                                           padding=0,
                                           batch_norm=True,
                                           epsilon=1e-5,
                                           decay=0.9)
        self.model.add_convolutional_layer(filter_dimension=[1, 1, 0, 64],
                                           stride_length=1,
                                           activation_function='lrelu',
                                           padding=0,
                                           batch_norm=True,
                                           epsilon=1e-5,
                                           decay=0.9)
        self.model.add_convolutional_layer(filter_dimension=[1, 1, 0, 1],
                                           stride_length=1,
                                           activation_function='lrelu',
                                           padding=0,
                                           batch_norm=True,
                                           epsilon=1e-5,
                                           decay=0.9)
Example No. 3
        model.set_patch_size(1.0, 1)
    with pytest.raises(ValueError):
        model.set_patch_size(-1, 1)
    with pytest.raises(TypeError):
        model.set_patch_size(1, 1.0)
    with pytest.raises(ValueError):
        model.set_patch_size(1, -1)


@pytest.mark.parametrize(
    "model,bad_loss,good_loss",
    [(dpp.ClassificationModel(), 'l2', 'softmax cross entropy'),
     (dpp.RegressionModel(), 'softmax cross entropy', 'l2'),
     (dpp.SemanticSegmentationModel(), 'l2', 'sigmoid cross entropy'),
     (dpp.ObjectDetectionModel(), 'l2', 'yolo'),
     (dpp.CountCeptionModel(), 'l2', 'l1'),
     (dpp.HeatmapObjectCountingModel(), 'l1', 'sigmoid cross entropy')])
def test_set_loss_function(model, bad_loss, good_loss):
    with pytest.raises(TypeError):
        model.set_loss_function(0)
    with pytest.raises(ValueError):
        model.set_loss_function(bad_loss)
    model.set_loss_function(good_loss)


def test_set_yolo_parameters():
    model = dpp.ObjectDetectionModel()
    with pytest.raises(RuntimeError):
        model.set_yolo_parameters()
    model.set_image_dimensions(448, 448, 3)
    model.set_yolo_parameters()
Example No. 4
#
# Used to train the Count-ception model
#

import deepplantphenomics as dpp

model = dpp.CountCeptionModel(debug=True,
                              load_from_saved=False,
                              save_checkpoints=False,
                              report_rate=20)

patch_size = 32

# Setup and hyperparameters
model.set_loss_function('l1')
model.set_batch_size(2)
model.set_number_of_threads(4)
model.set_image_dimensions(300, 300, 3)

model.set_test_split(0.3)
model.set_validation_split(0.2)
model.set_learning_rate(0.0001)
model.set_weight_initializer('xavier')
model.set_maximum_training_epochs(10)

# Load images and ground truth from a pickle file
model.load_countception_dataset_from_pkl_file('MBM-dataset.pkl')

# Define a model architecture
model.use_predefined_model("countception")
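
Note: the snippet ends once the predefined architecture has been selected and does not show how training is started. A minimal, assumed continuation (not part of the original example) would launch training with DPP's begin_training() call:

# Assumed continuation: start training once the dataset, hyperparameters
# and architecture have been defined.
model.begin_training()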