Example #1
def svhn_complicated_ensemble_submodel3(input_shape=None, input_tensor=None, n_classes=None,
                                        weights_path: Union[None, str] = None) -> Model:
    """
    Defines an SVHN network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    x = Conv2D(64, (3, 3), padding='same', activation='elu', name='conv1', kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization(name='batch-norm')(x)
    x = Conv2D(64, (3, 3), padding='same', activation='elu', name='conv2', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D(pool_size=(2, 2), name='pool')(x)
    x = Dropout(0.3, name='dropout', seed=0)(x)

    # Add top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create Submodel 3.
    model = Model(inputs, outputs, name='svhn_complicated_ensemble_submodel3')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
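Note: every example in this collection calls two helpers, create_inputs and load_weights, whose definitions are not included here. The sketch below is only a plausible reconstruction inferred from the call sites (the ValueError message is borrowed from the explicit check in svhn_model2 further down); the real helpers may differ, and tensorflow.keras is assumed as the backend.

from tensorflow.keras.layers import Input


def create_inputs(input_shape=None, input_tensor=None):
    """Return an input tensor built from a shape, or pass an existing tensor through (assumed behaviour)."""
    if input_shape is None and input_tensor is None:
        raise ValueError('You need to specify input shape or input tensor for the network.')
    if input_tensor is None:
        return Input(shape=input_shape)
    return input_tensor


def load_weights(weights_path, model):
    """Load weights into the model when a path is given (assumed behaviour)."""
    if weights_path is not None:
        model.load_weights(weights_path)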
Example #2
def cifar100_pyramid_ensemble_submodel_weak2(input_shape=None, input_tensor=None, n_classes=None,
                                             weights_path: Union[None, str] = None) -> Model:
    """
    Defines a CIFAR-100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Convolutions.
    x = Conv2D(128, (3, 3), padding='same', activation='elu', name='conv1')(inputs)
    x = Conv2D(128, (3, 3), padding='same', activation='elu', name='conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='pool')(x)

    # Add top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create Submodel 2.
    model = Model(inputs, outputs, name='cifar100_pyramid_ensemble_submodel_weak2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #3
def omniglot_student_strong(n_classes: int,
                            input_shape=None,
                            input_tensor=None,
                            weights_path: Union[None, str] = None) -> Model:
    """
    Defines an Omniglot strong student network.

    :param n_classes: the number of classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained Omniglot tiny network's weights.
    :return: Keras functional Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    x = Conv2D(64, (2, 2),
               activation='relu',
               name='conv1',
               kernel_regularizer=l2(1e-4))(inputs)
    x = BatchNormalization(name='batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='pool')(x)
    x = Dropout(0.3, name='dropout', seed=0)(x)

    # Add top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create strong student.
    model = Model(inputs, outputs, name='omniglot_student_strong')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #4
def svhn_model2(n_classes: int, input_shape=None, input_tensor=None,
                weights_path: Union[None, str] = None) -> Sequential:
    """
    Defines an SVHN network.

    :param n_classes: the number of classes.
    The parameter is kept, even though its value is known,
    so that the model can also be used to predict only a subset of the classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras Sequential Model.
    """
    if input_shape is None and input_tensor is None:
        raise ValueError('You need to specify input shape or input tensor for the network.')

    # Create a Sequential model.
    model = Sequential(name='svhn_model2')

    if input_shape is None:
        # Create an InputLayer using the input tensor.
        model.add(InputLayer(input_tensor=input_tensor))

    # Block1
    if input_tensor is None:
        first_conv = Conv2D(32, (3, 3), padding='same', activation='elu', name='block1_conv1', input_shape=input_shape)
    else:
        first_conv = Conv2D(32, (3, 3), padding='same', activation='elu', name='block1_conv1')

    model.add(first_conv)
    model.add(BatchNormalization(name='block1_batch-norm1'))
    model.add(Conv2D(32, (3, 3), padding='same', activation='elu', name='block1_conv2'))
    model.add(BatchNormalization(name='block1_batch-norm2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool'))
    model.add(Dropout(0.2, name='block1_dropout', seed=0))

    # Block2
    model.add(Conv2D(64, (3, 3), padding='same', activation='elu', name='block2_conv1'))
    model.add(BatchNormalization(name='block2_batch-norm1'))
    model.add(Conv2D(64, (3, 3), padding='same', activation='elu', name='block2_conv2'))
    model.add(BatchNormalization(name='block2_batch-norm2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool'))
    model.add(Dropout(0.4, name='block2_dropout', seed=0))

    # Block3
    model.add(Conv2D(256, (3, 3), padding='same', activation='elu', name='block3_conv1'))
    model.add(BatchNormalization(name='block3_batch-norm1'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='block3_pool'))
    model.add(Dropout(0.6, name='block3_dropout', seed=0))

    # Add top layers.
    model.add(Flatten())
    model.add(Dense(n_classes, activation='softmax'))

    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
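A minimal usage sketch for svhn_model2, assuming the standard SVHN setting of 32x32 RGB images and 10 digit classes (these values are assumptions, not stated in the snippet itself):

# Build, compile and inspect the model; shapes/classes are assumed SVHN defaults.
model = svhn_model2(n_classes=10, input_shape=(32, 32, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()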
Example #5
def cifar10_pyramid_ensemble_submodel_strong(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a CIFAR-10 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Block1.
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='elu',
               name='block1_conv1')(inputs)
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='elu',
               name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)

    # Block2
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               name='block2_conv1')(x)
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               name='block2_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool')(x)

    # Block3
    x = BatchNormalization(name='block3_batch-norm')(x)
    x = Conv2D(128, (3, 3),
               padding='same',
               activation='elu',
               name='block3_conv')(x)
    x = Dropout(0.5, name='block3_dropout', seed=0)(x)

    # Add Submodel Strong top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create Submodel Strong.
    model = Model(inputs,
                  outputs,
                  name='cifar10_pyramid_ensemble_submodel_strong')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #6
def caltech_pyramid_ensemble(input_shape=None, input_tensor=None, n_classes=None,
                             weights_path: Union[None, str] = None) -> Model:
    """
    Defines a Caltech network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel Strong.
    submodel_strong = caltech_pyramid_ensemble_submodel_strong(input_shape, input_tensor, n_classes, weights_path)
    outputs_submodel_strong = submodel_strong.output

    # Submodel Weak 1.
    submodel_weak1 = caltech_pyramid_ensemble_submodel_weak1(input_shape, input_tensor, n_classes, weights_path)
    outputs_submodel_weak1 = Dense(61, name='outputs_submodel_weak1')(submodel_weak1.layers[-2].output)

    # Average the predictions for the first half of the classes.
    averaged_first_half_classes = Average(name='averaged_first_half_classes')(
        [
            Crop(1, 0, 61)(outputs_submodel_strong),
            outputs_submodel_weak1
        ]
    )

    output_list.append(averaged_first_half_classes)

    # Submodel Weak 2.
    submodel_weak2 = caltech_pyramid_ensemble_submodel_weak2(input_shape, input_tensor, n_classes, weights_path)
    # 41 units, so the shape matches the Crop(1, 61, 102) slice averaged below.
    outputs_submodel_weak2 = Dense(41, name='outputs_submodel_weak2')(submodel_weak2.layers[-2].output)

    # Average the predictions for the last half of the classes.
    averaged_last_half_classes = Average(name='averaged_last_half_classes')(
        [
            Crop(1, 61, 102)(outputs_submodel_strong),
            outputs_submodel_weak2
        ]
    )

    output_list.append(averaged_last_half_classes)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='caltech_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
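The ensemble examples also depend on a Crop helper that is not shown. From calls such as Crop(1, 0, 61)(tensor) it evidently returns a layer that slices the given dimension to [start, end); the Lambda-based sketch below is an assumption, not the original implementation.

from tensorflow.keras.layers import Lambda


def Crop(dimension, start, end, name=None):
    # Assumed behaviour: return a layer slicing `dimension` of its input to [start, end).
    def slice_tensor(x):
        slices = [slice(None)] * len(x.shape)
        slices[dimension] = slice(start, end)
        return x[tuple(slices)]

    return Lambda(slice_tensor, name=name)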
Example #7
def omniglot_pyramid_ensemble(input_shape=None,
                              input_tensor=None,
                              n_classes=None,
                              weights_path: Union[None, str] = None) -> Model:
    """
    Defines an Omniglot network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel_strong = omniglot_pyramid_ensemble_submodel_strong(
        input_shape, input_tensor, n_classes, weights_path)
    # 812/811 outputs, so the crops below stay within the submodels' output widths.
    submodel_weak1 = omniglot_pyramid_ensemble_submodel_weak1(
        input_shape, input_tensor, 812, weights_path)
    submodel_weak2 = omniglot_pyramid_ensemble_submodel_weak2(
        input_shape, input_tensor, 811, weights_path)
    # Get their outputs.
    outputs_submodel_strong = submodel_strong.output
    outputs_submodel_weak1 = submodel_weak1.output
    outputs_submodel_weak2 = submodel_weak2.output

    # Average classes.
    first_classes = Average(name='averaged_first_classes')([
        Crop(1, 0, 812,
             name='first_classes_submodel_strong')(outputs_submodel_strong),
        Crop(1, 0, 812,
             name='first_classes_submodel_weak1')(outputs_submodel_weak1)
    ])

    last_classes = Average(name='averaged_last_classes')([
        Crop(1, 812, 1623,
             name='last_classes_submodel_strong')(outputs_submodel_strong),
        Crop(1, 0, 811,
             name='last_classes_submodel_weak2')(outputs_submodel_weak2)
    ])

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')([first_classes, last_classes])
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='omniglot_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #8
def cifar10_student_weak(n_classes: int,
                         input_shape=None,
                         input_tensor=None,
                         weights_path: Union[None, str] = None) -> Model:
    """
    Defines a CIFAR-10 weak student network.

    :param n_classes: the number of classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained CIFAR-10 tiny network's weights.
    :return: Keras functional Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-6

    # Block1.
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='elu',
               name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization(name='block1_batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)
    x = Dropout(0.2, name='block1_dropout', seed=0)(x)

    # Block2.
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool')(x)
    x = Dropout(0.3, name='block2_dropout', seed=0)(x)

    # Add top layers.
    x = Flatten()(x)
    x = Dense(n_classes)(x)
    outputs = Activation('softmax', name='softmax')(x)

    # Create model.
    model = Model(inputs, outputs, name='cifar10_student_weak')

    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #9
def omniglot_model2(n_classes: int,
                    input_shape=None,
                    input_tensor=None,
                    weights_path: Union[None, str] = None) -> Model:
    """
    Defines an Omniglot network.

    :param n_classes: the number of classes.
    The parameter is kept, even though its value is known,
    so that the model can also be used to predict only a subset of the classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 5e-4

    # Block1.
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='elu',
               name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization(name='block1_batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)
    x = Dropout(0.2, name='block1_dropout', seed=0)(x)

    # Block2
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool')(x)
    x = Dropout(0.4, name='block2_dropout', seed=0)(x)

    # Add top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create model.
    model = Model(inputs, outputs, name='omniglot_model2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #10
def omniglot_complicated_ensemble_submodel1(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines an Omniglot network.

    :param n_classes: the number of classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    # Block1.
    x = Conv2D(32, (3, 3),
               activation='relu',
               name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization(name='block1_batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)
    x = Dropout(0.2, name='block1_dropout', seed=0)(x)

    # Block2
    x = Conv2D(32, (3, 3),
               activation='relu',
               name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool')(x)
    x = Dropout(0.4, name='block2_dropout', seed=0)(x)

    # Add top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create Submodel 1.
    model = Model(inputs,
                  outputs,
                  name='omniglot_complicated_ensemble_submodel1')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #11
def caltech_student_weak(n_classes: int,
                         input_shape=None,
                         input_tensor=None,
                         weights_path: Union[None, str] = None) -> Model:
    """
    Defines a Caltech weak student network.

    :param n_classes: the number of classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained Caltech tiny network's weights.
    :return: Keras functional Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization()(x)
    x = Dropout(0.3, seed=0)(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4, seed=0)(x)

    x = Flatten()(x)
    x = Dense(512, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5, seed=0)(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create model.
    model = Model(inputs, outputs, name='caltech_student_weak')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #12
def cifar100_architectures_diverse_ensemble(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar100_architectures_diverse_ensemble network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel_1 = cifar100_model1(n_classes, input_shape, input_tensor,
                                 weights_path)
    submodel_2 = cifar100_model2(n_classes, input_shape, input_tensor,
                                 weights_path)
    submodel_3 = cifar100_model3(n_classes, input_shape, input_tensor,
                                 weights_path)

    # Give the submodels ensemble-specific names (`_name` is Keras' internal name attribute).
    submodel_1._name = 'cifar100_baseline_ensemble_submodel1'
    submodel_2._name = 'cifar100_baseline_ensemble_submodel2'
    submodel_3._name = 'cifar100_baseline_ensemble_submodel3'

    # Get their outputs.
    outputs_submodel1 = submodel_1(inputs)
    outputs_submodel2 = submodel_2(inputs)
    outputs_submodel3 = submodel_3(inputs)

    # Average classes.
    outputs = Average(name='averaged_predictions')(
        [outputs_submodel1, outputs_submodel2, outputs_submodel3])

    # Create model.
    model = Model(inputs,
                  outputs,
                  name='cifar100_architectures_diverse_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #13
def caltech_model1(n_classes: int,
                   input_shape=None,
                   input_tensor=None,
                   weights_path: Union[None, str] = None) -> Model:
    """
    Defines a Caltech network.

    :param n_classes: the number of classes.
    The parameter is kept, even though its value is known,
    so that the model can also be used to predict only a subset of the classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Flatten()(x)
    x = Dense(512, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create model.
    model = Model(inputs, outputs, name='caltech_model1')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #14
def cifar100_complicated_ensemble(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a CIFAR-100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    # Submodel 1.
    # Block1.
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block1_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block1_pool')(x1)

    # Block2
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block2_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block2_pool')(x1)

    # Block3
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block3_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block3_pool')(x1)

    # Add Submodel 1 top layers.
    x1 = Flatten(name='submodel1_flatten')(x1)
    outputs1 = Dense(20, name='submodel1_output')(x1)
    # Crop outputs1 in order to create the first submodel's output.
    outputs_first_submodel = Crop(1, 0, 10,
                                  name='first_ten_classes_submodel')(outputs1)
    output_list.append(outputs_first_submodel)

    # Submodel 2.
    # Block1.
    x2 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x2 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = BatchNormalization(name='submodel2_block1_batch-norm')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel2_block1_pool')(x2)

    # Block2
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = BatchNormalization(name='submodel2_block2_batch-norm')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel2_block2_pool')(x2)

    # Add Submodel 2 top layers.
    x2 = Flatten(name='submodel2_flatten')(x2)
    outputs2 = Dense(30, name='submodel2_output')(x2)

    # Average the predictions for the second ten classes of the first two submodels.
    averaged_classes_20_30 = Average(name='averaged_second_ten_classes')(
        [Crop(1, 10, 20)(outputs1),
         Crop(1, 0, 10)(outputs2)])
    # Crop outputs2 in order to create the third ten classes output.
    outputs_classes_30_40 = Crop(1, 10, 20, name='third_ten_classes')(outputs2)
    # Concatenate classes outputs in order to create the second submodel's output.
    outputs_second_submodel = Concatenate(name='second_submodel')(
        [averaged_classes_20_30, outputs_classes_30_40])
    output_list.append(outputs_second_submodel)

    # Submodel 3.
    # Block1.
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = BatchNormalization(name='submodel3_block1_batch-norm')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel3_block1_pool')(x3)

    # Block2
    x3 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = BatchNormalization(name='submodel3_block2_batch-norm')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel3_block2_pool')(x3)

    # Add Submodel 3 top layers.
    x3 = Flatten(name='submodel3_flatten')(x3)
    outputs3 = Dense(30, name='submodel3_output')(x3)

    # Average the predictions for the fourth ten classes of submodels 2 and 3.
    averaged_classes_30_40 = Average(name='averaged_fourth_ten_class')(
        [Crop(1, 20, 30)(outputs2),
         Crop(1, 0, 10)(outputs3)])
    # Crop outputs3 in order to create the fifth and sixth ten-class outputs.
    outputs_classes_40_50 = Crop(1, 10, 20, name='fifth_ten_class')(outputs3)
    outputs_classes_50_60 = Crop(1, 20, 30, name='sixth_ten_class')(outputs3)
    # Concatenate classes outputs in order to create the third submodel's output.
    outputs_third_submodel = Concatenate(name='third_submodel')(
        [averaged_classes_30_40, outputs_classes_40_50, outputs_classes_50_60])
    output_list.append(outputs_third_submodel)

    # Submodel 4.
    # Block1.
    x4 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x4 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block1_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block1_pool')(x4)

    # Block2
    x4 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block2_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block2_pool')(x4)

    # Block3
    x4 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block3_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block3_pool')(x4)

    # Add Submodel 4 top layers.
    x4 = Flatten(name='submodel4_flatten')(x4)
    outputs4 = Dense(20, name='60-80_classes_submodel4')(x4)
    output_list.append(outputs4)

    # Submodel 5.
    # Block1.
    x5 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x5 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block1_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block1_pool')(x5)

    # Block2
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block2_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block2_pool')(x5)

    # Block3
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block3_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block3_pool')(x5)

    # Add Submodel 5 top layers.
    x5 = Flatten(name='submodel5_flatten')(x5)
    outputs5 = Dense(20, name='80-100_classes_submodel5')(x5)
    output_list.append(outputs5)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_complicated_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
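The class bookkeeping in cifar100_complicated_ensemble is easy to get wrong, so a quick sanity check helps; the segment widths below are read directly off the Dense, Crop and Concatenate calls above.

# Widths of the tensors appended to output_list, in order:
# first ten classes (10), second submodel (10 + 10),
# third submodel (10 + 10 + 10), submodel 4 (20), submodel 5 (20).
segment_widths = [10, 10 + 10, 10 + 10 + 10, 20, 20]
assert sum(segment_widths) == 100  # covers all CIFAR-100 classes exactly once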
Example #15
def cifar100_pyramid_ensemble(input_shape=None,
                              input_tensor=None,
                              n_classes=None,
                              weights_path: Union[None, str] = None) -> Model:
    """
    Defines a CIFAR-100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel Strong.
    # Block1.
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block1_conv1')(inputs)
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block1_conv2')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel_strong_block1_pool')(x1)

    # Block2
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block2_conv1')(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block2_conv2')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel_strong_block2_pool')(x1)

    # Block3
    x1 = BatchNormalization(name='submodel_strong_block3_batch-norm')(x1)
    x1 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block3_conv')(x1)
    x1 = Dropout(0.5, name='submodel_strong_block3_dropout', seed=0)(x1)

    # Add Submodel Strong top layers.
    x1 = Flatten(name='submodel_strong_flatten')(x1)
    outputs_submodel_strong = Dense(100, name='submodel_strong_output')(x1)

    # Submodel Weak 1.
    # Block1.
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_1_block1_conv1')(inputs)
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_1_block1_conv2')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel_weak_1_block1_pool')(x2)

    # Add Submodel Weak 1 top layers.
    x2 = Flatten(name='submodel_weak_1_flatten')(x2)
    outputs2 = Dense(50, name='submodel_weak_1_output')(x2)

    # Average the predictions for the first 50 classes.
    averaged_first_half_classes = Average(name='averaged_first_half_classes')(
        [Crop(1, 0, 50)(outputs_submodel_strong), outputs2])

    output_list.append(averaged_first_half_classes)

    # Submodel Weak 2.
    # Block1.
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_2_block1_conv1')(inputs)
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_2_block1_conv2')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel_weak_2_block1_pool')(x3)

    # Add Submodel Weak 2 top layers.
    x3 = Flatten(name='submodel_weak_2_flatten')(x3)
    outputs3 = Dense(50, name='submodel_weak_2_output')(x3)

    # Average the predictions for the last 50 classes.
    averaged_last_half_classes = Average(name='averaged_last_half_classes')(
        [Crop(1, 50, 100)(outputs_submodel_strong), outputs3])

    output_list.append(averaged_last_half_classes)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #16
def svhn_complicated_ensemble_v2(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines an SVHN network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    outputs_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel1 = svhn_complicated_ensemble_v2_submodel1(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel2 = svhn_complicated_ensemble_v2_submodel2(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel3 = svhn_complicated_ensemble_v2_submodel3(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel4 = svhn_complicated_ensemble_v2_submodel4(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel5 = svhn_complicated_ensemble_v2_submodel5(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)

    # Get their outputs.
    outputs_submodel1 = submodel1(inputs)
    outputs_submodel2 = submodel2(inputs)
    outputs_submodel3 = submodel3(inputs)
    outputs_submodel4 = submodel4(inputs)
    outputs_submodel5 = submodel5(inputs)

    # Discard the first output of submodels 2-5.
    outputs_submodel2 = Crop(1, 1,
                             outputs_submodel2.shape[1])(outputs_submodel2)
    outputs_submodel3 = Crop(1, 1,
                             outputs_submodel3.shape[1])(outputs_submodel3)
    outputs_submodel4 = Crop(1, 1,
                             outputs_submodel4.shape[1])(outputs_submodel4)
    outputs_submodel5 = Crop(1, 1,
                             outputs_submodel5.shape[1])(outputs_submodel5)

    # Create the complicated outputs.
    # Class 0.
    outputs_list.append(
        Average(name='class_0')([
            Crop(1, 0, 1)(outputs_submodel1),
            Crop(1, 1, 2)(outputs_submodel5)
        ]))

    # Classes 1, 2, 3.
    outputs_list.append(
        Average(name='classes_1_2_3')([
            Crop(1, 1, 4)(outputs_submodel1),
            Crop(1, 0, 3)(outputs_submodel2)
        ]))

    # Class 4.
    outputs_list.append(
        Average(name='class_4')([
            Crop(1, 3, 4)(outputs_submodel2),
            Crop(1, 0, 1)(outputs_submodel3)
        ]))

    # Class 5.
    outputs_list.append(
        Average(name='class_5')([
            Crop(1, 1, 2)(outputs_submodel3),
            Crop(1, 0, 1)(outputs_submodel5)
        ]))

    # Classes 6, 7.
    outputs_list.append(
        Average(name='classes_6_7')([
            Crop(1, 2, 4)(outputs_submodel3),
            Crop(1, 0, 2)(outputs_submodel4)
        ]))

    # Classes 8, 9.
    outputs_list.append(
        Average(name='classes_8_9')([
            Crop(1, 2, 4)(outputs_submodel4),
            Crop(1, 1, 3)(outputs_submodel5)
        ]))

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(outputs_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='svhn_complicated_ensemble_v2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
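In svhn_complicated_ensemble_v2 every digit class receives exactly two submodel votes, which the Average layers combine; the coverage map below is read off the Crop/Average calls above and can be checked mechanically.

# (start, end) class range -> the two submodels whose crops are averaged.
coverage = {
    (0, 1): (1, 5),   # class 0
    (1, 4): (1, 2),   # classes 1-3
    (4, 5): (2, 3),   # class 4
    (5, 6): (3, 5),   # class 5
    (6, 8): (3, 4),   # classes 6-7
    (8, 10): (4, 5),  # classes 8-9
}
covered = sorted(c for lo, hi in coverage for c in range(lo, hi))
assert covered == list(range(10))  # each SVHN class is predicted exactly twice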
Example #17
def cifar100_complicated_ensemble_v2(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a CIFAR-100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    outputs_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel1 = cifar100_complicated_ensemble_v2_submodel1(
        input_shape, input_tensor, 41, weights_path)
    submodel2 = cifar100_complicated_ensemble_v2_submodel2(
        input_shape, input_tensor, 41, weights_path)
    submodel3 = cifar100_complicated_ensemble_v2_submodel3(
        input_shape, input_tensor, 41, weights_path)
    submodel4 = cifar100_complicated_ensemble_v2_submodel4(
        input_shape, input_tensor, 41, weights_path)
    submodel5 = cifar100_complicated_ensemble_v2_submodel5(
        input_shape, input_tensor, 41, weights_path)

    # Get their outputs.
    outputs_submodel1 = submodel1(inputs)
    outputs_submodel2 = submodel2(inputs)
    outputs_submodel3 = submodel3(inputs)
    outputs_submodel4 = submodel4(inputs)
    outputs_submodel5 = submodel5(inputs)

    # Discard the first output of submodels 2-5.
    outputs_submodel2 = Crop(1, 1,
                             outputs_submodel2.shape[1])(outputs_submodel2)
    outputs_submodel3 = Crop(1, 1,
                             outputs_submodel3.shape[1])(outputs_submodel3)
    outputs_submodel4 = Crop(1, 1,
                             outputs_submodel4.shape[1])(outputs_submodel4)
    outputs_submodel5 = Crop(1, 1,
                             outputs_submodel5.shape[1])(outputs_submodel5)

    # Create the complicated outputs.
    # Classes 0-9.
    outputs_list.append(
        Average(name='classes_0-9')([
            Crop(1, 0, 10)(outputs_submodel1),
            Crop(1, 10, 20)(outputs_submodel5)
        ]))

    # Classes 10-39.
    outputs_list.append(
        Average(name='classes_10-39')([
            Crop(1, 10, 40)(outputs_submodel1),
            Crop(1, 0, 30)(outputs_submodel2)
        ]))

    # Classes 40-49.
    outputs_list.append(
        Average(name='classes_40-49')([
            Crop(1, 30, 40)(outputs_submodel2),
            Crop(1, 0, 10)(outputs_submodel3)
        ]))

    # Classes 50-59.
    outputs_list.append(
        Average(name='classes_50-59')([
            Crop(1, 10, 20)(outputs_submodel3),
            Crop(1, 0, 10)(outputs_submodel5)
        ]))

    # Classes 60-79.
    outputs_list.append(
        Average(name='classes_60-79')([
            Crop(1, 20, 40)(outputs_submodel3),
            Crop(1, 0, 20)(outputs_submodel4)
        ]))

    # Classes 80-99.
    outputs_list.append(
        Average(name='classes_80-99')([
            Crop(1, 20, 40)(outputs_submodel4),
            Crop(1, 10, 30)(outputs_submodel5)
        ]))

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(outputs_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_complicated_ensemble_v2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #18
def caltech_pyramid_ensemble_submodel_strong(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a Caltech network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-5

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='elu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.5)(x)

    x = Flatten()(x)
    x = Dense(512, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create Submodel strong.
    model = Model(inputs,
                  outputs,
                  name='caltech_pyramid_ensemble_submodel_strong')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #19
def caltech_complicated_ensemble(input_shape=None, input_tensor=None, n_classes=None,
                                 weights_path: Union[None, str] = None) -> Model:
    """
    Defines a Caltech network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel 1.
    submodel1 = caltech_complicated_ensemble_submodel1(input_shape, input_tensor, n_classes, weights_path)
    outputs1 = Dense(22, name='submodel1_output')(submodel1.layers[-2].output)
    # Crop outputs1 in order to create the first submodel's output.
    outputs_first_submodel = Crop(1, 0, 12, name='first_twelve_classes_submodel')(outputs1)
    output_list.append(outputs_first_submodel)

    # Submodel 2.
    submodel2 = caltech_complicated_ensemble_submodel2(input_shape, input_tensor, n_classes, weights_path)
    outputs2 = Dense(30, name='submodel2_output')(submodel2.layers[-2].output)

    # Average the predictions for the second ten classes of the first two submodels.
    averaged_classes_20_30 = Average(name='averaged_second_ten_classes')(
        [Crop(1, 12, 22)(outputs1), Crop(1, 0, 10)(outputs2)])
    # Crop outputs2 in order to create the third ten classes output.
    outputs_classes_30_40 = Crop(1, 10, 20, name='third_ten_classes')(outputs2)
    # Concatenate classes outputs in order to create the second submodel's output.
    outputs_second_submodel = Concatenate(name='second_submodel')([averaged_classes_20_30, outputs_classes_30_40])
    output_list.append(outputs_second_submodel)

    # Submodel 3.
    submodel3 = caltech_complicated_ensemble_submodel3(input_shape, input_tensor, n_classes, weights_path)
    outputs3 = Dense(30, name='submodel3_output')(submodel3.layers[-2].output)

    # Average the predictions for the fourth ten classes of submodels 2 and 3.
    averaged_classes_30_40 = Average(name='averaged_fourth_ten_class')([
        Crop(1, 20, 30)(outputs2),
        Crop(1, 0, 10)(outputs3)
    ])
    # Crop outputs3 in order to create the fifth and sixth ten-class outputs.
    outputs_classes_40_50 = Crop(1, 10, 20, name='fifth_ten_class')(outputs3)
    outputs_classes_50_60 = Crop(1, 20, 30, name='sixth_ten_class')(outputs3)
    # Concatenate classes outputs in order to create the third submodel's output.
    outputs_third_submodel = Concatenate(name='third_submodel')([
        averaged_classes_30_40,
        outputs_classes_40_50,
        outputs_classes_50_60
    ])
    output_list.append(outputs_third_submodel)

    # Submodel 4.
    submodel4 = caltech_complicated_ensemble_submodel4(input_shape, input_tensor, n_classes, weights_path)
    outputs4 = Dense(20, name='submodel4_output')(submodel4.layers[-2].output)
    output_list.append(outputs4)

    # Submodel 5.
    submodel5 = caltech_complicated_ensemble_submodel5(input_shape, input_tensor, n_classes, weights_path)
    outputs5 = Dense(20, name='submodel5_output')(submodel5.layers[-2].output)
    output_list.append(outputs5)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='caltech_complicated_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model