# Example 1
def caltech_pyramid_ensemble(input_shape=None, input_tensor=None, n_classes=None,
                             weights_path: Union[None, str] = None) -> Model:
    """
    Defines a caltech pyramid-ensemble network.

    A strong submodel predicts all 102 classes; two weak submodels predict the
    first 61 and last 41 classes respectively. Overlapping ranges are averaged
    and the result is concatenated and passed through a softmax.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel Strong.
    submodel_strong = caltech_pyramid_ensemble_submodel_strong(input_shape, input_tensor, n_classes, weights_path)
    outputs_submodel_strong = submodel_strong.output

    # Submodel Weak 1.
    submodel_weak1 = caltech_pyramid_ensemble_submodel_weak1(input_shape, input_tensor, n_classes, weights_path)
    # Bugfix: a Keras layer must be applied to a tensor, not a Layer object,
    # so use the last layer's output tensor.
    outputs_submodel_weak1 = Dense(61, name='outputs_submodel_weak1')(submodel_weak1.layers[-1].output)

    # Average the predictions for the first half classes (0-60).
    averaged_first_half_classes = Average(name='averaged_first_half_classes')(
        [
            Crop(1, 0, 61)(outputs_submodel_strong),
            outputs_submodel_weak1
        ]
    )

    output_list.append(averaged_first_half_classes)

    # Submodel Weak 2.
    submodel_weak2 = caltech_pyramid_ensemble_submodel_weak2(input_shape, input_tensor, n_classes, weights_path)
    # Bugfix: 41 units (not 61) so the tensor matches Crop(1, 61, 102) below —
    # Average requires identically shaped inputs. Also apply to the output tensor.
    outputs_submodel_weak2 = Dense(41, name='outputs_submodel_weak2')(submodel_weak2.layers[-1].output)

    # Average the predictions for the last half classes (61-101).
    averaged_last_half_classes = Average(name='averaged_last_half_classes')(
        [
            Crop(1, 61, 102)(outputs_submodel_strong),
            outputs_submodel_weak2
        ]
    )

    output_list.append(averaged_last_half_classes)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='caltech_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
# Example 2
def omniglot_pyramid_ensemble(input_shape=None,
                              input_tensor=None,
                              n_classes=None,
                              weights_path: Union[None, str] = None) -> Model:
    """
    Builds an omniglot pyramid-ensemble network.

    A strong submodel covers all 1623 classes; two weak submodels cover the
    first 812 and last 811 classes. Overlapping ranges are averaged, joined,
    and normalised with a softmax.

    :param n_classes: kept for compatibility with the main script.
    :param input_shape: input shape of the network; may be omitted when input_tensor is given.
    :param input_tensor: input tensor of the network; may be omitted when input_shape is given.
    :param weights_path: path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Build the submodels and grab their output tensors.
    strong = omniglot_pyramid_ensemble_submodel_strong(
        input_shape, input_tensor, n_classes, weights_path).output
    weak1 = omniglot_pyramid_ensemble_submodel_weak1(
        input_shape, input_tensor, 3, weights_path).output
    weak2 = omniglot_pyramid_ensemble_submodel_weak2(
        input_shape, input_tensor, 3, weights_path).output

    # Average the first 812 classes of the strong submodel with weak 1.
    first_classes = Average(name='averaged_first_classes')([
        Crop(1, 0, 812,
             name='first_classes_submodel_strong')(strong),
        Crop(1, 0, 812,
             name='first_classes_submodel_weak1')(weak1)
    ])

    # Average the last 811 classes of the strong submodel with weak 2.
    last_classes = Average(name='averaged_last_classes')([
        Crop(1, 812, 1623,
             name='last_classes_submodel_strong')(strong),
        Crop(1, 0, 811,
             name='last_classes_submodel_weak2')(weak2)
    ])

    # Join both ranges and normalise into class probabilities.
    outputs = Concatenate(name='output')([first_classes, last_classes])
    outputs = Softmax(name='output_softmax')(outputs)

    model = Model(inputs, outputs, name='omniglot_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
# Example 3
def cifar100_complicated_ensemble(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar100 network.

    Five convolutional submodels each predict a subset of the 100 classes;
    overlapping ten-class ranges are averaged and all predictions are
    concatenated before a final softmax.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    # Submodel 1.
    # Block1.
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block1_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block1_pool')(x1)

    # Block2
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block2_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block2_pool')(x1)

    # Block3
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block3_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block3_pool')(x1)

    # Add Submodel 1 top layers.
    x1 = Flatten(name='submodel1_flatten')(x1)
    outputs1 = Dense(20, name='submodel1_output')(x1)
    # Crop outputs1 in order to create the first submodel's output.
    # NOTE(review): Crop(1, a, b) presumably slices units a..b along axis 1
    # (project helper) — confirm against its definition.
    outputs_first_submodel = Crop(1, 0, 10,
                                  name='first_ten_classes_submodel')(outputs1)
    output_list.append(outputs_first_submodel)

    # Submodel 2.
    # Block1.
    x2 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x2 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = BatchNormalization(name='submodel2_block1_batch-norm')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel2_block1_pool')(x2)

    # Block2
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = BatchNormalization(name='submodel2_block2_batch-norm')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel2_block2_pool')(x2)

    # Add Submodel 2 top layers.
    x2 = Flatten(name='submodel2_flatten')(x2)
    outputs2 = Dense(30, name='submodel2_output')(x2)

    # Average the predictions for the second ten classes of the first two submodels.
    averaged_classes_20_30 = Average(name='averaged_second_ten_classes')(
        [Crop(1, 10, 20)(outputs1),
         Crop(1, 0, 10)(outputs2)])
    # Crop outputs2 in order to create the third ten classes output.
    outputs_classes_30_40 = Crop(1, 10, 20, name='third_ten_classes')(outputs2)
    # Concatenate classes outputs in order to create the second submodel's output.
    outputs_second_submodel = Concatenate(name='second_submodel')(
        [averaged_classes_20_30, outputs_classes_30_40])
    output_list.append(outputs_second_submodel)

    # Submodel 3.
    # Block1.
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = BatchNormalization(name='submodel3_block1_batch-norm')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel3_block1_pool')(x3)

    # Block2
    x3 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = BatchNormalization(name='submodel3_block2_batch-norm')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel3_block2_pool')(x3)

    # Add Submodel 3 top layers.
    x3 = Flatten(name='submodel3_flatten')(x3)
    outputs3 = Dense(30, name='submodel3_output')(x3)

    # Average the predictions for the fourth ten classes of the last two submodels.
    averaged_classes_30_40 = Average(name='averaged_fourth_ten_class')(
        [Crop(1, 20, 30)(outputs2),
         Crop(1, 0, 10)(outputs3)])
    # Crop outputs3 in order to create the fifth and sixth ten-class outputs.
    outputs_classes_40_50 = Crop(1, 10, 20, name='fifth_ten_class')(outputs3)
    outputs_classes_50_60 = Crop(1, 20, 30, name='sixth_ten_class')(outputs3)
    # Concatenate classes outputs in order to create the third submodel's output.
    outputs_third_submodel = Concatenate(name='third_submodel')(
        [averaged_classes_30_40, outputs_classes_40_50, outputs_classes_50_60])
    output_list.append(outputs_third_submodel)

    # Submodel 4.
    # Block1.
    x4 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x4 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block1_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block1_pool')(x4)

    # Block2
    x4 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block2_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block2_pool')(x4)

    # Block3
    x4 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block3_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block3_pool')(x4)

    # Add Submodel 4 top layers (classes 60-79).
    x4 = Flatten(name='submodel4_flatten')(x4)
    outputs4 = Dense(20, name='60-80_classes_submodel4')(x4)
    output_list.append(outputs4)

    # Submodel 5.
    # Block1.
    x5 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x5 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block1_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block1_pool')(x5)

    # Block2
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block2_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block2_pool')(x5)

    # Block3
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block3_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block3_pool')(x5)

    # Add Submodel 5 top layers (classes 80-99).
    # NOTE(review): the layer name says 'submodel4' although this is submodel
    # 5's head; kept byte-identical because renaming would break name-based
    # weight loading of previously saved checkpoints.
    x5 = Flatten(name='submodel5_flatten')(x5)
    outputs5 = Dense(20, name='80-100_classes_submodel4')(x5)
    output_list.append(outputs5)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_complicated_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
# Example 4
def cifar100_pyramid_ensemble(input_shape=None,
                              input_tensor=None,
                              n_classes=None,
                              weights_path: Union[None, str] = None) -> Model:
    """
    Builds a cifar100 pyramid-ensemble network.

    One strong submodel predicts all 100 classes; two weak submodels predict
    the first and last 50 classes. Overlapping halves are averaged, joined,
    and normalised with a softmax.

    :param n_classes: kept for compatibility with the main script.
    :param input_shape: input shape of the network; may be omitted when input_tensor is given.
    :param input_tensor: input tensor of the network; may be omitted when input_shape is given.
    :param weights_path: path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel Strong: three conv blocks and a 100-way dense head.
    strong = Conv2D(64, (3, 3),
                    padding='same',
                    activation='elu',
                    name='submodel_strong_block1_conv1')(inputs)
    strong = Conv2D(64, (3, 3),
                    padding='same',
                    activation='elu',
                    name='submodel_strong_block1_conv2')(strong)
    strong = MaxPooling2D(pool_size=(2, 2), name='submodel_strong_block1_pool')(strong)

    strong = Conv2D(128, (3, 3),
                    padding='same',
                    activation='elu',
                    name='submodel_strong_block2_conv1')(strong)
    strong = Conv2D(128, (3, 3),
                    padding='same',
                    activation='elu',
                    name='submodel_strong_block2_conv2')(strong)
    strong = MaxPooling2D(pool_size=(2, 2), name='submodel_strong_block2_pool')(strong)

    strong = BatchNormalization(name='submodel_strong_block3_batch-norm')(strong)
    strong = Conv2D(256, (3, 3),
                    padding='same',
                    activation='elu',
                    name='submodel_strong_block3_conv')(strong)
    strong = Dropout(0.5, name='submodel_strong_block3_dropout', seed=0)(strong)

    strong = Flatten(name='submodel_strong_flatten')(strong)
    strong_scores = Dense(100, name='submodel_strong_output')(strong)

    # Submodel Weak 1: one conv block and a 50-way dense head.
    weak1 = Conv2D(128, (3, 3),
                   padding='same',
                   activation='elu',
                   name='submodel_weak_1_block1_conv1')(inputs)
    weak1 = Conv2D(128, (3, 3),
                   padding='same',
                   activation='elu',
                   name='submodel_weak_1_block1_conv2')(weak1)
    weak1 = MaxPooling2D(pool_size=(2, 2), name='submodel_weak_1_block1_pool')(weak1)

    weak1 = Flatten(name='submodel_weak_1_flatten')(weak1)
    weak1_scores = Dense(50, name='submodel_weak_1_output')(weak1)

    # First 50 classes: average the strong submodel's first half with weak 1.
    first_half = Average(name='averaged_first_half_classes')(
        [Crop(1, 0, 50)(strong_scores), weak1_scores])

    # Submodel Weak 2: same topology as weak 1, for the last 50 classes.
    weak2 = Conv2D(128, (3, 3),
                   padding='same',
                   activation='elu',
                   name='submodel_weak_2_block1_conv1')(inputs)
    weak2 = Conv2D(128, (3, 3),
                   padding='same',
                   activation='elu',
                   name='submodel_weak_2_block1_conv2')(weak2)
    weak2 = MaxPooling2D(pool_size=(2, 2), name='submodel_weak_2_block1_pool')(weak2)

    weak2 = Flatten(name='submodel_weak_2_flatten')(weak2)
    weak2_scores = Dense(50, name='submodel_weak_2_output')(weak2)

    # Last 50 classes: average the strong submodel's second half with weak 2.
    last_half = Average(name='averaged_last_half_classes')(
        [Crop(1, 50, 100)(strong_scores), weak2_scores])

    # Join both halves and normalise into class probabilities.
    outputs = Concatenate(name='output')([first_half, last_half])
    outputs = Softmax(name='output_softmax')(outputs)

    model = Model(inputs, outputs, name='cifar100_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
# Example 5
def cifar100_complicated_ensemble_v2(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Builds a cifar100 complicated ensemble (v2) network.

    Five 41-way submodels cover overlapping class ranges; every range of the
    100 classes is the average of two submodel slices, concatenated and
    normalised with a softmax.

    :param n_classes: kept for compatibility with the main script.
    :param input_shape: input shape of the network; may be omitted when input_tensor is given.
    :param input_tensor: input tensor of the network; may be omitted when input_shape is given.
    :param weights_path: path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Build the five submodels and apply them to the shared input.
    builders = (cifar100_complicated_ensemble_v2_submodel1,
                cifar100_complicated_ensemble_v2_submodel2,
                cifar100_complicated_ensemble_v2_submodel3,
                cifar100_complicated_ensemble_v2_submodel4,
                cifar100_complicated_ensemble_v2_submodel5)
    out1, out2, out3, out4, out5 = [
        builder(input_shape, input_tensor, 41, weights_path)(inputs)
        for builder in builders
    ]

    # Correct submodels 2 - 5: drop their first (extra) output unit.
    out2, out3, out4, out5 = [
        Crop(1, 1, out.shape[1])(out) for out in (out2, out3, out4, out5)
    ]

    # Each ensemble output averages two overlapping submodel slices:
    # (name, (source, lo, hi), (source, lo, hi)).
    spec = [
        ('classes_0-9', (out1, 0, 10), (out5, 10, 20)),
        ('classes_10-39', (out1, 10, 40), (out2, 0, 30)),
        ('classes_40-49', (out2, 30, 40), (out3, 0, 10)),
        ('classes_50-59', (out3, 10, 20), (out5, 0, 10)),
        ('classes_60-79', (out3, 20, 40), (out4, 0, 20)),
        ('classes_80-99', (out4, 20, 40), (out5, 10, 30)),
    ]
    outputs_list = [
        Average(name=name)([Crop(1, a_lo, a_hi)(a_src),
                            Crop(1, b_lo, b_hi)(b_src)])
        for name, (a_src, a_lo, a_hi), (b_src, b_lo, b_hi) in spec
    ]

    # Join all class predictions and normalise into probabilities.
    outputs = Concatenate(name='output')(outputs_list)
    outputs = Softmax(name='output_softmax')(outputs)

    model = Model(inputs, outputs, name='cifar100_complicated_ensemble_v2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
# Example 6
def svhn_complicated_ensemble_v2(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Builds a svhn complicated ensemble (v2) network.

    Five 5-way submodels cover overlapping class ranges; every range of the
    10 classes is the average of two submodel slices, concatenated and
    normalised with a softmax.

    :param n_classes: kept for compatibility with the main script.
    :param input_shape: input shape of the network; may be omitted when input_tensor is given.
    :param input_tensor: input tensor of the network; may be omitted when input_shape is given.
    :param weights_path: path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Build the five submodels and apply them to the shared input.
    builders = (svhn_complicated_ensemble_v2_submodel1,
                svhn_complicated_ensemble_v2_submodel2,
                svhn_complicated_ensemble_v2_submodel3,
                svhn_complicated_ensemble_v2_submodel4,
                svhn_complicated_ensemble_v2_submodel5)
    out1, out2, out3, out4, out5 = [
        builder(input_shape, input_tensor, 5, weights_path)(inputs)
        for builder in builders
    ]

    # Correct submodels 2 - 5: drop their first (extra) output unit.
    out2, out3, out4, out5 = [
        Crop(1, 1, out.shape[1])(out) for out in (out2, out3, out4, out5)
    ]

    # Each ensemble output averages two overlapping submodel slices:
    # (name, (source, lo, hi), (source, lo, hi)).
    spec = [
        ('class_0', (out1, 0, 1), (out5, 1, 2)),
        ('classes_1_2_3', (out1, 1, 4), (out2, 0, 3)),
        ('class_4', (out2, 3, 4), (out3, 0, 1)),
        ('class_5', (out3, 1, 2), (out5, 0, 1)),
        ('classes_6_7', (out3, 2, 4), (out4, 0, 2)),
        ('classes_8_9', (out4, 2, 4), (out5, 1, 3)),
    ]
    outputs_list = [
        Average(name=name)([Crop(1, a_lo, a_hi)(a_src),
                            Crop(1, b_lo, b_hi)(b_src)])
        for name, (a_src, a_lo, a_hi), (b_src, b_lo, b_hi) in spec
    ]

    # Join all class predictions and normalise into probabilities.
    outputs = Concatenate(name='output')(outputs_list)
    outputs = Softmax(name='output_softmax')(outputs)

    model = Model(inputs, outputs, name='svhn_complicated_ensemble_v2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
# Example 7
def caltech_complicated_ensemble(input_shape=None, input_tensor=None, n_classes=None,
                                 weights_path: Union[None, str] = None) -> Model:
    """
    Defines a caltech complicated ensemble network.

    Five submodels each predict a subset of the classes; overlapping ten-class
    ranges are averaged and all predictions are concatenated before a final
    softmax.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel 1.
    # Bugfix (applies to every submodel head below): a Keras layer must be
    # applied to a tensor, not a Layer object, so use `.layers[-2].output`.
    submodel1 = caltech_complicated_ensemble_submodel1(input_shape, input_tensor, n_classes, weights_path)
    outputs1 = Dense(22, name='submodel1_output')(submodel1.layers[-2].output)
    # Crop outputs1 in order to create the first submodel's output.
    outputs_first_submodel = Crop(1, 0, 12, name='first_twelve_classes_submodel')(outputs1)
    output_list.append(outputs_first_submodel)

    # Submodel 2.
    submodel2 = caltech_complicated_ensemble_submodel2(input_shape, input_tensor, n_classes, weights_path)
    outputs2 = Dense(30, name='submodel2_output')(submodel2.layers[-2].output)

    # Average the predictions for the overlapping classes of the first two submodels.
    averaged_classes_20_30 = Average(name='averaged_second_ten_classes')(
        [Crop(1, 12, 22)(outputs1), Crop(1, 0, 10)(outputs2)])
    # Crop outputs2 in order to create the third ten classes output.
    outputs_classes_30_40 = Crop(1, 10, 20, name='third_ten_classes')(outputs2)
    # Concatenate classes outputs in order to create the second submodel's output.
    outputs_second_submodel = Concatenate(name='second_submodel')([averaged_classes_20_30, outputs_classes_30_40])
    output_list.append(outputs_second_submodel)

    # Submodel 3.
    submodel3 = caltech_complicated_ensemble_submodel3(input_shape, input_tensor, n_classes, weights_path)
    outputs3 = Dense(30, name='submodel3_output')(submodel3.layers[-2].output)

    # Average the predictions for the overlapping classes of submodels 2 and 3.
    averaged_classes_30_40 = Average(name='averaged_fourth_ten_class')([
        Crop(1, 20, 30)(outputs2),
        Crop(1, 0, 10)(outputs3)
    ])
    # Crop outputs3 in order to create the fifth and sixth ten-class outputs.
    outputs_classes_40_50 = Crop(1, 10, 20, name='fifth_ten_class')(outputs3)
    outputs_classes_50_60 = Crop(1, 20, 30, name='sixth_ten_class')(outputs3)
    # Concatenate classes outputs in order to create the third submodel's output.
    outputs_third_submodel = Concatenate(name='third_submodel')([
        averaged_classes_30_40,
        outputs_classes_40_50,
        outputs_classes_50_60
    ])
    output_list.append(outputs_third_submodel)

    # Submodel 4.
    submodel4 = caltech_complicated_ensemble_submodel4(input_shape, input_tensor, n_classes, weights_path)
    outputs4 = Dense(20, name='submodel4_output')(submodel4.layers[-2].output)
    output_list.append(outputs4)

    # Submodel 5.
    submodel5 = caltech_complicated_ensemble_submodel5(input_shape, input_tensor, n_classes, weights_path)
    outputs5 = Dense(20, name='submodel5_output')(submodel5.layers[-2].output)
    output_list.append(outputs5)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='caltech_complicated_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model