Example #1
def caltech_pyramid_ensemble(input_shape=None, input_tensor=None, n_classes=None,
                             weights_path: Union[None, str] = None) -> Model:
    """
    Defines a caltech network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel Strong.
    submodel_strong = caltech_pyramid_ensemble_submodel_strong(input_shape, input_tensor, n_classes, weights_path)
    outputs_submodel_strong = submodel_strong.output

    # Submodel Weak 1.
    submodel_weak1 = caltech_pyramid_ensemble_submodel_weak1(input_shape, input_tensor, n_classes, weights_path)
    outputs_submodel_weak1 = Dense(61, name='outputs_submodel_weak1')(submodel_weak1.layers[-1].output)

    # Average the predictions for the first half of the classes.
    averaged_first_half_classes = Average(name='averaged_first_half_classes')(
        [
            Crop(1, 0, 61)(outputs_submodel_strong),
            outputs_submodel_weak1
        ]
    )

    output_list.append(averaged_first_half_classes)

    # Submodel Weak 2.
    submodel_weak2 = caltech_pyramid_ensemble_submodel_weak2(input_shape, input_tensor, n_classes, weights_path)
    outputs_submodel_weak2 = Dense(41, name='outputs_submodel_weak2')(submodel_weak2.layers[-1].output)

    # Average the predictions for the last half of the classes.
    averaged_last_half_classes = Average(name='averaged_last_half_classes')(
        [
            Crop(1, 61, 102)(outputs_submodel_strong),
            outputs_submodel_weak2
        ]
    )

    output_list.append(averaged_last_half_classes)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='caltech_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
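These ensembles lean on a `Crop` helper that is not shown anywhere in this listing. A minimal sketch of what it presumably does, returning a `Lambda` layer that keeps a slice along one axis (the generic-slicing implementation here is an assumption):

from tensorflow.keras.layers import Lambda

def Crop(dimension, start, end, name=None):
    """Return a layer that keeps [start:end) along `dimension` (0 = batch)."""
    def crop_func(x):
        # Build a slice tuple that touches only the requested dimension.
        slices = [slice(None)] * (dimension + 1)
        slices[dimension] = slice(start, end)
        return x[tuple(slices)]
    return Lambda(crop_func, name=name)

With this definition, `Crop(1, 0, 61)` keeps the first 61 logits of a `(batch, 102)` output, so both inputs to each `Average` have matching shapes.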
Example #2
def omniglot_pyramid_ensemble(input_shape=None,
                              input_tensor=None,
                              n_classes=None,
                              weights_path: Union[None, str] = None) -> Model:
    """
    Defines an omniglot network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel_strong = omniglot_pyramid_ensemble_submodel_strong(
        input_shape, input_tensor, n_classes, weights_path)
    submodel_weak1 = omniglot_pyramid_ensemble_submodel_weak1(
        input_shape, input_tensor, 3, weights_path)
    submodel_weak2 = omniglot_pyramid_ensemble_submodel_weak2(
        input_shape, input_tensor, 3, weights_path)
    # Get their outputs.
    outputs_submodel_strong = submodel_strong.output
    outputs_submodel_weak1 = submodel_weak1.output
    outputs_submodel_weak2 = submodel_weak2.output

    # Average classes.
    first_classes = Average(name='averaged_first_classes')([
        Crop(1, 0, 812,
             name='first_classes_submodel_strong')(outputs_submodel_strong),
        Crop(1, 0, 812,
             name='first_classes_submodel_weak1')(outputs_submodel_weak1)
    ])

    last_classes = Average(name='averaged_last_classes')([
        Crop(1, 812, 1623,
             name='last_classes_submodel_strong')(outputs_submodel_strong),
        Crop(1, 0, 811,
             name='last_classes_submodel_weak2')(outputs_submodel_weak2)
    ])

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')([first_classes, last_classes])
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='omniglot_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
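`create_inputs` and `load_weights` are likewise assumed by every ensemble builder in this listing. Hedged sketches of plausible definitions follow; the exact behavior (in particular the `by_name=True` choice) is an assumption, not confirmed by the source:

import os
from tensorflow.keras.layers import Input

def create_inputs(input_shape=None, input_tensor=None):
    # Reuse a provided tensor, otherwise create a fresh Input layer.
    if input_tensor is not None:
        return input_tensor
    if input_shape is None:
        raise ValueError('Either input_shape or input_tensor is required.')
    return Input(shape=input_shape)

def load_weights(weights_path, model):
    # Load weights only when a usable path was actually given (assumed behavior).
    if weights_path is not None and os.path.isfile(weights_path):
        model.load_weights(weights_path, by_name=True)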
Example #3
def cifar100_architectures_diverse_ensemble(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar100_architectures_diverse_ensemble network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel_1 = cifar100_model1(n_classes, input_shape, input_tensor,
                                 weights_path)
    submodel_2 = cifar100_model2(n_classes, input_shape, input_tensor,
                                 weights_path)
    submodel_3 = cifar100_model3(n_classes, input_shape, input_tensor,
                                 weights_path)

    submodel_1._name = 'cifar100_baseline_ensemble_submodel1'
    submodel_2._name = 'cifar100_baseline_ensemble_submodel2'
    submodel_3._name = 'cifar100_baseline_ensemble_submodel3'

    # Get their outputs.
    outputs_submodel1 = submodel_1(inputs)
    outputs_submodel2 = submodel_2(inputs)
    outputs_submodel3 = submodel_3(inputs)

    # Average classes.
    outputs = Average(name='averaged_predictions')(
        [outputs_submodel1, outputs_submodel2, outputs_submodel3])

    # Create model.
    model = Model(inputs,
                  outputs,
                  name='cifar100_architectures_diverse_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
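The pattern above, averaging the full softmax outputs of submodels that share one input, can be checked in isolation. A self-contained toy sketch; the tiny `make_submodel` classifier is hypothetical, standing in for `cifar100_model1/2/3`:

import numpy as np
from tensorflow.keras.layers import Input, Dense, Average
from tensorflow.keras.models import Model

def make_submodel(n_classes, name):
    # Hypothetical stand-in submodel: one hidden layer plus a softmax head.
    x_in = Input(shape=(32,))
    x = Dense(64, activation='relu')(x_in)
    return Model(x_in, Dense(n_classes, activation='softmax')(x), name=name)

inputs = Input(shape=(32,))
submodels = [make_submodel(100, 'sub%d' % i) for i in range(3)]
outputs = Average(name='averaged_predictions')([m(inputs) for m in submodels])
ensemble = Model(inputs, outputs)
print(ensemble.predict(np.random.rand(2, 32)).shape)  # (2, 100)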
Example #4
def RVSR(input_LR_num, input_channels, mag):
    input_list = input_LR_num * [None]
    output_list = (input_LR_num // 2 + 1) * [None]

    for img in range(input_LR_num):
        input_list[img] = Input(shape=(None, None, input_channels),
                                name="input_" + str((img)))

    for num in range(0, input_LR_num // 2 + 1):
        output = ESPCN(
            input_list[input_LR_num // 2 - num:input_LR_num // 2 + num + 1],
            input_channels, mag)
        output_list[num] = output

    Tem_agg_model = Average()(output_list)

    model = Model(inputs=input_list, outputs=[Tem_agg_model])

    model.summary()
    return model
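`ESPCN` is not defined in this listing. A hedged sketch of the branch RVSR appears to expect, fusing a temporal window of low-resolution frames and upscaling with a sub-pixel (pixel-shuffle) layer in the style of Shi et al.'s ESPCN; the filter counts and activations here are assumptions:

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Lambda, concatenate

def ESPCN(lr_inputs, input_channels, mag):
    # Fuse the temporal window of LR frames along the channel axis.
    x = lr_inputs[0] if len(lr_inputs) == 1 else concatenate(lr_inputs)
    x = Conv2D(64, 5, padding='same', activation='tanh')(x)
    x = Conv2D(32, 3, padding='same', activation='tanh')(x)
    # mag**2 channel groups get rearranged into a mag-times larger grid.
    x = Conv2D(input_channels * mag ** 2, 3, padding='same')(x)
    return Lambda(lambda t: tf.nn.depth_to_space(t, mag))(x)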
Example #5
def test_delete_channels_merge_others(channel_index, data_format):
    layer_test_helper_merge_2d(Add(), channel_index, data_format)
    layer_test_helper_merge_2d(Multiply(), channel_index, data_format)
    layer_test_helper_merge_2d(Average(), channel_index, data_format)
    layer_test_helper_merge_2d(Maximum(), channel_index, data_format)
Example #6
def cifar100_complicated_ensemble(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    # Submodel 1.
    # Block1.
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block1_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block1_pool')(x1)

    # Block2
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block2_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block2_pool')(x1)

    # Block3
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel1_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x1)
    x1 = BatchNormalization(name='submodel1_block3_batch-norm')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel1_block3_pool')(x1)

    # Add Submodel 1 top layers.
    x1 = Flatten(name='submodel1_flatten')(x1)
    outputs1 = Dense(20, name='submodel1_output')(x1)
    # Crop outputs1 in order to create the first submodel's output.
    outputs_first_submodel = Crop(1, 0, 10,
                                  name='first_ten_classes_submodel')(outputs1)
    output_list.append(outputs_first_submodel)

    # Submodel 2.
    # Block1.
    x2 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x2 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = BatchNormalization(name='submodel2_block1_batch-norm')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel2_block1_pool')(x2)

    # Block2
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel2_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x2)
    x2 = BatchNormalization(name='submodel2_block2_batch-norm')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel2_block2_pool')(x2)

    # Add Submodel 2 top layers.
    x2 = Flatten(name='submodel2_flatten')(x2)
    outputs2 = Dense(30, name='submodel2_output')(x2)

    # Average the predictions for the second ten classes of the first two submodels.
    averaged_classes_20_30 = Average(name='averaged_second_ten_classes')(
        [Crop(1, 10, 20)(outputs1),
         Crop(1, 0, 10)(outputs2)])
    # Crop outputs2 in order to create the third ten classes output.
    outputs_classes_30_40 = Crop(1, 10, 20, name='third_ten_classes')(outputs2)
    # Concatenate classes outputs in order to create the second submodel's output.
    outputs_second_submodel = Concatenate(name='second_submodel')(
        [averaged_classes_20_30, outputs_classes_30_40])
    output_list.append(outputs_second_submodel)

    # Submodel 3.
    # Block1.
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = BatchNormalization(name='submodel3_block1_batch-norm')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel3_block1_pool')(x3)

    # Block2
    x3 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel3_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x3)
    x3 = BatchNormalization(name='submodel3_block2_batch-norm')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel3_block2_pool')(x3)

    # Add Submodel 3 top layers.
    x3 = Flatten(name='submodel3_flatten')(x3)
    outputs3 = Dense(30, name='submodel3_output')(x3)

    # Average the predictions for the fourth ten classes of the last two submodels.
    averaged_classes_30_40 = Average(name='averaged_fourth_ten_class')(
        [Crop(1, 20, 30)(outputs2),
         Crop(1, 0, 10)(outputs3)])
    # Crop outputs3 in order to create the fifth and sixth ten-class outputs.
    outputs_classes_40_50 = Crop(1, 10, 20, name='fifth_ten_class')(outputs3)
    outputs_classes_50_60 = Crop(1, 20, 30, name='sixth_ten_class')(outputs3)
    # Concatenate classes outputs in order to create the third submodel's output.
    outputs_third_submodel = Concatenate(name='third_submodel')(
        [averaged_classes_30_40, outputs_classes_40_50, outputs_classes_50_60])
    output_list.append(outputs_third_submodel)

    # Submodel 4.
    # Block1.
    x4 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x4 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block1_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block1_pool')(x4)

    # Block2
    x4 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block2_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block2_pool')(x4)

    # Block3
    x4 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel4_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x4)
    x4 = BatchNormalization(name='submodel4_block3_batch-norm')(x4)
    x4 = MaxPooling2D(pool_size=(2, 2), name='submodel4_block3_pool')(x4)

    # Add Submodel 4 top layers.
    x4 = Flatten(name='submodel4_flatten')(x4)
    outputs4 = Dense(20, name='60-80_classes_submodel4')(x4)
    output_list.append(outputs4)

    # Submodel 5.
    # Block1.
    x5 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block1_conv1',
                kernel_regularizer=l2(weight_decay))(inputs)
    x5 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block1_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block1_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block1_pool')(x5)

    # Block2
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block2_conv1',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block2_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block2_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block2_pool')(x5)

    # Block3
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block3_conv1',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel5_block3_conv2',
                kernel_regularizer=l2(weight_decay))(x5)
    x5 = BatchNormalization(name='submodel5_block3_batch-norm')(x5)
    x5 = MaxPooling2D(pool_size=(2, 2), name='submodel5_block3_pool')(x5)

    # Add Submodel 5 top layers.
    x5 = Flatten(name='submodel5_flatten')(x5)
    outputs5 = Dense(20, name='80-100_classes_submodel5')(x5)
    output_list.append(outputs5)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_complicated_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
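Given the sketched helpers above, a quick sanity check that the concatenated heads cover all 100 CIFAR-100 classes (10 + 20 + 30 + 20 + 20). The loss choice is an assumption, since the model ends in a single `Softmax` over the concatenated logits:

model = cifar100_complicated_ensemble(input_shape=(32, 32, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.output_shape)  # expected: (None, 100)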
Example #7
def cifar100_pyramid_ensemble(input_shape=None,
                              input_tensor=None,
                              n_classes=None,
                              weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel Strong.
    # Block1.
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block1_conv1')(inputs)
    x1 = Conv2D(64, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block1_conv2')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel_strong_block1_pool')(x1)

    # Block2
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block2_conv1')(x1)
    x1 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block2_conv2')(x1)
    x1 = MaxPooling2D(pool_size=(2, 2), name='submodel_strong_block2_pool')(x1)

    # Block3
    x1 = BatchNormalization(name='submodel_strong_block3_batch-norm')(x1)
    x1 = Conv2D(256, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_strong_block3_conv')(x1)
    x1 = Dropout(0.5, name='submodel_strong_block3_dropout', seed=0)(x1)

    # Add Submodel Strong top layers.
    x1 = Flatten(name='submodel_strong_flatten')(x1)
    outputs_submodel_strong = Dense(100, name='submodel_strong_output')(x1)

    # Submodel Weak 1.
    # Block1.
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_1_block1_conv1')(inputs)
    x2 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_1_block1_conv2')(x2)
    x2 = MaxPooling2D(pool_size=(2, 2), name='submodel_weak_1_block1_pool')(x2)

    # Add Submodel Weak 1 top layers.
    x2 = Flatten(name='submodel_weak_1_flatten')(x2)
    outputs2 = Dense(50, name='submodel_weak_1_output')(x2)

    # Average the predictions for the first 50 classes.
    averaged_first_half_classes = Average(name='averaged_first_half_classes')(
        [Crop(1, 0, 50)(outputs_submodel_strong), outputs2])

    output_list.append(averaged_first_half_classes)

    # Submodel Weak 2.
    # Block1.
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_2_block1_conv1')(inputs)
    x3 = Conv2D(128, (3, 3),
                padding='same',
                activation='elu',
                name='submodel_weak_2_block1_conv2')(x3)
    x3 = MaxPooling2D(pool_size=(2, 2), name='submodel_weak_2_block1_pool')(x3)

    # Add Submodel Weak 2 top layers.
    x3 = Flatten(name='submodel_weak_2_flatten')(x3)
    outputs3 = Dense(50, name='submodel_weak_2_output')(x3)

    # Average the predictions for the last 50 classes.
    averaged_last_half_classes = Average(name='averaged_last_half_classes')(
        [Crop(1, 50, 100)(outputs_submodel_strong), outputs3])

    output_list.append(averaged_last_half_classes)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_pyramid_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #8
    def init_model(self, input_shape, num_classes, **kwargs):

        freq_axis = 2
        channel_axis = 3
        channel_size = 128
        min_size = min(input_shape[:2])
        inputs = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)
        # x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        x = Reshape((input_shape[0], input_shape[1], 1))(inputs)
        # Conv block 1
        x = Convolution2D(64, 3, 1, padding='same', name='conv1')(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)

        # Conv block 2
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv2')(x)
        x = BatchNormalization(axis=channel_axis, name='bn2')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
        x = Dropout(0.1, name='dropout2')(x)

        # Conv block 3
        x = Convolution2D(channel_size, 3, 1, padding='same', name='conv3')(x)
        x = BatchNormalization(axis=channel_axis, name='bn3')(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
        x = Dropout(0.1, name='dropout3')(x)

        if min_size // 24 >= 4:
            # Conv block 4
            x = Convolution2D(channel_size, 3, 1, padding='same',
                              name='conv4')(x)
            x = BatchNormalization(axis=channel_axis, name='bn4')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
            x = Dropout(0.1, name='dropout4')(x)

        x = Reshape((-1, channel_size))(x)

        avg_pool = GlobalAvgPool1D()(x)
        max_pool = GlobalMaxPool1D()(x)
        x = concatenate([avg_pool, max_pool], axis=-1)
        # x = Dense(max(int(num_classes*1.5), 128), activation='relu', name='dense1')(x)
        x = Dropout(0.3)(x)
        outputs1 = Dense(num_classes, activation='softmax', name='output')(x)

        # bnorm_1 = BatchNormalization(axis=2)(inputs)
        lstm_1 = Bidirectional(CuDNNLSTM(64,
                                         name='blstm_1',
                                         return_sequences=True),
                               merge_mode='concat')(inputs)
        activation_1 = Activation('tanh')(lstm_1)
        dropout1 = SpatialDropout1D(0.5)(activation_1)
        attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
        pool_1 = GlobalMaxPool1D()(attention_1)
        dropout2 = Dropout(rate=0.5)(pool_1)
        dense_1 = Dense(units=256, activation='relu')(dropout2)
        outputs2 = Dense(units=num_classes, activation='softmax')(dense_1)

        outputs = Average()([outputs1, outputs2])
        model = TFModel(inputs=inputs, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=0.0002,
            amsgrad=True)
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
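`CuDNNLSTM` exists only in TF 1.x-era Keras; in TF 2 the cuDNN kernel was folded into the standard `LSTM` layer, which selects it automatically on GPU when the cuDNN-compatible defaults are kept. A drop-in sketch of the equivalent call for the bidirectional block above:

from tensorflow.keras.layers import Bidirectional, LSTM

# TF2 equivalent of the CuDNNLSTM block; LSTM uses the cuDNN kernel on GPU
# as long as activation/recurrent_activation keep their defaults.
lstm_1 = Bidirectional(LSTM(64, return_sequences=True),
                       merge_mode='concat', name='blstm_1')(inputs)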
Example #9
def manet(input_layer_names, input_shape, config=None, logger=None):
    output_layers = []

    # paras of multi-scales
    nb_pyr_levels = 3
    pyr_levels = list(range(nb_pyr_levels))
    ret_feat_levels = 2

    # paras of layers
    conv_type, ks, activ = "conv", 2, "relu"

    # paras of cost volume (cv)
    min_disp, max_disp, num_disp_labels = -4, 4, 80

    ########## input layers ##########
    input_layers = []
    for input_layer_name in input_layer_names:
        x = Input(shape=input_shape, name=input_layer_name)
        input_layers.append(x)

    ########## Branch_2, 3: cv ##########
    pyr_outputs = []  # outputs of pyramid level 1, 2

    # 1. Feature extraction
    nb_filt1 = 8
    feature_s_paras = {
        'ks': ks,
        'stride': [2, 1],
        'padding': "zero",
        'filter': [nb_filt1, nb_filt1 * 2] * 1,
        'activation': activ,
        'conv_type': conv_type,
        'pyr': True,
        'layer_nums': 2,
        "ret_feat_levels": ret_feat_levels
    }
    feature_s_m = feature_extraction_m((input_shape[0], input_shape[1], 1),
                                       feat_paras=feature_s_paras)

    fs_ts_ids = []
    feature_streams = []
    for stream_id, x in enumerate(input_layers):
        if stream_id > 1:
            continue
        feature_stream = []
        for x_sid in range(input_shape[2]):
            x_sub = Lambda(slicing, arguments={'index': x_sid})(x)
            x_sub = feature_s_m(x_sub)
            feature_stream.append(x_sub)

        if stream_id == 0:
            t_ids = list(range(input_shape[2]))[::-1]
            s_ids = [int((input_shape[2] - 1) / 2)] * input_shape[2]
        elif stream_id == 1:
            t_ids = [int((input_shape[2] - 1) / 2)] * input_shape[2]
            s_ids = list(range(input_shape[2]))
        fs_ts_ids.append((t_ids, s_ids))
        feature_streams.append(feature_stream)

    # 2/3/4. Cost volume + 3D aggregation + Regression
    cv_ca_pyr_levels = pyr_levels[1:]
    for pyr_level in cv_ca_pyr_levels[::-1]:
        cv_streams = []
        scale_factor = math.pow(2, pyr_level)
        pyr_level_ndl = int(num_disp_labels / scale_factor)

        # 2. Cost volume
        for fs_id, feature_stream in enumerate(feature_streams):
            pyr_fs = [fs_ep[pyr_level - 1] for fs_ep in feature_stream]
            cost_volume = Lambda(compute_cost_volume,
                                 arguments={
                                     "t_s_ids": fs_ts_ids[fs_id],
                                     "min_disp": min_disp / scale_factor,
                                     "max_disp": max_disp / scale_factor,
                                     "labels": pyr_level_ndl,
                                     "move_path": "LT"
                                 })(pyr_fs)
            cv_streams.append(cost_volume)

        # Multiple streams
        if len(cv_streams) > 1:
            cost_volume = concatenate(cv_streams)

        # 3/4. 3D aggregation + Regression
        # 3. 3D aggregation
        if pyr_level == cv_ca_pyr_levels[0]:
            ca_paras = {
                'ks': 3,
                'stride': 2,
                'padding': "same",
                'filter': nb_filt1 * 2,
                'activation': activ,
                'conv_type': conv_type,
                'n_dc': 1
            }
            output = cost_aggregation(cost_volume, ca_paras=ca_paras)
        else:
            ca_paras = {
                'ks': 3,
                'stride': 2,
                'padding': "same",
                'filter': nb_filt1 * 4,
                'activation': activ,
                'conv_type': conv_type,
                'n_dc': 1
            }
            output = cost_aggregation(cost_volume, ca_paras=ca_paras)
            output = UpSampling3D(size=(2, 2, 2),
                                  name="u_s{}".format(pyr_level))(output)

        # 4. Regression
        logger.info("=> regression at scale level {}".format(pyr_level))
        output = Lambda(lambda op: tf.nn.softmax(op, axis=1))(output)
        pl_o = Lambda(soft_min_reg,
                      arguments={
                          "axis": 1,
                          "min_disp": min_disp,
                          "max_disp": max_disp,
                          "labels": num_disp_labels
                      },
                      name="sm_disp{}".format(pyr_level))(output)
        pyr_outputs.append(pl_o)
    d2 = Average()(pyr_outputs[:2])  # outputs at scale level 1 and 2

    ########## Branch_1: no_cv ##########
    block_n = 8  # blocks
    ifn = 40  # filter

    # Branch_1: 2D aggregation
    pl_features = []
    pl_feature_streams = []
    for x in input_layers:
        x = ReflectionPadding2D(padding=([4, 4], [4, 4]))(x)
        feature_paras = {
            'ks': ks,
            'stride': 1,
            'padding': "zero",
            'filter': 1 * [ifn],
            'activation': activ,
            'conv_type': conv_type,
            'layer_nums': 1
        }
        x = cna_m(x, feature_paras, layer_names='random')
        pl_feature_streams.append(x)
    x = concatenate(pl_feature_streams)  # merge layers
    pl_features.append(x)

    pyr_level = pyr_levels[0]  # = 0
    fn = block_n * [ifn * len(input_layer_names)]
    cna_paras = {
        'ks': ks,
        'stride': 1,
        'padding': "valid",
        'filter': fn,
        'activation': activ,
        'conv_type': conv_type,
        'layer_nums': block_n
    }
    x = cna_m(pl_features[pyr_level], cna_paras, layer_names='random')
    x = conv_2d(x, num_disp_labels, ks=ks, padding="zero")

    # Branch_1: Regression
    logger.info("=> regression at scale level {}".format(pyr_level))
    x = Lambda(lambda op: tf.nn.softmax(op, axis=-1))(x)
    d1 = Lambda(soft_min_reg,
                arguments={
                    "axis": -1,
                    "min_disp": min_disp,
                    "max_disp": max_disp,
                    "labels": num_disp_labels
                },
                name="sm_disp_{}".format(pyr_level))(x)

    ########## Output ##########
    d0 = Average()([d2, d1])
    output_layers.append(d2)
    output_layers.append(d1)
    output_layers.append(d0)

    manet_model = Model(inputs=input_layers, outputs=output_layers)
    if config.model_infovis:
        manet_model.summary()
    return manet_model
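Both `manet` and the following `llfnet` slice individual views out of the stacked input with a `slicing` function passed to `Lambda`. A plausible definition; the keyword names match the `arguments` dicts above, everything else is an assumption:

def slicing(x, index, index_end=None):
    # Keep channels [index:index_end) of a (batch, H, W, views) tensor,
    # feeding one view at a time into the shared feature extractor.
    if index_end is None:
        index_end = index + 1
    return x[:, :, :, index:index_end]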
Example #10
def llfnet(input_layer_names,
           input_shape,
           config=None,
           hp=None,
           phase=None,
           logger=None):
    output_layers = []

    # paras of layers
    conv_type, ks, activ = "conv", 2, "relu"

    # paras of cost volume (cv)
    min_disp, max_disp, num_disp_labels = 0, 50, 128

    # 0. Input
    input_layers = get_input(input_layer_names, input_shape)

    # 1. Feature extraction
    nb_filt1 = 16
    fe_filt = [nb_filt1 * 2, nb_filt1 * 2, nb_filt1 * 4, nb_filt1 * 4]
    feature_s_paras = {
        'ks': ks,
        'stride': [2, 1],
        'padding': "zero",
        'filter': fe_filt * 1,
        'activation': activ,
        'conv_type': conv_type,
        'pyr': True,
        'layer_nums': 2,
        "ret_feat_levels": 1
    }
    # feature share module
    feature_s_m = feature_extraction_m((input_shape[0], input_shape[1], 1),
                                       feat_paras=feature_s_paras)

    feature_streams = []
    fs_ts_ids = []
    for stream_id, x in enumerate(input_layers):
        if stream_id > 1:
            logger.info("skip stream{}".format(stream_id))
            continue
        feature_stream = []
        # iterate views of a stream
        for x_sid in range(0, input_shape[2]):
            x_sub = Lambda(slicing,
                           arguments={
                               'index': x_sid,
                               'index_end': x_sid + 1
                           })(x)
            x_sub = feature_s_m(x_sub)
            feature_stream.append(x_sub)

        # | stream (vertical)
        if stream_id == 0:
            t_ids = list(range(input_shape[2]))
            s_ids = [int((input_shape[2] - 1) / 2)] * input_shape[2]
        # - stream (horizontal)
        elif stream_id == 1:
            t_ids = [int((input_shape[2] - 1) / 2)] * input_shape[2]
            s_ids = list(range(input_shape[2]))
        fs_ts_ids.append((t_ids, s_ids))
        feature_streams.append(feature_stream)

    # 2. Cost volume
    # iterate pyramid levels reversely
    pyr_cost_volume = []  # pyramid cost volume
    pyr_levels_l = list(range(2))
    skip_pyr_levels = 1
    for pyr_level in pyr_levels_l[::-1]:
        if pyr_level < skip_pyr_levels:
            logger.info("=> skip pyramid level: {}".format(pyr_level))
            continue
        else:
            logger.info("=> pyramid level: {}".format(pyr_level))
        cv_streams = []
        scale_factor = math.pow(2, pyr_level + 1)

        # Cost volume per pyramid level
        # 2.1 build (shift+cost) cost volume per stream
        for fs_id, feature_stream in enumerate(feature_streams):
            # pyramid feature stream
            pyr_fs = list(feature_stream)
            cost_volume = Lambda(compute_cost_volume,
                                 arguments={
                                     "t_s_ids": fs_ts_ids[fs_id],
                                     "min_disp": min_disp / scale_factor,
                                     "max_disp": max_disp / scale_factor,
                                     "labels":
                                     int(num_disp_labels / scale_factor),
                                     "move_path": "LT"
                                 })(pyr_fs)
            cv_streams.append(cost_volume)

        # 2.2 fuse multiple streams (across views + across streams or intra + inter)
        if len(cv_streams) > 1:
            if input_shape[2] > 3:
                cv_streams_3x3s = []
                # divide
                for cv_stream in cv_streams:
                    cv_stream_3x3s = get_3x3(cv_stream, nb_filt1 * 2)
                    cv_streams_3x3s.append(cv_stream_3x3s)
                cv_streams_3x3s = list(map(list,
                                           zip(*cv_streams_3x3s)))  # transpose
                # concat
                concat_cost_volumes = []
                for cv_streams_3x3 in cv_streams_3x3s:
                    concat_cost_volume = concatenate(cv_streams_3x3)
                    concat_cost_volumes.append(concat_cost_volume)
                # sum over divided
                cost_volume = Average()(concat_cost_volumes)
            else:
                cost_volume = concatenate(cv_streams)

        pyr_cost_volume.append(cost_volume)

    # 3/4.Cost aggregation + Regression
    for idx in range(len(pyr_cost_volume)):
        # 3.Cost aggregation
        ca_paras = {
            'conv_type': conv_type,
            'ks': 3,
            'stride': 2,
            'padding': "same",
            'filter': nb_filt1 * 4,
            'activation': activ,
            'n_dc': 1
        }
        # view
        pcv_tmp_l = []
        for i in range(2):
            ind_st = i * nb_filt1 * 2 * 3
            ind_end = (i + 1) * nb_filt1 * 2 * 3
            pcv_sub = Lambda(slicing,
                             arguments={
                                 'index': ind_st,
                                 'index_end': ind_end
                             })(pyr_cost_volume[idx])
            pcv_sub_ca = channel_attention_m(pcv_sub, residual=True)
            pcv_tmp_l.append(pcv_sub_ca)
        pcv_tmp = concatenate(pcv_tmp_l)
        # stream
        pcv_tmp = channel_attention_m(pcv_tmp, residual=True, stream=True)
        output = cost_aggregation(pcv_tmp, ca_paras=ca_paras)

        # upsampling
        up_scale = int(input_shape[0] / K.int_shape(output)[2])
        x_shape = output.get_shape().as_list()
        output = Lambda(upsample_ops,
                        arguments={
                            'width': int(up_scale * x_shape[2]),
                            'height': int(up_scale * x_shape[3]),
                            'axis': 1,
                            'scale': 1,
                            'interp': "bilinear"
                        })(output)
        output = Lambda(upsample_ops,
                        arguments={
                            'width': int(2 * x_shape[1]),
                            'height': int(2 * x_shape[3]),
                            'axis': 2,
                            'scale': 1,
                            'interp': "bilinear"
                        })(output)

        # 4.Regression
        output = Lambda(lambda op: tf.nn.softmax(op, axis=1))(output)
        output = Lambda(soft_min_reg,
                        arguments={
                            "axis": 1,
                            "min_disp": min_disp,
                            "max_disp": max_disp,
                            "labels": num_disp_labels
                        },
                        name="sm_disp{}".format(pyr_level))(output)
    output_layers.append(output)

    # Set optimizer, and compile
    if phase == 'train':
        # Set optimizer with learning rate
        learning_rate = hp["network"]['learning_rate']
        opt = RMSprop(lr=learning_rate)

        llfnet_model = Model(inputs=input_layers, outputs=output_layers)
        if config.gpus > 1:
            llfnet_model = multi_gpu_model(llfnet_model, gpus=config.gpus)
        llfnet_model.compile(optimizer=opt, loss=smoothL1)

    else:
        llfnet_model = Model(inputs=input_layers, outputs=output_layers)
        if config.gpus > 1:
            llfnet_model = multi_gpu_model(llfnet_model, gpus=config.gpus)

    if config.model_infovis:
        llfnet_model.summary()
    return llfnet_model
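The `soft_min_reg` Lambda in the regression steps of `manet` and `llfnet` is not shown; it evidently turns the softmax over disparity labels into a continuous disparity estimate. A hedged sketch in the soft-argmin style of GC-Net, taking the expectation of the label values under the distribution (argument names follow the `arguments` dicts above; the implementation is an assumption):

import tensorflow as tf

def soft_min_reg(prob, axis, min_disp, max_disp, labels):
    # Expected disparity under the softmax distribution along `axis`.
    disp_values = tf.linspace(float(min_disp), float(max_disp), labels)
    # Reshape the label values so they broadcast along `axis` only.
    shape = [1] * len(prob.get_shape())
    shape[axis] = labels
    disp_values = tf.reshape(disp_values, shape)
    return tf.reduce_sum(prob * disp_values, axis=axis)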
Example #11
def cifar100_complicated_ensemble_v2(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar100 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    outputs_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel1 = cifar100_complicated_ensemble_v2_submodel1(
        input_shape, input_tensor, 41, weights_path)
    submodel2 = cifar100_complicated_ensemble_v2_submodel2(
        input_shape, input_tensor, 41, weights_path)
    submodel3 = cifar100_complicated_ensemble_v2_submodel3(
        input_shape, input_tensor, 41, weights_path)
    submodel4 = cifar100_complicated_ensemble_v2_submodel4(
        input_shape, input_tensor, 41, weights_path)
    submodel5 = cifar100_complicated_ensemble_v2_submodel5(
        input_shape, input_tensor, 41, weights_path)

    # Get their outputs.
    outputs_submodel1 = submodel1(inputs)
    outputs_submodel2 = submodel2(inputs)
    outputs_submodel3 = submodel3(inputs)
    outputs_submodel4 = submodel4(inputs)
    outputs_submodel5 = submodel5(inputs)

    # Correct submodel 2 - 5 outputs.
    outputs_submodel2 = Crop(1, 1,
                             outputs_submodel2.shape[1])(outputs_submodel2)
    outputs_submodel3 = Crop(1, 1,
                             outputs_submodel3.shape[1])(outputs_submodel3)
    outputs_submodel4 = Crop(1, 1,
                             outputs_submodel4.shape[1])(outputs_submodel4)
    outputs_submodel5 = Crop(1, 1,
                             outputs_submodel5.shape[1])(outputs_submodel5)

    # Create the complicated outputs.
    # Classes 0-9.
    outputs_list.append(
        Average(name='classes_0-9')([
            Crop(1, 0, 10)(outputs_submodel1),
            Crop(1, 10, 20)(outputs_submodel5)
        ]))

    # Classes 10-39.
    outputs_list.append(
        Average(name='classes_10-39')([
            Crop(1, 10, 40)(outputs_submodel1),
            Crop(1, 0, 30)(outputs_submodel2)
        ]))

    # Classes 40-49.
    outputs_list.append(
        Average(name='classes_40-49')([
            Crop(1, 30, 40)(outputs_submodel2),
            Crop(1, 0, 10)(outputs_submodel3)
        ]))

    # Classes 50-59.
    outputs_list.append(
        Average(name='classes_50-59')([
            Crop(1, 10, 20)(outputs_submodel3),
            Crop(1, 0, 10)(outputs_submodel5)
        ]))

    # Classes 60-79.
    outputs_list.append(
        Average(name='classes_60-79')([
            Crop(1, 20, 40)(outputs_submodel3),
            Crop(1, 0, 20)(outputs_submodel4)
        ]))

    # Classes 80-99.
    outputs_list.append(
        Average(name='classes_80-99')([
            Crop(1, 20, 40)(outputs_submodel4),
            Crop(1, 10, 30)(outputs_submodel5)
        ]))

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(outputs_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='cifar100_complicated_ensemble_v2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #12
def svhn_complicated_ensemble_v2(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a svhn network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    outputs_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Generate Submodels.
    submodel1 = svhn_complicated_ensemble_v2_submodel1(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel2 = svhn_complicated_ensemble_v2_submodel2(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel3 = svhn_complicated_ensemble_v2_submodel3(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel4 = svhn_complicated_ensemble_v2_submodel4(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)
    submodel5 = svhn_complicated_ensemble_v2_submodel5(input_shape,
                                                       input_tensor, 5,
                                                       weights_path)

    # Get their outputs.
    outputs_submodel1 = submodel1(inputs)
    outputs_submodel2 = submodel2(inputs)
    outputs_submodel3 = submodel3(inputs)
    outputs_submodel4 = submodel4(inputs)
    outputs_submodel5 = submodel5(inputs)

    # Correct submodel 2 - 5 outputs.
    outputs_submodel2 = Crop(1, 1,
                             outputs_submodel2.shape[1])(outputs_submodel2)
    outputs_submodel3 = Crop(1, 1,
                             outputs_submodel3.shape[1])(outputs_submodel3)
    outputs_submodel4 = Crop(1, 1,
                             outputs_submodel4.shape[1])(outputs_submodel4)
    outputs_submodel5 = Crop(1, 1,
                             outputs_submodel5.shape[1])(outputs_submodel5)

    # Create the complicated outputs.
    # Class 0.
    outputs_list.append(
        Average(name='class_0')([
            Crop(1, 0, 1)(outputs_submodel1),
            Crop(1, 1, 2)(outputs_submodel5)
        ]))

    # Classes 1, 2, 3.
    outputs_list.append(
        Average(name='classes_1_2_3')([
            Crop(1, 1, 4)(outputs_submodel1),
            Crop(1, 0, 3)(outputs_submodel2)
        ]))

    # Class 4.
    outputs_list.append(
        Average(name='class_4')([
            Crop(1, 3, 4)(outputs_submodel2),
            Crop(1, 0, 1)(outputs_submodel3)
        ]))

    # Class 5.
    outputs_list.append(
        Average(name='class_5')([
            Crop(1, 1, 2)(outputs_submodel3),
            Crop(1, 0, 1)(outputs_submodel5)
        ]))

    # Classes 6, 7.
    outputs_list.append(
        Average(name='classes_6_7')([
            Crop(1, 2, 4)(outputs_submodel3),
            Crop(1, 0, 2)(outputs_submodel4)
        ]))

    # Classes 8, 9.
    outputs_list.append(
        Average(name='classes_8_9')([
            Crop(1, 2, 4)(outputs_submodel4),
            Crop(1, 1, 3)(outputs_submodel5)
        ]))

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(outputs_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='svhn_complicated_ensemble_v2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example #13
def ensemble_training(models_list):
    outputs = [model.outputs[-1] for model in models_list]
    final_input = [model.input for model in models_list]
    final_output = Average()(outputs)
    ens_model = Model(final_input, final_output)
    return ens_model
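A hypothetical usage sketch; note that because `ensemble_training` collects each submodel's own `Input`, the resulting model takes one input per member, so the same batch must be fed once per member:

from tensorflow.keras.layers import Input, Dense, Average
from tensorflow.keras.models import Model

def small_clf(name):
    # Hypothetical member classifier for illustration only.
    x_in = Input(shape=(16,))
    return Model(x_in, Dense(10, activation='softmax')(x_in), name=name)

members = [small_clf('clf_a'), small_clf('clf_b')]
ens = ensemble_training(members)
ens.compile(optimizer='adam', loss='categorical_crossentropy')
# Prediction feeds the same batch once per member:
# ens.predict([x_batch, x_batch])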
Example #14
def caltech_complicated_ensemble(input_shape=None, input_tensor=None, n_classes=None,
                                 weights_path: Union[None, str] = None) -> Model:
    """
    Defines a caltech network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    output_list = []
    inputs = create_inputs(input_shape, input_tensor)

    # Submodel 1.
    submodel1 = caltech_complicated_ensemble_submodel1(input_shape, input_tensor, n_classes, weights_path)
    outputs1 = Dense(22, name='submodel1_output')(submodel1.layers[-2].output)
    # Crop outputs1 in order to create the first submodel's output.
    outputs_first_submodel = Crop(1, 0, 12, name='first_twelve_classes_submodel')(outputs1)
    output_list.append(outputs_first_submodel)

    # Submodel 2.
    submodel2 = caltech_complicated_ensemble_submodel2(input_shape, input_tensor, n_classes, weights_path)
    outputs2 = Dense(30, name='submodel2_output')(submodel2.layers[-2].output)

    # Average the predictions for the second ten classes of the first two submodels.
    averaged_classes_20_30 = Average(name='averaged_second_ten_classes')(
        [Crop(1, 12, 22)(outputs1), Crop(1, 0, 10)(outputs2)])
    # Crop outputs2 in order to create the third ten classes output.
    outputs_classes_30_40 = Crop(1, 10, 20, name='third_ten_classes')(outputs2)
    # Concatenate classes outputs in order to create the second submodel's output.
    outputs_second_submodel = Concatenate(name='second_submodel')([averaged_classes_20_30, outputs_classes_30_40])
    output_list.append(outputs_second_submodel)

    # Submodel 3.
    submodel3 = caltech_complicated_ensemble_submodel3(input_shape, input_tensor, n_classes, weights_path)
    outputs3 = Dense(30, name='submodel3_output')(submodel3.layers[-2].output)

    # Average the predictions for the fourth ten classes of the last two submodels.
    averaged_classes_30_40 = Average(name='averaged_fourth_ten_class')([
        Crop(1, 20, 30)(outputs2),
        Crop(1, 0, 10)(outputs3)
    ])
    # Crop outputs3 in order to create the fifth and sixth ten-class outputs.
    outputs_classes_40_50 = Crop(1, 10, 20, name='fifth_ten_class')(outputs3)
    outputs_classes_50_60 = Crop(1, 20, 30, name='sixth_ten_class')(outputs3)
    # Concatenate classes outputs in order to create the third submodel's output.
    outputs_third_submodel = Concatenate(name='third_submodel')([
        averaged_classes_30_40,
        outputs_classes_40_50,
        outputs_classes_50_60
    ])
    output_list.append(outputs_third_submodel)

    # Submodel 4.
    submodel4 = caltech_complicated_ensemble_submodel4(input_shape, input_tensor, n_classes, weights_path)
    outputs4 = Dense(20, name='submodel4_output')(submodel4.layers[-2].output)
    output_list.append(outputs4)

    # Submodel 5.
    submodel5 = caltech_complicated_ensemble_submodel5(input_shape, input_tensor, n_classes, weights_path)
    outputs5 = Dense(20, name='submodel5_output')(submodel5.layers[-2].output)
    output_list.append(outputs5)

    # Concatenate all class predictions together.
    outputs = Concatenate(name='output')(output_list)
    outputs = Softmax(name='output_softmax')(outputs)

    # Create model.
    model = Model(inputs, outputs, name='caltech_complicated_ensemble')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model