Example #1
def ensemble(models):

    '''
    THIS FUNCTION ENSEMBLES THE OUTPUTS OF A LIST OF MODELS BY AVERAGING THEM
    :param models: list of Models: models to ensemble (e.g. AlexNet, VGGNet, ResNet)
    :return: model: Model whose output is the average of the outputs of all models considered
    '''

    ## get the output of each model for a shared input
    input_shape = (config.WIDTH, config.HEIGHT, config.CHANNELS)
    input_model = Input(input_shape)
    models_out = [i(input_model) for i in models]

    ## average the outputs of all models (ensemble)
    average = Average()(models_out)

    ## define a model mapping the shared input to the averaged output
    model = mp(input_model, average, name='ensemble')

    return model
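
# Usage sketch (illustrative, not from the original repo): average three
# pre-built classifiers that share the same input and output shapes.
# `alexnet`, `vggnet` and `resnet` are hypothetical, already-built Model instances.
ensemble_model = ensemble([alexnet, vggnet, resnet])
ensemble_model.compile(optimizer='adam',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])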
Example #2
    def build(self, *args, trainedModel=None) -> Sequential:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR THE INITIALIZATION OF THE DENSENET MODEL
        Reference: https://arxiv.org/pdf/1608.06993.pdf --> Original Paper
        Reference: https://github.com/liuzhuang13/DenseNet/blob/master/models/densenet.lua --> Original Author of DenseNet Paper
        :param args: list of integers, in logical order --> to populate the conv stem and dense blocks (filters, growth rate, compression)
        :return: Sequential: DenseNet MODEL
        '''

        try:

            # IF THE USER ALREADY HAS A TRAINED MODEL AND DOESN'T WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            input_shape = (config.WIDTH, config.HEIGHT, config.CHANNELS)
            input = Input(shape=input_shape)

            x = Conv2D(args[0],
                       kernel_size=(5, 5),
                       use_bias=False,
                       kernel_initializer=he_uniform(config.HE_SEED),
                       strides=2,
                       padding=config.SAME_PADDING,
                       kernel_regularizer=regularizers.l2(1e-4))(input)
            x = BatchNormalization(axis=3)(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D(pool_size=(3, 3), strides=2, padding=config.SAME_PADDING)(x)

            for i in range(args[1]):
                x = self.dense_block(x, args[2], args[3], args[3])  # the initial number of filters equals the growth rate, and every conv uses the same number of filters: the growth rate
                if i < (args[1] - 1):
                    x = self.transition(x, args[4])  ## the last block skips the transition; global average pooling follows instead

            x = BatchNormalization(axis=3)(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = GlobalAveragePooling2D()(x)

            x = Dense(config.NUMBER_CLASSES,
                      kernel_initializer=he_uniform(config.HE_SEED),
                      kernel_regularizer=regularizers.l2(1e-4))(
                          x)  # Num Classes for CIFAR-10
            outputs = Activation(config.SOFTMAX_FUNCTION)(x)

            model = mp(input, outputs)

            if config.BUILD_SUMMARY == 1:
                model.summary()

            return model

        except Exception:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
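
# The build above delegates to self.dense_block and self.transition, which are
# not shown here. A minimal sketch of that pair, following the DenseNet paper
# (BN -> ReLU -> 3x3 conv, concatenated along the channel axis); the names and
# signatures below are illustrative, not the author's exact helpers.
from keras.layers import (BatchNormalization, Activation, Conv2D,
                          AveragePooling2D, Concatenate)

def dense_block_sketch(x, num_layers, growth_rate):
    # each new layer sees the concatenation of all previous feature maps
    for _ in range(num_layers):
        y = BatchNormalization(axis=3)(x)
        y = Activation('relu')(y)
        y = Conv2D(growth_rate, (3, 3), padding='same', use_bias=False)(y)
        x = Concatenate(axis=3)([x, y])
    return x

def transition_sketch(x, compression=0.5):
    # compress channels with a 1x1 conv and halve the spatial resolution
    channels = int(int(x.shape[-1]) * compression)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Conv2D(channels, (1, 1), use_bias=False)(x)
    return AveragePooling2D((2, 2), strides=2)(x)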
Example #3
    def build(self, *args, trainedModel=None) -> Sequential:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR THE INITIALIZATION OF THE SEQUENTIAL ALEXNET MODEL
        :param args: list of integers, in logical order --> to populate the cnn (filters) and dense (neurons) layers
        :return: Sequential: AlexNet MODEL
        '''

        try:

            # IF THE USER ALREADY HAS A TRAINED MODEL AND DOESN'T WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            # definition of input shape and Input Layer
            input_shape = (config.WIDTH, config.HEIGHT, config.CHANNELS)
            input = Input(input_shape)

            ## add stacks of convolution layers to the model
            numberFilters = args[1]
            model = None
            for i in range(args[0]):
                if i == 0:
                    model = self.add_stack(
                        input, numberFilters, 0.25,
                        input_shape)  # first stack convolution layer
                else:
                    model = self.add_stack(model, numberFilters, 0.25)
                numberFilters += args[2]

            # flatten
            model = Flatten()(model)

            # Fully Connected Layer(s)
            for i in range(args[3]):
                model = Dense(units=args[4],
                              kernel_regularizer=regularizers.l2(config.DECAY),
                              kernel_initializer='he_uniform')(model)
                model = Activation(config.RELU_FUNCTION)(model)
                model = BatchNormalization()(model)
                if i != (args[3] - 1):
                    model = Dropout(0.25)(model)  ## apply Dropout to all FC layers except the one preceding the output layer (softmax)

            # Output Layer
            model = Dense(units=config.NUMBER_CLASSES)(model)
            model = Activation(config.SOFTMAX_FUNCTION)(model)

            # build model
            model = mp(inputs=input, outputs=model)

            if config.BUILD_SUMMARY == 1:
                model.summary()

            return model

        except Exception:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
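
# Example #3 relies on self.add_stack, which is not shown. A plausible minimal
# sketch under the same calling convention (input tensor, number of filters,
# dropout rate, optional input_shape on the first stack); illustrative only.
from keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, Dropout

def add_stack_sketch(x, filters, dropout_rate, input_shape=None):
    # input_shape is accepted for parity with the first call above; the
    # functional API infers shapes, so it goes unused in this sketch
    for _ in range(2):
        x = Conv2D(filters, (3, 3), padding='same',
                   kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    return Dropout(dropout_rate)(x)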
Example #4
    def build(self, *args, trainedModel=None) -> Sequential:

        # ResNet v1, based on: https://keras.io/examples/cifar10_resnet/
        # ---------- PAPER: https://arxiv.org/pdf/1512.03385.pdf
        # ---------- RESNET 18 AND 34 ARCHITECTURE: https://datascience.stackexchange.com/questions/33022/how-to-interpert-resnet50-layer-types/47489
        # ---------- VERY GOOD EXPLANATION: http://ethen8181.github.io/machine-learning/keras/resnet_cam/resnet_cam.html#Identity-Block
        ## model based on the ResNet-18 approach, described in the paper cited in the identity_block and convolution_block functions
        try:

            # IF THE USER ALREADY HAS A TRAINED MODEL AND DOESN'T WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            input_shape = (config.HEIGHT, config.WIDTH, config.CHANNELS)
            inputs = Input(input_shape)

            X = ZeroPadding2D((3, 3))(inputs)

            ## plain convolution layer --> network stem
            X = Conv2D(filters=args[0],
                       kernel_size=(5, 5),
                       strides=2,
                       padding=config.SAME_PADDING,
                       kernel_initializer=he_uniform(config.HE_SEED),
                       kernel_regularizer=l2(config.DECAY))(X)
            X = BatchNormalization(axis=3)(X)
            X = Activation(config.RELU_FUNCTION)(X)
            X = MaxPooling2D(pool_size=(3, 3), strides=2, padding=config.SAME_PADDING)(X)

            ## loop of convolution and identity blocks
            numberFilters = args[0]
            for i in range(args[1]):
                if i == 0:
                    X = self.convolution_block(X, numberFilters, 1)  # first set of building blocks, stride is 1
                else:
                    X = self.convolution_block(X, numberFilters, 2)  # subsequent sets of building blocks, stride is 2
                for j in range(args[2]):
                    X = self.identity_block(X, numberFilters)
                numberFilters += args[3]

            X = GlobalAveragePooling2D()(X)

            X = Dense(units=config.NUMBER_CLASSES,
                      kernel_initializer=he_uniform(config.HE_SEED),
                      kernel_regularizer=l2(config.DECAY))(X)
            X = Activation(config.SOFTMAX_FUNCTION)(X)

            ## finally, model creation
            model = mp(inputs=inputs, outputs=X)

            if config.BUILD_SUMMARY == 1:
                model.summary()
            #plot_model(model, show_shapes=True, to_file='residual_module.png')

            return model

        except Exception:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
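
# The ResNet build above depends on self.convolution_block and
# self.identity_block. A minimal sketch of the ResNet-18-style basic block
# (two 3x3 convs plus a shortcut) covering both cases; the name and signature
# are illustrative, not the author's exact helpers.
from keras.layers import Conv2D, BatchNormalization, Activation, Add

def basic_block_sketch(x, filters, stride=1):
    # main path: conv -> BN -> ReLU -> conv -> BN
    shortcut = x
    y = Conv2D(filters, (3, 3), strides=stride, padding='same')(x)
    y = BatchNormalization(axis=3)(y)
    y = Activation('relu')(y)
    y = Conv2D(filters, (3, 3), padding='same')(y)
    y = BatchNormalization(axis=3)(y)
    # projection shortcut when the spatial size or channel count changes
    if stride != 1 or int(x.shape[-1]) != filters:
        shortcut = Conv2D(filters, (1, 1), strides=stride)(x)
        shortcut = BatchNormalization(axis=3)(shortcut)
    y = Add()([y, shortcut])
    return Activation('relu')(y)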
Example #5
    def build(self, *args, trainedModel=None):

        try:

            # IF THE USER ALREADY HAS A TRAINED MODEL AND DOESN'T WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            input_shape = (config.HEIGHT, config.WIDTH, config.CHANNELS)
            img_input = Input(input_shape)
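
            # Encoding path: a VGG-16-style stack of thirteen 3x3 convolutions
            # (64 -> 128 -> 256 -> 512 -> 512), each followed by batch
            # normalization and ReLU, with max pooling between stages.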
            x = Conv2D(64, (3, 3), padding=config.SAME_PADDING, name='conv1', strides=(1, 1))(img_input)
            x = BatchNormalization(name='bn1')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(64, (3, 3), padding=config.SAME_PADDING, name='conv2')(x)
            x = BatchNormalization(name='bn2')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(128, (3, 3), padding=config.SAME_PADDING, name='conv3')(x)
            x = BatchNormalization(name='bn3')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(128, (3, 3), padding=config.SAME_PADDING, name='conv4')(x)
            x = BatchNormalization(name='bn4')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(256, (3, 3), padding=config.SAME_PADDING, name='conv5')(x)
            x = BatchNormalization(name='bn5')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(256, (3, 3), padding=config.SAME_PADDING, name='conv6')(x)
            x = BatchNormalization(name='bn6')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(256, (3, 3), padding=config.SAME_PADDING, name='conv7')(x)
            x = BatchNormalization(name='bn7')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv8')(x)
            x = BatchNormalization(name='bn8')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv9')(x)
            x = BatchNormalization(name='bn9')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv10')(x)
            x = BatchNormalization(name='bn10')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)

            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv11')(x)
            x = BatchNormalization(name='bn11')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv12')(x)
            x = BatchNormalization(name='bn12')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2D(512, (3, 3), padding=config.SAME_PADDING, name='conv13')(x)
            x = BatchNormalization(name='bn13')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D()(x)
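
            # NOTE: Dense applied to a 4D tensor acts on the channel axis only
            # (equivalent to a 1x1 convolution), so the spatial grid survives
            # into the decoder below.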

            x = Dense(1024, activation=config.RELU_FUNCTION, name='fc1')(x)
            x = Dense(1024, activation=config.RELU_FUNCTION, name='fc2')(x)

            # Decoding path (mirrors the encoder with upsampling and transposed convolutions)
            x = UpSampling2D()(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv1')(x)
            x = BatchNormalization(name='bn14')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv2')(x)
            x = BatchNormalization(name='bn15')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv3')(x)
            x = BatchNormalization(name='bn16')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv4')(x)
            x = BatchNormalization(name='bn17')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(512, (3, 3), padding=config.SAME_PADDING, name='deconv5')(x)
            x = BatchNormalization(name='bn18')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(256, (3, 3), padding=config.SAME_PADDING, name='deconv6')(x)
            x = BatchNormalization(name='bn19')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(256, (3, 3), padding=config.SAME_PADDING, name='deconv7')(x)
            x = BatchNormalization(name='bn20')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(256, (3, 3), padding=config.SAME_PADDING, name='deconv8')(x)
            x = BatchNormalization(name='bn21')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(128, (3, 3), padding=config.SAME_PADDING, name='deconv9')(x)
            x = BatchNormalization(name='bn22')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(128, (3, 3), padding=config.SAME_PADDING, name='deconv10')(x)
            x = BatchNormalization(name='bn23')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(64, (3, 3), padding=config.SAME_PADDING, name='deconv11')(x)
            x = BatchNormalization(name='bn24')(x)
            x = Activation(config.RELU_FUNCTION)(x)

            x = UpSampling2D()(x)
            x = Conv2DTranspose(64, (3, 3), padding=config.SAME_PADDING, name='deconv12')(x)
            x = BatchNormalization(name='bn25')(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = Conv2DTranspose(1, (3, 3), padding=config.SAME_PADDING, name='deconv13')(x)
            x = BatchNormalization(name='bn26')(x)
            x = Activation(config.SIGMOID_FUNCTION)(x)

            pred = Reshape((config.HEIGHT, config.WIDTH))(x)  # drop the channel axis to yield a single-channel mask
            model = mp(inputs=img_input, outputs=pred)

            # input_size =(config.WIDTH, config.HEIGHT, config.CHANNELS)
            # N = input_size[0]
            # inputs = Input(input_size)
            # conv1 = Conv2D(64, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(inputs)
            # conv1 = Conv2D(64, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv1)
            #
            # pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
            # conv2 = Conv2D(128, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(pool1)
            # conv2 = Conv2D(128, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv2)
            # pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
            # conv3 = Conv2D(256, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(pool2)
            # conv3 = Conv2D(256, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv3)
            # drop3 = Dropout(0.5)(conv3)
            # pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
            # # D1
            # conv4 = Conv2D(512, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(pool3)
            # conv4_1 = Conv2D(512, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv4)
            # drop4_1 = Dropout(0.5)(conv4_1)
            # # D2
            # conv4_2 = Conv2D(512, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(drop4_1)
            # conv4_2 = Conv2D(512, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv4_2)
            # conv4_2 = Dropout(0.5)(conv4_2)
            # # D3
            # merge_dense = concatenate([conv4_2, drop4_1], axis=3)
            # conv4_3 = Conv2D(512, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(merge_dense)
            # conv4_3 = Conv2D(512, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv4_3)
            # drop4_3 = Dropout(0.5)(conv4_3)
            #
            # up6 = Conv2DTranspose(256, kernel_size=2, strides=2, padding=config.SAME_PADDING, kernel_initializer='he_normal')(
            #     drop4_3)
            # up6 = BatchNormalization(axis=3)(up6)
            # up6 = Activation(config.RELU_FUNCTION)(up6)
            #
            # x1 = Reshape(target_shape=(1, np.int32(N / 4), np.int32(N / 4), 256))(drop3)
            # x2 = Reshape(target_shape=(1, np.int32(N / 4), np.int32(N / 4), 256))(up6)
            # merge6 = concatenate([x1, x2], axis=1)
            # merge6 = ConvLSTM2D(filters=128, kernel_size=(3, 3), padding=config.SAME_PADDING, return_sequences=False,
            #                     go_backwards=True, kernel_initializer='he_normal')(merge6)
            #
            # conv6 = Conv2D(256, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(merge6)
            # conv6 = Conv2D(256, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv6)
            #
            # up7 = Conv2DTranspose(128, kernel_size=2, strides=2, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv6)
            # up7 = BatchNormalization(axis=3)(up7)
            # up7 = Activation(config.RELU_FUNCTION)(up7)
            #
            # x1 = Reshape(target_shape=(1, np.int32(N / 2), np.int32(N / 2), 128))(conv2)
            # x2 = Reshape(target_shape=(1, np.int32(N / 2), np.int32(N / 2), 128))(up7)
            # merge7 = concatenate([x1, x2], axis=1)
            # merge7 = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding=config.SAME_PADDING, return_sequences=False,
            #                     go_backwards=True, kernel_initializer='he_normal')(merge7)
            #
            # conv7 = Conv2D(128, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(merge7)
            # conv7 = Conv2D(128, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv7)
            #
            # up8 = Conv2DTranspose(64, kernel_size=2, strides=2, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv7)
            # up8 = BatchNormalization(axis=3)(up8)
            # up8 = Activation(config.RELU_FUNCTION)(up8)
            #
            # x1 = Reshape(target_shape=(1, N, N, 64))(conv1)
            # x2 = Reshape(target_shape=(1, N, N, 64))(up8)
            # merge8 = concatenate([x1, x2], axis=1)
            # merge8 = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding=config.SAME_PADDING, return_sequences=False,
            #                     go_backwards=True, kernel_initializer='he_normal')(merge8)
            #
            # conv8 = Conv2D(64, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(merge8)
            # conv8 = Conv2D(64, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv8)
            # conv8 = Conv2D(2, 3, activation=config.RELU_FUNCTION, padding=config.SAME_PADDING, kernel_initializer='he_normal')(conv8)
            # conv9 = Conv2D(1, 1, activation=config.SIGMOID_FUNCTION)(conv8)
            #
            # model = mp(input=inputs, output=conv9)

            if config.BUILD_SUMMARY == 1:
                model.summary()
            return model

        except Exception:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_UNET_STRATEGY)
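
# Usage sketch (illustrative, not from the original repo): the decoder ends in
# a single-channel sigmoid, so a binary segmentation loss fits. `strategy` is a
# hypothetical instance of the class that owns build(); the image/mask arrays
# are hypothetical numpy arrays shaped (N, config.HEIGHT, config.WIDTH,
# config.CHANNELS) and (N, config.HEIGHT, config.WIDTH) respectively.
model = strategy.build()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_masks, batch_size=8, epochs=20,
          validation_data=(val_images, val_masks))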