    def residual_block(x, **metaparameters):
        """ Construct a Residual Block
            x        : input to the block
            n_filters: number of filters in convolution layer in residual block
        """
        if 'n_filters' in metaparameters:
            n_filters = metaparameters['n_filters']
            del metaparameters['n_filters']
        else:
            n_filters = DenseNet.n_filters

            
        # Remember the input tensor to the residual block
        shortcut = x

        # BN-ReLU-Conv pre-activation form of convolutions

        # Dimensionality expansion, expand filters by 4 (DenseNet-B)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)
        x = Composable.Conv2D(x, 4 * n_filters, (1, 1), strides=(1, 1), use_bias=False, 
                              **metaparameters)
    
        # Bottleneck convolution
        # 3x3 convolution with padding=same to preserve same shape of feature maps
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)
        x = Composable.Conv2D(x, n_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False, 
                              **metaparameters)

        # Concatenate the input (identity) with the output of the residual block
        # Concatenation (vs. merging) provides Feature Reuse between layers
        x = Concatenate()([shortcut, x])
        return x
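A minimal standalone sketch of the same dense-block pattern, assuming TensorFlow 2.x and plain Keras layers in place of the Composable helpers (so the initializer/regularizer plumbing is omitted); it shows the concatenation growing the channel count by n_filters:

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import BatchNormalization, ReLU, Conv2D, Concatenate

def dense_block_sketch(x, n_filters=32):
    shortcut = x
    # 1x1 dimensionality expansion to 4 * n_filters (DenseNet-B)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(4 * n_filters, (1, 1), strides=(1, 1), use_bias=False)(x)
    # 3x3 bottleneck convolution; padding='same' preserves the spatial shape
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
    # Concatenation: output channels = input channels + n_filters (the growth rate)
    return Concatenate()([shortcut, x])

inputs = Input((32, 32, 64))
outputs = dense_block_sketch(inputs)   # shape (None, 32, 32, 96)
Model(inputs, outputs).summary()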
Example #2
    def __init__(self, groups=None,
                 input_shape=(32, 32, 3), include_top=True,
                 **hyperparameters):
        """ Construct a Wids Residual (Convolutional Neural) Network 
            groups      : metaparameter for group configuration
            input_shape : input shape
            include_top : include the reconstruction component
            initializer : kernel initialization
            regularizer : kernel regularization
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias in conjunction with batch norm
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)

        if groups is None:
            groups = self.groups

        # The input tensor
        inputs = Input(input_shape)

        # The stem convolutional group
        x = self.stem(inputs)

        # The learner
        outputs = self.learner(x, groups)

        # The reconstruction
        if include_top:
            outputs = self.decoder(outputs)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
    def __init__(self,  
                 input_shape=(32, 32, 3), include_top=True,
                 f1 = 9, f2=1, f3=5,
                 **hyperparameters):
        """ Construct a Wids Residual (Convolutional Neural) Network 
            f1, f2, f3  : number of filters for convolutional layers n1, n2 and n3
            input_shape : input shape
            include_top : include the reconstruction component
            initializer : kernel initialization
            regularizer : kernel regularization
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias in conjunction with batch norm
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)

        # The input tensor
        inputs = Input(input_shape)

        # The stem convolutional group
        x = self.stem(inputs, f1)

        # The encoder
        outputs = self.encoder(x, f2)

        # The reconstruction
        if include_top:
            outputs = self.reconstruction(outputs, f3)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
    def __init__(self, dropout=0.4, 
                 input_shape=(299, 299, 3), n_classes=1000, include_top=True,
                 **hyperparameters):
        """ Construct an Inception V3 convolutional neural network
            dropout     : dropout rate
            input_shape : the input to the model
            n_classes   : number of output classes
            include_top : whether to include the classifier
            initializer : kernel initializer
            regularizer : kernel regularizer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)

        # The input tensor (299x299 in V3 vs 224x224 in V1/V2)
        inputs = Input(shape=input_shape)

        # The stem convolutional group
        x = self.stem(inputs)

        # The learner
        outputs, aux = self.learner(x, n_classes)

        # The classifier
        if include_top:
            outputs = self.classifier(outputs, n_classes, dropout)

        # Instantiate the Model
        self._model = Model(inputs, [outputs] + aux)
    def decoder(x, init_weights=None, **metaparameters):
        ''' Construct the Decoder
            x     : input to the decoder
            layers: number of filters per layer
            reg   : kernel regularizer
        '''
        layers = metaparameters['layers']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = AutoEncoder.reg
        if 'init_weights' in metaparameters:
            init_weights = metaparameters['init_weights']
        else:
            init_weights = AutoEncoder.init_weights

        # Progressive Feature Unpooling
        for i in range(len(layers)-1, 0, -1):
            n_filters = layers[i]['n_filters']
            x = Conv2DTranspose(n_filters, (3, 3), strides=2, padding='same',
                                kernel_initializer=init_weights, kernel_regularizer=reg)(x)
            x = BatchNormalization()(x)
            x = Composable.ReLU(x)

        # Last unpooling and match shape to input
        x = Conv2DTranspose(3, (3, 3), strides=2, padding='same',
                            kernel_initializer=init_weights, kernel_regularizer=reg)(x)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)

        # The decoded image
        return x
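A quick shape check of the stride-2 unpooling pattern above (plain Keras, TensorFlow 2.x assumed; initializer/regularizer arguments omitted): each Conv2DTranspose with strides=2 and padding='same' doubles the spatial dimensions, and the final transpose restores the 3-channel image:

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2DTranspose

inputs = Input((8, 8, 64))
x = Conv2DTranspose(32, (3, 3), strides=2, padding='same')(inputs)  # 8x8 -> 16x16
x = Conv2DTranspose(3, (3, 3), strides=2, padding='same')(x)        # 16x16 -> 32x32
Model(inputs, x).summary()   # output shape (None, 32, 32, 3)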
Example #6
    def __init__(self, layers=None, input_shape=(32, 32, 3),
                 **hyperparameters):
        ''' Construct an AutoEncoder
            input_shape : input shape to the autoencoder
            layers      : the number of filters per layer
            initializer : kernel initializer
            regularizer : kernel regularizer
            relu_clip   : clip value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias
        '''
        # Configure base (super) class
        Composable.__init__(self, input_shape, None, self.hyperparameters, **hyperparameters)

        if layers is None:
            layers = self.layers

        # remember the layers
        self.layers = layers

        # remember the input shape
        self.input_shape = input_shape

        inputs = Input(input_shape)
        encoder = self.encoder(inputs, layers=layers)
        outputs = self.decoder(encoder, layers=layers)
        self._model = Model(inputs, outputs)
Example #7
    def depthwise_block(x, strides, **metaparameters):
        """ Construct a Depthwise Separable Convolution block
            x         : input to the block
            strides   : strides
            n_filters : number of filters
            alpha     : width multiplier
        """
        n_filters = metaparameters['n_filters']
        alpha     = metaparameters['alpha']
        del metaparameters['n_filters']
            
        # Apply the width filter to the number of feature maps
        filters = int(n_filters * alpha)

        # Zero-pad the feature maps when downsampling with a strided convolution
        if strides == (2, 2):
            x = ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
            padding = 'valid'
        else:
            padding = 'same'

        # Depthwise Convolution
        x = Composable.DepthwiseConv2D(x, (3, 3), strides, padding=padding, use_bias=False, 
                                       **metaparameters)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)

        # Pointwise Convolution
        x = Composable.Conv2D(x, filters, (1, 1), strides=(1, 1), padding='same', use_bias=False, 
                              **metaparameters)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)
        return x
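Why the depthwise separable pair above is cheap, as a back-of-the-envelope parameter count (illustrative numbers, not from the source):

# Standard 3x3 convolution vs. depthwise 3x3 + pointwise 1x1
c_in, c_out, k = 144, 144, 3
standard  = k * k * c_in * c_out          # 186624 weights
separable = k * k * c_in + c_in * c_out   # 1296 + 20736 = 22032 weights
print(round(standard / separable, 1))     # ~8.5x fewer parameters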
    def projection_block(x, **metaparameters):
        """ Create a residual block using Depthwise Separable Convolutions with Projection shortcut
            x        : input into residual block
            n_filters: number of filters
        """
        n_filters = metaparameters['n_filters']
        del metaparameters['n_filters']

        # Remember the input
        shortcut = x
    
        # Strided convolution to double number of filters in identity link to
        # match output of residual block for the add operation (projection shortcut)
        shortcut = Composable.Conv2D(x, n_filters, (1, 1), strides=(2, 2), padding='same', **metaparameters)
        shortcut = BatchNormalization()(shortcut)

        # First Depthwise Separable Convolution
        x = Composable.SeparableConv2D(x, n_filters, (3, 3), padding='same', **metaparameters)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)

        # Second depthwise Separable Convolution
        x = Composable.SeparableConv2D(x, n_filters, (3, 3), padding='same', **metaparameters)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)

        # Create pooled feature maps, reduce size by 75%
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

        # Add the projection shortcut to the output of the block
        x = Add()([x, shortcut])

        return x
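A condensed shape check for the projection shortcut above (plain Keras, TensorFlow 2.x assumed; one separable convolution stands in for the pair): the strided 1x1 convolution halves the spatial dimensions of the shortcut so it matches the max-pooled residual path for the Add:

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, SeparableConv2D, MaxPooling2D, Add

inputs = Input((56, 56, 64))
shortcut = Conv2D(128, (1, 1), strides=(2, 2), padding='same')(inputs)   # 56 -> 28
x = SeparableConv2D(128, (3, 3), padding='same')(inputs)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)              # 56 -> 28
Model(inputs, Add()([x, shortcut])).summary()   # both paths: (None, 28, 28, 128)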
    def __init__(self, groups=None, dropout=0.5, 
                 input_shape=(224, 224, 3), n_classes=1000, include_top=True,
                 **hyperparameters):
        ''' Construct a SqueezeNet Complex Bypass Convolution Neural Network
            groups      : number of blocks/filters per group
            dropout     : percent of dropout
            input_shape : input shape to model
            n_classes   : number of output classes
            include_top : whether to include classifier
            init_weights: kernel initializer
            reg         : kernel regularizer
            relu        : max value for ReLU
            bias        : whether to use bias in conjunction with batch norm
        '''
        Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)
        
        if groups is None:
            groups = list(SqueezeNetComplex.groups)
            
        # The input shape
        inputs = Input(shape=input_shape)

        # The Stem Group
        x = self.stem(inputs)

        # The Learner
        outputs = self.learner(x, groups=groups, dropout=dropout)

        # The Classifier
        if include_top:
            outputs = self.classifier(outputs, n_classes)

        self._model = Model(inputs, outputs)
Example #10
    def fire_block(x, **metaparameters):
        ''' Construct a Fire Block  with complex bypass
            x        : input to the block
            n_filters: number of filters in block
            reg      : kernel regularizer
        '''
        n_filters = metaparameters['n_filters']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = SqueezeNetComplex.reg
        if 'init_weights' in metaparameters:
            init_weights = metaparameters['init_weights']
        else:
            init_weights = SqueezeNetComplex.init_weights

        # remember the input (identity)
        shortcut = x

        # if the number of input filters does not equal the number of output filters, then use
        # a transition convolution to match the number of filters in the identity link to the output
        if shortcut.shape[3] != 8 * n_filters:
            shortcut = Conv2D(n_filters * 8, (1, 1),
                              strides=1,
                              padding='same',
                              kernel_initializer=init_weights,
                              kernel_regularizer=reg)(shortcut)
            shortcut = Composable.ReLU(shortcut)

        # squeeze layer
        squeeze = Conv2D(n_filters, (1, 1),
                         strides=1,
                         padding='same',
                         kernel_initializer=init_weights,
                         kernel_regularizer=reg)(x)
        squeeze = Composable.ReLU(squeeze)

        # branch the squeeze layer into a 1x1 and 3x3 convolution, expanding the
        # number of filters by 4x in each branch
        expand1x1 = Conv2D(n_filters * 4, (1, 1),
                           strides=1,
                           padding='same',
                           kernel_initializer=init_weights,
                           kernel_regularizer=reg)(squeeze)
        expand1x1 = Composable.ReLU(expand1x1)
        expand3x3 = Conv2D(n_filters * 4, (3, 3),
                           strides=1,
                           padding='same',
                           kernel_initializer=init_weights,
                           kernel_regularizer=reg)(squeeze)
        expand3x3 = Composable.ReLU(expand3x3)

        # concatenate the feature maps from the 1x1 and 3x3 branches
        x = Concatenate()([expand1x1, expand3x3])

        # if identity link, add (matrix addition) input filters to output filters
        if shortcut is not None:
            x = Add()([x, shortcut])
        return x
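The channel arithmetic behind the shortcut test above: squeezing to n_filters and expanding through two parallel branches of 4 * n_filters each yields 8 * n_filters after concatenation, which is why the transition convolution fires when shortcut.shape[3] != 8 * n_filters:

n_filters = 16
squeeze = n_filters              # 16 channels after the squeeze layer
expand  = (4 * n_filters) * 2    # two expand branches concatenated: 128 channels
assert expand == 8 * n_filters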
Example #11
    def strided_shuffle_block(x, **metaparameters):
        ''' Construct a Strided Shuffle Block 
            x           : input to the block
            n_partitions: number of groups to partition feature maps (channels) into.
            n_filters   : number of filters
            reduction   : dimensionality reduction factor (e.g., 0.25)
        '''
        n_partitions = metaparameters['n_partitions']
        n_filters = metaparameters['n_filters']
        reduction = metaparameters['reduction']
        del metaparameters['n_filters']
        del metaparameters['n_partitions']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = ShuffleNet.reg
        if 'init_weights' in metaparameters:
            init_weights = metaparameters['init_weights']
        else:
            init_weights = ShuffleNet.init_weights

        # projection shortcut
        shortcut = x
        shortcut = AveragePooling2D((3, 3), strides=2,
                                    padding='same')(shortcut)

        # On entry block, we need to adjust the number of output filters
        # of the entry pointwise group convolution to match the exit
        # pointwise group convolution, by subtracting the number of input filters
        n_filters -= int(x.shape[3])

        # pointwise group convolution, with dimensionality reduction
        x = ShuffleNet.pw_group_conv(x, n_partitions,
                                     int(reduction * n_filters),
                                     **metaparameters)
        x = Composable.ReLU(x)

        # channel shuffle layer
        x = ShuffleNet.channel_shuffle(x, n_partitions)

        # Depthwise 3x3 Strided Convolution
        x = DepthwiseConv2D((3, 3),
                            strides=2,
                            padding='same',
                            use_bias=False,
                            kernel_initializer=init_weights,
                            kernel_regularizer=reg)(x)
        x = BatchNormalization()(x)

        # pointwise group convolution, with dimensionality restoration
        x = ShuffleNet.pw_group_conv(x, n_partitions, n_filters,
                                     **metaparameters)

        # Concatenate the projection shortcut to the output
        x = Concatenate()([shortcut, x])
        x = Composable.ReLU(x)
        return x
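The block depends on ShuffleNet.channel_shuffle, which is not shown in this listing. A common channels-last implementation (a sketch, assuming TensorFlow 2.x) reshapes the channels into (groups, channels_per_group), swaps the two axes, and flattens back:

import tensorflow as tf

def channel_shuffle_sketch(x, n_partitions):
    _, h, w, c = x.shape
    x = tf.reshape(x, [-1, h, w, n_partitions, c // n_partitions])
    x = tf.transpose(x, [0, 1, 2, 4, 3])   # swap the group and channel axes
    return tf.reshape(x, [-1, h, w, c])

x = tf.reshape(tf.range(8, dtype=tf.float32), [1, 1, 1, 8])
print(channel_shuffle_sketch(x, 2)[0, 0, 0])   # [0, 4, 1, 5, 2, 6, 3, 7]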
Example #12
    def __init__(self,
                 groups=None,
                 alpha=1,
                 pho=1,
                 dropout=0.5,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Mobile Convolution Neural Network
            alpha       : width multipler
            pho         : resolution multiplier
            input_shape : the input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            initializer : kernel initializer
            regularizer : kernel regularizer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to include bias
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        if groups is None:
            groups = list(self.groups)

        if alpha < 0 or alpha > 1:
            raise Exception("MobileNet: alpha out of range")
        if pho < 0 or pho > 1:
            raise Exception("MobileNet: pho out of range")
        if dropout < 0 or dropout > 1:
            raise Exception("MobileNet: dropout out of range")

        inputs = Input(shape=(int(input_shape[0] * pho),
                              int(input_shape[1] * pho), 3))

        # The Stem Group
        x = self.stem(inputs, alpha=alpha)

        # The Learner
        outputs = self.learner(x, groups=groups, alpha=alpha)

        # The Classifier
        if include_top:
            outputs = self.classifier(outputs,
                                      n_classes,
                                      alpha=alpha,
                                      dropout=dropout)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
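A quick check of what the two MobileNet metaparameters above do (illustrative values): pho scales the input resolution, alpha scales every layer's filter count, and the multiply-add cost falls roughly with alpha^2 * pho^2:

alpha, pho = 0.75, 0.5
print(int(224 * pho))                 # 112x112 input resolution
print(int(64 * alpha))                # a 64-filter layer becomes 48 filters
print(round(alpha**2 * pho**2, 2))    # ~0.14x the compute of the full model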
Example #13
    def __init__(self,
                 n_layers,
                 cardinality=32,
                 ratio=16,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Residual Next Convolution Neural Network
            n_layers    : number of layers
            cardinality : width of group convolution
            ratio       : amount of filter reduction in squeeze operation
            input_shape : the input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            initializer : kernel initializer
            regularizer : kernel regularization
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias with batchnorm
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # predefined
        if isinstance(n_layers, int):
            if n_layers not in [50, 101, 152]:
                raise Exception("SE-ResNeXt: Invalid value for n_layers")
            groups = list(self.groups[n_layers])
        # user defined
        else:
            groups = n_layers

        # The input tensor
        inputs = Input(shape=input_shape)

        # The Stem Group
        x = self.stem(inputs)

        # The Learner
        outputs = self.learner(x,
                               groups=groups,
                               cardinality=cardinality,
                               ratio=ratio)

        # The Classifier
        if include_top:
            # Add hidden dropout
            outputs = self.classifier(outputs, n_classes, dropout=0.0)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
    def __init__(self,
                 n_layers,
                 n_filters=32,
                 reduction=0.5,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Densely Connected Convolution Neural Network
            n_layers    : number of layers
            n_filters   : number of filters (growth rate)
            reduction   : amount to reduce feature maps by (compression factor)
            input_shape : input shape
            n_classes   : number of output classes
            include_top : whether to include the classifier
            regularizer : kernel regularizer
            initializer : kernel initializer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # predefined
        if isinstance(n_layers, int):
            if n_layers not in [121, 169, 201]:
                raise Exception("DenseNet: Invalid value for n_layers")
            groups = list(self.groups[n_layers])
        # user defined
        else:
            groups = n_layers

        # The input vector
        inputs = Input(shape=input_shape)

        # The Stem Convolution Group
        x = self.stem(inputs, n_filters)

        # The Learner
        outputs = self.learner(x,
                               n_filters=n_filters,
                               reduction=reduction,
                               groups=groups)

        # The Classifier
        if include_top:
            # Add hidden dropout layer
            outputs = self.classifier(outputs, n_classes, dropout=0.1)

        # Instantiate the model
        self._model = Model(inputs, outputs)
Example #15
    def fire_block(x, **metaparameters):
        ''' Construct a Fire Block
            x        : input to the block
            n_filters: number of filters in the block
            bypass   : whether block has an identity shortcut
            reg      : kernel regularizer
        '''
        n_filters = metaparameters['n_filters']
        bypass = metaparameters['bypass']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = SqueezeNetBypass.reg
        if 'init_weights' in metaparameters:
            init_weights = metaparameters['init_weights']
        else:
            init_weights = SqueezeNetBypass.init_weights

        # remember the input
        shortcut = x

        # squeeze layer
        squeeze = Conv2D(n_filters, (1, 1),
                         strides=1,
                         padding='same',
                         kernel_initializer=init_weights,
                         kernel_regularizer=reg)(x)
        squeeze = Composable.ReLU(squeeze)

        # branch the squeeze layer into a 1x1 and 3x3 convolution, expanding the
        # number of filters by 4x in each branch
        expand1x1 = Conv2D(n_filters * 4, (1, 1),
                           strides=1,
                           padding='same',
                           kernel_initializer=init_weights,
                           kernel_regularizer=reg)(squeeze)
        expand1x1 = Composable.ReLU(expand1x1)
        expand3x3 = Conv2D(n_filters * 4, (3, 3),
                           strides=1,
                           padding='same',
                           kernel_initializer=init_weights,
                           kernel_regularizer=reg)(squeeze)
        expand3x3 = Composable.ReLU(expand3x3)

        # concatenate the feature maps from the 1x1 and 3x3 branches
        x = Concatenate()([expand1x1, expand3x3])

        # if identity link, add (matrix addition) input filters to output filters
        if bypass:
            x = Add()([x, shortcut])

        return x
Example #16
    def __init__(self,
                 n_layers,
                 stem={
                     'n_filters': [32, 64],
                     'pooling': 'feature'
                 },
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Jump Convolutional Neural Network 
            n_layers    : number of layers
            stem        : number of filters in the stem convolutional stack
            input_shape : input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            regularizer : kernel regularizer
            relu_clip   : max value for ReLU
            initializer : kernel initializer
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias with batchnorm
        """
        # Configure the base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # predefined
        if isinstance(n_layers, int):
            if n_layers not in [50, 101, 152]:
                raise Exception("JumpNet: Invalid value for n_layers")
            groups = list(self.groups[n_layers])
        # user defined
        else:
            groups = n_layers

        # The input tensor
        inputs = Input(input_shape)

        # The stem convolutional group
        x = self.stem(inputs, stem=stem)

        # The learner
        outputs = self.learner(x, groups=groups)

        # The classifier
        # Add hidden dropout for training-time regularization
        if include_top:
            outputs = self.classifier(outputs, n_classes)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
    def __init__(self,
                 groups=None,
                 filters=None,
                 n_partitions=2,
                 reduction=0.25,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        ''' Construct a Shuffle Convolution Neural Network
            groups      : number of shuffle blocks per shuffle group
            filters     : filters per group based on partitions
            n_partitions: number of groups to partition the filters (channels)
            reduction   : dimensionality reduction on entry to a shuffle block
            input_shape : the input shape to the model
            n_classes   : number of output classes
            include_top : whether to include classifier
            initializer : kernel initializer
            regularizer : kernel regularizer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias in conjunction with batch norm
        '''
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        if groups is None:
            groups = list(ShuffleNet.groups)

        if filters is None:
            filters = self.filters[n_partitions]

        # input tensor
        inputs = Input(shape=input_shape)

        # The Stem convolution group (referred to as Stage 1)
        x = self.stem(inputs)

        # The Learner
        outputs = self.learner(x,
                               groups=groups,
                               n_partitions=n_partitions,
                               filters=filters,
                               reduction=reduction)

        # The Classifier
        if include_top:
            # Add hidden dropout to classifier
            outputs = self.classifier(outputs, n_classes, dropout=0.0)

        self._model = Model(inputs, outputs)
Example #18
    def shuffle_block(x, **metaparameters):
        ''' Construct a Shuffle Block
            x           : input to the block
            n_partitions: number of groups to partition feature maps (channels) into.
            n_filters   : number of filters
            reduction   : dimensionality reduction factor (e.g., 0.25)
            reg         : kernel regularizer
        '''
        n_partitions = metaparameters['n_partitions']
        n_filters = metaparameters['n_filters']
        reduction = metaparameters['reduction']
        del metaparameters['n_filters']
        del metaparameters['n_partitions']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = ShuffleNet.reg
        if 'init_weights' in metaparameters:
            init_weights = metaparameters['init_weights']
        else:
            init_weights = ShuffleNet.init_weights

        # identity shortcut
        shortcut = x

        # pointwise group convolution, with dimensionality reduction
        x = ShuffleNet.pw_group_conv(x, n_partitions,
                                     int(reduction * n_filters),
                                     **metaparameters)
        x = Composable.ReLU(x)

        # channel shuffle layer
        x = ShuffleNet.channel_shuffle(x, n_partitions)

        # Depthwise 3x3 Convolution
        x = Composable.DepthwiseConv2D(x, (3, 3),
                                       strides=1,
                                       padding='same',
                                       use_bias=False,
                                       **metaparameters)
        x = BatchNormalization()(x)

        # pointwise group convolution, with dimensionality restoration
        x = ShuffleNet.pw_group_conv(x, n_partitions, n_filters,
                                     **metaparameters)

        # Add the identity shortcut (input added to output)
        x = Add()([shortcut, x])
        x = Composable.ReLU(x)
        return x
Example #19
    def auxiliary(x, n_classes, **metaparameters):
        """ Construct the auxiliary classifier
            x        : input to the auxiliary classifier
            n_classes: number of output classes
        """
        x = AveragePooling2D((5, 5), strides=(3, 3))(x)
        x = Composable.Conv2D(x, 128, (1, 1), strides=(1, 1), padding='same', use_bias=False, **metaparameters)
        x = BatchNormalization()(x)
        x = Composable.ReLU(x)
        x = Flatten()(x)
        x = Composable.Dense(x, 1024, activation=Composable.ReLU, **metaparameters)
        x = Dropout(0.7)(x)
        output = Composable.Dense(x, n_classes, activation='softmax', **metaparameters)
        return output
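A sketch of how a model with an auxiliary head is typically compiled (TensorFlow 2.x Keras assumed; the tiny model here is only a stand-in): each output gets its own loss, with the auxiliary loss down-weighted, e.g. 0.4 as in the Inception papers, and the auxiliary head is discarded at inference:

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, GlobalAveragePooling2D, Dense

inputs = Input((32, 32, 3))
feat = GlobalAveragePooling2D()(Conv2D(16, (3, 3), padding='same')(inputs))
main = Dense(10, activation='softmax', name='main')(feat)
aux  = Dense(10, activation='softmax', name='aux')(feat)
model = Model(inputs, [main, aux])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',   # applied to both heads
              loss_weights=[1.0, 0.4])           # down-weight the auxiliary loss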
    def trans_block(x, **metaparameters):
        """ Construct a Transition Block
            x        : input layer
            reduction: percentage of reduction of feature maps
        """
        if 'reduction' in metaparameters:
            reduction = metaparameters['reduction']
        else:
            reduction = DenseNet.reduction
        del metaparameters['n_filters']

        # Reduce (compress) the number of feature maps (DenseNet-C)
        # shape[n] returns a class object. We use int() to cast it into the dimension size
        n_filters = int( int(x.shape[3]) * reduction)
    
        # BN-Linear-Conv pre-activation form of convolutions (no ReLU: a linear projection)

        # Use 1x1 linear projection convolution
        x = BatchNormalization()(x)
        x = Composable.Conv2D(x, n_filters, (1, 1), strides=(1, 1), use_bias=False, 
                              **metaparameters)

        # Use average pooling instead of max pooling; the 2x2 stride-2 pooling reduces the feature maps by 75%
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        return x
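Compression arithmetic for the transition block above, assuming a 56x56x256 input and reduction=0.5: the 1x1 projection halves the channels and the stride-2 average pooling quarters the spatial positions, leaving 1/8 of the activations:

h = w = 56; channels = 256; reduction = 0.5
n_filters = int(channels * reduction)        # 128 feature maps out
out = (h // 2) * (w // 2) * n_filters        # 28 * 28 * 128
print(out / (h * w * channels))              # 0.125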
        def stem(inputs):
            """ Create the stem entry into the neural network
                inputs : input tensor to neural network
            """
            # Strided convolution - dimensionality reduction
            # Reduce feature maps by 75%
            x = Composable.Conv2D(inputs, 32, (3, 3), strides=(2, 2), **metaparameters)
            x = BatchNormalization()(x)
            x = Composable.ReLU(x)

            # Convolution - dimensionality expansion
            # Double the number of filters
            x = Composable.Conv2D(x, 64, (3, 3), strides=(1, 1), **metaparameters)
            x = BatchNormalization()(x)
            x = Composable.ReLU(x)
            return x
    def classifier(self, x, n_classes):
        ''' Construct the Classifier 
            x        : input to the classifier
            n_classes: number of output classes
        '''
        # Save the encoding layer
        self.encoding = x

        # set the number of filters equal to number of classes
        x = Conv2D(n_classes, (1, 1),
                   strides=1,
                   padding='same',
                   kernel_initializer=self.init_weights,
                   kernel_regularizer=self.reg)(x)
        x = Composable.ReLU(x)

        # reduce each filter (class) to a single value
        x = GlobalAveragePooling2D()(x)

        # Save the pre-activation probabilities layer
        self.probabilities = x

        outputs = Activation('softmax')(x)

        # Save the post-activation probabilities layer
        self.softmax = outputs
        return outputs
Example #23
    def stem(self, inputs):
        """ Construct the Stem Convolutional Group 
            inputs : the input vector
        """
        # The 224x224 images are zero padded (black - no signal) to be 230x230 images prior to the first convolution
        x = ZeroPadding2D(padding=(3, 3))(inputs)
    
        # First Convolutional layer which uses a large (coarse) filter 
        x = self.Conv2D(x, 64, (7, 7), strides=(2, 2), padding='valid', use_bias=False)
        x = BatchNormalization()(x)
        x = self.ReLU(x)

        # Pooled feature maps will be reduced by 75%
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        # Second group: a 1x1 bottleneck convolution followed by a mid-size 3x3 filter
        x = self.Conv2D(x, 64, (1, 1), strides=(1, 1), padding='same', use_bias=False)
        x = BatchNormalization()(x)
        x = self.ReLU(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = self.Conv2D(x, 192, (3, 3), strides=(1, 1), padding='valid', use_bias=False)
        x = BatchNormalization()(x)
        x = self.ReLU(x)
    
        # Pooled feature maps will be reduced by 75%
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        return x
    def encoder(x, init_weights=None, **metaparameters):
        ''' Construct the Encoder 
            x     : input to the encoder
            layers: number of filters per layer
            reg   : kernel regularizer
        '''
        layers = metaparameters['layers']
        if 'reg' in metaparameters:
            reg = metaparameters['reg']
        else:
            reg = AutoEncoder.reg
        if 'init_weights' in metaparameters:
            init_weights = metaparameters['init_weights']
        else:
            init_weights = AutoEncoder.init_weights

        # Progressive Feature Pooling
        for layer in layers:
            n_filters = layer['n_filters']
            x = Conv2D(n_filters, (3, 3), strides=2, padding='same',
                       kernel_initializer=init_weights, kernel_regularizer=reg)(x)
            x = BatchNormalization()(x)
            x = Composable.ReLU(x)

        # The Encoding
        return x
    def __init__(self,
                 groups,
                 alpha=1,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Mobile Convolution Neural Network V3
            groups      : number of filters and blocks per group
            alpha       : width multiplier
            input_shape : the input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            regularizer : kernel regularizer
            initializer : kernel initializer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # Variable Binding
        self.GROUPS()

        # predefined
        if isinstance(groups, str):
            if groups not in ['large', 'small']:
                raise Exception("MobileNetV3: Invalid value for groups")
            groups = list(self.groups[groups])

        inputs = Input(shape=input_shape)

        # The Stem Group
        x = self.stem(inputs, alpha=alpha)

        # The Learner
        outputs = self.learner(x, groups=groups, alpha=alpha)

        # The Classifier
        if include_top:
            outputs = self.classifier(outputs, n_classes)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
    def __init__(self,
                 n_layers,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Residual Convolutional Neural Network V2
            n_layers    : number of layers
            input_shape : input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            regularizer : kernel regularizer
            initializer : kernel initializer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to include a bias with batchnorm
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # predefined
        if isinstance(n_layers, int):
            if n_layers not in [50, 101, 152]:
                raise Exception("ResNet: Invalid value for n_layers")
            groups = self.groups[n_layers]
        # user defined
        else:
            groups = n_layers

        # The input tensor
        inputs = Input(input_shape)

        # The stem convolutional group
        x = self.stem(inputs)

        # The learner
        outputs = self.learner(x, groups=groups)

        # The classifier
        if include_top:
            # Add hidden dropout for training-time regularization
            outputs = self.classifier(outputs, n_classes, dropout=0.0)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
Example #27
    def __init__(self,
                 groups=None,
                 alpha=1,
                 expansion=6,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Mobile Convolution Neural Network V2
            groups      : number of filters and blocks per group
            alpha       : width multiplier
            expansion   : multiplier to expand the number of filters
            input_shape : the input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            regularizer : kernel regularizer
            initializer : kernel initializer
            relu_clip   : max value for ReLU
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use a bias
        """
        # Configure base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        if groups is None:
            groups = list(self.groups)

        inputs = Input(shape=input_shape)

        # The Stem Group
        x = self.stem(inputs, alpha=alpha)

        # The Learner
        outputs = self.learner(x,
                               groups=groups,
                               alpha=alpha,
                               expansion=expansion)

        # The Classifier
        # Add hidden dropout layer
        if include_top:
            outputs = self.classifier(outputs, n_classes, dropout=0.0)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
Example #28
    def projection_block(x, **metaparameters):
        """ Construct a B(3,3) style block
            x        : input into the block
            n_filters: number of filters
            k        : width factor
            strides  : whether the projection shortcut is strided
        """
        n_filters = metaparameters['n_filters']
        strides = metaparameters['strides']
        k = metaparameters['k']
        del metaparameters['n_filters']
        del metaparameters['strides']

        # Projection shortcut: use a strided BN-Conv on the input to match the residual output
        shortcut = BatchNormalization()(x)
        shortcut = Composable.Conv2D(shortcut,
                                     n_filters * k, (3, 3),
                                     strides=strides,
                                     padding='same',
                                     use_bias=False,
                                     **metaparameters)

        ## Construct the 3x3, 3x3 convolution block

        x = BatchNormalization()(x)
        x = Composable.ReLU(x)
        x = Composable.Conv2D(x,
                              n_filters * k, (3, 3),
                              strides=strides,
                              padding='same',
                              use_bias=False,
                              **metaparameters)

        x = BatchNormalization()(x)
        x = Composable.ReLU(x)
        x = Composable.Conv2D(x,
                              n_filters * k, (3, 3),
                              strides=(1, 1),
                              padding='same',
                              use_bias=False,
                              **metaparameters)

        # Add the projection shortcut to the output of the residual block
        x = Add()([shortcut, x])
        return x
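Widening-factor arithmetic for the block above: every convolution uses n_filters * k filters, so the weight count of a 3x3 layer grows roughly with k^2 (k times more input channels times k times more output channels):

n_filters, k = 16, 8
params_k1 = 3 * 3 * n_filters * n_filters
params_k8 = 3 * 3 * (n_filters * k) * (n_filters * k)
print(params_k8 // params_k1)   # 64 == k**2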
Example #29
    def __init__(self,
                 n_layers,
                 ratio=16,
                 input_shape=(224, 224, 3),
                 n_classes=1000,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Residual Convolutional Neural Network V1
            n_layers    : number of layers
            input_shape : input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            reg         : kernel regularizer
            init_weights: kernel initializer
            relu        : max value for ReLU
            bias        : whether to use bias for batchnorm
        """
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # predefined
        if isinstance(n_layers, int):
            if n_layers not in [50, 101, 152]:
                raise Exception("SE-ResNet: Invalid value for n_layers")
            groups = list(self.groups[n_layers])
        # user defined
        else:
            groups = n_layers

        # The input tensor
        inputs = Input(shape=input_shape)

        # The Stem Group
        x = self.stem(inputs)

        # The Learner
        outputs = self.learner(x, groups=groups, ratio=ratio)

        # The Classifier
        if include_top:
            # Add hidden dropout
            outputs = self.classifier(outputs, n_classes, dropout=0.0)

        # Instantiate the Model
        self._model = Model(inputs, outputs)
    def __init__(self,
                 n_layers,
                 input_shape=(32, 32, 3),
                 n_classes=10,
                 include_top=True,
                 **hyperparameters):
        """ Construct a Residual Convolutional Neural Network V1
            n_layers    : number of layers
            input_shape : input shape
            n_classes   : number of output classes
            include_top : whether to include classifier
            regularizer : kernel regularizer
            relu_clip   : max value for ReLU
            initializer : kernel initializer
            bn_epsilon  : epsilon for batch norm
            use_bias    : whether to use bias with batchnorm
        """
        # Configure the base (super) class
        Composable.__init__(self, input_shape, include_top,
                            self.hyperparameters, **hyperparameters)

        # depth
        if isinstance(n_layers, int):
            if n_layers not in [20, 32, 44, 56, 110, 164]:
                raise Exception("ResNet CIFAR: invalid value for n_layers")
            groups = list(self.groups[n_layers])
        else:
            groups = n_layers

        # The input tensor
        inputs = Input(input_shape)

        # The stem convolutional group
        x = self.stem(inputs)

        # The learner
        outputs = self.learner(x, groups=groups)

        # The classifier
        if include_top:
            outputs = self.classifier(outputs, n_classes)

        # Instantiate the Model
        self._model = Model(inputs, outputs)