Example #1
# Assumed imports for this snippet (standalone Keras 2.x layer/module names).
from keras.models import Sequential
from keras.layers import Conv2D, AvgPool2D, GaussianNoise, Dropout, Flatten, Dense
from keras.regularizers import l2

# IMG_HEIGHT, IMG_WIDTH, NOISE_STDV, DROPOUT_PROB and L2_PENALTY are
# module-level constants defined elsewhere in the original project.
def slim_model():
    model = Sequential()
    # Five blocks of 5x5 L2-regularized Conv2D + 2x2 AvgPool2D + noise + dropout
    model.add(Conv2D(4, (5, 5), input_shape=(IMG_HEIGHT, IMG_WIDTH, 1),
                     padding='same', activation='relu', kernel_regularizer=l2(L2_PENALTY)))
    model.add(AvgPool2D())
    model.add(GaussianNoise(stddev=NOISE_STDV))
    model.add(Dropout(rate=DROPOUT_PROB))
    model.add(Conv2D(8, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(L2_PENALTY)))
    model.add(AvgPool2D())
    model.add(GaussianNoise(stddev=NOISE_STDV))
    model.add(Dropout(rate=DROPOUT_PROB))
    model.add(Conv2D(16, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(L2_PENALTY)))
    model.add(AvgPool2D())
    model.add(GaussianNoise(stddev=NOISE_STDV))
    model.add(Dropout(rate=DROPOUT_PROB))
    model.add(Conv2D(8, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(L2_PENALTY)))
    model.add(AvgPool2D())
    model.add(GaussianNoise(stddev=NOISE_STDV))
    model.add(Dropout(rate=DROPOUT_PROB))
    model.add(Conv2D(4, (5, 5), padding='same', activation='relu', kernel_regularizer=l2(L2_PENALTY)))
    model.add(AvgPool2D())
    model.add(GaussianNoise(stddev=NOISE_STDV))
    model.add(Dropout(rate=DROPOUT_PROB))
    model.add(Flatten())
    model.add(Dense(1))  # single linear unit: the model is trained as a regressor
    model.compile(optimizer='adam', loss='mse')
    return model
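Below is a minimal smoke test for the snippet above; it is a sketch only, and the constant values (IMG_HEIGHT, IMG_WIDTH, NOISE_STDV, DROPOUT_PROB, L2_PENALTY) are placeholders rather than the values used in the original project.

# Placeholder values for the module-level constants slim_model() expects.
IMG_HEIGHT, IMG_WIDTH = 64, 64
NOISE_STDV = 0.1
DROPOUT_PROB = 0.25
L2_PENALTY = 1e-4

model = slim_model()
model.summary()  # five Conv2D/AvgPool2D blocks, then Flatten and a single-unit Dense head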
Example #2
 def create_model(self):
     inputs = Input(shape=(self.x, self.y, self.channel_size))
     # MaskConv and MaskPooling are custom layers from the surrounding project,
     # not stock Keras layers.
     masked_inputs = MaskConv(self.mask_value)(inputs)
     outputs = MaskPooling(
         AvgPool2D(
             self.pool_size,
             self.strides,
             self.padding
         ),
         pool_mode='avg'
     )(masked_inputs)
     model = Model(inputs, outputs)
     model.compile('sgd', 'mean_squared_error')
     return model
Example #3
 def _avgPooling_blocks(self, input, which_action, poolsize_list):
     # Registration pass (input is None): only record the available AvgPool actions.
     if input is None:
         self.actions_type.append('AvgPool')
         self.actions_params.append([])
     for p in poolsize_list:
         self._curr_action_num += 1
         if input is None:
             self.actions_name.append('AvgPool{}'.format(
                 self._curr_action_num))
             self.actions_info.append('{}'.format(p))
             self.actions_canmerge.append(False)
             self.actions_params[-1].append(self._curr_action_num)
         else:
             # Build pass: instantiate the requested pooling action on top of input.
             if self._curr_action_num == which_action:
                 if not self.canStack(input.shape, p):
                     return None, '', ''  # Unable to stack these two layers
                 self._this_action = AvgPool2D(p)(input)
                 self._this_name = self.actions_name[self._curr_action_num]
                 self._this_info = self.actions_info[self._curr_action_num]
                 return self._this_action, self._this_name, self._this_info
Example #4
    def ResNetv2(self, config):
        """ResNet Version 2 Model builder [b]

        Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
        bottleneck layer
        First shortcut connection per layer is 1 x 1 Conv2D.
        Second and onwards shortcut connection is identity.
        At the beginning of each stage, the feature map size is halved (downsampled)
        by a convolutional layer with strides=2, while the number of filter maps is
        doubled. Within each stage, the layers have the same number filters and the
        same filter map sizes.
        Features maps sizes:
        conv1  : 32x32,  16
        stage 0: 32x32,  64
        stage 1: 16x16, 128
        stage 2:  8x8,  256

        # Arguments
            input_shape (tensor): shape of input image tensor
            depth (int): number of core convolutional layers
            num_classes (int): number of classes (CIFAR10 has 10)

        # Returns
            model (Model): Keras model instance
        """
        def resnet_layer(inputs,
                         num_filters=16,
                         kernel_size=config.CONV2D_KERNEL_SIZE,
                         strides=config.CONV2D_STRIDES,
                         activation=config.ACTIVATION_FUNC,
                         batch_normalization=True,
                         conv_first=True):
            """2D Convolution-Batch Normalization-Activation stack builder

            # Arguments
                inputs (tensor): input tensor from input image or previous layer
                num_filters (int): Conv2D number of filters
                kernel_size (int): Conv2D square kernel dimensions
                strides (int): Conv2D square stride dimensions
                activation (string): activation name
                batch_normalization (bool): whether to include batch normalization
                conv_first (bool): conv-bn-activation (True) or
                    bn-activation-conv (False)

            # Returns
                x (tensor): tensor as input to the next layer
            """
            conv = Conv2D(num_filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(1e-4))

            x = inputs
            if conv_first:
                x = conv(x)
                if batch_normalization:
                    x = BatchNormalization()(x)
                if activation is not None:
                    x = Activation(activation)(x)
            else:
                if batch_normalization:
                    x = BatchNormalization()(x)
                if activation is not None:
                    x = Activation(activation)(x)
                x = conv(x)
            return x

        if (config.RESNET_DEPTH - 2) % 9 != 0:
            raise ValueError('depth should be 9n+2 (e.g. 56 or 110 in [b])')
        # Start model definition.
        num_filters_in = 16
        num_res_blocks = int((config.RESNET_DEPTH - 2) / 9)

        inputs = Input(shape=config.IMG_SHAPE)
        # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
        x = resnet_layer(inputs=inputs,
                         num_filters=num_filters_in,
                         conv_first=True)

        # Instantiate the stack of residual units
        for stage in range(3):
            for res_block in range(num_res_blocks):
                activation = 'relu'
                batch_normalization = True
                strides = 1
                if stage == 0:
                    num_filters_out = num_filters_in * 4
                    if res_block == 0:  # first layer and first stage
                        activation = None
                        batch_normalization = False
                else:
                    num_filters_out = num_filters_in * 2
                    if res_block == 0:  # first layer but not first stage
                        strides = 2  # downsample

                # bottleneck residual unit
                y = resnet_layer(inputs=x,
                                 num_filters=num_filters_in,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=activation,
                                 batch_normalization=batch_normalization,
                                 conv_first=False)
                y = resnet_layer(inputs=y,
                                 num_filters=num_filters_in,
                                 conv_first=False)
                y = resnet_layer(inputs=y,
                                 num_filters=num_filters_out,
                                 kernel_size=1,
                                 conv_first=False)
                if res_block == 0:
                    # linear projection residual shortcut connection to match
                    # changed dims
                    x = resnet_layer(inputs=x,
                                     num_filters=num_filters_out,
                                     kernel_size=1,
                                     strides=strides,
                                     activation=None,
                                     batch_normalization=False)
                x = keras.layers.add([x, y])

            num_filters_in = num_filters_out
        # Add classifier on top.
        # v2 has BN-ReLU before Pooling
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = AvgPool2D(pool_size=8)(x)
        y = Flatten()(x)
        outputs = Dense(config.N_CLASS,
                        activation='softmax',
                        kernel_initializer='he_normal')(y)

        # Instantiate model.
        model = Model(inputs=inputs, outputs=outputs)
        return model
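A hedged usage sketch for the builder above, assuming a plain configuration object that exposes the attributes the method reads (the attribute names come from the code; the values and the ResNetConfig name are illustrative). The class that owns ResNetv2 is not shown here, so its construction is left as a comment.

class ResNetConfig:
    # Attribute names match what ResNetv2/resnet_layer read from `config`.
    CONV2D_KERNEL_SIZE = 3
    CONV2D_STRIDES = 1
    ACTIVATION_FUNC = 'relu'
    RESNET_DEPTH = 29          # must satisfy depth = 9n + 2
    IMG_SHAPE = (32, 32, 3)    # CIFAR-10-sized input
    N_CLASS = 10

# builder = SomeModelBuilder()              # hypothetical: whichever class defines ResNetv2
# model = builder.ResNetv2(ResNetConfig())
# model.summary()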
Example #5
def avgpool2d(x, kernel_size, stride=1, padding="SAME"):
    output = AvgPool2D(pool_size=kernel_size, strides=stride, padding=padding)(x)
    return output
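A short usage sketch (assumed Keras 2.x-style imports): with the default stride of 1 and "SAME" padding, the wrapper keeps the spatial size of the feature map.

from keras.layers import Input

x = Input(shape=(28, 28, 4))
y = avgpool2d(x, kernel_size=2)  # 2x2 average pool, stride 1, 'same' padding -> shape (None, 28, 28, 4)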