Example No. 1
def ResNet(input_shape=None, classes=10, block='bottleneck', residual_unit='v2',
           repetitions=None, initial_filters=16, activation='softmax', include_top=True,
           input_tensor=None, dropout=None, transition_dilation_rate=(1, 1),
           initial_strides=(1, 1), initial_kernel_size=(3, 3), initial_pooling=None,
           final_pooling=None, top='classification'):
    """Builds a custom ResNet like architecture. Defaults to ResNet50 v2.
    Args:
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` dim ordering)
            or `(3, 224, 224)` (with `channels_first` dim ordering)).
            It should have exactly 3 dimensions,
            and width and height should be no smaller than 8.
            E.g. `(224, 224, 3)` would be one valid value.
        classes: The number of outputs at the final softmax layer.
        block: The block function to use. This is either `'basic'` or `'bottleneck'`.
            The original paper used `basic` for networks with fewer than 50 layers.
        repetitions: Number of repetitions of various block units.
            At each block unit, the number of filters is doubled and the input size
            is halved. Default of None implies the ResNet50v2 values of [3, 4, 6, 3].
        residual_unit: the basic residual unit, 'v1' for conv bn relu, 'v2' for bn relu
            conv. See [Identity Mappings in
            Deep Residual Networks](https://arxiv.org/abs/1603.05027)
            for details.
        dropout: None for no dropout, otherwise rate of dropout from 0 to 1.
            Based on the [Wide Residual Networks](https://arxiv.org/pdf/1605.07146) paper.
        transition_dilation_rate: Dilation rate for transition layers. For semantic
            segmentation of images use a dilation rate of (2, 2).
        initial_strides: Stride of the very first residual unit and MaxPooling2D call.
            Defaults to (1, 1), which suits small images like cifar; use (2, 2) for
            imagenet-sized inputs.
        initial_kernel_size: kernel size of the very first convolution, (7, 7) for
            imagenet and (3, 3) for small image datasets like tiny imagenet and cifar.
            See [ResNeXt](https://arxiv.org/abs/1611.05431) paper for details.
        initial_pooling: Determine if there will be an initial pooling layer,
            'max' for imagenet and None for small image datasets.
            See [ResNeXt](https://arxiv.org/abs/1611.05431) paper for details.
        final_pooling: Optional pooling mode for feature extraction at the final
            model layer when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        top: Defines final layers to evaluate based on a specific problem type. Options
            are 'classification' for ImageNet style problems, 'segmentation' for
            problems like the Pascal VOC dataset, and None to exclude these layers
            entirely.
    Returns:
        The keras `Model`.
    """
    if activation not in ['softmax', 'sigmoid', None]:
        raise ValueError('activation must be one of "softmax", "sigmoid", or None')
    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')
    if repetitions is None:
        repetitions = [3, 4, 6, 3]
    # Determine proper input shape
    # input_shape = _obtain_input_shape(input_shape,
    #                                   default_size=32,
    #                                   min_size=8,
    #                                   data_format=K.image_data_format(),
    #                                   require_flatten=include_top)
    _handle_dim_ordering()
    if len(input_shape) != 3:
        raise ValueError("Input shape should be a tuple (nb_channels, nb_rows, nb_cols)")

    if block == 'basic':
        block_fn = basic_block
    elif block == 'bottleneck':
        block_fn = bottleneck
    elif isinstance(block, six.string_types):
        block_fn = _string_to_function(block)
    else:
        block_fn = block

    if residual_unit == 'v2':
        residual_unit = _bn_relu_conv
    elif residual_unit == 'v1':
        residual_unit = _conv_bn_relu
    elif isinstance(residual_unit, six.string_types):
        residual_unit = _string_to_function(residual_unit)
    else:
        residual_unit = residual_unit

    # Permute dimension order if necessary
    if K.image_data_format() == 'channels_first':
        input_shape = (input_shape[1], input_shape[2], input_shape[0])

    img_input = Input(shape=input_shape, tensor=input_tensor)
    x = _conv_bn_relu(filters=initial_filters, kernel_size=initial_kernel_size,
                      strides=initial_strides)(img_input)
    if initial_pooling == 'max':
        x = MaxPooling2D(pool_size=(3, 3), strides=initial_strides, padding="same")(x)

    block = x
    filters = initial_filters
    for i, r in enumerate(repetitions):
        transition_dilation_rates = [transition_dilation_rate] * r
        transition_strides = [(1, 1)] * r
        if transition_dilation_rate == (1, 1):
            transition_strides[0] = (2, 2)
        block = _residual_block(block_fn, filters=filters,
                                stage=i, blocks=r,
                                is_first_layer=(i == 0),
                                dropout=dropout,
                                transition_dilation_rates=transition_dilation_rates,
                                transition_strides=transition_strides,
                                residual_unit=residual_unit)(block)
        filters *= 2

    # Last activation
    x = _bn_relu(block)

    # Classifier block
    if include_top and top == 'classification':
        x = GlobalAveragePooling2D()(x)
        x = Dense(units=classes, activation=activation,
                  kernel_initializer="he_normal", kernel_regularizer=l2(L2_WEIGHT_DECAY),
                  bias_regularizer=l2(L2_WEIGHT_DECAY))(x)
    elif include_top and top == 'segmentation':
        x = Conv2D(classes, (1, 1), activation='linear', padding='same')(x)

        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        x = Reshape((row * col, classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, classes))(x)
    elif final_pooling == 'avg':
        x = GlobalAveragePooling2D()(x)
    elif final_pooling == 'max':
        x = GlobalMaxPooling2D()(x)

    model = Model(inputs=img_input, outputs=x)
    return model
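
# Usage sketch (an assumption, not part of the original snippet): building a small
# CIFAR-style network with the builder above. The argument values are illustrative.
cifar_resnet = ResNet(input_shape=(32, 32, 3), classes=10, block='basic',
                      repetitions=[2, 2, 2, 2])
cifar_resnet.compile(optimizer='adam', loss='categorical_crossentropy',
                     metrics=['accuracy'])
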
np.random.seed(23456)
tf.random.set_seed(123)

# Initiating an empty neural network
cnn_model = Sequential(name='cnn_1')

# Adding convolutional layer
cnn_model.add(
    Conv2D(filters=16,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(128, 660, 1)))

# Adding max pooling layer
cnn_model.add(MaxPooling2D(pool_size=(2, 4)))

# Adding convolutional layer
cnn_model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))

# Adding max pooling layer
cnn_model.add(MaxPooling2D(pool_size=(2, 4)))

# Adding a flattened layer to input our image data
cnn_model.add(Flatten())

# Adding a dense layer with 64 neurons
cnn_model.add(Dense(64, activation='relu'))

# Adding a dropout layer for regularization
cnn_model.add(Dropout(0.25))
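
# Completion sketch (an assumption, not part of the original snippet): the model
# above stops at the dropout layer, so a plausible output layer and compile step
# are added here; 10 is a hypothetical number of target classes.
cnn_model.add(Dense(10, activation='softmax'))
cnn_model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
cnn_model.summary()
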
Example No. 3
    inputArray = inputArray[:i]
    outputArray = outputArray[:i]
    return (inputArray, outputArray)


#model
model = Sequential()

#normalizing and cropping 
model.add(Input((280,640,2)))
model.add(Lambda(lambda x:x/255.0))
#model.add(Cropping2D(cropping=((206,72), (0,0)))) # 43%, 15%

#layer 1
model.add(Conv2D(32, (19,19), activation='relu', kernel_regularizer=ft.keras.regularizers.l2()))
model.add(MaxPooling2D(3,3))
model.add(Dropout(rate=0.3))
#layer 2
model.add(Conv2D(32, (15,15), activation='relu', kernel_regularizer=ft.keras.regularizers.l2()))
model.add(MaxPooling2D(3,3))
model.add(Dropout(rate=0.3))

#layer 3
model.add(Conv2D(32, (5,5), activation='relu', kernel_regularizer=ft.keras.regularizers.l2()))
model.add(MaxPooling2D(3,3))
model.add(Dropout(rate=0.3))

model.add(Flatten())

#fc 1
model.add(Dense(32, activation=ft.nn.relu))
Example No. 4
    def __init__(self, img_h, img_w):  # 1365x2048
        concat_axis = 3
        self.img_h = img_h
        self.img_w = img_w
        
        # contracting path        
        #self.pad1 = SymmetricPadding2D(output_dim=(512, 512, 3), kernel=5, padding=[pad_size, pad_size], input_shape=(512, 512, 3))
        self.conv1 = Conv2D(16, (5,5), (1,1), padding="same", activation="selu") #1365x2048x16                
        self.batch1 = BatchNormalization()
        self.pool1 = MaxPooling2D(pool_size=(2, 2)) 

        self.conv2 = Conv2D(32, (5,5), (2,2), padding="same", activation="selu") #256x256x32                
        self.batch2 = BatchNormalization()
        self.pool2 = MaxPooling2D(pool_size=(2, 2)) 

        self.conv3 = Conv2D(64, (5,5), (2,2), padding="same", activation="selu") #128x128x64               
        self.batch3 = BatchNormalization()
        self.pool3 = MaxPooling2D(pool_size=(2, 2))

        self.conv4 = Conv2D(128, (5,5), (2,2), padding="same", activation="selu") #64x64x128               
        self.batch4 = BatchNormalization()
        self.pool4 = MaxPooling2D(pool_size=(2, 2))
        
        self.conv5 = Conv2D(128, (5,5), (2,2), padding="same", activation="selu") #32x32x128                               
        self.batch5 = BatchNormalization()
        
        # global features
        self.pool5 = MaxPooling2D(pool_size=(2, 2))

        self.conv6 = Conv2D(128, (5,5), (2,2), padding="same", activation="selu") #16x16x128                               
        self.batch6 = BatchNormalization()
        self.pool6 = MaxPooling2D(pool_size=(2, 2))

        self.conv7 = Conv2D(128, (5,5), (2,2), padding="same", activation="selu") #8x8x128                               
        self.batch7 = BatchNormalization()
        
        # fc -> selu -> fc
        self.conv8 = Conv2D(128, (8,8), (1,1), padding="valid", activation="selu") #1x1x128
        self.conv9 = Conv2D(128, (1,1), (1,1), padding="valid") #1x1x128
        
        # expanding path
        self.conv10 = Conv2D(128, (3,3), (1,1), padding="same")        
        self.global_concat = GlobalConcat((1, 512, 512, 128))
        self.conv11 = Conv2D(128, (1,1), (1,1), padding="same", activation="selu")        
        self.batch11 = BatchNormalization()

        self.conv12 = Conv2D(128, (3,3), (1,1), padding="same")
        self.resize12 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.get_shape()[1]) * 2, int(x.get_shape()[2]) * 2), align_corners=True))  # double H and W
        self.concat12 = Concatenate(axis=concat_axis)
        self.act12 = Activation('selu')
        self.batch12 = BatchNormalization()

        self.conv13 = Conv2D(128, (3,3), (1,1), padding="same")
        self.resize13 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.get_shape()[1]) * 2, int(x.get_shape()[2]) * 2), align_corners=True))  # double H and W
        self.concat13 = Concatenate(axis=concat_axis)
        self.act13 = Activation('selu')
        self.batch13 = BatchNormalization()

        self.conv14 = Conv2D(64, (3,3), (1,1), padding="same")
        self.resize14 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.get_shape()[1]) * 2, int(x.get_shape()[2]) * 2), align_corners=True))  # double H and W
        self.concat14 = Concatenate(axis=concat_axis)
        self.act14 = Activation('selu')
        self.batch14 = BatchNormalization()

        self.conv15 = Conv2D(32, (3,3), (1,1), padding="same")
        self.resize15 = Lambda(lambda x: tf.image.resize_bilinear(x, size=(int(x.get_shape()[1]) * 2, int(x.get_shape()[2]) * 2), align_corners=True))  # double H and W
        self.concat15 = Concatenate(axis=concat_axis)
        self.act15 = Activation('selu')
        self.batch15 = BatchNormalization()

        self.conv16 = Conv2D(16, (3,3), (1,1), padding="same")
        self.act16 = Activation('selu')
        self.batch16 = BatchNormalization()
                
        self.conv17 = Conv2D(3, (3,3), (1,1), padding="same")

        self.add = Add()
Example No. 5
        onehot = [0 for _ in range(len(LETTERSTR))]
        num = LETTERSTR.find(letter)
        onehot[num] = 1
        labellist.append(onehot)
    return labellist


# Create CNN Model
print("Creating CNN model...")
inp = Input((60, 200, 3))
out = inp
out = Conv2D(filters=32, kernel_size=(3, 3), padding='same',
             activation='relu')(out)
out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Dropout(0.3)(out)
out = Conv2D(filters=64, kernel_size=(3, 3), padding='same',
             activation='relu')(out)
out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Dropout(0.3)(out)
out = Conv2D(filters=128,
             kernel_size=(3, 3),
             padding='same',
             activation='relu')(out)
out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Dropout(0.3)(out)
Example No. 6
def VGG16(include_top=True, weights='vggface',
          input_tensor=None, input_shape=None,
          pooling=None,
          classes=2622):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_1')(
        img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(
        x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(
        x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(
        x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(
        x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(
        x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(
        x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(
        x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(
        x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(
        x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(
        x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(
        x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, name='fc6')(x)
        x = Activation('relu', name='fc6/relu')(x)
        x = Dense(4096, name='fc7')(x)
        x = Activation('relu', name='fc7/relu')(x)
        x = Dense(classes, name='fc8')(x)
        x = Activation('softmax', name='fc8/softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='vggface_vgg16')

    # Load weights.
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_vgg16.h5',
                                    utils.VGG16_WEIGHTS_PATH,
                                    cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_vgg16.h5',
                                    utils.VGG16_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path, by_name=True)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='pool5')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc6')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
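
# Usage sketch (an assumption, not part of the original snippet): loading the
# VGGFace VGG16 convolutional base for feature extraction.
vggface_base = VGG16(include_top=False, input_shape=(224, 224, 3), pooling='avg')
vggface_base.summary()
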
Example No. 7
    def build_unet(self):
        input_img = Input(shape=self.img_shape)

        # (N,240,240,1) -> (N,120,120,32)
        conv1 = Conv2D(32, 3, 1, padding='same')(input_img)
        conv1 = InstanceNormalization()(conv1)
        conv1 = ReLU()(conv1)
        conv1 = Conv2D(32, 3, 1, padding='same')(conv1)
        conv1 = InstanceNormalization()(conv1)
        conv1 = ReLU()(conv1)
        pool1 = MaxPooling2D()(conv1)

        # (N,120,120,32) -> (N,60,60,64)
        conv2 = Conv2D(64, 3, 1, padding='same')(pool1)
        conv2 = InstanceNormalization()(conv2)
        conv2 = ReLU()(conv2)
        conv2 = Conv2D(64, 3, 1, padding='same')(conv2)
        conv2 = InstanceNormalization()(conv2)
        conv2 = ReLU()(conv2)
        pool2 = MaxPooling2D()(conv2)

        # (N,60,60,64) -> (N,30,30,128)
        conv3 = Conv2D(128, 3, 1, padding='same')(pool2)
        conv3 = InstanceNormalization()(conv3)
        conv3 = ReLU()(conv3)
        conv3 = Conv2D(128, 3, 1, padding='same')(conv3)
        conv3 = InstanceNormalization()(conv3)
        conv3 = ReLU()(conv3)
        pool3 = MaxPooling2D()(conv3)

        # (N,30,30,128) -> (N,15,15,256)
        conv4 = Conv2D(256, 3, 1, padding='same')(pool3)
        conv4 = InstanceNormalization()(conv4)
        conv4 = ReLU()(conv4)
        conv4 = Conv2D(256, 3, 1, padding='same')(conv4)
        conv4 = InstanceNormalization()(conv4)
        conv4 = ReLU()(conv4)
        pool4 = MaxPooling2D()(conv4)

        # (N,15,15,256) -> (N,15,15,512)
        conv5 = Conv2D(512, 3, 1, padding='same')(pool4)
        conv5 = InstanceNormalization()(conv5)
        conv5 = ReLU()(conv5)
        conv5 = Conv2D(512, 3, 1, padding='same')(conv5)
        conv5 = InstanceNormalization()(conv5)
        conv5 = ReLU()(conv5)

        # (N,15,15,512) -> (N,30,30,256)
        up1 = UpSampling2D(size=(2, 2))(conv5)
        conv6 = Concatenate(axis=-1)([up1, conv4])
        conv6 = Conv2D(256, 3, 1, padding='same')(conv6)
        conv6 = InstanceNormalization()(conv6)
        conv6 = ReLU()(conv6)
        conv6 = Conv2D(256, 3, 1, padding='same')(conv6)
        conv6 = InstanceNormalization()(conv6)
        conv6 = ReLU()(conv6)

        # (N,30,30,256) -> (N,60,60,128)
        up2 = UpSampling2D(size=(2, 2))(conv6)
        conv7 = Concatenate(axis=-1)([up2, conv3])
        conv7 = Conv2D(128, 3, 1, padding='same')(conv7)
        conv7 = InstanceNormalization()(conv7)
        conv7 = ReLU()(conv7)
        conv7 = Conv2D(128, 3, 1, padding='same')(conv7)
        conv7 = InstanceNormalization()(conv7)
        conv7 = ReLU()(conv7)

        # (N,60,60,128) -> (N,120,120,64)
        up3 = UpSampling2D(size=(2, 2))(conv7)
        conv8 = Concatenate(axis=-1)([up3, conv2])
        conv8 = Conv2D(64, 3, 1, padding='same')(conv8)
        conv8 = InstanceNormalization()(conv8)
        conv8 = ReLU()(conv8)
        conv8 = Conv2D(64, 3, 1, padding='same')(conv8)
        conv8 = InstanceNormalization()(conv8)
        conv8 = ReLU()(conv8)

        # (N,120,120,64) -> (N,240,240,32)
        up4 = UpSampling2D(size=(2, 2))(conv8)
        conv9 = Concatenate(axis=-1)([up4, conv1])
        conv9 = Conv2D(32, 3, 1, padding='same')(conv9)
        conv9 = InstanceNormalization()(conv9)
        conv9 = ReLU()(conv9)
        conv9 = Conv2D(32, 3, 1, padding='same')(conv9)
        conv9 = InstanceNormalization()(conv9)
        conv9 = ReLU()(conv9)

        output = Conv2D(1, 1, 1, padding='same', activation='sigmoid')(conv9)

        self.unet = Model(input_img, output)
        self.unet.compile(optimizer=Adam(lr=1e-4),
                          loss='binary_crossentropy',
                          metrics=[utils.f1])
        return self.unet
Example No. 8
    def init_model(self, input_shape, num_classes, **kwargs):
        layers = 5
        filters_size = [64, 128, 256, 512, 512]
        kernel_size = (3, 3)
        pool_size = [(2, 2), (2, 2), (2, 2), (4, 1), (4, 1)]

        freq_axis = 2
        channel_axis = 3

        channel_size = 128
        min_size = min(input_shape[:2])
        melgram_input = Input(shape=input_shape)
        # x = ZeroPadding2D(padding=(0, 37))(melgram_input)

        x = Reshape((input_shape[0], input_shape[1], 1))(melgram_input)
        x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(x)

        # Conv block 1
        x = Convolution2D(filters=filters_size[0],
                          kernel_size=kernel_size,
                          padding='same',
                          name='conv1')(x)
        x = ELU()(x)
        x = BatchNormalization(axis=channel_axis, name='bn1')(x)
        x = MaxPooling2D(pool_size=pool_size[0],
                         strides=pool_size[0],
                         name='pool1')(x)
        x = Dropout(0.1, name='dropout1')(x)

        min_size = min_size // pool_size[0][0]

        for layer in range(1, layers):
            min_size = min_size // pool_size[layer][0]
            if min_size < 1:
                break
            x = Convolution2D(filters=filters_size[layer],
                              kernel_size=kernel_size,
                              padding='same',
                              name=f'conv{layer + 1}')(x)
            x = ELU()(x)
            x = BatchNormalization(axis=channel_axis, name=f'bn{layer + 1}')(x)
            x = MaxPooling2D(pool_size=pool_size[layer],
                             strides=pool_size[layer],
                             name=f'pool{layer + 1}')(x)
            x = Dropout(0.1, name=f'dropout{layer + 1}')(x)

        x = Reshape((-1, channel_size))(x)

        gru_units = 32
        if num_classes > 32:
            gru_units = int(num_classes * 1.5)
        # GRU block 1, 2, output
        x = CuDNNGRU(gru_units, return_sequences=True, name='gru1')(x)
        x = CuDNNGRU(gru_units, return_sequences=False, name='gru2')(x)
        x = Dropout(0.3)(x)
        outputs = Dense(num_classes, activation='softmax', name='output')(x)

        model = TFModel(inputs=melgram_input, outputs=outputs)
        optimizer = optimizers.Adam(
            # learning_rate=1e-3,
            lr=1e-3,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            decay=1e-4,
            amsgrad=True)
        model.compile(optimizer=optimizer,
                      loss="sparse_categorical_crossentropy",
                      metrics=['accuracy'])
        model.summary()
        self._model = model
        self.is_init = True
Example No. 9
def VGG16(save_dir):
    VGG16 = Sequential()
    VGG16.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               input_shape=(224, 224, 3),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(MaxPooling2D(pool_size=(2, 2)))
    VGG16.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(MaxPooling2D(pool_size=(2, 2)))
    VGG16.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(MaxPooling2D(pool_size=(2, 2)))
    VGG16.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(MaxPooling2D(pool_size=(2, 2)))
    VGG16.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               kernel_initializer='uniform'))
    VGG16.add(MaxPooling2D(pool_size=(2, 2)))
    VGG16.add(Flatten())
    VGG16.add(Dense(4096, activation='relu'))
    VGG16.add(Dropout(0.5))
    VGG16.add(Dense(4096, activation='relu'))
    VGG16.add(Dropout(0.5))
    VGG16.add(Dense(1000, activation='softmax'))
    VGG16.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    VGG16.summary()
    save_dir += 'VGG16.h5'
    return VGG16
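
# Usage sketch (an assumption, not part of the original snippet). Note that the
# function computes `save_dir + 'VGG16.h5'` but never writes to it, so the caller
# is assumed to handle persistence; the directory below is hypothetical.
vgg = VGG16('./models/')
vgg.save('./models/VGG16.h5')
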
X_train_CNN = X_train_ss.reshape((X_train_ss.shape[0], 1, X_train_ss.shape[1],
                                  1))  # shape ((867, 1, 7500, 1))
y_train_CNN = utils.to_categorical(y_train)  # shape (867, 6)

X_test_CNN = X_test_ss.reshape((X_test_ss.shape[0], 1, X_test_ss.shape[1],
                                1))  # shape ((217, 1, 7500, 1))
y_test_CNN = utils.to_categorical(y_test)  # shape (217, 6)

print(X_train_CNN.shape, X_test_CNN.shape, y_train_CNN.shape, y_test_CNN.shape)

conv = Sequential()
conv.add(
    Conv2D(64, (1, 3),
           activation='relu',
           input_shape=(1, N_channels * 30 * FreqSample // step, 1)))
conv.add(MaxPooling2D((1, 2)))

conv.add(Conv2D(128, (1, 3), activation='relu'))
conv.add(MaxPooling2D((1, 2)))

conv.add(Conv2D(256, (1, 3), activation='relu'))
conv.add(MaxPooling2D((1, 2)))

conv.add(Flatten())
conv.add(Dense(64, activation='relu'))
conv.add(Dropout(0.5))
conv.add(Dense(5, activation='softmax'))

conv.compile(loss='categorical_crossentropy',
             optimizer='rmsprop',
             metrics=['accuracy'])
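
# Training sketch (an assumption, not part of the original snippet): fitting the
# compiled model on the arrays prepared above. Epochs and batch size are
# illustrative; the one-hot label width must match the final Dense layer's units.
history = conv.fit(X_train_CNN, y_train_CNN,
                   validation_data=(X_test_CNN, y_test_CNN),
                   epochs=20,
                   batch_size=32)
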
Example No. 11
    def build_autoencoder(self, param):
        autoencoder = None
        input_img = Input(shape=(param.get('image_size'),
                                 param.get('image_size'),
                                 param.get('image_channels')),
                          name='input')
        x = Conv2D(256,
                   (param.get('cae_conv_size'), param.get('cae_conv_size')),
                   activation='relu',
                   padding='same')(input_img)  # tanh?
        x = MaxPooling2D(
            (param.get('cae_max_pool_size'), param.get('cae_max_pool_size')),
            padding='same')(x)
        x = Conv2D(128,
                   (param.get('cae_conv_size'), param.get('cae_conv_size')),
                   activation='relu',
                   padding='same')(x)
        x = MaxPooling2D(
            (param.get('cae_max_pool_size'), param.get('cae_max_pool_size')),
            padding='same')(x)
        x = Conv2D(128,
                   (param.get('cae_conv_size'), param.get('cae_conv_size')),
                   activation='relu',
                   padding='same')(x)
        x = MaxPooling2D(
            (param.get('cae_max_pool_size'), param.get('cae_max_pool_size')),
            padding='same')(x)
        x = Flatten()(x)
        encoded = Dense(param.get('code_size'), name='encoded')(x)

        print('encoded shape ', encoded.shape)
        ims = 8
        first = True
        x = Dense(int(ims * ims), activation='relu')(encoded)
        x = Reshape(target_shape=(ims, ims, 1))(x)  # -12
        while ims != param.get('image_size'):
            x = Conv2D(
                int(ims * ims / 2),
                (param.get('cae_conv_size'), param.get('cae_conv_size')),
                activation='relu',
                padding='same')(x)
            x = UpSampling2D((param.get('cae_max_pool_size'),
                              param.get('cae_max_pool_size')))(x)
            ims = ims * param.get('cae_max_pool_size')
        decoded = Conv2D(
            param.get('image_channels'),
            (param.get('cae_conv_size'), param.get('cae_conv_size')),
            activation='sigmoid',
            padding='same',
            name='decoded')(x)

        print('decoded shape ', decoded.shape)

        autoencoder = Model(input_img, decoded)
        autoencoder.compile(optimizer='adam', loss='mean_squared_error')

        # Create a separate encoder model
        encoder = Model(input_img, encoded)
        encoder.compile(optimizer='adam', loss='mean_squared_error')
        encoder.summary()

        # Create a separate decoder model
        decoder_inp = Input(shape=(param.get('code_size'), ))
        #		decoder_inp = Input(shape=encoded.output_shape)
        enc_layer_idx = utils.getLayerIndexByName(autoencoder, 'encoded')
        print('encoder layer idx ', enc_layer_idx)
        decoder_layer = autoencoder.layers[enc_layer_idx + 1](decoder_inp)
        for i in range(enc_layer_idx + 2, len(autoencoder.layers)):
            decoder_layer = autoencoder.layers[i](decoder_layer)
        decoder = Model(decoder_inp, decoder_layer)
        decoder.compile(optimizer='adam', loss='mean_squared_error')
        decoder.summary()

        return autoencoder, encoder, decoder
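
# Usage sketch (an assumption, not part of the original snippet): the keys of
# `param` are inferred from the `param.get(...)` calls above; the values are
# illustrative (image_size must equal 8 * pool_size**k for the decoder loop to stop).
param = {
    'image_size': 64,
    'image_channels': 1,
    'cae_conv_size': 3,
    'cae_max_pool_size': 2,
    'code_size': 32,
}
# autoencoder, encoder, decoder = builder.build_autoencoder(param)
# where `builder` is an instance of the (unshown) enclosing class.
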
Example No. 12
def unet2d(input_shape=(224, 224, 1), dropout_rate=0.5):
    """Instantiate two-dimensional U-Net architecture."""

    conv_kwds = {
        'kernel_size': (3, 3),
        'activation': 'relu',
        'padding': 'same',
        'kernel_regularizer': l2(0.1),
    }

    conv_transpose_kwds = {
        'kernel_size': (2, 2),
        'strides': (2, 2),
        'padding': 'same',
        'kernel_regularizer': l2(0.1),
    }

    inputs = Input(shape=input_shape)
    conv1 = Conv2D(64, **conv_kwds)(inputs)
    conv1 = Conv2D(64, **conv_kwds)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, **conv_kwds)(pool1)
    conv2 = Conv2D(128, **conv_kwds)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, **conv_kwds)(pool2)
    conv3 = Conv2D(256, **conv_kwds)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, **conv_kwds)(pool3)
    conv4 = Conv2D(512, **conv_kwds)(conv4)
    drop4 = Dropout(dropout_rate)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024, **conv_kwds)(pool4)
    conv5 = Conv2D(1024, **conv_kwds)(conv5)
    drop5 = Dropout(dropout_rate)(conv5)

    t = Conv2DTranspose(512, **conv_transpose_kwds)(drop5)
    up6 = Concatenate(axis=-1)([t, drop4])
    conv6 = Conv2D(512, **conv_kwds)(up6)
    conv6 = Conv2D(512, **conv_kwds)(conv6)

    t = Conv2DTranspose(256, **conv_transpose_kwds)(conv6)
    up7 = Concatenate(axis=-1)([t, conv3])
    conv7 = Conv2D(256, **conv_kwds)(up7)
    conv7 = Conv2D(256, **conv_kwds)(conv7)

    t = Conv2DTranspose(128, **conv_transpose_kwds)(conv7)
    up8 = Concatenate(axis=-1)([t, conv2])
    conv8 = Conv2D(128, **conv_kwds)(up8)
    conv8 = Conv2D(128, **conv_kwds)(conv8)

    t = Conv2DTranspose(64, **conv_transpose_kwds)(conv8)
    up9 = Concatenate(axis=-1)([t, conv1])
    conv9 = Conv2D(64, **conv_kwds)(up9)
    conv9 = Conv2D(64, **conv_kwds)(conv9)

    conv10 = Conv2D(1, (1, 1),
                    activation='sigmoid',
                    kernel_regularizer=l2(0.1))(conv9)

    return Model(inputs=[inputs], outputs=[conv10])
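
# Usage sketch (an assumption, not part of the original snippet): building and
# compiling the 2-D U-Net for binary segmentation of 224x224 single-channel images.
unet = unet2d(input_shape=(224, 224, 1), dropout_rate=0.5)
unet.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
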
Example No. 13
def create_model(learning_rate, num_dense_layers, num_dense_nodes, activation):
    """
    Hyper-parameters:
    learning_rate:     Learning-rate for the optimizer.
    num_dense_layers:  Number of dense layers.
    num_dense_nodes:   Number of nodes in each dense layer.
    activation:        Activation function for all layers.
    """

    # Start construction of a Keras Sequential model.
    model = Sequential()

    # Add an input layer which is similar to a feed_dict in TensorFlow.
    # Note that the input-shape must be a tuple containing the image-size.
    model.add(InputLayer(input_shape=(img_size_flat, )))

    # The input from MNIST is a flattened array with 784 elements,
    # but the convolutional layers expect images with shape (28, 28, 1)
    model.add(Reshape(img_shape_full))

    # First convolutional layer.
    # There are many hyper-parameters in this layer, but we only
    # want to optimize the activation-function in this example.
    model.add(
        Conv2D(kernel_size=5,
               strides=1,
               filters=16,
               padding='same',
               activation=activation,
               name='layer_conv1'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # Second convolutional layer.
    # Again, we only want to optimize the activation-function here.
    model.add(
        Conv2D(kernel_size=5,
               strides=1,
               filters=36,
               padding='same',
               activation=activation,
               name='layer_conv2'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    # Flatten the 4-rank output of the convolutional layers
    # to 2-rank that can be input to a fully-connected / dense layer.
    model.add(Flatten())

    # Add fully-connected / dense layers.
    # The number of layers is a hyper-parameter we want to optimize.
    for i in range(num_dense_layers):
        # Name of the layer. This is not really necessary
        # because Keras should give them unique names.
        name = 'layer_dense_{0}'.format(i + 1)

        # Add the dense / fully-connected layer to the model.
        # This has two hyper-parameters we want to optimize:
        # The number of nodes and the activation function.
        model.add(Dense(num_dense_nodes, activation=activation, name=name))

    # Last fully-connected / dense layer with softmax-activation
    # for use in classification.
    model.add(Dense(num_classes, activation='softmax'))

    # Use the Adam method for training the network.
    # We want to find the best learning-rate for the Adam method.
    optimizer = Adam(lr=learning_rate)

    # In Keras we need to compile the model so it can be trained.
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
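
# Usage sketch (an assumption, not part of the original snippet): the
# hyper-parameter values are illustrative; `img_size_flat`, `img_shape_full` and
# `num_classes` are globals the function above expects to exist.
model = create_model(learning_rate=1e-3,
                     num_dense_layers=2,
                     num_dense_nodes=128,
                     activation='relu')
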
Example No. 14
def cifar10_complicated_ensemble_submodel4(
        input_shape=None,
        input_tensor=None,
        n_classes=None,
        weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar10 network.

    :param n_classes: used in order to be compatible with the main script.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-7

    # Block1.
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization(name='block1_batch-norm1')(x)
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               name='block1_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block1_batch-norm2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)

    # Block2
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm1')(x)
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               name='block2_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool')(x)

    # Block3
    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               name='block3_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block3_batch-norm1')(x)
    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               name='block3_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block3_batch-norm2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block3_pool')(x)

    # Add top layers.
    x = Flatten(name='flatten')(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create Submodel 4.
    model = Model(inputs,
                  outputs,
                  name='cifar10_complicated_ensemble_submodel4')
    # Load weights, if they exist.
    load_weights(weights_path, model)
    return model
def build_model(initial_filters, size_final_dense, initializer):

    # Input Layer
    image_input = Input(shape=(
        image_height, image_width, image_depth
    ))  # Final element is number of channels, set as 1 for greyscale
    #x = BatchNormalization()(image_input)

    ### Block 1
    # Convolutional Layer 1
    x = Conv2D(filters=initial_filters,
               kernel_initializer=initializer,
               kernel_size=(3, 3),
               activation='relu',
               padding='same')(image_input)
    #x = BatchNormalization()(x)

    # Convolutional Layer 2
    x = Conv2D(filters=initial_filters,
               kernel_initializer=initializer,
               kernel_size=(3, 3),
               activation='relu',
               padding='same')(x)
    #x = BatchNormalization()(x)

    # Pooling Layer 1 - halve spatial dimension
    x = MaxPooling2D(pool_size=(2, 2))(x)

    ### Block 2
    # Convolutional Layer 3 - double number of filters
    x = Conv2D(filters=initial_filters * 2,
               kernel_initializer=initializer,
               kernel_size=(3, 3),
               activation='relu',
               padding='same')(x)
    #x = BatchNormalization()(x)

    # Convolutional Layer 4
    x = Conv2D(filters=initial_filters * 2,
               kernel_initializer=initializer,
               kernel_size=(3, 3),
               activation='relu',
               padding='same')(x)
    #x = BatchNormalization()(x)

    # Pooling Layer 2 - halve spatial dimension
    x = MaxPooling2D(pool_size=(2, 2))(x)

    ### Block 3
    # Convolutional Layer 5 - double number of filters
    x = Conv2D(filters=initial_filters * 2 * 2,
               kernel_initializer=initializer,
               kernel_size=(3, 3),
               activation='relu',
               padding='same')(x)
    #x = BatchNormalization()(x)

    # Convolutional Layer 6
    x = Conv2D(filters=initial_filters * 2 * 2,
               kernel_initializer=initializer,
               kernel_size=(3, 3),
               activation='relu',
               padding='same')(x)
    #x = BatchNormalization()(x)

    # Pooling Layer 3 - halve spatial dimension
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Dense Layer
    x = Flatten()(x)
    x = Dense(size_final_dense,
              activation='relu',
              kernel_initializer=initializer)(x)

    # Output Layer
    out = Dense(num_classes,
                activation='softmax',
                kernel_initializer=initializer)(
                    x)  # Task is binary classification

    model = Model(image_input, out)
    return model
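
# Usage sketch (an assumption, not part of the original snippet): the globals
# below are required by build_model(); the values are illustrative only.
image_height, image_width, image_depth = 64, 64, 1
num_classes = 2
model = build_model(initial_filters=32,
                    size_final_dense=128,
                    initializer='he_normal')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
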
def attentionModule(input):

    residual = input

    x = Conv2D(16, kernel_size=(3, 3), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(input)
    x = _bn_relu(x)
    x = Conv2D(16, kernel_size=(3, 3), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)
    x = _bn_relu(x)

    ## softmax branch: bottom-up 
    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(x)
    x = Conv2D(16, kernel_size=(3, 3), dilation_rate=(4,8), kernel_initializer="glorot_normal")(x)
    x = ZeroPadding2D()(x)
    x = _bn_relu(x)

    out_skip1 = Conv2D(16, kernel_size=(3, 3), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)
    out_skip1 = _bn_relu(out_skip1)
    print(out_skip1.shape)

    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(x)
    x = Conv2D(16, kernel_size=(3, 3), dilation_rate=(8,16), kernel_initializer="glorot_normal")(x)
    x = ZeroPadding2D()(x)
    x = _bn_relu(x)

    out_skip2 = Conv2D(16, kernel_size=(3, 3), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)
    out_skip2 = _bn_relu(out_skip2)
    print(out_skip2.shape)

    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(x)
    x = Conv2D(16, kernel_size=(3, 3), dilation_rate=(16,32), kernel_initializer="glorot_normal")(x)
    x = ZeroPadding2D()(x)
    x = _bn_relu(x)

    out_skip3 = Conv2D(16, kernel_size=(3, 3), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)
    out_skip3 = _bn_relu(out_skip3)
    print(out_skip3.shape)

    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1))(x)
    x = Conv2D(16, kernel_size=(3, 3), dilation_rate=(32,64), kernel_initializer="glorot_normal")(x)
    x = ZeroPadding2D()(x)
    x = _bn_relu(x)

    out_skip4 = Conv2D(16, kernel_size=(3, 3), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)
    out_skip4 = _bn_relu(out_skip4)
    print(out_skip4.shape)
    
    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 2))(x)
    x = Conv2D(16, kernel_size=(3, 3), dilation_rate=(64,128), kernel_initializer="glorot_normal")(x)
    x = ZeroPadding2D()(x)
    x = _bn_relu(x)
    x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_initializer="glorot_normal")(x)
    x = _bn_relu(x)
    
    #top down
    print(x.shape)
    #x = UpSamplingUnet(size=(905/454,1), interpolation='bilinear')(x)

    x = Lambda(resize_like, arguments={'ref_tensor':(137,851)})(x)
    x = Add()([x, out_skip4]) #x += out_skip4
    x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_initializer="glorot_normal")(x)
    x = _bn_relu(x)

    #x = UpSamplingUnet(size=((969/907),37/5), interpolation='bilinear')(x)
    x = Lambda(resize_like, arguments={'ref_tensor':(201,979)})(x)
    x = Add()([x, out_skip3]) #x += out_skip3
    x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_initializer="glorot_normal")(x)
    x = _bn_relu(x)
    
    #x = UpSampling2D(size=(1,1), interpolation='bilinear')(x)
    x = Lambda(resize_like, arguments={'ref_tensor':(233,1043)})(x)
    x = Add()([x, out_skip2]) #x += out_skip2
    x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_initializer="glorot_normal")(x)
    x = _bn_relu(x)  
    
    #x = UpSampling2D(size=(1,1), interpolation='bilinear')(x)
    x = Lambda(resize_like, arguments={'ref_tensor':(249,1075)})(x)
    x = Add()([x, out_skip1]) #x += out_skip1
    x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="same", kernel_initializer="glorot_normal")(x)
    x = _bn_relu(x)
    
    #x = UpSampling2D(size=(1,1), interpolation='bilinear')(x)
    x = Lambda(resize_like, arguments={'ref_tensor':(257,1091)})(x)

    # x = _bn_relu(x)
    # x = Conv2D(4, kernel_size=(1, 1), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)
    # x = _bn_relu(x)
    # x = Conv2D(1, kernel_size=(1, 1), strides=(1,1), padding="same", kernel_initializer="glorot_normal")(x)

    x = _bn_relu(x)
    x = Conv2D(32, kernel_size=(1, 1), strides=(1,1), padding="same", use_bias=False)(x)
    x = _bn_relu(x)
    x = Conv2D(32, kernel_size=(1, 1), strides=(1,1), padding="same", use_bias=False)(x)


    weight = Activation('sigmoid')(x)  # output of the sigmoid layer is the attention weight

    x = Multiply()([weight, residual])

    x = Add()([x, residual])

    return x, weight
Example No. 17
def cifar10_student_strong(n_classes: int,
                           input_shape=None,
                           input_tensor=None,
                           weights_path: Union[None, str] = None) -> Model:
    """
    Defines a cifar10 strong student network.

    :param n_classes: the number of classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained cifar10 tiny network's weights.
    :return: Keras functional Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 1e-4

    # Block1.
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='elu',
               name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(inputs)

    x = BatchNormalization(name='block1_batch-norm1')(x)
    x = Conv2D(32, (3, 3),
               padding='same',
               activation='elu',
               name='block1_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block1_batch-norm2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool')(x)
    x = Dropout(0.2, name='block1_dropout', seed=0)(x)

    # Block2.
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm1')(x)
    x = Conv2D(64, (3, 3),
               padding='same',
               activation='elu',
               name='block2_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name='block2_batch-norm2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block2_pool')(x)
    x = Dropout(0.3, name='block2_dropout', seed=0)(x)

    # Add top layers.
    x = Flatten()(x)
    x = Dense(n_classes)(x)
    outputs = Activation('softmax', name='softmax')(x)

    # Create model.
    model = Model(inputs, outputs, name='cifar10_student_strong')

    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
Example No. 18
from tensorflow.python.keras.preprocessing.image import load_img
from tensorflow.python.keras.preprocessing.image import img_to_array
import numpy as np
from tensorflow.python.keras.models import load_model, Model
from tensorflow.python.keras.layers import Dense, Dropout, Flatten, MaxPooling2D
from tensorflow.python.keras.applications import vgg19
from tensorflow.python.keras import regularizers
import cv2

#import torchvision.datasets.imagenet as imagenet  #to include models like Squeezenet,Alexnet

model = vgg19.VGG19(weights="imagenet",
                    include_top=False,
                    input_shape=(224, 224, 3))
#model.cuda()
# add new classifier layers
x = model.output
x = MaxPooling2D()(x)
for layer in model.layers:
    layer.trainable = False
x = Dense(units=256,
          activation="relu",
          kernel_regularizer=regularizers.l2(0.01))(x)
x = Dropout(0.4)(x)
x = Dense(units=256,
          activation="relu",
          kernel_regularizer=regularizers.l2(0.01))(x)
x = Dropout(0.4)(x)
x = Flatten()(x)
output = Dense(units=4, activation="softmax")(x)

model = Model(inputs=model.input, outputs=output)
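
# Completion sketch (an assumption, not part of the original snippet): the
# transfer-learning model above is never compiled here, so a plausible compile
# step follows.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()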
Example No. 19
def SENET50(include_top=True, weights='vggface',
            input_tensor=None, input_shape=None,
            pooling=None,
            classes=8631):
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(
        64, (7, 7), use_bias=False, strides=(2, 2), padding='same',
        name='conv1/7x7_s2')(img_input)
    x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = senet_conv_block(x, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1))
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=2)
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=3)

    x = senet_conv_block(x, 3, [128, 128, 512], stage=3, block=1)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=2)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=3)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=4)

    x = senet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6)

    x = senet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vggface_senet50')

    # load weights
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_senet50.h5',
                                    utils.SENET50_WEIGHTS_PATH,
                                    cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_senet50.h5',
                                    utils.SENET50_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='classifier')
                layer_utils.convert_dense_weights_data_format(dense, shape,
                                                              'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
Example No. 20
def caltech_model2(n_classes: int,
                   input_shape=None,
                   input_tensor=None,
                   weights_path: Union[None, str] = None) -> Model:
    """
    Defines a caltech network.

    :param n_classes: the number of classes.
    We use this parameter even though we know its value,
    in order to be able to use the model in order to predict some of the classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras functional API Model.
    """
    inputs = create_inputs(input_shape, input_tensor)

    # Define a weight decay for the regularisation.
    weight_decay = 5e-4

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(inputs)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)

    x = Conv2D(64, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(128, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(256, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Conv2D(512, (3, 3),
               padding='same',
               activation='relu',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)

    x = Flatten()(x)
    x = Dense(512, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    outputs = Dense(n_classes, activation='softmax', name='softmax_outputs')(x)

    # Create model.
    model = Model(inputs, outputs, name='caltech_model2')
    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
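
A short usage sketch; the class count and input size below are illustrative assumptions rather than values taken from the snippet:

from tensorflow.keras.optimizers import SGD

# Hypothetical values: 102 classes (e.g. Caltech-101 plus background) and 224x224 RGB inputs.
model = caltech_model2(n_classes=102, input_shape=(224, 224, 3))
model.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()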
Ejemplo n.º 21
# 1 for grayscale
num_channels = 1

# Number of classes, one class for each of 12 persons.
num_classes = 12
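# Note: img_dim and img_shape_full are assumed to be defined in an earlier cell,
# e.g. img_dim = 128 and img_shape_full = (img_dim, img_dim, num_channels).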

model = Sequential()

model.add(InputLayer(input_shape=(img_dim, img_dim)))

model.add(Reshape(img_shape_full))

model.add(Conv2D(kernel_size=5, strides=1, filters=16, padding="same",
                 activation="relu", name="conv_layer_1"))

model.add(MaxPooling2D(pool_size=2, strides=2))

model.add(Conv2D(kernel_size=5, strides=1, filters=36, padding="same",
                 activation="relu", name="conv_layer_2"))

model.add(MaxPooling2D(pool_size=2, strides=2))

model.add(Conv2D(kernel_size=5, strides=1, filters=56, padding="same",
                 activation="relu", name="conv_layer_3"))

model.add(MaxPooling2D(pool_size=2, strides=2))

model.add(Conv2D(kernel_size=5, strides=1, filters=80, padding="same",
                 activation="relu", name="conv_layer_4"))

model.add(MaxPooling2D(pool_size=2, strides=2))
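Ejemplo n.º 22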
                x_batch.append(process_wav_file(valid_df.wav_file.values[i]))
                y_batch.append(valid_df.label_id.values[i])
            x_batch = np.array(x_batch)
            y_batch = to_categorical(y_batch, num_classes=len(POSSIBLE_LABELS))
            yield x_batch, y_batch


# In[16]:

x_in = Input(shape=(257, 98, 2))
x = BatchNormalization()(x_in)
for i in range(4):
    x = Conv2D(16 * (2**i), (3, 3))(x)
    x = Activation('elu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
x = Conv2D(128, (1, 1))(x)
x_branch_1 = GlobalAveragePooling2D()(x)
x_branch_2 = GlobalMaxPool2D()(x)
x = concatenate([x_branch_1, x_branch_2])
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(len(POSSIBLE_LABELS), activation='softmax')(x)  # softmax to match the categorical cross-entropy loss below
model = Model(inputs=x_in, outputs=x)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# In[17]:
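
A hedged training sketch for the compiled spectrogram model above; train_generator, valid_generator and the step counts are assumed names and values, since the corresponding cells are not shown here (with tf.keras 2.x, fit accepts generators directly; older Keras versions would use fit_generator instead):

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(monitor='val_loss', patience=5),
    ModelCheckpoint('speech_model.h5', save_best_only=True),
]

history = model.fit(train_generator,          # assumed training batch generator
                    steps_per_epoch=100,      # assumed value
                    epochs=20,                # assumed value
                    validation_data=valid_generator,
                    validation_steps=20,
                    callbacks=callbacks)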
Ejemplo n.º 23
def build_unet(input_shape, num_classes):
    inputs = tf.keras.Input(shape=input_shape)
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   dilation_rate=2,
                   padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = BatchNormalization()(conv1)
    conv1 = Conv2D(64,
                   3,
                   activation='relu',
                   dilation_rate=2,
                   padding='same',
                   kernel_initializer='he_normal')(conv1)
    conv1 = BatchNormalization()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   dilation_rate=2,
                   padding='same',
                   kernel_initializer='he_normal')(pool1)
    conv2 = BatchNormalization()(conv2)
    conv2 = Conv2D(128,
                   3,
                   activation='relu',
                   dilation_rate=2,
                   padding='same',
                   kernel_initializer='he_normal')(conv2)
    conv2 = BatchNormalization()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool2)
    conv3 = BatchNormalization()(conv3)
    conv3 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv3)
    conv3 = BatchNormalization()(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool3)
    conv4 = BatchNormalization()(conv4)
    conv4 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv4)
    conv4 = BatchNormalization()(conv4)
    drop4 = Dropout(0.5)(conv4, training=True)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(pool4)
    conv5 = BatchNormalization()(conv5)
    conv5 = Conv2D(1024,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv5)
    conv5 = BatchNormalization()(conv5)
    drop5 = Dropout(0.5)(conv5, training=True)

    up6 = Conv2D(512,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(256,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(128,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv7))
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(64,
                 2,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(UpSampling2D(size=(2,
                                                                    2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(32,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)

    x = Flatten()(conv9)
    x = Dense(512, activation='relu', kernel_initializer='he_uniform')(x)
    x = Dropout(0.3)(x)
    x = Dense(num_classes, activation='softmax', name='predictions')(x)
    return Model(inputs=inputs, outputs=x)
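
A usage sketch, assuming a 64x64 RGB input and 10 classes (both assumptions). Note that despite its name, build_unet ends in a Flatten/Dense head, so it performs whole-image classification rather than per-pixel segmentation:

model = build_unet(input_shape=(64, 64, 3), num_classes=10)  # assumed shapes
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()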
Ejemplo n.º 24
def UNet64_2x2core_large(input_shape):
    """wie UNet64_out_expansed, aber downsampling bis 2x2"""
    inputs = Input(shape=input_shape)

    conv01 = Conv2D(10, kernel_size=(3, 3),
                    padding="same")(inputs)  # 10 x 64x64
    conv01 = Activation('relu')(conv01)
    conv01_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv01)  # 10 x 32x32
    print("0)", conv01_pool.shape, "10 x 32x32")

    conv02 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv01_pool)  # 20 x 32x32
    conv02 = Activation('relu')(conv02)
    conv02_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv02)  # 20 x 16x16
    print("1)", conv02_pool.shape, "20 x 16x16")

    conv03 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv02_pool)  # 20 x 16x16
    conv03 = Activation('relu')(conv03)
    conv03_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv03)  # 20 x 8x8
    print("2)", conv03_pool.shape, "20 x 8x8")

    conv04 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv03_pool)  # 20 x 8x8
    conv04 = Activation('relu')(conv04)
    conv04_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv04)  # 20 x 4x4
    print("3)", conv04_pool.shape, "20 x 4x4")

    conv05 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv04_pool)  # 20 x 4x4
    conv05 = Activation('relu')(conv05)
    conv05_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv05)  # 20 x 2x2
    print("4)", conv05_pool.shape, "20 x 2x2")

    ### UPSAMPLING:
    up05 = UpSampling2D((2, 2))(conv05_pool)  # 20 x 4x4
    up05 = concatenate([conv05, up05], axis=3)  # 20+20 x 4x4
    print("5)", up05.shape, "40 x 4x4")

    up04 = UpSampling2D((2, 2))(up05)  # 40 x 8x8
    up04 = concatenate([conv04, up04], axis=3)  # 20+40 x 8x8
    print("6)", up04.shape, "60 x 8x8")

    up03 = UpSampling2D((2, 2))(up04)  # 60 x 16x16
    up03 = concatenate([conv03, up03], axis=3)  # 20+60 x 16x16
    print("7)", up03.shape, "80 x 16x16")

    up02 = UpSampling2D((2, 2))(up03)  # 80 x 32x32
    up02 = concatenate([conv02, up02], axis=3)  # 20+80 x 32x32
    print("8)", up02.shape, "100 x 32x32")

    up01 = UpSampling2D((2, 2))(up02)  # 100 x 64x64
    up01 = concatenate([conv01, up01], axis=3)  # 10+100 x 64x64
    print("9)", up01.shape, "110 x 64x64")

    output = Conv2D(1, (3, 3), activation='relu',
                    padding="same")(up01)  # 1 x 64x64
    print("10)", output.shape, "1 x 64x64")
    output = Flatten()(output)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss="mean_squared_error", optimizer='adam')
    return model
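
A usage sketch; the 64x64 single-channel input is an assumption, and the spatial size must be divisible by 32 because the encoder pools five times:

model = UNet64_2x2core_large(input_shape=(64, 64, 1))  # assumed input shape
model.summary()  # already compiled inside with MSE loss and the Adam optimizer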
Ejemplo n.º 25
def ResNet50(input_shape=(64, 64, 3), classes=6):
    X_input = Input(input_shape)

    X = ZeroPadding2D((3, 3))(X_input)

    X = Conv2D(64, (7, 7),
               strides=(2, 2),
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    X = convolution_block(X,
                          f=3,
                          filters=[64, 64, 256],
                          stage=2,
                          block='a',
                          s=1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')

    X = convolution_block(X,
                          f=3,
                          filters=[128, 128, 512],
                          stage=3,
                          block='a',
                          s=2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')

    X = convolution_block(X,
                          f=3,
                          filters=[256, 256, 1024],
                          stage=4,
                          block='a',
                          s=2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')

    X = convolution_block(X,
                          f=3,
                          filters=[512, 512, 2048],
                          stage=5,
                          block='a',
                          s=2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')

    X = AveragePooling2D((2, 2), name='avg_pool')(X)

    X = Flatten()(X)
    X = Dense(classes,
              activation='softmax',
              name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model
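
A usage sketch with the function's own defaults; convolution_block and identity_block are assumed to be defined elsewhere in the same module:

model = ResNet50(input_shape=(64, 64, 3), classes=6)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()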
Ejemplo n.º 26
def UNet64(input_shape,
           n_predictions=1,
           lossfunction="mean_squared_error",
           simpleclassification=None,
           flatten_output=True,
           optimizer="adam",
           activation_hidden="relu",
           activation_output="relu",
           metrics=None):
    inputs = Input(shape=input_shape)

    conv01 = Conv2D(10, kernel_size=(3, 3),
                    padding="same")(inputs)  # 10 x 64x64
    conv01 = Activation(activation_hidden)(conv01)
    conv01_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv01)  # 10 x 32x32
    print("0)", conv01_pool.shape, "10 x 32x32")

    conv02 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv01_pool)  # 20 x 32x32
    conv02 = Activation(activation_hidden)(conv02)
    conv02_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv02)  # 20 x 16x16
    print("1)", conv02_pool.shape, "20 x 16x16")

    conv03 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv02_pool)  # 20 x 16x16
    conv03 = Activation(activation_hidden)(conv03)
    conv03_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv03)  # 20 x 8x8
    print("2)", conv03_pool.shape, "20 x 8x8")

    conv04 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv03_pool)  # 20 x 8x8
    conv04 = Activation(activation_hidden)(conv04)
    conv04_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv04)  # 20 x 4x4
    print("3)", conv04_pool.shape, "20 x 4x4")

    ### UPSAMPLING:
    up04 = UpSampling2D((2, 2))(conv04_pool)  # 20 x 8x8
    up04 = concatenate([conv04, up04], axis=3)  # 20+20 x 8x8
    print("4)", up04.shape, "40 x 8x8")

    up03 = UpSampling2D((2, 2))(up04)  # 40 x 16x16
    up03 = concatenate([conv03, up03], axis=3)  # 20+40 x 16x16
    print("5)", up03.shape, "60 x 16x16")

    up02 = UpSampling2D((2, 2))(up03)  # 60 x 32x32
    up02 = concatenate([conv02, up02], axis=3)  # 20+60 x 32x32
    print("6)", up02.shape, "80 x 32x32")

    up01 = UpSampling2D((2, 2))(up02)  # 80 x 64x64
    up01 = concatenate([conv01, up01], axis=3)  # 10+80 x 64x64
    print("7)", up01.shape, "90 x 64x64")

    output = Conv2D(n_predictions, (1, 1),
                    activation=activation_output)(up01)  # n_predictions x 64x64
    print("8)", output.shape, "{} x 64x64".format(n_predictions))
    if flatten_output:
        output = Flatten()(output)
        print("output flattened to {}".format(output.shape))
        if simpleclassification is not None:
            output = Dense(simpleclassification, activation='softmax')(output)
            print(
                "9)", output.shape,
                "for classification into {} classes (with softmax)".format(
                    simpleclassification))

    model = Model(inputs=inputs, outputs=output)
    if metrics is not None:
        model.compile(loss=lossfunction, optimizer=optimizer, metrics=metrics)
    else:
        model.compile(loss=lossfunction, optimizer=optimizer)
    return model
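
A usage sketch; the 64x64 two-channel input and the 3-class softmax head are illustrative assumptions:

model = UNet64(input_shape=(64, 64, 2),              # assumed input shape
               lossfunction='categorical_crossentropy',
               simpleclassification=3,                # adds a Dense softmax over 3 classes
               flatten_output=True,
               optimizer='adam',
               metrics=['accuracy'])
model.summary()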
Ejemplo n.º 27
config = tf.compat.v1.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)

# List the devices TensorFlow can see (e.g. to confirm the GPU is visible).
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense

model = Sequential()
model.add(Conv2D(32, kernel_size=2, input_shape=(450, 450, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, kernel_size=2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, kernel_size=2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
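
The snippet stops before compiling; a hedged completion that pairs the single sigmoid output with a binary cross-entropy loss (the optimizer choice is an assumption):

model.compile(optimizer='rmsprop',       # assumed optimizer
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()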
Ejemplo n.º 28
def unet_network(input_tensor, nb_classes):
    """Original architecture of the network.

    Parameters
    ----------
    input_tensor : Keras tensor, float32
        Input tensor with shape (batch_size, ?, ?, 1).
    nb_classes : int
        Number of final classes.

    Returns
    -------
    tensor : Keras tensor, float32
        Output tensor with shape (batch_size, ?, ?, nb_classes)

    """
    # contraction 1
    conv1 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv1')(input_tensor)  # (batch_size, ?, ?, 64)
    conv2 = Conv2D(filters=64,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv2')(conv1)  # (batch_size, ?, ?, 64)
    crop2 = Cropping2D(cropping=((88, 88), (88, 88)),
                       name="crop2")(conv2)  # (batch_size, ?, ?, 64)
    maxpool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                            name="maxpool2")(conv2)  # (batch_size, ?, ?, 64)

    # contraction 2
    conv3 = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv3')(maxpool2)  # (batch_size, ?, ?, 128)
    conv4 = Conv2D(filters=128,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv4')(conv3)  # (batch_size, ?, ?, 128)
    crop4 = Cropping2D(cropping=((40, 40), (40, 40)),
                       name="crop4")(conv4)  # (batch_size, ?, ?, 128)
    maxpool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                            name="maxpool4")(conv4)  # (batch_size, ?, ?, 128)

    # contraction 3
    conv5 = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv5')(maxpool4)  # (batch_size, ?, ?, 256)
    conv6 = Conv2D(filters=256,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv6')(conv5)  # (batch_size, ?, ?, 256)
    crop6 = Cropping2D(cropping=((16, 16), (16, 16)),
                       name="crop6")(conv6)  # (batch_size, ?, ?, 256)
    maxpool6 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                            name="maxpool6")(conv6)  # (batch_size, ?, ?, 256)

    # contraction 4
    conv7 = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv7')(maxpool6)  # (batch_size, ?, ?, 512)
    conv8 = Conv2D(filters=512,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv8')(conv7)  # (batch_size, ?, ?, 512)
    crop8 = Cropping2D(cropping=((4, 4), (4, 4)),
                       name="crop8")(conv8)  # (batch_size, ?, ?, 512)
    maxpool8 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                            name="maxpool8")(conv8)  # (batch_size, ?, ?, 512)

    # bottom
    conv9 = Conv2D(filters=1024,
                   kernel_size=(3, 3),
                   activation='relu',
                   name='conv9')(maxpool8)  # (batch_size, ?, ?, 1024)
    conv10 = Conv2D(filters=1024,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv10')(conv9)  # (batch_size, ?, ?, 1024)

    # expansion 1
    upconv11 = up_conv_2d(input_tensor=conv10, nb_filters=512,
                          name='upconv11')  # (batch_size, ?, ?, 512)
    concat11 = tf.concat(values=[crop8, upconv11], axis=-1,
                         name='concat11')  # (batch_size, ?, ?, 1024)
    conv12 = Conv2D(filters=512,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv12')(concat11)  # (batch_size, ?, ?, 512)
    conv13 = Conv2D(filters=512,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv13')(conv12)  # (batch_size, ?, ?, 512)

    # expansion 2
    upconv14 = up_conv_2d(input_tensor=conv13, nb_filters=256,
                          name='upconv14')  # (batch_size, ?, ?, 256)
    concat14 = tf.concat(values=[crop6, upconv14], axis=-1,
                         name='concat14')  # (batch_size, ?, ?, 512)
    conv15 = Conv2D(filters=256,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv15')(concat14)  # (batch_size, ?, ?, 256)
    conv16 = Conv2D(filters=256,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv16')(conv15)  # (batch_size, ?, ?, 256)

    # expansion 3
    upconv17 = up_conv_2d(input_tensor=conv16, nb_filters=128,
                          name='upconv17')  # (batch_size, ?, ?, 128)
    concat17 = tf.concat(values=[crop4, upconv17], axis=-1,
                         name='concat17')  # (batch_size, ?, ?, 256)
    conv18 = Conv2D(filters=128,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv18')(concat17)  # (batch_size, ?, ?, 128)
    conv19 = Conv2D(filters=128,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv19')(conv18)  # (batch_size, ?, ?, 128)

    # expansion 4
    upconv20 = up_conv_2d(input_tensor=conv19, nb_filters=64,
                          name='upconv20')  # (batch_size, ?, ?, 64)
    concat20 = tf.concat(values=[crop2, upconv20], axis=-1,
                         name='concat20')  # (batch_size, ?, ?, 128)
    conv21 = Conv2D(filters=64,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv21')(concat20)  # (batch_size, ?, ?, 64)
    conv22 = Conv2D(filters=64,
                    kernel_size=(3, 3),
                    activation='relu',
                    name='conv22')(conv21)  # (batch_size, ?, ?, 64)
    conv23 = Conv2D(filters=nb_classes,
                    kernel_size=(1, 1),
                    activation='sigmoid',
                    name='conv23')(conv22)  # (batch_size, ?, ?, nb_classes)

    return conv23
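
A usage sketch that wraps the tensor-to-tensor function above into a Model. The 572x572 single-channel input follows the original U-Net geometry (the unpadded convolutions shrink it to a 388x388 output); up_conv_2d is assumed to be the usual 2x2 stride-2 up-convolution defined elsewhere in the same module:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(572, 572, 1))
outputs = unet_network(inputs, nb_classes=2)   # (batch_size, 388, 388, 2)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()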
Ejemplo n.º 29
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

entrenamiento_generador = entrenamiento_datagen.flow_from_directory(
    r"C:\Users\pasky\Documents\Master_en_Matematicas\PFM\Neuronal_Networks\Reference_code\AMP-Tech\CNN desde cero\data\entrenamiento",
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

validacion_generador = test_datagen.flow_from_directory(
    r"C:\Users\pasky\Documents\Master_en_Matematicas\PFM\Neuronal_Networks\Reference_code\AMP-Tech\CNN desde cero\data\validacion",
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

cnn = Sequential()
cnn.add(Convolution2D(filtrosConv1, tamano_filtro1, padding="same",
                      input_shape=(longitud, altura, 3), activation='relu'))
cnn.add(MaxPooling2D(pool_size=tamano_pool))

cnn.add(Convolution2D(filtrosConv2, tamano_filtro2, padding="same"))
cnn.add(MaxPooling2D(pool_size=tamano_pool))

cnn.add(Flatten())
cnn.add(Dense(256, activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(clases, activation='softmax'))

cnn.compile(loss='categorical_crossentropy',
            optimizer=optimizers.Adam(lr=lr),
            metrics=['accuracy'])
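
A hedged training sketch using the two generators above; pasos, epocas and pasos_validacion are assumed values (the original hyperparameter cell is not shown), and with older standalone Keras fit_generator would be used instead of fit:

pasos = 1000              # assumed steps per epoch
epocas = 20               # assumed number of epochs
pasos_validacion = 200    # assumed validation steps

cnn.fit(entrenamiento_generador,
        steps_per_epoch=pasos,
        epochs=epocas,
        validation_data=validacion_generador,
        validation_steps=pasos_validacion)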


Ejemplo n.º 30
def build_vgg(img_rows: int = 224,
              img_cols: int = 224,
              num_classes: int = 1000):
    vgg = Sequential()
    vgg.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005),
               input_shape=(img_rows, img_cols, 3)))
    vgg.add(
        Conv2D(64,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(MaxPooling2D())  # initial size /2
    vgg.add(
        Conv2D(128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(128,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(MaxPooling2D())  # initial size /4
    vgg.add(
        Conv2D(256,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(256,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(256,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(MaxPooling2D())  # initial size /8
    vgg.add(
        Conv2D(512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(MaxPooling2D())  # initial size /16
    vgg.add(
        Conv2D(512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(
        Conv2D(512,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               kernel_regularizer=l2(0.0005)))
    vgg.add(MaxPooling2D())  # initial size /32
    vgg.add(Flatten())
    vgg.add(Dense(4096, activation='relu', kernel_regularizer=l2(0.0005)))
    vgg.add(Dense(4096, activation='relu', kernel_regularizer=l2(0.0005)))
    vgg.add(Dropout(0.5))
    vgg.add(
        Dense(num_classes, activation='softmax',
              kernel_regularizer=l2(0.0005)))

    return vgg
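
A usage sketch; the SGD-with-momentum settings below are conventional choices for training VGG from scratch, not values taken from the snippet:

from tensorflow.keras.optimizers import SGD

vgg = build_vgg(img_rows=224, img_cols=224, num_classes=1000)
vgg.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9),
            loss='categorical_crossentropy',
            metrics=['accuracy'])
vgg.summary()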