Example #1
def baseline_model():
    input_1 = Input(shape=(None, None, 3))
    input_2 = Input(shape=(None, None, 3))

    base_model = MobileNetV2(weights="imagenet", include_top=False)

    x1 = base_model(input_1)
    x2 = base_model(input_2)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x = Multiply()([x1, x2])

    x = Concatenate(axis=-1)([x, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.01)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input_1, input_2], out)

    model.compile(loss="binary_crossentropy", metrics=[acc], optimizer=Adam(0.00001))

    model.summary()

    return model
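These examples are excerpted from larger modules and omit their imports. A minimal set they broadly assume (a sketch; exact module paths vary across Keras/TensorFlow versions) is:

# Hedged sketch of the shared imports these snippets rely on.
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import (
    Activation, Add, AvgPool2D, BatchNormalization, Concatenate, Conv2D,
    Dense, Dropout, GlobalAvgPool2D, GlobalMaxPool2D, MaxPool2D, Multiply,
    ReLU, Subtract,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import MobileNetV2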
Example #2
def ResNet50(input_shape, num_classes):
  inputs = k.Input(shape=input_shape)

  x = Conv2D_BN(filters=64, kernel_size=7, strides=2)(inputs)
  x = MaxPooling2D(pool_size=3, strides=2, padding="same")(x)

  x = ResConvBlockB(filters=[64,64,256], strides=1)(x)
  for i in range(2):
    x = ResIdentityBlockB(filters=[64,64,256])(x)

  x = ResConvBlockB(filters=[128,128,512])(x)
  for i in range(3):
    x = ResIdentityBlockB(filters=[128,128,512])(x)

  x = ResConvBlockB(filters=[256,256,1024])(x)
  for i in range(5):
    x = ResIdentityBlockB(filters=[256,256,1024])(x)

  x = ResConvBlockB(filters=[512,512,2048])(x)
  for i in range(2):
    x = ResIdentityBlockB(filters=[512,512,2048])(x)
  
  output = GlobalAvgPool2D()(x)
  output = Flatten()(output)
  output = Dense(units=num_classes, activation="softmax")(output)

  return k.Model(inputs=inputs, outputs=output)
Example #3
    def __call__(self):
        # Return a keras model on call

        inputs = x = Input(shape=(None, None, 3))

        # preact has to be set by subclass
        x = Conv2D(self.filters[0],
                   5,
                   2,
                   "same",
                   kernel_regularizer=l2(self.reg))(x)
        # when preact is set, BN + ReLU are deferred to the residual blocks
        if not self.preact:
            x = BatchNormalization()(x)
            x = Activation("relu")(x)

        for filt in self.filters:
            x = self.block(x, filt, reduce=True, reg=self.reg, cs=self.cs)
            for _ in range(self.nblocks - 1):
                x = self.block(x, filt, reg=self.reg, cs=self.cs)

        if self.preact:
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
        x = GlobalAvgPool2D()(x)
        x = Dense(self.classes, activation="softmax")(x)

        model = Model(inputs, x)
        return model
Example #4
def densenet_ml4(img_shape, n_classes, finalAct='softmax', f=32):
    repetitions = 6, 12, 24  #,16
    r2 = 10

    def bn_rl_conv(x, f, k=1, s=1, p='same'):
        x = BatchNormalization(epsilon=1.001e-5)(x)
        x = ReLU()(x)
        x = Conv2D(f, k, strides=s, padding=p)(x)
        return x

    def dense_block(tensor, r):
        for _ in range(r):
            x = bn_rl_conv(tensor, 4 * f)
            x = bn_rl_conv(x, f, 3)
            tensor = Concatenate()([tensor, x])
        return tensor

    def transition_block(x):
        x = bn_rl_conv(x, K.int_shape(x)[-1] // 2)
        #x = Dropout(0.3)(x)
        x = AvgPool2D(2, strides=2, padding='same')(x)
        return x

    input = Input(img_shape)

    x = Conv2D(64, 7, strides=2, padding='same')(input)
    x = BatchNormalization(epsilon=1.001e-5)(x)
    x = ReLU()(x)
    x = MaxPool2D(3, strides=2, padding='same')(x)

    for r in repetitions:
        d = dense_block(x, r)
        x = transition_block(d)

    x = GlobalAvgPool2D()(d)
    """
    outputs = []
    for i in range(n_classes):
        print("class ", i)
        d = dense_block(x, r2)
        branch = transition_block(d)
        branch = GlobalAvgPool2D()(d)
        output = Dense(1, activation=finalAct)(branch)
        outputs.append(output)
    """
    outputs = []
    for i in range(n_classes):
        print("\rclass ", i, end="")
        d = Dense(1024, activation='relu')(x)
        d = Dense(512, activation='relu')(d)
        d = Dense(256, activation='relu')(d)
        output = Dense(1, activation=finalAct)(d)
        outputs.append(output)

    outputs = Concatenate()(outputs)
    #output = Dense(n_classes, activation=finalAct)(x)

    model = Model(input, outputs)

    return model
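A hedged usage sketch for the model above (input shape and class count are illustrative placeholders):

model = densenet_ml4((224, 224, 3), n_classes=5, finalAct='sigmoid')  # sigmoid suits the per-class binary heads
model.compile(optimizer='adam', loss='binary_crossentropy')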
Example #5
    def __init__(self, layer_num, include_top=False, **kwargs):
        super(ResNet, self).__init__(**kwargs)
        self.include_top = include_top
        if c.block_type[layer_num] == 'basic block':
            self.block = BasicBlock
        else:
            self.block = BottleneckBlock

        self.conv0 = Conv2D(64, (7, 7), strides=(2, 2), name='conv0', padding='same', use_bias=False)
        self.bn = BatchNormalization(name='bn', momentum=0.9, epsilon=1e-5)

        self.block_collector = []
        for layer_index, (b, f) in enumerate(zip(c.block_num[layer_num], c.filter_num), start=1):
            if layer_index == 1:
                if c.block_type[layer_num] == 'basic block':
                    self.block_collector.append(self.block(f, name='conv1_0'))
                else:
                    self.block_collector.append(self.block(f, projection=True, name='conv1_0'))
            else:
                self.block_collector.append(self.block(f, strides=(2, 2), name='conv{}_0'.format(layer_index)))

            for block_index in range(1, b):
                self.block_collector.append(self.block(f, name='conv{}_{}'.format(layer_index, block_index)))

        self.global_average_pooling = GlobalAvgPool2D()
        if self.include_top:
            self.fc = Dense(c.category_num, name='fully_connected', activation='softmax', use_bias=False)
Example #6
def shufflenet_v1(n_classes, start_channels, input_shape=(224, 224, 3)):
    groups = 2
    input = Input(input_shape)

    x = Conv2D(filters=24, kernel_size=3, strides=2, padding="same", use_bias=True)(input)  # filters was missing; 24 follows the ShuffleNet v1 stem
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = MaxPool2D(pool_size=3, strides=2, padding="same")(x)

    repetitions = [3, 7, 3]

    for i, repetition in enumerate(repetitions):
        channels = start_channels * (2**i)
        x = unit(x, groups, channels, strides=2)

        for _ in range(repetition):
            x = unit(x, groups, channels, strides=1)

    x = GlobalAvgPool2D()(x)
    output = Dense(n_classes, activation="softmax")(x)

    model = Model(input, output)

    return model
Example #7
def get_network(n=2, hidden_dim=128, use_pred=False, return_before_head=True):
    depth = n * 9 + 2
    n_blocks = ((depth - 2) // 9) - 1

    # The input tensor
    inputs = Input(shape=(32, 32, 3))
    x = experimental.preprocessing.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs)

    # The Stem Convolution Group
    x = stem(x)

    # The learner
    x = learner(x, n_blocks)

    # Projections
    trunk_output = GlobalAvgPool2D()(x)
    projection_outputs = projection_head(trunk_output, hidden_dim=hidden_dim)

    if return_before_head:
        model = Model(inputs, [trunk_output, projection_outputs])
    else:
        model = Model(inputs, projection_outputs)

    # Predictions
    if use_pred:
        prediction_outputs = prediction_head(projection_outputs)
        if return_before_head:
            model = Model(inputs, [projection_outputs, prediction_outputs])
        else:
            model = Model(inputs, prediction_outputs)

    return model
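The two flags select which heads the returned model exposes; a usage sketch, assuming the helpers stem, learner, projection_head, and prediction_head are defined by the surrounding project:

# with return_before_head=True and use_pred=False, calling the model on a
# batch of 32x32 RGB images yields (trunk_output, projection_outputs)
net = get_network(n=2, hidden_dim=128, use_pred=False, return_before_head=True)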
Example #8
def ResNet9(x_size, y_size, features):
    X_input = Input((x_size, y_size, features))

    X = Conv2D(16, (5, 5), strides=(2, 2), name="conv1",
               padding="same")(X_input)
    X = BatchNormalization(axis=3, name="bn_conv1")(X)
    X = Activation("Mish")(X)
    X = MaxPool2D((3, 3), strides=(2, 2), padding="same")(X)

    X = bottleneck_residual_block(X, 3, [16, 16])

    X = bottleneck_residual_block(X, 3, [32, 32], reduce=True, s=2)

    X = bottleneck_residual_block(X, 3, [64, 64], reduce=True, s=2)

    # X = bottleneck_residual_block(X, 3, [256, 256], reduce=True, s=2)

    X = GlobalAvgPool2D()(X)

    X = Flatten()(X)
    X = Dense(10, activation="softmax", name="fc")(X)

    model = Model(inputs=X_input, outputs=X, name="ResNet9")

    model.compile(
        loss="categorical_crossentropy",
        optimizer=Adam(learning_rate=0.001),  # `lr` is deprecated
        metrics=["categorical_accuracy"],
    )

    return model
Example #9
def Inception_v4(input, classes=1000):
    x = Stem(input)

    for _ in range(4):
        x = Inception_A(x)

    x = Reduction_A(x)

    for _ in range(7):
        x = Inception_B(x)

    x = Reduction_B(x)

    for _ in range(3):
        x = Inception_C(x)

    x = GlobalAvgPool2D()(x)

    x = tf.keras.layers.Dropout(rate=0.2)(x)  # the paper's keep-prob of 0.8 means a drop rate of 0.2; rate=0.8 would drop 80% of units

    output = Dense(units=classes, activation='softmax', name='output')(x)

    model = tf.keras.Model(input, output)

    return model
Example #10
 def __init__(self, repeat_num_list, cardinality):
     if len(repeat_num_list) != 4:
         raise ValueError('The length of repeat_num_list must be 4')
     super(ResNext, self).__init__()
     self.conv1 = Conv2D(filters=64,
                         kernel_size=7,
                         strides=2,
                         padding='same')
     self.bn1 = BatchNormalization()
     self.pool1 = MaxPool2D(pool_size=3, strides=2, padding='same')
     self.block1 = build_ResNext_block(filters=128,
                                       strides=1,
                                       groups=cardinality,
                                       repeat_num=repeat_num_list[0])
     self.block2 = build_ResNext_block(filters=256,
                                       strides=2,
                                       groups=cardinality,
                                       repeat_num=repeat_num_list[1])
     self.block3 = build_ResNext_block(filters=512,
                                       strides=2,
                                       groups=cardinality,
                                       repeat_num=repeat_num_list[2])
     self.block4 = build_ResNext_block(filters=1024,
                                       strides=2,
                                       groups=cardinality,
                                       repeat_num=repeat_num_list[3])
     self.pool2 = GlobalAvgPool2D()
     self.fc = Dense(units=NUM_CLASSES, activation='softmax')
Example #11
    def densenet(self, BINS, WIN_LEN, f=32):
        repetitions = 6, 12, 24, 16

        def bn_rl_conv(x, f, k=1, s=1, p='same'):
            x = layers.BatchNormalization()(x)
            x = keras.activations.relu(x)
            x = layers.Conv2D(f, k, strides=s, padding=p)(x)
            return x

        def dense_block(tensor, r):
            for _ in range(r):
                x = bn_rl_conv(tensor, 4 * f)
                x = bn_rl_conv(x, f, 3)
                tensor = Concatenate()([tensor, x])
            return tensor

        def transition_block(x):
            x = bn_rl_conv(x, K.int_shape(x)[-1] // 2)
            x = AvgPool2D(2, strides=2, padding='same')(x)
            return x

        noise_fft = keras.Input((BINS, WIN_LEN, 1))
        x = layers.Conv2D(64, 7, strides=2, padding='same')(noise_fft)
        x = MaxPool2D(3, strides=2, padding='same')(x)

        for r in repetitions:
            d = dense_block(x, r)
            x = transition_block(d)

        x = GlobalAvgPool2D()(d)

        output = Dense(257, activation='sigmoid')(x)

        model = Model(noise_fft, output)
        return model
Example #12
 def build(self, input_shape):
     ## downsampling
     if self.downsampling:
         if self.mode == 'max':
             self.pool = MaxPool2D(self.size, padding='same')
         elif self.mode == 'avg':
             self.pool = AvgPool2D(self.size, padding='same')
         elif self.mode == 'global':
             self.pool = GlobalAvgPool2D()
         else:
             raise NotImplementedError(f'No downsampling mode={self.mode}')
     ## upsampling
     else:
         if self.mode == 'pad':
             if not isinstance(self.size, (tuple, list)):
                 self.size = [self.size]
             if len(self.size) == 1:
                 self.size = list(self.size) * 2
             # note: this does not account for odd sizes
             self.pool = ZeroPadding2D(
                 padding=[(i - 1) * s // 2
                          for i, s in zip(self.size, input_shape[1:])])
         else:
             self.pool = UpSampling2D(size=self.size,
                                      interpolation=self.mode)
     self.reshape = Reshape((1, 1, input_shape[-1]))
     return super().build(input_shape)
Example #13
    def get_functional_graph(self, input_shape, batch_size=None):
        input_shape_2d, input_shape_1d = input_shape

        enc_num_dim = 16
        num_channels = 32

        input_2d = Input(shape=input_shape_2d, name="input_2d")
        input_1d = Input(shape=input_shape_1d, name="input_1d")

        conv = Convolution2D(num_channels, (4, 4),
                             strides=1,
                             padding='same',
                             kernel_initializer=GlorotNormal(),  # instantiate the initializer; passing the bare class breaks weight creation
                             use_bias=False,
                             name="conv_in")(input_2d)
        norm = BatchNormalization(name="norm_in")(conv)
        relu = ReLU(name="relu_in")(norm)

        res = ResNetBlock(32, 1, name="block1", first_block=True)(relu)
        res = ResNetBlock(64, 1, name="block2")(res)
        res = ResNetBlock(128, 1, name="block3")(res)

        avg = GlobalAvgPool2D(name="ResNet_out")(res)

        flat = Flatten(name='latent')(avg)

        num = Dense(enc_num_dim, activation="relu", name="enc_num")(input_1d)

        flat = Concatenate(name="final_enc")([flat, num])
        dense = Dense(self.latent_dim, activation="relu")(flat)
        for i in range(self.latent_depth - 1):
            dense = Dense(self.latent_dim, activation="relu")(dense)
        latent = dense

        return [input_2d, input_1d], latent
Example #14
def create_model(image_shape=(224, 224, 3),
                 restart_checkpoint=None,
                 backbone='mobilenetv2',
                 feature_len=128,
                 freeze=False):
    """
    Creates an image encoder.

    Args:
        image_shape: input image shape (use [None, None] for resizable network)
        restart_checkpoint: snapshot to be restored
        backbone: the backbone CNN (one of mobilenetv2, densenet121, custom)
        feature_len: the length of the additional feature layer
        freeze: freeze the backbone
    """
    input_img = Input(shape=image_shape)

    # add the backbone
    backbone_name = backbone

    if backbone_name == 'densenet121':
        print('Using DenseNet121 backbone.')
        backbone = DenseNet121(input_tensor=input_img, include_top=False)
        backbone.layers.pop()  # note: popping from .layers does not actually remove the layer from the graph in TF2
        if freeze:
            for layer in backbone.layers:
                layer.trainable = False
        backbone = backbone.output
    elif backbone_name == 'mobilenetv2':
        print('Using MobileNetV2 backbone.')
        backbone = MobileNetV2(input_tensor=input_img, include_top=False)
        backbone.layers.pop()  # note: popping from .layers does not actually remove the layer from the graph in TF2
        if freeze:
            for layer in backbone.layers:
                layer.trainable = False
        backbone = backbone.output
    elif backbone_name == 'custom':
        backbone = custom_backbone(input_tensor=input_img)
    else:
        raise Exception('Unknown backbone: {}'.format(backbone_name))

    # add the head layers
    gmax = GlobalMaxPool2D()(backbone)
    gavg = GlobalAvgPool2D()(backbone)
    gmul = Multiply()([gmax, gavg])
    ggavg = Average()([gmax, gavg])
    backbone = Concatenate()([gmax, gavg, gmul, ggavg])
    backbone = BatchNormalization()(backbone)
    backbone = Dense(feature_len)(backbone)
    backbone = Activation('sigmoid')(backbone)

    encoder = Model(input_img, backbone)

    if restart_checkpoint:
        print('Loading weights from {}'.format(restart_checkpoint))
        encoder.load_weights(restart_checkpoint,
                             by_name=True,
                             skip_mismatch=True)

    return encoder
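A hedged usage sketch (restart_checkpoint handling is project-specific and omitted):

import numpy as np

# build a MobileNetV2 encoder and embed one dummy image
encoder = create_model(image_shape=(224, 224, 3), backbone='mobilenetv2', feature_len=128)
embedding = encoder.predict(np.zeros((1, 224, 224, 3), dtype='float32'))  # shape (1, 128)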
Example #15
def channel_attention(inputs):
    """Channel Attention Map calculation

    The function aims to implement a simple
    version of a Channel Attention, whose output
    is a tensor that will weight each channel of the
    input.

    Args:
        inputs: Input tensor, above which the channel
        map will be calculated

    Returns:
        Channel Attention map
    """

    max_features = GlobalMaxPool2D()(inputs)
    avg_features = GlobalAvgPool2D()(inputs)

    extracted_max = extraction_network(max_features)
    extracted_avg = extraction_network(avg_features)

    merge = Add()([extracted_avg, extracted_max])

    return reshape(sigmoid(merge), (-1, 1, 1, inputs.shape[3]))
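The returned map has shape (batch, 1, 1, channels), so it broadcasts across the spatial axes when multiplied with the input, exactly as Example #28 does:

weighted = inputs * channel_attention(inputs)  # channel-wise reweighting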
Example #16
    def __init__(self, **kwargs):
        super(ResNet_v2_18, self).__init__(**kwargs)

        self.conv0 = Conv2D(64, (7, 7),
                            strides=(2, 2),
                            name='conv0',
                            padding='same',
                            use_bias=False)
        self.maxpool = MaxPooling2D((3, 3), strides=(2, 2), padding='same')
        self.block_collector = []
        block_num = 2, 2, 2, 2
        filters_num = 64, 128, 256, 512
        for i in range(1, 5):
            if i == 1:
                self.block_collector.append(
                    BasicBlock(filters_num[i - 1], name='conv1_0'))
            else:
                self.block_collector.append(
                    BasicBlock(filters_num[i - 1],
                               strides=(2, 2),
                               name='conv{}_0'.format(i)))

            for j in range(1, block_num[i - 1]):
                self.block_collector.append(
                    BasicBlock(filters_num[i - 1],
                               name='conv{}_{}'.format(i, j)))

        self.bn = BatchNormalization(name='bn', momentum=0.9, epsilon=1e-5)
        self.activation = Activation('relu')
        self.global_average_pooling = GlobalAvgPool2D()
        self.fc = Dense(c.num_class,
                        name='fully_connected',
                        activation='softmax',
                        use_bias=False)
Example #17
 def __init__(self, channel, reduction=16, **kwargs):
     super(SELayer, self).__init__(**kwargs)  # must run before sublayers are assigned
     self.channel = channel
     self.global_average_pooling = GlobalAvgPool2D()

     self.fc_1 = Dense(channel // reduction, activation='relu')
     self.fc_2 = Dense(channel, activation='sigmoid')
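The constructor above only creates the sublayers; a hedged sketch of a matching call, implementing standard squeeze-and-excitation gating (not shown in the source, and assuming tensorflow is imported as tf), might look like:

 def call(self, inputs):
     # squeeze: (N, H, W, C) -> (N, C)
     s = self.global_average_pooling(inputs)
     # excite: bottleneck MLP emits per-channel gates in (0, 1)
     s = self.fc_2(self.fc_1(s))
     # reshape to (N, 1, 1, C) so the gates broadcast over H and W
     s = tf.reshape(s, [-1, 1, 1, self.channel])
     return inputs * s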
Example #18
def MOBV1(shape, num_classes, alpha=1.0, include_top=True, weights=None):
    x_in = Input(shape=shape)

    #if x_in.shape[-1] == 3:
    #    x = CHANNEL_ATTENTION(x_in)
    #else:
    #    x = x_in
    x = get_conv_block(x_in, 32, (2, 2), alpha=alpha, name='initial')
    layers = [(64, (1, 1)), (128, (2, 2)), (128, (1, 1)), (256, (2, 2)),
              (256, (1, 1)), (512, (2, 2)), *[(512, (1, 1)) for _ in range(5)],
              (1024, (2, 2)), (1024, (2, 2))]  # note: the reference MobileNetV1 uses stride (1, 1) for its final 1024 block

    for i, (channels, strides) in enumerate(layers):
        x = get_dw_sep_block(x,
                             channels,
                             strides,
                             alpha=alpha,
                             name='block{}'.format(i))

    if include_top:
        x = GlobalAvgPool2D(name='global_avg')(x)
        x = Dense(num_classes, activation='softmax', name='softmax')(x)

    model = Model(inputs=x_in, outputs=x)

    if weights is not None:
        model.load_weights(weights, by_name=True)

    return model
Example #19
    def __init__(self, layer_num, category_num, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)

        block_type = {
            18: 'basic block',
            34: 'basic block',
            50: 'bottleneck block',
            101: 'bottleneck block',
            152: 'bottleneck block'
        }

        block_num = {
            18: (2, 2, 2, 2),
            34: (3, 4, 6, 3),
            50: (3, 4, 6, 3),
            101: (3, 4, 23, 3),
            152: (3, 4, 36, 3)
        }

        filter_num = (64, 128, 256, 512)

        if block_type[layer_num] == 'basic block':
            self.block = BasicBlock
        else:
            self.block = BottleneckBlock

        self.conv0 = Conv2D(64, (7, 7),
                            strides=(2, 2),
                            name='conv0',
                            padding='same',
                            use_bias=False)

        self.block_collector = []
        for layer_index, (b, f) in enumerate(zip(block_num[layer_num],
                                                 filter_num),
                                             start=1):
            if layer_index == 1:
                if block_type[layer_num] == 'basic block':
                    self.block_collector.append(self.block(f, name='conv1_0'))
                else:
                    self.block_collector.append(
                        self.block(f, projection=True, name='conv1_0'))
            else:
                self.block_collector.append(
                    self.block(f,
                               strides=(2, 2),
                               name='conv{}_0'.format(layer_index)))

            for block_index in range(1, b):
                self.block_collector.append(
                    self.block(f,
                               name='conv{}_{}'.format(layer_index,
                                                       block_index)))

        self.bn = BatchNormalization(name='bn', momentum=0.9, epsilon=1e-5)
        self.global_average_pooling = GlobalAvgPool2D()
        self.fc = Dense(category_num,
                        name='fully_connected',
                        activation='softmax',
                        use_bias=False)
Example #20
def squeeze(inputShape):  # inputShape is the full input shape, including channels
    
        def fire(x, fs, fe):
                s = Conv2D(fs, 1, activation='relu')(x)
                e1 = Conv2D(fe, 1, activation='relu')(s)
                e3 = Conv2D(fe, 3, padding='same', activation='relu')(s)
                output = concatenate([e1, e3])
                return output
    
        input = Input(inputShape)

        x = Conv2D(96, 7, strides=2, padding='same', activation='relu')(input)
        x = MaxPool2D(3, strides=2, padding='same')(x)

        x = fire(x, 16, 64)
        x = fire(x, 16, 64)
        x = fire(x, 32, 128)
        x = MaxPool2D(3, strides=2, padding='same')(x)

        x = fire(x, 32, 128)
        x = fire(x, 48, 192)
        x = fire(x, 48, 192)
        x = fire(x, 64, 256)
        x = MaxPool2D(3, strides=2, padding='same')(x)

        x = fire(x, 64, 256)
        x = Conv2D(450, 1)(x)

        x = GlobalAvgPool2D()(x)
        output = Dense(450)(x)

        model = Model(input, output)
        # leftover from experimentation: these loads are discarded and have no effect on `model`
        # tf.keras.models.load_model(r'C:\Users\jordan\Desktop\Siamv1')
        # tf.keras.models.load_model(r'C:\Users\jordan\Desktop\Siameq')
        return model
Example #21
    def __init__(self, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)

        self.conv0 = Conv2D(16, (7, 7),
                            strides=(1, 1),
                            name='conv0',
                            padding='same',
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(wd))
        self.block_collector = []
        block_num = 6, 6, 6
        filters_num = 16, 32, 64
        for i in range(1, 4):
            if i == 1:
                self.block_collector.append(
                    BasicBlock(filters_num[i - 1], name='conv1_0'))
            else:
                self.block_collector.append(
                    BasicBlock(filters_num[i - 1],
                               strides=(2, 2),
                               name='conv{}_0'.format(i)))

            for j in range(1, block_num[i - 1]):
                self.block_collector.append(
                    BasicBlock(filters_num[i - 1],
                               name='conv{}_{}'.format(i, j)))

        self.bn = BatchNormalization(name='bn', momentum=0.9, epsilon=1e-5)
        self.activation = Activation('relu')
        self.global_average_pooling = GlobalAvgPool2D()
        self.fc = Dense(num_classes,
                        name='fully_connected',
                        activation='softmax',
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(wd))
Example #22
def get_model():
    """
    Returns a compiled convolutional neural network model. Assume that the
    `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.
    The output layer should have `NUM_CATEGORIES` units, one for each category.
    """
    # choosing a resnet architecture

    # training on the small dataset gets nearly 100% train acc but ~15% test acc: overfitting
    # trained on the full dataset: 77% train acc, 74% test acc; more data has a
    # regularizing/generalizing effect, as expected, since there are orders of magnitude more
    # parameters than data points

    # needs data augmentation and regularization (see the sketch after this function)
    # how does keras initialize the weights and biases?

    start = Input(shape=(IMG_WIDTH, IMG_HEIGHT, 3))
    x = conv_batchnorm_relu(start, filters=64, kernel_size=7, strides=2)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    x = resnet_block(x, filters=64, reps=3, strides=1)
    x = resnet_block(x, filters=128, reps=4, strides=2)
    x = resnet_block(x, filters=256, reps=6, strides=2)
    x = resnet_block(x, filters=512, reps=3, strides=2)
    x = GlobalAvgPool2D()(x)
    output = Dense(units=NUM_CATEGORIES, activation='softmax')(x)

    model = Model(inputs=start, outputs=output)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
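The augmentation the comments call for could be sketched with Keras preprocessing layers (layer choice and magnitudes are assumptions; flips are omitted because they are not label-preserving for every category):

# hypothetical augmentation pipeline, applied between `start` and the first conv
augment = tf.keras.Sequential([
    tf.keras.layers.RandomRotation(0.05),
    tf.keras.layers.RandomZoom(0.1),
    tf.keras.layers.RandomTranslation(0.1, 0.1),
])
# x = conv_batchnorm_relu(augment(start), filters=64, kernel_size=7, strides=2)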
Example #23
def initialize_model():
    from model import Vggface2_ResNet50
    # Set basic environments.
    # Initialize GPUs
    toolkits.initialize_GPU()

    # ==> loading the pre-trained model.
    input1 = Input(shape=(224, 224, 3))
    input2 = Input(shape=(224, 224, 3))
    # x1 = resnet.resnet50_backend(input1)
    # x2 = resnet.resnet50_backend(input2)
    base_model = Vggface2_ResNet50(include_top=False)
    base_model.load_weights(weight_file, by_name=True)
    print("successfully load model ", weight_file)
    for x in base_model.layers:
        x.trainable = True
    x1 = base_model(input1)
    x2 = base_model(input2)

    x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
    x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])

    x3 = Subtract()([x1, x2])
    x3 = Multiply()([x3, x3])

    x1_ = Multiply()([x1, x1])
    x2_ = Multiply()([x2, x2])
    x4 = Subtract()([x1_, x2_])
    x = Concatenate(axis=-1)([x4, x3])

    x = Dense(100, activation="relu")(x)
    x = Dropout(0.3)(x)
    x = Dense(25, activation="relu")(x)
    x = Dropout(0.3)(x)
    out = Dense(1, activation="sigmoid")(x)

    model = Model([input1, input2], out)
    # for x in model.layers[-21:]:
    #     x.trainable = True

    model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=Adam(0.00005))

    model.summary()

    return model
Example #24
 def __init__(self, in_ch, ratio, **kwargs):
     super(cSE, self).__init__(**kwargs)
     # self.in_ch = in_ch
     self.squeeze_sp_avg_pool = GlobalAvgPool2D(name="squeeze_sp_avg_pool")
     self.excite_1 = Dense(in_ch // ratio, use_bias=False, name="excite_1")
     self.excite_r = ReLU(name="excite_ReLU")
     self.excite_2 = Dense(in_ch, use_bias=False, name="excite_2")
     self.excite_ch_sigmoid = Activation(tf.nn.sigmoid,
                                         name="excite_sigmoid")
Example #25
 def dense_block_branch(x, outputs):
     depth = 16
     for i in range(n_classes):
         d = dense_block(x, depth)
         branch = GlobalAvgPool2D()(d)
         output = Dense(1, activation=finalAct)(branch)
         outputs.append(output)
     outputs = Concatenate()(outputs)
     return outputs
Example #26
def cnn(input_shape=[28, 28, 1]):  # channel axis added; Conv2D requires rank-4 (NHWC) input
    inputs = Input(input_shape)
    #x = tf.reshape(inputs, [-1]+input_shape+[3])
    x = Conv2D(8, (3, 3), activation='relu', use_bias=False)(inputs)
    x = AvgPool2D(strides=2)(x)
    x = Conv2D(16, (3, 3), activation='relu', use_bias=False)(x)
    x = GlobalAvgPool2D()(x)
    x = Dense(10, activation='softmax', use_bias=False)(x)
    return Model(inputs=inputs, outputs=[x])
Example #27
    def call(self, inputs, training=False):

        for i in range(self.block):

            input_dim = int(np.shape(inputs)[-1])
            if input_dim * 2 == self.outdim:
                flag = True
                channel = input_dim // 2
            else:
                flag = False
                channel = input_dim // 2

            layers_split = list()

            for ii in range(self.cardinality):
                x = self.conv1(inputs, training=training)
                x = self.bitchnorm(x, training=training)
                x = self.relu(x)

                if flag:
                    x = self.conv2_2(x, training=training)
                else:
                    x = self.conv2(x, training=training)

                x = self.bitchnorm(x, training=training)
                x = self.relu(x)
                layers_split.append(x)

            x = tf.concat(layers_split, axis=3)
            print("》》》》》》》》》》》》》》X的维度>>>>>>>>>")
            print(x.shape)
            # 过渡层
            x = self.convout(x, training=training)
            print("》》X的维度")
            print(x.shape)
            x = self.bitchnorm(x, training=training)

            # se层
            se_x = GlobalAvgPool2D()(x)
            se_x = self.dens1(se_x)
            se_x = self.relu(se_x)
            se_x = self.dens2(se_x)
            excitation = tf.nn.sigmoid(se_x)

            excitation = tf.reshape(excitation, [-1, 1, 1, self.outdim])
            x = x * excitation

            if flag is True:
                pad_input_x = self.argpool(inputs)
                pad_input_x = tf.pad(
                    pad_input_x, [[0, 0], [0, 0], [0, 0], [channel, channel]])
            else:
                pad_input_x = inputs

            inputs = self.relu(x + pad_input_x)

        return inputs
Example #28
def get_xception():
    """Returns a partially pretrained Xception network.

        The function freezes a pretrained Xception base, taps the output of an
        intermediate layer, and stacks an attention-weighted head on top.

        Returns:
            A modified, partially pretrained Xception-based model.
    """

    model = Xception(include_top=False)

    model.trainable = False

    core_output = model.layers[45].output

    # Weighting Xception output via channel and spatial Attention

    channel_attention_map = channel_attention(core_output)
    channel_weighted = core_output * channel_attention_map
    spatial_attention_map = spatial_attention(channel_weighted)
    core_output = channel_weighted * spatial_attention_map

    for _ in range(5):

        output = relu(core_output)
        output = SeparableConvolution2D(728, (3, 3),
                                        padding='same',
                                        depthwise_regularizer=L2(0.2),
                                        pointwise_regularizer=L2(0.03))(output)
        output = BatchNormalization()(output)
        output = Dropout(0.3)(output)

        output = relu(output)
        output = SeparableConvolution2D(728, (3, 3),
                                        padding='same',
                                        depthwise_regularizer=L2(0.2),
                                        pointwise_regularizer=L2(0.03))(output)
        output = BatchNormalization()(output)
        output = Dropout(0.3)(output)

        core_output = Add()([output, core_output])

        # Output Weighting via Attention

        channel_attention_map = channel_attention(core_output)
        channel_weighted = core_output * channel_attention_map
        spatial_attention_map = spatial_attention(channel_weighted)
        core_output = channel_weighted * spatial_attention_map

    model_output = GlobalAvgPool2D()(core_output)

    model_output = Dense(1, activation='sigmoid')(model_output)

    model = Model(inputs=model.input, outputs=model_output)

    return model
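A hedged usage sketch; the single sigmoid unit makes this a binary classifier:

model = get_xception()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])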
Example #29
 def __init__(self, base):
     super().__init__()
     # self.base = ResNet152(include_top=False, weights='imagenet')
     self.base = base
     # freeze the base network's weights
     self.base.trainable = False
     self.net = Sequential(
         [GlobalAvgPool2D(),
          Dense(512, activation='relu'),
          Dense(2)])
Example #30
 def fc_branch(x, outputs):
     x = GlobalAvgPool2D()(x)  # was GlobalAvgPool2D()(d); `d` is not defined in this scope
     for i in range(n_classes):
         d = Dense(1024, activation='relu')(x)  # each class branch starts from the shared pooled features
         d = Dense(512, activation='relu')(d)
         d = Dense(256, activation='relu')(d)
         output = Dense(1, activation=finalAct)(d)
         outputs.append(output)
     outputs = Concatenate()(outputs)
     return outputs