Example #1
def MobileNetV2(input_shape, num_classes):

    input = Input(shape=input_shape)
    network = Conv2D(filters=32,
                     kernel_size=(3, 3),
                     strides=(2, 2),
                     padding="same")(input)
    network = BatchNormalization()(network)
    network = Activation(relu6)(network)

    network = bottle_neck(network, 32, 16, 1, 1)

    network = bottle_neck(network, 16, 24, 6, 2)
    network = bottle_neck(network, 24, 24, 6, 1)

    network = bottle_neck(network, 24, 32, 6, 2)
    network = bottle_neck(network, 32, 32, 6, 1)
    network = bottle_neck(network, 32, 32, 6, 1)

    network = bottle_neck(network, 32, 64, 6, 2)
    network = bottle_neck(network, 64, 64, 6, 1)
    network = bottle_neck(network, 64, 64, 6, 1)
    network = bottle_neck(network, 64, 64, 6, 1)

    network = bottle_neck(network, 64, 96, 6, 1)
    network = bottle_neck(network, 96, 96, 6, 1)
    network = bottle_neck(network, 96, 96, 6, 1)

    network = bottle_neck(network, 96, 160, 6, 2)
    network = bottle_neck(network, 160, 160, 6, 1)
    network = bottle_neck(network, 160, 160, 6, 1)

    network = bottle_neck(network, 160, 320, 6, 1)

    network = Conv2D(kernel_size=(1, 1),
                     strides=(1, 1),
                     padding="same",
                     filters=1280)(network)
    network = BatchNormalization()(network)
    network = Activation(relu6)(network)
    network = AvgPool2D(pool_size=(7, 7))(network)
    network = Flatten()(network)
    network = Dense(units=num_classes, activation="softmax")(network)

    model = Model(inputs=input, outputs=network)

    return model
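The snippet above relies on a relu6 activation and a bottle_neck helper that are not shown. Below is a minimal sketch, assuming the standard MobileNetV2 inverted-residual design and the argument order implied by the calls, bottle_neck(x, in_channels, out_channels, t, s):

import tensorflow as tf
from tensorflow.keras.layers import (Conv2D, DepthwiseConv2D, BatchNormalization,
                                     Activation, Add)

def relu6(x):
    # ReLU capped at 6, as used throughout the MobileNet family.
    return tf.nn.relu6(x)

def bottle_neck(x, in_channels, out_channels, t, s):
    # 1x1 expansion -> 3x3 depthwise (stride s) -> 1x1 linear projection.
    shortcut = x
    x = Conv2D(in_channels * t, (1, 1), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation(relu6)(x)
    x = DepthwiseConv2D((3, 3), strides=(s, s), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation(relu6)(x)
    x = Conv2D(out_channels, (1, 1), padding="same")(x)
    x = BatchNormalization()(x)
    if s == 1 and in_channels == out_channels:
        # Residual connection only when spatial size and width both match.
        x = Add()([shortcut, x])
    return x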
Example #2
def googlenet(input_data, n_classes):
    def inception_block(x, f):
        t1 = Conv2D(f[0], 1, activation='relu')(x)

        t2 = Conv2D(f[1], 1, activation='relu')(x)
        t2 = Conv2D(f[2], 3, padding="same", activation='relu')(t2)

        t3 = Conv2D(f[3], 1, activation='relu')(x)
        t3 = Conv2D(f[4], 5, padding='same', activation='relu')(t3)

        t4 = MaxPool2D(3, 1, padding='same')(x)
        t4 = Conv2D(f[5], 1, activation='relu')(t4)

        output = concatenate([t1, t2, t3, t4])
        return output

    input = Input(input_data)

    x = Conv2D(64, 7, strides=2, padding="same", activation='relu')(input)
    x = MaxPool2D(3, strides=2, padding='same')(x)

    x = Conv2D(64, 1, activation='relu')(x)
    x = Conv2D(192, 3, padding='same', activation='relu')(x)
    x = MaxPool2D(3, strides=2)(x)

    x = inception_block(x, [64, 96, 128, 16, 32, 32])
    x = inception_block(x, [128, 128, 192, 32, 96, 64])
    x = MaxPool2D(strides=2, padding='same')(x)

    x = inception_block(x, [192, 96, 208, 16, 48, 64])
    x = inception_block(x, [160, 112, 224, 24, 64, 64])
    x = inception_block(x, [128, 128, 256, 24, 64, 64])
    x = inception_block(x, [112, 144, 288, 32, 64, 64])
    x = inception_block(x, [256, 160, 320, 32, 128, 128])
    x = MaxPool2D(3, strides=2, padding='same')(x)

    x = inception_block(x, [256, 160, 320, 32, 128, 128])
    x = inception_block(x, [384, 192, 384, 48, 128, 128])
    print(x.get_shape())

    x = AvgPool2D(3, strides=1)(x)
    x = Dropout(0.4)(x)

    x = Flatten()(x)
    output = Dense(n_classes, activation='softmax')(x)
    model = Model(input, output)
    return model
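Example #2 is self-contained apart from imports; a usage sketch, assuming the tf.keras layer names used above:

from tensorflow.keras.layers import (Input, Conv2D, MaxPool2D, AvgPool2D,
                                     Dropout, Flatten, Dense, concatenate)
from tensorflow.keras.models import Model

model = googlenet((224, 224, 3), n_classes=1000)  # 224x224 RGB input
model.summary()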
Example #3
def get_model_1(args):
    model = Sequential()
    model.add(
        Conv2D(32, (5, 5), input_shape=(args.area_size, args.area_size, 14)))
    model.add(Activation('relu'))
    model.add(Conv2D(16, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPool2D((1, 1), strides=(1, 1)))  # a 1x1 pool with stride 1 is effectively an identity op
    model.add(Dropout(0.25))
    #
    model.add(AvgPool2D((3, 3), strides=(1, 1)))
    model.add(Flatten(name="flatten"))
    #
    model.add(Dense(1, name='last_layer'))
    model.add(Activation('sigmoid'))

    return model
Example #4
def make_model(features, layer_name="block2_conv1", pooling=None):
    vgg = VGG16(include_top=False)
    layer = vgg.get_layer(layer_name)
    x = layer.output
    num_chars, char_w, char_h, char_filters = features.shape
    fil_or = 1  # subsampling step over filter rows/cols; 1 keeps everything (a no-op as written)
    filters = features.transpose((1, 2, 3, 0)).astype(np.float32)[::fil_or, ::fil_or, ...]
    filter_norm = np.sqrt(np.sum(np.square(filters), axis=(0, 1), keepdims=True))  # computed but never used
    x = BatchNormalization()(x)
    specialized_layer = Conv2D(num_chars, (char_w, char_h))
    x = specialized_layer(x)
    biases = np.zeros((num_chars, ))
    specialized_layer.set_weights([filters, biases])
    if pooling:
        x = AvgPool2D()(x)
    model = Model(inputs=vgg.input, outputs=x)
    return model
Example #5
def ResNet(input_shape=(28, 28, 1), classes=10):
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    X = Conv2D(16, (7, 7),
               strides=(2, 2),
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPool2D((3, 3), strides=(2, 2))(X)
    X = convolution_block(X,
                          f=3,
                          filters=[16, 16, 64],
                          stage=2,
                          block='a',
                          s=1)
    X = identity_block(X, 3, [16, 16, 64], stage=2, block='b')
    X = identity_block(X, 3, [16, 16, 64], stage=2, block='c')
    X = convolution_block(X,
                          f=3,
                          filters=[32, 32, 128],
                          stage=3,
                          block='a',
                          s=2)
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='b')
    X = identity_block(X, 3, [32, 32, 128], stage=3, block='c')
    X = convolution_block(X,
                          f=3,
                          filters=[64, 64, 256],
                          stage=4,
                          block='a',
                          s=2)
    X = identity_block(X, 3, [64, 64, 256], stage=4, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=4, block='c')
    X = AvgPool2D((2, 2), name='avg_pool')(X)
    X = Flatten()(X)
    X = Dense(classes,
              activation='softmax',
              name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)
    model = Model(inputs=X_input, outputs=X, name='ResNet')
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
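Example #5 assumes identity_block and convolution_block helpers in the style of the standard bottleneck ResNet blocks; a hedged sketch matching the call signatures above (not the original implementation):

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add
from tensorflow.keras.initializers import glorot_uniform

def identity_block(X, f, filters, stage, block):
    # Bottleneck residual block whose shortcut is the unmodified input.
    F1, F2, F3 = filters
    base = 'res' + str(stage) + block
    shortcut = X
    X = Conv2D(F1, (1, 1), kernel_initializer=glorot_uniform(seed=0),
               name=base + '_a')(X)
    X = Activation('relu')(BatchNormalization(axis=3)(X))
    X = Conv2D(F2, (f, f), padding='same',
               kernel_initializer=glorot_uniform(seed=0), name=base + '_b')(X)
    X = Activation('relu')(BatchNormalization(axis=3)(X))
    X = Conv2D(F3, (1, 1), kernel_initializer=glorot_uniform(seed=0),
               name=base + '_c')(X)
    X = BatchNormalization(axis=3)(X)
    return Activation('relu')(Add()([X, shortcut]))

def convolution_block(X, f, filters, stage, block, s):
    # Same shape of block, but a strided 1x1 conv projects the shortcut.
    F1, F2, F3 = filters
    base = 'res' + str(stage) + block
    shortcut = Conv2D(F3, (1, 1), strides=(s, s),
                      kernel_initializer=glorot_uniform(seed=0),
                      name=base + '_sc')(X)
    shortcut = BatchNormalization(axis=3)(shortcut)
    X = Conv2D(F1, (1, 1), strides=(s, s),
               kernel_initializer=glorot_uniform(seed=0), name=base + '_a')(X)
    X = Activation('relu')(BatchNormalization(axis=3)(X))
    X = Conv2D(F2, (f, f), padding='same',
               kernel_initializer=glorot_uniform(seed=0), name=base + '_b')(X)
    X = Activation('relu')(BatchNormalization(axis=3)(X))
    X = Conv2D(F3, (1, 1), kernel_initializer=glorot_uniform(seed=0),
               name=base + '_c')(X)
    X = BatchNormalization(axis=3)(X)
    return Activation('relu')(Add()([X, shortcut]))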
Example #6
    def InceptionC(self, net):
        b1 = AvgPool2D((3, 3), strides=(1, 1), padding='same')(net)
        b1 = self.conv_bn(b1, 256, 1, 1)

        b2 = self.conv_bn(net, 256, 1, 1)

        b3 = self.conv_bn(net, 384, 1, 1)
        b3_1 = self.conv_bn(b3, 256, 3, 1)
        b3_2 = self.conv_bn(b3, 256, 1, 3)

        b4 = self.conv_bn(net, 384, 1, 1)
        b4 = self.conv_bn(b4, 448, 3, 1)
        b4 = self.conv_bn(b4, 512, 1, 3)
        b4_1 = self.conv_bn(b4, 256, 1, 3)
        b4_2 = self.conv_bn(b4, 256, 3, 1)

        net = concatenate([b1, b2, b3_1, b3_2, b4_1, b4_2], axis=-1)
        return net
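Examples #6 and #9 call a conv_bn method that is not shown. A plausible sketch, assuming a (filters, kernel_height, kernel_width) signature inferred from the calls and the usual tf.keras layer imports:

    def conv_bn(self, net, filters, kh, kw, strides=(1, 1)):
        # Conv2D -> BatchNormalization -> ReLU; 'same' padding so the
        # factorized 1x3 / 3x1 kernels keep the spatial size for the concat.
        net = Conv2D(filters, (kh, kw), strides=strides,
                     padding='same', use_bias=False)(net)
        net = BatchNormalization()(net)
        return Activation('relu')(net)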
Example #7
    def shufflenet_block(tensor, ch, s, g):
        x = gconv(tensor, ch // 4, g)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = ch_shuffle(x, g)
        x = DepthwiseConv2D(3, strides=s, padding='same')(x)
        x = BatchNormalization()(x)
        x = gconv(x, ch if s == 1 else ch - K.int_shape(tensor)[-1], g)
        x = BatchNormalization()(x)

        if s == 1:
            x = Add()([tensor, x])
        else:
            avg = AvgPool2D(3, strides=2, padding='same')(tensor)
            x = Concatenate()([avg, x])

        output = ReLU()(x)
        return output
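The block depends on gconv (grouped 1x1 convolution) and ch_shuffle (channel shuffle) helpers that are not shown; hedged sketches consistent with the ShuffleNet design and the calls above (group counts are assumed to divide the channel counts):

import tensorflow.keras.backend as K
from tensorflow.keras.layers import Conv2D, Lambda, Concatenate

def gconv(tensor, channels, groups):
    # Grouped 1x1 convolution: split the channels into `groups` slices,
    # convolve each slice separately, then concatenate the results.
    in_channels = K.int_shape(tensor)[-1]
    group_in = in_channels // groups
    group_out = channels // groups
    slices = []
    for g in range(groups):
        s = Lambda(lambda t, g=g: t[..., g * group_in:(g + 1) * group_in])(tensor)
        slices.append(Conv2D(group_out, 1)(s))
    return Concatenate()(slices)

def ch_shuffle(x, groups):
    # Channel shuffle: reshape to (H, W, groups, C // groups), swap the two
    # channel axes, and flatten back so information mixes across groups.
    def shuffle(t):
        _, h, w, c = K.int_shape(t)
        t = K.reshape(t, (-1, h, w, groups, c // groups))
        t = K.permute_dimensions(t, (0, 1, 2, 4, 3))
        return K.reshape(t, (-1, h, w, c))
    return Lambda(shuffle)(x)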
Example #8
def build_convnet(shape=(112, 112, 3)):  #shape=size*channel
    momentum = .9
    model = keras.Sequential()
    model.add(BatchNormalization(input_shape=shape))  #BN to standardize inputs
    model.add(
        Conv2D(64, (5, 5), padding='same', activation='relu',
               use_bias=False))  # layer output shape- 112*112*64
    model.add(MaxPooling2D(pool_size=(2, 2)))  #layer output shape- 56*56*64
    model.add(
        Conv2D(128, (5, 5), activation='relu',
               use_bias=False))  #layer output shape- 52*52*128=((s-f)/st)+1
    model.add(MaxPooling2D(pool_size=(2, 2)))  #layer output shape- 26*26*128
    model.add(Conv2D(256, (5, 5), activation='relu',
                     use_bias=False))  #layer output shape- 22*22*256
    model.add(AvgPool2D(pool_size=(11, 11),
                        strides=11))  #layer output shape- 2*2*256
    model.add(Flatten())  #1024 features for each frame
    return model
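A quick usage sketch (assuming the imports used above) to confirm the annotated shapes; the flattened output is 2 * 2 * 256 = 1024 features per frame:

convnet = build_convnet((112, 112, 3))
convnet.summary()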
Example #9
    def InceptionB(self, net):
        b1 = AvgPool2D((3, 3), strides=(1, 1), padding='same')(net)
        b1 = self.conv_bn(b1, 128, 1, 1)

        b2 = self.conv_bn(net, 384, 1, 1)

        b3 = self.conv_bn(net, 192, 1, 1)
        b3 = self.conv_bn(b3, 224, 7, 1)
        b3 = self.conv_bn(b3, 256, 7, 1)

        b4 = self.conv_bn(net, 192, 1, 1)
        b4 = self.conv_bn(b4, 192, 7, 1)
        b4 = self.conv_bn(b4, 224, 1, 7)
        b4 = self.conv_bn(b4, 224, 7, 1)
        b4 = self.conv_bn(b4, 256, 1, 7)

        net = concatenate([b1, b2, b3, b4], axis=-1)

        return net
Example #10
    def build_actor(self, NUM_STATE, NUM_ACTIONS, LOSS_FUNC):
        """
        Builds Actor Network with optional layers with increasing filters each layer
            The actor predicts an action based on the state of the game
        """
        state_input = Input(shape=NUM_STATE, name="actor_state_input")
        advantage = Input(
            shape=(1, ), name="actor_advantage"
        )  # Advantage is the critic predicted rewards subtracted from the actual rewards
        old_prediction = Input(shape=(NUM_ACTIONS, ),
                               name="actor_previous_prediction"
                               )  # Previous action predictions (probabilities)

        x = Conv2D(filters=self.NUM_FILTERS,
                   name="actor_block0_conv0",
                   **self.parameter_dict)(state_input)
        for i in range(self.NUM_BLOCKS):
            x = Conv2D(filters=self.NUM_FILTERS * (i + 2),
                       name="actor_block{0}_conv0".format(i + 1),
                       **self.parameter_dict)(x)
            x = Conv2D(filters=self.NUM_FILTERS * (i + 2),
                       name="actor_block{0}_conv1".format(i + 1),
                       padding="same",
                       **self.parameter_dict)(x)
            x = AvgPool2D(pool_size=(2, 2),
                          name="actor_block{0}_avgpool".format(i + 1))(x)
        x = Flatten(name="actor_flatten")(x)
        x = Dense(self.HIDDEN_SIZE,
                  activation=self.ACTIVATION,
                  name="actor_dense1_{0}".format(self.ACTIVATION))(x)
        out_actions = Dense(NUM_ACTIONS,
                            activation='softmax',
                            name='actor_output')(x)
        model = Model(inputs=[state_input, advantage, old_prediction],
                      outputs=[out_actions])
        model.compile(optimizer=Adam(lr=self.LEARNING_RATE),
                      loss=[
                          LOSS_FUNC(advantage=advantage,
                                    old_prediction=old_prediction)
                      ])
        model.summary()
        return model
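build_actor treats LOSS_FUNC as a factory that closes over the advantage and old_prediction tensors. The original is not shown; a hedged sketch of a PPO-style clipped surrogate with the same calling convention (the name ppo_clipped_loss and the clip value are assumptions):

import tensorflow.keras.backend as K

def ppo_clipped_loss(advantage, old_prediction, clip=0.2):
    def loss(y_true, y_pred):
        adv = K.squeeze(advantage, axis=-1)            # (batch, 1) -> (batch,)
        prob = K.sum(y_true * y_pred, axis=-1)         # prob of the taken action
        old_prob = K.sum(y_true * old_prediction, axis=-1)
        ratio = prob / (old_prob + 1e-10)
        surr1 = ratio * adv
        surr2 = K.clip(ratio, 1.0 - clip, 1.0 + clip) * adv
        return -K.mean(K.minimum(surr1, surr2))
    return loss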
Example #11
def build(tensor_shape):
    base_model = ResNet50(weights='imagenet',
                          include_top=False,
                          input_shape=tensor_shape)

    model = base_model.output

    model = AvgPool2D()(model)

    model = Dense(1024, activation='relu')(model)  # note: applied before Flatten, so this acts per spatial position
    model = Dropout(0.5)(model)
    model = Flatten()(model)
    model = Dense(256, activation='relu')(model)
    model = Dropout(0.5)(model)

    predictions = (Dense(len(lb.classes_), activation="softmax"))(model)

    model_ok = Model(inputs=base_model.input, outputs=predictions)

    return model_ok
Example #12
def build_ResNet152(input_tensor_shape):
    base_model = ResNet152(weights='imagenet',
                           include_top=False,
                           input_shape=input_tensor_shape)

    x_model = base_model.output

    x_model = AvgPool2D(name='globalaveragepooling2d')(x_model)

    x_model = Dense(1024, activation='relu', name='fc1_Dense')(x_model)
    x_model = Dropout(0.5, name='dropout_1')(x_model)
    x_model = Flatten()(x_model)
    x_model = Dense(256, activation='relu', name='fc2_Dense')(x_model)
    x_model = Dropout(0.5, name='dropout_2')(x_model)

    predictions = Dense(3, activation='sigmoid', name='output_layer')(x_model)

    model = Model(inputs=base_model.input, outputs=predictions)

    return model
Example #13
def get_model_3(args):
    """First inception network implementation"""
    x = input_image = Input(shape=(args.area_size, args.area_size, 14))

    tower_0 = Conv2D(64, (1, 1), padding='same', activation='relu')(x)
    tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(x)
    tower_1 = Conv2D(64, (3, 3), padding='same',
                     activation='relu')(tower_1)
    tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(x)
    tower_2 = Conv2D(64, (5, 5), padding='same',
                     activation='relu')(tower_2)
    tower_3 = MaxPool2D((3, 3), strides=(1, 1), padding='same')(x)
    tower_3 = Conv2D(64, (1, 1), padding='same',
                     activation='relu')(tower_3)
    x = concatenate([tower_0, tower_1, tower_2, tower_3], axis=3)
    x = Dropout(0.5)(x)

    tower_0 = Conv2D(32, (1, 1), padding='same', activation='relu')(x)
    tower_1 = Conv2D(32, (1, 1), padding='same', activation='relu')(x)
    tower_1 = Conv2D(32, (3, 3), padding='same',
                     activation='relu')(tower_1)
    tower_2 = Conv2D(32, (1, 1), padding='same', activation='relu')(x)
    tower_2 = Conv2D(32, (5, 5), padding='same',
                     activation='relu')(tower_2)
    tower_3 = MaxPool2D((3, 3), strides=(1, 1), padding='same')(x)
    tower_3 = Conv2D(32, (1, 1), padding='same',
                     activation='relu')(tower_3)
    x = concatenate([tower_0, tower_1, tower_2, tower_3], axis=3)
    x = Dropout(0.5)(x)

    x = AvgPool2D((3, 3), strides=(1, 1))(x)
    x = Flatten()(x)
    # model.add(Dropout(0.5))
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    return Model(input_image, x)
Example #14
def RES_Primary(x, filters, n_channels, dim_vector):

    x_skip = x  # this will be used for addition with the residual block
    f1, f2 = filters

    #first block
    x = Conv2D(f1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=regularizers.l2(0.001))(x)
    x = BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    #second block # bottleneck (but size kept same with padding)
    x = Conv2D(f1,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               kernel_regularizer=regularizers.l2(0.001))(x)
    x = BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    # third block activation used after adding the input
    x = Conv2D(f2,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=regularizers.l2(0.001))(x)
    x = BatchNormalization()(x)

    # add the input
    x = Add()([x, x_skip])
    x = AvgPool2D(pool_size=4, strides=1, data_format='channels_last')(x)
    x = layers.Reshape(target_shape=[n_channels, dim_vector])(x)
    x = layers.Lambda(squash)(x)
    x = BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    return x
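RES_Primary wraps a squash nonlinearity in a Lambda (capsule-network style). Its definition is not shown here; the commonly used form is:

import tensorflow.keras.backend as K

def squash(vectors, axis=-1):
    # Shrinks vector norms into [0, 1) while preserving direction.
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * vectors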
Example #15
    def _create(self):
        base_model = KerasMobileNet(include_top=False, input_tensor=self.get_input_tensor())
        self.make_net_layers_non_trainable(base_model)

        x = base_model.output
        x = Dropout(self.dropout)(x)
        x = Dropout(self.dropout)(x)  # a second dropout, compounding the effective rate
        x = AvgPool2D()(x)
        x = Flatten()(x)
        # we could achieve almost the same accuracy without this layer, but this one helps later
        # for the novelty-detection part and brings much more useful features.

        if self.run_config.main.classification_type==_config.CLASSIFICATION_TYPE.CLASSIFICATION:
            predictions = Dense(self.run_config.data.nb_classes, activation='softmax', name='predictions')(x)
        elif self.run_config.main.classification_type==_config.CLASSIFICATION_TYPE.REGRESSION:
            predictions = Dense(1, activation='linear', name='regression')(x)
        elif self.run_config.main.classification_type==_config.CLASSIFICATION_TYPE.MULTIPLE_REGRESSION:
            predictions = Dense(self.run_config.data.nb_classes, activation='linear', name='multiple_regression')(x)
        else:
            raise ValueError("Error, unknown classification_type: <%s>" % self.run_config.main.classification_type)

        self.model = Model(inputs=base_model.input, outputs=predictions)
Example #16
    def build(self):
        data = Input(batch_shape=self.data_size)
        net = _conv_bn_relu(n_out=64, kernel_size=(7, 7), strides=(2, 2))(data)
        net = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding="same")(net)

        n_out = 64
        for i, r in enumerate(self.repetitions):
            net = _residual_block(self.block_fn,
                                  n_out=n_out,
                                  repetitions=r,
                                  is_first_layer=(i == 0))(net)
            n_out *= 2  # each repetition stage uses twice as many filters as the previous one

        net = _bn_relu(net)
        block_shape = net.get_shape().as_list()
        net = AvgPool2D(pool_size=(block_shape[1:3]), strides=(1, 1))(net)
        net = Flatten()(net)
        net = Dense(units=self.num_outputs,
                    kernel_initializer="he_normal",
                    activation="softmax")(net)

        model = Model(inputs=data, outputs=net)
        return model
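This builder leans on helper factories in the style of the well-known keras-resnet implementation; hedged sketches of two of them follow (_residual_block is omitted, since it depends on self.block_fn):

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation

def _bn_relu(net):
    # Batch norm followed by ReLU, the pre-activation building block.
    return Activation("relu")(BatchNormalization()(net))

def _conv_bn_relu(n_out, kernel_size, strides=(1, 1)):
    # Returns a callable, matching the factory style used in build() above.
    def layer(net):
        net = Conv2D(n_out, kernel_size, strides=strides, padding="same",
                     kernel_initializer="he_normal")(net)
        return _bn_relu(net)
    return layer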
Example #17
    def build(self, stack_size, summary=False):
        """
        Parameters
        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        stack_size  int  : number of layers per filter_size stack
        summary     bool : display model summary if true
        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
        """

        input_shape = (32, 32, 3)
        num_filter = 16
        num_stacks = 3

        x_in = Input(shape=input_shape)
        x = Conv2D(num_filter,
                   kernel_size=3,
                   padding='same',
                   activation='relu')(x_in)

        for i in range(num_stacks):
            for j in range(stack_size):
                if i != 0 and j == 0:
                    x = self.conv_block(x, num_filter, projection=True)
                else:
                    x = self.conv_block(x, num_filter)

            num_filter *= 2

        x = AvgPool2D(8)(x)
        x = Flatten()(x)
        x_out = Dense(10)(x)
        model = Model(inputs=x_in, outputs=x_out)

        if summary:
            print(model.summary())

        return model
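build() calls self.conv_block with an optional projection flag. A plausible sketch of a basic two-layer residual block where projection blocks also downsample (an assumption consistent with the AvgPool2D(8) at the end for 32x32 inputs; the usual tf.keras layers Conv2D, Add, Activation are assumed imported):

    def conv_block(self, x, num_filter, projection=False):
        strides = 2 if projection else 1  # projection blocks downsample
        shortcut = x
        y = Conv2D(num_filter, 3, strides=strides, padding='same',
                   activation='relu')(x)
        y = Conv2D(num_filter, 3, padding='same')(y)
        if projection:
            # 1x1 conv so the shortcut matches the new width and resolution.
            shortcut = Conv2D(num_filter, 1, strides=strides)(x)
        return Activation('relu')(Add()([shortcut, y]))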
Example #18
    def __call__(self, tensor):
        with K.name_scope('ShuffleNetUnit'):
            x = GConv(self.num_groups,
                      self.in_channels,
                      self.in_channels // self.bottleneck_ratio,
                      kernel_regularizer=self.regularizer)(tensor)
            x = BatchNormalization()(x)
            x = LeakyReLU(0.1)(x)

            x = Lambda(self._shuffle_channels, name=f'channel_shuffle_{ShuffleNetUnit.count_shuffles}')(x)
            ShuffleNetUnit.count_shuffles += 1

            x = DConv(self.kernel_size,
                      self.strides,
                      self.regularizer)(x)
            x = BatchNormalization()(x)

            if not self.downsampling:
                x = GConv(self.num_groups,
                          self.in_channels // self.bottleneck_ratio,
                          self.out_channels,
                          kernel_regularizer=self.regularizer)(x)
            else:
                x = GConv(self.num_groups,
                          self.in_channels // self.bottleneck_ratio,
                          self.out_channels - self.in_channels,
                          kernel_regularizer=self.regularizer)(x)

            x = BatchNormalization()(x)

            if not self.downsampling:
                x = Add()([tensor, x])
            else:
                downsampled = AvgPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(tensor)
                x = Concatenate(axis=-1)([downsampled, x])

            return LeakyReLU(0.1)(x)
Example #19
def fcn_sherrah2016_classifier(input_shape, for_receptive_field=False):
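    # When for_receptive_field is True, the branches below swap ReLU for a linear
    # activation and max-pooling for average pooling, making the network linear so
    # its effective receptive field can be traced back through the layers.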

    input = Input(shape=input_shape, dtype='float32', name='input_image')

    x = Conv2D(filters=32,
               kernel_size=(5, 5),
               strides=1,
               dilation_rate=1,
               padding='same')(input)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(3, 3), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=1, padding='same')(x)

    x = Conv2D(filters=int(96 / 2),
               kernel_size=(5, 5),
               strides=1,
               dilation_rate=2,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(5, 5), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(5, 5), strides=1, padding='same')(x)

    x = Conv2D(filters=int(128 / 2),
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=4,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(9, 9), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(9, 9), strides=1, padding='same')(x)

    x = Conv2D(filters=int(196 / 2),
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=8,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(17, 17), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(17, 17), strides=1, padding='same')(x)

    x = Conv2D(filters=int(512 / 2),
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=16,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
    else:
        x = Activation('relu')(x)

    # dimensionality reduction
    x = Conv2D(filters=64,
               kernel_size=(1, 1),
               strides=1,
               dilation_rate=1,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
    else:
        x = Activation('relu')(x)

    x = Conv2D(filters=8,
               kernel_size=(1, 1),
               strides=1,
               dilation_rate=1,
               padding='same')(x)

    if for_receptive_field:
        x = Activation('linear')(x)
    else:
        x = Activation('relu')(x)

    x = Conv2D(filters=1,
               kernel_size=(1, 1),
               strides=1,
               dilation_rate=1,
               padding='same')(x)

    # classification output
    classification_output = Activation('hard_sigmoid',
                                       name='classification_output')(x)

    return Model(inputs=input, outputs=[classification_output])
Example #20
    def __init__(self, weights):
        """
        Initialize an OpenFaceNet from given weights.
        Weights can be a path to a mat file containing the model's weights, or a
        list of numpy arrays containing weights.

        This function does not work with the Theano backend due to how Conv2D
        is handled.
        """
        if isinstance(weights, str):
            # load weights, when given a path
            weights = scipy.io.loadmat(weights)
            weights = weights['weights'][0]
        # construct the model and initialize weights from the list of weights
        #  - first part is sequential
        net_in = Input(shape=(96,96,3))
        net_in_normalized = Lambda(lambda x: x/255., name='input_normalization')(net_in) # normalize input to [0,1] range
        net_out = StridedConv2D(net_in_normalized, 2, 'same', weights[0], weights[1])
        net_out = BatchNormalization(gamma_initializer = Constant(np.squeeze(weights[2])),\
                                     beta_initializer = Constant(np.squeeze(weights[3])),\
                                     moving_mean_initializer=Constant(np.squeeze(weights[4][:,0])),\
                                     moving_variance_initializer=Constant(np.squeeze(weights[4][:,1]**2)))(net_out)
        net_out = Activation('relu')(net_out)
        net_out = StridedMaxPooling2d(net_out, 2, (3,3))
        net_out = LRN2D()(net_out)
        net_out = Conv2D(weights[5].shape[3], (weights[5].shape[0],weights[5].shape[1]),\
                         kernel_initializer=Constant(weights[5]), bias_initializer=Constant(np.squeeze(weights[6])))(net_out)
        net_out = BatchNormalization(gamma_initializer = Constant(np.squeeze(weights[7])),\
                                     beta_initializer = Constant(np.squeeze(weights[8])),\
                                     moving_mean_initializer=Constant(np.squeeze(weights[9][:,0])),\
                                     moving_variance_initializer=Constant(np.squeeze(weights[9][:,1]**2)))(net_out)
        net_out = Activation('relu')(net_out)
        net_out = Conv2D(weights[10].shape[3], (weights[10].shape[0],weights[10].shape[1]),\
                         padding='same',\
                         kernel_initializer=Constant(weights[10]), bias_initializer=Constant(np.squeeze(weights[11])))(net_out)
        net_out = BatchNormalization(gamma_initializer = Constant(np.squeeze(weights[12])),\
                                     beta_initializer = Constant(np.squeeze(weights[13])),\
                                     moving_mean_initializer=Constant(np.squeeze(weights[14][:,0])),\
                                     moving_variance_initializer=Constant(np.squeeze(weights[14][:,1]**2)))(net_out)
        net_out = LRN2D()\
                  (Activation('relu')(net_out))
        net_out = StridedMaxPooling2d(net_out, 2, (3,3))
        # - first inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[15:15+10])
        # -- second branch
        net_out2 = InceptionBranchType1(net_out, weights[25:25+10])
        # -- third branch
        net_out3 = InceptionBranchType2(net_out, weights[35:35+5])
        net_out3 = ZeroPadding2D(padding=((3,4),(3,4)))(net_out3)
        # -- fourth branch
        net_out4 = InceptionBranchType3(net_out, weights[40:40+5])
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3, net_out4])
        # - second inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[45:45+10])
        # -- second branch
        net_out2 = InceptionBranchType1(net_out, weights[55:55+10])
        # -- third branch
        net_out3 = InceptionBranchType4(net_out, 9, weights[65:65+5])
        net_out3 = ZeroPadding2D(padding=((4,4),(4,4)))(net_out3)
        # -- fourth branch
        net_out4 = InceptionBranchType3(net_out, weights[70:70+5])
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3, net_out4])
        # - third inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[75:75+10],\
                                        strides=[1,2],\
                                        padding=['valid','same'])
        # -- second branch
        net_out2 = InceptionBranchType1(net_out, weights[85:85+10],\
                                        strides=[1,2],\
                                        padding=['valid','same'])
        # -- third branch
        net_out3 = StridedMaxPooling2d(net_out, 2, (3,3), padding='valid')
        net_out3 = ZeroPadding2D(padding=((0,1),(0,1)))(net_out3)
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3])
        # - fourth inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[95:95+10])
        # -- second branch
        net_out2 = InceptionBranchType1(net_out, weights[105:105+10])
        # -- third branch
        net_out3 = InceptionBranchType4(net_out, 9, weights[115:115+5])
        net_out3 = ZeroPadding2D(padding=((2,2),(2,2)))(net_out3)
        # -- fourth branch
        net_out4 = InceptionBranchType3(net_out, weights[120:120+5])
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3, net_out4])
        # - fifth inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[125:125+10],\
                                        strides=[1,2],\
                                        padding=['valid','same'])
        # -- second branch
        net_out2 = InceptionBranchType1(net_out, weights[135:135+10],\
                                        strides=[1,2],\
                                        padding=['valid','same'])
        # -- third branch
        net_out3 = StridedMaxPooling2d(net_out, 2, (3,3), padding='valid')
        net_out3 = ZeroPadding2D(padding=((0,1),(0,1)))(net_out3)
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3])
        # - sixth inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[145:145+10])
        # -- second branch
        net_out2 = InceptionBranchType4(net_out, 9, weights[155:155+5])
        net_out2 = ZeroPadding2D(padding=((1,1),(1,1)))(net_out2)
        # -- third branch
        net_out3 = InceptionBranchType3(net_out, weights[160:160+5])
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3])
        # - seventh inception module
        # -- first branch
        net_out1 = InceptionBranchType1(net_out, weights[165:165+10])
        # -- second branch
        net_out2 = InceptionBranchType2(net_out, weights[175:175+5])
        net_out2 = ZeroPadding2D(padding=((1,1),(1,1)))(net_out2)
        # -- third branch
        net_out3 = InceptionBranchType3(net_out, weights[180:180+5])
        # - concat
        net_out = Concatenate(axis=3)([net_out1, net_out2, net_out3])
        # done with inception layers from now on it's all sequential
        net_out = AvgPool2D(pool_size=(3,3), padding='valid')(net_out)
        net_out = Flatten()(net_out)
        net_out = Dense(weights[185].shape[0],\
                        kernel_initializer=Constant(np.transpose(weights[185], (1,0))),\
                        bias_initializer=Constant(np.squeeze(weights[186])))(net_out)
        net_out = Lambda(lambda x: K.l2_normalize(x, axis=1))(net_out)
        weights[187] = np.squeeze(weights[187])
        weights[189] = np.squeeze(weights[189])
        net_out = Dense(weights[187].shape[1],\
                        kernel_initializer=Constant(weights[187]),\
                        bias_initializer=Constant(np.squeeze(weights[188])))(net_out)
        net_out = Activation('tanh')(net_out)
        net_out = Dense(weights[189].shape[1],\
                        kernel_initializer=Constant(weights[189]),\
                        bias_initializer=Constant(np.squeeze(weights[190])))(net_out)
        net_out = Activation('softmax')(net_out)
        # create model
        self.model = Model(net_in, net_out)
        # print model summary
        self.model.summary()
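The constructor uses several wrappers (StridedConv2D, StridedMaxPooling2d, LRN2D, the InceptionBranchType* builders) that are defined elsewhere. Hedged sketches of the two simplest, with signatures inferred from the calls above:

import numpy as np
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.initializers import Constant

def StridedConv2D(x, stride, padding, kernel_weights, bias_weights):
    # Convolution initialized from pretrained kernel/bias arrays.
    return Conv2D(kernel_weights.shape[3],
                  (kernel_weights.shape[0], kernel_weights.shape[1]),
                  strides=stride, padding=padding,
                  kernel_initializer=Constant(kernel_weights),
                  bias_initializer=Constant(np.squeeze(bias_weights)))(x)

def StridedMaxPooling2d(x, stride, pool_size, padding='same'):
    # Max pooling with an explicit stride; 'same' assumed as the default.
    return MaxPooling2D(pool_size=pool_size, strides=stride, padding=padding)(x)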
Example #21
base_model = VGG19(weights='imagenet',
                   include_top=False,
                   input_shape=train_X.shape[1:])
base_model.trainable = False

pretrained_features = Input(base_model.get_output_shape_at(0)[1:],
                            name='feature_input')
pretrained_depth = base_model.get_output_shape_at(0)[-1]

batch_features = BatchNormalization()(pretrained_features)
x = Conv2D(128, kernel_size=(1, 1), padding='same',
           activation='elu')(batch_features)
x = Conv2D(32, kernel_size=(1, 1), padding='same', activation='elu')(x)
x = Conv2D(16, kernel_size=(1, 1), padding='same', activation='elu')(x)
x = AvgPool2D((2, 2), strides=(1, 1), padding='same')(x)
x = Conv2D(1, kernel_size=(1, 1), padding='valid', activation='sigmoid')(x)
fan_out_layer = Conv2D(pretrained_depth,
                       kernel_size=(1, 1),
                       padding='same',
                       activation='linear',
                       use_bias=False,
                       weights=[np.ones((1, 1, 1, pretrained_depth))])
fan_out_layer.trainable = False
x = fan_out_layer(x)
mask_features = multiply([x, batch_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(x)

# Lets try setting rescaling for attention to certain regions
gap = Lambda(lambda x: x[0] / x[1],
Example #22
def fcn_sherrah2016_classifier(input_shape, for_receptive_field=False):

    cnn_input = Input(shape=input_shape, dtype='float32', name='input_image')

    x = Conv2D(filters=32,
               kernel_size=(5, 5),
               strides=1,
               dilation_rate=1,
               padding='same')(cnn_input)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(3, 3), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=1, padding='same')(x)

    x = Conv2D(filters=int(96 / 2),
               kernel_size=(5, 5),
               strides=1,
               dilation_rate=2,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(5, 5), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(5, 5), strides=1, padding='same')(x)

    x = Conv2D(filters=int(128 / 2),
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=4,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(9, 9), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(9, 9), strides=1, padding='same')(x)

    x = Conv2D(filters=int(196 / 2),
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=8,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(17, 17), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(17, 17), strides=1, padding='same')(x)

    x = Conv2D(filters=int(512 / 2),
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=16,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
    else:
        x = Activation('relu')(x)

    # output
    output = Conv2D(filters=3,
                    kernel_size=(1, 1),
                    strides=1,
                    dilation_rate=1,
                    padding='same',
                    activation='softmax',
                    name='output')(x)

    return Model(inputs=cnn_input, outputs=[output])
Example #23
# Build the model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, AvgPool2D, BatchNormalization, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers

model = Sequential([
    Conv2D(72,
           4,
           input_shape=(96, 96, 1),
           activation='relu',
           kernel_initializer='he_normal',
           kernel_regularizer=regularizers.l2(0.01)),
    AvgPool2D(pool_size=(2, 2)),
    Conv2D(48,
           2,
           activation='relu',
           use_bias=False,
           kernel_initializer='he_normal',
           kernel_regularizer=regularizers.l2(0.01)
           ),  # per the class notes, bias should not be used right before a batch norm
    BatchNormalization(),
    Flatten(),
    Dropout(0.5),  # acts as a regularizer
    Dense(48,
          activation='relu',
          kernel_initializer='he_normal',
          kernel_regularizer=regularizers.l2(
              0.01)),  # important: use He initialization with ReLU
Example #24
def googlenet(input_shape, num_classes):
    net = {}
    input_tensor = Input(input_shape)
    net['input'] = input_tensor

    # layer 1
    net['c1'] = Conv2D(64, (7, 7),
                       strides=2,
                       padding='same',
                       activation='relu',
                       kernel_initializer=conv_init,
                       name='c1')(net['input'])
    net['p1'] = MaxPool2D((3, 3),
                          strides=2,
                          padding='same',
                          name='p1')(net['c1'])
    net['bn1'] = BatchNormalization(name='bn1')(net['p1'])

    # layer 2
    net['c2_reduce'] = Conv2D(64, (1, 1),
                              activation='relu',
                              padding='same',
                              kernel_initializer=conv_init,
                              name='c2_reduce')(net['bn1'])
    net['c2'] = Conv2D(192, (3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       kernel_initializer=conv_init,
                       name='c2')(net['c2_reduce'])
    net['bn2'] = BatchNormalization(name='bn2')(net['c2'])
    net['p2'] = MaxPool2D((3, 3),
                          strides=2,
                          padding='same',
                          name='p2')(net['bn2'])

    # layer 3
    net['inception_3a'] = inception(net['p2'], 64, 96, 128, 16, 32, 32)
    net['inception_3b'] = inception(net['inception_3a'], 128, 128, 192, 32, 96, 64)
    net['p3'] = MaxPool2D((3, 3),
                          strides=2,
                          padding='same',
                          name='p3')(net['inception_3b'])

    # layer 4
    net['inception_4a'] = inception(net['p3'], 192, 96, 208, 16, 48, 64)
    net['inception_4b'] = inception(net['inception_4a'], 160, 112, 224, 24, 64, 64)
    net['inception_4c'] = inception(net['inception_4b'], 128, 128, 256, 24, 64, 64)
    net['inception_4d'] = inception(net['inception_4c'], 112, 144, 288, 32, 64, 64)
    net['inception_4e'] = inception(net['inception_4d'], 256, 160, 320, 32, 128, 128)
    net['p4'] = MaxPool2D((3, 3),
                          strides=2,
                          padding='same',
                          name='p4')(net['inception_4e'])

    # layer 5
    net['inception_5a'] = inception(net['p4'], 256, 160, 320, 32, 128, 128)
    net['inception_5b'] = inception(net['inception_5a'], 384, 192, 384, 48, 128, 128)

    net['avgpool'] = AvgPool2D((7, 7),
                               strides=1,
                               padding='same',
                               name='avgpool')(net['inception_5b'])
    net['dropout'] = Dropout(0.4, name='dropout')(net['avgpool'])
    net['flat'] = Flatten(name='flat')(net['dropout'])
    net['linear'] = Dense(num_classes,
                          activation='softmax',
                          kernel_initializer=fc_init,
                          name='linear')(net['flat'])
    net['output'] = net['linear']
    model = Model(net['input'], net['output'])
    model.summary()
    return model
Example #25
def fcn_sherrah2016_regression(input_shape, for_receptive_field=False):

    input = Input(shape=input_shape, dtype='float32', name='input_image')

    x = Conv2D(filters=32,
               kernel_size=(5, 5),
               strides=1,
               dilation_rate=1,
               padding='same')(input)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(3, 3), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=1, padding='same')(x)

    x = Conv2D(filters=96,
               kernel_size=(5, 5),
               strides=1,
               dilation_rate=2,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(5, 5), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(5, 5), strides=1, padding='same')(x)

    x = Conv2D(filters=128,
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=4,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(9, 9), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(9, 9), strides=1, padding='same')(x)

    x = Conv2D(filters=196,
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=8,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
        x = AvgPool2D(pool_size=(17, 17), strides=1, padding='same')(x)
    else:
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(17, 17), strides=1, padding='same')(x)

    x = Conv2D(filters=512,
               kernel_size=(3, 3),
               strides=1,
               dilation_rate=16,
               padding='same')(x)
    if for_receptive_field:
        x = Activation('linear')(x)
    else:
        x = Activation('relu')(x)

    # dimensionality reduction layer
    main_output = Conv2D(filters=1,
                         kernel_size=(1, 1),
                         strides=1,
                         dilation_rate=1,
                         padding='same',
                         name='main_output')(x)

    return Model(inputs=input, outputs=main_output)
Example #26
    def __init__(self,
                 width=256,
                 height=256,
                 channels=3,
                 learning_rate=0.0002,
                 decay_rate=2e-6,
                 gpus=0):

        self.width = width
        self.height = height
        self.channels = channels
        self.gpus = gpus
        self.learning_rate = learning_rate
        self.decay_rate = decay_rate

        # -----------------------------
        #  Discriminator Low resolution
        # -----------------------------

        output_size_low_picture, output_size_low_features = calc_output_and_feature_size(
            self.height / 2, self.width / 2)

        discriminator_low_res_input = Input(shape=(
            self.height,
            self.width,
            self.channels,
        ))
        discriminator_low_res_input_downsample = AvgPool2D(
            2, padding='same')(discriminator_low_res_input)

        x_1 = ConvSN2D(64, 4, padding='same',
                       strides=2)(discriminator_low_res_input_downsample)
        x = LeakyReLU(alpha=0.2)(x_1)

        x_1_att = Attention(64)(x)

        x_2 = ConvSN2D(128, 4, padding='same', strides=2)(x_1_att)
        x = LeakyReLU(alpha=0.2)(x_2)

        x_3 = ConvSN2D(256, 4, padding='same', strides=2)(x)
        x = LeakyReLU(alpha=0.2)(x_3)

        x_4 = ConvSN2D(512, 4, padding='same', strides=1)(x)
        x = LeakyReLU(alpha=0.2)(x_4)

        x = ConvSN2D(1, 4, padding='same', strides=1)(x)
        x = Reshape([output_size_low_picture, 1])(x)

        discriminator_low_features = concatenate(
            [Flatten()(x_1),
             Flatten()(x_2),
             Flatten()(x_3),
             Flatten()(x_4)],
            axis=1)
        discriminator_low_features = Reshape([output_size_low_features,
                                              1])(discriminator_low_features)

        def zero_loss(y_true, y_pred):
            return K.zeros_like(y_true)

        loss_d = ['mse', zero_loss]
        loss_weights_d = [1, 0]
        optimizer = Adam(self.learning_rate, 0.5, decay=self.decay_rate)

        if self.gpus < 2:
            self.model = Model(discriminator_low_res_input,
                               [x, discriminator_low_features])
            self.save_model = self.model
        else:
            self.save_model = Model(discriminator_low_res_input,
                                    [x, discriminator_low_features])
            self.model = multi_gpu_model(self.save_model, gpus=self.gpus)

        self.model.compile(optimizer=optimizer,
                           loss_weights=loss_weights_d,
                           loss=loss_d)
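calc_output_and_feature_size is not shown; a hedged reconstruction from how the discriminator reshapes its outputs (three stride-2 'same' convolutions reduce each spatial dimension by 8x; assumes height and width divisible by 8):

def calc_output_and_feature_size(height, width):
    h, w = int(height) // 8, int(width) // 8
    # One logit per spatial position of the final 1-channel map.
    output_size_picture = h * w
    # Flattened sizes of the four intermediate maps (64/128/256/512 channels).
    output_size_features = ((int(height) // 2) * (int(width) // 2) * 64
                            + (int(height) // 4) * (int(width) // 4) * 128
                            + h * w * 256
                            + h * w * 512)
    return output_size_picture, output_size_features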
Example #27
    def block_func(x):
        cx = c2(n, 3)(c2(n, 3, 2)(x))
        cs1 = concatenate([AvgPool2D((2, 2))(x), cx])
        cs2 = c2(n, 3)(c2(n, 3)(cs1))
        return concatenate([cs2, cs1])
Example #28
    def build_model(self, loss):
        """
        Builds test Keras model for predicting Iris classifications

        :param loss (str): Type of loss - must be one of the loss identifiers accepted by Keras
        :return: Keras dense model of predefined structure
        """
        model = Sequential()
        # 1
        #model.add(BatchNormalization())
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   padding='SAME',
                   strides=(2, 2),
                   activation=tf.nn.relu,
                   input_shape=(224, 224, 3)))
        model.add(BatchNormalization())
        # 2
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 3
        model.add(
            Conv2D(64,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 4
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(2, 2),
                            activation=tf.nn.relu))
        # 5
        model.add(
            Conv2D(128,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 6
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 7
        model.add(
            Conv2D(128,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 8
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(2, 2),
                            activation=tf.nn.relu))
        # 9
        model.add(
            Conv2D(256,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 10
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 11
        model.add(
            Conv2D(256,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 12
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(2, 2),
                            activation=tf.nn.relu))
        # 13
        model.add(
            Conv2D(512,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 14
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 15
        model.add(
            Conv2D(512,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 142
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 152
        model.add(
            Conv2D(512,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 143
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 153
        model.add(
            Conv2D(512,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 144
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 154
        model.add(
            Conv2D(512,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 145
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 155
        model.add(
            Conv2D(512,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 16
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(2, 2),
                            activation=tf.nn.relu))
        # 17
        model.add(
            Conv2D(1024,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        model.add(BatchNormalization())
        # 18
        model.add(
            DepthwiseConv2D(kernel_size=(3, 3),
                            padding='SAME',
                            strides=(1, 1),
                            activation=tf.nn.relu))
        # 19
        model.add(
            Conv2D(1024,
                   kernel_size=(1, 1),
                   padding='SAME',
                   strides=(1, 1),
                   activation=tf.nn.relu))
        # 20
        model.add(AvgPool2D(pool_size=(7, 7), strides=(1, 1)))
        # 21
        model.add(Dropout(rate=0.001))
        model.add(Flatten())
        model.add(Dense(NUMOFCLASS, activation=tf.nn.relu))
        model.add(Dense(NUMOFCLASS, activation='softmax'))

        #model.add(keras.losses.sparse_categorical_crossentropy())

        model.compile(loss=loss, optimizer='RMSprop', metrics=['accuracy'])

        print("model is ok ")
        return model
Example #29
    def block_10(self, layer_name, net, out_lst):
        assert isinstance(out_lst, list), TypeError
        assert len(out_lst) == 8, ValueError
        '''
        Inception-style block with four parallel branches over `net`:
            net0: 1x1 conv
            net1: 1x1 conv -> {net1_1: 1x3 conv, net1_2: 3x1 conv} -> concat
            net2: 1x1 conv -> {net2_1: 1x3 conv, net2_2: 3x1 conv} -> concat
            net3: 3x3 avg pool (stride 1) -> 1x1 conv
        The four branch outputs are concatenated along the channel axis.
        '''
        net0 = Conv2D(out_lst[0], (1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_regularizer=regularizers.l2(self.decay_weight),
                      name=layer_name + 'Conv2d0_0a_1x1')(net)

        net1 = Conv2D(out_lst[1], (1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_regularizer=regularizers.l2(self.decay_weight),
                      name=layer_name + 'Conv2d1_0a_1x1')(net)
        net1_1 = Conv2D(out_lst[2], (1, 3),
                        strides=(1, 1),
                        padding='same',
                        kernel_regularizer=regularizers.l2(self.decay_weight),
                        name=layer_name + 'Conv2d1_1_0b_1x3')(net1)
        net1_2 = Conv2D(out_lst[3], (3, 1),
                        strides=(1, 1),
                        padding='same',
                        kernel_regularizer=regularizers.l2(self.decay_weight),
                        name=layer_name + 'Conv2d1_2_0c_3x1')(net1)
        net1 = concatenate([net1_1, net1_2], axis=3)

        net2 = Conv2D(out_lst[4], (1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_regularizer=regularizers.l2(self.decay_weight),
                      name=layer_name + 'Conv2d2_0a_1x1')(net)
        net2_1 = Conv2D(out_lst[5], (1, 3),
                        strides=(1, 1),
                        padding='same',
                        kernel_regularizer=regularizers.l2(self.decay_weight),
                        name=layer_name + 'Conv2d2_1_0b_1x3')(net2)
        net2_2 = Conv2D(out_lst[6], (3, 1),
                        strides=(1, 1),
                        padding='same',
                        kernel_regularizer=regularizers.l2(self.decay_weight),
                        name=layer_name + 'Conv2d2_2_0c_3x1')(net2)
        net2 = concatenate([net2_1, net2_2], axis=3)

        net3 = AvgPool2D((3, 3),
                         strides=(1, 1),
                         padding='same',
                         name=layer_name + 'AvgPool3_0a_3x3')(net)
        net3 = Conv2D(out_lst[7], (1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_regularizer=regularizers.l2(self.decay_weight),
                      name=layer_name + 'Conv2d3_0b_1x1')(net3)
        return concatenate([net0, net1, net2, net3], axis=3)
Example #30
def create_wide_residual_network(input_dim,
                                 nb_classes=100,
                                 N=2,
                                 k=1,
                                 dropout=0.0,
                                 verbose=1):
    """
    Creates a Wide Residual Network with specified parameters

    :param input_dim: shape tuple of the network input (e.g. (32, 32, 3))
    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute N = (n - 4) / 6.
              Example : For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
              Example2: For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
              Example3: For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
    :param k: Width of the network.
    :param dropout: Adds dropout if value is greater than 0.0
    :param verbose: Debug info to describe created WRN
    :return:
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    ip = Input(shape=input_dim)

    x = initial_conv(ip)
    nb_conv = 4

    x = expand_conv(x, 16, k)

    for i in range(N - 1):
        x = conv1_block(x, k, dropout)
        nb_conv += 2

    x = BatchNormalization(axis=channel_axis,
                           momentum=0.1,
                           epsilon=1e-5,
                           gamma_initializer='uniform')(x)
    x = Activation('relu')(x)

    x = expand_conv(x, 32, k, strides=(2, 2))

    for i in range(N - 1):
        x = conv2_block(x, k, dropout)
        nb_conv += 2

    x = BatchNormalization(axis=channel_axis,
                           momentum=0.1,
                           epsilon=1e-5,
                           gamma_initializer='uniform')(x)
    x = Activation('relu')(x)

    x = expand_conv(x, 64, k, strides=(2, 2))

    for i in range(N - 1):
        x = conv3_block(x, k, dropout)
        nb_conv += 2

    x = BatchNormalization(axis=channel_axis,
                           momentum=0.1,
                           epsilon=1e-5,
                           gamma_initializer='uniform')(x)
    x = Activation('relu')(x)

    x = AvgPool2D((8, 8))(x)
    x = Flatten()(x)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(ip, x)

    if verbose:
        print("Wide Residual Network-%d-%d created." % (nb_conv, k))
    return model
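Example #30 depends on initial_conv, expand_conv and the conv{1,2,3}_block helpers from the common Keras Wide-ResNet implementation. A hedged sketch of expand_conv (note it returns the un-activated sum, matching the BatchNormalization + ReLU applied after each stage above):

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add

def expand_conv(init, base, k, strides=(1, 1)):
    x = Conv2D(base * k, (3, 3), padding='same', strides=strides)(init)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(base * k, (3, 3), padding='same')(x)
    # 1x1 projection so the shortcut matches the widened/downsampled path.
    skip = Conv2D(base * k, (1, 1), padding='same', strides=strides)(init)
    return Add()([x, skip])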