Example #1
    def build(width, height, kernel, depth, classes):
        input_shape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            input_shape = (depth, height, width)

        base = MobileNetV2(weights="imagenet",
                           include_top=False,
                           input_tensor=Input(shape=input_shape))

        head = base.output
        head = AveragePooling2D(pool_size=(7, 7))(head)
        head = Flatten(name="flatten")(head)
        head = Dense(128, activation="relu")(head)
        head = Dropout(0.5)(head)
        head = Dense(classes, activation="softmax")(head)

        model = Model(inputs=base.input, outputs=head)

        for layer in base.layers:
            layer.trainable = False

        return model
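A minimal usage sketch for this snippet, assuming `build` is reachable (e.g. as a @staticmethod) and the usual Keras imports are in scope; the dataset names are placeholders:

# Hedged sketch: compile and train only the new head; the MobileNetV2
# base stays frozen because its layers were marked non-trainable.
from tensorflow.keras.optimizers import Adam

model = build(width=224, height=224, kernel=None, depth=3, classes=2)  # `kernel` is unused by this snippet
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(train_ds, validation_data=val_ds, epochs=10)  # train_ds/val_ds are placeholders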
Example #2
def inception_A(input):
    if K.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = -1

    a1 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    a1 = Conv2D(96, (1, 1), activation='relu', padding='same')(a1)

    a2 = Conv2D(96, (1, 1), activation='relu', padding='same')(input)

    a3 = Conv2D(64, (1, 1), activation='relu', padding='same')(input)
    a3 = Conv2D(96, (3, 3), activation='relu', padding='same')(a3)

    a4 = Conv2D(64, (1, 1), activation='relu', padding='same')(input)
    a4 = Conv2D(96, (3, 3), activation='relu', padding='same')(a4)
    a4 = Conv2D(96, (3, 3), activation='relu', padding='same')(a4)

    m = concatenate([a1, a2, a3, a4], axis=channel_axis)
    m = BatchNormalization(axis=channel_axis)(m)
    m = Activation('relu')(m)
    return m
Example #3
def block_inception_a(input, idx):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3],
                    axis=channel_axis,
                    name="mixed_inception_a_{0}".format(idx))
    return x
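This example assumes a `conv2d_bn` helper from the surrounding file. A minimal sketch of the Conv + BatchNorm + ReLU composition that Inception-v4 ports typically use; the names and defaults here are assumptions, not the repository's exact code:

from keras.layers import Conv2D, BatchNormalization, Activation
from keras import backend as K

def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1)):
    # Bias-free convolution followed by batch norm and a ReLU.
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(filters, (num_row, num_col), strides=strides,
               padding=padding, use_bias=False)(x)
    x = BatchNormalization(axis=channel_axis, scale=False)(x)
    return Activation('relu')(x)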
Example #4
def inception_v4(num_classes, dropout_keep_prob, weights, include_top):
    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_data_format() == 'channels_first':
        inputs = Input((3, 299, 299))
    else:
        inputs = Input((299, 299, 3))

    # Make inception base
    x = inception_v4_base(inputs)

    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8, 8), padding='valid')(x)
        # Note: the argument is named like a keep-probability, but Keras
        # Dropout expects the drop rate.
        x = Dropout(dropout_keep_prob)(x)
        x = Flatten()(x)
        x = Dense(units=num_classes, activation='softmax')(x)

    model = Model(inputs, x, name='inception_v4')

    if weights is not None:
        model.load_weights('inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
                           by_name=True)
    return model
Example #5
def conv_bn_pool(inp_tensor,
                 conv_filters,
                 conv_kernel_size,
                 conv_strides,
                 conv_pad,
                 pool_type="",
                 pool_size=(2, 2),
                 pool_strides=None):
    x = ZeroPadding2D(padding=conv_pad)(inp_tensor)
    x = Conv2D(filters=conv_filters,
               kernel_size=conv_kernel_size,
               strides=conv_strides,
               padding='valid')(x)
    x = BatchNormalization(epsilon=1e-5, momentum=1)(x)
    x = Activation('relu')(x)

    if pool_type == 'max':
        return MaxPooling2D(pool_size=pool_size, strides=pool_strides)(x)
    elif pool_type == 'avg':
        return AveragePooling2D(pool_size=pool_size, strides=pool_strides)(x)

    return x
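A usage sketch chaining two of these blocks; the input shape and hyperparameters below are illustrative only:

from keras.layers import Input

inp = Input(shape=(512, 300, 1))
x = conv_bn_pool(inp, conv_filters=96, conv_kernel_size=(7, 7),
                 conv_strides=(2, 2), conv_pad=(1, 1),
                 pool_type='max', pool_size=(3, 3), pool_strides=(2, 2))
x = conv_bn_pool(x, conv_filters=256, conv_kernel_size=(5, 5),
                 conv_strides=(2, 2), conv_pad=(1, 1),
                 pool_type='max', pool_size=(3, 3), pool_strides=(2, 2))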
Example #6
    def __call__(self):
        logging.debug("Creating model...")

        inputs = Input(shape=self._input_shape)

        x = Conv2D(32, (3, 3), activation='relu')(inputs)
        x = MaxPooling2D(2, 2)(x)
        x = Conv2D(32, (3, 3), activation='relu')(x)
        x = MaxPooling2D(2, 2)(x)
        x = Conv2D(64, (3, 3), activation='relu')(x)

        y = Conv2D(32, (3, 3), activation='relu')(inputs)
        y = MaxPooling2D(2, 2)(y)
        y = Conv2D(32, (3, 3), activation='relu')(y)
        y = MaxPooling2D(2, 2)(y)
        y = Conv2D(64, (3, 3), activation='tanh')(y)

        z = Multiply()([x, y])
        z = BatchNormalization(axis=self._channel_axis)(z)

        # Classifier block
        pool = AveragePooling2D(pool_size=(8, 8),
                                strides=(1, 1),
                                padding="same")(z)
        flatten = Flatten()(pool)
        predictions_g = Dense(units=2,
                              kernel_initializer=self._weight_init,
                              use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay),
                              activation="softmax")(flatten)
        predictions_a = Dense(units=21,
                              kernel_initializer=self._weight_init,
                              use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay),
                              activation="softmax")(flatten)

        model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])

        return model
Example #7
def inception_B(input):
    channel_axis = -1

    b1 = conv_block(input, 384, 1, 1)

    b2 = conv_block(input, 192, 1, 1)
    b2 = conv_block(b2, 224, 1, 7)
    b2 = conv_block(b2, 256, 7, 1)

    b3 = conv_block(input, 192, 1, 1)
    b3 = conv_block(b3, 192, 7, 1)
    b3 = conv_block(b3, 224, 1, 7)
    b3 = conv_block(b3, 224, 7, 1)
    b3 = conv_block(b3, 256, 1, 7)

    b4 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    b4 = conv_block(b4, 128, 1, 1)

    #m = merge([b1, b2, b3, b4], mode='concat', concat_axis=channel_axis)
    m = concatenate([b1, b2, b3, b4], axis=channel_axis)

    return m
Example #8
def inception_C(input):
    if K.image_data_format() == "channels_first":
        channel_axis = 1
    else:
        channel_axis = -1

    c1 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    c1 = Conv2D(256, (1, 1), activation='relu', padding='same')(c1)

    c2 = Conv2D(256, (1, 1), activation='relu', padding='same')(input)

    c3 = Conv2D(384, (1, 1), activation='relu', padding='same')(input)
    c3_1 = Conv2D(256, (1, 3), activation='relu', padding='same')(c3)
    c3_2 = Conv2D(256, (3, 1), activation='relu', padding='same')(c3)

    c4 = Conv2D(384, (1, 1), activation='relu', padding='same')(input)
    c4 = Conv2D(192, (1, 3), activation='relu', padding='same')(c4)
    c4 = Conv2D(224, (3, 1), activation='relu', padding='same')(c4)
    c4_1 = Conv2D(256, (3, 1), activation='relu', padding='same')(c4)
    c4_2 = Conv2D(256, (1, 3), activation='relu', padding='same')(c4)

    m = concatenate([c1, c2, c3_1, c3_2, c4_1, c4_2], axis=channel_axis)
    m = BatchNormalization(axis=channel_axis)(m)
    m = Activation('relu')(m)
    return m
Example #9
def MiniGoogLeNet(width, height, depth, classes):
    input_shape = height, width, depth
    chan_dim = -1

    if K.image_data_format() == 'channels_first':
        input_shape = depth, height, width
        chan_dim = 1

    # define the model input and first CONV module
    inputs = Input(shape=input_shape)
    x = conv_module(inputs, 96, 3, 3, (1, 1), chan_dim)

    # two inception modules followed by a downsample module
    x = inception_module(x, 32, 32, chan_dim)
    x = inception_module(x, 32, 48, chan_dim)
    x = downsample_module(x, 80, chan_dim)

    # four inception modules followed by a downsample module
    x = inception_module(x, 112, 48, chan_dim)
    x = inception_module(x, 96, 64, chan_dim)
    x = inception_module(x, 80, 80, chan_dim)
    x = inception_module(x, 48, 96, chan_dim)
    x = downsample_module(x, 96, chan_dim)

    # two inception modules followed by global POOL and dropout
    x = inception_module(x, 176, 160, chan_dim)
    x = inception_module(x, 176, 160, chan_dim)
    x = AveragePooling2D((7, 7))(x)
    x = Dropout(.5)(x)

    # softmax classifier
    x = Flatten()(x)
    x = Dense(classes)(x)
    x = Activation('softmax')(x)

    # create model
    model = Model(inputs, x, name='mini_googlenet')

    return model
Example #10
    def create(self, size, include_top):
        input_shape = (3, ) + size
        img_input = Input(shape=input_shape)
        bn_axis = 1

        x = Lambda(self.vgg_preprocess)(img_input)
        x = ZeroPadding2D((3, 3))(x)
        x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)  # Keras2
        x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
        x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

        x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
        for n in ['b', 'c', 'd']:
            x = identity_block(x, 3, [128, 128, 512], stage=3, block=n)
        x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
        for n in ['b', 'c', 'd', 'e', 'f']:
            x = identity_block(x, 3, [256, 256, 1024], stage=4, block=n)

        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

        if include_top:
            x = AveragePooling2D((7, 7), name='avg_pool')(x)
            x = Flatten()(x)
            x = Dense(1000, activation='softmax', name='fc1000')(x)
            fname = 'resnet50.h5'
        else:
            fname = 'resnet_nt.h5'

        self.img_input = img_input
        self.model = Model(self.img_input, x)
        convert_all_kernels_in_model(self.model)
        self.model.load_weights(self.FILE_PATH + fname)
Example #11
def merge_resnet_googlenet():
    ip = Input(shape=init)
    layers = 1
    conv_n = 0
    x = ip  # start of the shared chain
    #=====================================================================================================
    if if_n[0]:
        for i in range(0, loop):
            x, l, c = resnet_block(x, dropout_n=0)
            layers += l
            conv_n += c
    x = LeakyReLU(alpha=config.alpha_lrelu)(x)
    x = Dropout(rate=dropout[0])(x)
    layers += 2
    #=====================================================================================================
    if if_n[1]:
        for i in range(0, loop):
            x, l, c = resnet_block(x, dropout_n=0)
            layers += l
            conv_n += c
    x = LeakyReLU(alpha=config.alpha_lrelu)(x)
    x = Dropout(rate=dropout[0])(x)
    layers += 2
    #=====================================================================================================
    if if_n[2]:
        for i in range(0, loop):
            x, l, c = resnet_block(x, dropout_n=0)
            layers += l
            conv_n += c
    x = LeakyReLU(alpha=config.alpha_lrelu)(x)
    x = Dropout(rate=dropout[0])(x)
    layers += 2
    #=====================================================================================================
    x = AveragePooling2D((pool, pool))(x)
    x = Flatten()(x)
    x = Dense(nb_classes, activation='softmax')(x)
    layers += 3

    model = Model(ip, x)
    return [model, layers, conv_n]
Example #12
    def create_parallel_model(self):
        '''
        Creates A keras Model With a Parallel Branch for race detection.
        :return: None
        '''
        if self.__model is None or len(self.__model.layers) == 32:
            self.create_model()
        if self.__training:
            self.__model.load_weights(self.AGE_GENDER_WEIGHTS)
            # Freeze the pretrained age/gender layers; only the new branch trains.
            for layer in self.__model.layers:
                layer.trainable = False
        batch_norm = BatchNormalization(axis=self._channel_axis,
                                        name='batch_norm_parallel_branch')(self.__conv4_copy)
        relu = Activation("relu", name='activation_parallel_branch')(batch_norm)

        # Classifier block
        pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1),
                                padding="same",
                                name='pool_parallel_branch')(relu)
        new_flatten = Flatten(name='flatten_parallel_branch')(pool)
        predictions_r = Dense(units=5,
                              kernel_initializer=self._weight_init,
                              use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay),
                              activation="softmax",
                              name='race')(new_flatten)

        self.__model = Model(self.__model.layers[0].input,
                             [self.__model.layers[-2].output,
                              self.__model.layers[-1].output,
                              predictions_r])
Example #13
def custom_cnn(input):
    x = Conv2D(12, (3, 3), padding='same', activation='relu')(input)
    # Dropout is deliberately inserted between consecutive Conv2D layers,
    # rather than after the max pooling layers.
    x = Dropout(0.20)(x)
    x = Conv2D(18, (3, 3), activation='relu', padding='same')(x)
    x = AveragePooling2D(pool_size=(3, 3), data_format="channels_first")(x)

    y = Conv2D(18, (3, 3), activation='relu', padding='same')(x)
    y = Dropout(0.20)(y)
    y = Conv2D(18, (3, 3), activation='relu', padding='same')(y)
    y = MaxPooling2D(pool_size=(3, 3), data_format="channels_first")(y)

    z = Flatten()(y)
    z = Dropout(0.20)(z)
    z = Dense(128, activation='relu')(z)
    z = Dense(nb_classes, activation='softmax', name='prediction')(z)

    model = Model(inputs=input, outputs=z)
    model.summary()

    return model
Example #14
def ShallowConvNet(input_shape):
    """ Keras implementation of the Shallow Convolutional Network as described
    in Schirrmeister et. al. (2017), arXiv 1703.0505
    
    Assumes the input is a 2-second EEG signal sampled at 128Hz. Note that in 
    the original paper, they do temporal convolutions of length 25 for EEG
    data sampled at 250Hz. We instead use length 13 since the sampling rate is 
    roughly half of the 250Hz which the paper used. The pool_size and stride
    in later layers is also approximately half of what is used in the paper.
    
                     ours        original paper
    pool_size        1, 35       1, 75
    strides          1, 7        1, 15
    conv filters     1, 13       1, 25
    """

    #    if K.image_data_format() == 'channels_first':
    #        input_shape = (1, Chans, Samples)
    #    else:
    #        input_shape = (Chans, Samples, 1)
    #    print(input_shape)
    # start the model
    input_EEG = Input(input_shape)
    block1 = Conv2D(10, (1, 25),
                    input_shape=(1, n_ch, n_samp),
                    kernel_constraint=max_norm(2.))(input_EEG)
    block1 = Conv2D(10, (n_ch, 1),
                    use_bias=False,
                    kernel_constraint=max_norm(2.))(block1)
    block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=(1, 30), strides=(1, 10))(block1)
    block1 = Activation(safe_log)(block1)
    block1 = Dropout(dropout_rate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(n_class, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_EEG, outputs=softmax)
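The snippet above depends on `square` and `safe_log` activations (plus globals such as `n_ch`, `n_samp`, `n_class`, and `dropout_rate`). A minimal sketch of the two activations as commonly defined in ShallowConvNet Keras ports; the clipping bounds here are an assumption:

from keras import backend as K

def square(x):
    return K.square(x)

def safe_log(x):
    # Clip before taking the log to avoid -inf on zero activations.
    return K.log(K.clip(x, min_value=1e-7, max_value=10000))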
Example #15
def inception_B(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    b1 = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(input)
    b1 = conv_block(b1, 128, 1, 1)

    b2 = conv_block(input, 384, 1, 1)

    b3 = conv_block(input, 192, 1, 1)
    b3 = conv_block(b3, 224, 1, 7)
    b3 = conv_block(b3, 256, 7, 1)

    b4 = conv_block(input, 192, 1, 1)
    b4 = conv_block(b4, 192, 7, 1)
    b4 = conv_block(b4, 224, 1, 7)
    b4 = conv_block(b4, 224, 7, 1)
    b4 = conv_block(b4, 256, 1, 7)

    m = concatenate([b1, b2, b3, b4], axis=channel_axis)
    return m
Example #16
    def __init__(self, weights_path=None):
        self.inp = Input(shape=(224, 224, 1))
        self.out = ZeroPadding2D((3, 3))(self.inp)
        self.out = Conv2D(64, (7, 7), strides=(2, 2))(self.out)
        self.out = BatchNormalization()(self.out)
        self.out = Activation('relu')(self.out)
        self.out = MaxPooling2D((3, 3), strides=(2, 2))(self.out)

        self.out = conv_block(self.out, [64, 64, 256])
        self.out = identity_block(self.out, [64, 64, 256])
        self.out = identity_block(self.out, [64, 64, 256])

        self.out = conv_block(self.out, [128, 128, 512])
        self.out = identity_block(self.out, [128, 128, 512])
        self.out = identity_block(self.out, [128, 128, 512])
        self.out = identity_block(self.out, [128, 128, 512])

        self.out = conv_block(self.out, [256, 256, 1024])
        self.out = identity_block(self.out, [256, 256, 1024])
        self.out = identity_block(self.out, [256, 256, 1024])
        self.out = identity_block(self.out, [256, 256, 1024])
        self.out = identity_block(self.out, [256, 256, 1024])
        self.out = identity_block(self.out, [256, 256, 1024])

        self.out = conv_block(self.out, [512, 512, 2048])
        self.out = identity_block(self.out, [512, 512, 2048])
        self.out = identity_block(self.out, [512, 512, 2048])

        self.out = AveragePooling2D((7, 7))(self.out)
        self.out = Flatten()(self.out)
        self.out = Dense(1)(self.out)

        self.model = Model(self.inp, self.out)
        self.model.compile(optimizer=Adam(), loss='mse')

        if weights_path and os.path.exists(weights_path):
            self.model.load_weights(weights_path)
            print(
                "********************Load Model Success*********************")
Example #17
def inception_B(x_input):
    """17*17 卷积块"""
    global INCEPTION_B_COUNT
    INCEPTION_B_COUNT += 1
    with K.name_scope('inception_B' + str(INCEPTION_B_COUNT)):
        averagepooling_conv1x1 = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), padding='same')(x_input)
        averagepooling_conv1x1 = conv_block(averagepooling_conv1x1, 128, 1, 1)

        conv1x1 = conv_block(x_input, 384, 1, 1)

        conv1x7_1x7 = conv_block(x_input, 192, 1, 1)
        conv1x7_1x7 = conv_block(conv1x7_1x7, 224, 1, 7)
        conv1x7_1x7 = conv_block(conv1x7_1x7, 256, 1, 7)

        conv2_1x7_7x1 = conv_block(x_input, 192, 1, 1)
        conv2_1x7_7x1 = conv_block(conv2_1x7_7x1, 192, 1, 7)
        conv2_1x7_7x1 = conv_block(conv2_1x7_7x1, 224, 7, 1)
        conv2_1x7_7x1 = conv_block(conv2_1x7_7x1, 224, 1, 7)
        conv2_1x7_7x1 = conv_block(conv2_1x7_7x1, 256, 7, 1)

        merged_vector = concatenate([averagepooling_conv1x1, conv1x1, conv1x7_1x7, conv2_1x7_7x1], axis=-1)
    return merged_vector
Example #18
	def add_inceptionC(self, input_layer, list_nb_filter, base_name):

		l1_1 = self.add_bn_conv_layer(name=base_name+'_l1_1', input=input_layer, nb_filter=list_nb_filter[0][0], nb_row=1, nb_col=1)

		l2_1 = self.add_bn_conv_layer(name=base_name+'_l2_1', input=input_layer, nb_filter=list_nb_filter[1][0], nb_row=1, nb_col=1)    
		l2_2 = self.add_bn_conv_layer(name=base_name+'_l2_2', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=1, nb_col=7, padding=(0,3))
		l2_3 = self.add_bn_conv_layer(name=base_name+'_l2_3', input=l2_2, nb_filter=list_nb_filter[1][2], nb_row=7, nb_col=1, padding=(3,0))
		## padding and nb_row might not match with the lasagne weights
		
		l3_1 = self.add_bn_conv_layer(name=base_name+'_l3_1', input=input_layer, nb_filter=list_nb_filter[2][0], nb_row=1, nb_col=1)
		l3_2 = self.add_bn_conv_layer(name=base_name+'_l3_2', input=l3_1, nb_filter=list_nb_filter[2][1], nb_row=7, nb_col=1, padding=(3,0))    
		l3_3 = self.add_bn_conv_layer(name=base_name+'_l3_3', input=l3_2, nb_filter=list_nb_filter[2][2], nb_row=1, nb_col=7, padding=(0,3))
		l3_4 = self.add_bn_conv_layer(name=base_name+'_l3_4', input=l3_3, nb_filter=list_nb_filter[2][3], nb_row=7, nb_col=1, padding=(3,0))
		l3_5 = self.add_bn_conv_layer(name=base_name+'_l3_5', input=l3_4, nb_filter=list_nb_filter[2][4], nb_row=1, nb_col=7, padding=(0,3))
		
		l4_1 = self.add_to_graph(ZeroPadding2D(padding=(1,1)), name=base_name+'_l4_1', input=input_layer)
		l4_2 = self.add_to_graph(AveragePooling2D(pool_size=(3,3), strides=(1,1)), name=base_name+'_l4_2', input=l4_1)
		l4_3 = self.add_bn_conv_layer(name=base_name+'_l4_3', input=l4_2, nb_filter=list_nb_filter[3][0], nb_row=1, nb_col=1)

		self.add_to_graph(Activation("linear"), name=base_name, inputs=[l1_1, l2_3, l3_5, l4_3], merge_mode="concat", concat_axis=1)

		self.io.print_info('Added Inception-C {0}'.format(base_name))
Example #19
    def build(width, height, depth, classes):

        inputShape = (height, width, depth)
        chanDim = -1

        if K.image_data_format() == 'channels_first':
            inputShape = (depth, height, width)
            chanDim = 1

        # Define the model input and first CONV module
        inputs = Input(shape=inputShape)
        x = MiniGoogLenet.conv_module(inputs, 96, 3, 3, (1, 1), chanDim)

        # Two inception modules followed by downsampling modules
        x = MiniGoogLenet.inception_module(x, 32, 32, chanDim)
        x = MiniGoogLenet.inception_module(x, 32, 48, chanDim)
        x = MiniGoogLenet.downsample_sample(x, 80, chanDim)

        # Four inception modules followed by a downsample module
        x = MiniGoogLenet.inception_module(x, 112, 48, chanDim)
        x = MiniGoogLenet.inception_module(x, 96, 64, chanDim)
        x = MiniGoogLenet.inception_module(x, 80, 80, chanDim)
        x = MiniGoogLenet.inception_module(x, 48, 96, chanDim)
        x = MiniGoogLenet.downsample_sample(x, 96, chanDim)

        # Two inception modules followed by global Pool and dropout
        x = MiniGoogLenet.inception_module(x, 176, 160, chanDim)
        x = MiniGoogLenet.inception_module(x, 176, 160, chanDim)
        x = AveragePooling2D((7, 7))(x)
        x = Dropout(0.5)(x)

        # Softmax classifier
        x = Flatten()(x)
        x = Dense(classes)(x)
        x = Activation("softmax")(x)

        model = Model(inputs, x, name='googlenet')

        return model
Example #20
    def _decoder_dense(self, x, num_out, name_suff='', name_out=''):
        """ Decoder with one conv2d layer, avg pooling and a fully connected
        layer.

        Args:
            x (tf.Tensor): Input, latent representation.
            num_out (int): # of output values.
            name_suff (str): Name suffix of layers.
            name_out (str): Name of the output layer.

        Returns:
            tf.Tensor, shape (B, `num_out`).
        """
        # Produces (N, 3, 3, 64)
        x = Conv2D(64, (1, 1),
                   padding='same',
                   name='conv_1_{}'.format(name_suff))(x)

        x = AveragePooling2D((3, 3), padding='same')(x)
        x = Flatten()(x)
        x = Dense(num_out, name='dense_1_{}'.format(name_suff))(x)
        return Activation('linear', name=name_out)(x)
Example #21
def wlcc(input_shape, nb_classes=3):
    '''
    Creates the wlcc network.

    :param nb_classes: number of classes
    :return: Keras Model with 1 input and 1 output
    '''
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1  # channels_last is the default in Keras
    init = Input(input_shape)
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu',
               padding='same')(init)
#   x2 = lzwl(init)
    x2 = mywl2d(x)
    x = Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same')(x)
    x1 = Conv2D(64, (2, 2), strides=(2, 2), activation='relu', padding='valid')(x)
    branches = [x1, x2]
    x = Concatenate(axis=channel_axis)(branches)
    x3 = Conv2D(96, (2, 2), strides=(2, 2), activation='relu', padding='valid')(x)
    x4 = mywl2d(x2)
    branches = [x3, x4]
    x = Concatenate(axis=channel_axis)(branches)
    x = Conv2D(128, (2, 2), strides=(2, 2), activation='relu', padding='valid')(x)
#    x6 = mywl2d(x4)
#    branches = [x, x6]
#    x = Concatenate(axis=channel_axis)(branches)
    x = AveragePooling2D((2, 2), strides=(1, 1), padding='same')(x)
    # Dropout
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dense(units=512, activation='relu')(x)
    x = Dropout(0.5)(x)
    # Output
    out = Dense(units=nb_classes, activation='softmax')(x)
    model = Model(init, out, name='wlcc')
    return model
Example #22
def resnet14(input_tensor, n_classes):
    x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(input_tensor)
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)

    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 64, stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 64, stage=2, block='b')

    x = conv_block(x, 128, stage=3, block='a')
    x = identity_block(x, 128, stage=3, block='b')

    x = conv_block(x, 256, stage=4, block='a')
    x = identity_block(x, 256, stage=4, block='b')

    x = AveragePooling2D((5, 5), name='avg_pool')(x)
    x = Flatten()(x)
    x = Dense(n_classes, activation='softmax', name='dense')(x)

    return Model(input_tensor, x, name='resnet14')
Example #23
def cifar_resnet(blocks, filters, repetations, input_shape=(3, 32, 32)):
    if blocks != len(repetations) or blocks != len(filters):
        print("size of blocks, size of repetations, size of filters should match")
        return None
    input = Input(shape=input_shape)
    conv1 = Conv2D(16, (3, 3),
                   padding="same",
                   kernel_regularizer=l2(0.0001),
                   bias_regularizer=l2(0.0001))(input)
    norm1 = BatchNormalization(axis=1)(conv1)
    relu1 = Activation("relu")(norm1)

    # block_fun = getattr(resnet, "_basic_block")
    block_fun = _basic_block

    data = relu1
    for i in range(blocks):
        is_first_layer = (i == 0)
        data = _residual_block(block_fun,
                               nb_filters=filters[i],
                               repetations=repetations[i],
                               is_first_layer=is_first_layer)(data)
    global_pool = AveragePooling2D(pool_size=(8, 8),
                                   strides=(8, 8),
                                   padding="valid")(data)
    flatten = Flatten()(global_pool)
    dense = Dense(10,
                  kernel_initializer="he_normal",
                  activation="softmax",
                  kernel_regularizer=l2(0.0001),
                  bias_regularizer=l2(0.0001))(flatten)

    model = Model(inputs=input, outputs=dense)
    return model
Example #24
    def build(width, height, depth, classes, stages, filters, reg=0.0001, bnEps=2e-5, bnMom=0.9, dataset="cifar"):
        inputShape = (height, width, depth)
        chanDim = -1

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        inputs = Input(shape=inputShape)
        x = BatchNormalization(axis=chanDim, epsilon=bnEps, momentum=bnMom)(inputs)

        if dataset == "cifar":
            x = Conv2D(filters[0], (3, 3), use_bias=False, padding="same", kernel_regularizer=l2(reg))(x)

        elif dataset == "tiny_imagenet":
            x = Conv2D(filters[0], (5, 5), use_bias=False, padding="same", kernel_regularizer=l2(reg))(x)
            x = BatchNormalization(axis=chanDim, epsilon=bnEps, momentum=bnMom)(x)
            x = Activation("relu")(x)
            x = ZeroPadding2D((1, 1))(x)
            x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        for i in range(0, len(stages)):
            stride = (1, 1) if i == 0 else (2, 2)
            x = ResNet.residual_module(x, filters[i + 1], stride, chanDim, red=True, bnEps=bnEps, bnMom=bnMom)

            for j in range(0, stages[i] - 1):
                x = ResNet.residual_module(x, filters[i + 1], (1, 1), chanDim, bnEps=bnEps, bnMom=bnMom)

        # The classifier head belongs after all residual stages, outside the loops.
        x = BatchNormalization(axis=chanDim, epsilon=bnEps, momentum=bnMom)(x)
        x = Activation("relu")(x)
        x = AveragePooling2D((8, 8))(x)

        x = Flatten()(x)
        x = Dense(classes, kernel_regularizer=l2(reg))(x)
        x = Activation("softmax")(x)

        model = Model(inputs, x, name="resnet")

        return model
Example #25
    def build(width, height, depth, classes):
        # initialize the input shape to be "channels last" and the
        # channels dimension itself
        inputShape = (height, width, depth)
        chanDim = -1
        # if we are using "channels first", update the input shape
        # and channels dimension
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # define the model input and first CONV module
        inputs = Input(shape=inputShape)
        x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1),
                                      chanDim)
        # two Inception modules followed by a downsample module
        x = MiniGoogLeNet.inception_module(x, 32, 32, chanDim)
        x = MiniGoogLeNet.inception_module(x, 32, 48, chanDim)
        x = MiniGoogLeNet.downsample_module(x, 80, chanDim)
        # four Inception modules followed by a downsample module
        x = MiniGoogLeNet.inception_module(x, 112, 48, chanDim)
        x = MiniGoogLeNet.inception_module(x, 96, 64, chanDim)
        x = MiniGoogLeNet.inception_module(x, 80, 80, chanDim)
        x = MiniGoogLeNet.inception_module(x, 48, 96, chanDim)
        x = MiniGoogLeNet.downsample_module(x, 96, chanDim)
        # two Inception modules followed by global POOL and dropout
        x = MiniGoogLeNet.inception_module(x, 176, 160, chanDim)
        x = MiniGoogLeNet.inception_module(x, 176, 160, chanDim)
        x = AveragePooling2D((7, 7))(x)
        x = Dropout(0.5)(x)
        # softmax classifier
        x = Flatten()(x)
        x = Dense(classes)(x)
        x = Activation("softmax")(x)
        # create the model
        model = Model(inputs, x, name="googlenet")
        # return the constructed network architecture
        return model
Example #26
def make_symbol(input_shape, num_classes=1000):
    K.set_image_data_format('channels_first')
    data = Input(input_shape)
    #stage 1
    conv = conv_factory(32, 3, 3, stride=(2, 2))(data)
    conv_1 = conv_factory(32, 3, 3)(conv)
    conv_2 = conv_factory(64, 3, 3, pad=(1, 1))(conv_1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(conv_2)
    #stage 2
    conv_3 = conv_factory(80)(pool1)
    conv_4 = conv_factory(192, 3, 3)(conv_3)
    pool2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(conv_4)
    #stage 3
    in3a = inception7a(pool2, 64, 64, 96, 96, 48, 64, "avg", 32)
    in3b = inception7a(in3a, 64, 64, 96, 96, 48, 64, "avg", 64)
    in3c = inception7a(in3b, 64, 64, 96, 96, 48, 64, "avg", 64)
    in3d = inception7b(in3c, 384, 64, 96, 96, "max")
    # stage 4
    in4a = inception7c(in3d, 192, 128, 128, 192, 128, 128, 128, 128, 192,
                       "avg", 192)
    in4b = inception7c(in4a, 192, 160, 160, 192, 160, 160, 160, 160, 192,
                       "avg", 192)
    in4c = inception7c(in4b, 192, 160, 160, 192, 160, 160, 160, 160, 192,
                       "avg", 192)
    in4d = inception7c(in4c, 192, 192, 192, 192, 192, 192, 192, 192, 192,
                       "avg", 192)
    in4e = inception7d(in4d, 192, 320, 192, 192, 192, 192, "max")
    # stage 5
    in5a = inception7e(in4e, 320, 384, 384, 384, 448, 384, 384, 384, "avg",
                       192)
    in5b = inception7e(in5a, 320, 384, 384, 384, 448, 384, 384, 384, "max",
                       192)
    # pool
    pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1))(in5b)
    flatten = Flatten()(pool)
    out = Dense(num_classes, activation='softmax')(flatten)
    model = Model(inputs=data, outputs=out)
    return model
Example #27
def inception_resnet_v2(scale=True):
    init = Input((settings.img_rows, settings.img_cols, 3, ))
    x = resnet_v2_stem(init)  # Output: 35 * 35 * 256

    # 5 x Inception A
    for i in range(5):
        x = inception_resnet_v2_A(x, scale_residual=scale)
        # Output: 35 * 35 * 256

    # Reduction A
    x = reduction_resnet_v2_A(x)  # Output: 17 * 17 * 896

    # 10 x Inception B
    for i in range(10):
        x = inception_resnet_v2_B(x, scale_residual=scale)
        # Output: 17 * 17 * 896

    # Reduction B
    x = reduction_resnet_v2_B(x)  # Output: 8 * 8 * 1792

    # 5 x Inception C
    for i in range(5):
        x = inception_resnet_v2_C(x, scale_residual=scale)
        # Output: 8 * 8 * 1792

    # Average Pooling
    x = AveragePooling2D((8, 8))(x)  # Output: 1792

    # Dropout
    x = Dropout(0.2)(x)
    x = Flatten()(x)  # Output: 1792

    # Output layer
    output = Dense(units=len(data_setting.objects_num_name), activation="softmax")(x)  # Output: 10000

    model = Model(init, output, name="Inception-ResNet-v2")

    return model
Example #28
    def _create_graph(self):
        logging.debug("Creating model...")

        assert ((self._depth - 4) % 6 == 0)
        n = (self._depth - 4) // 6

        inputs = Input(shape=self._input_shape)

        n_stages = [16, 16 * self._k, 32 * self._k, 64 * self._k]

        conv1 = Conv2D(filters=n_stages[0], kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same",
                       kernel_initializer=self._weight_init,
                       kernel_regularizer=l2(self._weight_decay),
                       use_bias=self._use_bias)(inputs)  # "One conv at the beginning (spatial size: 32x32)"

        # Add wide residual blocks
        block_fn = self._wide_basic
        conv2 = self._layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1, 1))(
            conv1)
        conv3 = self._layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2, 2))(
            conv2)
        conv4 = self._layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2, 2))(
            conv3)
        batch_norm = BatchNormalization(axis=self._channel_axis)(conv4)
        relu = Activation("relu")(batch_norm)

        # Classifier block
        pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
        flatten = Flatten()(pool)
        predictions_g = Dense(units=2, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax")(flatten)
        predictions_a = Dense(units=101, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax")(flatten)

        self._model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])
        self._graph = tf.get_default_graph()
Example #29
	def create(self):
		bn_axis = 3

		img_input = Input(shape=(224, 224, 3))

		x = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
		x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', name='conv1')(x)
		x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
		x = Activation('relu')(x)
		x = MaxPooling2D((3, 3), strides=(2, 2))(x)

		x = self.conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
		x = self.identity_block(x, 3, [64, 64, 256], stage=2, block='b')
		x = self.identity_block(x, 3, [64, 64, 256], stage=2, block='c')

		x = self.conv_block(x, 3, [128, 128, 512], stage=3, block='a')
		x = self.identity_block(x, 3, [128, 128, 512], stage=3, block='b')
		x = self.identity_block(x, 3, [128, 128, 512], stage=3, block='c')
		x = self.identity_block(x, 3, [128, 128, 512], stage=3, block='d')

		x = self.conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
		x = self.identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
		x = self.identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
		x = self.identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
		x = self.identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
		x = self.identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

		x = self.conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
		x = self.identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
		x = self.identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

		x = AveragePooling2D((7, 7), name='avg_pool')(x)
		x = Flatten()(x)
		x = Dense(1000, activation='softmax', name='fc1000')(x)

		self.model = Model(img_input, x, name='resnet50')

		self.model.load_weights(get_file(self.weight_filename, self.WEIGHT_FILE_PATH + self.weight_filename, cache_subdir='models'))
Example #30
def squeezenet(num_classes):
    inp = Input(shape=(224, 224, 3))

    conv1 = Conv2D(64, (3, 3),
                   strides=(2, 2),
                   activation='relu',
                   padding='valid')(inp)
    maxpool1 = MaxPooling2D(pool_size=(3, 3), strides=2)(conv1)

    fire2 = fire(maxpool1, 16, 64)
    fire3 = fire(fire2, 16, 64)
    fire4 = fire(fire3, 32, 128)

    maxpool4 = MaxPooling2D(pool_size=(3, 3), strides=2)(fire4)
    fire5 = fire(maxpool4, 32, 128)
    fire6 = fire(fire5, 48, 192)
    fire7 = fire(fire6, 48, 192)
    fire8 = fire(fire7, 64, 256)

    maxpool8 = MaxPooling2D(pool_size=(3, 3), strides=2)(fire8)
    fire9 = fire(maxpool8, 64, 256)
    conv10 = Conv2D(num_classes, (1, 1), activation='relu',
                    padding='valid')(fire9)
    # # dr = Dropout(0.5)(conv10)

    avgpool = AveragePooling2D(pool_size=(13, 13), strides=1)(conv10)

    avgpool = Flatten()(avgpool)
    avgpool = Dense(4096, activation='relu')(avgpool)
    # avgpool = Dropout(0.5)(avgpool)
    avgpool = Dense(4096, activation='relu')(avgpool)
    # avgpool = Dropout(0.5)(avgpool)
    out = Dense(num_classes, activation='sigmoid')(avgpool)

    model = Model(inputs=inp, outputs=out)
    print(model.summary())

    return model
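The `fire` helper is not shown above. A minimal sketch of the standard SqueezeNet fire module (a 1x1 squeeze followed by parallel 1x1 and 3x3 expands, concatenated on the channel axis); the parameter names are assumptions:

from keras.layers import Conv2D, concatenate

def fire(x, squeeze_filters, expand_filters):
    # Squeeze to few channels, then expand along two parallel paths.
    s = Conv2D(squeeze_filters, (1, 1), activation='relu', padding='valid')(x)
    e1 = Conv2D(expand_filters, (1, 1), activation='relu', padding='valid')(s)
    e3 = Conv2D(expand_filters, (3, 3), activation='relu', padding='same')(s)
    return concatenate([e1, e3], axis=-1)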