def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3],
                    axis=channel_axis)
    return x
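# The block above assumes a conv2d_bn helper (Conv2D + BatchNormalization + ReLU) that is
# not shown here. A minimal sketch of such a helper, with the signature inferred from the
# calls above (an assumption, not the original implementation):
from keras.layers import Conv2D, BatchNormalization, Activation
from keras import backend as K

def conv2d_bn(x, nb_filter, num_row, num_col, padding='same', strides=(1, 1)):
    # convolution without bias, followed by batch norm and ReLU
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    x = Conv2D(nb_filter, (num_row, num_col),
               strides=strides, padding=padding, use_bias=False)(x)
    x = BatchNormalization(axis=channel_axis, scale=False)(x)
    x = Activation('relu')(x)
    return x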
def __transition_block(ip, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, dropout and AveragePooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, avgpool
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_uniform', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
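# A rough shape check for the transition block above (illustrative only; assumes the
# Keras imports used by the function are already in scope):
from keras.layers import Input
from keras.models import Model

_inp = Input(shape=(32, 32, 64))                       # 32x32 feature map, 64 channels
_out = __transition_block(_inp, nb_filter=64, compression=0.5, dropout_rate=0.2)
Model(_inp, _out).summary()                            # expected output shape: (None, 16, 16, 32)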
Example #3
def inception_block_3a(X):
    X_3x3 = conv2d_bn(X,
                      layer='inception_5a_3x3',
                      cv1_out=96,
                      cv1_filter=(1, 1),
                      cv2_out=384,
                      cv2_filter=(3, 3),
                      cv2_strides=(1, 1),
                      padding=(1, 1))
    X_pool = AveragePooling2D(pool_size=(3, 3), strides=(
        3, 3), data_format='channels_first')(X)
    X_pool = conv2d_bn(X_pool,
                       layer='inception_5a_pool',
                       cv1_out=96,
                       cv1_filter=(1, 1),
                       padding=(1, 1))
    X_1x1 = conv2d_bn(X,
                      layer='inception_5a_1x1',
                      cv1_out=256,
                      cv1_filter=(1, 1))

    inception = concatenate([X_3x3, X_pool, X_1x1], axis=1)

    return inception
Example #4
def model_cam(model_input,
			  gap_input,
			  gap_spacial_size,
			  num_classes,
			  cam_conv_layer_name):
	"""Build CAM model architecture
	
	# Arguments
		model_input: input tensor of CAM model
		gap_input: input tensor to cam gap layers
		gap_spacial_size: average pooling size
		cam_conv_layer_name: the name of new added conv layer

	"""
	x = Convolution2D(1024, 3, 3, 
					  activation='relu', 
					  border_mode='same',
					  name=cam_conv_layer_name)(gap_input)
	# Add GAP layer
	x = AveragePooling2D((gap_spacial_size, gap_spacial_size))(x)
	x = Flatten()(x)
	predictions = Dense(num_classes, activation='softmax')(x)
	model = Model(input=model_input, output=predictions)
	return model
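# A possible way to use model_cam (illustrative only; the tiny backbone below is made up
# and uses the same Keras 1 style API as the function above):
from keras.layers import Input, Convolution2D, MaxPooling2D

inp = Input(shape=(224, 224, 3))
feat = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(inp)
feat = MaxPooling2D(pool_size=(16, 16))(feat)          # 224x224 -> 14x14 feature map
cam_model = model_cam(model_input=inp,
                      gap_input=feat,
                      gap_spacial_size=14,
                      num_classes=10,
                      cam_conv_layer_name='cam_conv')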
Example #5
def get(input_shape, num_classes, residual_unit_cls, units_per_block):
    """As described in [1]"""
    x = Input(shape=input_shape)
    conv1 = Conv2D(filters=64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer='glorot_normal')(x)
    norm1 = BatchNormalization(axis=3)(conv1)
    relu1 = Activation('relu')(norm1)
    current = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                           padding='same')(relu1)
    filters = 64
    for i, units in enumerate(units_per_block):
        current = ResidualBlock(units,
                                filters,
                                residual_unit_cls,
                                is_first_block=(i == 0))(current)
        filters *= 2
    relu1 = Activation('relu')(current)
    avg_pool = AveragePooling2D(pool_size=(7, 7), strides=(1, 1))(relu1)
    flatten1 = Flatten()(avg_pool)
    dense = Dense(units=num_classes, activation='softmax')(flatten1)
    return Model(inputs=x, outputs=dense)
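# A rough call sketch for the builder above. ResidualBlock and the residual unit class are
# defined elsewhere in this codebase; `BasicResidualUnit` below is only a placeholder name.
model = get(input_shape=(224, 224, 3),
            num_classes=1000,
            residual_unit_cls=BasicResidualUnit,       # hypothetical unit class
            units_per_block=[2, 2, 2, 2])              # ResNet-18-like layout
model.summary()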
def create_model(MAX_QRY_LENGTH = 50, MAX_DOC_LENGTH = 2900, NUM_OF_FEATS = 10, PSGS_SIZE = [(50, 1)], NUM_OF_FILTERS = 5, tau = 1):
	alpha_size = len(PSGS_SIZE)
	psgMat = Input(shape = (MAX_QRY_LENGTH, MAX_DOC_LENGTH, 1,), name="passage")
	homoMat = Input(shape = (NUM_OF_FEATS, ), name="h_feats")
	# Conv2D, mean pooling and max pooling
	M, K, r = [], [], []
	for idx, PSG_SIZE in enumerate(PSGS_SIZE):
		tau = PSG_SIZE[0] // 2  # integer division so Keras gets an int stride
		pool_size = (MAX_QRY_LENGTH - PSG_SIZE[0]) // tau + 1
		# Convolution
		m_1 = Convolution2D(filters=NUM_OF_FILTERS, kernel_size=PSG_SIZE, strides=tau, padding='valid', name="pConv2D_" + str(idx))(psgMat)
		M.append(m_1)
		# Mean pooling
		k_1 = AveragePooling2D(pool_size=(pool_size, 1), strides=1, name="pAvePool_" + str(idx))(M[idx])
		K.append(k_1)
		# Max Pooling
		r_1 = GlobalMaxPooling2D(name="pMaxPool_" + str(idx))(K[idx])
		r.append(r_1)
	concat_r = concatenate(r)
	# Fusion Matrix and predict relevance
	# get h(q, d)
	# MLP(DENSE(len(r(q,d))))
	phi_h = Dense(alpha_size, activation="softmax", name="TrainMat")(homoMat)
	dot_prod = dot([concat_r, phi_h], axes = 1, name="rel_dot")
	# tanh(dot(r.transpose * h))
	pred = Activation("tanh", name="activation_tanh")(dot_prod)
	
	# We now have everything we need to define our model.
	model = Model(inputs = [psgMat, homoMat], outputs = pred)
	model.summary()
	'''
	from keras.utils import plot_model
	plot_model(model, to_file='model.png')
	'''
	return model
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None):
    """ Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    """

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)
    # channel axis for BatchNormalization/Scale; in the original code this is a module-level global
    concat_axis = 3 if K.image_data_format() == 'channels_last' else 1

    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           momentum=1,
                           name=conv_name_base + '_batch_normalization',
                           trainable=False)(x, training=False)
    x = Scale(axis=concat_axis,
              name=conv_name_base + '_scale',
              trainable=False)(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Conv2D(int(nb_filter * compression), (1, 1),
               name=conv_name_base,
               use_bias=False,
               trainable=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x
Example #8
def transition_block(ip,
                     nb_filter,
                     compression=1.0,
                     dropout_rate=None,
                     weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, dropout and AveragePooling2D

    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                     in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor, after applying batch_norm, relu-conv, dropout, avgpool

    '''

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = BatchNormalization(mode=0,
                           axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(ip)
    x = Activation('relu')(x)
    x = Convolution2D(int(nb_filter * compression),
                      1,
                      1,
                      init="he_uniform",
                      border_mode="same",
                      bias=False,
                      W_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
    def __func10__(self, input_shape):
        from keras.layers import Input
        from keras.models import Model
        from keras.layers.pooling import AveragePooling2D
        from keras.layers.core import Lambda
        from keras import backend as K
        from Executer import utils

        inception_4e = Input(shape=input_shape)
        inception_5a_pool = Lambda(lambda x: x**2,
                                   name='power2_5a')(inception_4e)
        inception_5a_pool = AveragePooling2D(pool_size=(3, 3),
                                             strides=(3, 3))(inception_5a_pool)
        inception_5a_pool = Lambda(lambda x: x * 9,
                                   name='mult9_5a')(inception_5a_pool)
        inception_5a_pool = Lambda(lambda x: K.sqrt(x),
                                   name='sqrt_5a')(inception_5a_pool)
        inception_5a_pool = utils.conv2d_bn(inception_5a_pool,
                                            layer='inception_5a_pool',
                                            cv1_out=96,
                                            cv1_filter=(1, 1),
                                            padding=(1, 1))
        model = Model(inputs=inception_4e, outputs=inception_5a_pool)
        return model
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, leaky ReLU, 1x1 Conv2D with optional compression, AveragePooling2D
        and a squeeze-and-excitation block
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, lrelu-conv, avgpool and squeeze-excite
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    myLRelu = Lambda(lambda x: K.relu(x, 0.1))
    x = myLRelu(x)
    x = Conv2D(int(nb_filter * compression), (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = squeeze_excite_block(x)
    return x
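# This variant ends with a squeeze_excite_block that is not shown in the example. A minimal
# squeeze-and-excitation sketch (channels-last layout and the reduction ratio are assumptions):
from keras.layers import GlobalAveragePooling2D, Reshape, Dense, multiply
from keras import backend as K

def squeeze_excite_block(x, ratio=16):
    # global context -> bottleneck MLP -> per-channel sigmoid gate -> rescale the feature map
    filters = K.int_shape(x)[-1]
    se = GlobalAveragePooling2D()(x)
    se = Reshape((1, 1, filters))(se)
    se = Dense(filters // ratio, activation='relu', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', use_bias=False)(se)
    return multiply([x, se])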
def New_model(n_class):
    model_name = 'CNN_12'

    N = 224

    model = Sequential()
    model.add(Convolution2D(32, (3, 3), input_shape=(N, N, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(32, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # model.add(Convolution2D(64, (3, 3)))
    # model.add(ZeroPadding2D(padding=(1,1)))
    # model.add(BatchNormalization())
    # model.add(PReLU())

    # model.add(Convolution2D(64, (3, 3)))
    # model.add(ZeroPadding2D(padding=(1,1)))
    # model.add(BatchNormalization())
    # model.add(PReLU())
    # model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Convolution2D(128, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(128, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(256, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(256, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(512, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(512, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(1028, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(1028, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(AveragePooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    # Dense = Fully connected layer
    model.add(Dense(n_class))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    return model, model_name
Example #12
def resnet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):

    RESNET50_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'  #'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_resnet50.h5'
    RESNET50_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'  #'https://github.com/rcmalli/keras-vggface/releases/download/v2.0/rcmalli_vggface_tf_notop_resnet50.h5'

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_data_format() == "channels_last":
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(64, (7, 7),
               use_bias=False,
               strides=(2, 2),
               padding='same',
               name='conv1/7x7_s2')(img_input)
    x = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = resnet_conv_block(x,
                          3, [64, 64, 256],
                          stage=2,
                          block=1,
                          strides=(1, 1))
    x = resnet_identity_block(x, 3, [64, 64, 256], stage=2, block=2)
    x = resnet_identity_block(x, 3, [64, 64, 256], stage=2, block=3)

    x = resnet_conv_block(x, 3, [128, 128, 512], stage=3, block=1)
    x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=2)
    x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=3)
    x = resnet_identity_block(x, 3, [128, 128, 512], stage=3, block=4)

    x = resnet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5)
    x = resnet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6)

    x = resnet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1)
    x = resnet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2)
    x = resnet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='vggface_resnet50')

    model.summary()
    '''
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                                    RESNET50_WEIGHTS_PATH,
                                    cache_subdir='./models')
        else:
            weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                     RESNET50_WEIGHTS_PATH_NO_TOP,
                                    cache_dir="./models")
        
        model.load_weights(weights_path)

        if K.backend() == "theano":
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='classifier')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
            
        if K.image_data_format() == "channels_first" and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)
    '''

    return model
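# Typical call for the backbone above (resnet_conv_block / resnet_identity_block must be
# defined elsewhere for this to run; shown only as a usage sketch):
backbone = resnet50(include_top=False, weights=None,
                    input_shape=(224, 224, 3), pooling='avg')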
Example #13
def create_model():
    myInput = Input(shape=(150, 150, 3))

    x = ZeroPadding2D(padding=(3, 3))(myInput)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(LRN2D, name='lrn_1')(x)
    x = Conv2D(64, (1, 1), name='conv2')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name='conv3')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
    x = Activation('relu')(x)
    x = Lambda(LRN2D, name='lrn_2')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)

    # Inception3a
    inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3),
                              name='inception_3a_3x3_conv2')(inception_3a_3x3)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)

    inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5),
                              name='inception_3a_5x5_conv2')(inception_3a_5x5)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)

    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(
        32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
    inception_3a_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3a_pool_bn')(inception_3a_pool)
    inception_3a_pool = Activation('relu')(inception_3a_pool)
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3,
                                                        4)))(inception_3a_pool)

    inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
    inception_3a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
    inception_3a_1x1 = Activation('relu')(inception_3a_1x1)

    inception_3a = concatenate([
        inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1
    ],
                               axis=3)

    # Inception3b
    inception_3b_3x3 = Conv2D(96, (1, 1),
                              name='inception_3b_3x3_conv1')(inception_3a)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3),
                              name='inception_3b_3x3_conv2')(inception_3b_3x3)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

    inception_3b_5x5 = Conv2D(32, (1, 1),
                              name='inception_3b_5x5_conv1')(inception_3a)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5),
                              name='inception_3b_5x5_conv2')(inception_3b_5x5)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

    inception_3b_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_3a)
    inception_3b_pool = Conv2D(
        64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
    inception_3b_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3b_pool_bn')(inception_3b_pool)
    inception_3b_pool = Activation('relu')(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

    inception_3b_1x1 = Conv2D(64, (1, 1),
                              name='inception_3b_1x1_conv')(inception_3a)
    inception_3b_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
    inception_3b_1x1 = Activation('relu')(inception_3b_1x1)

    inception_3b = concatenate([
        inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1
    ],
                               axis=3)

    # Inception3c
    inception_3c_3x3 = utils.conv2d_bn(inception_3b,
                                       layer='inception_3c_3x3',
                                       cv1_out=128,
                                       cv1_filter=(1, 1),
                                       cv2_out=256,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(2, 2),
                                       padding=(1, 1))

    inception_3c_5x5 = utils.conv2d_bn(inception_3b,
                                       layer='inception_3c_5x5',
                                       cv1_out=32,
                                       cv1_filter=(1, 1),
                                       cv2_out=64,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(2, 2),
                                       padding=(2, 2))

    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_3c_pool)

    inception_3c = concatenate(
        [inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)

    #inception 4a
    inception_4a_3x3 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=192,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))
    inception_4a_5x5 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_5x5',
                                       cv1_out=32,
                                       cv1_filter=(1, 1),
                                       cv2_out=64,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(1, 1),
                                       padding=(2, 2))

    inception_4a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_3c)
    inception_4a_pool = utils.conv2d_bn(inception_4a_pool,
                                        layer='inception_4a_pool',
                                        cv1_out=128,
                                        cv1_filter=(1, 1),
                                        padding=(2, 2))
    inception_4a_1x1 = utils.conv2d_bn(inception_3c,
                                       layer='inception_4a_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))
    inception_4a = concatenate([
        inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1
    ],
                               axis=3)

    #inception4e
    inception_4e_3x3 = utils.conv2d_bn(inception_4a,
                                       layer='inception_4e_3x3',
                                       cv1_out=160,
                                       cv1_filter=(1, 1),
                                       cv2_out=256,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(2, 2),
                                       padding=(1, 1))
    inception_4e_5x5 = utils.conv2d_bn(inception_4a,
                                       layer='inception_4e_5x5',
                                       cv1_out=64,
                                       cv1_filter=(1, 1),
                                       cv2_out=128,
                                       cv2_filter=(5, 5),
                                       cv2_strides=(2, 2),
                                       padding=(2, 2))
    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_4e_pool)

    inception_4e = concatenate(
        [inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)

    #inception5a
    inception_5a_3x3 = utils.conv2d_bn(inception_4e,
                                       layer='inception_5a_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=384,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))

    inception_5a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_4e)
    inception_5a_pool = utils.conv2d_bn(inception_5a_pool,
                                        layer='inception_5a_pool',
                                        cv1_out=96,
                                        cv1_filter=(1, 1),
                                        padding=(1, 1))
    inception_5a_1x1 = utils.conv2d_bn(inception_4e,
                                       layer='inception_5a_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))

    inception_5a = concatenate(
        [inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)

    #inception_5b
    inception_5b_3x3 = utils.conv2d_bn(inception_5a,
                                       layer='inception_5b_3x3',
                                       cv1_out=96,
                                       cv1_filter=(1, 1),
                                       cv2_out=384,
                                       cv2_filter=(3, 3),
                                       cv2_strides=(1, 1),
                                       padding=(1, 1))
    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
    inception_5b_pool = utils.conv2d_bn(inception_5b_pool,
                                        layer='inception_5b_pool',
                                        cv1_out=96,
                                        cv1_filter=(1, 1))
    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

    inception_5b_1x1 = utils.conv2d_bn(inception_5a,
                                       layer='inception_5b_1x1',
                                       cv1_out=256,
                                       cv1_filter=(1, 1))
    inception_5b = concatenate(
        [inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)

    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name='dense_layer')(reshape_layer)
    norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1),
                        name='norm_layer')(dense_layer)

    return Model(inputs=[myInput], outputs=norm_layer)
def faceRecoModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet
    
    Arguments:
    input_shape -- shape of the images of the dataset
    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # First Block
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=1, name='bn1')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides=2)(X)

    # Second Block
    X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)

    # Second Block
    X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size=3, strides=2)(X)

    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)

    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)

    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)

    # Top layer
    X = AveragePooling2D(pool_size=(3, 3),
                         strides=(1, 1),
                         data_format='channels_first')(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)

    # L2 normalization
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

    # Create model instance
    model = Model(inputs=X_input, outputs=X, name='FaceRecoModel')

    return model
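# A hedged usage sketch: the model maps a face crop to a 128-d L2-normalised embedding, so
# identity comparison reduces to an embedding distance. Channels-first input is assumed,
# matching the axis=1 batch norms and channels_first pooling above, and the inception_block_*
# helpers must be importable for faceRecoModel to build.
import numpy as np
from keras import backend as K

K.set_image_data_format('channels_first')
model = faceRecoModel(input_shape=(3, 96, 96))

img_a = np.random.rand(1, 3, 96, 96).astype('float32')   # random stand-ins for face crops
img_b = np.random.rand(1, 3, 96, 96).astype('float32')

dist = np.linalg.norm(model.predict(img_a) - model.predict(img_b))
print(dist)   # smaller distance -> more likely the same identity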
    def _build_model(self):
        self._create_submodels()

        # AUTOENCODER #########################################################################################
        outer_input_shape = list(self.input_shape)
        outer_input_shape[0] *= 2
        outer_input_shape[1] *= 2
        ae_input = Input(shape=outer_input_shape,
                         dtype="float32",
                         name=self.name_prefix + "Autoencoder_Input")

        if self.config.tiles_use_global:
            GT_fields = Lambda(lambda x: x[:, ..., 3:],
                               name="Slice_gt_input_{}".format(0))(ae_input)
            GT_fields = AveragePooling2D(pool_size=(2, 2))(GT_fields)

        # 1. slice
        cur_input = Lambda(
            lambda x: x[:, 0:self.config.res_y, 0:self.config.res_x, :3],
            name="Slice_ae_input_{}".format(0))(ae_input)
        if self.config.tiles_use_global:
            cur_input = concatenate([cur_input, GT_fields], axis=3)
        z0 = self._encoder(cur_input)
        output_0 = self._decoder(z0)
        # 2. slice
        cur_input = Lambda(
            lambda x: x[:, 0:self.config.res_y, self.config.res_x:, :3],
            name="Slice_ae_input_{}".format(1))(ae_input)
        if self.config.tiles_use_global:
            cur_input = concatenate([cur_input, GT_fields], axis=3)
        z = self._encoder(cur_input)
        output_1 = self._decoder(z)
        # 3. slice
        cur_input = Lambda(
            lambda x: x[:, self.config.res_y:, 0:self.config.res_x, :3],
            name="Slice_ae_input_{}".format(2))(ae_input)
        if self.config.tiles_use_global:
            cur_input = concatenate([cur_input, GT_fields], axis=3)
        z = self._encoder(cur_input)
        output_2 = self._decoder(z)
        # 4. slice
        cur_input = Lambda(
            lambda x: x[:, self.config.res_y:, self.config.res_x:, :3],
            name="Slice_ae_input_{}".format(3))(ae_input)
        if self.config.tiles_use_global:
            cur_input = concatenate([cur_input, GT_fields], axis=3)
        z = self._encoder(cur_input)
        output_3 = self._decoder(z)

        final_out_0 = concatenate([output_0, output_1], axis=2)
        final_out_1 = concatenate([output_2, output_3], axis=2)
        final_out = concatenate([final_out_0, final_out_1], axis=1)

        # print("final_out: {}".format(final_out.shape))
        # if self.use_c:
        #     final_out_c = Lambda(curl, arguments={'data_format': 'NHWC'})(final_out)
        #     if self.use_c and ("density" in self.config.data_type or "levelset" in self.config.data_type):
        #         # cut of density part of "out" tensor and concatenate with the "velo_out" tensor
        #         final_out = Concatenate(axis=-1)([final_out_c, Lambda(K.expand_dims, arguments={'axis': -1})(Lambda(lambda x: x[...,-1])(final_out))])
        # print("use_c final_out: {}".format(final_out.shape))

        p_pred = self._p_pred(z0)
        print("p_pred: {}".format(p_pred.shape))

        output_list = [final_out, p_pred]

        if self.config.tile_multitile_border > 0:
            # Tile Loss
            border_region_start = (outer_input_shape[0] //
                                   2) - 1 - self.config.tile_multitile_border
            border_region_end = (outer_input_shape[0] //
                                 2) + self.config.tile_multitile_border
            tile_output0 = Lambda(
                lambda x: x[:, border_region_start:border_region_end, :, :],
                name="TileBorderLoss_{}".format(0))(final_out)
            border_region_start = (outer_input_shape[1] //
                                   2) - 1 - self.config.tile_multitile_border
            border_region_end = (outer_input_shape[1] //
                                 2) + self.config.tile_multitile_border
            tile_output1 = Lambda(
                lambda x: x[:, :, border_region_start:border_region_end, :],
                name="TileBorderLoss_{}".format(1))(final_out)
            output_list.append(tile_output0)
            output_list.append(tile_output1)

        if len(self.gpus) > 1:
            with tf.device('/cpu:0'):
                self.model = Model(name=self.name_prefix + "Autoencoder",
                                   inputs=ae_input,
                                   outputs=output_list)
        else:
            self.model = Model(name=self.name_prefix + "Autoencoder",
                               inputs=ae_input,
                               outputs=output_list)
Example #16
inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

inception_3b_5x5 = Conv2D(32, (1, 1),
                          name='inception_3b_5x5_conv1')(inception_3a)
inception_3b_5x5 = BatchNormalization(
    axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
inception_3b_5x5 = Conv2D(64, (5, 5),
                          name='inception_3b_5x5_conv2')(inception_3b_5x5)
inception_3b_5x5 = BatchNormalization(
    axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
inception_3b_pool = AveragePooling2D(pool_size=(3, 3),
                                     strides=(3, 3))(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: x * 9, name='mult9_3b')(inception_3b_pool)
inception_3b_pool = Lambda(lambda x: K.sqrt(x),
                           name='sqrt_3b')(inception_3b_pool)
inception_3b_pool = Conv2D(64, (1, 1),
                           name='inception_3b_pool_conv')(inception_3b_pool)
inception_3b_pool = BatchNormalization(
    axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
inception_3b_pool = Activation('relu')(inception_3b_pool)
inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

inception_3b_1x1 = Conv2D(64, (1, 1),
                          name='inception_3b_1x1_conv')(inception_3a)
inception_3b_1x1 = BatchNormalization(
    axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
inception_3b_1x1 = Activation('relu')(inception_3b_1x1)
Example #17
    def build(input_shape,
              num_outputs,
              block_fn,
              repetitions,
              logits_and_block_endpoints=False,
              original_resnet=False,
              input_tensor=None):
        if input_shape is None and input_tensor is None:
            raise Exception(
                "Input shape should be a tuple (nb_channels, nb_rows, nb_cols)"
            )

        assert K.image_dim_ordering() == 'tf'

        if input_tensor is None:
            input = Input(shape=input_shape, name="input_image")
        else:
            input = input_tensor

        resnet_blocks_endpoints = []

        if original_resnet:
            conv1 = _conv_bn_relu(filters=64,
                                  kernel_size=(7, 7),
                                  strides=(2, 2),
                                  name="resnet_initial_conv1")(input)
            conv1 = MaxPooling2D(pool_size=(3, 3),
                                 strides=(2, 2),
                                 padding="same",
                                 name="resnet_initial_pool1")(conv1)
        else:
            conv1 = _conv_bn_relu(filters=64,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  name="resnet_initial_conv1")(input)
            conv1 = _conv_bn_relu(filters=64,
                                  kernel_size=(3, 3),
                                  strides=(1, 1),
                                  name="resnet_initial_conv2")(conv1)
            if logits_and_block_endpoints:
                resnet_blocks_endpoints.append(conv1)
            conv1 = MaxPooling2D(pool_size=(2, 2),
                                 padding="same",
                                 name="resnet_initial_pool1")(conv1)

        block = conv1
        filters = 64

        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2

            if logits_and_block_endpoints:
                """Save each residual block."""
                resnet_blocks_endpoints.append(block)

        if logits_and_block_endpoints:
            return resnet_blocks_endpoints, input

        block = Graph_utils._bn_relu()(block)

        block_shape = K.int_shape(block)
        pool2 = AveragePooling2D(pool_size=(block_shape[1], block_shape[2]),
                                 strides=(1, 1))(block)
        flatten1 = Flatten()(pool2)
        dense = Dense(units=num_outputs,
                      kernel_initializer="he_normal",
                      activation="softmax")(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
def max_avg_pool2d(x):
    max_x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    mean_x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    return Add()([max_x, mean_x])  # element-wise sum of the max- and average-pooled maps
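# Quick shape check for the combined pooling above (illustrative only; assumes the same
# Keras layer imports as the function):
from keras.layers import Input
from keras.models import Model

_inp = Input(shape=(32, 32, 16))
_out = max_avg_pool2d(_inp)                 # 32x32x16 -> 16x16x16
Model(_inp, _out).summary()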
Example #19
def build_model():
    '''
    # First define the overall architecture
    # Step 1: start from the input
    '''

    input_img = Input(shape=(48, 48, 1))
    '''
    First, a quick look at Conv2D in the Keras documentation:
    keras.layers.Conv2D(filters, kernel_size, strides=(1,1), padding='valid', 
                        data_format=None, dilation_rate=(1,1), activation=None, 
                        use_bias=True, kernel_initializer='glorot_uniform', 
                        bias_initializer='zeros', kernel_regularizer=None, 
                        bias_regularizer=None, activity_regularizer=None, 
                        kernel_constraint=None, bias_constraint=None)
    '''

    # 64 filters of size (5, 5)
    # 'valid' padding adds no extra pixels around the original input
    # passing the input tensor in the trailing parentheses is how a layer is applied in the functional API
    block1 = Conv2D(64, (5, 5), padding='valid', activation='relu')(input_img)
    # add zero padding at the borders so the feature map goes back to 48*48
    # data_format gives the dimension ordering; channels_last means (batch, height, width, channels)
    block1 = ZeroPadding2D(padding=(2, 2), data_format='channels_last')(block1)
    # MaxPooling can also take a stride
    block1 = MaxPooling2D(pool_size=(5, 5), strides=(2, 2))(block1)

    block2 = Conv2D(64, (3, 3), activation='relu')(block1)
    block2 = ZeroPadding2D(padding=(1, 1), data_format='channels_last')(block2)

    block3 = Conv2D(64, (3, 3), activation='relu')(block2)
    block3 = AveragePooling2D(pool_size=(3, 3), strides=(2, 2))(block3)
    block3 = ZeroPadding2D(padding=(1, 1), data_format='channels_last')(block3)

    block4 = Conv2D(128, (3, 3), activation='relu')(block3)
    block4 = ZeroPadding2D(padding=(1, 1), data_format='channels_last')(block4)

    block5 = Conv2D(128, (3, 3), activation='relu')(block4)
    block5 = ZeroPadding2D(padding=(1, 1), data_format='channels_last')(block5)
    block5 = AveragePooling2D(pool_size=(3, 3), strides=(2, 2))(block5)
    block5 = Flatten()(block5)

    fc1 = Dense(1024, activation=('relu'))(block5)
    fc1 = Dropout(0.5)(fc1)

    fc2 = Dense(1024, activation='relu')(fc1)
    fc2 = Dropout(0.5)(fc2)

    predict = Dense(7)(fc2)
    predict = Activation('softmax')(predict)
    model = Model(inputs=input_img, outputs=predict)

    # opt = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    # opt = Adam(lr=1e-3)
    opt = Adadelta(lr=0.1, rho=0.95, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    # print a summary of the whole model: a simple layer table plus parameter counts
    model.summary()
    return model
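# A hedged training sketch for the 48x48 grayscale model above (random data, purely illustrative):
import numpy as np
from keras.utils import to_categorical

model = build_model()
x_train = np.random.rand(32, 48, 48, 1).astype('float32')          # 32 fake grayscale faces
y_train = to_categorical(np.random.randint(0, 7, size=32), num_classes=7)
model.fit(x_train, y_train, batch_size=8, epochs=1)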
Example #20
    conv6_resized = K.reshape(conv6_resized,
                              (-1, num_input_channels, 256 * 256))
    classmap = K.dot(WT, conv6_resized).reshape((-1, nb_classes, 256, 256))
    get_cmap = K.function([inc], classmap)
    return get_cmap([X])


if __name__ == '__main__':
    model = VGGCAM(20)
    model.summary()
    model = load_model('VOC-CLS.model')
    x = model.input
    y = model.layers[-8].output
    # Add another conv layer with ReLU + GAP
    y = Convolution2D(1024, 3, 3, activation='relu', border_mode="same")(y)
    y = AveragePooling2D((16, 16))(y)
    y = Flatten()(y)
    # Add the W layer
    y = Dense(20, activation='softmax')(y)
    voc_cam = Model(x, y)
    voc_cam.summary()
    voc_cam.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

    train_generator = loadData(image_path="/home/andrew/VOC-CLS/")
    weight_path = "voc_cam_"
    # checkpointer = ModelCheckpoint(filepath=weight_path + 'weights.{epoch:02d}-{loss:.4f}.hdf5',
    #                                monitor='loss', verbose=1, save_best_only=True, period=2)
    #
    # voc_cam.fit_generator(generator=train_generator, nb_epoch=100, samples_per_epoch=14000,
    block1 = build_resnet_block(hb1, nb_reps = 3, nb_conv = nb_conv,
                                nb_filters = nb_filters,
                                hbp = hbp, wd = wd, data_format = data_format)
    hb2 = HB_2d_conv(hbp, shift = -2)(block1)
    block2 = build_resnet_block(hb2, nb_reps = 3, nb_conv = nb_conv,
                                nb_filters = 2 * nb_filters,
                                hbp = hbp, wd = wd, stride = 2,
                                data_format = data_format)
    hb3 = HB_2d_conv(hbp, shift = -3)(block2)
    block3 = build_resnet_block(hb3, nb_reps = 3,
                                nb_conv = nb_conv, nb_filters = 4 * nb_filters,
                                hbp = hbp, wd = wd, stride = 2,
                                data_format = data_format)
    bn_pre_pool = BatchNormalization(axis = 1)(block3)
    rectifier_pre_pool = Lambda(rectifier)(bn_pre_pool)
    pool = AveragePooling2D(pool_size=(8, 8), data_format = data_format)(rectifier_pre_pool)
    flat = Flatten()(pool)
    dense = Dense(nb_classes, activation = 'softmax',
                   kernel_regularizer = regularizers.l2(wd),
                  use_bias = False)(flat)
    model = Model(inputs = inp, outputs = dense)

    datagen = ImageDataGenerator(width_shift_range = 0.15,
                                 height_shift_range = 0.15,
                                 zoom_range = 0.0,
                                 rotation_range = 0,
                                 horizontal_flip = True,
                                 fill_mode='reflect',
                                 data_format = data_format)

    sgd = SGD(lr = 0.1, momentum = 0.9, decay = 0.00, nesterov = True)
Example #22
def loadModel(url = 'https://drive.google.com/uc?id=1LSe1YCV1x-BfNnfb7DFZTNpv_Q9jITxn'):
	myInput = Input(shape=(96, 96, 3))

	x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
	x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
	x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
	x = Activation('relu')(x)
	x = ZeroPadding2D(padding=(1, 1))(x)
	x = MaxPooling2D(pool_size=3, strides=2)(x)
	x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
	x = Conv2D(64, (1, 1), name='conv2')(x)
	x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
	x = Activation('relu')(x)
	x = ZeroPadding2D(padding=(1, 1))(x)
	x = Conv2D(192, (3, 3), name='conv3')(x)
	x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
	x = Activation('relu')(x)
	x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x)
	x = ZeroPadding2D(padding=(1, 1))(x)
	x = MaxPooling2D(pool_size=3, strides=2)(x)

	# Inception3a
	inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
	inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
	inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
	inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
	inception_3a_3x3 = Conv2D(128, (3, 3), name='inception_3a_3x3_conv2')(inception_3a_3x3)
	inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
	inception_3a_3x3 = Activation('relu')(inception_3a_3x3)

	inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
	inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
	inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
	inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
	inception_3a_5x5 = Conv2D(32, (5, 5), name='inception_3a_5x5_conv2')(inception_3a_5x5)
	inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
	inception_3a_5x5 = Activation('relu')(inception_3a_5x5)

	inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
	inception_3a_pool = Conv2D(32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
	inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_pool_bn')(inception_3a_pool)
	inception_3a_pool = Activation('relu')(inception_3a_pool)
	inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)

	inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
	inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
	inception_3a_1x1 = Activation('relu')(inception_3a_1x1)

	inception_3a = concatenate([inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3)

	# Inception3b
	inception_3b_3x3 = Conv2D(96, (1, 1), name='inception_3b_3x3_conv1')(inception_3a)
	inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
	inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
	inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
	inception_3b_3x3 = Conv2D(128, (3, 3), name='inception_3b_3x3_conv2')(inception_3b_3x3)
	inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
	inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

	inception_3b_5x5 = Conv2D(32, (1, 1), name='inception_3b_5x5_conv1')(inception_3a)
	inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
	inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
	inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
	inception_3b_5x5 = Conv2D(64, (5, 5), name='inception_3b_5x5_conv2')(inception_3b_5x5)
	inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
	inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

	inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
	inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
	inception_3b_pool = Lambda(lambda x: x*9, name='mult9_3b')(inception_3b_pool)
	inception_3b_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_3b')(inception_3b_pool)
	inception_3b_pool = Conv2D(64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
	inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_pool_bn')(inception_3b_pool)
	inception_3b_pool = Activation('relu')(inception_3b_pool)
	inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

	inception_3b_1x1 = Conv2D(64, (1, 1), name='inception_3b_1x1_conv')(inception_3a)
	inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
	inception_3b_1x1 = Activation('relu')(inception_3b_1x1)

	inception_3b = concatenate([inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3)

	# Inception3c
	inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name='inception_3c_3x3_conv1')(inception_3b)
	inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
	inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
	inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
	inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name='inception_3c_3x3_conv'+'2')(inception_3c_3x3)
	inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_3x3_bn'+'2')(inception_3c_3x3)
	inception_3c_3x3 = Activation('relu')(inception_3c_3x3)

	inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name='inception_3c_5x5_conv1')(inception_3b)
	inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
	inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
	inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
	inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name='inception_3c_5x5_conv'+'2')(inception_3c_5x5)
	inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_3c_5x5_bn'+'2')(inception_3c_5x5)
	inception_3c_5x5 = Activation('relu')(inception_3c_5x5)

	inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
	inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)

	inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)

	#inception 4a
	inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name='inception_4a_3x3_conv'+'1')(inception_3c)
	inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'1')(inception_4a_3x3)
	inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
	inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
	inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name='inception_4a_3x3_conv'+'2')(inception_4a_3x3)
	inception_4a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_3x3_bn'+'2')(inception_4a_3x3)
	inception_4a_3x3 = Activation('relu')(inception_4a_3x3)

	inception_4a_5x5 = Conv2D(32, (1,1), strides=(1,1), name='inception_4a_5x5_conv1')(inception_3c)
	inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn1')(inception_4a_5x5)
	inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
	inception_4a_5x5 = ZeroPadding2D(padding=(2,2))(inception_4a_5x5)
	inception_4a_5x5 = Conv2D(64, (5,5), strides=(1,1), name='inception_4a_5x5_conv'+'2')(inception_4a_5x5)
	inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_5x5_bn'+'2')(inception_4a_5x5)
	inception_4a_5x5 = Activation('relu')(inception_4a_5x5)

	inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
	inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
	inception_4a_pool = Lambda(lambda x: x*9, name='mult9_4a')(inception_4a_pool)
	inception_4a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_4a')(inception_4a_pool)

	inception_4a_pool = Conv2D(128, (1,1), strides=(1,1), name='inception_4a_pool_conv'+'')(inception_4a_pool)
	inception_4a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_pool_bn'+'')(inception_4a_pool)
	inception_4a_pool = Activation('relu')(inception_4a_pool)
	inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)

	inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name='inception_4a_1x1_conv'+'')(inception_3c)
	inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4a_1x1_bn'+'')(inception_4a_1x1)
	inception_4a_1x1 = Activation('relu')(inception_4a_1x1)

	inception_4a = concatenate([inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3)

	#inception4e
	inception_4e_3x3 = Conv2D(160, (1,1), strides=(1,1), name='inception_4e_3x3_conv'+'1')(inception_4a)
	inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'1')(inception_4e_3x3)
	inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
	inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
	inception_4e_3x3 = Conv2D(256, (3,3), strides=(2,2), name='inception_4e_3x3_conv'+'2')(inception_4e_3x3)
	inception_4e_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_3x3_bn'+'2')(inception_4e_3x3)
	inception_4e_3x3 = Activation('relu')(inception_4e_3x3)

	inception_4e_5x5 = Conv2D(64, (1,1), strides=(1,1), name='inception_4e_5x5_conv'+'1')(inception_4a)
	inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'1')(inception_4e_5x5)
	inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
	inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
	inception_4e_5x5 = Conv2D(128, (5,5), strides=(2,2), name='inception_4e_5x5_conv'+'2')(inception_4e_5x5)
	inception_4e_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_4e_5x5_bn'+'2')(inception_4e_5x5)
	inception_4e_5x5 = Activation('relu')(inception_4e_5x5)

	inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
	inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)

	inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)

	#inception5a
	inception_5a_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_3x3_conv'+'1')(inception_4e)
	inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'1')(inception_5a_3x3)
	inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
	inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
	inception_5a_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5a_3x3_conv'+'2')(inception_5a_3x3)
	inception_5a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_3x3_bn'+'2')(inception_5a_3x3)
	inception_5a_3x3 = Activation('relu')(inception_5a_3x3)

	inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
	inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
	inception_5a_pool = Lambda(lambda x: x*9, name='mult9_5a')(inception_5a_pool)
	inception_5a_pool = Lambda(lambda x: K.sqrt(x), name='sqrt_5a')(inception_5a_pool)

	inception_5a_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5a_pool_conv'+'')(inception_5a_pool)
	inception_5a_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_pool_bn'+'')(inception_5a_pool)
	inception_5a_pool = Activation('relu')(inception_5a_pool)
	inception_5a_pool = ZeroPadding2D(padding=(1,1))(inception_5a_pool)

	inception_5a_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5a_1x1_conv'+'')(inception_4e)
	inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5a_1x1_bn'+'')(inception_5a_1x1)
	inception_5a_1x1 = Activation('relu')(inception_5a_1x1)

	inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)

	#inception_5b
	inception_5b_3x3 = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_3x3_conv'+'1')(inception_5a)
	inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'1')(inception_5b_3x3)
	inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
	inception_5b_3x3 = ZeroPadding2D(padding=(1,1))(inception_5b_3x3)
	inception_5b_3x3 = Conv2D(384, (3,3), strides=(1,1), name='inception_5b_3x3_conv'+'2')(inception_5b_3x3)
	inception_5b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_3x3_bn'+'2')(inception_5b_3x3)
	inception_5b_3x3 = Activation('relu')(inception_5b_3x3)

	inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)

	inception_5b_pool = Conv2D(96, (1,1), strides=(1,1), name='inception_5b_pool_conv'+'')(inception_5b_pool)
	inception_5b_pool = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_pool_bn'+'')(inception_5b_pool)
	inception_5b_pool = Activation('relu')(inception_5b_pool)

	inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

	inception_5b_1x1 = Conv2D(256, (1,1), strides=(1,1), name='inception_5b_1x1_conv'+'')(inception_5a)
	inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name='inception_5b_1x1_bn'+'')(inception_5b_1x1)
	inception_5b_1x1 = Activation('relu')(inception_5b_1x1)

	inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)

	av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
	reshape_layer = Flatten()(av_pool)
	dense_layer = Dense(128, name='dense_layer')(reshape_layer)
	norm_layer = Lambda(lambda  x: K.l2_normalize(x, axis=1), name='norm_layer')(dense_layer)
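	# The 128-d dense output is L2-normalised so that every face maps to a unit-length
	# OpenFace-style embedding; similarity between faces then reduces to a cosine or
	# Euclidean distance between these vectors.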

	# Final Model
	model = Model(inputs=[myInput], outputs=norm_layer)
	
	#-----------------------------------
	
	home = str(Path.home())
	
	if not os.path.isfile(home+'/.deepface/weights/openface_weights.h5'):
		print("openface_weights.h5 will be downloaded...")
		
		output = home+'/.deepface/weights/openface_weights.h5'
		gdown.download(url, output, quiet=False)
	
	#-----------------------------------
	
	model.load_weights(home+'/.deepface/weights/openface_weights.h5')
	
	#-----------------------------------
	
	return model
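
# A minimal usage sketch (hypothetical names): assuming the builder above is exposed as
# load_model(), embedding a batch of aligned 96x96 RGB face crops could look like
#   model = load_model()
#   embeddings = model.predict(faces)   # faces: array of shape (n, 96, 96, 3)
# giving one L2-normalised 128-d vector per face, so verification reduces to thresholding
# the distance between two such vectors.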
Beispiel #23
0
trainAug = ImageDataGenerator(  # earlier augmentation arguments are truncated in the source
                              horizontal_flip=True,
                              fill_mode="nearest")

valAug = ImageDataGenerator()

mean = np.array([123.68, 116.779, 103.939], dtype="float32")
trainAug.mean = mean
valAug.mean = mean
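# Per-channel ImageNet mean in RGB order; attaching it to both generators is meant to make
# their standardize step subtract it from every frame, matching the preprocessing ResNet-50
# was trained with.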

# Load the ResNet-50 network
baseModel = ResNet50(weights="imagenet",
                     include_top=False,
                     input_tensor=Input(shape=(224, 224, 3)))

headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(lb.classes_), activation="softmax")(headModel)

model = Model(inputs=baseModel.input, outputs=headModel)

for layer in baseModel.layers:
    layer.trainable = False
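# Freeze the entire ResNet-50 base so that only the newly added classification head is
# updated during this first round of training.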

# Compile the model
print("[INFO] Compiling the model...")
opt = SGD(lr=1e-4, momentum=0.9, decay=1e-4 / args["epochs"])
model.compile(loss="categorical_crossentropy",
              optimizer=opt)
def inception_v4_model(img_rows,
                       img_cols,
                       color_type=1,
                       num_classes=None,
                       dropout_keep_prob=0.2):
    '''
    Inception V4 Model for Keras
    Model Schema is based on
    https://github.com/kentsommer/keras-inceptionV4
    ImageNet Pretrained Weights 
    Theano: https://github.com/kentsommer/keras-inceptionV4/releases/download/2.0/inception-v4_weights_th_dim_ordering_th_kernels.h5
    TensorFlow: https://github.com/kentsommer/keras-inceptionV4/releases/download/2.0/inception-v4_weights_tf_dim_ordering_tf_kernels.h5
    Parameters:
      img_rows, img_cols - resolution of inputs
      channel - 1 for grayscale, 3 for color 
      num_classes - number of class labels for our classification task
    '''

    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_dim_ordering() == 'th':
        inputs = Input((3, 299, 299))
    else:
        inputs = Input((299, 299, 3))

    # Make inception base
    net = inception_v4_base(inputs)

    # Final pooling and prediction

    # 8 x 8 x 1536
    net_old = AveragePooling2D((8, 8), border_mode='valid')(net)

    # 1 x 1 x 1536
    net_old = Dropout(dropout_keep_prob)(net_old)
    net_old = Flatten()(net_old)

    # 1536
    predictions = Dense(output_dim=1001, activation='softmax')(net_old)

    model = Model(inputs, predictions, name='inception_v4')
    weights_path = inception4_weights
    #    if K.image_dim_ordering() == 'th':
    #      # Use pre-trained weights for Theano backend
    #      weights_path = 'imagenet_models/inception-v4_weights_th_dim_ordering_th_kernels.h5'
    #    else:
    #      # Use pre-trained weights for Tensorflow backend
    #      weights_path = 'imagenet_models/inception-v4_weights_tf_dim_ordering_tf_kernels.h5'

    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    net_ft = AveragePooling2D((8, 8), border_mode='valid')(net)
    net_ft = Dropout(dropout_keep_prob)(net_ft)
    net_ft = Flatten()(net_ft)
    predictions_ft = Dense(output_dim=num_classes,
                           activation='softmax')(net_ft)

    model = Model(inputs, predictions_ft, name='inception_v4')

    # Learning rate is changed to 0.001
    #    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    #    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model
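
# A minimal usage sketch (hypothetical values): assuming the module-level inception4_weights
# path points at the pretrained file, a 10-class fine-tuning model could be built with
#   model = inception_v4_model(299, 299, color_type=3, num_classes=10)
#   model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])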
Beispiel #25
0
def Baseline(nb_classes,
             img_dim,
             depth,
             nb_dense_block,
             growth_rate,
             nb_filter,
             dropout_rate=None,
             weight_decay=1E-4,
             compression=0.5):
    if K.image_dim_ordering() == "th":
        concat_axis = 1
    elif K.image_dim_ordering() == "tf":
        concat_axis = -1
    model_input = Input(shape=(224, 224, 3))
    #init block
    print("init block...")
    #==================================================================================
    # x = ConvBNAct(model_input,32,3,3)
    # x = ConvBNAct(x,64,3,3)
    # if dropout_rate:
    #     x = Dropout(dropout_rate)(x)
    # x_1 = AveragePooling2D((2, 2), strides=(2,2))(x)
    #==================================================================================
    x = Convolution2D(32,
                      5,
                      5,
                      init="he_uniform",
                      border_mode="same",
                      bias=False,
                      W_regularizer=l2(weight_decay),
                      subsample=(2, 2))(model_input)
    x = BatchNormalization(mode=0,
                           axis=-1,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x_1 = Activation('relu')(x)
    #==================================================================================

    #main flow
    print("main flow...")
    #==================================================================================
    #nb_groups = 32
    #slices = [4,8,16]
    nb_block = 4
    c_init = 8
    c_factor = [1, 1, 2, 4]
    cs = [64, 128, 256, 512]

    #stage 1
    #---------------------------------------------------------------
    for g in range(nb_block):
        print("nb block%d" % g)
        merge_adapting_unit = []
        merge_path = []
        merge_block = []
        c = c_init * c_factor[g]
        # merge_block.append(x)
        for i in range(int(cs[g] / c)):
            print("nb block %d nb group %d" % (g, i))
            # res_1 = x
            x = Convolution2D(c,
                              1,
                              1,
                              init="he_uniform",
                              border_mode="same",
                              bias=False,
                              W_regularizer=l2(weight_decay))(x_1)
            merge_block.append(x)
            x = BatchNormalization(mode=0,
                                   axis=-1,
                                   gamma_regularizer=l2(weight_decay),
                                   beta_regularizer=l2(weight_decay))(x)
            x = Activation('relu')(x)
            # x = ConvBNAct(x_1,slices[g],1,1)

            #merge_unit = []
            #merge_unit.append(x)

            x = BottleNeck(x, c, 0.5)
            #x = BottleNeck(x,c,0.5)

            #merge_unit.append(x)
            #x = merge(merge_unit, mode='sum')
            #merge_unit = []
            #merge_unit.append(x)

            x = BottleNeck(x, c, 0.5)
            x = BottleNeck(x, c, 0.5)

            #merge_unit.append(x)
            #x = merge(merge_unit, mode='sum')
            merge_path.append(x)
        x = merge(merge_path, mode='concat', concat_axis=concat_axis)
        block_input = merge(merge_block,
                            mode='concat',
                            concat_axis=concat_axis)
        x_1 = merge([x, block_input], mode='sum')
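        # Each block splits its input into cs[g]/c parallel paths of width c (similar in
        # spirit to ResNeXt's grouped bottlenecks), concatenates the path outputs, and adds
        # a shortcut built from the concatenated 1x1 projections, so the whole block acts
        # as one wide residual unit.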

        if g != nb_block - 1:
            merge_adapting_unit.append(x_1)
            x = BottleNeck(x_1, int(cs[g]), 0.5)
            x = BottleNeck(x, int(cs[g]), 0.5)
            merge_adapting_unit.append(x)
            x = merge(merge_adapting_unit, mode='sum')
            # x = BottleNeck(x,slices[g]*nb_groups*2,0.5)
            x_1 = AveragePooling2D((2, 2), strides=(2, 2))(x)

    #end flow
    print("end flow...")
    #==================================================================================
    merge_end = []
    merge_end.append(x_1)
    x = BottleNeck(x_1, int(cs[g]), 0.5)
    x = BottleNeck(x, int(cs[g]), 0.5)
    merge_end.append(x)
    x = merge(merge_end, mode='sum')
    x = GlobalAveragePooling2D(dim_ordering="tf")(x)
    x = Dense(nb_classes,
              activation='softmax',
              W_regularizer=l2(weight_decay),
              b_regularizer=l2(weight_decay))(x)

    Baseline = Model(input=[model_input], output=[x], name="Baseline")
    #==================================================================================
    return Baseline
Beispiel #26
0
def bounding_box_prediction_layers(inputs, bboxes_per_cell, batch_size):

    endpoints = {}

    branch88 = Conv2D(96, (1, 1), strides=1, padding='same')(inputs)
    branch88 = Conv2D(96, (3, 3), strides=1, padding='same')(branch88)
    endpoints['branch88_locs'] = Conv2D(bboxes_per_cell * 4, (1, 1),
                                        strides=1,
                                        padding='same')(branch88)
    endpoints['branch88_confs'] = Conv2D(bboxes_per_cell, (1, 1),
                                         strides=1,
                                         padding='same',
                                         activation='sigmoid')(branch88)

    branch66 = Conv2D(96, (3, 3), strides=1, padding='same')(inputs)
    branch66 = Conv2D(96, (3, 3), strides=1, padding='valid')(branch66)
    endpoints['branch66_locs'] = Conv2D(bboxes_per_cell * 4, (1, 1),
                                        strides=1,
                                        padding='same')(branch66)
    endpoints['branch66_confs'] = Conv2D(bboxes_per_cell, (1, 1),
                                         strides=1,
                                         padding='same',
                                         activation='sigmoid')(branch66)

    rightBranchBase = Conv2D(256, (3, 3), strides=(2, 2),
                             padding='same')(inputs)

    branch44 = Conv2D(128, (3, 3), strides=1, padding='same')(rightBranchBase)
    endpoints['branch44_locs'] = Conv2D(bboxes_per_cell * 4, (1, 1),
                                        strides=1,
                                        padding='same')(branch44)
    endpoints['branch44_confs'] = Conv2D(bboxes_per_cell, (1, 1),
                                         strides=1,
                                         padding='same',
                                         activation='sigmoid')(branch44)

    branch33 = Conv2D(128, (1, 1), strides=1, padding='same')(rightBranchBase)
    branch33 = Conv2D(96, (2, 2), strides=1, padding='valid')(branch33)
    endpoints['branch33_locs'] = Conv2D(bboxes_per_cell * 4, (1, 1),
                                        strides=1,
                                        padding='same')(branch33)
    endpoints['branch33_confs'] = Conv2D(bboxes_per_cell, (1, 1),
                                         strides=1,
                                         padding='same',
                                         activation='sigmoid')(branch33)

    branch22 = Conv2D(128, (1, 1), strides=1, padding='same')(rightBranchBase)
    branch22 = Conv2D(96, (3, 3), strides=1, padding='valid')(branch22)
    endpoints['branch22_locs'] = Conv2D(bboxes_per_cell * 4, (1, 1),
                                        strides=1,
                                        padding='same')(branch22)
    endpoints['branch22_confs'] = Conv2D(bboxes_per_cell, (1, 1),
                                         strides=1,
                                         padding='same',
                                         activation='sigmoid')(branch22)

    branch11 = AveragePooling2D((8, 8), strides=(1, 1),
                                padding='valid')(inputs)
    endpoints['branch11_locs'] = Conv2D(4, (1, 1), strides=1,
                                        padding='same')(branch11)
    endpoints['branch11_confs'] = Conv2D(1, (1, 1),
                                         strides=1,
                                         padding='same',
                                         activation='sigmoid')(branch11)

    # batch_size = tf.shape(inputs)[0]

    locs88 = Reshape((-1, 4))(endpoints['branch88_locs'])
    confs88 = Reshape((-1, 1))(endpoints['branch88_confs'])
    locs66 = Reshape((-1, 4))(endpoints['branch66_locs'])
    confs66 = Reshape((-1, 1))(endpoints['branch66_confs'])
    locs44 = Reshape((-1, 4))(endpoints['branch44_locs'])
    confs44 = Reshape((-1, 1))(endpoints['branch44_confs'])
    locs33 = Reshape((-1, 4))(endpoints['branch33_locs'])
    confs33 = Reshape((-1, 1))(endpoints['branch33_confs'])
    locs22 = Reshape((-1, 4))(endpoints['branch22_locs'])
    confs22 = Reshape((-1, 1))(endpoints['branch22_confs'])
    locs11 = Reshape((-1, 4))(endpoints['branch11_locs'])
    confs11 = Reshape((-1, 1))(endpoints['branch11_confs'])

    locs = Concatenate(axis=1)(
        [locs88, locs66, locs44, locs33, locs22, locs11])
    confs = Concatenate(axis=1)(
        [confs88, confs66, confs44, confs33, confs22, confs11])
    loc_confs = Concatenate(axis=2)([locs, confs])
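    # Every branch predicts 4 box offsets plus one sigmoid objectness score per cell at its
    # own scale; after the reshapes, boxes from all scales are stacked along axis 1 and the
    # scores appended on the last axis, yielding a single (batch, n_boxes, 5) tensor.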
    return loc_confs
    #with tf.device('/device:GPU:3'):
    #x5 = ResidualR(256, 256, x4pool)   #12x12x256
    #x5 = ResidualR(256, 256, x5)
    #x5 = ResidualR(256, 256, x5)   #12x12x256
    x5 = Conv2D(512, (3, 3), padding='same',
                kernel_initializer='he_normal')(x4pool)
    x5 = BatchNormalization()(x5)
    x5 = layers.LeakyReLU()(x5)
    #x5pool = MaxPooling2D(pool_size=(2,2))(x5)

    #x6 = ResidualR(256, 512, x5pool)   #6x6x512
    #x6 = ResidualR(512, 512, x6)
    #x6 = ResidualR(512, 512, x6)   #6x6x512

    block_shape = K.int_shape(x5)
    xpool = AveragePooling2D(pool_size=(block_shape[1], block_shape[2]),
                             strides=(1, 1))(x5)
    #xpool = MaxPooling2D(pool_size=(2,2))(x6)

    flatten = layers.Flatten()(xpool)

    #dense1 = layers.Dense(512)(flatten)
    #dense1 = layers.LeakyReLU()(dense1)
    #dense1 = layers.Dropout(0.5)(dense1)

    #dense2 = layers.Dense(1024)(dense1)
    #dense2 = layers.LeakyReLU()(dense2)
    #dense2 = layers.Dropout(0.5)(dense2)

    output = Dense(20,
                   use_bias=False,
                   kernel_regularizer=l2(5e-4),
Beispiel #28
0
LR_DECAY = 0.07
N_CLASSES = 10
BN_DECAY = 0.999
BN_EPS = 0.001
STEER_COR = 0.2
TOP_CROP = 50
BOT_CROP = 22

# Generators
tr_gen = batch_generator(tr_samples, batch_size=BATCH_SIZE)
va_gen = batch_generator(va_samples, batch_size=BATCH_SIZE)

# Preprocessing
model = Sequential()
#model.add(Cropping2D(cropping=((TOP_CROP, BOT_CROP), (0, 0)), input_shape=(H, W, C)))
model.add(AveragePooling2D(input_shape=(H, W, C)))
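# AveragePooling2D with its default 2x2 pool and stride halves the frame height and width
# before normalisation; the Cropping2D alternative is left commented out above.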
model.add(Lambda(lambda x: x / 255 - 0.5))  # normalize
# Conv1
model.add(
    Convolution2D(24,
                  5,
                  5,
                  init='glorot_uniform',
                  border_mode='same',
                  subsample=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
# Conv2
model.add(Convolution2D(36, 5, 5, init='glorot_uniform', subsample=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
Beispiel #29
0
def builtModel():
    myInput = Input(shape=(96, 96, 3))

    x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_1')(x)
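    # tf.nn.lrn applies local response normalisation (as in the original GoogLeNet stem);
    # wrapping it in a Lambda lets the raw TensorFlow op participate in the Keras graph.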
    x = Conv2D(64, (1, 1), name='conv2')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn2')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(192, (3, 3), name='conv3')(x)
    x = BatchNormalization(axis=3, epsilon=0.00001, name='bn3')(x)
    x = Activation('relu')(x)
    x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name='lrn_2')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D(pool_size=3, strides=2)(x)

    # Inception3a
    inception_3a_3x3 = Conv2D(96, (1, 1), name='inception_3a_3x3_conv1')(x)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn1')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)
    inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
    inception_3a_3x3 = Conv2D(128, (3, 3),
                              name='inception_3a_3x3_conv2')(inception_3a_3x3)
    inception_3a_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_3x3_bn2')(inception_3a_3x3)
    inception_3a_3x3 = Activation('relu')(inception_3a_3x3)

    inception_3a_5x5 = Conv2D(16, (1, 1), name='inception_3a_5x5_conv1')(x)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn1')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)
    inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
    inception_3a_5x5 = Conv2D(32, (5, 5),
                              name='inception_3a_5x5_conv2')(inception_3a_5x5)
    inception_3a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_5x5_bn2')(inception_3a_5x5)
    inception_3a_5x5 = Activation('relu')(inception_3a_5x5)

    inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    inception_3a_pool = Conv2D(
        32, (1, 1), name='inception_3a_pool_conv')(inception_3a_pool)
    inception_3a_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3a_pool_bn')(inception_3a_pool)
    inception_3a_pool = Activation('relu')(inception_3a_pool)
    inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3,
                                                        4)))(inception_3a_pool)

    inception_3a_1x1 = Conv2D(64, (1, 1), name='inception_3a_1x1_conv')(x)
    inception_3a_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3a_1x1_bn')(inception_3a_1x1)
    inception_3a_1x1 = Activation('relu')(inception_3a_1x1)

    inception_3a = concatenate([
        inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1
    ],
                               axis=3)

    # Inception3b
    inception_3b_3x3 = Conv2D(96, (1, 1),
                              name='inception_3b_3x3_conv1')(inception_3a)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn1')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)
    inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
    inception_3b_3x3 = Conv2D(128, (3, 3),
                              name='inception_3b_3x3_conv2')(inception_3b_3x3)
    inception_3b_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_3x3_bn2')(inception_3b_3x3)
    inception_3b_3x3 = Activation('relu')(inception_3b_3x3)

    inception_3b_5x5 = Conv2D(32, (1, 1),
                              name='inception_3b_5x5_conv1')(inception_3a)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn1')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)
    inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
    inception_3b_5x5 = Conv2D(64, (5, 5),
                              name='inception_3b_5x5_conv2')(inception_3b_5x5)
    inception_3b_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_5x5_bn2')(inception_3b_5x5)
    inception_3b_5x5 = Activation('relu')(inception_3b_5x5)

    inception_3b_pool = Lambda(lambda x: x**2, name='power2_3b')(inception_3a)
    inception_3b_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: x * 9,
                               name='mult9_3b')(inception_3b_pool)
    inception_3b_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_3b')(inception_3b_pool)
    inception_3b_pool = Conv2D(
        64, (1, 1), name='inception_3b_pool_conv')(inception_3b_pool)
    inception_3b_pool = BatchNormalization(
        axis=3, epsilon=0.00001,
        name='inception_3b_pool_bn')(inception_3b_pool)
    inception_3b_pool = Activation('relu')(inception_3b_pool)
    inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)

    inception_3b_1x1 = Conv2D(64, (1, 1),
                              name='inception_3b_1x1_conv')(inception_3a)
    inception_3b_1x1 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3b_1x1_bn')(inception_3b_1x1)
    inception_3b_1x1 = Activation('relu')(inception_3b_1x1)

    inception_3b = concatenate([
        inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1
    ],
                               axis=3)

    # Inception3c
    inception_3c_3x3 = Conv2D(128, (1, 1),
                              strides=(1, 1),
                              name='inception_3c_3x3_conv1')(inception_3b)
    inception_3c_3x3 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3c_3x3_bn1')(inception_3c_3x3)
    inception_3c_3x3 = Activation('relu')(inception_3c_3x3)
    inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
    inception_3c_3x3 = Conv2D(256, (3, 3),
                              strides=(2, 2),
                              name='inception_3c_3x3_conv' +
                              '2')(inception_3c_3x3)
    inception_3c_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_3c_3x3_bn' +
                                          '2')(inception_3c_3x3)
    inception_3c_3x3 = Activation('relu')(inception_3c_3x3)

    inception_3c_5x5 = Conv2D(32, (1, 1),
                              strides=(1, 1),
                              name='inception_3c_5x5_conv1')(inception_3b)
    inception_3c_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_3c_5x5_bn1')(inception_3c_5x5)
    inception_3c_5x5 = Activation('relu')(inception_3c_5x5)
    inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
    inception_3c_5x5 = Conv2D(64, (5, 5),
                              strides=(2, 2),
                              name='inception_3c_5x5_conv' +
                              '2')(inception_3c_5x5)
    inception_3c_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_3c_5x5_bn' +
                                          '2')(inception_3c_5x5)
    inception_3c_5x5 = Activation('relu')(inception_3c_5x5)

    inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
    inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_3c_pool)

    inception_3c = concatenate(
        [inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)

    #inception 4a
    inception_4a_3x3 = Conv2D(96, (1, 1),
                              strides=(1, 1),
                              name='inception_4a_3x3_conv' + '1')(inception_3c)
    inception_4a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_3x3_bn' +
                                          '1')(inception_4a_3x3)
    inception_4a_3x3 = Activation('relu')(inception_4a_3x3)
    inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
    inception_4a_3x3 = Conv2D(192, (3, 3),
                              strides=(1, 1),
                              name='inception_4a_3x3_conv' +
                              '2')(inception_4a_3x3)
    inception_4a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_3x3_bn' +
                                          '2')(inception_4a_3x3)
    inception_4a_3x3 = Activation('relu')(inception_4a_3x3)

    inception_4a_5x5 = Conv2D(32, (1, 1),
                              strides=(1, 1),
                              name='inception_4a_5x5_conv1')(inception_3c)
    inception_4a_5x5 = BatchNormalization(
        axis=3, epsilon=0.00001, name='inception_4a_5x5_bn1')(inception_4a_5x5)
    inception_4a_5x5 = Activation('relu')(inception_4a_5x5)
    inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
    inception_4a_5x5 = Conv2D(64, (5, 5),
                              strides=(1, 1),
                              name='inception_4a_5x5_conv' +
                              '2')(inception_4a_5x5)
    inception_4a_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_5x5_bn' +
                                          '2')(inception_4a_5x5)
    inception_4a_5x5 = Activation('relu')(inception_4a_5x5)

    inception_4a_pool = Lambda(lambda x: x**2, name='power2_4a')(inception_3c)
    inception_4a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: x * 9,
                               name='mult9_4a')(inception_4a_pool)
    inception_4a_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_4a')(inception_4a_pool)

    inception_4a_pool = Conv2D(128, (1, 1),
                               strides=(1, 1),
                               name='inception_4a_pool_conv' +
                               '')(inception_4a_pool)
    inception_4a_pool = BatchNormalization(axis=3,
                                           epsilon=0.00001,
                                           name='inception_4a_pool_bn' +
                                           '')(inception_4a_pool)
    inception_4a_pool = Activation('relu')(inception_4a_pool)
    inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)

    inception_4a_1x1 = Conv2D(256, (1, 1),
                              strides=(1, 1),
                              name='inception_4a_1x1_conv' + '')(inception_3c)
    inception_4a_1x1 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4a_1x1_bn' +
                                          '')(inception_4a_1x1)
    inception_4a_1x1 = Activation('relu')(inception_4a_1x1)

    inception_4a = concatenate([
        inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1
    ],
                               axis=3)

    #inception4e
    inception_4e_3x3 = Conv2D(160, (1, 1),
                              strides=(1, 1),
                              name='inception_4e_3x3_conv' + '1')(inception_4a)
    inception_4e_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_3x3_bn' +
                                          '1')(inception_4e_3x3)
    inception_4e_3x3 = Activation('relu')(inception_4e_3x3)
    inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
    inception_4e_3x3 = Conv2D(256, (3, 3),
                              strides=(2, 2),
                              name='inception_4e_3x3_conv' +
                              '2')(inception_4e_3x3)
    inception_4e_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_3x3_bn' +
                                          '2')(inception_4e_3x3)
    inception_4e_3x3 = Activation('relu')(inception_4e_3x3)

    inception_4e_5x5 = Conv2D(64, (1, 1),
                              strides=(1, 1),
                              name='inception_4e_5x5_conv' + '1')(inception_4a)
    inception_4e_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_5x5_bn' +
                                          '1')(inception_4e_5x5)
    inception_4e_5x5 = Activation('relu')(inception_4e_5x5)
    inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
    inception_4e_5x5 = Conv2D(128, (5, 5),
                              strides=(2, 2),
                              name='inception_4e_5x5_conv' +
                              '2')(inception_4e_5x5)
    inception_4e_5x5 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_4e_5x5_bn' +
                                          '2')(inception_4e_5x5)
    inception_4e_5x5 = Activation('relu')(inception_4e_5x5)

    inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
    inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0,
                                                        1)))(inception_4e_pool)

    inception_4e = concatenate(
        [inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)

    #inception5a
    inception_5a_3x3 = Conv2D(96, (1, 1),
                              strides=(1, 1),
                              name='inception_5a_3x3_conv' + '1')(inception_4e)
    inception_5a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5a_3x3_bn' +
                                          '1')(inception_5a_3x3)
    inception_5a_3x3 = Activation('relu')(inception_5a_3x3)
    inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
    inception_5a_3x3 = Conv2D(384, (3, 3),
                              strides=(1, 1),
                              name='inception_5a_3x3_conv' +
                              '2')(inception_5a_3x3)
    inception_5a_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5a_3x3_bn' +
                                          '2')(inception_5a_3x3)
    inception_5a_3x3 = Activation('relu')(inception_5a_3x3)

    inception_5a_pool = Lambda(lambda x: x**2, name='power2_5a')(inception_4e)
    inception_5a_pool = AveragePooling2D(pool_size=(3, 3),
                                         strides=(3, 3))(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: x * 9,
                               name='mult9_5a')(inception_5a_pool)
    inception_5a_pool = Lambda(lambda x: K.sqrt(x),
                               name='sqrt_5a')(inception_5a_pool)

    inception_5a_pool = Conv2D(96, (1, 1),
                               strides=(1, 1),
                               name='inception_5a_pool_conv' +
                               '')(inception_5a_pool)
    inception_5a_pool = BatchNormalization(axis=3,
                                           epsilon=0.00001,
                                           name='inception_5a_pool_bn' +
                                           '')(inception_5a_pool)
    inception_5a_pool = Activation('relu')(inception_5a_pool)
    inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)

    inception_5a_1x1 = Conv2D(256, (1, 1),
                              strides=(1, 1),
                              name='inception_5a_1x1_conv' + '')(inception_4e)
    inception_5a_1x1 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5a_1x1_bn' +
                                          '')(inception_5a_1x1)
    inception_5a_1x1 = Activation('relu')(inception_5a_1x1)

    inception_5a = concatenate(
        [inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)

    #inception_5b
    inception_5b_3x3 = Conv2D(96, (1, 1),
                              strides=(1, 1),
                              name='inception_5b_3x3_conv' + '1')(inception_5a)
    inception_5b_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5b_3x3_bn' +
                                          '1')(inception_5b_3x3)
    inception_5b_3x3 = Activation('relu')(inception_5b_3x3)
    inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
    inception_5b_3x3 = Conv2D(384, (3, 3),
                              strides=(1, 1),
                              name='inception_5b_3x3_conv' +
                              '2')(inception_5b_3x3)
    inception_5b_3x3 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5b_3x3_bn' +
                                          '2')(inception_5b_3x3)
    inception_5b_3x3 = Activation('relu')(inception_5b_3x3)

    inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)

    inception_5b_pool = Conv2D(96, (1, 1),
                               strides=(1, 1),
                               name='inception_5b_pool_conv' +
                               '')(inception_5b_pool)
    inception_5b_pool = BatchNormalization(axis=3,
                                           epsilon=0.00001,
                                           name='inception_5b_pool_bn' +
                                           '')(inception_5b_pool)
    inception_5b_pool = Activation('relu')(inception_5b_pool)

    inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)

    inception_5b_1x1 = Conv2D(256, (1, 1),
                              strides=(1, 1),
                              name='inception_5b_1x1_conv' + '')(inception_5a)
    inception_5b_1x1 = BatchNormalization(axis=3,
                                          epsilon=0.00001,
                                          name='inception_5b_1x1_bn' +
                                          '')(inception_5b_1x1)
    inception_5b_1x1 = Activation('relu')(inception_5b_1x1)

    inception_5b = concatenate(
        [inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)

    av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
    reshape_layer = Flatten()(av_pool)
    dense_layer = Dense(128, name='dense_layer')(reshape_layer)
    norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1),
                        name='norm_layer')(dense_layer)

    # Final Model
    model = Model(inputs=[myInput], outputs=norm_layer)
    return model
Beispiel #30
0
def inceptionv2(input,
                dropout_keep_prob=0.8,
                num_classes=1000,
                is_training=True,
                scope='InceptionV2',
                supermd=False):
    with tf.name_scope(scope, "InceptionV2", [input]):
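        # Each original 5x5 branch below is factorised into two stacked 3x3 convolutions
        # (the *_5x5_a / *_5x5_b pairs), the main change Inception-v2 introduces over the
        # GoogLeNet layout this code otherwise follows.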

        conv1_7x7_s2 = Conv2D(64, (7, 7),
                              strides=(2, 2),
                              padding='same',
                              activation='relu',
                              name='conv1/7x7_s2',
                              kernel_regularizer=l2(0.0002))(input)

        conv1_zero_pad = ZeroPadding2D(padding=(1, 1))(conv1_7x7_s2)

        pool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    padding='valid',
                                    name='pool1/3x3_s2')(conv1_zero_pad)

        pool1_norm1 = BatchNormalization(axis=3,
                                         scale=False,
                                         name='pool1/norm1')(pool1_3x3_s2)

        conv2_3x3_reduce = Conv2D(64, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='conv2/3x3_reduce',
                                  kernel_regularizer=l2(0.0002))(pool1_norm1)

        conv2_3x3 = Conv2D(192, (3, 3),
                           padding='same',
                           activation='relu',
                           name='conv2/3x3',
                           kernel_regularizer=l2(0.0002))(conv2_3x3_reduce)

        conv2_norm2 = BatchNormalization(axis=3,
                                         scale=False,
                                         name='conv2/norm2')(conv2_3x3)

        conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_norm2)

        pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    padding='valid',
                                    name='pool2/3x3_s2')(conv2_zero_pad)

        inception_3a_1x1 = Conv2D(64, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='inception_3a/1x1',
                                  kernel_regularizer=l2(0.0002))(pool2_3x3_s2)

        inception_3a_3x3_reduce = Conv2D(
            96, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3a/3x3_reduce',
            kernel_regularizer=l2(0.0002))(pool2_3x3_s2)

        inception_3a_3x3 = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3a/3x3',
            kernel_regularizer=l2(0.0002))(inception_3a_3x3_reduce)

        inception_3a_5x5_reduce = Conv2D(
            16, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3a/5x5_reduce',
            kernel_regularizer=l2(0.0002))(pool2_3x3_s2)

        inception_3a_5x5_a = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3a/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_3a_5x5_reduce)

        inception_3a_5x5_b = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3a/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_3a_5x5_a)

        inception_3a_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_3a/pool')(pool2_3x3_s2)

        inception_3a_pool_proj = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3a/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_3a_pool)

        inception_3a_output = concatenate([
            inception_3a_1x1, inception_3a_3x3, inception_3a_5x5_b,
            inception_3a_pool_proj
        ],
                                          axis=3,
                                          name='inception_3a/output')

        inception_3b_1x1 = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/1x1',
            kernel_regularizer=l2(0.0002))(inception_3a_output)

        inception_3b_3x3_reduce = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_3a_output)

        inception_3b_3x3 = Conv2D(
            192, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3b/3x3',
            kernel_regularizer=l2(0.0002))(inception_3b_3x3_reduce)

        inception_3b_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_3a_output)

        inception_3b_5x5_a = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3b/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_3b_5x5_reduce)

        inception_3b_5x5_b = Conv2D(
            96, (3, 3),
            padding='same',
            activation='relu',
            name='inception_3b/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_3b_5x5_a)

        inception_3b_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_3b/pool')(inception_3a_output)

        inception_3b_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_3b/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_3b_pool)

        inception_3b_output = concatenate([
            inception_3b_1x1, inception_3b_3x3, inception_3b_5x5_b,
            inception_3b_pool_proj
        ],
                                          axis=3,
                                          name='inception_3b/output')

        inception_3b_output_zero_pad = ZeroPadding2D(
            padding=(1, 1))(inception_3b_output)

        pool3_3x3_s2 = MaxPooling2D(
            pool_size=(3, 3),
            strides=(2, 2),
            padding='valid',
            name='pool3/3x3_s2')(inception_3b_output_zero_pad)

        inception_4a_1x1 = Conv2D(192, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='inception_4a/1x1',
                                  kernel_regularizer=l2(0.0002))(pool3_3x3_s2)

        inception_4a_3x3_reduce = Conv2D(
            96, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4a/3x3_reduce',
            kernel_regularizer=l2(0.0002))(pool3_3x3_s2)

        inception_4a_3x3 = Conv2D(
            208, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4a/3x3',
            kernel_regularizer=l2(0.0002))(inception_4a_3x3_reduce)

        inception_4a_5x5_reduce = Conv2D(
            16, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4a/5x5_reduce',
            kernel_regularizer=l2(0.0002))(pool3_3x3_s2)

        inception_4a_5x5_a = Conv2D(
            48, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4a/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4a_5x5_reduce)

        inception_4a_5x5_b = Conv2D(
            48, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4a/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4a_5x5_a)

        inception_4a_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4a/pool')(pool3_3x3_s2)

        inception_4a_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4a/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4a_pool)

        inception_4a_output = concatenate([
            inception_4a_1x1, inception_4a_3x3, inception_4a_5x5_b,
            inception_4a_pool_proj
        ],
                                          axis=3,
                                          name='inception_4a/output')

        loss1_ave_pool = AveragePooling2D(
            pool_size=(5, 5), strides=(3, 3),
            name='loss1/ave_pool')(inception_4a_output)

        loss1_conv = Conv2D(128, (1, 1),
                            padding='same',
                            activation='relu',
                            name='loss1/conv',
                            kernel_regularizer=l2(0.0002))(loss1_ave_pool)

        loss1_flat = Flatten()(loss1_conv)

        loss1_fc = Dense(1024,
                         activation='relu',
                         name='loss1/fc',
                         kernel_regularizer=l2(0.0002))(loss1_flat)

        # Keras Dropout expects the fraction to drop, so convert from the keep probability
        loss1_drop_fc = Dropout(1 - dropout_keep_prob)(loss1_fc,
                                                       training=is_training)

        if supermd:
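            # supermd replaces the flat classifier with a cascaded head: each intermediate
            # Dense stage keeps one logit for the output and forwards its remaining
            # ReLU-activated units to a smaller stage, and the kept logits plus the final
            # 2-unit stage are concatenated into the prediction, forming a coarse-to-fine
            # decision chain.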
            loss1_classifier_a = Dense(
                4, name='loss1/classifiera',
                kernel_regularizer=l2(0.0002))(loss1_drop_fc)
            loss1_classifier_a, loss1_classifier_a2 = tf.split(
                loss1_classifier_a, [1, 3], 1)
            loss1_classifier_a2 = Activation('relu')(loss1_classifier_a2)
            loss1_classifier_b = Dense(
                3, name='loss1/classifierb',
                kernel_regularizer=l2(0.0002))(loss1_classifier_a2)
            loss1_classifier_b, loss1_classifier_b2 = tf.split(
                loss1_classifier_b, [1, 2], 1)
            loss1_classifier_b2 = Activation('relu')(loss1_classifier_b2)
            loss1_classifier_c = Dense(
                2, name='loss1/classifierc',
                kernel_regularizer=l2(0.0002))(loss1_classifier_b2)
            loss1_classifier = concatenate(
                [loss1_classifier_a, loss1_classifier_b, loss1_classifier_c],
                axis=-1)
        else:
            loss1_classifier = Dense(
                num_classes,
                name='loss1/classifier',
                kernel_regularizer=l2(0.0002))(loss1_drop_fc)

        inception_4b_1x1 = Conv2D(
            160, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/1x1',
            kernel_regularizer=l2(0.0002))(inception_4a_output)

        inception_4b_3x3_reduce = Conv2D(
            112, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4a_output)

        inception_4b_3x3 = Conv2D(
            224, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4b/3x3',
            kernel_regularizer=l2(0.0002))(inception_4b_3x3_reduce)

        inception_4b_5x5_reduce = Conv2D(
            24, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4a_output)

        inception_4b_5x5_a = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4b/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4b_5x5_reduce)

        inception_4b_5x5_b = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4b/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4b_5x5_a)

        inception_4b_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4b/pool')(inception_4a_output)

        inception_4b_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4b/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4b_pool)

        inception_4b_output = concatenate([
            inception_4b_1x1, inception_4b_3x3, inception_4b_5x5_b,
            inception_4b_pool_proj
        ],
                                          axis=3,
                                          name='inception_4b_output')

        inception_4c_1x1 = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/1x1',
            kernel_regularizer=l2(0.0002))(inception_4b_output)

        inception_4c_3x3_reduce = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4b_output)

        inception_4c_3x3 = Conv2D(
            256, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4c/3x3',
            kernel_regularizer=l2(0.0002))(inception_4c_3x3_reduce)

        inception_4c_5x5_reduce = Conv2D(
            24, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4b_output)

        inception_4c_5x5_a = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4c/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4c_5x5_reduce)

        inception_4c_5x5_b = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4c/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4c_5x5_a)

        inception_4c_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4c/pool')(inception_4b_output)

        inception_4c_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4c/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4c_pool)

        inception_4c_output = concatenate([
            inception_4c_1x1, inception_4c_3x3, inception_4c_5x5_b,
            inception_4c_pool_proj
        ],
                                          axis=3,
                                          name='inception_4c/output')

        inception_4d_1x1 = Conv2D(
            112, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/1x1',
            kernel_regularizer=l2(0.0002))(inception_4c_output)

        inception_4d_3x3_reduce = Conv2D(
            144, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4c_output)

        inception_4d_3x3 = Conv2D(
            288, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4d/3x3',
            kernel_regularizer=l2(0.0002))(inception_4d_3x3_reduce)

        inception_4d_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4c_output)

        inception_4d_5x5_a = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4d/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4d_5x5_reduce)

        inception_4d_5x5_b = Conv2D(
            64, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4d/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4d_5x5_a)

        inception_4d_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4d/pool')(inception_4c_output)

        inception_4d_pool_proj = Conv2D(
            64, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4d/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4d_pool)

        inception_4d_output = concatenate([
            inception_4d_1x1, inception_4d_3x3, inception_4d_5x5_b,
            inception_4d_pool_proj
        ],
                                          axis=3,
                                          name='inception_4d/output')

        loss2_ave_pool = AveragePooling2D(
            pool_size=(5, 5), strides=(3, 3),
            name='loss2/ave_pool')(inception_4d_output)

        loss2_conv = Conv2D(128, (1, 1),
                            padding='same',
                            activation='relu',
                            name='loss2/conv',
                            kernel_regularizer=l2(0.0002))(loss2_ave_pool)

        loss2_flat = Flatten()(loss2_conv)

        loss2_fc = Dense(1024,
                         activation='relu',
                         name='loss2/fc',
                         kernel_regularizer=l2(0.0002))(loss2_flat)

        # Note: Keras Dropout takes the fraction of units to *drop*, so the
        # value passed as dropout_keep_prob is applied as the drop rate here.
        loss2_drop_fc = Dropout(dropout_keep_prob)(loss2_fc,
                                                   training=is_training)
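
        # With `supermd` the head is a cascade of small Dense layers: each one
        # contributes one logit directly, while its remaining units pass through
        # a ReLU into the next Dense, giving 1 + 1 + 2 = 4 logits in total.
        # Otherwise a single Dense(num_classes) produces the auxiliary logits.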

        if supermd:
            loss2_classifier_a = Dense(
                4, name='loss2/classifiera',
                kernel_regularizer=l2(0.0002))(loss2_drop_fc)
            loss2_classifier_a, loss2_classifier_a2 = tf.split(
                loss2_classifier_a, [1, 3], 1)
            loss2_classifier_a2 = Activation('relu')(loss2_classifier_a2)
            loss2_classifier_b = Dense(
                3, name='loss2/classifierb',
                kernel_regularizer=l2(0.0002))(loss2_classifier_a2)
            loss2_classifier_b, loss2_classifier_b2 = tf.split(
                loss2_classifier_b, [1, 2], 1)
            loss2_classifier_b2 = Activation('relu')(loss2_classifier_b2)
            loss2_classifier_c = Dense(
                2, name='loss2/classifierc',
                kernel_regularizer=l2(0.0002))(loss2_classifier_b2)
            loss2_classifier = concatenate(
                [loss2_classifier_a, loss2_classifier_b, loss2_classifier_c],
                axis=-1)
        else:
            loss2_classifier = Dense(
                num_classes,
                name='loss2/classifier',
                kernel_regularizer=l2(0.0002))(loss2_drop_fc)

        inception_4e_1x1 = Conv2D(
            256, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/1x1',
            kernel_regularizer=l2(0.0002))(inception_4d_output)

        inception_4e_3x3_reduce = Conv2D(
            160, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_4d_output)

        inception_4e_3x3 = Conv2D(
            320, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4e/3x3',
            kernel_regularizer=l2(0.0002))(inception_4e_3x3_reduce)

        inception_4e_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_4d_output)

        inception_4e_5x5_a = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4e/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_4e_5x5_reduce)

        inception_4e_5x5_b = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_4e/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_4e_5x5_a)

        inception_4e_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_4e/pool')(inception_4d_output)

        inception_4e_pool_proj = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_4e/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_4e_pool)

        inception_4e_output = concatenate(
            [inception_4e_1x1, inception_4e_3x3, inception_4e_5x5_b,
             inception_4e_pool_proj],
            axis=3,
            name='inception_4e/output')
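
        # Transition between the 4x and 5x stages: zero-pad, then downsample
        # spatially with a stride-2 3x3 max pool.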

        inception_4e_output_zero_pad = ZeroPadding2D(
            padding=(1, 1))(inception_4e_output)

        pool4_3x3_s2 = MaxPooling2D(
            pool_size=(3, 3),
            strides=(2, 2),
            padding='valid',
            name='pool4/3x3_s2')(inception_4e_output_zero_pad)

        inception_5a_1x1 = Conv2D(256, (1, 1),
                                  padding='same',
                                  activation='relu',
                                  name='inception_5a/1x1',
                                  kernel_regularizer=l2(0.0002))(pool4_3x3_s2)

        inception_5a_3x3_reduce = Conv2D(
            160, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5a/3x3_reduce',
            kernel_regularizer=l2(0.0002))(pool4_3x3_s2)

        inception_5a_3x3 = Conv2D(
            320, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5a/3x3',
            kernel_regularizer=l2(0.0002))(inception_5a_3x3_reduce)

        inception_5a_5x5_reduce = Conv2D(
            32, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5a/5x5_reduce',
            kernel_regularizer=l2(0.0002))(pool4_3x3_s2)

        inception_5a_5x5_a = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5a/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_5a_5x5_reduce)

        inception_5a_5x5_b = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5a/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_5a_5x5_a)

        inception_5a_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_5a/pool')(pool4_3x3_s2)

        inception_5a_pool_proj = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5a/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_5a_pool)

        inception_5a_output = concatenate(
            [inception_5a_1x1, inception_5a_3x3, inception_5a_5x5_b,
             inception_5a_pool_proj],
            axis=3,
            name='inception_5a/output')

        inception_5b_1x1 = Conv2D(
            384, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/1x1',
            kernel_regularizer=l2(0.0002))(inception_5a_output)

        inception_5b_3x3_reduce = Conv2D(
            192, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/3x3_reduce',
            kernel_regularizer=l2(0.0002))(inception_5a_output)

        inception_5b_3x3 = Conv2D(
            384, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5b/3x3',
            kernel_regularizer=l2(0.0002))(inception_5b_3x3_reduce)

        inception_5b_5x5_reduce = Conv2D(
            48, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/5x5_reduce',
            kernel_regularizer=l2(0.0002))(inception_5a_output)

        inception_5b_5x5_a = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5b/5x5_a',
            kernel_regularizer=l2(0.0002))(inception_5b_5x5_reduce)

        inception_5b_5x5_b = Conv2D(
            128, (3, 3),
            padding='same',
            activation='relu',
            name='inception_5b/5x5_b',
            kernel_regularizer=l2(0.0002))(inception_5b_5x5_a)

        inception_5b_pool = MaxPooling2D(
            pool_size=(3, 3),
            strides=(1, 1),
            padding='same',
            name='inception_5b/pool')(inception_5a_output)

        inception_5b_pool_proj = Conv2D(
            128, (1, 1),
            padding='same',
            activation='relu',
            name='inception_5b/pool_proj',
            kernel_regularizer=l2(0.0002))(inception_5b_pool)

        inception_5b_output = concatenate(
            [inception_5b_1x1, inception_5b_3x3, inception_5b_5x5_b,
             inception_5b_pool_proj],
            axis=3,
            name='inception_5b/output')
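
        # Keep the final inception feature map; it is returned alongside the
        # logits, presumably for CAM-style visualisation together with the
        # classifier kernel returned below.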

        net = inception_5b_output

        # Global average pooling sized for 299x299 inputs (10x10 final feature map)
        pool5_10x10_s1 = AveragePooling2D(
            pool_size=(10, 10), strides=(1, 1),
            name='pool5/10x10_s1')(inception_5b_output)

        loss3_flat = Flatten()(pool5_10x10_s1)

        pool5_drop_10x10_s1 = Dropout(dropout_keep_prob)(loss3_flat,
                                                         training=is_training)
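
        # Main (loss3) classifier head on the globally pooled features. Its
        # Dense kernel is captured in w_variables so it can be returned.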

        if supermd:
            loss3_classifier_aw = Dense(4,
                                        name='loss3/classifiera',
                                        kernel_regularizer=l2(0.0002))
            loss3_classifier_a = loss3_classifier_aw(pool5_drop_10x10_s1)
            loss3_classifier_a, loss3_classifier_a2 = tf.split(
                loss3_classifier_a, [1, 3], 1)
            loss3_classifier_a2 = Activation('relu')(loss3_classifier_a2)
            loss3_classifier_bw = Dense(3,
                                        name='loss3/classifierb',
                                        kernel_regularizer=l2(0.0002))
            loss3_classifier_b = loss3_classifier_bw(loss3_classifier_a2)
            loss3_classifier_b, loss3_classifier_b2 = tf.split(
                loss3_classifier_b, [1, 2], 1)
            loss3_classifier_b2 = Activation('relu')(loss3_classifier_b2)
            loss3_classifier_cw = Dense(2,
                                        name='loss3/classifierc',
                                        kernel_regularizer=l2(0.0002))
            loss3_classifier_c = loss3_classifier_cw(loss3_classifier_b2)
            loss3_classifier = concatenate(
                [loss3_classifier_a, loss3_classifier_b, loss3_classifier_c],
                axis=-1)

            # get_weights() returns a numpy snapshot of the kernel/bias, so the
            # value converted to a tensor at the return is fixed at this point
            # rather than tracking the trainable variable.
            w_variables = loss3_classifier_aw.get_weights()

        else:
            loss3_classifier_w = Dense(num_classes,
                                       name='loss3/classifier',
                                       kernel_regularizer=l2(0.0002))

            loss3_classifier = loss3_classifier_w(pool5_drop_10x10_s1)

            w_variables = loss3_classifier_w.get_weights()
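
        # At training time the two auxiliary heads are added to the main logits
        # with weight 0.3 (as in the original GoogLeNet training scheme); at
        # inference only the loss3 head is used.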

        logits = tf.cond(
            tf.equal(is_training, tf.constant(True)),
            lambda: tf.add(
                loss3_classifier,
                tf.scalar_mul(tf.constant(0.3),
                              tf.add(loss1_classifier, loss2_classifier))),
            lambda: loss3_classifier)
    return logits, net, tf.convert_to_tensor(w_variables[0])
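

The triple returned above lends itself to CAM-style visualisation: `net` is the
last NHWC feature map and the returned kernel has shape (channels, num_classes),
assuming the 10x10 average pool collapses the map to 1x1 before Flatten. A
minimal, hypothetical sketch of that projection (the helper name and the shape
assumptions are not part of the original example):

import tensorflow as tf


def class_activation_maps(net, classifier_kernel):
    """Project the final feature map onto the classifier kernel.

    net:               [batch, H, W, C] feature map (inception_5b_output)
    classifier_kernel: [C, num_classes] kernel tensor returned by the builder
    Returns a coarse per-class activation map of shape [batch, H, W, num_classes].
    """
    # Contract the channel axis of the feature map with the kernel's input axis.
    return tf.tensordot(net, classifier_kernel, axes=[[3], [0]])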