def myAlexNet2(weights_path=None):
    inputs = Input(shape=(3, 227, 227))
    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='relu',
                           name='conv_1')(inputs)
    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = merge([
        Convolution2D(
            128, 5, 5, activation="relu", name='conv_2_' + str(i + 1))(
                splittensor(ratio_split=2, id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_2")
    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    model = Model(input=inputs, output=conv_3)
    if weights_path:
        model.load_weights(weights_path)
    return model
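Most snippets in this listing use the Keras 1 API (Convolution2D, merge); a few use the Keras 2 names (Conv2D, concatenate). None of them show their imports, so here is a hedged import sketch; the convnetskeras.customlayers path is an assumption based on the heuritech/convnets-keras project the examples are adapted from.
# Hedged import sketch for the snippets in this listing (Keras 1 API flavor).
# The convnetskeras.customlayers path is an assumption based on
# heuritech/convnets-keras; examples written against Keras 2 substitute
# Conv2D / concatenate for Convolution2D / merge.
from keras.models import Model
from keras.layers import (Input, Dense, Dropout, Flatten, Activation, Lambda,
                          Convolution2D, MaxPooling2D, ZeroPadding2D, merge)
from keras.regularizers import l2
from convnetskeras.customlayers import (crosschannelnormalization,
                                        splittensor, Softmax4D)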
Example #2
def AlexNet(weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3,None,None))
#        inputs = Input(shape=(None, None, 3))
    else:
        inputs = Input(shape=(3, 227, 227))
#        inputs = Input(shape=(227, 227, 3))

    conv_1 = Conv2D(96, (11, 11), strides=(4, 4), activation='relu',
                    name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)
    conv_2 = concatenate([
        Conv2D(128, (5, 5), activation="relu", name='conv_2_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_2)
        ) for i in range(2)], axis=1, name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Conv2D(384, (3, 3), activation='relu', name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1,1))(conv_3)
    conv_4 = concatenate([
        Conv2D(192, (3, 3), activation="relu", name='conv_4_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_4)
        ) for i in range(2)], axis=1, name="conv_4")

    conv_5 = ZeroPadding2D((1,1))(conv_4)
    conv_5 = concatenate([
        Conv2D(128, (3, 3), activation="relu", name='conv_5_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_5)
        ) for i in range(2)], axis=1, name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

    if heatmap:
        dense_1 = Conv2D(4096, (6, 6), activation="relu", name="dense_1")(dense_1)
        dense_2 = Conv2D(4096, (1, 1), activation="relu", name="dense_2")(dense_1)
        dense_3 = Conv2D(1000, (1, 1), name="dense_3")(dense_2)
        prediction = Softmax4D(axis=1,name="softmax")(dense_3)
    else:
        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096, activation='relu',name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu',name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000,name='dense_3')(dense_3)
        prediction = Activation("softmax",name="softmax")(dense_3)


    model = Model(inputs=inputs, outputs=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
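A hedged usage sketch for the function above: all shapes here are channels-first, so the Keras image data format has to match before the graph is built; the weight-file name below is a placeholder, not something shipped with the snippet.
# Hedged usage sketch, assuming channels-first data and a weight file that
# matches the chosen mode ('alexnet_weights.h5' is a placeholder name).
from keras import backend as K
K.set_image_data_format('channels_first')           # inputs are (3, 227, 227)
model = AlexNet(weights_path='alexnet_weights.h5')  # classification head, 1000 classes
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
heatmap_model = AlexNet(heatmap=True)               # fully-convolutional head, no fixed input size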
Example #3
def AlexNet(nb_classes=2, weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3,None,None))
    else:
        inputs = Input(shape=(3,227,227))

    conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)
    conv_2 = merge([
        Convolution2D(128,5,5,activation="relu",name='conv_2_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_2)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Convolution2D(384,3,3,activation='relu',name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1,1))(conv_3)
    conv_4 = merge([
        Convolution2D(192,3,3,activation="relu",name='conv_4_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_4)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_4")

    conv_5 = ZeroPadding2D((1,1))(conv_4)
    conv_5 = merge([
        Convolution2D(128,3,3,activation="relu",name='conv_5_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_5)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

    if heatmap:
        dense_1 = Convolution2D(4096,6,6,activation="relu",name="dense_1")(dense_1)
        dense_2 = Convolution2D(4096,1,1,activation="relu",name="dense_2")(dense_1)
        dense_3 = Convolution2D(1000, 1,1,name="dense_3")(dense_2)
        prediction = Softmax4D(axis=1,name="softmax")(dense_3)
    else:
        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096, activation='relu',name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu',name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(output_dim=nb_classes,name='dense_3')(dense_3)
        # dense_3 = Dense(1000,name='dense_3')(dense_3)
        prediction = Activation("softmax",name="softmax")(dense_3)


    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #4
File: util.py Project: jackg0h/foodtag
def load_alexnet_model_finetune67(weights_path=None, nb_class=None, top_model_weight_path=None):

    inputs = Input(shape=(3,227,227))
    conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),
                            activation='relu',
                            name='conv_1',
                            W_regularizer=l2(0.0002))(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)
    conv_2 = merge([
        Convolution2D(128,5,5,
                      activation="relu",
                      name='conv_2_'+str(i+1),
                      W_regularizer=l2(0.0002))(
            splittensor(ratio_split=2,id_split=i)(conv_2)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_2")
    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Convolution2D(384,3,3,activation='relu',name='conv_3', W_regularizer=l2(0.0002))(conv_3)

    conv_4 = ZeroPadding2D((1,1))(conv_3)
    conv_4 = merge([
        Convolution2D(192,3,3,activation="relu",name='conv_4_'+str(i+1),W_regularizer=l2(0.0002))(
            splittensor(ratio_split=2,id_split=i)(conv_4)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_4")

    conv_5 = ZeroPadding2D((1,1))(conv_4)
    conv_5 = merge([
        Convolution2D(128,3,3,activation="relu",name='conv_5_'+str(i+1),W_regularizer=l2(0.0002))(
            splittensor(ratio_split=2,id_split=i)(conv_5)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_5")

    conv_5 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

    dense_1 = Flatten(name="flatten")(conv_5)
    dense_1 = Dense(4096, activation='relu',name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu',name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(nb_class,name='dense_3')(dense_3)
    prediction = Activation("softmax",name="softmax")(dense_3)

    base_model = Model(input=inputs, output=prediction)
    base_model.load_weights(weights_path)
    base_model = Model(input=inputs, output=conv_5)

    model = get_top_model_for_alex_finetune67(
        shape=base_model.output_shape[1:],
        nb_class=nb_class,
        weights_file_path=top_model_weight_path,
        input=base_model.input,
        output=base_model.output)

    return model
Example #5
def AlexNet(weights_path=None, heatmap=False):
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96, kernel_size=(11, 11), strides=(4, 4), activation='relu', data_format='channels_first',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name='convpool_1')(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = concatenate([
        Convolution2D(128, kernel_size=(5, 5), activation='relu', data_format='channels_first', name='conv_2_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_2)
        ) for i in range(2)], axis=1, name='conv_2')

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, kernel_size=(3, 3), activation='relu', data_format='channels_first', name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = concatenate([
        Convolution2D(192, kernel_size=(3, 3), activation='relu', data_format='channels_first', name='conv_4_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_4)
        ) for i in range(2)], axis=1, name='conv_4')

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = concatenate([
        Convolution2D(128, kernel_size=(3, 3), activation='relu', data_format='channels_first', name='conv_5_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_5)
        ) for i in range(2)], axis=1, name='conv_5')

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name='convpool_5')(conv_5)

    if heatmap:
        dense_1 = Convolution2D(4096, kernel_size=(6, 6), activation='relu', data_format='channels_first', name='dense_1')(dense_1)
        dense_2 = Convolution2D(4096, kernel_size=(1, 1), activation='relu', data_format='channels_first', name='dense_2')(dense_1)
        dense_3 = Convolution2D(1000, kernel_size=(1, 1), data_format='channels_first', name='dense_3')(dense_2)
        prediction = Softmax4D(axis=1, name='softmax')(dense_3)
    else:
        dense_1 = Flatten(name='flatten')(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000, name='dense_3')(dense_3)
        prediction = Activation('softmax', name='softmax')(dense_3)

    model = Model(inputs=inputs, outputs=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #6
File: model.py Project: jackg0h/foodtag
def alexnet2(weights_path=None, nb_class=None):

    inputs = Input(shape=(3,227,227))

    conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)

    x1 = conv2D_bn(conv_2, 128, 5, 5, subsample=(1, 1), border_mode='same')
    y1 = conv2D_bn(conv_2, 128, 5, 5, subsample=(1, 1), border_mode='same')

    conv_2 = merge([x1,y1], mode='concat',concat_axis=1,name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Convolution2D(384,3,3,activation='relu',name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1,1))(conv_3)

    x2 = conv2D_bn(conv_4, 192, 3, 3, subsample=(1, 1), border_mode='same')
    y2 = conv2D_bn(conv_4, 192, 3, 3, subsample=(1, 1), border_mode='same')

    conv_4 = merge([x2,y2], mode='concat',concat_axis=1,name="conv_4")

    conv_5 = ZeroPadding2D((1,1))(conv_4)

    x3 = conv2D_bn(conv_5, 192, 3, 3, subsample=(1, 1), border_mode='same')
    y3 = conv2D_bn(conv_5, 192, 3, 3, subsample=(1, 1), border_mode='same')

    conv_5 = merge([x3,y3], mode='concat',concat_axis=1,name="conv_5")

    conv_5 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

    dense_1 = Flatten(name="flatten")(conv_5)
    dense_1 = Dense(4096, activation='relu',name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu',name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(nb_class,name='dense_3')(dense_3)
    prediction = Activation("softmax",name="softmax")(dense_3)


    base_model = Model(input=inputs, output=prediction)

    if weights_path:
        base_model.load_weights(weights_path)

    return base_model
Example #7
def alexnet(input_shape, nb_classes, mean_flag): 
	# code adapted from https://github.com/heuritech/convnets-keras

	inputs = Input(shape=input_shape, name='main_input')

	if mean_flag:
		mean_subtraction = Lambda(mean_subtract, name='mean_subtraction')(inputs)
		conv_1 = Conv2D(96, (11, 11), strides=(4,4), activation='relu',
						   name='conv_1', kernel_initializer='he_normal', bias_initializer='he_normal')(mean_subtraction)
	else:
		conv_1 = Conv2D(96, (11, 11), strides=(4,4), activation='relu',
						   name='conv_1', kernel_initializer='he_normal', bias_initializer='he_normal')(inputs)

	conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
	conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
	conv_2 = ZeroPadding2D((2,2))(conv_2)
	conv_2 = concatenate([
		Conv2D(128, (5, 5), activation="relu", kernel_initializer='he_normal', bias_initializer='he_normal', name='conv_2_'+str(i+1))(
		splittensor(ratio_split=2,id_split=i)(conv_2)
		) for i in range(2)], axis=1, name="conv_2")

	conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
	conv_3 = crosschannelnormalization()(conv_3)
	conv_3 = ZeroPadding2D((1,1))(conv_3)
	conv_3 = Conv2D(384, (3, 3), activation='relu', name='conv_3', kernel_initializer='he_normal', bias_initializer='he_normal')(conv_3)

	conv_4 = ZeroPadding2D((1,1))(conv_3)
	conv_4 = concatenate([
		Conv2D(192, (3, 3), activation="relu", kernel_initializer='he_normal', bias_initializer='he_normal', name='conv_4_'+str(i+1))(
		splittensor(ratio_split=2,id_split=i)(conv_4)
		) for i in range(2)], axis=1, name="conv_4")

	conv_5 = ZeroPadding2D((1,1))(conv_4)
	conv_5 = concatenate([
		Conv2D(128, (3, 3), activation="relu", kernel_initializer='he_normal', bias_initializer='he_normal', name='conv_5_'+str(i+1))(
		splittensor(ratio_split=2,id_split=i)(conv_5)
		) for i in range(2)], axis=1, name="conv_5")

	dense_1 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

	dense_1 = Flatten(name="flatten")(dense_1)
	dense_1 = Dense(4096, activation='relu',name='dense_1', kernel_initializer='he_normal', bias_initializer='he_normal')(dense_1)
	dense_2 = Dropout(0.5)(dense_1)
	dense_2 = Dense(4096, activation='relu',name='dense__2', kernel_initializer='he_normal', bias_initializer='he_normal')(dense_2)
	dense_3 = Dropout(0.5)(dense_2)
	dense_3 = Dense(nb_classes,name='dense_3_new', kernel_initializer='he_normal', bias_initializer='he_normal')(dense_3)

	prediction = Activation("softmax",name="softmax")(dense_3)

	alexnet = Model(inputs = inputs, outputs = prediction)
	
	return alexnet
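Since this variant is written against the Keras 2 API, a training setup could look like the hedged sketch below; the optimizer settings, class count, and the x_train/y_train arrays are placeholders, not part of the original code.
# Hedged training sketch for the Keras 2 style alexnet() above.
# x_train / y_train are placeholder arrays shaped (n, 3, 227, 227) and (n, nb_classes).
from keras import backend as K
from keras.optimizers import SGD
K.set_image_data_format('channels_first')
model = alexnet(input_shape=(3, 227, 227), nb_classes=10, mean_flag=False)
model.compile(optimizer=SGD(lr=0.01, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)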
Example #8
File: util.py Project: jackg0h/foodtag
def load_deep_features_model(nb_class, weights_path=None):

    inputs = Input(shape=(3,227,227))

    conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)
    conv_2 = merge([
        Convolution2D(128,5,5,activation="relu",name='conv_2_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_2)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Convolution2D(384,3,3,activation='relu',name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1,1))(conv_3)
    conv_4 = merge([
        Convolution2D(192,3,3,activation="relu",name='conv_4_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_4)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_4")

    conv_5 = ZeroPadding2D((1,1))(conv_4)
    conv_5 = merge([
        Convolution2D(128,3,3,activation="relu",name='conv_5_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_5)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_5")

    conv_5 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

    dense_1 = Flatten(name="flatten")(conv_5)
    dense_1 = Dense(4096, activation='relu',name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu',name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(nb_class,name='dense_3')(dense_3)
    prediction = Activation("softmax",name="softmax")(dense_3)


    base_model = Model(input=inputs, output=prediction)

    if weights_path:
        base_model.load_weights(weights_path)

    base_model = Model(input=inputs, output=conv_5)

    return base_model
Example #9
def get_alexnet(input_shape,nb_classes,mean_flag): 

	inputs = Input(shape=input_shape)

	if mean_flag:
		mean_subtraction = Lambda(mean_subtract, name='mean_subtraction')(inputs)
		conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),activation='relu',
		                   name='conv_1', init='he_normal')(mean_subtraction)
	else:
		conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),activation='relu',
		                   name='conv_1', init='he_normal')(inputs)

	conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
	conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
	conv_2 = ZeroPadding2D((2,2))(conv_2)
	conv_2 = merge([
	    Convolution2D(128,5,5,activation="relu",init='he_normal', name='conv_2_'+str(i+1))(
		splittensor(ratio_split=2,id_split=i)(conv_2)
	    ) for i in range(2)], mode='concat',concat_axis=1,name="conv_2")

	conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
	conv_3 = crosschannelnormalization()(conv_3)
	conv_3 = ZeroPadding2D((1,1))(conv_3)
	conv_3 = Convolution2D(384,3,3,activation='relu',name='conv_3',init='he_normal')(conv_3)

	conv_4 = ZeroPadding2D((1,1))(conv_3)
	conv_4 = merge([
	    Convolution2D(192,3,3,activation="relu", init='he_normal', name='conv_4_'+str(i+1))(
		splittensor(ratio_split=2,id_split=i)(conv_4)
	    ) for i in range(2)], mode='concat',concat_axis=1,name="conv_4")

	conv_5 = ZeroPadding2D((1,1))(conv_4)
	conv_5 = merge([
	    Convolution2D(128,3,3,activation="relu",init='he_normal', name='conv_5_'+str(i+1))(
		splittensor(ratio_split=2,id_split=i)(conv_5)
	    ) for i in range(2)], mode='concat',concat_axis=1,name="conv_5")

	dense_1 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

	dense_1 = Flatten(name="flatten")(dense_1)
	dense_1 = Dense(4096, activation='relu',name='dense_1',init='he_normal')(dense_1)
	dense_2 = Dropout(0.5)(dense_1)
	dense_2 = Dense(4096, activation='relu',name='dense_2',init='he_normal')(dense_2)
	dense_3 = Dropout(0.5)(dense_2)
	dense_3 = Dense(nb_classes,name='dense_3_new',init='he_normal')(dense_3)

	prediction = Activation("softmax",name="softmax")(dense_3)

	alexnet = Model(input=inputs, output=prediction)
    
	return alexnet
Example #10
def alexnet(input_shape, nb_classes):
    # https://github.com/duggalrahul/AlexNet-Experiments-Keras/blob/master/Code/alexnet_base.py
    # code adapted from https://github.com/heuritech/convnets-keras

    inputs = Input(shape=input_shape)


    conv_1 = Convolution2D(96, 11, 11, subsample=(4, 4), activation='relu',
                           name='conv_1', init='he_normal')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = merge([
        Convolution2D(128, 5, 5, activation="relu", init='he_normal', name='conv_2_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_2)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3', init='he_normal')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Convolution2D(192, 3, 3, activation="relu", init='he_normal', name='conv_4_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_4)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_4")

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Convolution2D(128, 3, 3, activation="relu", init='he_normal', name='conv_5_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_5)
        ) for i in range(2)], mode='concat', concat_axis=1, name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)

    dense_1 = Flatten(name="flatten")(dense_1)
    dense_1 = Dense(4096, activation='relu', name='dense_1', init='he_normal')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu', name='dense_2', init='he_normal')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(nb_classes, name='dense_3_new', init='he_normal')(dense_3)

    prediction = Activation("softmax", name="softmax")(dense_3)

    alexnet = Model(input=inputs, output=prediction)

    return alexnet
Example #11
def train_shallow_alexnet_imagenet_FCN(classes=5, freeze_flag=None):
    model = alexnet(input_shape=(3, 227, 227), nb_classes=1000, mean_flag=True)
    model.load_weights('alexnet_weights.h5')

    # modify architecture
    last_conv_1 = model.layers[5].output
    conv_2 = Conv2D(256, (5, 5),
                    strides=(1, 1),
                    activation='relu',
                    name='conv_2',
                    kernel_initializer='he_normal',
                    bias_initializer='he_normal')(last_conv_1)
    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_2 = crosschannelnormalization(name="convpool_2")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)

    conv_2 = Dropout(0.5)(conv_2)

    conv_activate = Conv2D(classes,
                           kernel_size=(1, 1),
                           strides=(1, 1),
                           activation='relu',
                           kernel_initializer='he_normal',
                           bias_initializer='he_normal',
                           name='conv_activate')(conv_2)
    conv_activate = GlobalAveragePooling2D(
        data_format='channels_first')(conv_activate)

    model = Model(inputs=model.input, outputs=conv_activate)
    plot_model(model, to_file='shallowalex_fcn', show_shapes=True)
    print(model.summary())

    return model
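This builder assumes an alexnet() constructor with the (input_shape, nb_classes, mean_flag) signature (Example #7 above), a local 'alexnet_weights.h5' file, and pydot/graphviz for plot_model. A hedged sketch of inspecting the resulting head:
# Hedged sketch: build the truncated fully-convolutional model and check its output.
# Assumes 'alexnet_weights.h5' sits in the working directory and that pydot /
# graphviz are installed for plot_model.
shallow = train_shallow_alexnet_imagenet_FCN(classes=5)
print(shallow.output_shape)   # expected (None, 5) with channels-first data: one pooled score per class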
Example #12
def alexnet_correct(weights_path=None,
                    out_dim=1000,
                    dist_type='blur',
                    corr_arch='CW',
                    num_ly_corr=5):
    filter_sz = [96, 256, 384, 384, 256]

    tr_wts = np.load(weights_path)
    inp_img = Input((3, IN_dat.img_crop, IN_dat.img_crop), name='input_img')
    set_trainable = False

    correction_perc = 0.75

    def split_tensor_lower(x):

        inp_shape = K.int_shape(x)
        chn = corrected_chn

        return x[:, chn:, :, :]

    def split_tensor_upper(x):
        # corr_per = 0.25
        inp_shape = K.int_shape(x)
        chn = corrected_chn
        return x[:, :chn, :, :]

    def split_tensor_uppershape(input_shape):
        inp_shape = input_shape
        chn = corrected_chn
        shape = (chn, ) + input_shape[2:]
        shape = (input_shape[0], ) + shape
        return shape

    def split_tensor_lowershape(input_shape):
        inp_shape = input_shape
        chn = inp_shape[1] - corrected_chn
        shape = (chn, ) + input_shape[2:]
        shape = (input_shape[0], ) + shape
        return shape
        # else:
        #     def split_tensor_lower(x):
        #
        #         inp_shape = K.int_shape(x)
        #         chn = int(inp_shape[1] * correction_perc)
        #
        #         return x[:, chn:, :, :]
        #
        #     def split_tensor_upper(x):
        #         # corr_per = 0.25
        #         inp_shape = K.int_shape(x)
        #         chn = int(inp_shape[1] * correction_perc)
        #         return x[:, :chn, :, :]
        #
        #     def split_tensor_uppershape(input_shape):
        #         inp_shape = input_shape
        #         chn = int(inp_shape[1] * correction_perc)
        #         shape = (chn,) + input_shape[2:]
        #         shape = (input_shape[0],) + shape
        #         return shape
        #
        #     def split_tensor_lowershape(input_shape):
        #         inp_shape = input_shape
        #         chn = inp_shape[1] - int(inp_shape[1] * correction_perc)
        #         shape = (chn,) + input_shape[2:]
        #         shape = (input_shape[0],) + shape
        #         return shape

    wts_l1 = tr_wts[0].copy()
    b_l1 = tr_wts[1].copy()

    ranked_fltr = h5py.File('imagenet_alexnet_ranked_filters.h5', 'r')

    if dist_type == 'blur':
        corr_id = ranked_fltr['alexnet_blur/layer_' + str(1)][:]

    elif dist_type == 'awgn':
        corr_id = ranked_fltr['alexnet_awgn/layer_' + str(1)][:]

    corrected_chn = int(correction_perc * filter_sz[0])
    print(corrected_chn)
    # print corr_id
    wts_l1 = wts_l1[corr_id, :, :, :]
    b_l1 = b_l1[corr_id]

    wts_remap_l1 = np.zeros((filter_sz[0], filter_sz[0], 1, 1), np.float32)
    b_remap_l1 = np.zeros(filter_sz[0], np.float32)

    for filt_id in range(filter_sz[0]):
        wts_remap_l1[corr_id[filt_id], filt_id, :, :] = 1.0

    # inp_img1 = ZeroPadding2D((2,2))(inp_img)
    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='linear',
                           weights=[wts_l1, b_l1],
                           name='conv_1',
                           trainable=set_trainable)(inp_img)

    conv1_lower = Lambda(split_tensor_lower,
                         output_shape=split_tensor_lowershape)(conv_1)

    conv1_upper = Lambda(split_tensor_upper,
                         output_shape=split_tensor_uppershape)(conv_1)

    if corr_arch == 'CW':
        conv1_correct = get_correct_unit_CW(conv1_upper, corrected_chn, 5, 2)
    elif corr_arch == 'fixed':
        conv1_correct = get_correct_unit_bottleneck(conv1_upper, 128,
                                                    corrected_chn, [1, 2], 2)
    else:
        conv1_correct = get_correct_unit_bottleneck(conv1_upper,
                                                    int(0.5 * corrected_chn),
                                                    corrected_chn, [1, 1, 2],
                                                    3)

    conv1_sum_merge1 = merge([conv1_correct, conv1_upper], mode='sum')

    conv1_1_merged = merge([conv1_sum_merge1, conv1_lower],
                           mode='concat',
                           concat_axis=1)

    conv_1_remap = Convolution2D(filter_sz[0],
                                 1,
                                 1,
                                 activation='linear',
                                 name='conv_1_corr',
                                 trainable=False,
                                 weights=[wts_remap_l1,
                                          b_remap_l1])(conv1_1_merged)

    conv_1_out = Activation('relu', name='conv_1_relu')(conv_1_remap)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1_out)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)

    conv2_w_t = tr_wts[2]
    conv2_w = np.empty((2, conv2_w_t.shape[0], conv2_w_t.shape[1],
                        conv2_w_t.shape[2], conv2_w_t.shape[3]), np.float32)
    conv2_w[0, :] = conv2_w_t.copy()
    del conv2_w_t
    conv2_w[1, :] = tr_wts[4]

    conv2_b_t = tr_wts[3]
    conv2_b = np.empty((2, conv2_b_t.shape[0]), np.float32)
    conv2_b[0, :] = conv2_b_t.copy()
    del conv2_b_t
    conv2_b[1, :] = tr_wts[5]

    conv_2 = merge([
        Convolution2D(128,
                      5,
                      5,
                      activation="linear",
                      name='conv_2_' + str(i + 1),
                      weights=[conv2_w[i, :], conv2_b[i, :]],
                      trainable=set_trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_2")

    if num_ly_corr > 1:

        if dist_type == 'blur':
            corr_id = ranked_fltr['alexnet_blur/layer_' + str(2)][:]

        elif dist_type == 'awgn':
            corr_id = ranked_fltr['alexnet_awgn/layer_' + str(2)][:]

        corrected_chn = int(correction_perc * filter_sz[1])
        print(corrected_chn)
        # print corr_id

        wts_remap_l2_inp = np.zeros((filter_sz[1], filter_sz[1], 1, 1),
                                    np.float32)
        b_remap_l2_inp = np.zeros(filter_sz[1], np.float32)

        wts_remap_l2 = np.zeros((filter_sz[1], filter_sz[1], 1, 1), np.float32)
        b_remap_l2 = np.zeros(filter_sz[1], np.float32)

        for filt_id in range(filter_sz[1]):
            wts_remap_l2_inp[filt_id, corr_id[filt_id], :, :] = 1.0
            wts_remap_l2[corr_id[filt_id], filt_id, :, :] = 1.0

        conv_2_inp_remap = Convolution2D(
            filter_sz[1],
            1,
            1,
            activation='linear',
            trainable=False,
            weights=[wts_remap_l2_inp, b_remap_l2_inp])(conv_2)

        conv2_lower = Lambda(
            split_tensor_lower,
            output_shape=split_tensor_lowershape)(conv_2_inp_remap)
        conv2_upper = Lambda(
            split_tensor_upper,
            output_shape=split_tensor_uppershape)(conv_2_inp_remap)

        if corr_arch == 'CW':
            conv2_correct = get_correct_unit_CW(conv2_upper, corrected_chn, 3,
                                                2)
        elif corr_arch == 'fixed':
            conv2_correct = get_correct_unit_bottleneck(
                conv2_upper, 128, corrected_chn, [1, 2], 2)
        else:
            conv2_correct = get_correct_unit_bottleneck(
                conv2_upper, int(0.5 * corrected_chn), corrected_chn,
                [1, 1, 1], 3)

        conv2_sum_merge1 = merge([conv2_correct, conv2_upper], mode='sum')

        conv2_1_merged = merge([conv2_sum_merge1, conv2_lower],
                               mode='concat',
                               concat_axis=1)

        conv_2_remap = Convolution2D(filter_sz[1],
                                     1,
                                     1,
                                     activation='linear',
                                     name='conv_2_corr',
                                     trainable=False,
                                     weights=[wts_remap_l2,
                                              b_remap_l2])(conv2_1_merged)

        conv_2_out = Activation('relu', name='conv_2_relu')(conv_2_remap)
    else:

        conv_2_out = Activation('relu', name='conv_2_relu')(conv_2)

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2_out)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)

    wts_l3 = tr_wts[6].copy()
    b_l3 = tr_wts[7].copy()
    #
    if num_ly_corr > 2:
        correction_perc = 0.5

        if dist_type == 'blur':
            corr_id = ranked_fltr['alexnet_blur/layer_' + str(3)][:]

        elif dist_type == 'awgn':
            corr_id = ranked_fltr['alexnet_awgn/layer_' + str(3)][:]

        corrected_chn = int(correction_perc * filter_sz[2])

        print(corrected_chn)
        # print corr_id
        wts_l3 = wts_l3[corr_id, :, :, :]
        b_l3 = b_l3[corr_id]

        wts_remap_l3 = np.zeros((filter_sz[2], filter_sz[2], 1, 1), np.float32)
        b_remap_l3 = np.zeros(filter_sz[2], np.float32)

        for filt_id in range(filter_sz[2]):
            wts_remap_l3[corr_id[filt_id], filt_id, :, :] = 1.0

        conv_3 = Convolution2D(384,
                               3,
                               3,
                               weights=[wts_l3, b_l3],
                               activation='linear',
                               name='conv_3',
                               trainable=set_trainable)(conv_3)

        #
        conv3_lower = Lambda(split_tensor_lower,
                             output_shape=split_tensor_lowershape)(conv_3)
        conv3_upper = Lambda(split_tensor_upper,
                             output_shape=split_tensor_uppershape)(conv_3)

        if corr_arch == 'CW':
            conv3_correct = get_correct_unit_CW(conv3_upper, corrected_chn, 3,
                                                2)
        elif corr_arch == 'fixed':
            conv3_correct = get_correct_unit_bottleneck(
                conv3_upper, 128, corrected_chn, [1, 1], 2)
        else:
            conv3_correct = get_correct_unit_bottleneck(
                conv3_upper, int(0.5 * corrected_chn), corrected_chn,
                [1, 1, 1], 3)

        conv3_sum_merge1 = merge([conv3_correct, conv3_upper], mode='sum')

        conv3_1_merged = merge([conv3_sum_merge1, conv3_lower],
                               mode='concat',
                               concat_axis=1)

        conv_3_remap = Convolution2D(filter_sz[2],
                                     1,
                                     1,
                                     activation='linear',
                                     name='conv_3_corr',
                                     trainable=False,
                                     weights=[wts_remap_l3,
                                              b_remap_l3])(conv3_1_merged)

        conv_3_out = Activation('relu', name='conv_3_relu')(conv_3_remap)

    else:
        conv_3 = Convolution2D(384,
                               3,
                               3,
                               weights=[wts_l3, b_l3],
                               activation='linear',
                               name='conv_3',
                               trainable=set_trainable)(conv_3)

        conv_3_out = Activation('relu', name='conv_3_relu')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3_out)

    conv4_w_t = tr_wts[8].copy()
    conv4_w = np.empty((2, conv4_w_t.shape[0], conv4_w_t.shape[1],
                        conv4_w_t.shape[2], conv4_w_t.shape[3]), np.float32)
    conv4_w[0, :] = conv4_w_t.copy()
    del conv4_w_t
    conv4_w[1, :] = tr_wts[10].copy()

    conv4_b_t = tr_wts[9].copy()
    conv4_b = np.empty((2, conv4_b_t.shape[0]), np.float32)
    conv4_b[0, :] = conv4_b_t.copy()
    del conv4_b_t
    conv4_b[1, :] = tr_wts[11].copy()

    conv_4 = merge([
        Convolution2D(192,
                      3,
                      3,
                      activation="linear",
                      name='conv_4_' + str(i + 1),
                      weights=[conv4_w[i, :], conv4_b[i, :]],
                      trainable=set_trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_4")
    #
    if num_ly_corr > 3:
        correction_perc = 0.5

        if dist_type == 'blur':
            corr_id = ranked_fltr['alexnet_blur/layer_' + str(4)][:]

        elif dist_type == 'awgn':
            corr_id = ranked_fltr['alexnet_awgn/layer_' + str(4)][:]

        corrected_chn = int(correction_perc * filter_sz[3])
        # corrected_chn = corr_chn
        print(corrected_chn)
        # print corr_id
        wts_remap_l4_inp = np.zeros((filter_sz[3], filter_sz[3], 1, 1),
                                    np.float32)
        b_remap_l4_inp = np.zeros(filter_sz[3], np.float32)

        wts_remap_l4 = np.zeros((filter_sz[3], filter_sz[3], 1, 1), np.float32)
        b_remap_l4 = np.zeros(filter_sz[3], np.float32)

        for filt_id in range(filter_sz[3]):
            wts_remap_l4_inp[filt_id, corr_id[filt_id], :, :] = 1.0
            wts_remap_l4[corr_id[filt_id], filt_id, :, :] = 1.0

        conv_4_inp_remap = Convolution2D(
            filter_sz[3],
            1,
            1,
            activation='linear',
            trainable=False,
            weights=[wts_remap_l4_inp, b_remap_l4_inp])(conv_4)

        conv4_lower = Lambda(
            split_tensor_lower,
            output_shape=split_tensor_lowershape)(conv_4_inp_remap)
        conv4_upper = Lambda(
            split_tensor_upper,
            output_shape=split_tensor_uppershape)(conv_4_inp_remap)

        if corr_arch == 'CW':
            conv4_correct = get_correct_unit_CW(conv4_upper, corrected_chn, 3,
                                                2)
        elif corr_arch == 'fixed':
            conv4_correct = get_correct_unit_bottleneck(
                conv4_upper, 128, corrected_chn, [1, 1], 2)
        else:
            conv4_correct = get_correct_unit_bottleneck(
                conv4_upper, int(0.5 * corrected_chn), corrected_chn,
                [1, 1, 1], 3)

        conv4_sum_merge1 = merge([conv4_correct, conv4_upper], mode='sum')

        conv4_1_merged = merge([conv4_sum_merge1, conv4_lower],
                               mode='concat',
                               concat_axis=1)

        conv_4_remap = Convolution2D(filter_sz[3],
                                     1,
                                     1,
                                     activation='linear',
                                     name='conv_4_corr',
                                     trainable=False,
                                     weights=[wts_remap_l4,
                                              b_remap_l4])(conv4_1_merged)

        conv_4_out = Activation('relu', name='conv_4_relu')(conv_4_remap)
    else:

        conv_4_out = Activation('relu', name='conv_4_relu')(conv_4)

    conv_5 = ZeroPadding2D((1, 1))(conv_4_out)

    conv5_w_t = tr_wts[12].copy()
    conv5_w = np.empty((2, conv5_w_t.shape[0], conv5_w_t.shape[1],
                        conv5_w_t.shape[2], conv5_w_t.shape[3]), np.float32)
    conv5_w[0, :] = conv5_w_t.copy()
    del conv5_w_t
    conv5_w[1, :] = tr_wts[14].copy()

    conv5_b_t = tr_wts[13].copy()
    conv5_b = np.empty((2, conv5_b_t.shape[0]), np.float32)
    conv5_b[0, :] = conv5_b_t.copy()
    del conv5_b_t
    conv5_b[1, :] = tr_wts[15].copy()

    conv_5 = merge([
        Convolution2D(128,
                      3,
                      3,
                      activation="linear",
                      name='conv_5_' + str(i + 1),
                      weights=[conv5_w[i, :], conv5_b[i, :]],
                      trainable=set_trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_5")
    #
    #
    #
    if num_ly_corr > 4:
        correction_perc = 0.5

        if dist_type == 'blur':
            corr_id = ranked_fltr['alexnet_blur/layer_' + str(5)][:]

        elif dist_type == 'awgn':
            corr_id = ranked_fltr['alexnet_awgn/layer_' + str(5)][:]

        corrected_chn = int(correction_perc * filter_sz[4])
        print(corrected_chn)
        # print corr_id

        wts_remap_l5_inp = np.zeros((filter_sz[4], filter_sz[4], 1, 1),
                                    np.float32)
        b_remap_l5_inp = np.zeros(filter_sz[4], np.float32)

        wts_remap_l5 = np.zeros((filter_sz[4], filter_sz[4], 1, 1), np.float32)
        b_remap_l5 = np.zeros(filter_sz[4], np.float32)

        for filt_id in range(filter_sz[4]):
            wts_remap_l5_inp[filt_id, corr_id[filt_id], :, :] = 1.0
            wts_remap_l5[corr_id[filt_id], filt_id, :, :] = 1.0

        conv_5_inp_remap = Convolution2D(
            filter_sz[4],
            1,
            1,
            activation='linear',
            trainable=False,
            weights=[wts_remap_l5_inp, b_remap_l5_inp])(conv_5)

        conv5_lower = Lambda(
            split_tensor_lower,
            output_shape=split_tensor_lowershape)(conv_5_inp_remap)
        conv5_upper = Lambda(
            split_tensor_upper,
            output_shape=split_tensor_uppershape)(conv_5_inp_remap)

        if corr_arch == 'CW':
            conv5_correct = get_correct_unit_CW(conv5_upper, corrected_chn, 3,
                                                2)
        elif corr_arch == 'fixed':
            conv5_correct = get_correct_unit_bottleneck(
                conv5_upper, 128, corrected_chn, [1, 1], 2)
        else:
            conv5_correct = get_correct_unit_bottleneck(
                conv5_upper, int(0.5 * corrected_chn), corrected_chn,
                [1, 1, 1], 3)

        conv5_sum_merge1 = merge([conv5_correct, conv5_upper], mode='sum')

        conv5_1_merged = merge([conv5_sum_merge1, conv5_lower],
                               mode='concat',
                               concat_axis=1)

        conv_5_remap = Convolution2D(filter_sz[4],
                                     1,
                                     1,
                                     activation='linear',
                                     name='conv_5_corr',
                                     trainable=False,
                                     weights=[wts_remap_l5,
                                              b_remap_l5])(conv5_1_merged)

        conv_5_out = Activation('relu', name='conv_5_relu')(conv_5_remap)
    else:

        conv_5_out = Activation('relu', name='conv_5_relu')(conv_5)

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                           name="convpool_5")(conv_5_out)

    dense_1_w = tr_wts[16]
    dense_1_b = tr_wts[17]

    dense_2_w = tr_wts[18]
    dense_2_b = tr_wts[19]

    dense_1 = Flatten(name="flatten")(dense_1)
    dense_1 = Dense(4096,
                    activation='relu',
                    name='dense_1',
                    weights=[dense_1_w, dense_1_b],
                    trainable=False)(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096,
                    activation='relu',
                    name='dense_2',
                    weights=[dense_2_w, dense_2_b],
                    trainable=False)(dense_2)
    dense_3 = Dropout(0.5)(dense_2)

    dense_3 = Dense(out_dim,
                    name='dense_3',
                    weights=[tr_wts[20][:, :out_dim], tr_wts[21][:out_dim]],
                    trainable=False,
                    init='he_normal')(dense_3)
    prediction = Activation("softmax", name="softmax")(dense_3)

    print('\n')
    print(tr_wts[20].shape)
    print('\n')
    print(tr_wts[21].shape)
    model = Model(input=inp_img, output=prediction)

    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    return model, layer_dict
Example #13
def AlexNetDNN(weights_path=None,
               heatmap=False,
               trainable=False,
               out_dim=1000):
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    inp_file = h5py.File(weights_path, 'r')

    conv1_w = inp_file['conv_1/conv_1_W'][:]
    conv1_b = inp_file['conv_1/conv_1_b'][:]
    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='relu',
                           weights=[conv1_w, conv1_b],
                           name='conv_1',
                           trainable=trainable)(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)

    conv2_w_t = inp_file['conv_2_1/conv_2_1_W'][:]
    conv2_w = np.empty((2, conv2_w_t.shape[0], conv2_w_t.shape[1],
                        conv2_w_t.shape[2], conv2_w_t.shape[3]), np.float32)
    conv2_w[0, :] = conv2_w_t.copy()
    del conv2_w_t
    conv2_w[1, :] = inp_file['conv_2_2/conv_2_2_W'][:]

    conv2_b_t = inp_file['conv_2_1/conv_2_1_b'][:]
    conv2_b = np.empty((2, conv2_b_t.shape[0]), np.float32)
    conv2_b[0, :] = conv2_b_t.copy()
    del conv2_b_t
    conv2_b[1, :] = inp_file['conv_2_2/conv_2_2_b'][:]

    conv_2 = merge([
        Convolution2D(128,
                      5,
                      5,
                      activation="relu",
                      name='conv_2_' + str(i + 1),
                      weights=[conv2_w[i, :], conv2_b[i, :]],
                      trainable=trainable)(splittensor(ratio_split=2,
                                                       id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)

    conv3_w = inp_file['conv_3/conv_3_W'][:]
    conv3_b = inp_file['conv_3/conv_3_b'][:]
    conv_3 = Convolution2D(384,
                           3,
                           3,
                           weights=[conv3_w, conv3_b],
                           activation='relu',
                           name='conv_3',
                           trainable=trainable)(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)

    conv4_w_t = inp_file['conv_4_1/conv_4_1_W'][:]
    conv4_w = np.empty((2, conv4_w_t.shape[0], conv4_w_t.shape[1],
                        conv4_w_t.shape[2], conv4_w_t.shape[3]), np.float32)
    conv4_w[0, :] = conv4_w_t.copy()
    del conv4_w_t
    conv4_w[1, :] = inp_file['conv_4_2/conv_4_2_W'][:]

    conv4_b_t = inp_file['conv_4_1/conv_4_1_b'][:]
    conv4_b = np.empty((2, conv4_b_t.shape[0]), np.float32)
    conv4_b[0, :] = conv4_b_t.copy()
    del conv4_b_t
    conv4_b[1, :] = inp_file['conv_4_2/conv_4_2_b'][:]

    conv_4 = merge([
        Convolution2D(192,
                      3,
                      3,
                      activation="relu",
                      name='conv_4_' + str(i + 1),
                      weights=[conv4_w[i, :], conv4_b[i, :]],
                      trainable=trainable)(splittensor(ratio_split=2,
                                                       id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_4")

    conv_5 = ZeroPadding2D((1, 1))(conv_4)

    conv5_w_t = inp_file['conv_5_1/conv_5_1_W'][:]
    conv5_w = np.empty((2, conv5_w_t.shape[0], conv5_w_t.shape[1],
                        conv5_w_t.shape[2], conv5_w_t.shape[3]), np.float32)
    conv5_w[0, :] = conv5_w_t.copy()
    del conv5_w_t
    conv5_w[1, :] = inp_file['conv_5_2/conv_5_2_W'][:]

    conv5_b_t = inp_file['conv_5_1/conv_5_1_b'][:]
    conv5_b = np.empty((2, conv5_b_t.shape[0]), np.float32)
    conv5_b[0, :] = conv5_b_t.copy()
    del conv5_b_t
    conv5_b[1, :] = inp_file['conv_5_2/conv_5_2_b'][:]

    conv_5 = merge([
        Convolution2D(128,
                      3,
                      3,
                      activation="relu",
                      name='conv_5_' + str(i + 1),
                      weights=[conv5_w[i, :], conv5_b[i, :]],
                      trainable=trainable)(splittensor(ratio_split=2,
                                                       id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)

    dense_1_w = inp_file['dense_1/dense_1_W'][:]
    dense_1_b = inp_file['dense_1/dense_1_b'][:]

    dense_2_w = inp_file['dense_2/dense_2_W'][:]
    dense_2_b = inp_file['dense_2/dense_2_b'][:]

    dense_3_w = inp_file['dense_3/dense_3_W'][:]
    dense_3_b = inp_file['dense_3/dense_3_b'][:]

    dense_1 = Flatten(name="flatten")(dense_1)
    dense_1 = Dense(4096,
                    activation='relu',
                    name='dense_1',
                    weights=[dense_1_w, dense_1_b],
                    trainable=trainable)(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096,
                    activation='relu',
                    name='dense_2',
                    weights=[dense_2_w, dense_2_b],
                    trainable=trainable)(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    if out_dim == 1000:
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        weights=[dense_3_w, dense_3_b],
                        trainable=trainable)(dense_3)
    else:
        # change trainable from true to trainable
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        trainable=True,
                        init='he_normal')(dense_3)
    prediction = Activation("softmax", name="softmax")(dense_3)

    model = Model(input=inputs, output=prediction)
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    return model, layer_dict
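AlexNetDNN returns both the model and a name-to-layer lookup, which is convenient for freezing or probing individual layers. A hedged fine-tuning sketch; the HDF5 file name, its Keras 1 style 'conv_1/conv_1_W' group layout, and the 10-class head are placeholders/assumptions:
# Hedged fine-tuning sketch: frozen pretrained trunk, freshly initialised 10-way dense_3.
# 'alexnet_weights.h5' is a placeholder; it must contain the 'conv_1/conv_1_W'-style
# groups the loader above reads.
model, layer_dict = AlexNetDNN(weights_path='alexnet_weights.h5',
                               trainable=False, out_dim=10)
print(layer_dict['conv_3'].output_shape)   # layers are addressable by name
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])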
Example #14
def AlexNetDNN_layers(weights_path=None,
                      layer_id=0,
                      trainable=False,
                      out_dim=1000):

    learning_rate_multiplier = 1.0
    tr_wts = np.load(weights_path)
    if layer_id == 0:
        inputs = Input(shape=(96, 55, 55))
        conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(inputs)
        conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
        conv_2 = ZeroPadding2D((2, 2))(conv_2)

        conv2_w_t = tr_wts[2]
        conv2_w = np.empty((2, conv2_w_t.shape[0], conv2_w_t.shape[1],
                            conv2_w_t.shape[2], conv2_w_t.shape[3]),
                           np.float32)
        conv2_w[0, :] = conv2_w_t.copy()
        del conv2_w_t
        conv2_w[1, :] = tr_wts[4]

        conv2_b_t = tr_wts[3]
        conv2_b = np.empty((2, conv2_b_t.shape[0]), np.float32)
        conv2_b[0, :] = conv2_b_t.copy()
        del conv2_b_t
        conv2_b[1, :] = tr_wts[5]

        conv_2 = merge([
            Convolution2D(128,
                          5,
                          5,
                          activation="relu",
                          name='conv_2_' + str(i + 1),
                          weights=[conv2_w[i, :], conv2_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_2))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_2")

        conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
        conv_3 = crosschannelnormalization()(conv_3)
        conv_3 = ZeroPadding2D((1, 1))(conv_3)

        conv3_w = tr_wts[6]
        conv3_b = tr_wts[7]
        conv_3 = Convolution2D(384,
                               3,
                               3,
                               weights=[conv3_w, conv3_b],
                               activation='relu',
                               name='conv_3',
                               trainable=trainable)(conv_3)

        conv_4 = ZeroPadding2D((1, 1))(conv_3)

        conv4_w_t = tr_wts[8]
        conv4_w = np.empty((2, conv4_w_t.shape[0], conv4_w_t.shape[1],
                            conv4_w_t.shape[2], conv4_w_t.shape[3]),
                           np.float32)
        conv4_w[0, :] = conv4_w_t.copy()
        del conv4_w_t
        conv4_w[1, :] = tr_wts[10]

        conv4_b_t = tr_wts[9]
        conv4_b = np.empty((2, conv4_b_t.shape[0]), np.float32)
        conv4_b[0, :] = conv4_b_t.copy()
        del conv4_b_t
        conv4_b[1, :] = tr_wts[11]

        conv_4 = merge([
            Convolution2D(192,
                          3,
                          3,
                          activation="relu",
                          name='conv_4_' + str(i + 1),
                          weights=[conv4_w[i, :], conv4_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_4))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_4")

        conv_5 = ZeroPadding2D((1, 1))(conv_4)

        conv5_w_t = tr_wts[12]
        conv5_w = np.empty((2, conv5_w_t.shape[0], conv5_w_t.shape[1],
                            conv5_w_t.shape[2], conv5_w_t.shape[3]),
                           np.float32)
        conv5_w[0, :] = conv5_w_t.copy()
        del conv5_w_t
        conv5_w[1, :] = tr_wts[14]

        conv5_b_t = tr_wts[13]
        conv5_b = np.empty((2, conv5_b_t.shape[0]), np.float32)
        conv5_b[0, :] = conv5_b_t.copy()
        del conv5_b_t
        conv5_b[1, :] = tr_wts[15]

        conv_5 = merge([
            Convolution2D(128,
                          3,
                          3,
                          activation="relu",
                          name='conv_5_' + str(i + 1),
                          weights=[conv5_w[i, :], conv5_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_5))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_5")

        dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                               name="convpool_5")(conv_5)

        dense_1_w = tr_wts[16]
        dense_1_b = tr_wts[17]

        dense_2_w = tr_wts[18]
        dense_2_b = tr_wts[19]

        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096,
                        activation='relu',
                        name='dense_1',
                        weights=[dense_1_w, dense_1_b],
                        trainable=trainable)(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096,
                        activation='relu',
                        name='dense_2',
                        weights=[dense_2_w, dense_2_b],
                        trainable=trainable)(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        weights=[tr_wts[20], tr_wts[21]],
                        trainable=trainable,
                        init='he_normal')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    elif layer_id == 1:
        inputs = Input(shape=(256, 27, 27))
        conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(inputs)
        conv_3 = crosschannelnormalization()(conv_3)
        conv_3 = ZeroPadding2D((1, 1))(conv_3)

        conv3_w = tr_wts[6]
        conv3_b = tr_wts[7]
        conv_3 = Convolution2D(384,
                               3,
                               3,
                               weights=[conv3_w, conv3_b],
                               activation='relu',
                               name='conv_3',
                               trainable=trainable)(conv_3)

        conv_4 = ZeroPadding2D((1, 1))(conv_3)

        conv4_w_t = tr_wts[8]
        conv4_w = np.empty((2, conv4_w_t.shape[0], conv4_w_t.shape[1],
                            conv4_w_t.shape[2], conv4_w_t.shape[3]),
                           np.float32)
        conv4_w[0, :] = conv4_w_t.copy()
        del conv4_w_t
        conv4_w[1, :] = tr_wts[10]

        conv4_b_t = tr_wts[9]
        conv4_b = np.empty((2, conv4_b_t.shape[0]), np.float32)
        conv4_b[0, :] = conv4_b_t.copy()
        del conv4_b_t
        conv4_b[1, :] = tr_wts[11]

        conv_4 = merge([
            Convolution2D(192,
                          3,
                          3,
                          activation="relu",
                          name='conv_4_' + str(i + 1),
                          weights=[conv4_w[i, :], conv4_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_4))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_4")

        conv_5 = ZeroPadding2D((1, 1))(conv_4)

        conv5_w_t = tr_wts[12]
        conv5_w = np.empty((2, conv5_w_t.shape[0], conv5_w_t.shape[1],
                            conv5_w_t.shape[2], conv5_w_t.shape[3]),
                           np.float32)
        conv5_w[0, :] = conv5_w_t.copy()
        del conv5_w_t
        conv5_w[1, :] = tr_wts[14]

        conv5_b_t = tr_wts[13]
        conv5_b = np.empty((2, conv5_b_t.shape[0]), np.float32)
        conv5_b[0, :] = conv5_b_t.copy()
        del conv5_b_t
        conv5_b[1, :] = tr_wts[15]

        conv_5 = merge([
            Convolution2D(128,
                          3,
                          3,
                          activation="relu",
                          name='conv_5_' + str(i + 1),
                          weights=[conv5_w[i, :], conv5_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_5))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_5")

        dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                               name="convpool_5")(conv_5)

        dense_1_w = tr_wts[16]
        dense_1_b = tr_wts[17]

        dense_2_w = tr_wts[18]
        dense_2_b = tr_wts[19]

        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096,
                        activation='relu',
                        name='dense_1',
                        weights=[dense_1_w, dense_1_b],
                        trainable=trainable)(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096,
                        activation='relu',
                        name='dense_2',
                        weights=[dense_2_w, dense_2_b],
                        trainable=trainable)(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        weights=[tr_wts[20], tr_wts[21]],
                        trainable=trainable,
                        init='he_normal')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    elif layer_id == 2:
        inputs = Input(shape=(384, 13, 13))
        conv_4 = ZeroPadding2D((1, 1))(inputs)

        conv4_w_t = tr_wts[8]
        conv4_w = np.empty((2, conv4_w_t.shape[0], conv4_w_t.shape[1],
                            conv4_w_t.shape[2], conv4_w_t.shape[3]),
                           np.float32)
        conv4_w[0, :] = conv4_w_t.copy()
        del conv4_w_t
        conv4_w[1, :] = tr_wts[10]

        conv4_b_t = tr_wts[9]
        conv4_b = np.empty((2, conv4_b_t.shape[0]), np.float32)
        conv4_b[0, :] = conv4_b_t.copy()
        del conv4_b_t
        conv4_b[1, :] = tr_wts[11]

        conv_4 = merge([
            Convolution2D(192,
                          3,
                          3,
                          activation="relu",
                          name='conv_4_' + str(i + 1),
                          weights=[conv4_w[i, :], conv4_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_4))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_4")

        conv_5 = ZeroPadding2D((1, 1))(conv_4)

        conv5_w_t = tr_wts[12]
        conv5_w = np.empty((2, conv5_w_t.shape[0], conv5_w_t.shape[1],
                            conv5_w_t.shape[2], conv5_w_t.shape[3]),
                           np.float32)
        conv5_w[0, :] = conv5_w_t.copy()
        del conv5_w_t
        conv5_w[1, :] = tr_wts[14]

        conv5_b_t = tr_wts[13]
        conv5_b = np.empty((2, conv5_b_t.shape[0]), np.float32)
        conv5_b[0, :] = conv5_b_t.copy()
        del conv5_b_t
        conv5_b[1, :] = tr_wts[15]

        conv_5 = merge([
            Convolution2D(128,
                          3,
                          3,
                          activation="relu",
                          name='conv_5_' + str(i + 1),
                          weights=[conv5_w[i, :], conv5_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_5))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_5")

        dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                               name="convpool_5")(conv_5)

        dense_1_w = tr_wts[16]
        dense_1_b = tr_wts[17]

        dense_2_w = tr_wts[18]
        dense_2_b = tr_wts[19]

        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096,
                        activation='relu',
                        name='dense_1',
                        weights=[dense_1_w, dense_1_b],
                        trainable=trainable)(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096,
                        activation='relu',
                        name='dense_2',
                        weights=[dense_2_w, dense_2_b],
                        trainable=trainable)(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        weights=[tr_wts[20], tr_wts[21]],
                        trainable=trainable,
                        init='he_normal')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    elif layer_id == 3:
        inputs = Input(shape=(384, 13, 13))
        conv_5 = ZeroPadding2D((1, 1))(inputs)

        conv5_w_t = tr_wts[12]
        conv5_w = np.empty((2, conv5_w_t.shape[0], conv5_w_t.shape[1],
                            conv5_w_t.shape[2], conv5_w_t.shape[3]),
                           np.float32)
        conv5_w[0, :] = conv5_w_t.copy()
        del conv5_w_t
        conv5_w[1, :] = tr_wts[14]

        conv5_b_t = tr_wts[13]
        conv5_b = np.empty((2, conv5_b_t.shape[0]), np.float32)
        conv5_b[0, :] = conv5_b_t.copy()
        del conv5_b_t
        conv5_b[1, :] = tr_wts[15]

        conv_5 = merge([
            Convolution2D(128,
                          3,
                          3,
                          activation="relu",
                          name='conv_5_' + str(i + 1),
                          weights=[conv5_w[i, :], conv5_b[i, :]],
                          trainable=trainable)(splittensor(ratio_split=2,
                                                           id_split=i)(conv_5))
            for i in range(2)
        ],
                       mode='concat',
                       concat_axis=1,
                       name="conv_5")

        dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                               name="convpool_5")(conv_5)

        dense_1_w = tr_wts[16]
        dense_1_b = tr_wts[17]

        dense_2_w = tr_wts[18]
        dense_2_b = tr_wts[19]

        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096,
                        activation='relu',
                        name='dense_1',
                        weights=[dense_1_w, dense_1_b],
                        trainable=trainable)(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096,
                        activation='relu',
                        name='dense_2',
                        weights=[dense_2_w, dense_2_b],
                        trainable=trainable)(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        weights=[tr_wts[20], tr_wts[21]],
                        trainable=trainable,
                        init='he_normal')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    elif layer_id == 4:
        inputs = Input(shape=(256, 13, 13))
        dense_1 = MaxPooling2D((3, 3), strides=(2, 2),
                               name="convpool_5")(inputs)

        dense_1_w = tr_wts[16]
        dense_1_b = tr_wts[17]

        dense_2_w = tr_wts[18]
        dense_2_b = tr_wts[19]

        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096,
                        activation='relu',
                        name='dense_1',
                        weights=[dense_1_w, dense_1_b],
                        trainable=trainable)(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096,
                        activation='relu',
                        name='dense_2',
                        weights=[dense_2_w, dense_2_b],
                        trainable=trainable)(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(out_dim,
                        name='dense_3',
                        weights=[tr_wts[20], tr_wts[21]],
                        trainable=trainable,
                        init='he_normal')(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    model = Model(input=inputs, output=prediction)

    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    return model, layer_dict
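A minimal usage sketch for the layer-indexed builder above. The enclosing function's real name and signature are defined earlier in this file; `partial_alexnet(layer_id, tr_wts, out_dim, trainable)` is a hypothetical stand-in, and `alexnet_weights.npy` is a placeholder for however the pretrained weight list tr_wts (indexed 0..21) is actually produced.

import numpy as np

tr_wts = np.load('alexnet_weights.npy')  # placeholder: list-like, tr_wts[0]..tr_wts[21]
# out_dim must match tr_wts[20].shape[1], because dense_3 is initialised from those weights.
model, layer_dict = partial_alexnet(layer_id=2, tr_wts=tr_wts,
                                    out_dim=1000, trainable=False)
model.compile(optimizer='sgd', loss='categorical_crossentropy')
print(sorted(layer_dict.keys()))  # layer name -> layer object, e.g. 'conv_4_1', 'dense_3'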
Example #15
def AlexNet(outdim=1000, weights_path=None, heatmap=False, l1=0, l2=0,
            usemil=False, usemymil=False, k=1., usemysoftmil=False,
            softmink=1., softmaxk=1., sparsemil=False, sparsemill1=0.,
            sparsemill2=0., saveact=False):
    l1factor = l1
    l2factor = l2
    if heatmap:
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96,
                           11,
                           11,
                           subsample=(4, 4),
                           activation='relu',
                           W_regularizer=l1l2(l1=l1factor, l2=l2factor),
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = merge([
        Convolution2D(128,
                      5,
                      5,
                      activation="relu",
                      name='conv_2_' + str(i + 1),
                      W_regularizer=l1l2(l1=l1factor, l2=l2factor))(
                          splittensor(ratio_split=2, id_split=i)(conv_2))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384,
                           3,
                           3,
                           activation='relu',
                           name='conv_3',
                           W_regularizer=l1l2(l1=l1factor,
                                              l2=l2factor))(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = merge([
        Convolution2D(192,
                      3,
                      3,
                      activation="relu",
                      name='conv_4_' + str(i + 1),
                      W_regularizer=l1l2(l1=l1factor, l2=l2factor))(
                          splittensor(ratio_split=2, id_split=i)(conv_4))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_4")

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = merge([
        Convolution2D(128,
                      3,
                      3,
                      activation="relu",
                      name='conv_5_' + str(i + 1),
                      W_regularizer=l1l2(l1=l1factor, l2=l2factor))(
                          splittensor(ratio_split=2, id_split=i)(conv_5))
        for i in range(2)
    ],
                   mode='concat',
                   concat_axis=1,
                   name="conv_5")

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)

    if heatmap:
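        # Fully-convolutional head: the dense layers become 6x6 and 1x1 convolutions
        # and Softmax4D (a custom layer defined elsewhere) normalises across channels,
        # so the network emits a spatial class-probability map instead of a vector.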
        dense_1 = Convolution2D(4096,
                                6,
                                6,
                                activation="relu",
                                name="dense_1",
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_2 = Convolution2D(4096,
                                1,
                                1,
                                activation="relu",
                                name="dense_2",
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_3 = Convolution2D(outdim,
                                1,
                                1,
                                name="dense_3",
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_2)
        prediction = Softmax4D(axis=1, name="softmax")(dense_3)
    elif usemil:
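        # MIL head: 1x1 convolutions produce per-location class scores, Softmax4D
        # normalises them across channels, a 6x6 max pool aggregates over space and
        # the custom Recalc layer (defined elsewhere) post-processes the flattened output.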
        dense_1 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_1',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_2 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_2',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_3 = Convolution2D(outdim,
                                1,
                                1,
                                name='mil_3',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_2)
        prediction_1 = Softmax4D(axis=1, name='softmax')(dense_3)
        #prediction = Flatten(name='flatten')(prediction_1)
        #dense_3 = Dense(outdim,name='dense_3',W_regularizer=l1l2(l1=l1factor, l2=l2factor))(prediction)
        #prediction = Activation("softmax",name="softmax2")(dense_3)

        prediction_1 = MaxPooling2D((6, 6), name='output')(prediction_1)
        prediction = Flatten(name='flatten')(prediction_1)
        prediction = Recalc(axis=1, name='Recalcmil')(prediction)
    elif usemymil:
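        # Alternative MIL head: a single-channel sigmoid map is flattened and then
        # aggregated by the custom ReRank layer (defined elsewhere), parameterised by k.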
        dense_1 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_1',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_2 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_2',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_3 = Convolution2D(1,
                                1,
                                1,
                                activation='sigmoid',
                                name='mil_3',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_2)
        #prediction_1 = Softmax4D(axis=1, name='softmax')(dense_3)
        #prediction = ExtractDim(axis=1, name='extract')(prediction_1)
        prediction = Flatten(name='flatten')(dense_3)
        prediction = ReRank(k=k, label=1, name='output')(prediction)
    elif usemysoftmil:
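        # Soft variant of the ReRank head: the flattened sigmoid map is aggregated by
        # the custom SoftReRank layer, parameterised by softmink and softmaxk.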
        dense_1 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_1',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_2 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_2',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_3 = Convolution2D(1,
                                1,
                                1,
                                activation='sigmoid',
                                name='mil_3',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_2)
        #prediction_1 = Softmax4D(axis=1, name='softmax')(dense_3)
        #prediction = ExtractDim(axis=1, name='extract')(prediction_1)
        prediction = Flatten(name='flatten')(dense_3)
        prediction = SoftReRank(softmink=softmink,
                                softmaxk=softmaxk,
                                label=1,
                                name='output')(prediction)
    elif sparsemil:
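        # Sparse MIL head: the sigmoid map carries an L1/L2 activity regulariser to
        # encourage sparse activations; with saveact=True the model returning the
        # flattened map is returned directly, otherwise the map is aggregated by the
        # custom RecalcExpand layer (defined elsewhere).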
        dense_1 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_1',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        dense_2 = Convolution2D(128,
                                1,
                                1,
                                activation='relu',
                                name='mil_2',
                                W_regularizer=l1l2(l1=l1factor,
                                                   l2=l2factor))(dense_1)
        prediction_1 = Convolution2D(1,
                                     1,
                                     1,
                                     activation='sigmoid',
                                     name='mil_3',
                                     W_regularizer=l1l2(l1=l1factor,
                                                        l2=l2factor),
                                     activity_regularizer=activity_l1l2(
                                         l1=sparsemill1,
                                         l2=sparsemill2))(dense_2)
        #        prediction_1 = Softmax4D(axis=1, name='softmax')(prediction_1)
        #dense_3 = Convolution2D(outdim,1,1,name='mil_3',W_regularizer=l1l2(l1=l1factor, l2=l2factor))(dense_2)
        #prediction_1 = Softmax4D(axis=1, name='softmax')(dense_3)
        #prediction_1 = ActivityRegularizerOneDim(l1=sparsemill1, l2=sparsemill2)(prediction_1)
        #prediction = MaxPooling2D((6,6), name='output')(prediction_1)
        #        prediction_1 = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same', name='smooth', \
        #            W_regularizer=l1l2(l1=l1factor, l2=l2factor), activity_regularizer=activity_l1l2(l1=sparsemill1, l2=sparsemill2))(prediction_1)
        prediction = Flatten(name='flatten')(prediction_1)
        if saveact:
            model = Model(input=inputs, output=prediction)
            return model
        prediction = RecalcExpand(axis=1, name='Recalcmil')(prediction)
    else:
        dense_1 = Flatten(name="flatten")(dense_1)
        dense_1 = Dense(4096,
                        activation='relu',
                        name='dense_1',
                        W_regularizer=l1l2(l1=l1factor, l2=l2factor))(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096,
                        activation='relu',
                        name='dense_2',
                        W_regularizer=l1l2(l1=l1factor, l2=l2factor))(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(outdim,
                        name='dense_3',
                        W_regularizer=l1l2(l1=l1factor, l2=l2factor))(dense_3)
        prediction = Activation("softmax", name="softmax")(dense_3)

    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
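A minimal usage sketch for the builder above; the custom aggregation layers (Softmax4D, Recalc, ReRank, SoftReRank, RecalcExpand) and any pretrained weight file are assumed to be provided elsewhere in the project, and the optimizer/loss choices are illustrative only.

clf = AlexNet(outdim=2, l2=1e-4)       # plain Dense/softmax classifier head
clf.compile(optimizer='sgd', loss='categorical_crossentropy')

mil = AlexNet(outdim=2, usemil=True)   # 1x1-conv MIL head (Softmax4D + Recalc)
mil.compile(optimizer='sgd', loss='categorical_crossentropy')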
Example #16
File: test.py Project: jackg0h/foodtag
def load_svm_model(nb_class, weights_path=None):

    inputs = Input(shape=(3,227,227))

    conv_1 = Convolution2D(96, 11, 11,subsample=(4,4),activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2,2))(conv_1)
    conv_2 = crosschannelnormalization(name="convpool_1")(conv_2)
    conv_2 = ZeroPadding2D((2,2))(conv_2)
    conv_2 = merge([
        Convolution2D(128,5,5,activation="relu",name='conv_2_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_2)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_2")

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1,1))(conv_3)
    conv_3 = Convolution2D(384,3,3,activation='relu',name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1,1))(conv_3)
    conv_4 = merge([
        Convolution2D(192,3,3,activation="relu",name='conv_4_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_4)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_4")

    conv_5 = ZeroPadding2D((1,1))(conv_4)
    conv_5 = merge([
        Convolution2D(128,3,3,activation="relu",name='conv_5_'+str(i+1))(
            splittensor(ratio_split=2,id_split=i)(conv_5)
        ) for i in range(2)], mode='concat',concat_axis=1,name="conv_5")

    conv_5 = MaxPooling2D((3, 3), strides=(2,2),name="convpool_5")(conv_5)

    dense_1 = Flatten(name="flatten")(conv_5)
    dense_1 = Dense(4096, activation='relu',name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu',name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    dense_3 = Dense(nb_class,name='dense_3')(dense_3)
    prediction = Activation("softmax",name="softmax")(dense_3)


    base_model = Model(input=inputs, output=prediction)

    if weights_path:
        base_model.load_weights(weights_path)


    #model = Model(input=inputs, output=conv_5)

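    # Rebuilding the Model over the same (already weight-loaded) layers exposes the
    # 4096-d dense_2 activations as the output, to be fed to an external SVM.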
    base_model = Model(input=inputs, output=dense_2)

    '''
    for layer in base_model.layers:
        layer.trainable = False

    model = get_top_model_for_svm(
        shape=base_model.output_shape[1:],
        nb_class=nb_class,
        weights_file_path="model/alex_topmodel" + str(fold_count) + ".h5",
        input= base_model.input,
        output= base_model.output)
    '''

    return base_model
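A minimal usage sketch: extract dense_2 features for an external SVM. 'alexnet_weights.h5' and the random input are placeholders; real use would pass a preprocessed image batch in (3, 227, 227) layout.

import numpy as np

feat_model = load_svm_model(nb_class=10, weights_path='alexnet_weights.h5')
x = np.random.rand(1, 3, 227, 227).astype('float32')  # stand-in for a preprocessed image batch
features = feat_model.predict(x)                       # shape (1, 4096): dense_2 activations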