예제 #1
0
def nasnet_retinanet(num_classes,
                     backbone='nasnet',
                     inputs=None,
                     modifier=None,
                     **kwargs):
    """Construct a RetinaNet model on top of a NASNetLarge backbone.

    :param num_classes: Number of classes for the detection heads.
    :param backbone: Backbone identifier; only 'nasnet' is supported.
    :param inputs: Optional input tensor; a default image input is created
        when None.
    :param modifier: Optional callable applied to the backbone model
        (e.g. to freeze layers) before feature extraction.
    :param kwargs: Extra arguments forwarded to ``retinanet.retinanet``.
    :return: The model returned by ``retinanet.retinanet``.
    :raises ValueError: If ``backbone`` is not 'nasnet'.
    """
    # NOTE(review): `k` is presumably an alias for keras.backend — verify.
    k.clear_session()

    # Build a default image input matching the backend's data format.
    if inputs is None:
        channels_first = keras.backend.image_data_format() == 'channels_first'
        shape = (3, None, None) if channels_first else (None, None, 3)
        inputs = keras.layers.Input(shape=shape)

    if backbone != 'nasnet':
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    nasnet_model = NASNetLarge(weights=None,
                               include_top=False,
                               input_tensor=inputs)

    # Let the caller post-process the backbone before tapping features.
    if modifier:
        nasnet_model = modifier(nasnet_model)

    # Feature maps at three depths feed the RetinaNet pyramid.
    backbone_layers = [
        nasnet_model.get_layer('add_4').output,
        nasnet_model.get_layer('activation_204').output,
        nasnet_model.output,
    ]
    return retinanet.retinanet(inputs=inputs,
                               num_classes=num_classes,
                               backbone_layers=backbone_layers,
                               **kwargs)
예제 #2
0
def save_model12(new_model_path, conv_model_path):
	"""Build a NASNetLarge transfer-learning classifier and save both models.

	Relies on module-level globals: ``img_width``, ``img_height``,
	``pretrained``, ``num_fc_layers``, ``num_fc_neurons``, ``dropout``
	and ``num_classes``.

	:param new_model_path: Path where the full classifier model is saved.
	:param conv_model_path: Path where the convolutional base is saved.
	"""
	# Choose the weights up front instead of building the backbone twice
	# (the original constructed and discarded a weightless model when
	# `pretrained` was set).
	model = NASNetLarge(
		input_shape=(img_width, img_height, 3),
		include_top=False,
		weights='imagenet' if pretrained else None
	)
	model.summary()

	# FIXME: '?' is a placeholder layer name and will raise ValueError at
	# runtime -- replace it with the real transfer layer's name.
	transfer_layer = model.get_layer('?')
	conv_model = Model(inputs=model.input,
					   outputs=transfer_layer.output)

	# Stack a small fully-connected head on top of the convolutional base.
	new_model = Sequential()
	new_model.add(conv_model)
	new_model.add(GlobalAveragePooling2D())
	if num_fc_layers >= 1:
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	if num_fc_layers >= 2:
		new_model.add(Dropout(dropout))
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	if num_fc_layers >= 3:
		new_model.add(Dropout(dropout))
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	new_model.add(Dense(num_classes, activation='softmax'))

	print(new_model.summary())

	new_model.save(new_model_path)
	conv_model.save(conv_model_path)
	return
예제 #3
0
# Shape sanity check on the validation inputs.
print(x_val.shape)
#print(np.asarray(range(len(uc))))
#print(y_val[0,:])


# ## Adapting the NasNetLarge Architecture to Regression Problems

# In[8]:


from keras.applications.nasnet import NASNetLarge
from keras.models import Model

# Load NASNetLarge with ImageNet weights; include_top=True keeps the
# classifier head so its penultimate layer can be tapped below.
model = NASNetLarge(weights='imagenet', include_top=True, input_shape=(331, 331, 3))

# Output of the second-to-last layer (the features that fed the original
# classification layer).
x = model.get_layer(index=len(model.layers)-2).output

print(x)
# Single linear unit: converts the network into a regression model.
# NOTE(review): Dense is used without a visible import here -- presumably
# imported in an earlier notebook cell; verify before running.
x = Dense(1)(x)

model = Model(inputs=model.input, outputs=x)
model.summary()


# **Using the RMSprop optimizer, mean absolute error as the metric, and mean squared error as the loss**

# In[ ]:


# NOTE(review): RMSprop also appears without a visible import -- verify.
opt = RMSprop(lr=0.0001)
model.compile(loss='mean_squared_error', optimizer=opt, metrics=['mae'])
예제 #4
0
def NASNet_large_FCN(input_image, weights=None):
    """Build an FCN-style segmentation model on a NASNetLarge backbone.

    Feature maps from three depths of NASNetLarge are 1x1-convolved to
    6 channels, fused by addition, and progressively upsampled
    (2x, 2x, then 8x) to produce a 6-channel score map.

    :param input_image: Input shape tuple, e.g. (H, W, 3).
    :param weights: Weights spec forwarded to NASNetLarge
        (None or 'imagenet').
    :return: A keras Model mapping the input image to the upsampled scores.
    """
    input_tensor = Input(shape=input_image)
    model = NASNetLarge(input_shape=input_image,
                        input_tensor=input_tensor,
                        include_top=False,
                        weights=weights)

    # Deepest feature map tapped from the backbone.
    normal_18 = model.get_layer(name='normal_concat_18').output

    # Two groups of intermediate activations, each summed into one tensor.
    deeper_group = ['activation_118', 'activation_130', 'activation_142',
                    'activation_154', 'activation_166', 'activation_178']
    fuse_activation_7_10 = Add()(
        [model.get_layer(name=n).output for n in deeper_group])

    shallower_group = ['activation_35', 'activation_47', 'activation_59',
                       'activation_71', 'activation_83', 'activation_95']
    fuse_activation_0_5 = Add()(
        [model.get_layer(name=n).output for n in shallower_group])

    # Score the deepest map and upsample 2x to the next scale.
    conv_normal_18 = Conv2D(filters=6, kernel_size=(1, 1))(normal_18)
    upscore_normal_18 = Conv2DTranspose(filters=6,
                                        kernel_size=(4, 4),
                                        strides=(2, 2),
                                        padding='same')(conv_normal_18)

    # Fuse with the mid-group scores and upsample 2x again.
    conv_fuse_7_10 = Conv2D(filters=6,
                            kernel_size=(1, 1))(fuse_activation_7_10)
    conv_fuse_7_10 = Add()([conv_fuse_7_10, upscore_normal_18])
    upscore_fuse_7_10 = Conv2DTranspose(filters=6,
                                        kernel_size=(4, 4),
                                        strides=(2, 2),
                                        padding='same')(conv_fuse_7_10)

    # Fuse with the shallow-group scores and upsample 8x for the output.
    conv_fuse_0_5 = Conv2D(filters=6, kernel_size=(1, 1))(fuse_activation_0_5)
    conv_fuse_0_5 = Add()([conv_fuse_0_5, upscore_fuse_7_10])
    upscore = Conv2DTranspose(filters=6,
                              kernel_size=(16, 16),
                              strides=(8, 8),
                              padding='same')(conv_fuse_0_5)

    model = Model(inputs=input_tensor, outputs=upscore)
    # Fixed: original used the Python 2 `print model.summary()` statement,
    # a SyntaxError under Python 3.
    print(model.summary())
    return model
예제 #5
0
def NASNet_large_ensemble_FCN(input_image, weights=None, fine_tune=False):
    """Segmentation model over NASNetLarge with three output resolutions.

    Like the single-output FCN variant, but returns score maps from the
    8x, 16x and 32x upsampling stages so they can be supervised as an
    ensemble of heads.

    :param input_image: Input shape tuple, e.g. (H, W, 3).
    :param weights: Weights spec forwarded to NASNetLarge.
    :param fine_tune: Accepted for API compatibility; not used in this body.
    :return: A keras Model with outputs [output_8, output_16, output_32].
    """
    net_input = Input(shape=input_image)
    backbone = NASNetLarge(input_shape=input_image,
                           input_tensor=net_input,
                           include_top=False,
                           weights=weights)

    # Deepest feature map tapped from the backbone.
    deepest = backbone.get_layer(name='normal_concat_18').output

    # Two groups of intermediate activations, each summed element-wise.
    fused_deep = Add()([
        backbone.get_layer(name='activation_118').output,
        backbone.get_layer(name='activation_130').output,
        backbone.get_layer(name='activation_142').output,
        backbone.get_layer(name='activation_154').output,
        backbone.get_layer(name='activation_166').output,
        backbone.get_layer(name='activation_178').output,
    ])
    fused_shallow = Add()([
        backbone.get_layer(name='activation_35').output,
        backbone.get_layer(name='activation_47').output,
        backbone.get_layer(name='activation_59').output,
        backbone.get_layer(name='activation_71').output,
        backbone.get_layer(name='activation_83').output,
        backbone.get_layer(name='activation_95').output,
    ])

    # Score the deepest map and upsample 2x.
    score_deep = Conv2D(filters=6, kernel_size=(1, 1))(deepest)
    up_deep = Conv2DTranspose(filters=6,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              padding='same')(score_deep)

    # Fuse with the first group's scores and upsample 2x again.
    score_mid = Conv2D(filters=6,
                       kernel_size=(1, 1))(fused_deep)
    score_mid = Add()([score_mid, up_deep])
    up_mid = Conv2DTranspose(filters=6,
                             kernel_size=(4, 4),
                             strides=(2, 2),
                             padding='same')(score_mid)

    # Fuse with the second group's scores for the finest head.
    score_fine = Conv2D(filters=6, kernel_size=(1, 1))(fused_shallow)
    score_fine = Add()([score_fine, up_mid])

    # Three heads: each stage upsampled straight to the input resolution.
    output_8 = Conv2DTranspose(filters=6,
                               kernel_size=(16, 16),
                               strides=(8, 8),
                               padding='same')(score_fine)
    output_16 = Conv2DTranspose(filters=6,
                                kernel_size=(32, 32),
                                strides=(16, 16),
                                padding='same')(score_mid)
    output_32 = Conv2DTranspose(filters=6,
                                kernel_size=(64, 64),
                                strides=(32, 32),
                                padding='same')(score_deep)

    return Model(inputs=net_input,
                 outputs=[output_8, output_16, output_32])
예제 #6
0
def TransferNet(input_shape: Tuple[int, int],
                num_classes: int,
                feature_extractor: FeatureExtractor = None,
                dense_layers: int = 3,
                dropout_rate: float = 0.3) -> Model:
    """
    Exploits a pre-trained model as feature extractor, feeding its output into a fully-connected NN.
    The feature extractor model is NOT fine-tuned for the specific task.
    Dropout and batch normalization are used throughout the trainable portion of the network.

    :param input_shape: Shape of the input tensor as a 2-dimensional int tuple
    :param num_classes: Number of classes for the final FC layer
    :param feature_extractor: FeatureExtractor instance representing which pre-trained model to use as feature extractor
    :param dense_layers: Number of layers for the FC NN
    :param dropout_rate: Dropout rate

    :return: a Keras model
    """

    adam_opt = Adam(lr=0.1)
    model_input = Input(shape=input_shape)

    # Dispatch table: enum value -> pre-trained model constructor.
    # Any unrecognized value (including None) falls back to NASNetMobile.
    extractors = {
        FeatureExtractor.Dense121: DenseNet121,
        FeatureExtractor.Dense169: DenseNet169,
        FeatureExtractor.Dense201: DenseNet201,
        FeatureExtractor.NASNetLarge: NASNetLarge,
    }
    fe_class = extractors.get(feature_extractor, NASNetMobile)
    fe_model = fe_class(weights="imagenet",
                        include_top=False,
                        input_tensor=model_input)

    # Fixed: Model(input=..., output=...) uses keyword names removed from
    # Keras; the current API requires inputs=/outputs= (as the other
    # models in this file already use).
    fe_model = Model(inputs=model_input,
                     outputs=fe_model.output,
                     name="FeatureExtractor")
    fe_model.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer=adam_opt,
                     metrics=["accuracy"])

    # get handles to the model (input, output tensors)
    fe_input = fe_model.get_layer(index=0).input
    fe_output = fe_model.get_layer(index=-1).output

    # Freeze the pre-trained layers so only the new head is trainable.
    for layer in fe_model.layers:
        layer.trainable = False

    # Final fully-connected head with batch norm and dropout throughout.
    dense = Flatten()(fe_output)
    dense = BatchNormalization()(dense)
    dense = Dropout(rate=dropout_rate)(dense)

    # Shrinking stack: 128, 64, 42, ... units over `dense_layers` layers.
    num_units = 128
    for i in range(1, dense_layers + 1):
        dense = Dense(units=int(num_units / i), activation="relu")(dense)
        dense = BatchNormalization()(dense)
        dense = Dropout(rate=dropout_rate)(dense)

    output_dense = Dense(units=num_classes, activation="softmax")(dense)

    model = Model(inputs=fe_input, outputs=output_dense, name="TransferNet")
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=adam_opt,
                  metrics=["accuracy"])

    return model