Example #1
def save_model11(new_model_path, conv_model_path):
	# Build the NASNetMobile backbone; load ImageNet weights only when requested.
	model = NASNetMobile(
		input_shape=(img_width, img_height, 3),
		include_top=False,
		weights='imagenet' if pretrained else None
	)
	model.summary()
	# '?' is a placeholder: replace it with the name of the layer to cut the backbone at.
	transfer_layer = model.get_layer('?')
	conv_model = Model(inputs=model.input,
					   outputs=transfer_layer.output)
	new_model = Sequential()
	new_model.add(conv_model)
	new_model.add(GlobalAveragePooling2D())
	if num_fc_layers>=1:
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	if num_fc_layers>=2:
		new_model.add(Dropout(dropout))
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	if num_fc_layers>=3:
		new_model.add(Dropout(dropout))
		new_model.add(Dense(num_fc_neurons, activation='relu'))
	new_model.add(Dense(num_classes, activation='softmax'))

	print(new_model.summary())

	new_model.save(new_model_path)
	conv_model.save(conv_model_path)
	return
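
save_model11 relies on several module-level names that are not shown in this example (img_width, img_height, pretrained, num_fc_layers, num_fc_neurons, dropout, num_classes), and the '?' layer name above must be replaced with a real layer before the function can run. A minimal, hypothetical driver might look like this (all values are illustrative only):
# Illustrative globals assumed by save_model11; not part of the original example.
img_width, img_height = 224, 224
pretrained = True
num_fc_layers = 2
num_fc_neurons = 1024
dropout = 0.5
num_classes = 10

save_model11('new_model.h5', 'conv_model.h5')
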
Example #2
def NASNet_ensemble_FCN(input_image, weights=None, fine_tune=False):

    input_tensor = Input(shape=(input_image))
    model = NASNetMobile(input_shape=(224, 224, 3),
                         input_tensor=input_tensor,
                         include_top=False,
                         weights=weights)
    # print(model.summary())
    # return
    # For fine-tuning: freeze all pretrained NASNet layers.
    if fine_tune:
        nasnet_layers = model.layers
        for layer in nasnet_layers:
            print(layer)
            layer.trainable = False

    stem_2 = model.get_layer(name='reduction_concat_stem_2').output
    reduce_4 = model.get_layer(name='reduction_concat_reduce_4').output
    normal_12 = model.get_layer(name='normal_concat_12').output

    #fcn_8s
    conv_normal_12 = Conv2D(filters=6, kernel_size=(1, 1))(normal_12)
    upscore_normal_12 = Conv2DTranspose(filters=6,
                                        kernel_size=(4, 4),
                                        strides=(2, 2),
                                        padding='same')(conv_normal_12)

    conv_reduce_4 = Conv2D(filters=6, kernel_size=(1, 1))(reduce_4)
    fuse_4 = Add()([conv_reduce_4, upscore_normal_12])
    upscore_fuse_4 = Conv2DTranspose(filters=6,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding='same')(fuse_4)

    conv_stem_2 = Conv2D(filters=6, kernel_size=(1, 1))(stem_2)
    fuse_2 = Add()([conv_stem_2, upscore_fuse_4])

    output_8 = Conv2DTranspose(filters=6,
                               kernel_size=(16, 16),
                               strides=(8, 8),
                               padding='same')(fuse_2)

    output_16 = Conv2DTranspose(filters=6,
                                kernel_size=(32, 32),
                                strides=(16, 16),
                                padding='same')(fuse_4)

    #fcn_32s
    output_32 = Conv2DTranspose(filters=6,
                                kernel_size=(64, 64),
                                strides=(32, 32),
                                padding='same')(conv_normal_12)
    model = Model(inputs=input_tensor,
                  outputs=[output_8, output_16, output_32])
    print(model.summary())
    return model
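
Since the NASNetMobile backbone above is built with input_shape=(224, 224, 3), the function expects a matching input size. A minimal, hypothetical call might be:
# Hypothetical usage; not part of the original example.
fcn_model = NASNet_ensemble_FCN((224, 224, 3), weights='imagenet', fine_tune=True)
# Three outputs (strides 8, 16 and 32), each with 6 channels.
print([o.shape for o in fcn_model.outputs])
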
Example #3
def get_model_classif_NASNetMobile(input_shape, learning_rate, num_classes,
                                   flags):
    input_tensor = Input(shape=input_shape, name='input')
    base_model = NASNetMobile(include_top=False,
                              input_shape=input_shape,
                              weights='imagenet')  # , weights=None
    bn = BatchNormalization()(input_tensor)
    base_model.summary()
    x = base_model(bn)
    out1 = GlobalMaxPooling2D()(x)
    out2 = GlobalAveragePooling2D()(x)
    out3 = Flatten()(x)
    out = Concatenate(axis=-1)([out1, out2, out3])
    out = Dropout(0.5)(out)
    out = Dense(num_classes, activation="sigmoid", name="3_")(out)
    model = Model(input_tensor, out)

    if flags == 1:
        # Fine-tune the pretrained model: only its last 5 layers stay trainable.
        base_model.trainable = False
        set_trainable = False
        num_layers = len(base_model.layers)
        for i, layer in enumerate(base_model.layers):
            if i >= num_layers - 5:
                set_trainable = True
            if set_trainable:
                layer.trainable = True
            else:
                layer.trainable = False
        # Compile the model
        model.compile(optimizer=Adam(learning_rate),
                      loss=binary_crossentropy,
                      metrics=['acc'])
    else:
        # Unfreeze all layers
        for layer in model.layers:
            layer.trainable = True
        # Compile the model
        model.compile(optimizer=Adam(learning_rate / 10),
                      loss=binary_crossentropy,
                      metrics=['acc'])

    model.summary()
    return model
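
The head uses a sigmoid output with binary cross-entropy, so num_classes is typically 1 for a binary problem (or the number of independent labels). A minimal, hypothetical call, assuming the ImageNet input size required by the pretrained NASNetMobile weights:
# Hypothetical usage; values are illustrative, not from the original example.
model = get_model_classif_NASNetMobile(input_shape=(224, 224, 3),
                                       learning_rate=1e-3,
                                       num_classes=1,
                                       flags=1)
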
Example #4
def autoencoder_train(folder, batch_size, epoch_size, model_name):
    """
    Autoencoding, inherently UNET, is a data compression algorithm where the compression and decompression functions are:
    - data specific, ie, only compress data similar to what they have been trained on
    - lossy, ie, decompressed output will be degraded
    - learned automatically from examples.

    Two practical applications of autoencoders are data removal and dimensionality reduction

    There is an implementation from scikit-learn:
    http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html

    :param folder: image folder for training
    :param batch_size: training batch size
    :param epoch_size: training epoch size
    :param model_name: IR2, InceptionResNetV2; NL, NASNetLarge; NM, NASNetLarge
    :return: None
    """
    image_wh = system_config['image_wh']

    image_size = (image_wh, image_wh)
    image_shape = (image_wh, image_wh, 1)

    train_list, valid_list = create_tv_list(folder)
    print(f'Train size: {len(train_list)}, valid size: {len(valid_list)}')

    train_df = pd.DataFrame(train_list, columns=['fname', 'class'])
    valid_df = pd.DataFrame(valid_list, columns=['fname', 'class'])

    model = None
    if 'NM' in model_name:
        model_name = 'NM'
        model = NASNetMobile(include_top=True,
                             weights=None,
                             input_tensor=None,
                             input_shape=image_shape,
                             pooling='max',
                             classes=6)
    elif 'NL' in model_name:
        model_name = 'NL'
        model = NASNetLarge(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=6)
    elif 'XC' in model_name:
        model_name = 'XC'
        model = Xception(include_top=True,
                         weights=None,
                         input_tensor=None,
                         input_shape=image_shape,
                         pooling='max',
                         classes=6)
    elif 'D21' in model_name:
        model_name = 'D21'
        model = DenseNet201(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=6)
    elif 'IV3' in model_name:
        model_name = 'IV3'
        model = InceptionV3(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=6)
    elif 'SC' in model_name:
        model_name = 'SC'
        model = simple_cnn(input_shape=image_shape, classes=6)
    else:
        model_name = 'IR2'
        model = InceptionResNetV2(include_top=True,
                                  weights=None,
                                  input_tensor=None,
                                  input_shape=image_shape,
                                  pooling='max',
                                  classes=6)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])
    model.summary()

    # Image generator does data augmentation:
    datagen = data_generator()

    train_gen = datagen.flow_from_dataframe(dataframe=train_df,
                                            directory=folder,
                                            x_col="fname",
                                            y_col="class",
                                            class_mode="categorical",
                                            target_size=image_size,
                                            color_mode='grayscale',
                                            batch_size=batch_size,
                                            shuffle=False)

    valid_gen = datagen.flow_from_dataframe(dataframe=valid_df,
                                            directory=folder,
                                            x_col="fname",
                                            y_col="class",
                                            class_mode="categorical",
                                            target_size=image_size,
                                            color_mode='grayscale',
                                            batch_size=batch_size,
                                            shuffle=False)

    # Prepare the model saving directory.
    save_dir = Path(os.path.dirname(
        os.path.realpath(__file__))).joinpath('models')
    if not save_dir.is_dir():
        save_dir.mkdir(exist_ok=True)
    filepath = f'{str(save_dir)}/{MODEL_NAMES[model_name]}'
    print(f'{filepath}\n')

    # Prepare callbacks for model saving and for learning rate adjustment.
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)

    lr_scheduler = LearningRateScheduler(lr_schedule)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)

    callbacks = [checkpoint, lr_reducer, lr_scheduler]

    # Fit the model on batches produced by the data generators.
    steps_per_epoch = int(len(train_list) / batch_size)
    validation_steps = int(len(valid_list) / batch_size)
    history = model.fit_generator(generator=train_gen,
                                  steps_per_epoch=steps_per_epoch,
                                  validation_data=valid_gen,
                                  validation_steps=validation_steps,
                                  epochs=epoch_size,
                                  use_multiprocessing=False,
                                  verbose=1,
                                  workers=4,
                                  callbacks=callbacks)

    # Score the trained model on the validation generator.
    scores = model.evaluate_generator(generator=valid_gen,
                                      steps=validation_steps,
                                      verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    # Save score in configuration file
    system_config[f'{model_name}_Accuracy'] = scores[1]
    save_config()

    return history
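
The function above calls lr_schedule(0) and registers LearningRateScheduler(lr_schedule), but lr_schedule itself is not shown. A typical step-decay schedule used with these callbacks might look roughly like the sketch below; the base rate and decay points are illustrative assumptions, not the original values:
# Hypothetical step-decay schedule; the original implementation is not shown.
def lr_schedule(epoch):
    lr = 1e-3
    if epoch > 80:
        lr *= 1e-2
    elif epoch > 40:
        lr *= 1e-1
    return lr
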
Example #5
lb = LabelBinarizer()
lb.fit(np.asarray(data['primary_microconstituent']))
y = lb.transform(labels)
print('\nLabels Binarized, converting array')

X = np.asarray(processed_imgs)

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.1,
                                                    random_state=42)

model = NASNetMobile(weights=None, classes=7)

model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
time_callback = TimeHistory()
model.fit(X_train,
          y_train,
          epochs=5,
          batch_size=32,
          validation_data=(X_test, y_test),
          callbacks=[time_callback])
name = 'results/UHCS_NASNetMobile_Weights'
score = model.evaluate(X_test, y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
model.save_weights(name + '.h5')
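
TimeHistory is a custom callback that is not defined in this snippet; it is commonly written as a Keras Callback that records the duration of each epoch, roughly like the sketch below (an assumption, not the original class):
# Hypothetical TimeHistory callback; the original definition is not shown.
import time
from keras.callbacks import Callback

class TimeHistory(Callback):
    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch_start = time.time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time.time() - self.epoch_start)
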
Example #6
class FeatureExtraction():
	"""
		Feature Extraction class for extracting features from image data
		through pre-trained model and saving features and labels  to
		user defined path.
	"""
	def __init__(self, model_name="mobilenet", weights="imagenet", include_top=True):

		if model_name == "vgg16":
			self.base_model = VGG16(weights=weights)
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('fc1').output)
			self.image_size = (224, 224)
		elif model_name == "vgg19":
			self.base_model = VGG19(weights=weights)
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('fc1').output)
			self.image_size = (224, 224)
		elif model_name == "resnet50":
			self.base_model = ResNet50(weights=weights)
			self.base_model.summary()
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('fc1000').output)
			self.image_size = (224, 224)
		elif model_name == "inceptionv3":
			self.base_model = InceptionV3(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('custom').output)
			self.image_size = (299, 299)
		elif model_name == "inceptionresnetv2":
			self.base_model = InceptionResNetV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('custom').output)
			self.image_size = (299, 299)
		elif model_name == "mobilenet":
			self.base_model = MobileNet(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('conv_pw_13_relu').output)
			self.image_size = (224, 224)
		elif model_name == "mobilenetv2":
			self.base_model = MobileNeV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
			self.base_model.summary()
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('conv_pw_13_relu').output)
			self.image_size = (224, 224)
		elif model_name == "xception":
			self.base_model = Xception(weights=weights, input_tensor=Input(shape=(299,299,3)))
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('avg_pool').output)
			self.image_size = (299, 299)
		elif model_name == "densenet":
			self.base_model = DenseNet121(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('avg_pool').output)
			self.image_size = (224, 224)
		elif model_name == "xception":
			self.base_model = NASNetMobile(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
			self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('avg_pool').output)
			self.image_size = (224, 224)
		else:
			self.base_model = None
		self.features = []
		self.labels = []

	def list_directory(self, path=None):

		train_labels = os.listdir(path)

		return train_labels


	def label_encoder(self, train_labels=None):

		le = LabelEncoder()
		le.fit(train_labels)
		self.le_labels = le.transform(self.labels)


		return le, self.le_labels


	def extract_feature(self, features_path='', train_path=None, train_labels=None):

		# loop over all the labels in the folder
		count = 0
		for i, label in enumerate(train_labels):
			cur_path = train_path + "/" + label
			for image_path in glob.glob(cur_path + "/*.png"):
				flat = self.process_image(image_path)
				self.features.append(flat)
				self.labels.append(label)
				count += 1
		print("[INFO] processed - " + str(count))
		# encode the collected string labels into integers
		le, self.le_labels = self.label_encoder(train_labels)
		print("[STATUS] training labels: {}".format(self.le_labels))
		print("[STATUS] training labels shape: {}".format(self.le_labels.shape))

	def process_image(self, image_path=None):

		# extract features from images
		img = image.load_img(image_path, target_size=self.image_size)
		img = image.img_to_array(img)
		img = np.expand_dims(img, axis=0)
		img = preprocess_input(img)
		feature = self.model.predict(img)
		flat = feature.flatten()

		return flat
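
A minimal, hypothetical way to use the class, assuming a training folder with one sub-directory per class (the paths below are illustrative only):
# Hypothetical usage of FeatureExtraction; paths are illustrative.
extractor = FeatureExtraction(model_name="mobilenet", weights="imagenet")
train_labels = extractor.list_directory("dataset/train")
extractor.extract_feature(train_path="dataset/train", train_labels=train_labels)
print(len(extractor.features), len(extractor.labels))
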
Example #7
def aishufan_train(folder, batch_size, epoch_size, model_name):
    """
    Train network with the parameters specified.

    :param folder: image folder for training
    :param batch_size: training batch size
    :param epoch_size: training epoch size
    :param model_name: IR2, InceptionResNetV2; NM, NASNetMobile; XC, Xception;
                       D21, DenseNet201; IV3, InceptionV3
    :return: training history
    """
    image_wh = system_config['image_wh']

    image_size = (image_wh, image_wh)
    image_shape = (image_wh, image_wh, 3)

    train_list, valid_list = create_tv_list(folder)
    print(f'Train size: {len(train_list)}, valid size: {len(valid_list)}')

    train_df = pd.DataFrame(train_list, columns=['fname', 'class'])
    valid_df = pd.DataFrame(valid_list, columns=['fname', 'class'])

    model = None
    if 'NM' in model_name:
        model_name = 'NM'
        model = NASNetMobile(include_top=True,
                             weights=None,
                             input_tensor=None,
                             input_shape=image_shape,
                             pooling='max',
                             classes=2)

    elif 'XC' in model_name:
        model_name = 'XC'
        model = Xception(include_top=True,
                         weights=None,
                         input_tensor=None,
                         input_shape=image_shape,
                         pooling='max',
                         classes=2)
    elif 'D21' in model_name:
        model_name = 'D21'
        model = DenseNet201(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=2)
    elif 'IV3' in model_name:
        model_name = 'IV3'
        model = InceptionV3(include_top=True,
                            weights=None,
                            input_tensor=None,
                            input_shape=image_shape,
                            pooling='max',
                            classes=2)

    else:
        model_name = 'IR2'
        model = InceptionResNetV2(include_top=True,
                                  weights=None,
                                  input_tensor=None,
                                  input_shape=image_shape,
                                  pooling='max',
                                  classes=2)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr_schedule(0)),
                  metrics=['accuracy'])
    model.summary()

    # Image generator does data augmentation:
    datagen = data_generator()

    train_gen = datagen.flow_from_dataframe(
        dataframe=train_df,
        directory=folder,
        x_col="fname",
        y_col="class",
        class_mode="categorical",
        target_size=image_size,
        color_mode='rgb',
        batch_size=batch_size,
        shuffle=False)

    valid_gen = datagen.flow_from_dataframe(
        dataframe=valid_df,
        directory=folder,
        x_col="fname",
        y_col="class",
        class_mode="categorical",
        target_size=image_size,
        color_mode='rgb',
        batch_size=batch_size,
        shuffle=False)

    # Save class indices
    system_config['class_indices'] = train_gen.class_indices
    save_config()

    # Prepare the model saving directory.
    save_dir = Path(os.path.dirname(os.path.realpath(__file__))).joinpath('models')
    if not save_dir.is_dir():
        save_dir.mkdir(exist_ok=True)
    filepath = f'{str(save_dir)}/{MODEL_NAMES[model_name]}'
    print(f'{filepath}\n')

    # Prepare callbacks for model saving and for learning rate adjustment.
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)

    lr_scheduler = LearningRateScheduler(lr_schedule)

    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)

    callbacks = [checkpoint, lr_reducer, lr_scheduler]

    # Fit the model on batches produced by the data generators.
    steps_per_epoch = int(len(train_list) / batch_size)
    validation_steps = int(len(valid_list) / batch_size)
    history = model.fit_generator(
        generator=train_gen,
        steps_per_epoch=steps_per_epoch,
        validation_data=valid_gen,
        validation_steps=validation_steps,
        epochs=epoch_size,
        use_multiprocessing=False,
        verbose=1,
        workers=4,
        callbacks=callbacks)

    # Score the trained model on the validation generator.
    scores = model.evaluate_generator(generator=valid_gen, steps=validation_steps, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    # Save score in configuration file
    system_config[f'{model_name}_Accuracy'] = scores[1]
    save_config()

    return history
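
Both training functions call data_generator(), which is defined elsewhere; since the object it returns exposes flow_from_dataframe, it is presumably a Keras ImageDataGenerator. A minimal sketch, with purely illustrative augmentation settings, might be:
# Hypothetical data_generator; the original implementation is not shown.
from keras.preprocessing.image import ImageDataGenerator

def data_generator():
    return ImageDataGenerator(rescale=1. / 255,
                              rotation_range=10,
                              width_shift_range=0.1,
                              height_shift_range=0.1,
                              horizontal_flip=True)
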