def simple(img_path=None, gt_path=None, batchSize=7, target_size=(256, 256), epoch=30, lr=3e-4,
           steps_per_epoch=None, model_save_path=None, seed=1):
    # ---- 1. prepare data ----
    data_gen_args = dict(horizontal_flip=True, fill_mode='nearest')
    img_datagen = ImageDataGenerator(**data_gen_args)
    data_gen_args['rescale'] = 1. / 255
    mask_datagen = ImageDataGenerator(**data_gen_args)

    img_gen = img_datagen.flow_from_directory(img_path, batch_size=batchSize, target_size=target_size,
                                              shuffle=True, class_mode=None, seed=seed)
    mask_gen = mask_datagen.flow_from_directory(gt_path, color_mode='grayscale', batch_size=batchSize,
                                                target_size=target_size, shuffle=True, class_mode=None, seed=seed)
    train_gen = zip(img_gen, mask_gen)

    # ---- 2. define your model ----
    model = Xnet(backbone_name='vgg16', encoder_weights='imagenet', decoder_block_type='transpose')
    print(model.summary())

    # ---- 3. define your optimizer ----
    opt = optimizers.Adam(lr=lr)
    model.compile(optimizer=opt, loss=bce_dice_loss,
                  metrics=["binary_crossentropy", mean_iou, dice_coef])

    # ---- 4. snapshot ----
    save_best = callbacks.ModelCheckpoint(filepath=model_save_path, monitor='loss',
                                          save_best_only=True, verbose=1)
    early_stopping = callbacks.EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='min')
    callbacks_list = [save_best, early_stopping]

    # ---- 5. start training ----
    model.fit_generator(train_gen, steps_per_epoch=steps_per_epoch, epochs=epoch,
                        verbose=1, callbacks=callbacks_list)
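# --- Hypothetical usage of simple(); the paths, step count, and checkpoint name below are
# --- placeholders, not values taken from the project. Note that flow_from_directory expects
# --- each path to contain one subfolder that holds the actual files.
simple(img_path='data/train/images',          # e.g. data/train/images/all/*.jpg
       gt_path='data/train/masks',            # e.g. data/train/masks/all/*.png
       batchSize=7,
       target_size=(256, 256),
       epoch=30,
       steps_per_epoch=100,                   # roughly num_samples // batchSize
       model_save_path='weights/xnet_vgg16_best.h5')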
def Simple(weight_path=None, img_path=None, target_size=(256, 256), batch_size=1, save_path=None):
    os.makedirs(save_path, exist_ok=True)
    print(img_path)

    data_gen_args = dict(fill_mode='nearest')
    img_datagen = ImageDataGenerator(**data_gen_args)
    test_gen = img_datagen.flow_from_directory(img_path, batch_size=batch_size, target_size=target_size,
                                               shuffle=False, class_mode=None)

    model = Xnet(backbone_name='vgg16', encoder_weights='imagenet', decoder_block_type='transpose')
    print(model.summary())
    model.load_weights(weight_path)

    opt = optimizers.Adam(lr=3e-4)
    model.compile(optimizer=opt, loss=bce_dice_loss,
                  metrics=["binary_crossentropy", mean_iou, dice_coef])

    predicted_list = model.predict_generator(test_gen, steps=None, max_queue_size=10, workers=1,
                                             use_multiprocessing=False, verbose=1)

    img_name_list = test_gen.filenames  # Keras DirectoryIterator exposes the file list as `filenames`
    num = 0
    for predict in predicted_list:
        img_name = img_name_list[num].split('/')[1]
        img_name = img_name.replace('.jpg', '.png')
        img = Convert(predict)
        cv2.imwrite(os.path.join(save_path, img_name), img)
        num += 1
        print("[INFO] {}/{}".format(num, len(img_name_list)))
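# --- Hypothetical usage of Simple(); the weight file and directories are placeholders that
# --- mirror the training call above. One PNG mask is written per input image.
Simple(weight_path='weights/xnet_vgg16_best.h5',   # checkpoint written by simple()
       img_path='data/test/images',                # one subfolder with the .jpg files inside
       target_size=(256, 256),
       batch_size=1,
       save_path='results/masks')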
            Y_train.append(seg_labels)
            # start over from the beginning once a full pass over the data is done
            i = (i + 1) % n
        yield (np.array(X_train), np.array(Y_train))


# prepare data
dataset_path = 'C:\\Users\\admin\\dongwei\\workspace\\dataset\\defeat_seg'
# range in [0,1], the network expects input channels of 3
# x, y = load_data(root_dir=dataset_path, contents=['jpg', 'png'])

# prepare model
# build UNet++
model = Xnet(backbone_name='resnet50', encoder_weights='imagenet',
             decoder_block_type='transpose', classes=NCLASSES)
# model = Unet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose')  # build U-Net
# model = Nestnet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose')  # build DLA
model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])

# train model
# model.fit(x, y)
batch_size = 2
with open("C:\\Users\\admin\\dongwei\\workspace\\dataset\\defeat_seg\\train.txt", "r") as f:
    lines = f.readlines()

# 90% of the samples are used for training, 10% for validation
num_val = int(len(lines) * 0.1)
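# --- A sketch of how the split and the generator could feed training. The generator name
# --- `generate_arrays_from_file`, the epoch count, and the step counts are assumptions;
# --- the fragment above only shows the tail of the generator and the 90/10 split.
num_train = len(lines) - num_val
model.fit_generator(
    generate_arrays_from_file(lines[:num_train], batch_size),
    steps_per_epoch=max(1, num_train // batch_size),
    validation_data=generate_arrays_from_file(lines[num_train:], batch_size),
    validation_steps=max(1, num_val // batch_size),
    epochs=50)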
if config.model == "Unet":
    model = Unet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
elif config.model == "Nestnet":
    model = Nestnet(backbone_name=config.backbone,
                    encoder_weights=config.weights,
                    decoder_block_type=config.decoder_block_type,
                    classes=config.nb_class,
                    activation=config.activation)
elif config.model == "Xnet":
    model = Xnet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
else:
    raise ValueError("Unsupported model: {}".format(config.model))

model.compile(optimizer="Adam", loss=bce_dice_loss,
              metrics=["binary_crossentropy", mean_iou, dice_coef])
# plot_model(model, to_file=os.path.join(model_path, config.exp_name+".png"))

if os.path.exists(os.path.join(model_path, config.exp_name + ".txt")):
    os.remove(os.path.join(model_path, config.exp_name + ".txt"))
with open(os.path.join(model_path, config.exp_name + ".txt"), 'w') as fh:
    model.summary(positions=[.3, .55, .67, 1.], print_fn=lambda x: fh.write(x + '\n'))
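# --- Illustrative stand-in for the `config` object consumed above; in the real script it
# --- would be defined (e.g. parsed from arguments) before that block, and the field values
# --- here are guesses rather than the project's actual defaults.
class Config:
    model = "Xnet"                      # one of "Unet", "Nestnet", "Xnet"
    backbone = "vgg16"
    weights = "imagenet"
    decoder_block_type = "transpose"
    nb_class = 1
    activation = "sigmoid"
    exp_name = "xnet_vgg16_run1"

config = Config()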
val_generator = ImageDataGenerator(**data_gen_val_args)

# Generate examples of data augmentation
if aug_examples:
    train_generator.get_transformed_samples(10, save_to_dir=True, train=False, out_dir=da_samples_dir)

print("#################################\n"
      "# BUILD AND TRAIN THE NETWORK #\n"
      "#################################\n")

print("Creating the network . . .")
model = Xnet(backbone_name='resnet50', encoder_weights='imagenet',
             decoder_block_type='transpose')  # build UNet++

# Select the optimizer
if optimizer == "sgd":
    opt = tf.keras.optimizers.SGD(lr=learning_rate_value, momentum=0.99, decay=0.0, nesterov=False)
elif optimizer == "adam":
    opt = tf.keras.optimizers.Adam(lr=learning_rate_value, beta_1=0.9, beta_2=0.999,
                                   epsilon=None, decay=0.0, amsgrad=False)
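# --- The fragment stops before compilation; a plausible continuation is shown below.
# --- The loss and metric choices are assumptions, not taken from the original script.
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['binary_accuracy'])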
from segmentation_models import Unet, Nestnet, Xnet

model = Xnet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose')  # build UNet++
# model = Unet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose')  # build U-Net
# model = Nestnet(backbone_name='resnet50', encoder_weights='imagenet', decoder_block_type='transpose')  # build DLA

model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])
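# --- Quick smoke test of the compiled model on random arrays; the shapes are illustrative
# --- (four 256x256 RGB inputs with binary masks), not tied to any real dataset.
import numpy as np

x = np.random.rand(4, 256, 256, 3).astype('float32')            # images scaled to [0, 1]
y = (np.random.rand(4, 256, 256, 1) > 0.5).astype('float32')    # binary masks
model.fit(x, y, batch_size=2, epochs=1)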
from PIL import Image
import numpy as np
import random
import copy
import os
import time

from segmentation_models import Xnet  # added: Xnet is used below but was not imported in this snippet

random.seed(0)

class_colors = [[0, 0, 0], [0, 255, 0]]
NCLASSES = 2
HEIGHT = 544
WIDTH = 544

model = Xnet(backbone_name='resnet50', encoder_weights='imagenet',
             decoder_block_type='transpose', classes=NCLASSES)
# model.load_weights("logs/ep010-loss1.375-val_loss0.657.h5")
model.load_weights('logs/ep011-loss0.007-val_loss0.010.h5')

imgs = os.listdir("./img")
print(imgs)

"""
def iou(y_true, y_pred, label: int):
    # extract the label values using the argmax operator then
    # calculate equality of the predictions and truths to the label
    y_true = K.cast(K.equal(y_true, label), K.floatx())
    y_pred = K.cast(K.equal(y_pred, label), K.floatx())
    # calculate the |intersection| (AND) of the labels
    intersection = K.sum(y_true * y_pred)