def create_model():
    """Build and compile an FCN-VGG16 model for 3-class patch segmentation."""
    fcn_vgg16 = FCN(input_shape=(PATCH_SIZE, PATCH_SIZE, 3), classes=3,
                    weights='imagenet', trainable_encoder=True)
    sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    fcn_vgg16.compile(optimizer=sgd,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    return fcn_vgg16
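# Imports and globals assumed by create_model() above -- a minimal sketch,
# not necessarily how the original module defines them. FCN comes from the
# keras_fcn package (imported the same way in the training script below) and
# optimizers from keras; PATCH_SIZE is assumed to be the square patch edge.
from keras import optimizers
from keras_fcn import FCN

PATCH_SIZE = 224  # hypothetical value; set to the patch size actually used

# Hypothetical usage: build the model and fit it on one-hot-encoded masks.
# model = create_model()
# model.fit(x_patches, y_onehot_masks, batch_size=16, epochs=10)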
def test_fcn_vgg16_shape():
    """Test output shape."""
    if K.image_data_format() == 'channels_first':
        input_shape = (3, 500, 500)
    else:
        input_shape = (500, 500, 3)
    fcn_vgg16 = FCN(input_shape=input_shape, classes=21)

    layers = [l.name for l in fcn_vgg16.layers]
    assert 'upscore_feat1' in layers
    assert 'upscore_feat2' in layers
    assert 'upscore_feat3' in layers

    for l in fcn_vgg16.layers:
        if l.name == 'block1_pool':
            test_shape = (None, 250, 250, 64)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'block2_pool':
            test_shape = (None, 125, 125, 128)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'block3_pool':
            test_shape = (None, 63, 63, 256)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'block4_pool':
            test_shape = (None, 32, 32, 512)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'block5_pool':
            test_shape = (None, 16, 16, 512)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'dropout_2':
            test_shape = (None, 16, 16, 4096)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'upscore_feat1':
            test_shape = (None, 32, 32, 21)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'upscore_feat2':
            test_shape = (None, 63, 63, 21)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'upscore_feat3':
            test_shape = (None, 500, 500, 21)
            assert is_same_shape(l.output_shape, test_shape)
        elif l.name == 'score':
            test_shape = (None, 500, 500, 21)
            assert is_same_shape(l.output_shape, test_shape)

    assert is_same_shape(fcn_vgg16.output_shape, (None, 500, 500, 21))

    input_shape = (1366, 768, 3)
    fcn_vgg16 = FCN(input_shape=input_shape, classes=21)
    assert is_same_shape(fcn_vgg16.output_shape, (None, 1366, 768, 21))
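# The test above relies on an is_same_shape() helper that is not shown in this
# snippet. A minimal sketch of what such a helper could look like, assuming it
# compares two shape tuples element-wise and treats None as a wildcard (the
# real keras_fcn test utility may differ):
def is_same_shape(shape, expected):
    """Return True if two shape tuples match, treating None as a wildcard."""
    if len(shape) != len(expected):
        return False
    return all(s is None or e is None or s == e
               for s, e in zip(shape, expected))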
def main(args):
    # set the necessary lists
    # train_list = pd.read_csv(args.train_list, header=None)
    # val_list = pd.read_csv(args.val_list, header=None)
    #
    # # set the necessary directories
    # trainimg_dir = args.trainimg_dir
    # trainmsk_dir = args.trainmsk_dir
    # valimg_dir = args.valimg_dir
    # valmsk_dir = args.valmsk_dir
    #
    # train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
    #                            args.batch_size,
    #                            [args.input_shape[0], args.input_shape[1]],
    #                            args.n_labels)
    # val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
    #                          args.batch_size,
    #                          [args.input_shape[0], args.input_shape[1]],
    #                          args.n_labels)
    train_gen, val_gen = train_val_generator(args.batch_size)

    # segnet = CreateSegNet(args.input_shape, args.n_labels, args.kernel,
    #                       args.pool_size, args.output_mode)
    from keras_fcn import FCN
    segnet = FCN(input_shape=args.input_shape, classes=args.n_labels,
                 weights='imagenet', trainable_encoder=True)
    print(segnet.summary())

    checkpointer = ModelCheckpoint(
        filepath="model/weights.{epoch:02d}-{val_acc:.4f}.hdf5",
        verbose=1,
        save_best_only=False)
    mycallback = MyCallBack()

    segnet.compile(loss=args.loss, optimizer=args.optimizer,
                   metrics=["accuracy"])
    segnet.fit_generator(train_gen,
                         steps_per_epoch=args.epoch_steps,
                         epochs=args.n_epochs,
                         validation_data=val_gen,
                         validation_steps=args.val_steps,
                         callbacks=[checkpointer, mycallback])

    segnet.save_weights("model/finalSegNet" + str(args.n_epochs) + ".hdf5")
    print("save weights done..")

    json_string = segnet.to_json()
    open("model/LIP_SegNet.json", "w").write(json_string)
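# MyCallBack and train_val_generator are project-specific helpers not defined
# in this snippet. As a rough sketch of the kind of callback main() expects
# (an assumption, not the project's actual implementation), a Keras Callback
# subclass that just reports validation accuracy at the end of each epoch:
from keras.callbacks import Callback

class MyCallBack(Callback):
    """Minimal example callback: print val_acc after every epoch."""
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print("epoch {:d}: val_acc={:.4f}".format(
            epoch, logs.get('val_acc', float('nan'))))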
def test_fcn_vgg16_correctness():
    """Test output not NaN."""
    if K.image_data_format() == 'channels_first':
        input_shape = (3, 500, 500)
        x = np.random.rand(1, 3, 500, 500)
        y = np.random.randint(21, size=(1, 500, 500))
        y = np.eye(21)[y]  # one-hot encode integer labels via identity-matrix indexing
        y = np.transpose(y, (0, 3, 1, 2))  # move class channel to channels_first
    else:
        input_shape = (500, 500, 3)
        x = np.random.rand(1, 500, 500, 3)
        y = np.random.randint(21, size=(1, 500, 500))
        y = np.eye(21)[y]  # one-hot encode integer labels via identity-matrix indexing
    fcn_vgg16 = FCN(classes=21, input_shape=input_shape)
    fcn_vgg16.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    fcn_vgg16.fit(x, y, batch_size=1, epochs=1)

    loss = fcn_vgg16.evaluate(x, y, batch_size=1)
    assert not np.any(np.isinf(loss))
    assert not np.any(np.isnan(loss))

    y_pred = fcn_vgg16.predict(x, batch_size=1)
    assert not np.any(np.isinf(y_pred))
    assert not np.any(np.isnan(y_pred))
# check_num = CheckNumericsOps(validation_data=[np.random.random((1, 224, 224, 3)), 1],
#                              histogram_freq=100)

datagen = PascalVocGenerator(image_shape=[224, 224, 3],
                             image_resample=True,
                             pixelwise_center=True,
                             pixel_mean=[115.85100, 110.50989, 102.16182],
                             pixelwise_std_normalization=True,
                             pixel_std=[70.30930, 69.41244, 72.60676])

train_loader = ImageSetLoader(**init_args['image_set_loader']['train'])
val_loader = ImageSetLoader(**init_args['image_set_loader']['val'])

fcn_vgg16 = FCN(input_shape=(224, 224, 3), classes=21,
                weight_decay=3e-3,
                weights='imagenet', trainable_encoder=True)
optimizer = keras.optimizers.Adam(1e-4)

fcn_vgg16.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
fcn_vgg16.fit_generator(
    datagen.flow_from_imageset(class_mode='categorical',
                               classes=21,
                               batch_size=1,
                               shuffle=True,
                               image_set_loader=train_loader),
    steps_per_epoch=1112,
#%%
print(len(X_train_all))

#%%
generator = ImageDataGenerator(
    horizontal_flip=True,
    vertical_flip=True,
)

#%%
# u-net
model = get_unet(X_train_all[0].shape[0], 3)
history = model.fit(X_train_all, y_train_all, batch_size=32, epochs=10)

#%%
fcn_vgg16 = FCN(input_shape=X_train[0].shape, classes=3,
                weights=None, trainable_encoder=True)
sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
fcn_vgg16.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

# history = fcn_vgg16.fit(X_train_all, y_train_all, batch_size=32, epochs=20)
batch_size = 64
epochs = 50
history = fcn_vgg16.fit_generator(
    generator.flow(X_train_all, y_train_all, batch_size=batch_size),
    steps_per_epoch=len(X_train_all) / batch_size,
    epochs=epochs)

#%%
plot_history(history)
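#%%
# plot_history() is assumed to be a local notebook helper. A minimal sketch,
# assuming it simply plots the loss and accuracy curves stored in the Keras
# History object with matplotlib (the notebook's actual helper may differ):
import matplotlib.pyplot as plt

def plot_history(history):
    """Plot training loss and accuracy from a Keras History object."""
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))
    ax_loss.plot(history.history['loss'], label='loss')
    ax_loss.set_xlabel('epoch')
    ax_loss.legend()
    # older Keras versions log accuracy under 'acc', newer ones under 'accuracy'
    acc = history.history.get('acc', history.history.get('accuracy', []))
    ax_acc.plot(acc, label='accuracy')
    ax_acc.set_xlabel('epoch')
    ax_acc.legend()
    plt.show()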