def main(args):
    """Train a keras-fcn FCN segmentation model and save weights + JSON.

    Parameters
    ----------
    args : argparse.Namespace
        Expected attributes (read below): batch_size, input_shape,
        n_labels, loss, optimizer, epoch_steps, n_epochs, val_steps.

    Side effects: writes epoch checkpoints, final weights, and the model
    architecture JSON under ``model/``.
    """
    # Training / validation batch generators, sized by the CLI batch size.
    train_gen, val_gen = train_val_generator(args.batch_size)

    # Local import keeps the heavy keras-fcn dependency off module import.
    from keras_fcn import FCN
    segnet = FCN(input_shape=args.input_shape,
                 classes=args.n_labels,
                 weights='imagenet',
                 trainable_encoder=True)
    print(segnet.summary())

    # Checkpoint after every epoch (save_best_only=False keeps all
    # snapshots, named by epoch and validation accuracy).
    checkpointer = ModelCheckpoint(
        filepath="model/weights.{epoch:02d}-{val_acc:.4f}.hdf5",
        verbose=1,
        save_best_only=False)
    mycallback = MyCallBack()

    segnet.compile(loss=args.loss,
                   optimizer=args.optimizer,
                   metrics=["accuracy"])
    segnet.fit_generator(train_gen,
                         steps_per_epoch=args.epoch_steps,
                         epochs=args.n_epochs,
                         validation_data=val_gen,
                         validation_steps=args.val_steps,
                         callbacks=[checkpointer, mycallback])

    # Persist final weights and the architecture.
    segnet.save_weights("model/finalSegNet" + str(args.n_epochs) + ".hdf5")
    print("save weight done..")
    json_string = segnet.to_json()
    # Context manager guarantees the JSON file handle is closed.
    with open("model/LIP_SegNet.json", "w") as f:
        f.write(json_string)
# NOTE(review): fragment — the opening of this statement (presumably
# `fcn_vgg16 = FCN(` plus its first keyword arguments) is cut off before
# this chunk; tokens below are preserved exactly as found.
                classes=21, weight_decay=3e-3, weights='imagenet', trainable_encoder=True)

# Adam with a small learning rate for fine-tuning the pretrained encoder.
optimizer = keras.optimizers.Adam(1e-4)
fcn_vgg16.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

# Train from image-set loaders. 21 classes and 1112/1111 step counts —
# presumably a PASCAL VOC-style split; verify against the loader config.
fcn_vgg16.fit_generator(
    datagen.flow_from_imageset(class_mode='categorical',
                               classes=21,
                               batch_size=1,
                               shuffle=True,
                               image_set_loader=train_loader),
    steps_per_epoch=1112,
    epochs=100,
    validation_data=datagen.flow_from_imageset(class_mode='categorical',
                                               classes=21,
                                               batch_size=1,
                                               shuffle=True,
                                               image_set_loader=val_loader),
    validation_steps=1111,
    verbose=1,
    callbacks=[
        lr_reducer, early_stopper, csv_logger, checkpointer, nan_terminator
    ])

# Persist the full trained model (architecture + weights).
fcn_vgg16.save('output/fcn_vgg16.h5')
# --- Notebook-style cells: train an FCN-VGG16 and inspect predictions ---

# Baseline: train the pre-existing `model` for 10 epochs.
history = model.fit(X_train_all, y_train_all, batch_size=32, epochs=10)

#%%
# Build the FCN on one sample's shape, 3 output classes.
# BUG FIX: the original passed weights='None' (the *string* "None"), which
# is not a valid pretrained-weights identifier; `None` (no pretrained
# weights, random initialization) is what was intended.
fcn_vgg16 = FCN(input_shape=X_train[0].shape,
                classes=3,
                weights=None,
                trainable_encoder=True)
sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
fcn_vgg16.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

batch_size = 64
epochs = 50
# NOTE(review): steps_per_epoch is a float here (true division); older
# Keras accepts it, but `len(X_train_all) // batch_size` would be exact —
# confirm the Keras version before changing.
history = fcn_vgg16.fit_generator(
    generator.flow(X_train_all, y_train_all, batch_size=batch_size),
    steps_per_epoch=len(X_train_all) / batch_size,
    epochs=epochs)

#%%
plot_history(history)

#%%
# Predict a single test sample; argmax over axis 3 (the class channel)
# yields the per-pixel class map.
y_pred = fcn_vgg16.predict(X_test[0][np.newaxis, ...])
y_pred_cls = np.argmax(y_pred, axis=3)
print(y_pred_cls.shape)

#%%
print(X_test[0][np.newaxis, ...].shape)

#%%
# NOTE(review): this passes `model` (the first network), not `fcn_vgg16` —
# confirm which model the test-set evaluation is meant to use.
read_test_dataset_and_predict(model, X_mean, X_std)