def eval(test_ds, model=None, ckpt_dir='./work_dir/msi_fcn/ckpt-*'):
    """Restore a model from a checkpoint and evaluate it on ``test_ds``.

    Args:
        test_ds: iterable of ``(image_path, label_path)`` pairs.
        model: model to evaluate. Defaults to a fresh ``MSI_FCN()`` built
            lazily inside the call — the original evaluated ``MSI_FCN()``
            once at definition time, constructing a model on import and
            sharing it across all default-argument calls.
        ckpt_dir: either a checkpoint directory (the latest checkpoint in
            it is used) or a path that already names a specific checkpoint
            (detected by 'ckpt' appearing in the string).
    """
    if model is None:
        model = MSI_FCN()
    checkpoint = tf.train.Checkpoint(model=model)
    # A path that already names a checkpoint is used as-is; otherwise
    # resolve the most recent checkpoint in the directory.
    if 'ckpt' not in ckpt_dir:
        path = tf.train.latest_checkpoint(ckpt_dir)
    else:
        path = ckpt_dir
    checkpoint.restore(path)
    print("restore model from {}".format(path))  # typo "resotre" fixed

    Metric = Metrics()
    n = 1
    start = time.time()
    for img_file, lab_file in test_ds:
        img, lab = get_test_data(img_file, lab_file)
        print("start inference {}th image".format(n))
        pred = model(img)
        # Metrics are accumulated across the whole test set.
        Metric.update_state(lab, pred, is_train=False)
        n += 1

    metric = Metric.overall_metrics()
    for key, val in metric.items():
        print("{}: {}".format(key, val))
    end = time.time()
    print("total time: ", end - start)
def main():
    """Parse CLI arguments, build the selected model, and run evaluation."""
    args = parse_args()
    test_ds = get_filename_list(
        os.path.join(args.root, args.test),
        os.path.join(args.root, args.testannot))

    # Model zoo: maps the CLI model name to (constructor, config).
    zoo = {
        'msi_fcn': (MSI_FCN, {
            "input_scales": 4, "dcu_gr": 16, "dense_gr": 24, "filters": 64,
            "expansion": 2, "msc_filters": [2, 2, 2, 2], "k": (7, 5, 3, 1),
            "up_filters": 2, "num_layers": (4, 4, 4, 4), "num_classes": 2,
            "use_msc": True, "use_up_block": False
        }),
        'fcn': (FCN_vgg16, {"filters": 64, "expansion": 2, "num_classes": 2}),
        'fcd': (FCD, {
            "growth_rate": 12,
            "td_filters": [48, 112, 192, 304, 464, 656, 896],
            "up_filters": [1088, 816, 578, 384, 256],
            "down_layers": [4, 4, 4, 4, 4, 4],
            "up_layers": [4, 4, 4, 4, 4],
            "num_classes": 2
        }),
    }
    if args.model not in zoo:
        raise ValueError("args.model should be 'msi_fcn', 'fcn' or 'fcd'.")
    build, model_config = zoo[args.model]
    model = build(**model_config)

    ckpt = args.ckpt
    assert ckpt is not None

    # Echo the configuration used for this run.
    for key, value in model_config.items():
        print("{}: {}".format(key, value))
    eval(test_ds, model, ckpt_dir=ckpt)
def main():
    """Run MSI_FCN on the test split and save per-branch visualizations."""
    root = '/home/yel/yel/data/Aerialgoaf/detail/'
    # root = '/home/yel/yel/data/DeepCrack-master/dataset/Deepcrack/'
    img_dir = root + 'test'
    label_dir = root + 'testannot'
    save_dir = './visualization/msi_fcn_17/'
    test_ds = get_filename_list(img_dir, label_dir)

    # MSI_FCN configuration; display_stages=True makes the model return
    # intermediate branch outputs for visualization.
    model_config = {
        "input_scales": 4, "dcu_gr": 16, "dense_gr": 24, "filters": 64,
        "expansion": 2, "msc_filters": [2, 2, 2, 2], "k": (7, 5, 3, 1),
        "up_filters": 2, "num_layers": (4, 4, 4, 4), "num_classes": 2,
        "use_msc": True, "use_up_block": False
    }
    # FCN_VGG16
    # model_config = {"filters": 64, "expansion": 2, "num_classes": 2}
    ckpt_dir = './work_dir/msi_fcn_4scales/'
    ckpt_name = 'ckpt-17'
    model = MSI_FCN(**model_config, display_stages=True)
    # model = FCN_vgg16(**model_config)

    checkpoint = tf.train.Checkpoint(model=model)
    if ckpt_name:
        path = os.path.join(ckpt_dir, ckpt_name)
    else:
        path = tf.train.latest_checkpoint(ckpt_dir)
    # Bug fix: the original called checkpoint.restore(path) BEFORE checking
    # path for None; latest_checkpoint returns None when no checkpoint
    # exists, and restore(None) fails deep inside TF. Fail fast instead.
    if path is None:
        raise FileNotFoundError(
            "no checkpoint found in {}".format(ckpt_dir))
    checkpoint.restore(path)
    print("restore model from {}".format(path))  # typo "resotre" fixed

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    n = 1
    for img_file, lab_file in test_ds:
        img, lab = get_test_data(img_file, lab_file)
        # os.path.basename is portable; split('/') breaks on Windows paths.
        filename = os.path.basename(img_file)
        print("inference {}th image:".format(n))
        pred = model(img)
        show_all_branch(pred, save_dir, filename, rgb=True)
        n += 1
def main():
    """Evaluate an MSI_FCN checkpoint on the detail-dataset test split."""
    # Other data roots used during development:
    #   'D:\\AerialGoaf\\detail\\512x512\\'
    #   '/home/yel/yel/data/DeepCrack-master/dataset/DeepCrack/'
    #   '/home/yel/yel/data/road_crack/'
    root = "D:/data/detail/"
    test_ds = get_filename_list(root + 'test', root + 'testannot')

    # MSI_FCN configuration.
    model_config = {
        "input_scales": 4, "dcu_gr": 16, "dense_gr": 24, "filters": 64,
        "expansion": 2, "msc_filters": [2, 2, 2, 2], "k": (7, 5, 3, 1),
        "up_filters": 2, "num_layers": (4, 4, 4, 4), "num_classes": 2,
        "use_msc": True, "use_up_block": False
    }
    # Alternative backbones (kept for reference):
    # model_config = {"filters": 64, "expansion": 2, "num_classes": 2}  # FCN_VGG16
    # model_config = {"growth_rate": 12, "td_filters": [48, 112, 192, 304, 464, 656, 896],
    #                 "up_filters": [1088, 816, 578, 384, 256], "down_layers": [4, 4, 4, 4, 4, 4],
    #                 "up_layers": [4, 4, 4, 4, 4], "num_classes": 2}  # FCD
    model = MSI_FCN(**model_config)
    # model = FCD(**model_config)
    # model = FCN_vgg16(**model_config)

    # Echo the configuration before evaluating.
    for key, value in model_config.items():
        print("{}: {}".format(key, value))

    ckpt_dir = '../work_dir/msi_fcn_4scales'
    eval(test_ds, model, ckpt_dir=ckpt_dir, ckpt_name='ckpt-11017')
def main():
    """Train MSI_FCN on the Aerialgoaf detail dataset with validation."""
    root = '/home/yel/yel/data/Aerialgoaf/detail/'
    # root = '/home/yel/yel/data/DeepCrack-master/dataset/Deepcrack/'
    train_ds = get_dataset(root + 'train', root + 'trainannot', batch_size=5)
    val_ds = get_dataset(root + 'val', root + 'valannot', batch_size=5)

    model = MSI_FCN()
    # Learning rate decays from 2e-4 by a factor of 0.1 every 10000 steps.
    schedule = tf.keras.optimizers.schedules.ExponentialDecay(2e-4, 10000, 0.1)
    optimizer = tf.keras.optimizers.Adam(schedule, beta_1=0.5)

    fit(train_ds=train_ds,
        val_ds=val_ds,
        model=model,
        optimizer=optimizer,
        loss_func=WSCE,
        work_dir='../work_dir/msi_fcn_2',
        epochs=100,
        fine_tune=True)
# Load the source image and crop a 512x512 patch from its top-right corner.
image = np.array(Image.open(img_path))
h, w, c = image.shape
patch = image[:512, w - 512:, :]

# Build a normalized (1, 256, 256, 3) float32 input tensor.
img_in = tf.convert_to_tensor(patch, dtype=tf.float32)
img_in = tf.image.resize(img_in, (256, 256)) / 255
img_in = tf.expand_dims(img_in, 0)

debug = 1  # leftover debug marker; unused

# MSI_FCN configuration.
model_config = {
    "input_scales": 4, "dcu_gr": 16, "dense_gr": 24, "filters": 64,
    "expansion": 2, "msc_filters": [2, 2, 2, 2], "k": (7, 5, 3, 1),
    "up_filters": 2, "num_layers": (4, 4, 4, 4), "num_classes": 2,
    "use_msc": True, "use_up_block": False
}
model = MSI_FCN(**model_config)

# Restore weights from a specific checkpoint file and run one inference.
work_dir = './work_dir/msi_fcn_4scales/weights/ckpt-17'
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(work_dir)

pred = model(img_in)
writeImage(pred, "pred.png")
import tensorflow as tf
import time
import os
from model.msi_fcn import MSI_FCN
from core.data import get_dataset
from core.loss import WSCE
from core.metrics import show_metrics
import datetime

# Training data location (hard-coded to the Aerialgoaf "detail" split).
root = '/home/yel/yel/data/Aerialgoaf/detail/'
img_dir = root + 'train'
label_dir = root + 'trainannot'
train_ds = get_dataset(img_dir, label_dir, batch_size=3)

model = MSI_FCN()
optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

# Checkpointing: keep the 5 most recent checkpoints in ./training_checkpoints.
checkpoint_dir = './training_checkpoints'
# checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt_manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir,
                                          max_to_keep=5)

# TensorBoard writer; one run directory per launch, stamped with start time.
log_dir = './logs/'
summary_writer = tf.summary.create_file_writer(
    log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))


# @tf.function
def train_step(model, input, label, loss_object, optimizer, show_metrics,
               summary_writer, step):
    # One forward pass under a gradient tape.
    # NOTE(review): the visible body computes the loss but never applies
    # gradients or uses show_metrics/summary_writer/step — the function
    # appears truncated in this chunk; confirm against the full file.
    with tf.GradientTape() as t:
        output = model(input, training=True)
        loss = loss_object(output, label)
def fit(train_ds,
        val_ds,
        model=None,
        loss_func=WSCE,
        optimizer=None,
        Metricor=None,
        work_dir='./work_dir/msi_fcn',
        epochs=100,
        fine_tune=False):
    """Train ``model`` on ``train_ds``, periodically logging and checkpointing.

    Args:
        train_ds: dataset yielding ``(inputs, label)`` batches.
        val_ds: validation dataset, or None to skip validation.
        model: model to train; a fresh ``MSI_FCN()`` is built when omitted.
            (The original evaluated ``MSI_FCN()``, ``Adam(...)`` and
            ``Metrics()`` once at definition time, so all default calls
            shared the same objects — defaults are now created lazily.)
        loss_func: loss callable taking ``(output, label)``.
        optimizer: optimizer; defaults to ``Adam(2e-4, beta_1=0.5)``.
        Metricor: metrics accumulator; defaults to a fresh ``Metrics()``.
        work_dir: directory for checkpoints and TensorBoard summaries.
        epochs: number of training epochs.
        fine_tune: when True, resume from the latest checkpoint in
            ``work_dir`` and continue the global step counter from it.

    Raises:
        FileNotFoundError: if ``fine_tune`` is True but ``work_dir``
            contains no checkpoint.
    """
    if model is None:
        model = MSI_FCN()
    if optimizer is None:
        optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    if Metricor is None:
        Metricor = Metrics()

    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
    ckpt_manager = tf.train.CheckpointManager(checkpoint, work_dir,
                                              max_to_keep=5)
    summary_writer = tf.summary.create_file_writer(work_dir)

    n = 0  # global step; resumed from the checkpoint name when fine-tuning
    if fine_tune:
        path = tf.train.latest_checkpoint(work_dir)
        # Bug fix: latest_checkpoint returns None when the directory holds
        # no checkpoint; the original then crashed on path.split('-').
        if path is None:
            raise FileNotFoundError(
                "fine_tune=True but no checkpoint found in {}".format(work_dir))
        n = int(path.split('-')[1])
        checkpoint.restore(path)
        print("restore from: {}".format(path))

    for epoch in range(epochs):
        start = time.time()
        # Train
        for inputs, label in train_ds:
            output, loss = train_step(inputs, label, model, loss_func,
                                      optimizer)
            metrics = Metricor.update_state(label, output, is_train=True)
            write_sumamry(summary_writer, loss, metrics, step=n, eval=False)
            if (n + 1) % 10 == 0:
                print_summary(metrics, loss, n, epoch, val=False)
            if (n + 1) % 100 == 0 and val_ds is not None:
                # NOTE(review): validation reuses train_step, which applies
                # gradient updates on the validation batch — confirm whether
                # a forward-only evaluation step was intended here.
                for val_inputs, val_label in val_ds.take(1):
                    output, loss = train_step(val_inputs, val_label, model,
                                              loss_func, optimizer)
                    metrics = Metricor.update_state(val_label, output,
                                                    is_train=True)
                    write_sumamry(summary_writer, loss, metrics, step=n,
                                  eval=True)
                    print_summary(metrics, loss, n, epoch, val=True)
            n += 1

        # Checkpoint every 10 epochs (the original comment said 20 but the
        # code checkpoints on every 10th epoch), plus once after training.
        if (epoch + 1) % 10 == 0:
            ckpt_manager.save(checkpoint_number=n)
        consume_time = time.time() - start
        print('Time taken for epoch {} is {:2f} sec, eta:{} \n'.format(
            epoch + 1, consume_time,
            datetime.timedelta(seconds=(epochs - epoch - 1) * consume_time)))
    ckpt_manager.save(checkpoint_number=n)
def main():
    """CLI training entry point: build datasets and model, then run fit()."""
    args = parse_args()

    train_ds = get_dataset(os.path.join(args.root, args.train),
                           os.path.join(args.root, args.trainannot),
                           batch_size=4)
    val_ds = get_dataset(os.path.join(args.root, args.val),
                         os.path.join(args.root, args.valannot),
                         batch_size=4)
    # val_ds = None
    fine_tune = args.finetune

    # Model zoo: maps the CLI model name to (constructor, config).
    zoo = {
        'msi_fcn': (MSI_FCN, {
            "input_scales": 4, "dcu_gr": 16, "dense_gr": 24, "filters": 64,
            "expansion": 2, "msc_filters": [2, 2, 2, 2], "k": (7, 5, 3, 1),
            "up_filters": 2, "num_layers": (4, 4, 4, 4), "num_classes": 2,
            "use_msc": True, "use_up_block": False
        }),
        'fcn': (FCN_vgg16, {"filters": 64, "expansion": 2, "num_classes": 2}),
        'fcd': (FCD, {
            "growth_rate": 12,
            "td_filters": [48, 112, 192, 304, 464, 656, 896],
            "up_filters": [1088, 816, 578, 384, 256],
            "down_layers": [4, 4, 4, 4, 4, 4],
            "up_layers": [4, 4, 4, 4, 4],
            "num_classes": 2
        }),
    }
    if args.model not in zoo:
        raise ValueError("args.model should be 'msi_fcn', 'fcn' or 'fcd'.")
    build, model_config = zoo[args.model]
    model = build(**model_config)

    work_dir = args.work_dir
    # Learning rate decays from 2e-4 by a factor of 0.95 every 5000 steps.
    lr = tf.keras.optimizers.schedules.ExponentialDecay(2e-4, 5000, 0.95)
    optimizer = tf.keras.optimizers.Adam(lr)

    # Echo the configuration used for this run.
    for key, value in model_config.items():
        print("{}: {}".format(key, value))

    fit(train_ds=train_ds,
        val_ds=val_ds,
        model=model,
        optimizer=optimizer,
        loss_func=WSCE,
        work_dir=work_dir,
        epochs=60,
        fine_tune=fine_tune)