Code example #1
# settings (this excerpt starts mid-file: nn_utils, unet, collectionMaker and
# patchExtractor come from the project's ersa framework, imports omitted; the
# values below for class_num, patch_size, tile_size, ds_name, bs, ds, dr and
# epochs are assumed from the other examples on this page)
class_num = 2
patch_size = (572, 572)
tile_size = (5000, 5000)
ds_name = 'inria'
bs = 5
ds = 60  # decay_step
dr = 0.1  # decay_rate
epochs = 100
valid_mult = 5
gpu = 0
n_train = 8000
n_valid = 1000
verb_step = 50
save_epoch = 20
suffix = 'test'
lr = 1e-4

nn_utils.set_gpu(gpu)

# define network
model = unet.UNet(class_num,
                  patch_size,
                  suffix=suffix,
                  learn_rate=lr,
                  decay_step=ds,
                  decay_rate=dr,
                  epochs=epochs,
                  batch_size=bs)
overlap = model.get_overlap()

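# load the cached collection and report its metadata (tile list, channel mean, etc.)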
cm = collectionMaker.read_collection(ds_name)
cm.print_meta_data()

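# tiles 6-36 form the training split, tiles 0-4 the validation split; each entry
# pairs an RGB image with its /255 ground truth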
file_list_train = cm.load_files(field_id=','.join(str(i) for i in range(6, 37)),
                                field_ext='RGB,gt_d255')
file_list_valid = cm.load_files(field_id=','.join(str(i) for i in range(5)),
                                field_ext='RGB,gt_d255')

# the original excerpt is truncated here; the remaining arguments are assumed
# to follow the same pattern as example #2
patch_list_train = patchExtractor.PatchExtractor(patch_size, tile_size, ds_name + '_train',
                                                 overlap, overlap // 2). \
    run(file_list=file_list_train, file_exts=['jpg', 'png'], force_run=False).get_filelist()
Code example #2
File: finetune_aemo_dcc.py  Project: bohaohuang/sis
def main(flags):
    nn_utils.set_gpu(flags.GPU)
    for start_layer in flags.start_layer:
        if start_layer >= 10:
            suffix_base = 'aemo_newloss'
        else:
            suffix_base = 'aemo_newloss_up{}'.format(start_layer)
        if flags.from_scratch:
            suffix_base += '_scratch'
        for lr in flags.learn_rate:
            for run_id in range(4):
                suffix = '{}_{}'.format(suffix_base, run_id)
                tf.reset_default_graph()

                np.random.seed(run_id)
                tf.set_random_seed(run_id)

                # define network
                model = unet.UNet(flags.num_classes, flags.patch_size, suffix=suffix, learn_rate=lr,
                                  decay_step=flags.decay_step, decay_rate=flags.decay_rate,
                                  epochs=flags.epochs, batch_size=flags.batch_size)
                overlap = model.get_overlap()

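                # index the raw tiles (matching .*rgb / .*gt tif pairs) into a cached collection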
                cm = collectionMaker.read_collection(raw_data_path=flags.data_dir,
                                                     field_name='aus10,aus30,aus50',
                                                     field_id='',
                                                     rgb_ext='.*rgb',
                                                     gt_ext='.*gt',
                                                     file_ext='tif',
                                                     force_run=False,
                                                     clc_name=flags.ds_name)
                cm.print_meta_data()

                file_list_train = cm.load_files(field_name='aus10,aus30', field_id='', field_ext='.*rgb,.*gt')
                file_list_valid = cm.load_files(field_name='aus50', field_id='', field_ext='.*rgb,.*gt')

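                # cut train/valid tiles into overlapping patches; results are cached unless force_run=True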
                patch_list_train = patchExtractor.PatchExtractor(flags.patch_size, flags.tile_size,
                                                                 flags.ds_name + '_train_hist',
                                                                 overlap, overlap // 2). \
                    run(file_list=file_list_train, file_exts=['jpg', 'png'], force_run=False).get_filelist()
                patch_list_valid = patchExtractor.PatchExtractor(flags.patch_size, flags.tile_size,
                                                                 flags.ds_name + '_valid_hist',
                                                                 overlap, overlap // 2). \
                    run(file_list=file_list_valid, file_exts=['jpg', 'png'], force_run=False).get_filelist()
                chan_mean = cm.meta_data['chan_mean']

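                # train/valid input pipelines with random flip/rotation augmentation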
                train_init_op, valid_init_op, reader_op = \
                    dataReaderSegmentation.DataReaderSegmentationTrainValid(
                        flags.patch_size, patch_list_train, patch_list_valid, batch_size=flags.batch_size,
                        chan_mean=chan_mean,
                        aug_func=[reader_utils.image_flipping, reader_utils.image_rotating],
                        random=True, has_gt=True, gt_dim=1, include_gt=True, valid_mult=flags.val_mult).read_op()
                feature, label = reader_op

                model.create_graph(feature)
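                # when start_layer < 10, train_var_filter limits training to the
                # upsampling layers from start_layer onward (partial fine-tuning)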
                if start_layer >= 10:
                    model.compile(feature, label, flags.n_train, flags.n_valid, flags.patch_size, ersaPath.PATH['model'],
                                  par_dir=flags.par_dir, loss_type='xent')
                else:
                    model.compile(feature, label, flags.n_train, flags.n_valid, flags.patch_size, ersaPath.PATH['model'],
                                  par_dir=flags.par_dir, loss_type='xent',
                                  train_var_filter=['layerup{}'.format(i) for i in range(start_layer, 10)])
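                # hooks: log training loss / learning rate, checkpoint every save_epoch
                # epochs, and record validation loss, IoU and image summaries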
                train_hook = hook.ValueSummaryHook(flags.verb_step, [model.loss, model.lr_op],
                                                   value_names=['train_loss', 'learning_rate'],
                                                   print_val=[0])
                model_save_hook = hook.ModelSaveHook(model.get_epoch_step() * flags.save_epoch, model.ckdir)
                valid_loss_hook = hook.ValueSummaryHookIters(model.get_epoch_step(), [model.loss_xent, model.loss_iou],
                                                             value_names=['valid_loss', 'IoU'], log_time=True,
                                                             run_time=model.n_valid)
                image_hook = hook.ImageValidSummaryHook(model.input_size, model.get_epoch_step(), feature, label,
                                                        model.pred,
                                                        nn_utils.image_summary, img_mean=chan_mean)
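                # warm-start from a pretrained checkpoint unless training from scratch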
                start_time = time.time()
                if not flags.from_scratch:
                    model.load(flags.model_dir)
                model.train(train_hooks=[train_hook, model_save_hook], valid_hooks=[valid_loss_hook, image_hook],
                            train_init=train_init_op, valid_init=valid_init_op)
                print('Duration: {:.3f}'.format((time.time() - start_time) / 3600))
Code example #3
File: eval_aemo.py  Project: bohaohuang/sis
    # a method header is assumed here; the original excerpt begins mid-class
    def get_files(self):
        files = sorted(glob(os.path.join(self.path, '*.*')))
        files = [f for f in files if 'txt' not in f]  # keep every non-txt file
        return files

# settings
class_num = 2
tile_size = (5000, 5000)
suffix = 'aemo_pad'
bs = 5
gpu = 1
model_name = 'unet'

# define network
if model_name == 'unet':
    patch_size = (572, 572)
    model = unet.UNet(class_num, patch_size, suffix=suffix, batch_size=bs)
else:
    patch_size = (321, 321)
    model = deeplab.DeepLab(class_num, patch_size, suffix=suffix, batch_size=bs)
overlap = model.get_overlap()

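# index the padded AEMO imagery into a cached collection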
cm = collectionMaker.read_collection(raw_data_path=r'/home/lab/Documents/bohao/data/aemo/aemo_pad',
                                     field_name='aus10,aus30,aus50',
                                     field_id='',
                                     rgb_ext='.*rgb',
                                     gt_ext='.*gt',
                                     file_ext='tif',
                                     force_run=False,
                                     clc_name=suffix)
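# make a /255 copy of the ground truth so building labels are 0/1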
gt_d255 = collectionEditor.SingleChanMult(cm.clc_dir, 1 / 255, ['.*gt', 'gt_d255']). \
    run(force_run=False, file_ext='tif', d_type=np.uint8)
Code example #4
File: compare_model.py  Project: bohaohuang/sis
class_num = 2
patch_size = (572, 572)
tile_size = (5000, 5000)
batch_size = 1
gpu = 0
model_dir = r'/hdd6/Models/Inria_decay/UnetCrop_inria_decay_0_PS(572, 572)_BS5_EP100_LR0.0001_DS60.0_DR0.1_SFN32'

cm = collectionMaker.read_collection('inria')
# cm.print_meta_data()

file_list_valid = cm.load_files(field_id=','.join(str(i) for i in range(5)),
                                field_ext='RGB,gt_d255')
chan_mean = cm.meta_data['chan_mean']

nn_utils.tf_warn_level(3)
model = unet.UNet(class_num, patch_size)

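# build the graph from a bare placeholder so variable names can be inspected
# without a data reader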
feature = tf.placeholder(dtype=tf.float32,
                         shape=[None, patch_size[0], patch_size[1], 3])
model.create_graph(feature)

model_names_ersa = [i.name for i in tf.trainable_variables()]
for n in model_names_ersa:
    print(n)

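# start a fresh graph with plain placeholders for the second model under comparison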
tf.reset_default_graph()
X = tf.placeholder(tf.float32,
                   shape=[None, patch_size[0], patch_size[1], 3],
                   name='X')
y = tf.placeholder(tf.int32,
                   shape=[None, patch_size[0], patch_size[1], 1],
                   name='y')  # closing arguments assumed; the original excerpt ends here
Code example #5
def main(flags):
    nn_utils.set_gpu(flags.GPU)
    for start_layer in flags.start_layer:
        if start_layer >= 10:
            suffix_base = 'aemo'
        else:
            suffix_base = 'aemo_up{}'.format(start_layer)
        if flags.from_scratch:
            suffix_base += '_scratch'
        for lr in flags.learn_rate:
            for run_id in range(1):
                suffix = '{}_{}'.format(suffix_base, run_id)
                tf.reset_default_graph()

                np.random.seed(run_id)
                tf.set_random_seed(run_id)

                # define network
                model = unet.UNet(flags.num_classes,
                                  flags.patch_size,
                                  suffix=suffix,
                                  learn_rate=lr,
                                  decay_step=flags.decay_step,
                                  decay_rate=flags.decay_rate,
                                  epochs=flags.epochs,
                                  batch_size=flags.batch_size)

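                # build train/valid patch lists from a precomputed file list, split by
                # tile name instead of running PatchExtractor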
                file_list = os.path.join(flags.data_dir, 'file_list.txt')
                lines = ersa_utils.load_file(file_list)

                patch_list_train = []
                patch_list_valid = []
                train_tile_names = ['aus10', 'aus30']
                valid_tile_names = ['aus50']

                for line in lines:
                    tile_name = os.path.basename(
                        line.split(' ')[0]).split('_')[0].strip()
                    if tile_name in train_tile_names:
                        patch_list_train.append(line.strip().split(' '))
                    elif tile_name in valid_tile_names:
                        patch_list_valid.append(line.strip().split(' '))
                    else:
                        raise ValueError('unexpected tile name: {}'.format(tile_name))

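                # the cached 'aemo_align' collection is only used for its channel mean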
                cm = collectionMaker.read_collection('aemo_align')
                chan_mean = cm.meta_data['chan_mean']

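                # train/valid input pipelines with random flip/rotation augmentation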
                train_init_op, valid_init_op, reader_op = \
                    dataReaderSegmentation.DataReaderSegmentationTrainValid(
                        flags.patch_size, patch_list_train, patch_list_valid, batch_size=flags.batch_size,
                        chan_mean=chan_mean,
                        aug_func=[reader_utils.image_flipping, reader_utils.image_rotating],
                        random=True, has_gt=True, gt_dim=1, include_gt=True, valid_mult=flags.val_mult).read_op()
                feature, label = reader_op

                model.create_graph(feature)
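                # as in example #2, train_var_filter restricts partial fine-tuning to
                # the upsampling layers from start_layer onward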
                if start_layer >= 10:
                    model.compile(feature,
                                  label,
                                  flags.n_train,
                                  flags.n_valid,
                                  flags.patch_size,
                                  ersaPath.PATH['model'],
                                  par_dir=flags.par_dir,
                                  loss_type='xent')
                else:
                    model.compile(feature,
                                  label,
                                  flags.n_train,
                                  flags.n_valid,
                                  flags.patch_size,
                                  ersaPath.PATH['model'],
                                  par_dir=flags.par_dir,
                                  loss_type='xent',
                                  train_var_filter=[
                                      'layerup{}'.format(i)
                                      for i in range(start_layer, 10)
                                  ])
                train_hook = hook.ValueSummaryHook(
                    flags.verb_step, [model.loss, model.lr_op],
                    value_names=['train_loss', 'learning_rate'],
                    print_val=[0])
                model_save_hook = hook.ModelSaveHook(
                    model.get_epoch_step() * flags.save_epoch, model.ckdir)
                valid_loss_hook = hook.ValueSummaryHookIters(
                    model.get_epoch_step(), [model.loss_xent, model.loss_iou],
                    value_names=['valid_loss', 'IoU'],
                    log_time=True,
                    run_time=model.n_valid)
                image_hook = hook.ImageValidSummaryHook(model.input_size,
                                                        model.get_epoch_step(),
                                                        feature,
                                                        label,
                                                        model.pred,
                                                        nn_utils.image_summary,
                                                        img_mean=chan_mean)
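                # load pretrained weights unless training from scratch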
                start_time = time.time()
                if not flags.from_scratch:
                    model.load(flags.model_dir)
                model.train(train_hooks=[train_hook, model_save_hook],
                            valid_hooks=[valid_loss_hook, image_hook],
                            train_init=train_init_op,
                            valid_init=valid_init_op)
                print('Duration: {:.3f}'.format(
                    (time.time() - start_time) / 3600))
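
Both fine-tuning scripts (examples #2 and #5) expect a flags namespace. As a
reference, the following is a minimal argparse sketch: the argument names match
the attributes the code actually reads, while the default values are
illustrative assumptions taken from the other examples on this page.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--GPU', type=int, default=0)
parser.add_argument('--start_layer', type=int, nargs='+', default=[10])     # iterated over
parser.add_argument('--learn_rate', type=float, nargs='+', default=[1e-4])  # iterated over
parser.add_argument('--from_scratch', action='store_true')
parser.add_argument('--num_classes', type=int, default=2)
parser.add_argument('--patch_size', type=int, nargs=2, default=[572, 572])
parser.add_argument('--tile_size', type=int, nargs=2, default=[5000, 5000])
parser.add_argument('--batch_size', type=int, default=5)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--decay_step', type=float, default=60)
parser.add_argument('--decay_rate', type=float, default=0.1)
parser.add_argument('--val_mult', type=int, default=5)
parser.add_argument('--n_train', type=int, default=8000)
parser.add_argument('--n_valid', type=int, default=1000)
parser.add_argument('--verb_step', type=int, default=50)
parser.add_argument('--save_epoch', type=int, default=20)
parser.add_argument('--data_dir', type=str, default='')
parser.add_argument('--ds_name', type=str, default='aemo')
parser.add_argument('--par_dir', type=str, default='')
parser.add_argument('--model_dir', type=str, default='')

flags = parser.parse_args()
main(flags)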