Example No. 1
import os
import tensorflow as tf
import sis_utils
# the uab framework modules used below (uab_collectionFunctions, bPreproc,
# uabPreprocClasses, uab_DataHandlerFunctions) are assumed importable from the repo root
from bohaoCustom import uabMakeNetwork_DeepLabV2

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
img_dir, task_dir = sis_utils.get_task_img_folder()

# make network
input_size = (321, 321)
input_size_fit = (224, 224)
# define place holder
X = tf.placeholder(tf.float32, shape=[None, input_size[0], input_size[1], 3], name='X')
y = tf.placeholder(tf.int32, shape=[None, input_size[0], input_size[1], 1], name='y')
mode = tf.placeholder(tf.bool, name='mode')
model = uabMakeNetwork_DeepLabV2.DeeplabV3({'X':X, 'Y':y},
                                           trainable=mode,
                                           input_size=input_size,)

# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inria')
opDetObj = bPreproc.uabOperTileDivide(255)          # inria GT has value 0 and 255, we map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])         # get mean of rgb info

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], # extract all 4 channels
                                                cSize=input_size, # patch size defined above
                                                numPixOverlap=int(model.get_overlap()/2),  # overlap between adjacent patches
                                                extSave=['jpg', 'jpg', 'jpg', 'png'], # save rgb files as jpg and gt as png
                                                isTrain=True,
                                                gtInd=3,
                                                pad=model.get_overlap()) # pad around the tiles
patchDir = extrObj.run(blCol)
Example No. 2
def main(flags):
    city_dict = {'austin': 0, 'chicago': 1, 'kitsap': 2, 'tyrol-w': 3, 'vienna': 4}
    city_alpha = [0.2, 0.5, 0.1, 0.1, 0.1]

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({'X':X, 'Y':y},
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)          # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], # extract all 4 channels
                                                    cSize=flags.input_size, # patch size from flags
                                                    numPixOverlap=int(model.get_overlap()/2),  # overlap between adjacent patches
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], # save rgb files as jpg and gt as png
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap()) # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use first 5 tiles for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])

    dataReader_train = uabDataReader.ImageLabelReaderCitySampleControl(
        [3], [0, 1, 2], patchDir, file_list_train, flags.input_size, flags.batch_size,
        city_dict, city_alpha, dataAug='flip,rotate', block_mean=np.append([0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReaderCitySampleControl(
        [3], [0, 1, 2], patchDir, file_list_valid, flags.input_size, flags.batch_size,
        city_dict, city_alpha, dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_Domain')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.res_dir,
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                    # print a message every 100 steps
              save_epoch=5,                     # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size
              )

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
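
The scripts above read every hyper-parameter from a flags object whose construction is not part of this listing. Below is one minimal, hypothetical way to build such an object with argparse; the attribute names mirror the ones used in main() above, but the defaults are placeholders rather than values from the original repository.

import argparse

def make_flags():
    # Hypothetical stand-in for the flags object consumed by main();
    # attribute names mirror the scripts above, defaults are illustrative only.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', dest='model_name', default='DeeplabV3_inria')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=5)
    parser.add_argument('--learning-rate', dest='learning_rate', type=float, default=1e-5)
    parser.add_argument('--decay-step', dest='decay_step', type=int, default=60)
    parser.add_argument('--decay-rate', dest='decay_rate', type=float, default=0.1)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--sfn', type=int, default=32)                 # start filter number
    parser.add_argument('--num-classes', dest='num_classes', type=int, default=2)
    parser.add_argument('--n-train', dest='n_train', type=int, default=8000)
    parser.add_argument('--n-valid', dest='n_valid', type=int, default=1000)
    parser.add_argument('--res-dir', dest='res_dir', default=None)
    flags = parser.parse_args()
    flags.input_size = (321, 321)      # patch size fed to the network (see Example No. 1)
    flags.tile_size = (5000, 5000)     # Inria tiles are 5000 x 5000 px
    return flags

# flags = make_flags(); main(flags)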
Example No. 3
        ])
    img_mean = blCol.getChannelMeans([0, 1, 2])

    # make the model
    # define place holder
    X = tf.placeholder(tf.float32,
                       shape=[None, input_size[0], input_size[1], 3],
                       name='X')
    y = tf.placeholder(tf.int32,
                       shape=[None, input_size[0], input_size[1], 1],
                       name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({
        'X': X,
        'Y': y
    },
                                               trainable=mode,
                                               input_size=input_size,
                                               batch_size=5,
                                               start_filter_num=32)
    # create graph
    model.create_graph('X', class_num=2)

    # evaluate on tiles
    model.evaluate(file_list_valid,
                   file_list_valid_truth,
                   parent_dir,
                   parent_dir_truth,
                   input_size,
                   tile_size,
                   batch_size,
                   img_mean,
Example No. 4
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({'X':X, 'Y':y},
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original files are under /ei-edl01/data/uab_datasets
    blCol = uab_collectionFunctions.uabCollection('spca')
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtrRand([0, 1, 2, 3],  # extract all 4 channels
                                                        cSize=flags.input_size,  # patch size from flags
                                                        numPerTile=256,  # number of random patches drawn per tile
                                                        extSave=['png', 'jpg', 'jpg', 'jpg'],
                                                        # save rgb files as jpg and gt as png
                                                        isTrain=True,
                                                        gtInd=0,
                                                        pad=model.get_overlap(),
                                                        name='Rand{}'.format(flags.run_id))  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use the first 250 keys for training and the next 250 for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 250)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(250, 500)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.res_dir,
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                    # print a message every 100 steps
              save_epoch=5,                     # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
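
A note on the block_mean=np.append([0], img_mean) pattern used by the readers above: the ground-truth channel is stacked together with the RGB channels, so a four-element mean vector is passed and its first entry is 0, leaving the labels untouched when the per-channel means are subtracted. A small numpy sketch of that idea (illustrative only, not the reader's internal code):

import numpy as np

img_mean = np.array([103.9, 110.2, 98.7])           # per-channel RGB means (example numbers)
block_mean = np.append([0], img_mean)                # [0, R_mean, G_mean, B_mean]

# a fake patch with the GT stacked as channel 0 and RGB as channels 1-3
block = np.random.randint(0, 256, size=(321, 321, 4)).astype(np.float32)
centered = block - block_mean.reshape((1, 1, 4))     # GT channel is unchanged, RGB are centered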
Example No. 5
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({
        'X': X,
        'Y': y
    },
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original files are under /ei-edl01/data/uab_datasets
    blCol = uab_collectionFunctions.uabCollection('Mass_road')
    blCol.readMetadata()
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # the GT has values 0 and 255; map them to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=int(model.get_overlap() // 2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # split folds by city: key 1 for training, key 2 for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'city')
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [1])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [2])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='Inria_GAN/Road')
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # train from scratch, no need to load pre-trained model
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
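
The numPixOverlap and pad arguments of uabPatchExtr decide how the patch grid covers each (padded) tile. The helper below is written only for this note, not taken from the uab code, and just lists the start coordinates such a grid could use along one axis:

def patch_starts(tile_len, patch_len, overlap, pad):
    # start coordinates of patches along one axis of a tile padded by `pad` on each side
    padded_len = tile_len + 2 * pad
    stride = patch_len - overlap
    starts = list(range(0, padded_len - patch_len + 1, stride))
    if starts[-1] != padded_len - patch_len:
        starts.append(padded_len - patch_len)        # make sure the far border is covered
    return starts

# e.g. a 5000 px tile, 321 px patches, 92 px overlap, 184 px padding (illustrative numbers)
print(len(patch_starts(5000, 321, 92, 184)))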
Example No. 6
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({
        'X': X,
        'Y': y
    },
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original files are under /ei-edl01/data/uab_datasets
    blCol = uab_collectionFunctions.uabCollection('gbdx2')
    opDetObj = bPreproc.uabOperTileDivide(255)
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [0],
                                                     opDetObj)
    rescObj.run(blCol)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info
    print(img_mean)

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [1, 2, 3, 4],  # extract all 4 channels
        cSize=flags.input_size,  # patch size from flags
        numPixOverlap=int(model.get_overlap() / 2),  # overlap between adjacent patches
        extSave=['jpg', 'jpg', 'jpg',
                 'png'],  # save rgb files as jpg and gt as png
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'force_tile')
    # use the first 90% of patches for training and the last 10% for validation
    # file_list = [file_list[a] for a in np.random.permutation(len(file_list))] # permute the files
    valid_num = int(len(file_list) * 0.9)
    file_list_train = file_list[:valid_num]
    file_list_valid = file_list[valid_num:]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=flags.res_dir,
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=2,  # save the model every 2 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example No. 7
X = tf.placeholder(tf.float32,
                   shape=[None, chip_size[0], chip_size[1], 3],
                   name='X')
y = tf.placeholder(tf.int32,
                   shape=[None, chip_size[0], chip_size[1], 1],
                   name='y')
mode = tf.placeholder(
    tf.bool, name='mode')  # This controls if you'll update weights or not
# Set this True when training
model = uabMakeNetwork_DeepLabV2.DeeplabV3(
    {
        'X': X,
        'Y': y
    },
    trainable=mode,  # control if you're training or not
    input_size=chip_size,  # input size to NN, same as the extracted patch size
    model_name=model_name,
    batch_size=batch_size,  # mini-batch size
    learn_rate=learn_rate,  # learning rate
    decay_step=decay_step,  # learn rate decay after 60 epochs
    decay_rate=decay_rate,  # learn rate decay to 0.1*before
    epochs=epochs,  # total number of epochs to run
    start_filter_num=start_filter_num)  # number of filters at the first layer
model.create_graph('X',
                   class_num=class_num)  # TensorFlow builds the computation graph here

#### Inria 1 ########
# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inria')
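
The mode placeholder above is the switch between training and inference behaviour for layers such as batch normalization. The self-contained TF1 toy graph below shows the same pattern with standard tf.layers calls; it is unrelated to the uab classes and only illustrates how a boolean placeholder is fed:

import numpy as np
import tensorflow as tf

x_demo = tf.placeholder(tf.float32, shape=[None, 4], name='x_demo')
is_training = tf.placeholder(tf.bool, name='is_training')
h = tf.layers.dense(x_demo, 8)
# uses batch statistics when is_training=True, moving averages when False
h = tf.layers.batch_normalization(h, training=is_training)
out = tf.layers.dense(h, 2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(3, 4).astype(np.float32)
    train_out = sess.run(out, feed_dict={x_demo: batch, is_training: True})   # training-style pass
    eval_out = sess.run(out, feed_dict={x_demo: batch, is_training: False})   # inference-style pass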
Example No. 8
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({
        'X': X,
        'Y': y
    },
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original files are under /ei-edl01/data/uab_datasets
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
    print(img_mean)

    img_dir, task_dir = sis_utils.get_task_img_folder()
    save_dir = os.path.join(task_dir, 'bihar_patches')
    ersa_utils.make_dir_if_not_exist(save_dir)
    files, par_dir = blCol.getAllTileByDirAndExt([0, 1, 2, 3])
    resize_patches(files, par_dir, flags.input_size, save_dir)

    patchDir = save_dir

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'tile')
    # use the first 52 tiles for training and the last 5 for validation
    #assert len(file_list) == flags.n_train + flags.n_valid
    file_list_train = [a for a in file_list[:52]]
    file_list_valid = [a for a in file_list[-5:]]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            None,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          None,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='{}'.format(flags.ds_name))
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=flags.res_dir,  # load pre-trained weights from flags.res_dir
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=200,  # save the model every 200 epochs
        gpu=GPU,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
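
resize_patches above comes from the surrounding project and its body is not shown in this listing. Purely as an illustration of what such a helper might do, here is a hypothetical version using Pillow that resizes every tile to the network input size, using nearest-neighbour resampling for label masks so class ids are not blended (the file-naming assumptions are mine, not the original's):

import os
from PIL import Image

def resize_patches_sketch(files, par_dir, input_size, save_dir):
    # Hypothetical stand-in for resize_patches(): assumes `files` is a flat list of
    # file names and that ground-truth masks are the .png files.
    for fname in files:
        img = Image.open(os.path.join(par_dir, fname))
        resample = Image.NEAREST if fname.lower().endswith('.png') else Image.BILINEAR
        img = img.resize((input_size[1], input_size[0]), resample=resample)
        img.save(os.path.join(save_dir, fname))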
Example No. 9
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({
        'X': X,
        'Y': y
    },
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],  # extract all 4 channels
        cSize=flags.input_size,  # patch size from flags
        numPixOverlap=int(model.get_overlap() / 2),  # overlap between adjacent patches
        extSave=['jpg', 'jpg', 'jpg',
                 'png'],  # save rgb files as jpg and gt as png
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'city')
    # training: all cities except flags.leave_city
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(5) if i != flags.leave_city])
    file_list_train += get_file_list_finetune(INPUT_SIZE, PORTION)

    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'city')
    idx2, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                               'force_tile')
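    # composite key per patch: (force_tile index) * 10 + (city index)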
    idx3 = [j * 10 + i for i, j in zip(idx, idx2)]

    # validation: the first 5 tiles of the left-out city
    filter_valid = []
    for i in range(6):
        for j in range(1, 37):
            if i == flags.leave_city and j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx3, file_list, filter_valid)

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=flags.pred_dir,  # load pre-trained weights from flags.pred_dir
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=1,  # save the model every epoch
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))