def make_preds(model, model_dir, file_list, save_path):
    for file_name in file_list:
        tile_name = os.path.basename(file_name)[:-4]
        print('Evaluating {} ...'.format(tile_name))
        start_time = time.time()
        try:
            tile_size = imageio.imread(file_name).shape[:2]
        except OSError:
            continue
        
        #prepare the reader
        pad = model.get_overlap()
        reader = uabDataReader.ImageLabelReader(gtInds=[0], dataInds=[0], nChannels=3,
                                                parentDir=os.path.dirname(file_name),
                                                chipFiles=[[os.path.basename(file_name)]],
                                                chip_size=input_size, tile_size=tile_size,
                                                batchSize=batch_size, block_mean=img_mean,
                                                overlap=pad, padding=np.array([pad/2, pad/2]),
                                                isTrain=False)
        rManager = reader.readManager

        # run the model
        pred = model.run(pretrained_model_dir=model_dir, test_reader=rManager, tile_size=tile_size,
                         patch_size=input_size, gpu=gpu, load_epoch_num=None, best_model=False)
        print(np.unique(pred), pred.shape)
        
        # save results
        save_name = os.path.join(save_path, '{}_pred.png'.format(tile_name))
        imageio.imsave(save_name, pred.astype(np.uint8))

        duration = time.time() - start_time
        print('duration: {:.2f}'.format(duration))
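
make_preds above depends on module-level globals (input_size, batch_size, img_mean, gpu) and an already-built model object. A minimal, hypothetical driver is sketched below; every value and path in it is an assumption for illustration, not something taken from the original script.

# Hypothetical driver for make_preds(); all names/values below are placeholders.
import numpy as np
from glob import glob

input_size = (572, 572)                      # patch size fed to the network (assumed)
batch_size = 1
gpu = 0
img_mean = np.array([127.5, 127.5, 127.5])   # per-channel RGB mean (assumed)

# model = uabMakeNetwork_UNet.UnetModelCrop(...)   # built as in the later examples
# make_preds(model, '/path/to/model_dir', glob('/path/to/tiles/*.tif'), '/path/to/preds')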
Example #2
    def save_activations(self, rgb_list, gt_list, rgb_dir, img_mean, gpu,
                         pretrained_model_dir, path_to_save, input_size,
                         batch_size, load_epoch_num):
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
        activation_dict = dict()
        for file_name, file_name_truth in zip(rgb_list, gt_list):
            tile_name = file_name_truth.split('_')[0]
            print('Evaluating {} ... '.format(tile_name))

            # get tile size
            sample_img = imageio.imread(os.path.join(rgb_dir[0], file_name[0]))
            tile_size = sample_img.shape[:2]

            # prepare the reader
            reader = uabDataReader.ImageLabelReader(
                gtInds=[0],
                dataInds=[0],
                nChannels=3,
                parentDir=rgb_dir,
                chipFiles=[file_name],
                chip_size=input_size,
                tile_size=tile_size,
                batchSize=batch_size,
                block_mean=img_mean,
                overlap=self.get_overlap(),
                padding=np.array(
                    (self.get_overlap() / 2, self.get_overlap() / 2)),
                isTrain=False)
            rManager = reader.readManager
            total_len = np.ceil((tile_size[0] + self.get_overlap()) / (input_size[0] - self.get_overlap())) * \
                        np.ceil((tile_size[1] + self.get_overlap()) / (input_size[1] - self.get_overlap()))
            if self.config is None:
                self.config = tf.ConfigProto(allow_soft_placement=True)
            with tf.Session(config=self.config) as sess:
                init = tf.global_variables_initializer()
                sess.run(init)
                self.load(pretrained_model_dir,
                          sess,
                          epoch=load_epoch_num,
                          best_model=False)
                for X_batch in tqdm(rManager, total=total_len):
                    for layer_id in range(len(self.activations)):
                        layer_val = sess.run(self.activations[layer_id],
                                             feed_dict={
                                                 self.inputs['X']: X_batch,
                                                 self.trainable: False
                                             })
                        for chan_id in range(layer_val.shape[-1]):
                            f_i_t = layer_val[:, :, :, chan_id].flatten()
                            act_name = 'f_{}_{}'.format(layer_id, chan_id)
                            if act_name not in activation_dict:
                                activation_dict[act_name] = bayes_update()
                            activation_dict[act_name].update(f_i_t)

        save_name = os.path.join(path_to_save, 'activation_list.pkl')
        ersa_utils.save_file(save_name, activation_dict)
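
save_activations calls a bayes_update() accumulator that is not defined in this snippet; all the loop needs is a no-argument constructor and an update(values) method that folds in each flattened activation map. A minimal stand-in with that interface, here an online mean/variance accumulator (Welford's algorithm), is sketched below as an assumption; the real bayes_update may compute something different.

import numpy as np

class bayes_update:
    """Assumed interface only: streams flattened activations into running statistics."""
    def __init__(self):
        self.n = 0          # number of values seen
        self.mean = 0.0     # running mean
        self.m2 = 0.0       # running sum of squared deviations

    def update(self, values):
        # Welford's online update over one flattened activation map
        for x in np.asarray(values, dtype=np.float64).ravel():
            self.n += 1
            delta = x - self.mean
            self.mean += delta / self.n
            self.m2 += delta * (x - self.mean)

    @property
    def var(self):
        return self.m2 / (self.n - 1) if self.n > 1 else 0.0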
Example #3
def make_preds(model, model_dir, file_list):
    for file_name in file_list:
        tile_name = os.path.basename(file_name)[:-4]
        print('Evaluating {} ... '.format(tile_name))
        start_time = time.time()
        tile_size = imageio.imread(file_name).shape[:2]

        print(img_mean)

        # prepare the reader
        reader = uabDataReader.ImageLabelReader(
            gtInds=[0],
            dataInds=[0],
            nChannels=3,
            parentDir=os.path.dirname(file_name),
            chipFiles=[[os.path.basename(file_name)]],
            chip_size=input_size,
            tile_size=tile_size,
            batchSize=batch_size,
            block_mean=img_mean,
            overlap=model.get_overlap(),
            padding=np.array(
                (model.get_overlap() / 2, model.get_overlap() / 2)),
            isTrain=False)
        rManager = reader.readManager

        # run the model
        pred = model.run(pretrained_model_dir=model_dir,
                         test_reader=rManager,
                         tile_size=tile_size,
                         patch_size=input_size,
                         gpu=gpu,
                         load_epoch_num=None,
                         best_model=False)

        print(np.unique(pred))
        #from visualize import visualize_utils
        #visualize_utils.compare_figures([imageio.imread(file_name), pred], (1, 2), fig_size=(12, 5))

        duration = time.time() - start_time
        print('duration: {:.3f}'.format(duration))
Example #4
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({'X':X, 'Y':y},
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('spca')
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtrRand([0, 1, 2, 3],  # extract all 4 channels
                                                        cSize=flags.input_size,  # patch size as 572*572
                                                        numPerTile=256,  # extract 256 random patches per tile
                                                        extSave=['png', 'jpg', 'jpg', 'jpg'],
                                                        # save rgb files as jpg and gt as png
                                                        isTrain=True,
                                                        gtInd=0,
                                                        pad=model.get_overlap(),  # pad around the tiles
                                                        name='Rand{}'.format(flags.run_id))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use tiles 0-249 for training and tiles 250-499 for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 250)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(250, 500)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.res_dir,
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                    # print a message every 100 steps
              save_epoch=5,                     # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
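
Every main(flags) in these examples reads attributes such as model_name, input_size, tile_size, batch_size, learning_rate, decay_step, decay_rate, epochs, sfn, num_classes, n_train, n_valid, run_id and res_dir from a flags object. A hypothetical argparse setup covering those names is sketched below; the defaults are placeholders, not the values used to train the original models.

import argparse

def get_flags():
    # Hypothetical flag definitions; defaults are illustrative only.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', dest='model_name', default='unet_example')
    parser.add_argument('--input-size', dest='input_size', nargs=2, type=int, default=[572, 572])
    parser.add_argument('--tile-size', dest='tile_size', nargs=2, type=int, default=[5000, 5000])
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=5)
    parser.add_argument('--learning-rate', dest='learning_rate', type=float, default=1e-4)
    parser.add_argument('--decay-step', dest='decay_step', type=int, default=60)
    parser.add_argument('--decay-rate', dest='decay_rate', type=float, default=0.1)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--sfn', type=int, default=32)                 # start filter number
    parser.add_argument('--num-classes', dest='num_classes', type=int, default=2)
    parser.add_argument('--n-train', dest='n_train', type=int, default=8000)
    parser.add_argument('--n-valid', dest='n_valid', type=int, default=1000)
    parser.add_argument('--run-id', dest='run_id', default='0')
    parser.add_argument('--res-dir', dest='res_dir', default=None)
    return parser.parse_args()

# if __name__ == '__main__':
#     main(get_flags())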
Example #5
# define place holder
X = tf.placeholder(tf.float32, shape=[None, input_size[0], input_size[1], 3], name='X')
y = tf.placeholder(tf.int32, shape=[None, input_size[0], input_size[1], 1], name='y')
mode = tf.placeholder(tf.bool, name='mode')
model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                          trainable=mode,
                                          input_size=input_size,
                                          batch_size=5, start_filter_num=32)
# create graph
model.create_graph('X', class_num=2)
reader = uabDataReader.ImageLabelReader(gtInds=[0],
                                        dataInds=[0],
                                        nChannels=3,
                                        parentDir=adjust_save_dir,
                                        chipFiles=[['{}.tif'.format(img_id)]],
                                        chip_size=input_size,
                                        tile_size=tile_size,
                                        batchSize=5,
                                        block_mean=img_mean,
                                        overlap=model.get_overlap(),
                                        padding=np.array((model.get_overlap() / 2, model.get_overlap() / 2)),
                                        isTrain=False)
test_reader = reader.readManager
# run algo
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    model.load(sp_model_dir, sess)
    result = model.test('X', sess, test_reader)
image_pred = uabUtilreader.un_patchify_shrink(result,
Example #6
                                              start_filter_num=32)
    # create graph
    model.create_graph('X', class_num=2)

    # sp detector
    file_name = 'sp_' + test_file[0]
    if os.path.exists(os.path.join(my_dir, file_name)):
        continue
    else:
        reader = uabDataReader.ImageLabelReader(
            gtInds=[0],
            dataInds=[0],
            nChannels=3,
            parentDir=os.path.join(data_dir, 'hist_match_ct'),
            chipFiles=[test_file],
            chip_size=patch_size,
            tile_size=tile_size,
            batchSize=bs,
            block_mean=img_mean,
            overlap=model.get_overlap(),
            padding=np.array(
                (model.get_overlap() / 2, model.get_overlap() / 2)),
            isTrain=False)
        test_reader = reader.readManager
        # run algo
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            model.load(model_dir, sess)
            result = model.test('X', sess, test_reader)
Example #7
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = myUnetModelCrop({
        'X': X,
        'Y': y
    },
                            trainable=mode,
                            model_name=flags.model_name,
                            input_size=flags.input_size,
                            batch_size=flags.batch_size,
                            learn_rate=flags.learning_rate,
                            decay_step=flags.decay_step,
                            decay_rate=flags.decay_rate,
                            epochs=flags.epochs,
                            start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
    print(img_mean)

    img_dir, task_dir = sis_utils.get_task_img_folder()
    save_dir = os.path.join(task_dir, 'bihar_patches')
    ersa_utils.make_dir_if_not_exist(save_dir)
    files, par_dir = blCol.getAllTileByDirAndExt([0, 1, 2, 3])
    resize_patches(files, par_dir, flags.input_size, save_dir)

    patchDir = save_dir

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'tile')
    # use the first 45 tiles for training and the last 5 for validation
    #assert len(file_list) == flags.n_train + flags.n_valid
    file_list_train = [a for a in file_list[:45]]
    file_list_valid = [a for a in file_list[-5:]]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            None,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          None,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='{}'.format(flags.ds_name))
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # train from scratch, no pre-trained model to load
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=200,  # save the model every 200 epochs
        gpu=GPU,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example #8
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop({
        'X': X,
        'Y': y
    },
                          trainable=mode,
                          model_name=flags.model_name,
                          input_size=flags.input_size,
                          batch_size=flags.batch_size,
                          learn_rate=flags.learning_rate,
                          decay_step=flags.decay_step,
                          decay_rate=flags.decay_rate,
                          epochs=flags.epochs,
                          start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 3],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=int(model.get_overlap() // 2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(
        patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                   'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]

    # keep only the held-out city: its tiles 1-3 go to validation, the rest to training
    filter_train = []
    filter_valid = []
    for i in range(4):
        for j in range(1, 20):
            if i == flags.leave_city and j < 4:
                filter_valid.append(j * 10 + i)
            elif i == flags.leave_city and j >= 4:
                filter_train.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_valid)

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            None,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          None,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='{}'.format(flags.ds_name),
                       pos_weight=flags.pos_weight)
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # train from scratch, no pre-trained model to load
        isTrain=True,
        img_mean=img_mean[1:],
        verb_step=100,  # print a message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
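
The split above hinges on the composite key j * 10 + i, where i is the per-patch city index and j the (force_)tile index, so a patch can be filtered on both at once; here only the held-out city's patches are kept, with its tiles 1-3 going to validation and the rest to training. A tiny, self-contained illustration of the encoding (the city/tile values are made up):

# Illustration of the j * 10 + i key used above; the city/tile values are made up.
idx_city = [0, 1, 2, 0]                  # per-patch city index
idx_tile = [3, 3, 5, 12]                 # per-patch tile index
keys = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
print(keys)                              # [30, 31, 52, 120]

leave_city = 0
filter_valid = [j * 10 + leave_city for j in range(1, 4)]    # tiles 1-3 of the held-out city
filter_train = [j * 10 + leave_city for j in range(4, 20)]   # its remaining tiles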
Example #9
def main(flags):
    # city_dict
    city_dict = {
        'austin': 0,
        'chicago': 1,
        'kitsap': 2,
        'tyrol-w': 3,
        'vienna': 4
    }
    train_city_list = [city_dict[a] for a in flags.train_city.split(',')]

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({
        'X': X,
        'Y': y
    },
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],  # extract all 4 channels
        cSize=flags.input_size,  # patch size as 572*572
        numPixOverlap=int(model.get_overlap() / 2),  # overlap as 92
        extSave=['jpg', 'jpg', 'jpg',
                 'png'],  # save rgb files as jpg and gt as png
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(
        patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                   'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
    # use first city for validation
    filter_train = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i in train_city_list and j > 5:
                filter_train.append(j * 10 + i)
            elif j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_valid)

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=flags.res_dir,  # initialize from the pre-trained model in flags.res_dir
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example #10
    def evaluate(self,
                 rgb_list,
                 gt_list,
                 rgb_dir,
                 gt_dir,
                 input_size,
                 tile_size,
                 batch_size,
                 img_mean,
                 model_dir,
                 gpu=None,
                 save_result=True,
                 save_result_parent_dir=None,
                 show_figure=False,
                 verb=True,
                 ds_name='default',
                 load_epoch_num=None,
                 best_model=True):
        if show_figure:
            import matplotlib.pyplot as plt

        if save_result:
            self.model_name = model_dir.split('/')[-1]
            if save_result_parent_dir is None:
                score_save_dir = os.path.join(uabRepoPaths.evalPath,
                                              self.model_name, ds_name)
            else:
                score_save_dir = os.path.join(uabRepoPaths.evalPath,
                                              save_result_parent_dir,
                                              self.model_name, ds_name)
            if not os.path.exists(score_save_dir):
                os.makedirs(score_save_dir)
            with open(os.path.join(score_save_dir, 'result.txt'), 'w'):
                pass

        iou_record = []
        iou_return = {}
        for file_name, file_name_truth in zip(rgb_list, gt_list):
            tile_size = ersa_utils.load_file(
                os.path.join(rgb_dir[0], file_name[0])).shape[:2]

            tile_name = file_name_truth.split('_')[0]
            if verb:
                print('Evaluating {} ... '.format(tile_name))
            start_time = time.time()

            # prepare the reader
            reader = uabDataReader.ImageLabelReader(
                gtInds=[0],
                dataInds=[0],
                nChannels=3,
                parentDir=rgb_dir,
                chipFiles=[file_name],
                chip_size=input_size,
                tile_size=tile_size,
                batchSize=batch_size,
                block_mean=img_mean,
                overlap=self.get_overlap(),
                padding=np.array(
                    (self.get_overlap() / 2, self.get_overlap() / 2)),
                isTrain=False)
            rManager = reader.readManager

            # run the model
            pred = self.run(pretrained_model_dir=model_dir,
                            test_reader=rManager,
                            tile_size=tile_size,
                            patch_size=input_size,
                            gpu=gpu,
                            load_epoch_num=load_epoch_num,
                            best_model=best_model,
                            tile_name=tile_name)

            truth_label_img = imageio.imread(
                os.path.join(gt_dir, file_name_truth))
            iou = util_functions.iou_metric(truth_label_img,
                                            pred,
                                            divide_flag=True)
            iou_record.append(iou)
            iou_return[tile_name] = iou

            duration = time.time() - start_time
            if verb:
                print('{} mean IoU={:.3f}, duration: {:.3f}'.format(
                    tile_name, iou[0] / iou[1], duration))

            # save results
            if save_result:
                pred_save_dir = os.path.join(score_save_dir, 'pred')
                if not os.path.exists(pred_save_dir):
                    os.makedirs(pred_save_dir)
                imageio.imsave(os.path.join(pred_save_dir, tile_name + '.png'),
                               pred.astype(np.uint8))
                with open(os.path.join(score_save_dir, 'result.txt'),
                          'a+') as file:
                    file.write('{} {}\n'.format(tile_name, iou))

            if show_figure:
                plt.figure(figsize=(12, 4))
                ax1 = plt.subplot(121)
                ax1.imshow(truth_label_img)
                plt.title('Truth')
                ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
                ax2.imshow(pred)
                plt.title('pred')
                plt.suptitle('{} Results on {} IoU={:.3f}'.format(
                    self.model_name,
                    file_name_truth.split('_')[0], iou[0] / iou[1]))
                plt.show()

        iou_record = np.array(iou_record)
        mean_iou = np.sum(iou_record[:, 0]) / np.sum(iou_record[:, 1])
        print('Overall mean IoU={:.3f}'.format(mean_iou))
        if save_result:
            if save_result_parent_dir is None:
                score_save_dir = os.path.join(uabRepoPaths.evalPath,
                                              self.model_name, ds_name)
            else:
                score_save_dir = os.path.join(uabRepoPaths.evalPath,
                                              save_result_parent_dir,
                                              self.model_name, ds_name)
            with open(os.path.join(score_save_dir, 'result.txt'),
                      'a+') as file:
                file.write('{}'.format(mean_iou))

        return iou_return
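
With divide_flag=True, iou_metric evidently returns the (intersection, union) pixel counts rather than a ratio, which is why the per-tile score is iou[0] / iou[1] and the overall score is a sum-over-sum. A small sketch of that aggregation on made-up counts:

import numpy as np

# made-up (intersection, union) pixel counts for three tiles
iou_record = np.array([[ 900, 1200],
                       [ 450,  600],
                       [1500, 2500]], dtype=np.float64)

per_tile = iou_record[:, 0] / iou_record[:, 1]                 # [0.75, 0.75, 0.6]
overall = np.sum(iou_record[:, 0]) / np.sum(iou_record[:, 1])  # 2850 / 4300 ≈ 0.663
print(per_tile, overall)

Summing the counts before dividing weights large tiles more heavily than averaging the per-tile ratios would.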
Example #11
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    if flags.start_layer >= 10:
        pass
    else:
        flags.model_name += '_up{}'.format(flags.start_layer)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],
                                                    cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['png', 'jpg', 'jpg', 'jpg'],
                                                    isTrain=True,
                                                    gtInd=0,
                                                    pad=int(model.get_overlap()//2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use tiles 0-3 for training and tiles 4-5 for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'tile')
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [0, 1, 2, 3])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [4, 5])
    file_list_train = file_list_train[-int(len(file_list_train)*flags.portion):]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    if flags.start_layer >= 10:
        model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                           loss_type='xent', par_dir='aemo/{}'.format(flags.ds_name))
    else:
        model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                           loss_type='xent', par_dir='aemo/{}'.format(flags.ds_name),
                           train_var_filter=['layerup{}'.format(i) for i in range(flags.start_layer, 10)])
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.model_dir,   # fine-tune from the pre-trained model in flags.model_dir
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                        # print a message every 100 steps
              save_epoch=5,                         # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
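
The train_var_filter list above restricts optimization to the upsampling blocks named layerup{start_layer} through layerup9, which is how this script fine-tunes only the upper part of the decoder (assuming the layer naming used by UnetModelCrop). For example:

# Assuming decoder blocks are named layerup0 ... layerup9 as in the call above.
start_layer = 7
train_var_filter = ['layerup{}'.format(i) for i in range(start_layer, 10)]
print(train_var_filter)   # ['layerup7', 'layerup8', 'layerup9']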
Example #12
        'bellingham', 'bloomington', 'sfo', 'tyrol-e', 'innsbruck', 'vienna_s'
    ])
file_list_valid = uabCrossValMaker.make_file_list_by_key(
    idx,
    file_list, [i for i in range(6, 11)],
    filter_list=[
        'bellingham', 'bloomington', 'sfo', 'tyrol-e', 'innsbruck', 'vienna_s'
    ])

with tf.name_scope('image_loader'):
    # GT has no mean to subtract, append a 0 for block mean
    dataReader_train1 = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                       patchDir,
                                                       file_list_train,
                                                       chip_size,
                                                       tile_size,
                                                       source_control[0],
                                                       dataAug='flip,rotate',
                                                       block_mean=np.append(
                                                           [0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_valid,
                                                      chip_size,
                                                      tile_size,
                                                      batch_size,
                                                      dataAug=' ',
                                                      block_mean=np.append(
                                                          [0], img_mean))
Example #13
def main(flags):
    # ------------------------------------------Network---------------------------------------------#
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UnetMTL.UnetModelMTL({'X':X, 'Y':y},
                                                trainable=mode,
                                                model_name=flags.model_name,
                                                input_size=flags.input_size,
                                                batch_size=flags.batch_size,
                                                learn_rate=flags.learning_rate,
                                                decay_step=flags.decay_step,
                                                decay_rate=flags.decay_rate,
                                                epochs=flags.epochs,
                                                start_filter_num=flags.sfn,
                                                source_num=flags.s_num,
                                                source_name=flags.s_name,
                                                source_control=flags.s_control)
    model.create_graph('X', class_num=flags.num_classes, start_filter_num=flags.sfn)

    # ------------------------------------------Dataset Inria---------------------------------------------#
    # create collection for inria
    blCol_inria = uab_collectionFunctions.uabCollection('inria')
    opDetObj_inria = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj_inria)
    rescObj.run(blCol_inria)
    img_mean_inria = blCol_inria.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj_inria = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],
                                                          cSize=flags.input_size,
                                                          numPixOverlap=int(model.get_overlap()),
                                                          extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                          isTrain=True,
                                                          gtInd=3,
                                                          pad=int(model.get_overlap() / 2))
    patchDir_inria = extrObj_inria.run(blCol_inria)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_inria, file_list_inria = uabCrossValMaker.uabUtilGetFolds(patchDir_inria, 'fileList.txt', 'force_tile')
    # use the first 20 tiles for validation
    file_list_train_inria = uabCrossValMaker.make_file_list_by_key(idx_inria, file_list_inria,
                                                                   [i for i in range(20, 136)])
    file_list_valid_inria = uabCrossValMaker.make_file_list_by_key(idx_inria, file_list_inria,
                                                                   [i for i in range(0, 20)])

    with tf.name_scope('image_loader_inria'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train_inria = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_inria, file_list_train_inria,
                                                                flags.input_size, flags.tile_size, flags.batch_size,
                                                                dataAug='flip,rotate',
                                                                block_mean=np.append([0], img_mean_inria))
        # no augmentation needed for validation
        dataReader_valid_inria = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_inria, file_list_valid_inria,
                                                                flags.input_size, flags.tile_size,
                                                                flags.batch_size, dataAug=' ',
                                                                block_mean=np.append([0], img_mean_inria))

    # ------------------------------------------Dataset Road---------------------------------------------#
    # create collection for road
    blCol_road = uab_collectionFunctions.uabCollection('road_5000')
    opDetObj_road = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj_road)
    rescObj.run(blCol_road)
    img_mean_road = blCol_road.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj_road = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],
                                                         cSize=flags.input_size,
                                                         numPixOverlap=int(model.get_overlap()),
                                                         extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                         isTrain=True,
                                                         gtInd=3,
                                                         pad=int(model.get_overlap() / 2))
    patchDir_road = extrObj_road.run(blCol_road)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_road, file_list_road = uabCrossValMaker.uabUtilGetFolds(patchDir_road, 'fileList.txt', 'city')
    # use city 1 for training and cities 0 and 2 for validation
    file_list_train_road = uabCrossValMaker.make_file_list_by_key(idx_road, file_list_road, [1])
    file_list_valid_road = uabCrossValMaker.make_file_list_by_key(idx_road, file_list_road, [0, 2])

    with tf.name_scope('image_loader_road'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train_road = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_road, file_list_train_road,
                                                                flags.input_size, flags.tile_size, flags.batch_size,
                                                                dataAug='flip,rotate',
                                                                block_mean=np.append([0], img_mean_road))
        # no augmentation needed for validation
        dataReader_valid_road = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_road, file_list_valid_road,
                                                                flags.input_size, flags.tile_size,
                                                                flags.batch_size, dataAug=' ',
                                                                block_mean=np.append([0], img_mean_road))

    # ------------------------------------------Train---------------------------------------------#
    start_time = time.time()

    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=[dataReader_train_inria, dataReader_train_road],
              valid_reader=[dataReader_valid_inria, dataReader_valid_road],
              pretrained_model_dir=None,        # train from scratch, no need to load pre-trained model
              isTrain=True,
              img_mean=[img_mean_inria, img_mean_road],
              verb_step=100,                    # print a message every 100 steps
              save_epoch=5,                     # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
Example #14
def eval_tiles():
    blCol = uab_collectionFunctions.uabCollection(ds_name)
    blCol.readMetadata()
    file_list, parent_dir = blCol.getAllTileByDirAndExt([1, 2, 3])
    file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(0)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list, 'tile')
    idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
        None, file_list_truth, 'tile')
    # use the first 6 tiles for validation
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [0, 1, 2, 3, 4, 5])
    file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
        idx_truth, file_list_truth, [0, 1, 2, 3, 4, 5])
    img_mean = blCol.getChannelMeans([1, 2, 3])

    # make the model
    # define place holder
    X = tf.placeholder(tf.float32,
                       shape=[None, input_size[0], input_size[1], 3],
                       name='X')
    y = tf.placeholder(tf.int32,
                       shape=[None, input_size[0], input_size[1], 1],
                       name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop({
        'X': X,
        'Y': y
    },
                          trainable=mode,
                          input_size=input_size,
                          batch_size=batch_size,
                          start_filter_num=32)
    # create graph
    model.create_graph('X', class_num=2)

    # evaluate on each sub folder
    root_dir = r'/home/lab/Documents/bohao/data/aemo_all/align/0584270470{}0_01'
    for fl, p_dir in get_file_list(root_dir):
        for f in fl:
            print('Evaluating {} in {}'.format(f, p_dir))

            pred_save_dir = os.path.join(task_dir, 'aemo_all',
                                         '/'.join(p_dir.split('/')[7:]))
            ersa_utils.make_dir_if_not_exist(pred_save_dir)
            # prepare the reader
            reader = uabDataReader.ImageLabelReader(
                gtInds=[0],
                dataInds=[0],
                nChannels=3,
                parentDir=p_dir,
                chipFiles=[[f]],
                chip_size=input_size,
                tile_size=tile_size,
                batchSize=batch_size,
                block_mean=img_mean,
                overlap=model.get_overlap(),
                padding=np.array(
                    (model.get_overlap() / 2, model.get_overlap() / 2)),
                isTrain=False)
            rManager = reader.readManager

            # run the model
            pred, conf_map = model.run(pretrained_model_dir=model_dir,
                                       test_reader=rManager,
                                       tile_size=tile_size,
                                       patch_size=input_size,
                                       gpu=gpu,
                                       load_epoch_num=75,
                                       best_model=False)
            pred_name = 'pred_{}.png'.format(f.split('.')[0])
            conf_name = 'conf_{}.npy'.format(f.split('.')[0])

            ersa_utils.save_file(os.path.join(pred_save_dir, pred_name),
                                 pred.astype(np.uint8))
            ersa_utils.save_file(os.path.join(pred_save_dir, conf_name),
                                 conf_map)
Example #15
# create graph
model.create_graph('X', class_num=2)

# load data
large_tile = ['{}_RGB.jpg'.format(tile_ids[tile_cnt])]
ct_tile = [
    os.path.join(gt_dir,
                 '{}_RGB.jpg'.format(tile_ids[tile_cnt].replace('_', '-')))
]
reader = uabDataReader.ImageLabelReader(gtInds=[0],
                                        dataInds=[0],
                                        nChannels=3,
                                        parentDir=data_path,
                                        chipFiles=[large_tile],
                                        chip_size=input_size,
                                        tile_size=tile_size,
                                        batchSize=batch_size,
                                        block_mean=img_mean,
                                        overlap=model.get_overlap(),
                                        padding=np.array(
                                            (model.get_overlap() / 2,
                                             model.get_overlap() / 2)),
                                        isTrain=False)
test_reader = reader.readManager
reader2 = uabDataReader.ImageLabelReader(gtInds=[0],
                                         dataInds=[0],
                                         nChannels=3,
                                         parentDir=data_path,
                                         chipFiles=[ct_tile],
                                         chip_size=input_size,
                                         tile_size=tile_size,
                                         batchSize=batch_size,
Example #16
for YEAR in [15, 16, 17]:
    data_dir = r'/hdd/lbnl/SDall{}/data/Original_Tiles'.format(YEAR)
    file_list = natsorted(glob(os.path.join(data_dir, '*.tif')))
    for img_file in file_list:
        file_name = os.path.splitext(os.path.basename(img_file))[0]
        print('Evaluating {} at year 20{}...'.format(file_name, YEAR))

        # prepare the reader
        reader = uabDataReader.ImageLabelReader(
            gtInds=[0],
            dataInds=[0],
            nChannels=3,
            parentDir=data_dir,
            chipFiles=[[os.path.basename(img_file)]],
            chip_size=input_size,
            tile_size=tile_size,
            batchSize=batch_size,
            block_mean=img_mean,
            overlap=model.get_overlap(),
            padding=np.array(
                (model.get_overlap() / 2, model.get_overlap() / 2)),
            isTrain=False)
        rManager = reader.readManager

        # run the model
        pred = model.run(pretrained_model_dir=model_dir,
                         test_reader=rManager,
                         tile_size=tile_size,
                         patch_size=input_size,
                         gpu=gpu,
                         load_epoch_num=EPOCH_NUM,
Example #17
# use uabCrossValMaker to get fileLists for training and validation
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                  'force_tile')
# use the first 6 tiles for validation
file_list_train = uabCrossValMaker.make_file_list_by_key(
    idx, file_list, [i for i in range(6, 37)])
file_list_valid = uabCrossValMaker.make_file_list_by_key(
    idx, file_list, [i for i in range(0, 6)])

with tf.name_scope('image_loader'):
    # GT has no mean to subtract, append a 0 for block mean
    dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_train,
                                                      chip_size,
                                                      tile_size,
                                                      batch_size,
                                                      dataAug='flip,rotate',
                                                      block_mean=np.append(
                                                          [0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_valid,
                                                      chip_size,
                                                      tile_size,
                                                      batch_size,
                                                      dataAug=' ',
                                                      block_mean=np.append(
                                                          [0], img_mean))
Example #18
        },
                                                  trainable=mode,
                                                  input_size=input_size,
                                                  batch_size=5,
                                                  start_filter_num=32)
        # create graph
        model.create_graph('X', class_num=2)
        pad = np.array((model.get_overlap() / 2, model.get_overlap() / 2))

        # prepare the reader
        reader = uabDataReader.ImageLabelReader(gtInds=[0],
                                                dataInds=[0],
                                                nChannels=3,
                                                parentDir=parent_dir,
                                                chipFiles=[file_name],
                                                chip_size=input_size,
                                                tile_size=tile_size,
                                                batchSize=batch_size,
                                                block_mean=img_mean,
                                                overlap=model.get_overlap(),
                                                padding=pad,
                                                isTrain=False)
        rManager = reader.readManager

        # run the model
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            model.load(model_dir, sess)
            model.model_name = model_dir.split('/')[-1]
            result = model.test('X', sess, rManager)
        image_pred = uabUtilreader.un_patchify_shrink(
Example #19
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    z = tf.placeholder(tf.float32, shape=[None, flags.z_dim], name='z')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UGAN.UGAN({
        'X': X,
        'Z': z
    },
                                     trainable=mode,
                                     model_name=flags.model_name,
                                     input_size=flags.input_size,
                                     batch_size=flags.batch_size,
                                     learn_rate=flags.learning_rate,
                                     decay_step=flags.decay_step,
                                     decay_rate=flags.decay_rate,
                                     epochs=flags.epochs,
                                     start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # Inria GT has values 0 and 255; divide by 255 to map them to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get the mean of the RGB channels

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],  # extract the RGB channels plus the rescaled GT (channel 4)
        cSize=flags.input_size,  # patch size of 572*572
        numPixOverlap=int(model.get_overlap() / 2),  # 92-pixel overlap between patches
        extSave=['jpg', 'jpg', 'jpg',
                 'png'],  # save RGB files as jpg and GT as png
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap())  # pad around the tiles
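    # note: with 572*572 input patches a valid-convolution U-Net outputs 388*388, so
    # model.get_overlap() should return 184, which gives the 92-pixel patch overlap above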
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'force_tile')
    # use tiles with force_tile index 0-5 for validation, the rest (6-36) for training
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(0, 6)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Z', flags.n_train, flags.n_valid,
                       flags.input_size, uabRepoPaths.modelPath)
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
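The main() above reads its settings from a flags namespace (model_name, input_size, tile_size, batch_size, z_dim, learning_rate, decay_step, decay_rate, epochs, sfn, num_classes, n_train, n_valid); GPU comes from a module-level constant. A minimal argparse driver along the lines below could supply such a namespace; every default value here is a placeholder, not the setting of the original experiment.

import argparse

def read_flag():
    # sketch of a flag parser for main(); all defaults below are illustrative only
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', type=str, default='inria_ugan', dest='model_name')
    parser.add_argument('--input-size', type=int, nargs=2, default=[572, 572], dest='input_size')
    parser.add_argument('--tile-size', type=int, nargs=2, default=[5000, 5000], dest='tile_size')
    parser.add_argument('--batch-size', type=int, default=5, dest='batch_size')
    parser.add_argument('--z-dim', type=int, default=100, dest='z_dim')
    parser.add_argument('--learning-rate', type=float, default=1e-4, dest='learning_rate')
    parser.add_argument('--decay-step', type=int, default=60, dest='decay_step')
    parser.add_argument('--decay-rate', type=float, default=0.1, dest='decay_rate')
    parser.add_argument('--epochs', type=int, default=100, dest='epochs')
    parser.add_argument('--sfn', type=int, default=32, dest='sfn')
    parser.add_argument('--num-classes', type=int, default=2, dest='num_classes')
    parser.add_argument('--n-train', type=int, default=8000, dest='n_train')
    parser.add_argument('--n-valid', type=int, default=1000, dest='n_valid')
    return parser.parse_args()

if __name__ == '__main__':
    main(read_flag())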
Example #20
def main(flags, weight_dict):
    shift_dict_path = os.path.join(flags.weight_dir, 'shift_dict.pkl')
    shift_dict = ersa_utils.load_file(shift_dict_path)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    Z = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='Z')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelDTDA({'X': X, 'Z': Z, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', 'Z', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)          # Inria GT has values 0 and 255; divide by 255 to map them to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])         # get the mean of the RGB channels

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],
                                                    cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use tiles with force_tile index 0-5 for validation, the rest (6-36) as the source-domain training set
    file_list_source = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])

    # AIOI dataset (target domain, selected by flags.leave_city)
    blCol = uab_collectionFunctions.uabCollection(CITY_LIST[flags.leave_city])

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],
                                                    cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir_target = extrObj.run(blCol)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir_target, 'fileList.txt', 'force_tile')
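    # use the first 5 tiles (index 0-4) of the target city for adaptation training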
    file_list_target = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(5)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_source = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_source,
                                                           flags.input_size, flags.tile_size,
                                                           flags.batch_size, dataAug='flip,rotate',
                                                           block_mean=np.append([0], img_mean))
        # target-domain reader; it also uses augmentation since it feeds the adaptation training
        dataReader_target = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_target, file_list_target,
                                                           flags.input_size, flags.tile_size,
                                                           flags.batch_size, dataAug='flip,rotate',
                                                           block_mean=np.append([0], img_mean))

        # validation reader on the held-out source tiles
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid,
                                                          flags.input_size, flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Y', 'Z', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='domain_baseline/contorl_valid', lam=flags.lam)
    model.load_source_weights(flags.model_dir, shift_dict, gpu=flags.GPU)
    model.run(train_reader_source=dataReader_source,
              train_reader_target=dataReader_target,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,        # train from scratch, no need to load pre-trained model
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                    # print a progress message every 100 steps
              save_epoch=5,                     # save the model every 5 epochs
              gpu=flags.GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
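main() in this example reads the same kind of flags namespace as Example #19, plus weight_dir, model_dir, leave_city, lam and GPU, and it assumes a module-level CITY_LIST naming the target-city collections. A hedged extension of the read_flag() sketch above (all defaults and paths are placeholders, not original experiment settings):

import argparse

def read_flag_dtda():
    # extra flags read by main() in this example, on top of those in the Example #19 sketch;
    # every default and path below is a placeholder
    parser = argparse.ArgumentParser()
    parser.add_argument('--weight-dir', type=str, default='PATH/TO/shift_dict_dir', dest='weight_dir')
    parser.add_argument('--model-dir', type=str, default='PATH/TO/pretrained_source_model', dest='model_dir')
    parser.add_argument('--leave-city', type=int, default=0, dest='leave_city')
    parser.add_argument('--lam', type=float, default=0.1, dest='lam')
    parser.add_argument('--GPU', type=int, default=0, dest='GPU')
    # ... add the Example #19 flags (input_size, tile_size, batch_size, etc.) here as well
    return parser.parse_args()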