Example #1
def check_res50_features(model_name, GPU=0):
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(GPU)
    import keras

    input_size_fit = (224, 224)

    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])

    if model_name == 'deeplab':
        input_size = (321, 321)
        overlap = 0
    else:
        input_size = (572, 572)
        overlap = 184
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],
        cSize=input_size,
        numPixOverlap=overlap,
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=overlap // 2)
    patchDir = extrObj.run(blCol)

    file_name = os.path.join(patchDir, 'fileList.txt')
    with open(file_name, 'r') as f:
        files = f.readlines()

    res50 = keras.applications.resnet50.ResNet50(include_top=True,
                                                 weights='imagenet')
    pred_list = np.zeros(len(files))
    for file_cnt, file_line in enumerate(tqdm(files)):
        img = np.zeros((input_size[0], input_size[1], 3), dtype=np.uint8)
        for cnt, file in enumerate(file_line.strip().split(' ')[:3]):
            img[:, :, cnt] = imageio.imread(os.path.join(patchDir,
                                                         file)) - img_mean[cnt]

        img = np.expand_dims(crop_center(img, input_size_fit[0],
                                         input_size_fit[1]),
                             axis=0)

        fc1000 = res50.predict(img).reshape((-1, )).tolist()
        pred_list[file_cnt] = np.argmax(fc1000)
    return pred_list
Example #2
def make_res50_features(model_name, task_dir, GPU=0, force_run=False):
    tf.reset_default_graph()
    feature_file_name = os.path.join(task_dir, 'res50_atlanta_{}.csv'.format(model_name))
    patch_file_name = os.path.join(task_dir, 'res50_atlanta_{}.txt'.format(model_name))

    if model_name == 'deeplab':
        input_size = (321, 321)
        overlap = 0
    else:
        input_size = (572, 572)
        overlap = 184
    blCol = uab_collectionFunctions.uabCollection('atlanta')
    img_mean = blCol.getChannelMeans([0, 1, 2])
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],
                                                    cSize=input_size,
                                                    numPixOverlap=overlap,
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=overlap // 2)
    patchDir = extrObj.run(blCol)

    if not os.path.exists(feature_file_name) or not os.path.exists(patch_file_name) or force_run:
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = '{}'.format(GPU)
        import keras

        input_size_fit = (224, 224)

        file_name = os.path.join(patchDir, 'fileList.txt')
        with open(file_name, 'r') as f:
            files = f.readlines()

        res50 = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet')
        fc2048 = keras.models.Model(inputs=res50.input, outputs=res50.get_layer('flatten_1').output)
        with open(feature_file_name, 'w+') as f:
            with open(patch_file_name, 'w+') as f2:
                for file_line in tqdm(files):
                    patch_name = file_line.split('.')[0][:-5]
                    img = np.zeros((input_size[0], input_size[1], 3), dtype=np.uint8)
                    for cnt, file in enumerate(file_line.strip().split(' ')[:3]):
                        img[:, :, cnt] = imageio.imread(os.path.join(patchDir, file)) - img_mean[cnt]

                    img = np.expand_dims(crop_center(img, input_size_fit[0], input_size_fit[1]), axis=0)

                    fc1000 = fc2048.predict(img).reshape((-1,)).tolist()
                    writer = csv.writer(f, lineterminator='\n')
                    writer.writerow(['{}'.format(x) for x in fc1000])
                    f2.write('{}\n'.format(patch_name))

    return feature_file_name, patch_file_name, input_size[0], patchDir
Example #3
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    z = tf.placeholder(tf.float32, shape=[None, flags.z_dim], name='z')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UGAN.UGAN({
        'X': X,
        'Z': z
    },
                                     trainable=mode,
                                     model_name=flags.model_name,
                                     input_size=flags.input_size,
                                     batch_size=flags.batch_size,
                                     learn_rate=flags.learning_rate,
                                     decay_step=flags.decay_step,
                                     decay_rate=flags.decay_rate,
                                     epochs=flags.epochs,
                                     start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],  # extract all 4 channels
        cSize=flags.input_size,  # patch size as 572*572
        numPixOverlap=int(model.get_overlap() / 2),  # overlap as 92
        extSave=['jpg', 'jpg', 'jpg',
                 'png'],  # save rgb files as jpg and gt as png
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'force_tile')
    # use first 5 tiles for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(0, 6)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Z', flags.n_train, flags.n_valid,
                       flags.input_size, uabRepoPaths.modelPath)
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 step(sample)
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example #4
                          best_model=best_model)
                self.model_name = pretrained_model_dir.split('/')[-1]
                result = self.test('X', sess, test_reader)
            image_pred = uabUtilreader.un_patchify_shrink(
                result, [tile_size[0] + pad, tile_size[1] + pad],
                tile_size,
                patch_size, [patch_size[0] - pad, patch_size[1] - pad],
                overlap=pad)
            ersa_utils.save_file(
                os.path.join(SAVE_DIR, '{}.png'.format(tile_name)),
                (image_pred[:, :, 1] * 255).astype(np.uint8))
            return util_functions.get_pred_labels(image_pred) * truth_val


# settings
blCol = uab_collectionFunctions.uabCollection(ds_name)
blCol.readMetadata()
file_list, parent_dir = blCol.getAllTileByDirAndExt([1, 2, 3, 4])
file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(0)
idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list,
                                                  'force_tile')
idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
    None, file_list_truth, 'force_tile')
# use tiles 1-3 for validation
file_list_valid = uabCrossValMaker.make_file_list_by_key(
    idx, file_list, [1, 2, 3])
file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
    idx_truth, file_list_truth, [1, 2, 3])
img_mean = blCol.getChannelMeans([2, 3, 4])
img_mean = np.concatenate([np.array([0]), img_mean])
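# prepend a 0 so the block mean has one entry per loaded channel (the first channel gets no mean subtracted)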
Example #5
def main(flags):
    city_list = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
    flags.llh_file_dir = flags.llh_file_dir.format(flags.finetune_city)
    weight = np.load(flags.llh_file_dir)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)          # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], # extract all 4 channels
                                                    cSize=flags.input_size, # patch size as 572*572
                                                    numPixOverlap=int(model.get_overlap()/2),  # overlap as 92
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'], # save rgb files as jpg and gt as png
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap()) # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
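    # combine the two fold indices into a single key: tile_index * 10 + city_index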
    # train on the other cities' tiles 6-36; validate on the finetune city's tiles 1-5
    filter_train = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i != flags.finetune_city and j > 5:
                filter_train.append(j * 10 + i)
            elif i == flags.finetune_city and j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)

    dataReader_train = uabDataReader.ImageLabelReaderPatchSampleControl(
        [3], [0, 1, 2], patchDir, file_list_train, flags.input_size, flags.batch_size,
        weight, dataAug='flip,rotate', block_mean=np.append([0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                      flags.batch_size, dataAug=' ',
                                                      block_mean=np.append([0], img_mean), batch_code=0)

    # train
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_Domain_Selection')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.pred_model_dir.format(flags.finetune_city),
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a message every 100 step(sample)
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size
              )

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_DeepLabV2.DeeplabV3({'X':X, 'Y':y},
                                               trainable=mode,
                                               model_name=flags.model_name,
                                               input_size=flags.input_size,
                                               batch_size=flags.batch_size,
                                               learn_rate=flags.learning_rate,
                                               decay_step=flags.decay_step,
                                               decay_rate=flags.decay_rate,
                                               epochs=flags.epochs,
                                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection for the 'spca' dataset
    blCol = uab_collectionFunctions.uabCollection('spca')
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtrRand([0, 1, 2, 3],  # extract all 4 channels
                                                        cSize=flags.input_size,  # patch size as 572*572
                                                        numPerTile=256,  # extract 256 random patches per tile
                                                        extSave=['png', 'jpg', 'jpg', 'jpg'],  # save gt as png and rgb as jpg
                                                        isTrain=True,
                                                        gtInd=0,
                                                        pad=model.get_overlap(),
                                                        name='Rand{}'.format(flags.run_id))  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use patch indices 0-249 for training and 250-499 for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 250)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(250, 500)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.res_dir,
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                    # print a message every 100 step(sample)
              save_epoch=5,                     # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = myUnetModelCrop({
        'X': X,
        'Y': y
    },
                            trainable=mode,
                            model_name=flags.model_name,
                            input_size=flags.input_size,
                            batch_size=flags.batch_size,
                            learn_rate=flags.learning_rate,
                            decay_step=flags.decay_step,
                            decay_rate=flags.decay_rate,
                            epochs=flags.epochs,
                            start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
    print(img_mean)

    img_dir, task_dir = sis_utils.get_task_img_folder()
    save_dir = os.path.join(task_dir, 'bihar_patches')
    ersa_utils.make_dir_if_not_exist(save_dir)
    files, par_dir = blCol.getAllTileByDirAndExt([0, 1, 2, 3])
    resize_patches(files, par_dir, flags.input_size, save_dir)

    patchDir = save_dir

    # make data reader
    # use the last 5 tiles for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'tile')
    #assert len(file_list) == flags.n_train + flags.n_valid
    file_list_train = [a for a in file_list[:45]]
    file_list_valid = [a for a in file_list[-5:]]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            None,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          None,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='{}'.format(flags.ds_name))
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # train from scratch, no pre-trained model to load
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 steps
        save_epoch=200,  # save the model every 200 epochs
        gpu=GPU,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
    def train(self,
              x_name,
              y_name,
              y_name_2,
              n_train,
              sess,
              summary_writer,
              n_valid=1000,
              train_reader=None,
              train_reader_building=None,
              valid_reader=None,
              image_summary=None,
              verb_step=100,
              save_epoch=5,
              img_mean=np.array((0, 0, 0), dtype=np.float32),
              continue_dir=None,
              valid_iou=False):
        # define summary operations
        valid_cross_entropy_summary_op = tf.summary.scalar(
            'xent_validation', self.valid_cross_entropy)
        valid_iou_summary_op = tf.summary.scalar('iou_validation',
                                                 self.valid_iou)
        valid_image_summary_op = tf.summary.image('Validation_images_summary',
                                                  self.valid_images,
                                                  max_outputs=10)

        if continue_dir is not None and os.path.exists(continue_dir):
            self.load(continue_dir, sess)
            gs = sess.run(self.global_step)
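            # convert the restored global step back into an (epoch, step) position so training resumes where it left off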
            start_epoch = int(np.ceil(gs / n_train * self.bs))
            start_step = gs - int(start_epoch * n_train / self.bs)
        else:
            start_epoch = 0
            start_step = 0

        cross_entropy_valid_min = np.inf
        iou_valid_max = 0
        for epoch in range(start_epoch, self.epochs):
            start_time = time.time()
            for step in range(start_step, n_train, self.bs):
                X_batch, y_batch = train_reader.readerAction(sess)
                _, self.global_step_value = sess.run(
                    [self.optimizer[0], self.global_step],
                    feed_dict={
                        self.inputs[x_name]: X_batch,
                        self.inputs[y_name]: y_batch,
                        self.trainable: True
                    })
                X_batch, _, building_truth = train_reader_building.readerAction(
                    sess)
                _, self.global_step_value = sess.run(
                    [self.optimizer[1], self.global_step],
                    feed_dict={
                        self.inputs[x_name]: X_batch,
                        self.inputs[y_name_2]: building_truth,
                        self.trainable: True
                    })
                if self.global_step_value % verb_step == 0:
                    pred_train, step_cross_entropy, step_summary = sess.run(
                        [self.pred, self.loss, self.summary],
                        feed_dict={
                            self.inputs[x_name]: X_batch,
                            self.inputs[y_name]: y_batch,
                            self.inputs[y_name_2]: building_truth,
                            self.trainable: False
                        })
                    summary_writer.add_summary(step_summary,
                                               self.global_step_value)
                    print(
                        'Epoch {:d} step {:d}\tcross entropy = {:.3f}'.format(
                            epoch, self.global_step_value, step_cross_entropy))
            # validation
            cross_entropy_valid_mean = []
            iou_valid_mean = np.zeros(2)
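            # accumulate the two IoU components (numerator and denominator) across validation batches; their ratio is the mean IoU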
            for step in range(0, n_valid, self.bs):
                X_batch_val, y_batch_val = valid_reader.readerAction(sess)
                pred_valid, cross_entropy_valid, iou_valid = sess.run(
                    [self.pred, self.loss, self.loss_iou],
                    feed_dict={
                        self.inputs[x_name]: X_batch_val,
                        self.inputs[y_name]: y_batch_val,
                        self.trainable: False
                    })
                cross_entropy_valid_mean.append(cross_entropy_valid)
                iou_valid_mean += iou_valid
            cross_entropy_valid_mean = np.mean(cross_entropy_valid_mean)
            iou_valid_mean = iou_valid_mean[0] / iou_valid_mean[1]
            duration = time.time() - start_time
            if valid_iou:
                print('Validation IoU: {:.3f}, duration: {:.3f}'.format(
                    iou_valid_mean, duration))
            else:
                print('Validation cross entropy: {:.3f}, duration: {:.3f}'.
                      format(cross_entropy_valid_mean, duration))
            valid_cross_entropy_summary = sess.run(
                valid_cross_entropy_summary_op,
                feed_dict={self.valid_cross_entropy: cross_entropy_valid_mean})
            valid_iou_summary = sess.run(
                valid_iou_summary_op,
                feed_dict={self.valid_iou: iou_valid_mean})
            summary_writer.add_summary(valid_cross_entropy_summary,
                                       self.global_step_value)
            summary_writer.add_summary(valid_iou_summary,
                                       self.global_step_value)
            if valid_iou:
                if iou_valid_mean > iou_valid_max:
                    iou_valid_max = iou_valid_mean
                    saver = tf.train.Saver(var_list=tf.global_variables(),
                                           max_to_keep=1)
                    saver.save(sess, '{}/best_model.ckpt'.format(self.ckdir))

            else:
                if cross_entropy_valid_mean < cross_entropy_valid_min:
                    cross_entropy_valid_min = cross_entropy_valid_mean
                    saver = tf.train.Saver(var_list=tf.global_variables(),
                                           max_to_keep=1)
                    saver.save(sess, '{}/best_model.ckpt'.format(self.ckdir))

            if image_summary is not None:
                valid_image_summary = sess.run(
                    valid_image_summary_op,
                    feed_dict={
                        self.valid_images:
                        image_summary(X_batch_val[:, :, :, :3], y_batch_val,
                                      pred_valid, img_mean)
                    })
                summary_writer.add_summary(valid_image_summary,
                                           self.global_step_value)

            if epoch % save_epoch == 0:
                saver = tf.train.Saver(var_list=tf.global_variables(),
                                       max_to_keep=1)
                saver.save(sess,
                           '{}/model_{}.ckpt'.format(self.ckdir, epoch),
                           global_step=self.global_step)

                # remake gts
                blCol = uab_collectionFunctions.uabCollection('inria')
                blCol.readMetadata()
                file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])
                file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(
                    4)
                idx, file_list = uabCrossValMaker.uabUtilGetFolds(
                    None, file_list, 'force_tile')
                idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
                    None, file_list_truth, 'force_tile')

                # use first 5 tiles for validation
                city_list = [
                    'austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna'
                ]
                file_list_valid = uabCrossValMaker.make_file_list_by_key(
                    idx,
                    file_list, [i for i in range(0, 6)],
                    filter_list=[
                        'bellingham', 'bloomington', 'sfo', 'tyrol-e',
                        'innsbruck'
                    ] +
                    [a for a in city_list if a != city_list[flags.leave_city]])
                file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
                    idx_truth,
                    file_list_truth, [i for i in range(0, 6)],
                    filter_list=[
                        'bellingham', 'bloomington', 'sfo', 'tyrol-e',
                        'innsbruck'
                    ] +
                    [a for a in city_list if a != city_list[flags.leave_city]])
                img_mean = blCol.getChannelMeans([0, 1, 2])

                self.evaluate(file_list_valid,
                              file_list_valid_truth,
                              parent_dir,
                              parent_dir_truth, (572, 572), (5000, 5000),
                              50,
                              img_mean,
                              self.ckdir,
                              flags.GPU,
                              save_result_parent_dir='domain_selection',
                              ds_name='inria_{}'.format(epoch),
                              best_model=False)
                result_dir = os.path.join(uabRepoPaths.evalPath,
                                          self.model_name)
                make_gt(result_dir, flags.pred_file_dir, 'iter')
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    if flags.start_layer < 10:
        flags.model_name += '_up{}'.format(flags.start_layer)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],
                                                    cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['png', 'jpg', 'jpg', 'jpg'],
                                                    isTrain=True,
                                                    gtInd=0,
                                                    pad=int(model.get_overlap()//2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use first 5 tiles for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'tile')
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [0, 1, 2, 3])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [4, 5])
    file_list_train = file_list_train[-int(len(file_list_train)*flags.portion):]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_train, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3], patchDir, file_list_valid, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug=' ', block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    if flags.start_layer >= 10:
        model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                           loss_type='xent', par_dir='aemo/{}'.format(flags.ds_name))
    else:
        model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                           loss_type='xent', par_dir='aemo/{}'.format(flags.ds_name),
                           train_var_filter=['layerup{}'.format(i) for i in range(flags.start_layer, 10)])
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.model_dir,   # fine-tune from the pre-trained model in flags.model_dir
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                        # print a message every 100 step(sample)
              save_epoch=5,                         # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
Example #10
def compute_missing_percentage(rgb):
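    # pixels with all three channels equal to 255 are treated as missing; return their fraction (assumes a square image) and a valid-pixel mask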
    stack = np.sum(rgb, axis=2)

    def white_pixel(a):
        if a == 255 * 3:
            return 1
        else:
            return 0

    map_func = np.vectorize(white_pixel)
    mpixel_map = map_func(stack)
    return np.sum(mpixel_map) / (mpixel_map.shape[0]**2), 1 - mpixel_map


blCol = uab_collectionFunctions.uabCollection('road')
opDetObj = bPreproc.uabOperTileDivide(
    255)  # road GT has values 0 and 255, map them back to 0 and 1
# [0] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                 'Map GT to (0, 1)', [0],
                                                 opDetObj)
rescObj.run(blCol)
blCol.readMetadata()
img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr(
    [1, 2, 3, 4],  # extract all 4 channels
    cSize=(572, 572),  # patch size as 572*572
    numPixOverlap=46,  # half overlap for this
Example #11
def main(flags):
    # ------------------------------------------Network---------------------------------------------#
    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UnetMTL.UnetModelMTL({'X':X, 'Y':y},
                                                trainable=mode,
                                                model_name=flags.model_name,
                                                input_size=flags.input_size,
                                                batch_size=flags.batch_size,
                                                learn_rate=flags.learning_rate,
                                                decay_step=flags.decay_step,
                                                decay_rate=flags.decay_rate,
                                                epochs=flags.epochs,
                                                start_filter_num=flags.sfn,
                                                source_num=flags.s_num,
                                                source_name=flags.s_name,
                                                source_control=flags.s_control)
    model.create_graph('X', class_num=flags.num_classes, start_filter_num=flags.sfn)

    # ------------------------------------------Dataset Inria---------------------------------------------#
    # create collection for inria
    blCol_inria = uab_collectionFunctions.uabCollection('inria')
    opDetObj_inria = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj_inria)
    rescObj.run(blCol_inria)
    img_mean_inria = blCol_inria.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj_inria = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],
                                                          cSize=flags.input_size,
                                                          numPixOverlap=int(model.get_overlap()),
                                                          extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                          isTrain=True,
                                                          gtInd=3,
                                                          pad=int(model.get_overlap() / 2))
    patchDir_inria = extrObj_inria.run(blCol_inria)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_inria, file_list_inria = uabCrossValMaker.uabUtilGetFolds(patchDir_inria, 'fileList.txt', 'force_tile')
    # use the first 20 tiles for validation
    file_list_train_inria = uabCrossValMaker.make_file_list_by_key(idx_inria, file_list_inria,
                                                                   [i for i in range(20, 136)])
    file_list_valid_inria = uabCrossValMaker.make_file_list_by_key(idx_inria, file_list_inria,
                                                                   [i for i in range(0, 20)])

    with tf.name_scope('image_loader_inria'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train_inria = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_inria, file_list_train_inria,
                                                                flags.input_size, flags.tile_size, flags.batch_size,
                                                                dataAug='flip,rotate',
                                                                block_mean=np.append([0], img_mean_inria))
        # no augmentation needed for validation
        dataReader_valid_inria = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_inria, file_list_valid_inria,
                                                                flags.input_size, flags.tile_size,
                                                                flags.batch_size, dataAug=' ',
                                                                block_mean=np.append([0], img_mean_inria))

    # ------------------------------------------Dataset Road---------------------------------------------#
    # create collection for road
    blCol_road = uab_collectionFunctions.uabCollection('road_5000')
    opDetObj_road = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj_road)
    rescObj.run(blCol_road)
    img_mean_road = blCol_road.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj_road = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],
                                                         cSize=flags.input_size,
                                                         numPixOverlap=int(model.get_overlap()),
                                                         extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                         isTrain=True,
                                                         gtInd=3,
                                                         pad=int(model.get_overlap() / 2))
    patchDir_road = extrObj_road.run(blCol_road)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_road, file_list_road = uabCrossValMaker.uabUtilGetFolds(patchDir_road, 'fileList.txt', 'city')
    # split by city: city 1 for training, cities 0 and 2 for validation
    file_list_train_road = uabCrossValMaker.make_file_list_by_key(idx_road, file_list_road, [1])
    file_list_valid_road = uabCrossValMaker.make_file_list_by_key(idx_road, file_list_road, [0, 2])

    with tf.name_scope('image_loader_road'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train_road = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_road, file_list_train_road,
                                                                flags.input_size, flags.tile_size, flags.batch_size,
                                                                dataAug='flip,rotate',
                                                                block_mean=np.append([0], img_mean_road))
        # no augmentation needed for validation
        dataReader_valid_road = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_road, file_list_valid_road,
                                                                flags.input_size, flags.tile_size,
                                                                flags.batch_size, dataAug=' ',
                                                                block_mean=np.append([0], img_mean_road))

    # ------------------------------------------Train---------------------------------------------#
    start_time = time.time()

    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent')
    model.run(train_reader=[dataReader_train_inria, dataReader_train_road],
              valid_reader=[dataReader_valid_inria, dataReader_valid_road],
              pretrained_model_dir=None,        # train from scratch, no need to load pre-trained model
              isTrain=True,
              img_mean=[img_mean_inria, img_mean_road],
              verb_step=100,                    # print a message every 100 step(sample)
              save_epoch=5,                     # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
Example #12
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import keras
import numpy as np
import sis_utils
import uabRepoPaths
import uabCrossValMaker
import uab_collectionFunctions
import uab_DataHandlerFunctions
import building_data_reader


if __name__ == '__main__':
    # train on um
    blCol_um = uab_collectionFunctions.uabCollection('um')
    img_mean_um = blCol_um.getChannelMeans([0, 1, 2])
    blCol_um.readMetadata()
    # valid on inria
    blCol_inria = uab_collectionFunctions.uabCollection('inria')
    img_mean_inria = blCol_inria.getChannelMeans([0, 1, 2])
    blCol_inria.readMetadata()

    model_name = 'res50'
    batch_size = 25
    class_num = 2
    n_train = 300
    n_valid = 100
    epoch = 150
    learn_rate = 1e-5
    prescr_name = 'res50'
Example #13
mode = tf.placeholder(tf.bool, name='mode')
model = uabMakeNetwork_DeepLabV2.DeeplabV3({
    'X': X,
    'Y': y
},
                                           trainable=mode,
                                           input_size=input_size,
                                           batch_size=batch_size)
# create graph, same as training
model.create_graph('X', class_num=2)

print('-' * 10 + 'Begin to predict testing set' + '-' * 10)
# ------------------------------------------Dataset Inria Training Set---------------------------------------------#
# make collections
# same as what to do in training
blCol = uab_collectionFunctions.uabCollection('fullInriaTest')
blCol.readMetadata()
file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])
file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(4)
idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list,
                                                  'force_tile')
idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
    None, file_list_truth, 'force_tile')
# build the evaluation file list
file_list_valid = uabCrossValMaker.make_file_list_by_key(
    idx,
    file_list, [i for i in range(0, 5000)],
    filter_list=['bellingham', 'bloomington', 'sfo', 'tyrol-e', 'innsbruck'])
file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
    idx_truth,
    file_list_truth, [i for i in range(0, 5000)],
Example #14
    batch_size=batch_size,  # mini-batch size
    learn_rate=learn_rate,  # learning rate
    decay_step=decay_step,  # learn rate decay after 60 epochs
    decay_rate=decay_rate,  # learn rate decay to 0.1*before
    epochs=epochs,
    source_num=source_num,
    start_filter_num=start_filter_num)  # number of filters at the first layer
model.create_graph('X',
                   class_num=class_num)  # TensorFlow will now draw the graph

#### Inria 1 ########
# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inriaNew2')
opDetObj = bPreproc.uabOperTileDivide(
    255)  # inria GT has value 0 and 255, we map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                 'Map GT to (0, 1)', [3],
                                                 opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
print(blCol.readMetadata())  # now inria collection has 4 channels, the last one is GT with (0,1)

# extract patches
extrObj = uab_DataHandlerFunctions.\
    uabPatchExtr([0, 1, 2, 4],                              # extract all 4 channels
                 cSize=chip_size,                           # patch size as 572*572
def eval_tiles():
    blCol = uab_collectionFunctions.uabCollection(ds_name)
    blCol.readMetadata()
    file_list, parent_dir = blCol.getAllTileByDirAndExt([1, 2, 3])
    file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(0)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list, 'tile')
    idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
        None, file_list_truth, 'tile')
    # use first 5 tiles for validation
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [0, 1, 2, 3, 4, 5])
    file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
        idx_truth, file_list_truth, [0, 1, 2, 3, 4, 5])
    img_mean = blCol.getChannelMeans([1, 2, 3])

    # make the model
    # define place holder
    X = tf.placeholder(tf.float32,
                       shape=[None, input_size[0], input_size[1], 3],
                       name='X')
    y = tf.placeholder(tf.int32,
                       shape=[None, input_size[0], input_size[1], 1],
                       name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop({
        'X': X,
        'Y': y
    },
                          trainable=mode,
                          input_size=input_size,
                          batch_size=batch_size,
                          start_filter_num=32)
    # create graph
    model.create_graph('X', class_num=2)

    # evaluate on each sub folder
    root_dir = r'/home/lab/Documents/bohao/data/aemo_all/align/0584270470{}0_01'
    for fl, p_dir in get_file_list(root_dir):
        for f in fl:
            print('Evaluating {} in {}'.format(f, p_dir))

            pred_save_dir = os.path.join(task_dir, 'aemo_all',
                                         '/'.join(p_dir.split('/')[7:]))
            ersa_utils.make_dir_if_not_exist(pred_save_dir)
            # prepare the reader
            reader = uabDataReader.ImageLabelReader(
                gtInds=[0],
                dataInds=[0],
                nChannels=3,
                parentDir=p_dir,
                chipFiles=[[f]],
                chip_size=input_size,
                tile_size=tile_size,
                batchSize=batch_size,
                block_mean=img_mean,
                overlap=model.get_overlap(),
                padding=np.array(
                    (model.get_overlap() / 2, model.get_overlap() / 2)),
                isTrain=False)
            rManager = reader.readManager

            # run the model
            pred, conf_map = model.run(pretrained_model_dir=model_dir,
                                       test_reader=rManager,
                                       tile_size=tile_size,
                                       patch_size=input_size,
                                       gpu=gpu,
                                       load_epoch_num=75,
                                       best_model=False)
            pred_name = 'pred_{}.png'.format(f.split('.')[0])
            conf_name = 'conf_{}.npy'.format(f.split('.')[0])

            ersa_utils.save_file(os.path.join(pred_save_dir, pred_name),
                                 pred.astype(np.uint8))
            ersa_utils.save_file(os.path.join(pred_save_dir, conf_name),
                                 conf_map)
Example #16
def main(flags):
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop({
        'X': X,
        'Y': y
    },
                          trainable=mode,
                          model_name=flags.model_name,
                          input_size=flags.input_size,
                          batch_size=flags.batch_size,
                          learn_rate=flags.learning_rate,
                          decay_step=flags.decay_step,
                          decay_rate=flags.decay_rate,
                          epochs=flags.epochs,
                          start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 3],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=int(model.get_overlap() // 2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # get fold indices by city and by tile
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(
        patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                   'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
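    # combine the two fold indices into a single key: tile_index * 10 + city_index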

    # use only the held-out city: tiles 1-3 for validation, the rest for training
    filter_train = []
    filter_valid = []
    for i in range(4):
        for j in range(1, 20):
            if i == flags.leave_city and j < 4:
                filter_valid.append(j * 10 + i)
            elif i == flags.leave_city and j >= 4:
                filter_train.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_valid)

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            None,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          None,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='{}'.format(flags.ds_name),
                       pos_weight=flags.pos_weight)
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # train from scratch, no pre-trained model to load
        isTrain=True,
        img_mean=img_mean[1:],
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
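# A minimal sketch of how the flags namespace consumed by the training routine above could be
# defined with argparse; only the attribute names come from the code, the defaults below are
# illustrative assumptions.
import argparse


def make_flags():
    parser = argparse.ArgumentParser()
    parser.add_argument('--ds-name', dest='ds_name', default='inria')
    parser.add_argument('--model-name', dest='model_name', default='unet')
    parser.add_argument('--input-size', dest='input_size', type=int, nargs=2, default=[572, 572])
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=5)
    parser.add_argument('--learning-rate', dest='learning_rate', type=float, default=1e-4)
    parser.add_argument('--decay-step', dest='decay_step', type=int, default=60)
    parser.add_argument('--decay-rate', dest='decay_rate', type=float, default=0.1)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--sfn', type=int, default=32)
    parser.add_argument('--num-classes', dest='num_classes', type=int, default=2)
    parser.add_argument('--leave-city', dest='leave_city', type=int, default=0)
    parser.add_argument('--n-train', dest='n_train', type=int, default=8000)
    parser.add_argument('--n-valid', dest='n_valid', type=int, default=1000)
    parser.add_argument('--pos-weight', dest='pos_weight', type=float, default=1.0)
    return parser.parse_args()
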
def main(flags):
    copyfile(
        os.path.join(
            flags.pred_file_dir,
            '1iter_pred_building_binary_{}.npy'.format(flags.leave_city)),
        os.path.join(
            flags.pred_file_dir,
            'iter_pred_building_binary_{}.npy'.format(flags.leave_city)))
    flags.pred_file_dir = os.path.join(
        flags.pred_file_dir,
        'iter_pred_building_binary_{}.npy'.format(flags.leave_city))
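    # the copy above presumably seeds the iteratively updated prediction file from the
    # first-iteration output; flags.pred_file_dir now points at that .npy file so the
    # building-percentage reader below can consume it via percent_file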

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    y2 = tf.placeholder(tf.float32, shape=[None, 1], name='y2')
    mode = tf.placeholder(tf.bool, name='mode')
    model = UnetModelCrop_Iter({
        'X': X,
        'Y': y,
        'Y2': y2
    },
                               trainable=mode,
                               model_name=flags.model_name,
                               input_size=flags.input_size,
                               batch_size=flags.batch_size,
                               learn_rate=flags.learning_rate,
                               decay_step=flags.decay_step,
                               decay_rate=flags.decay_rate,
                               epochs=flags.epochs,
                               start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],  # extract all 4 channels
        cSize=flags.input_size,  # patch size as 572*572
        numPixOverlap=int(model.get_overlap() / 2),  # overlap as 92
        extSave=['jpg', 'jpg', 'jpg',
                 'png'],  # save rgb files as jpg and gt as png
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'city')
    # leave-one-city-out: train on the other four cities, validate on the held-out city
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(5) if i != flags.leave_city])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [flags.leave_city])

    dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_train,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug='flip,rotate',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=0)
    dataReader_train_building = uabDataReader.ImageLabelReaderBuildingCustom(
        [3], [0, 1, 2],
        patchDir,
        file_list_valid,
        flags.input_size,
        flags.batch_size,
        dataAug='flip,rotate',
        percent_file=flags.pred_file_dir,
        block_mean=np.append([0], img_mean),
        patch_prob=0.1,
        binary=True)
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_valid,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug=' ',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=0)

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       'Y2',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='Inria_Domain_LOO')
    model.run(
        train_reader=dataReader_train,
        train_reader_building=dataReader_train_building,
        valid_reader=dataReader_valid,
        pretrained_model_dir=flags.finetune_dir,  # fine-tune from this pre-trained model
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example #18
0
model = uabMakeNetwork_UnetMTL.UnetModelMTL({
    'X': X,
    'Y': y
},
                                            trainable=mode,
                                            input_size=input_size,
                                            batch_size=5,
                                            source_num=2,
                                            source_name=['INRIA', 'ROAD'])
# create graph, same as training
model.create_graph('X', class_num=[2, 2])
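# class_num is given per source: with source_num=2 and source_name=['INRIA', 'ROAD'] the
# multi-task model presumably builds one binary (2-class) output head for each of the two datasets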

# ------------------------------------------Dataset Inria---------------------------------------------#
# make collections
# same as what to do in training
blCol = uab_collectionFunctions.uabCollection('inria')
blCol.readMetadata()
file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])
file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(4)
idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list,
                                                  'force_tile')
idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
    None, file_list_truth, 'force_tile')
# use first 5 tiles for validation
file_list_valid = uabCrossValMaker.make_file_list_by_key(
    idx,
    file_list, [i for i in range(0, 6)],
    filter_list=['bellingham', 'bloomington', 'sfo', 'tyrol-e', 'innsbruck'])
file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
    idx_truth,
    file_list_truth, [i for i in range(0, 6)],
Example #19
0
import os

import tensorflow as tf

import ersa_utils
import uab_collectionFunctions
from visualize import visualize_utils
from bohaoCustom import uabMakeNetwork_DeepLabV2

# settings
data_dir = r'/media/ei-edl01/data/uab_datasets/bihar_building/data/Original_Tiles'
input_size = (300, 300)
batch_size = 1

# set gpu to use
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# build the collection and get the channel means
blCol = uab_collectionFunctions.uabCollection('bihar_building')
blCol.readMetadata()
img_mean = blCol.getChannelMeans([0, 1, 2])

# make the model
# define place holder
model_dir = r'/media/ei-edl01/user/bh163/models/bihar_building2'
X = tf.placeholder(tf.float32,
                   shape=[None, input_size[0], input_size[1], 3],
                   name='X')
y = tf.placeholder(tf.int32,
                   shape=[None, input_size[0], input_size[1], 1],
                   name='y')
mode = tf.placeholder(tf.bool, name='mode')
model = uabMakeNetwork_DeepLabV2.DeeplabV3({
    'X': X,
Example #20
0
    'Y': y
},
                                          trainable=mode,
                                          model_name=model_name,
                                          input_size=chip_size,
                                          batch_size=batch_size,
                                          learn_rate=learn_rate,
                                          decay_step=decay_step,
                                          decay_rate=decay_rate,
                                          epochs=epochs,
                                          start_filter_num=start_filter_num)
model.create_graph('X', class_num=2)

# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('um')
opDetObj = bPreproc.uabOperTileDivide(255)  # the GT has values 0 and 255, map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                 'Map GT to (0, 1)', [3],
                                                 opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
print(blCol.readMetadata())  # the collection now has 4 channels, the last one is GT with values (0, 1)

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr(
    [0, 1, 2, 4],  # extract all 4 channels
    cSize=chip_size,  # patch size as 572*572
Example #21
0
tile_size = [5000, 5000]
util_functions.tf_warn_level(3)
city_list = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
model_type = 'deeplab'

if model_type == 'unet':
    patch_size = [572, 828, 1084, 1340, 1596, 1852, 2092, 2332, 2636]
    model_dir = r'/hdd6/Models/UNET_rand_gird/UnetCrop_spca_aug_grid_0_PS(572, 572)_BS5_EP100_LR0.0001_DS60_DR0.1_SFN32'
else:
    patch_size = [520, 736, 832, 1088, 1344, 1600, 1856, 2096, 2640]
    model_dir = r'/hdd6/Models/DeepLab_rand_grid/DeeplabV3_spca_aug_grid_0_PS(321, 321)_BS5_EP100_LR1e-05_DS40_DR0.1_SFN32'

for ps in patch_size:
    input_size = [ps, ps]
    tf.reset_default_graph()
    blCol = uab_collectionFunctions.uabCollection('spca')
    blCol.readMetadata()
    file_list, parent_dir = blCol.getAllTileByDirAndExt([1, 2, 3])
    file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(0)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list,
                                                      'force_tile')
    idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
        None, file_list_truth, 'force_tile')
    # use tiles 250-499 for validation
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(250, 500)])
    file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
        idx_truth, file_list_truth, [i for i in range(250, 500)])
    img_mean = blCol.getChannelMeans([1, 2, 3])

    # make the model
Example #22
0
util_functions.tf_warn_level(3)
model_type = 'deeplab'

if model_type == 'unet':
    model_dir = r'/hdd6/Models/Inria_decay/UnetCrop_inria_decay_0_PS(572, 572)_BS5_' \
                r'EP100_LR0.0001_DS60.0_DR0.1_SFN32'
    patch_size = [572, 828, 1084, 1340, 1596, 1852, 2092, 2332, 2636]
else:
    patch_size = [520, 736, 832, 1088, 1344, 1600, 1856, 2096, 2640]
    model_dir = r'/hdd6/Models/Inria_decay/DeeplabV3_inria_decay_0_PS(321, 321)_BS5_' \
                r'EP100_LR1e-05_DS40.0_DR0.1_SFN32'

for ps in patch_size:
    input_size = [ps, ps]
    tf.reset_default_graph()
    blCol = uab_collectionFunctions.uabCollection('aioi')
    blCol.readMetadata()
    file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])
    file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(3)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(None, file_list,
                                                      'force_tile')
    idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
        None, file_list_truth, 'force_tile')
    # use first 5 tiles for validation
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(0, 6)])
    file_list_valid_truth = uabCrossValMaker.make_file_list_by_key(
        idx_truth, file_list_truth, [i for i in range(0, 6)])
    img_mean = blCol.getChannelMeans([0, 1, 2])

    # make the model
def main(flags):
    city_dict = {
        'austin': 0,
        'chicago': 1,
        'kitsap': 2,
        'tyrol-w': 3,
        'vienna': 4
    }
    city_alpha = [0.2, 0.5, 0.1, 0.1, 0.1]
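    # city_dict maps each Inria training city to an index; city_alpha is presumably the per-city
    # sampling weight used by ImageLabelReaderCitySampleControl below, e.g. with these values a
    # chicago patch is drawn five times as often as a kitsap patch (the weights sum to 1.0)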

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({
        'X': X,
        'Y': y
    },
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'force_tile')
    # use first 5 tiles for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(0, 6)])

    dataReader_train = uabDataReader.ImageLabelReaderCitySampleControl(
        [3], [0, 1, 2],
        patchDir,
        file_list_train,
        flags.input_size,
        flags.batch_size,
        city_dict,
        city_alpha,
        dataAug='flip,rotate',
        block_mean=np.append([0], img_mean))
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReaderCitySampleControl(
        [3], [0, 1, 2],
        patchDir,
        file_list_valid,
        flags.input_size,
        flags.batch_size,
        city_dict,
        city_alpha,
        dataAug=' ',
        block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='Inria_Domain')
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=PRED_DIR,
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size,
    )

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
# settings
gpu = 1
batch_size = 5
input_size = [572, 572]
tile_size = [1500, 1500]
util_functions.tf_warn_level(3)

model_list = [
    r'UnetGAN_V3ShrinkRGB_road_gan_0_PS(572, 572)_BS20_EP60_LR0.0001_1e-06_1e-06_DS60.0_60.0_60.0_DR0.1_0.1_0.1',
]

for model_dir in model_list:
    for load_epoch in range(0, 60, 10):
        model_dir = os.path.join(r'/hdd6/Models/Inria_GAN/Road/', model_dir)
        tf.reset_default_graph()
        blCol = uab_collectionFunctions.uabCollection('Mass_road')
        blCol.readMetadata()
        file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])
        file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(4)
        img_mean = blCol.getChannelMeans([0, 1, 2])

        # use uabCrossValMaker to get fileLists for training and validation
        idx, file_list = uabCrossValMaker.uabUtilGetFolds(
            None, file_list, 'city')
        idx_truth, file_list_truth = uabCrossValMaker.uabUtilGetFolds(
            None, file_list_truth, 'city')
        file_list_test = uabCrossValMaker.make_file_list_by_key(
            idx, file_list, [0])
        file_list_test_truth = uabCrossValMaker.make_file_list_by_key(
            idx_truth, file_list_truth, [0])
                                          trainable=mode,
                                          input_size=chip_size,
                                          batch_size=batch_size,
                                          learn_rate=learn_rate,
                                          decay_step=decay_step,
                                          decay_rate=decay_rate,
                                          epochs=epochs,
                                          start_filter_num=start_filter_num)
model.create_graph('X', class_num=2)
# If you only want to load a specific set of layers, call load_weights() here;
# don't pass pretrained_model_dir and layers2keep to model.run() as well, that would cause problems
model.load_weights(pre_trained_model_dir, layers2keep)
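# For example (hypothetical values, following the comma-separated layer-list format used for
# layers2load elsewhere in these snippets), keeping only the encoder layers of a pre-trained
# cropping U-Net could look like:
#   model.load_weights(pre_trained_model_dir, '1,2,3,4,5')
# and the later model.run(...) call would then receive pretrained_model_dir=None so the weights
# are not loaded a second time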

# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inria')
opDetObj = bPreproc.uabOperTileDivide(
    255)  # inria GT has value 0 and 255, we map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                 'Map GT to (0, 1)', [3],
                                                 opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
print(blCol.readMetadata())  # now inria collection has 4 channels, the last one is GT with (0, 1)

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr(
    [0, 1, 2, 4],  # extract all 4 channels
    cSize=chip_size,  # patch size as 572*572
    trainable=mode,  # controls whether you're training or not
    input_size=chip_size,  # input size to the network, same as the extracted patch size
    model_name=model_name,
    batch_size=batch_size,  # mini-batch size
    learn_rate=learn_rate,  # learning rate
    decay_step=decay_step,  # decay the learning rate after this many epochs
    decay_rate=decay_rate,  # learning rate decays to 0.1x its previous value
    epochs=epochs,  # total number of epochs to run
    backbone_name=backbone,  # backbone network of the encoder
    start_filter_num=start_filter_num)  # number of filters at the first layer
model.create_graph('X',
                   class_num=class_num)  # TensorFlow will now draw the graph

# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('deepglobe')
opDetObj = bPreproc.uabOperTileDivide(255)  # the GT has values 0 and 255, map it back to 0 and 1
# [3] is the channel id of GT
rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                 'Map GT to (0, 1)', [3],
                                                 opDetObj)
rescObj.run(blCol)
img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
print(blCol.readMetadata())  # the collection now has 4 channels, the last one is GT with values (0, 1)

# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr(
    [0, 1, 2, 4],  # extract all 4 channels
    cSize=chip_size,  # patch size as 572*572
Example #27
0
def main(flags):
    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_ASSN.SSAN_UNet({
        'X': X,
        'Y': y
    },
                                          trainable=mode,
                                          model_name=flags.model_name,
                                          input_size=flags.input_size,
                                          batch_size=flags.batch_size,
                                          learn_rate=flags.learning_rate,
                                          decay_step=flags.decay_step,
                                          decay_rate=flags.decay_rate,
                                          epochs=flags.epochs,
                                          start_filter_num=flags.sfn,
                                          lada=flags.lada,
                                          slow_iter=flags.slow_iter)
    model.create_graph(['X', 'Y'], class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(
        patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                   'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
    # hold out the fine-tune city: its first 5 tiles for validation, its remaining tiles as the
    # target-domain training stream, and tiles 6-36 of the other cities as the source stream
    filter_train = []
    filter_train_target = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i != flags.finetune_city and j > 5:
                filter_train.append(j * 10 + i)
            elif i == flags.finetune_city and j > 5:
                filter_train_target.append(j * 10 + i)
            elif i == flags.finetune_city and j <= 5:
                filter_valid.append(j * 10 + i)
    # build the source / target / validation file lists
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train)
    file_list_train_target = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train_target)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_valid)

    dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_train,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug='flip,rotate',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=0)
    dataReader_train_target = uabDataReader.ImageLabelReader(
        [3], [0, 1, 2],
        patchDir,
        file_list_train_target,
        flags.input_size,
        flags.batch_size,
        dataAug='flip,rotate',
        block_mean=np.append([0], img_mean),
        batch_code=0)
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_valid,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug=' ',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=0)

    # train
    start_time = time.time()
    model.load_weights(flags.pred_model_dir.format(flags.finetune_city),
                       layers2load='1,2,3,4,5,6,7,8,9',
                       load_final_layer=True)
    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='Inria_GAN/SSAN')
    model.run(
        train_reader=dataReader_train,
        train_reader_source=dataReader_train,
        train_reader_target=dataReader_train_target,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # weights were already loaded above via load_weights()
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=flags.save_epoch,  # save the model every flags.save_epoch epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example #28
0
    try:
        gt_orig = imageio.imread(gt_file)
        gt_dest = imageio.imread(os.path.join(dest_dir, file_id.replace('_', '-')+'_GT.png'))
        rgb_orig = imageio.imread(os.path.join(orig_rgb_dir, rgb_orig_fname))
        rgb_dest = imageio.imread(os.path.join(dest_dir, file_id.replace('_', '-')+'_RGB.tif'))

        plt.figure(figsize=(12, 9))
        ax1 = plt.subplot(221)
        plt.imshow(gt_orig)
        plt.title('gt orig')
        plt.axis('off')
        ax2 = plt.subplot(222, sharex=ax1, sharey=ax1)
        plt.imshow(rgb_orig)
        plt.title('rgb orig')
        plt.axis('off')
        ax3 = plt.subplot(223, sharex=ax1, sharey=ax1)
        plt.imshow(gt_dest)
        plt.title('gt dest')
        plt.axis('off')
        ax4 = plt.subplot(224, sharex=ax1, sharey=ax1)
        plt.imshow(rgb_dest)
        plt.title('rgb dest')
        plt.axis('off')
        plt.tight_layout()
        plt.show()
    except OSError:
        continue'''

blCol = uab_collectionFunctions.uabCollection('gbdx2')
blCol.readMetadata()
Example #29
0
import tensorflow as tf
import uab_collectionFunctions
from nn import nn_utils
from bohaoCustom import uabMakeNetwork_UNet

if __name__ == '__main__':
    # settings
    nn_utils.tf_warn_level(3)
    model_dir = r'/hdd6/Models/UNET_rand_gird/UnetCrop_inria_aug_grid_0_PS(572, 572)_BS5_' \
                r'EP100_LR0.0001_DS60_DR0.1_SFN32'
    gpu = 0
    batch_size = 1
    input_size = [572, 572]
    #for city_name in ['Arlington', 'Atlanta', 'Austin', 'DC', 'NewHaven', 'NewYork', 'SanFrancisco', 'Seekonk']:
    for city_name in ['Norfolk']:
        tf.reset_default_graph()

        blCol = uab_collectionFunctions.uabCollection(city_name)
        blCol.readMetadata()
        file_list, parent_dir = blCol.getAllTileByDirAndExt([0, 1, 2])
        file_list_truth, parent_dir_truth = blCol.getAllTileByDirAndExt(3)
        img_mean = blCol.getChannelMeans([0, 1, 2])

        # make the model
        # define place holder
        X = tf.placeholder(tf.float32,
                           shape=[None, input_size[0], input_size[1], 3],
                           name='X')
        Z = tf.placeholder(tf.float32,
                           shape=[None, input_size[0], input_size[1], 3],
                           name='Z')
        y = tf.placeholder(tf.int32,
                           shape=[None, input_size[0], input_size[1], 1],
Example #30
0
def main(flags, weight_dict):
    path_to_save = os.path.join(flags.weight_dir, 'shift_dict.pkl')
    shift_dict = ersa_utils.load_file(path_to_save)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    Z = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='Z')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelDTDA({'X': X, 'Z': Z, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', 'Z', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)          # inria GT has value 0 and 255, we map it back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],
                                                    cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    # use first 5 tiles for validation
    file_list_source = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])

    # AIOI dataset
    blCol = uab_collectionFunctions.uabCollection(CITY_LIST[flags.leave_city])

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 3],
                                                    cSize=flags.input_size,
                                                    numPixOverlap=int(model.get_overlap()),
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap() // 2)
    patchDir_target = extrObj.run(blCol)
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir_target, 'fileList.txt', 'force_tile')
    file_list_target = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(5)])

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_source = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_source, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))
        # target-domain reader, same augmentation as the source reader
        dataReader_target = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir_target, file_list_target, flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size, dataAug='flip,rotate',
                                                          block_mean=np.append([0], img_mean))

        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                           flags.tile_size,
                                                           flags.batch_size, dataAug='flip,rotate',
                                                           block_mean=np.append([0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X', 'Y', 'Z', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='domain_baseline/contorl_valid', lam=flags.lam)
    model.load_source_weights(flags.model_dir, shift_dict, gpu=flags.GPU)
    model.run(train_reader_source=dataReader_source,
              train_reader_target=dataReader_target,
              valid_reader=dataReader_valid,
              pretrained_model_dir=None,        # weights were already loaded via load_source_weights()
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,                    # print a progress message every 100 steps
              save_epoch=5,                     # save the model every 5 epochs
              gpu=flags.GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))